Dataset fields and length statistics:
  query_id            stringlengths  32 .. 32
  query               stringlengths  9 .. 4.01k
  positive_passages   listlengths    1 .. 1
  negative_passages   listlengths    88 .. 101
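Each record below follows this schema: a query_id, a free-text query, a single positive passage, and a list of negative passages, where every passage carries a docid, a score, the code text, and a title. As a minimal sketch of how such records could be consumed, assuming the rows are exported as JSON Lines and that the score string can be read as a retrieval similarity (the file name and the hard-negative selection are illustrative assumptions, not part of this dump):

```python
import json

# Assumed export of the rows shown below, one JSON object per line.
with open("code_retrieval_rows.jsonl", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        query_id = record["query_id"]            # 32-character identifier
        query = record["query"]                  # docstring- or signature-style query
        positives = record["positive_passages"]  # exactly one passage per record
        negatives = record["negative_passages"]  # roughly 88 to 101 passages per record

        # Sort negatives by their stored score (descending) and keep a few of the
        # highest-scoring ones as hard negatives alongside the positive passage.
        hard_negatives = sorted(negatives, key=lambda p: float(p["score"]), reverse=True)[:5]
        for passage in [positives[0]] + hard_negatives:
            print(query_id, passage["docid"], passage["score"], passage["title"])
```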
query_id: c73d7cd464aed602cee2f59592fdf968
query: Looks for allocations made within the current element and declares them all at once.
[ { "docid": "d93c02f04f3fc52c8ae6861292124c81", "score": "0.0", "text": "def _writeAllocations( self, element, implicitsOnly=False ):\n\t\t# FIXME: assert(element.dataflow) fails sometimes\n\t\tif element and element.dataflow:\n\t\t\t# NOTE: Implicits can sometimes be declared twice\n\t\t\tdeclared = {}\n\t\t\tfor s in self._walkDataFlowSlots(element.dataflow):\n\t\t\t\tif s.isArgument() or s.isImported() or s.isEnvironment(): continue\n\t\t\t\tif s.isImplicit() or (not implicitsOnly):\n\t\t\t\t\tdeclared[s.getName()] = True\n\t\t\tif declared:\n\t\t\t\tyield \"var {0};\".format(\", \".join(declared.keys()))", "title": "" } ]
[ { "docid": "2dd29c606647048c35bc68cae3a29e72", "score": "0.60576046", "text": "def allocate(self):\n self.allocatables.sort(key=lambda a: len(a.constraint), reverse=True)\n allocatable = self.allocatables.pop()\n for cn in allocatable.constraint:\n if self.slots[cn] == -1:\n self.slots[cn] = allocatable.index\n self.allocation[allocatable.index] = cn\n if self.allocatables:\n for allocatable in self.allocatables:\n allocatable.constraint.discard(cn)\n return self.allocate()\n return self.allocation\n return None", "title": "" }, { "docid": "3bdc0bc6d4c13e63401bcd3ac86c5e21", "score": "0.59368944", "text": "def testInitialAllocation(self):\n\n locked_slots = {}\n adjusted_slots = {}\n self.allocater.allocate(locked_slots)", "title": "" }, { "docid": "6084407a17a61bd328f6a431bc10b242", "score": "0.5933635", "text": "def allocate(self):\n self.allocator.allocate()", "title": "" }, { "docid": "ab34fcefe8e35673647beebbdfaf1b5c", "score": "0.57973063", "text": "def allocateSize(self, allocated_size, requested_size, cells):\n for child in cells:\n cell_size = SizeAllocation((0, 0), allocated_size.size)\n child.allocateSize(cell_size)", "title": "" }, { "docid": "f353075c14e81723eb58fd1226a3e4c2", "score": "0.57514775", "text": "def allocate_early(self, core):\n if self.is_dec:\n for slice_idx in range(self.N_slices):\n # actually doesn't matter what the slice is, always allocate 64xDO\n start_addr = core.MM.allocate_dec(self.dimensions_out)\n self.slice_start_addrs += [start_addr]", "title": "" }, { "docid": "acb840264115d9cc5912053038e766a6", "score": "0.5648172", "text": "def mem_alloc():\n pass", "title": "" }, { "docid": "3069bd804e6c5c19eb020d6ae78479fb", "score": "0.561468", "text": "def memory_plan(self, feed_shapes):\n\n assert (self.ctx is not None)\n self.node_to_arr_map = {}\n for node, shape in self.node_to_shape_map.items():\n self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx)", "title": "" }, { "docid": "5a9ed6c76e2d38db027d09797978ceb1", "score": "0.5597726", "text": "def _set_chunk_lists(self):\n # Backend\n vallocs = self._get_virtualallocations()\n self._check_sizes(vallocs)\n chunks, backend_free_chunks = self._get_chunks()\n self._check_sizes(chunks)\n self._check_sizes(backend_free_chunks)\n\n # chunk == self._backend_committed\n self._backend_allocs = vallocs | chunks\n self._check_sizes(self._backend_allocs)\n\n # free_lists == free_chunks.\n # either get the _get_free_lists() or use free_chunks. Its the same.\n if False:\n log.warning('Duplicate walking of free chunks')\n free_lists = self._get_freelists()\n # need to cut sizeof(HEAP_ENTRY) from address and size\n # FIXME ? 
why report calculation up to here ?\n sublen = ctypes.sizeof(self._heap_module.HEAP_ENTRY)\n backend_free_chunks = set([(addr + sublen, size - sublen) for addr, size in free_lists])\n if len(backend_free_chunks) != len(free_lists):\n log.warning('Weird: len(free_chunks) != len(free_lists)')\n self._check_sizes(backend_free_chunks)\n\n # adjust the user allocations based on if there is a frontend or not\n if self.get_heap().FrontEndHeapType == 0:\n self._user_allocs = self._backend_allocs\n self._user_free_chunks = backend_free_chunks\n else:\n front_allocs, front_free_chunks = self._get_frontend_chunks()\n self._check_sizes(front_allocs)\n self._check_sizes(front_free_chunks)\n # point to header\n #front_allocs2 = set([(addr + sublen, size - sublen) for addr, size in front_allocs])\n #front_free_chunks2 = set([(addr + sublen, size - sublen) for addr, size in front_free_chunks])\n # points to chunk\n front_allocs2 = set([(addr, size) for addr, size in front_allocs])\n front_free_chunks2 = set([(addr, size) for addr, size in front_free_chunks])\n self._check_sizes(front_allocs2)\n self._check_sizes(front_free_chunks2)\n\n # print \"backend_allocs\", hex(sum([s for a, s in self._backend_allocs]))\n # print \"frontend_allocs\", hex(sum([s for a, s in front_allocs2]))\n # print \"backend_free_chunks\", hex(sum([s for a, s in backend_free_chunks]))\n # print \"frontend_free_chunks\", hex(sum([s for a, s in front_free_chunks2]))\n # import code\n # code.interact(local=locals())\n\n if self.get_heap().FrontEndHeapType == 1:\n # LAL: reports vallocs and (_get_chunks-lal) as committed\n # reports lal | free_list as free\n # TODO + overhead\n # FIXME , use same code than LFH ?\n self._user_allocs = self._backend_allocs - front_free_chunks2\n self._user_free_chunks = front_free_chunks2 | backend_free_chunks\n elif self.get_heap().FrontEndHeapType == 2:\n # free chunks are backend free chunks + frontend free chunks\n self._user_free_chunks = backend_free_chunks | front_free_chunks2\n # we only keep backend allocations that are not used by LFH\n backend_allocs2 = set()\n for start, size in self._backend_allocs:\n end = start + size\n lfh_block = False\n for front_addr, front_addr_size in front_allocs2:\n if start <= front_addr < end:\n lfh_block = True\n break\n if not lfh_block:\n backend_allocs2.add((start, size))\n self._user_allocs = backend_allocs2 | front_allocs2\n\n return", "title": "" }, { "docid": "93cc370f8ba561ec4ae32ff38c0a6620", "score": "0.5587381", "text": "def _execute_flexible_allocate(self, extra_allocations):\n with FecTimer(\"Zoned routing info allocator\", TimerWork.OTHER):\n self._data_writer.set_routing_infos(\n flexible_allocate(extra_allocations))", "title": "" }, { "docid": "4e5f3c57a4e88001f3ec5fc8eb60e662", "score": "0.5585303", "text": "def _allocate_memory(self):\n debug(\"Allocating memory for %s (%s)\" % (self.name, str(self.shape)))\n self._data_object = CMemory(self.shape, dtype=self.dtype)\n if self._first_touch:\n first_touch(self)\n else:\n self.data.fill(0)", "title": "" }, { "docid": "4e5f3c57a4e88001f3ec5fc8eb60e662", "score": "0.5585303", "text": "def _allocate_memory(self):\n debug(\"Allocating memory for %s (%s)\" % (self.name, str(self.shape)))\n self._data_object = CMemory(self.shape, dtype=self.dtype)\n if self._first_touch:\n first_touch(self)\n else:\n self.data.fill(0)", "title": "" }, { "docid": "3cab6b3299cd17f71225e9443dbf126a", "score": "0.5545891", "text": "def _alloc(capacity):\n return (capacity * ctypes.py_object)()", "title": "" }, { "docid": 
"6ef981a04585a25f04d076d1321ae156", "score": "0.5541805", "text": "def __reportToMemory(self):\n self.__reportCnt += 1\n if not self.__reportCnt % 2:\n self.add_property('name', 'facetr_num')\n self.add_property('num', len(self.__faceList['pics'][self.__curIdx]))\n self.store_observation()", "title": "" }, { "docid": "b3ab6c3e1f9d7ca7eed2846ab2ce917e", "score": "0.55390424", "text": "def _grow(self) -> None:\n\n table = HashTable(self.capacity * 2)\n self.prime_index = table.prime_index\n i = 0\n while i < self.capacity:\n element = self.table[i]\n if element and not element.deleted:\n table._insert(element.key, element.value)\n i += 1\n\n self.table = table.table\n self.capacity = self.capacity * 2", "title": "" }, { "docid": "7313b26bf615309de793f05fbd4795d2", "score": "0.5523385", "text": "def _init_awg_memory(self) -> None:\n new_slots = self._memory_manager.get_uninitialized_slots()\n if len(new_slots) == 0:\n return\n\n self.log.info(f'Reserving awg memory for {len(new_slots)} slots')\n\n zeros: np.ndarray = np.zeros(0)\n wave = None\n total_size = 0\n total_duration = 0.0\n for slot in new_slots:\n start = time.perf_counter()\n if len(zeros) != slot.size or wave is None:\n zeros = np.zeros(slot.size, float)\n wave = keysightSD1.SD_Wave()\n result_parser(wave.newFromArrayDouble(keysightSD1.SD_WaveformTypes.WAVE_ANALOG, zeros))\n super().load_waveform(wave, slot.number)\n duration = time.perf_counter() - start\n # self.log.debug(f'uploaded {slot.size} in {duration*1000:5.2f} ms ({slot.size/duration/1e6:5.2f} MSa/s)')\n total_duration += duration\n total_size += slot.size\n\n self.log.info(f'Awg memory reserved: {len(new_slots)} slots, {total_size/1e6} MSa in '\n f'{total_duration*1000:5.2f} ms ({total_size/total_duration/1e6:5.2f} MSa/s)')", "title": "" }, { "docid": "9c418e45184457e1bdb87f21a0164fae", "score": "0.5520931", "text": "def Allocator(self):\n self.allocation = {} \n if self.allocation_type == 'random':\n for filename in self.trace.filename_freq.keys():\n if random.random() < float(self.hdd_size) / self.total_size:\n self.allocation[filename] = 'hdd'\n else:\n self.allocation[filename] = 'ssd'\n else:\n total_filesizes = sum(self.trace.filename_sizemap.values())\n prop_ssdsize = float(self.ssd_size) / self.total_size * total_filesizes\n filename_by_freq = [ (item[1], item[0]) for item in self.trace.filename_freq.items() ]\n filename_by_freq.sort(reverse = True)\n\n buffer_ssd = prop_ssdsize\n for filename in filename_by_freq:\n if buffer_ssd > 0:\n self.allocation[filename[1]] = 'ssd'\n buffer_ssd -= self.trace.filename_sizemap[filename[1]]\n #print 'buffer ', buffer_ssd\n else:\n self.allocation[filename[1]] = 'hdd'\n #print 'hdd'\n #print self.allocation", "title": "" }, { "docid": "1c8bb9b53dbe2249b4a8a00a49e9c52e", "score": "0.55166066", "text": "def _grow(self):\n tempCapacity = self.capacity * self.factor\n self.store = self._copyValues(tempCapacity)\n self.capacity = tempCapacity\n # reset the startIndex because all the elements are now pushed all the\n # way to the left\n self.startIndex = 0\n self.endIndex = self.size", "title": "" }, { "docid": "46abcb0011a70e8125f569320ed2bc38", "score": "0.54713094", "text": "def allocate(module):\n allocations = read_allocations(module)\n result = update_allocation(module, allocations)\n if result['changed'] and not module.check_mode:\n write_allocations(module, allocations)\n return result", "title": "" }, { "docid": "67d0f71417dbb106d0a4f7b2083d17a3", "score": "0.5457601", "text": "def _allocate_hook(self, target, 
iface_name, allocdb):\n pass", "title": "" }, { "docid": "bded51e63eeb19235ef5ad961dedc31f", "score": "0.54558456", "text": "def allocate_early(self, core):\n # for now, only MMWeights has allocate early, to allocate decoders before transforms\n pass", "title": "" }, { "docid": "366ab2f0f771e0981daefb252919005d", "score": "0.5447618", "text": "def alloc_record_space(self, record):\n if Config.page['allocAlgorithm'] == \"first fit\":\n assert(False) # TODO:\n elif Config.page['allocAlgorithm'] == \"best fit\":\n return self.alloc_best_fit(record)", "title": "" }, { "docid": "20826d0235a4bbabe8c2db85ee41c9a9", "score": "0.5419611", "text": "def extend_allocation(self, new_total_run_time):", "title": "" }, { "docid": "f194e914f815006270c60b0962bb4289", "score": "0.54087174", "text": "def HoistAllocates() -> tvm.IRModule:\n return _ffi_api.HoistAllocates()", "title": "" }, { "docid": "fdb07085379cee75e4ab6ce0469ed5de", "score": "0.54081607", "text": "def _free(self):\n raise Exception('Not implemented yet')", "title": "" }, { "docid": "efaea6fbe5aa623e51c5f31722315326", "score": "0.5392928", "text": "def _allocate(num):\n start_index = len(_volumes)\n total = start_index + num\n _volumes.resize(total, refcheck=False)\n _surface_area.resize(total, refcheck=False)\n _diffs.resize(total, refcheck=False)\n _states.resize(total, refcheck=False)\n return start_index", "title": "" }, { "docid": "84b5c01bff35fcd94c94e1e9659c517a", "score": "0.5392819", "text": "def ingresar(self, elem):\n\t\tself.elementos.append(elem)\n\t\tself.__upheap__(len(self.elementos) - 1)", "title": "" }, { "docid": "8bd97a89190aec522cca9c5016e87838", "score": "0.5392161", "text": "def allocate(self, core):\n pass", "title": "" }, { "docid": "96a0c4324c40afff761c3dc0d33e4586", "score": "0.53816265", "text": "def allocate(self, name, col, row):\n occupied = self._occupied(exclude=name)\n for icol, irow in self._space_walk(int(col), int(row)):\n key = f\"{icol}x{irow}\"\n if key not in occupied:\n logger.debug(f'Allocatted position', name=name, pos=f\"{col}x{row}\", occupied=occupied, allocated=f\"{icol}x{irow}\")\n return dict(col=icol, row=irow)", "title": "" }, { "docid": "8f42e0e7c787dfae79c957f99fd5799b", "score": "0.53720295", "text": "def test_get_allocators(self):\n pass", "title": "" }, { "docid": "45f2c7b0096405e224e73eebcefdd56d", "score": "0.536454", "text": "def __init__(self):\n self.heap = []\n self.count = 0", "title": "" }, { "docid": "04e49e7e3858e93a5033300a48e1df7e", "score": "0.5362765", "text": "def _new_allocations(context, resource_provider, consumer, resources):\n allocations = []\n for resource_class in resources:\n allocation = alloc_obj.Allocation(\n resource_provider=resource_provider,\n consumer=consumer,\n resource_class=resource_class,\n used=resources[resource_class])\n allocations.append(allocation)\n return allocations", "title": "" }, { "docid": "7eb053360f1d3f3abcfbe0e565d525e4", "score": "0.5360065", "text": "def pack(self):\n import gc\n gc.collect()\n refs = [ref for ref in self._node_arr]\n\n vals = [ref() for ref in refs]\n bad = [val is None for val in vals]\n good_vals = [val for b, val in zip(bad, vals) if not b]\n good_refs = [ref for b, ref in zip(bad, refs) if not b]\n\n # renumber all the good nodes\n for i, v in enumerate(good_vals):\n self._arr[i] = v.value\n v._value = i\n\n self._node_arr = good_refs", "title": "" }, { "docid": "ee732f5e076a10275cfd76636779244d", "score": "0.5359779", "text": "def __init__(self):\n self.__free = set()\n self.__occupied = set()", "title": "" 
}, { "docid": "17aa8fdc5f02a70d4a90db32e0b0f548", "score": "0.53538877", "text": "def allocate_mem(self, amount, symbol_table: SymbolTable, comment: str = ''):\n self.allocation_stack[0] += amount\n string = \"addiu $sp, $sp, -%d\" % amount # Reserve word on stack\n self.write_to_instruction(string, 2, comment)\n # Increase offset of previous variables in stack (offset can never be < 0)\n self.offset_stack[0].increase_offset(amount)", "title": "" }, { "docid": "6d637a9f2b8ab6d7a1db15ffb59abe04", "score": "0.53527534", "text": "def allocate(self):\r\n first_row_template = self._compute_chambers_for_first_assay()\r\n self._allocate_all_assays(first_row_template)\r\n\r\n return self.alloc", "title": "" }, { "docid": "d331e15db79319231826d8f1d8009fb8", "score": "0.534888", "text": "def __increase_capacity():\n self.length = int(self.length * 1.5)\n _tmp = []\n for i, x in enumerate(self.container):\n _tmp[i] = x\n self.container = _tmp", "title": "" }, { "docid": "dcdcc53b8f14e49fa551fb3cd99ea2b9", "score": "0.53312516", "text": "def fill_container():\n pass", "title": "" }, { "docid": "621b10a0c6b34aaa6e2172abd2acf0d4", "score": "0.5328139", "text": "def preallocation(self):\n return self._preallocation", "title": "" }, { "docid": "621b10a0c6b34aaa6e2172abd2acf0d4", "score": "0.5328139", "text": "def preallocation(self):\n return self._preallocation", "title": "" }, { "docid": "f491b211e18dba956eb592d5dd020db2", "score": "0.5327866", "text": "def __init__(self):\n self.heap = []", "title": "" }, { "docid": "6601c6e155cc3a34c26eaf12bfe5d72e", "score": "0.53142613", "text": "def _check_and_record_heap_alloc(self, current_path):\n p = self._p\n current_state = current_path.active[0].copy()\n current_addr = current_state.addr\n\n next_path = current_path.copy(copy_states=True).step()\n try:\n next_state = next_path.active[0]\n except:\n assert not next_path.pruned\n return\n\n next_addr = next_state.addr\n\n # skip blocks outside the scope of the considered binary\n if p.loader.find_object_containing(current_addr) != p.loader.main_object:\n return\n\n # we're about to execute an extern call\n # save the parameters, they can be useful when returning\n if self._call_to_ext_function(next_addr):\n self._save_call_pars(next_state, current_addr)\n\n # we just got back from an extern call\n # if it was a heap allocation function, save the allocated size.\n try:\n if self._returned_from_ext_call(current_state, current_addr) and \\\n self._function_is_heap_alloc(current_state):\n self._do_record_heap_alloc(current_state, current_addr)\n except:\n return", "title": "" }, { "docid": "bcdf3e7358ca18ee8e01e3586e9ebfc7", "score": "0.52987576", "text": "def set_listhintfreeList_chunks(self):\r\n for i in range(0, len(self.heaper.pheap.blocks)):\r\n block = self.heaper.pheap.blocks[i]\r\n num_of_freelists = block.ArraySize - block.BaseIndex\r\n self.listhintfreelist_chunks = {}\r\n memory = self.heaper.imm.readMemory( block.ListHints, num_of_freelists * 8 )\r\n for a in range(block.BaseIndex, num_of_freelists):\r\n entry= block.FreeList[a]\r\n bin_entry = a + block.BaseIndex\r\n e=entry[0]\r\n (flink, heap_bucket) = struct.unpack(\"LL\", memory[a * 0x8 : a * 0x8 + 0x8] )\r\n allocations = heap_bucket & 0x0000FFFF\r\n allocations = allocations / 2\r\n freelist_addr = block.ListHints + (bin_entry - block.BaseIndex) * 8\r\n if allocations > 0:\r\n lfhthreshold = 0x11\r\n else:\r\n lfhthreshold = 0x12\r\n amount_needed = lfhthreshold - allocations\r\n\r\n # if we have at least the 1 chunk\r\n 
self.listhintfreelist_chunks[bin_entry] = []\r\n self.listhintfreelist_chunks[bin_entry].append([]) \r\n self.listhintfreelist_chunks[bin_entry].append([])\r\n self.listhintfreelist_chunks[bin_entry][0].append(bin_entry) # bin size\r\n self.listhintfreelist_chunks[bin_entry][0].append(flink) # flink (first chunk in the freelist)\r\n self.listhintfreelist_chunks[bin_entry][0].append(heap_bucket) # heap_bucket (blink)\r\n self.listhintfreelist_chunks[bin_entry][0].append(allocations) # allocations so far\r\n self.listhintfreelist_chunks[bin_entry][0].append(amount_needed) # amount of chunks needed to trigger the LFH\r\n self.listhintfreelist_chunks[bin_entry][0].append(freelist_addr) # freelist address\r\n chunk_list = []\r\n if e[0]:\r\n chunk_list.append(e[0]) # bin chunk address\r\n chunk_list.append(e[1]) # flink\r\n chunk_list.append(e[2]) # blink\r\n encoded_header = self.heaper.imm.readMemory(e[0]-0x8,0x4)\r\n encoded_header = struct.unpack(\"L\", encoded_header)[0] \r\n result = \"%x\" % (encoded_header ^ self.heaper.pheap.EncodingKey)\r\n\r\n # The wtf 'if' statement, i was on hard drugs that night...\r\n if (int(result[len(result)-4:len(result)],16) != a+block.BaseIndex \r\n and (a+block.BaseIndex) != 0x7f and (a+block.BaseIndex) != 0x7ff):\r\n if e[1] == e[2]:\r\n chunk_list.append(\"size, flink and blink overwritten\") # size overwrite\r\n chunk_list.append(True) # we have an overwrite\r\n else:\r\n chunk_list.append(\"size overwritten\") # size overwrite\r\n chunk_list.append(True) # we have an overwrite\r\n elif (a+block.BaseIndex) != 0x7f and (a+block.BaseIndex) != 0x7ff:\r\n if e[1] == e[2]:\r\n chunk_list.append(\"flink and blink overwritten\") # validation\r\n chunk_list.append(True) # chunk is not overwritten \r\n else:\r\n chunk_list.append(\"validated\") # validation\r\n chunk_list.append(False) # chunk is not overwritten\r\n else:\r\n chunk_list.append(\"validated\") # validation\r\n chunk_list.append(False) # chunk is not overwritten \r\n if (int(a+block.BaseIndex) != 0x7f and int(a+block.BaseIndex) != 0x7ff):\r\n chunk_list.append(a+block.BaseIndex) # chunk size\r\n\r\n # Listhint[7f] or Listhint[7ff] we dont know the chunk size\r\n # so lets get it from the chunks header..\r\n elif (int(a+block.BaseIndex) == 0x7f or int(a+block.BaseIndex) == 0x7ff):\r\n decoded_size = int(result[len(result)-4:len(result)],16)\r\n chunk_list.append(decoded_size) # chunk size\r\n self.listhintfreelist_chunks[bin_entry][1].append(chunk_list)\r\n\r\n # ok loop through the rest of the chunks\r\n if len(entry[1:]) > 1:\r\n for e in entry[1:]:\r\n chunk_list = []\r\n chunk_list.append(e[0]) # bin chunk address\r\n chunk_list.append(e[1]) # flink\r\n chunk_list.append(e[2]) # blink\r\n encoded_header = self.heaper.imm.readMemory(e[0]-0x8,0x4)\r\n encoded_header = struct.unpack(\"L\", encoded_header)[0] \r\n result = \"%x\" % (encoded_header ^ self.heaper.pheap.EncodingKey)\r\n \r\n # validate that the decoded chunk size is actually matching the \r\n # bin size (a)+ BaseIndex\r\n if (int(result[len(result)-4:len(result)],16) != a+block.BaseIndex \r\n and (a+block.BaseIndex) != 0x7f and (a+block.BaseIndex) != 0x7ff):\r\n \r\n # if the flink == blink, then it must be overwritten\r\n # because there is no such thing as sentinel nodes in nt 6.x backend\r\n if e[1] == e[2]:\r\n chunk_list.append(\"size, flink and blink overwritten\") # size, flink and blink is overwritten\r\n chunk_list.append(True) # we have an overwrite \r\n else:\r\n chunk_list.append(\"size overwritten\") # size overwritten 
only\r\n chunk_list.append(True) # we have an overwrite \r\n \r\n # Assuming the size is safe, lets just check out the flink and blink \r\n elif (a+block.BaseIndex) != 0x7f and (a+block.BaseIndex) != 0x7ff:\r\n if e[1] == e[2]:\r\n chunk_list.append(\"flink and blink overwritten\") # flink and blink are overwritten only\r\n chunk_list.append(True) # we have an overwrite\r\n \r\n # else, it must be the just the size thats overwritten\r\n else:\r\n chunk_list.append(\"validated\") # Nothing is overwritten\r\n chunk_list.append(False) # chunk is not overwritten\r\n else:\r\n chunk_list.append(\"validated\") # Nothing is overwritten\r\n chunk_list.append(False) # chunk is not overwritten\r\n \r\n # finally append the chunk size for this list\r\n if (int(a+block.BaseIndex) != 0x7f and int(a+block.BaseIndex) != 0x7ff):\r\n chunk_list.append(a+block.BaseIndex) # chunk size\r\n\r\n # Listhint[7f] or Listhint[7ff] we dont know the chunk size\r\n # so we definitely have to get it from the chunks header..\r\n elif (int(a+block.BaseIndex) == 0x7f or int(a+block.BaseIndex) == 0x7ff):\r\n decoded_size = int(result[len(result)-4:len(result)],16)\r\n if e[1] == e[2]:\r\n chunk_list.append(\"flink and blink overwritten\") # size, flink and blink are overwritten only\r\n chunk_list.append(True) # we have an overwrite\r\n elif decoded_size > int(a+block.BaseIndex):\r\n chunk_list.append(\"size overwritten\") # size overwritten only\r\n chunk_list.append(True) # we have an overwrite\r\n elif decoded_size <= int(a+block.BaseIndex):\r\n chunk_list.append(\"validated\") # Nothing is overwritten\r\n chunk_list.append(False) # chunk is not overwritten\r\n # finally append the chunk size for this list\r\n chunk_list.append(decoded_size) # chunk size \r\n self.listhintfreelist_chunks[bin_entry][1].append(chunk_list)\r\n\r\n # build the blocksindex structure\r\n self.blocks_indexes[block.address] = self.listhintfreelist_chunks\r\n return True", "title": "" }, { "docid": "c497f44c73b61d9b6c0fc3ec7514cf25", "score": "0.5289112", "text": "def mem_alloc() -> int:\n pass", "title": "" }, { "docid": "f123ee6ee4189f44b8314cc75abe96ab", "score": "0.52816", "text": "def __init__(self):\n self.heap = [None]", "title": "" }, { "docid": "60d170b3aa656bf647cfefa805d69ea1", "score": "0.52786225", "text": "def _resize(self):\r\n self.array_size = len(self.node_array) + self.allocate_step\r\n self.node_array = np.concatenate((self.node_array, np.zeros((self.allocate_step, 3))))", "title": "" }, { "docid": "fdfb3d7c8003233326e2a9eb3384cf97", "score": "0.5277749", "text": "def _built(self):\n for node_index in range(len(self.tree) // 2 - 1, -1, -1):\n self._heapify(node_index)", "title": "" }, { "docid": "29e907d9b07662687848d7c829dbbb2f", "score": "0.5274965", "text": "def __init__(self):\n self.id = 0 # a unique id for every element\n\n self.stack = []\n self.heappool = []\n \n self.heap_delete = set()\n self.stack_delete = set()", "title": "" }, { "docid": "85014793ced5ee8f8a23536a8a264a43", "score": "0.5274826", "text": "def testOverassignedAllocationCorrect(self):\n\n locked_slots = {'git': 20}\n\n result = self.allocater.allocate(locked_slots)\n\n expected = 6\n actual = result['git']\n\n self.failUnlessEqual(expected, actual)", "title": "" }, { "docid": "944daa9bf104c03556e4b44d609c159c", "score": "0.5272759", "text": "def RecalcSizes(self):", "title": "" }, { "docid": "944daa9bf104c03556e4b44d609c159c", "score": "0.5272759", "text": "def RecalcSizes(self):", "title": "" }, { "docid": "944daa9bf104c03556e4b44d609c159c", 
"score": "0.5272759", "text": "def RecalcSizes(self):", "title": "" }, { "docid": "944daa9bf104c03556e4b44d609c159c", "score": "0.5271244", "text": "def RecalcSizes(self):", "title": "" }, { "docid": "944daa9bf104c03556e4b44d609c159c", "score": "0.5271244", "text": "def RecalcSizes(self):", "title": "" }, { "docid": "944daa9bf104c03556e4b44d609c159c", "score": "0.5271244", "text": "def RecalcSizes(self):", "title": "" }, { "docid": "944daa9bf104c03556e4b44d609c159c", "score": "0.5271244", "text": "def RecalcSizes(self):", "title": "" }, { "docid": "944daa9bf104c03556e4b44d609c159c", "score": "0.5271244", "text": "def RecalcSizes(self):", "title": "" }, { "docid": "0dc5bf2811a27d801e3471fd7064711a", "score": "0.5268572", "text": "def _allocate_payload(self, nentries, realloc=False):\n context = self._context\n builder = self._builder\n\n ok = cgutils.alloca_once_value(builder, cgutils.true_bit)\n\n intp_t = context.get_value_type(types.intp)\n zero = ir.Constant(intp_t, 0)\n one = ir.Constant(intp_t, 1)\n\n payload_type = context.get_data_type(types.SetPayload(self._ty))\n payload_size = context.get_abi_sizeof(payload_type)\n entry_size = self._entrysize\n # Account for the fact that the payload struct already contains an entry\n payload_size -= entry_size\n\n # Total allocation size = <payload header size> + nentries * entry_size\n allocsize, ovf = cgutils.muladd_with_overflow(builder, nentries,\n ir.Constant(intp_t, entry_size),\n ir.Constant(intp_t, payload_size))\n with builder.if_then(ovf, likely=False):\n builder.store(cgutils.false_bit, ok)\n\n with builder.if_then(builder.load(ok), likely=True):\n if realloc:\n meminfo = self._set.meminfo\n ptr = context.nrt.meminfo_varsize_alloc_unchecked(builder,\n meminfo,\n size=allocsize)\n alloc_ok = cgutils.is_null(builder, ptr)\n else:\n # create destructor to be called upon set destruction\n dtor = self._imp_dtor(context, builder.module)\n meminfo = context.nrt.meminfo_new_varsize_dtor_unchecked(\n builder, allocsize, builder.bitcast(dtor, cgutils.voidptr_t))\n alloc_ok = cgutils.is_null(builder, meminfo)\n\n with builder.if_else(alloc_ok,\n likely=False) as (if_error, if_ok):\n with if_error:\n builder.store(cgutils.false_bit, ok)\n with if_ok:\n if not realloc:\n self._set.meminfo = meminfo\n self._set.parent = context.get_constant_null(types.pyobject)\n payload = self.payload\n # Initialize entries to 0xff (EMPTY)\n cgutils.memset(builder, payload.ptr, allocsize, 0xFF)\n payload.used = zero\n payload.fill = zero\n payload.finger = zero\n new_mask = builder.sub(nentries, one)\n payload.mask = new_mask\n\n if DEBUG_ALLOCS:\n context.printf(builder,\n \"allocated %zd bytes for set at %p: mask = %zd\\n\",\n allocsize, payload.ptr, new_mask)\n\n return builder.load(ok)", "title": "" }, { "docid": "8aea10d0da5f0772831368aad08424df", "score": "0.526611", "text": "def preprocessingAllocation(self):\n\n adjusted_orgs = self.adjusted_orgs\n adjusted_slots = self.adjusted_slots\n locked_orgs = self.locked_orgs\n locked_slots = self.locked_slots\n unlocked_orgs = self.unlocked_orgs\n total_popularity = self.total_popularity\n\n available_slots = self.slots\n allocations = {}\n slack = {}\n\n for org in locked_orgs:\n popularity = self.popularity[org]\n slots = locked_slots[org]\n slots = self.rangeSlots(slots, org)\n\n total_popularity -= popularity\n available_slots -= slots\n allocations[org] = slots\n del self.popularity[org]\n\n # adjust the orgs in need of adjusting\n for org in adjusted_orgs:\n slots = float(adjusted_slots[org])\n\n adjustment 
= (float(total_popularity)/float(available_slots))*slots\n adjustment = int(math.ceil(adjustment))\n self.popularity[org] += adjustment\n total_popularity += adjustment\n\n # adjust the popularity so that the invariants are always met\n for org in unlocked_orgs:\n popularity = self.popularity[org]\n # mentors = self.mentors[org]\n\n slots = (float(popularity)/float(total_popularity))*available_slots\n slots = self.rangeSlots(slots, org)\n\n popularity = (float(total_popularity)/float(available_slots))*slots\n\n self.popularity[org] = popularity\n\n total_popularity = sum(self.popularity.values())\n\n # do the actual calculation\n for org in unlocked_orgs:\n popularity = self.popularity[org]\n raw_slots = (float(popularity)/float(total_popularity))*available_slots\n slots = int(math.floor(raw_slots))\n\n slack[org] = raw_slots - slots\n allocations[org] = slots\n\n slots_left = available_slots - sum(allocations.values())\n\n # add leftover slots, sorted by slack, decending\n for org, slack in sorted(slack.iteritems(),\n key=lambda (k, v): v, reverse=True):\n if slots_left < 1:\n break\n\n current = allocations[org]\n slots = self.rangeSlots(current + 1, org)\n\n slots_left += slots - current\n allocations[org] = slots\n\n return allocations", "title": "" }, { "docid": "8fddac91dfd55dd98ff02c1aa641adfa", "score": "0.526323", "text": "def __init__(self):\n self.heap = [],[]", "title": "" }, { "docid": "64435a94946b4e885ab15c29d29264d3", "score": "0.5261087", "text": "def _build_heap(self):\n for i in range(self.parent(self.size), -1, -1):\n self._heapify(i)", "title": "" }, { "docid": "a1dda49171e88fe33c53853be9542727", "score": "0.5249658", "text": "def _init_used(self):\n for m in self._umap:\n self._used[m] = False\n for i in self._umap[m]:\n self._used[i] = False\n\n for m in self._imap:\n for i in m:\n self._used[i] = False", "title": "" }, { "docid": "a7e110db81755a68cc29d4a6db3d6eca", "score": "0.52407986", "text": "def preallocation(self, preallocation):\n\n self._preallocation = preallocation", "title": "" }, { "docid": "a7e110db81755a68cc29d4a6db3d6eca", "score": "0.52407986", "text": "def preallocation(self, preallocation):\n\n self._preallocation = preallocation", "title": "" }, { "docid": "d675c7ab09cd27dea6483756ae2c0d61", "score": "0.5238737", "text": "def enlarge(self):\r\n self.capacity = self.capacity * 2\r\n new_array = [None] * self.capacity\r\n new_num_items = 0\r\n for index in range(self.num_items):\r\n new_array[index] = self.arr[index]\r\n new_num_items += 1\r\n self.arr = new_array\r\n self.num_items = new_num_items\r\n return None", "title": "" }, { "docid": "ee984e84f36530cf546fa47dc00e4f0f", "score": "0.52354205", "text": "def allocate(a_list:list, item):\n\tidx = len(a_list)\n\ta_list.append(item)\n\treturn idx", "title": "" }, { "docid": "2b9da98bb4db3ae6fa37579eb287c5cf", "score": "0.5228579", "text": "def allocate(self, core):\n if not self.is_dec:\n for slice_idx in range(self.N_slices):\n # actually doesn't matter what the slice is, always allocate 1xDO\n start_addr = core.MM.allocate_trans(self.dimensions_out)\n self.slice_start_addrs += [start_addr]", "title": "" }, { "docid": "a513ca096ffa45eea8fd09c8f12f313a", "score": "0.5223802", "text": "def grow(self):\n # Double the physical size if no more room for items\n # and add the fillValue to the new cells in the underlying list\n for count in range(len(self)):\n self._items.append(self._fillValue)", "title": "" }, { "docid": "9a003053b226e8c34963a52d1e3b1903", "score": "0.5211778", "text": "def 
allocate_registers(frame, instrs):\n while True:\n try:\n return colorize(frame, instrs)\n except Spill as spill_exception:\n instrs = spill_temporary(frame, instrs, spill_exception.temp)", "title": "" }, { "docid": "b8877a23daf55cfdb51bd2007d939213", "score": "0.5204506", "text": "def instrAlloc(size):\n return addHeader(EMBEDDED_INST_ALLOC, struct.pack(\"!L\", size))", "title": "" }, { "docid": "28dbec29d053a2a518e122bd5381f6be", "score": "0.52042836", "text": "def allocate_block(self) -> Block:", "title": "" }, { "docid": "c7b0235056090547922e0bba7f06164c", "score": "0.51806486", "text": "def __init__(self):\n self._heap = []\n self._length = 0", "title": "" }, { "docid": "931704f403b24cd80b572224f88c013a", "score": "0.5172589", "text": "def allocated_memory(self):\n # Becuse the Wand.Image itself uses a weakref to the image data this not trivial\n return 0", "title": "" }, { "docid": "3634d6bcfe03cdafa164593cae93ec69", "score": "0.51715124", "text": "def __init__(self):\n self.heap = []\n self.cache = {}", "title": "" }, { "docid": "c79b28adf0850c4dc4921eafac38309c", "score": "0.5171275", "text": "def _resume_allocations(self, context):\n filters = {'state': states.ALLOCATING,\n 'conductor_affinity': self.conductor.id}\n for allocation in objects.Allocation.list(context, filters=filters):\n LOG.debug('Resuming unfinished allocation %s', allocation.uuid)\n allocations.do_allocate(context, allocation)", "title": "" }, { "docid": "faa50e789bffd604400fd1420497733d", "score": "0.51696974", "text": "def free_memory(self):\n pass", "title": "" }, { "docid": "ae2a1a90abc405b5c057a7c1ea4d44e3", "score": "0.5156235", "text": "def __init__(self):\n\n self.heap = [None]", "title": "" }, { "docid": "aaa25b3cce97e9e5c4efbcbb41d57af4", "score": "0.5148934", "text": "def malloc_postprocess(self, **kwargs):\n pass", "title": "" }, { "docid": "7305f3fec1a96751a7efd64ee6c90d85", "score": "0.5147175", "text": "def AllocateElements(self, *args):\n return _itkVariableLengthVectorPython.itkVariableLengthVectorUS_AllocateElements(self, *args)", "title": "" }, { "docid": "c04779128e9b75a00e1e9ad6d62a443d", "score": "0.51441514", "text": "def _report_tag_allocations(self):\n with FecTimer(\"Tag allocator report\", TimerWork.REPORT) as timer:\n if timer.skip_if_cfg_false(\n \"Reports\", \"write_tag_allocation_reports\"):\n return\n tag_allocator_report()", "title": "" }, { "docid": "b68a703a0780dc3350334d9eb57d3fbd", "score": "0.51430017", "text": "def _reserve_ref(self):\n # See r_ref_reserve in Python-3.4/Python/marshal.c\n idx = len(self.refs)\n self.refs.append(None)\n return idx", "title": "" }, { "docid": "5844751ac97d9475c3a4ecfe1bb839f4", "score": "0.51415527", "text": "def __resize(self):\n\n self.__n_buckets *= 2\n\n new_buckets = [None] * self.__n_buckets\n old_elements = iter(self)\n\n self.__buckets = new_buckets\n self.__size = 0\n\n for element in old_elements:\n self.add(element)", "title": "" }, { "docid": "b4ff2000982630a2c09c1f3502b361a3", "score": "0.5140009", "text": "def __init__(self):\n self.heap_array = [None]", "title": "" }, { "docid": "7405627fe2a899f40c0a637307554031", "score": "0.5138195", "text": "def AnnotateAllocates():\n\n def _post_transform(allocate):\n return tvm.tir.Allocate(\n buffer_var=allocate.buffer_var,\n dtype=allocate.dtype,\n extents=allocate.extents,\n condition=allocate.condition,\n body=allocate.body,\n annotations={DISABLE_LOWER_BUILTIN: True},\n )\n\n def _ftransform(f, mod, ctx):\n return f.with_body(\n tvm.tir.stmt_functor.ir_transform(f.body, None, 
_post_transform, [\"tir.Allocate\"])\n )\n\n return tvm.tir.transform.prim_func_pass(\n _ftransform, opt_level=0, name=\"tir.contrib.ethos-u.annotate_allocates\"\n )", "title": "" }, { "docid": "c4dc16355bf1d1d5979933580d33822c", "score": "0.51289517", "text": "def _malloc(self, sim_size):\n raise NotImplementedError(f\"{self._malloc.__func__.__name__} not implemented for {self.__class__.__name__}\")", "title": "" }, { "docid": "96ba3768855852bd7993761d3fd6abf1", "score": "0.5126594", "text": "def _execute_global_allocate(self, extra_allocations):\n with FecTimer(\"Global allocate\", TimerWork.OTHER):\n self._data_writer.set_routing_infos(\n global_allocate(extra_allocations))", "title": "" }, { "docid": "c9df29a6be58b246954913de8a2b2744", "score": "0.51259434", "text": "def _add_gc_elements( self ):\n\n def calculate_fibre_vector( ):\n \"\"\" Calculate the fibre vector \"\"\"\n\n ids = ele.node_ids\n p1 = sum( self.mesh.nodes_map[ _ ] for _ in ids[ 0:4 ] ) / 4\n p2 = sum( self.mesh.nodes_map[ _ ] for _ in ids[ 4: ] ) / 4\n\n return p1 - p2\n\n # index order is thickness, circumference and then slice\n #\n indices = ((0, 0, 0), (1, 0, 0), (1, 0, 1), (0, 0, 1),\n (0, 1, 0), (1, 1, 0), (1, 1, 1), (0, 1, 1))\n\n for thickness_idx in range( self.mesh_cfg.num_pts_through_wall - 1 ):\n for circumf_idx in range( self.mesh_cfg.num_pts_on_semi_circumference - 1 ):\n for slice_idx in range( self.mesh_cfg.num_slices - 1 ):\n nid_indices = [ (ii[ 0 ] + thickness_idx,\n ii[ 1 ] + circumf_idx,\n ii[ 2 ] + slice_idx) for ii in indices ]\n\n nids = [ self.mesh.nodes_grid[ t, c, s ].id for t, c, s in nid_indices ]\n\n ele = elem.Hex8Element( nids )\n self.mesh.add_element( ele )\n\n ele.fibre_vector = calculate_fibre_vector( )\n\n return", "title": "" }, { "docid": "3c7a1ee9d5f32828662ee190fa2f8526", "score": "0.51132226", "text": "def smart_alloc(self, outputname):\n if not self._igraph:\n raise MiniCInternalError(\"hum, the interference graph seems to be empty\")\n # Temporary -> Operand (register or offset) dictionary,\n # specifying where a given Temporary should be allocated:\n alloc_dict = {}\n # TODO (lab5): color the graph and get back the coloring (see\n # Libgraphes.py). Then, construct a dictionary Temporary ->\n # Register or Offset. Our version is less than 15 lines\n # including debug log. Be careful, the temporary names in the\n # graph are now strings, and you need to enter Temporary\n # objects in the dictionary. 
You can get all temporaries in\n # self._f._pool._all_temps, and get the corresponding vertex name\n # using str(temp) to access the associated color.\n # TODO (lab5) : do not forget to update the stacksize at the end!\n self._f._pool.set_temp_allocation(alloc_dict)", "title": "" }, { "docid": "c53c738a517edd8d18418bdaf3896726", "score": "0.511159", "text": "def _heapify(self, element_id):\r\n l = len(self.heap)\r\n if l == 1:\r\n return\r\n while 2 * element_id < l:\r\n el_id = 2 * element_id\r\n if 2 * element_id + 1 < l and self.compar(self.heap[element_id * 2 + 1], self.heap[int(element_id * 2)]):\r\n el_id += 1\r\n if self.compar(self.heap[element_id], self.heap[el_id]):\r\n return\r\n self.heap[element_id], self.heap[el_id] = self.heap[el_id], self.heap[element_id]\r\n element_id = el_id\r\n # print(\"heap loop\", el_id)\r", "title": "" }, { "docid": "6864f809d3658a3f0739c4f483f9e6e3", "score": "0.5096137", "text": "def reload_allocations(self):\n pass", "title": "" }, { "docid": "499479bbef1ca0408d7e6acd28739702", "score": "0.5084545", "text": "def __init__(self):\n super(_PendingMergeTaskHeap, self).__init__()\n self._heap = []\n self._task_identifiers = set()", "title": "" }, { "docid": "63cca181fb8d92d0fca2d5a857b3e3f4", "score": "0.5083093", "text": "def allocation(self) -> float:\n raise NotImplementedError", "title": "" }, { "docid": "6adc9e107c151e2d62ecad436b9a2bcb", "score": "0.50820196", "text": "def _leak(self):\n for idx in range(1, LeakyStack.DEFAULT_CAPACITY):\n self._data[idx-1] = self._data[idx]", "title": "" }, { "docid": "f8f2e1bf9dcbced572bac4d1e70e2a5b", "score": "0.50799644", "text": "def alloc_node():\n ret = ti.atomic_add(node_table_len[None], 1)\n assert ret < T_MAX_NODES\n\n node_mass[ret] = 0\n node_centroid_pos[ret] = particle_pos[0] * 0\n\n # indicate the 4 children to be LEAF as well\n node_particle_id[ret] = LEAF\n for which in ti.grouped(ti.ndrange(*([2] * DIM))):\n node_children[ret, which] = LEAF\n return ret", "title": "" }, { "docid": "c0df02cba6194c2ea2d43eaedc4ecbe7", "score": "0.50659156", "text": "def __compact__(self):\t\n\t\tself.memorymap = []\n\t\tfor proc in self.procs:\n\t\t\tself.__proc_in_mmap__(proc)\n\t\tself.__complete_mmap__()", "title": "" }, { "docid": "4f92d991bbd598887d94183c8d7f4e06", "score": "0.50524306", "text": "def __init__(self):\n self.stack = []\n self.maxheap = []\n self.obselete = set()\n self.idx = 0", "title": "" }, { "docid": "5bdd8997fe6e71f8dac7a086d7d22088", "score": "0.50502765", "text": "def init_heap(self):\n\n segs = self.r2api.segments\n\n start = self.heap_start\n size = self.heap_size\n avail = False\n while not avail:\n avail = True\n for seg in segs:\n if (seg[\"addr\"] < start < seg[\"addr\"]+seg[\"size\"] or \n start < seg[\"addr\"] < start+size):\n\n start = start+size\n avail = False\n break\n\n self.heap_start = start\n\n self.r2api.add_segment(\n \"heap\",\n self.heap_size,\n \"-rw-\",\n self.heap_start\n )\n self.heap_init = True", "title": "" }, { "docid": "6b44a7178abd320853d0fada8ff630d1", "score": "0.50488096", "text": "def __init__(self):\n self.heap_left = []\n self.heap_right = []", "title": "" }, { "docid": "1beb73fda579eca99fb207e926ce98d3", "score": "0.50485796", "text": "def __init__(self):\n self.__heap = []\n dict.__init__(self)", "title": "" }, { "docid": "00c78ca15386b11f9227fe45ae876a54", "score": "0.5046812", "text": "def _heapify(self) -> None:\n for i in reversed(range(self.size // 2)):\n self._heapify_down(i)", "title": "" }, { "docid": "d64a9516a3f74efcaf4ed6fc5ffd5181", 
"score": "0.5043949", "text": "def allocate(self, quantity):\r\n if self.num_allocated is None:\r\n self.num_allocated = 0\r\n self.num_allocated += quantity\r\n self.save()", "title": "" }, { "docid": "fc62eceb65d259f4b7cd57f6a9062c53", "score": "0.5036986", "text": "def __init__(self):\n ARBITRARY_VALUE = 0\n self.heap = [ARBITRARY_VALUE]", "title": "" } ]
query_id: 4dd26934f63cb73e0462327ec2c1167f
query: SetMaskImage(itkConnectedComponentImageFilterIUC3ISS3 self, itkImageUC3 _arg)
[ { "docid": "1a128a54e47001e0f8c40bd4b686b2b3", "score": "0.90249586", "text": "def SetMaskImage(self, _arg: 'itkImageUC3') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC3ISS3_SetMaskImage(self, _arg)", "title": "" } ]
[ { "docid": "b21a7e333812da8cfb63704eebc66c7e", "score": "0.90715206", "text": "def SetMaskImage(self, _arg: 'itkImageUC3') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC3IUL3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "d2cbec8fd33c315dd115f2a3d39ccce2", "score": "0.90588164", "text": "def SetMaskImage(self, _arg: 'itkImageUC3') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC3IUC3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "6f80a60c47e9640b42061e6e7dbb036d", "score": "0.90191185", "text": "def SetMaskImage(self, _arg: 'itkImageUC3') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC3IUS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "d761f6f049c4532eb91c3689f388df96", "score": "0.8793911", "text": "def SetMaskImage(self, _arg: 'itkImageUS3') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS3IUC3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "c004caa54ed931d2fc43eacfe7541452", "score": "0.8626497", "text": "def SetMaskImage(self, _arg: 'itkImageSS3') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterISS3IUC3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "d4a91d7084a7c9c92f765fa0afca38dd", "score": "0.857761", "text": "def SetMaskImage(self, _arg: 'itkImageUS3') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS3IUL3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "8fe7e95b6d12663a6c92de378f996959", "score": "0.8510915", "text": "def SetMaskImage(self, _arg: 'itkImageUS3') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS3ISS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "daa2c2ea062116f5aa71387719a6efda", "score": "0.8498292", "text": "def SetMaskImage(self, _arg: 'itkImageUS3') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS3IUS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "4afaf62fa95e652a54d5be7462327d6a", "score": "0.844418", "text": "def SetMaskImage(self, _arg: 'itkImageSS3') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterISS3IUL3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "150cb6a658d13acb91c19dc85da2f640", "score": "0.8320189", "text": "def SetMaskImage(self, _arg: 'itkImageCVF23') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF23IUC3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "d7f0dd384eae4d0c027b47026fc41ead", "score": "0.8312239", "text": "def SetMaskImage(self, _arg: 'itkImageSS3') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterISS3IUS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "72a43c10a97db5c006536c9a4e5608ff", "score": "0.8305528", "text": "def SetMaskImage(self, _arg: 'itkImageCVF33') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF33IUC3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "b55cc75a4200d00980ce5c0edc3389ee", "score": "0.8190668", "text": "def SetMaskImage(self, _arg: 'itkImageSS3') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterISS3ISS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "da1446cfcc398bf3d325c8808615e177", "score": "0.8184506", 
"text": "def SetMaskImage(self, _arg: 'itkImageVF23') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF23IUC3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "f5a3c4956e5f65d7517fd99ce90aa8e0", "score": "0.8156622", "text": "def SetMaskImage(self, _arg: 'itkImageCVF43') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF43IUC3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "a8b50deb600052f22118dfe79ab468c1", "score": "0.81481415", "text": "def SetMaskImage(self, _arg: 'itkImageVF33') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF33IUC3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "9d8f6ca92c0e1c09c39da72cdbfd74e6", "score": "0.80398643", "text": "def SetMaskImage(self, _arg: 'itkImageCVF33') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF33IUS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "cbb27df36281a4b18ed2d9e66de20ec6", "score": "0.8001437", "text": "def SetMaskImage(self, _arg: 'itkImageVF43') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF43IUC3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "9ef0f9a19c75373d865453b3751818ed", "score": "0.80008996", "text": "def SetMaskImage(self, _arg: 'itkImageCVF23') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF23IUS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "0d354db4c601cd9152c9c741e2b9030a", "score": "0.7849059", "text": "def SetMaskImage(self, _arg: 'itkImageVF23') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF23IUS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "b15aa806bcade7d72a2a949d7a469818", "score": "0.78463507", "text": "def SetMaskImage(self, _arg: 'itkImageCVF33') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF33ISS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "e658c436b0a59c9d682115cfe5db106e", "score": "0.78221524", "text": "def SetMaskImage(self, _arg: 'itkImageVF33') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF33IUS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "c5eec94c0174d18f4a501aa8249d8b24", "score": "0.7814588", "text": "def SetMaskImage(self, _arg: 'itkImageCVF43') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF43IUS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "e45ce1824ce81d00002d6355331b6bc5", "score": "0.7779256", "text": "def SetMaskImage(self, _arg: 'itkImageCVF23') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF23ISS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "183c1a4d09caf3d0b62d263315b8ab16", "score": "0.7700398", "text": "def SetMaskImage(self, _arg: 'itkImageVF33') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF33ISS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "081ca8e2260c443f843a9ff31a4d3176", "score": "0.7648435", "text": "def SetMaskImage(self, _arg: 'itkImageVF23') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF23ISS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "672fa3c40908d93edc4be6e1185438d4", "score": "0.764822", "text": "def SetMaskImage(self, _arg: 
'itkImageVF43') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF43IUS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "070320a753ef43b3e5b973b34c5a9063", "score": "0.75942177", "text": "def SetMaskImage(self, _arg: 'itkImageCVF43') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF43ISS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "c79d89856c898a00d1523be0c5897c0c", "score": "0.75397336", "text": "def SetMaskImage(self, _arg: 'itkImageUC2') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC2IUL2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "5d149157d595b913cbedc94d64bbefc4", "score": "0.748275", "text": "def SetMaskImage(self, _arg: 'itkImageVF43') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF43ISS3_SetMaskImage(self, _arg)", "title": "" }, { "docid": "bdae5ffd049efc9ef4b6231d0292116d", "score": "0.7429115", "text": "def SetMaskImage(self, _arg: 'itkImageUC2') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC2IUC2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "6f5052ba62b3b2f6e94b785dc94e21a8", "score": "0.7308315", "text": "def SetMaskImage(self, _arg: 'itkImageUC2') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC2IUS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "21726f1e82a46e611d73b45d658dc35b", "score": "0.72841233", "text": "def SetMaskImage(self, _arg: 'itkImageUS2') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS2IUC2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "d50b0c4dbc74274733210f05b7bcd6eb", "score": "0.72447497", "text": "def SetMaskImage(self, _arg: 'itkImageUC2') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC2ISS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "0c3c0d551e78d19f15baf6ec565c4616", "score": "0.718823", "text": "def itkUnsharpMaskLevelSetImageFilterIF3IF3_cast(*args):\n return _itkUnsharpMaskLevelSetImageFilterPython.itkUnsharpMaskLevelSetImageFilterIF3IF3_cast(*args)", "title": "" }, { "docid": "1bf267192bb973a94d88821cdb7fc7bb", "score": "0.7117184", "text": "def SetMaskImage(self, _arg: 'itkImageCVF42') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF42IUC2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "996460bacba75529032d2c818ee54b08", "score": "0.707136", "text": "def SetMaskImage(self, _arg: 'itkImageCVF32') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF32IUC2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "22df6e760985553751685f57b99c1d24", "score": "0.7061713", "text": "def GetMaskImage(self) -> \"itkImageUC3 const *\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC3IUL3_GetMaskImage(self)", "title": "" }, { "docid": "4ce153ccade439c236d3c87be18955c3", "score": "0.70440793", "text": "def GetMaskImage(self) -> \"itkImageUC3 const *\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC3IUS3_GetMaskImage(self)", "title": "" }, { "docid": "266f47edf2e1a6b9860f046f7cf1e7c4", "score": "0.7040046", "text": "def SetMaskImage(self, _arg: 'itkImageUS2') -> \"void\":\n return 
_itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS2IUL2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "a17726423e06ecf6f3c363da18cc3aab", "score": "0.70392", "text": "def GetMaskImage(self) -> \"itkImageUC3 const *\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC3ISS3_GetMaskImage(self)", "title": "" }, { "docid": "4521a297e938f936b366eee5be20fdc9", "score": "0.70327723", "text": "def GetMaskImage(self) -> \"itkImageUC3 const *\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC3IUC3_GetMaskImage(self)", "title": "" }, { "docid": "9227625489596b9d528aa5c1495acf0e", "score": "0.7028726", "text": "def SetMaskImage(self, _arg: 'itkImageVF42') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF42IUC2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "efa360e243e167b307d9fffa91a8cf85", "score": "0.7021636", "text": "def SetMaskImage(self, _arg: 'itkImageCVF22') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF22IUC2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "cd21aec8fce86e715bc8f39158c3123e", "score": "0.7019389", "text": "def SetMaskImage(self, _arg: 'itkImageVF32') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF32IUC2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "2ed43f70ff5b62770028f6673315eb9f", "score": "0.6960129", "text": "def SetMaskImage(self, _arg: 'itkImageVF22') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF22IUC2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "a9196156e625e6f452b90df7e49b4c16", "score": "0.69342864", "text": "def GetMaskImage(self) -> \"itkImageUS3 const *\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS3IUC3_GetMaskImage(self)", "title": "" }, { "docid": "c3ccfdd39a27b91691e844016e6192b0", "score": "0.6927482", "text": "def itkUnsharpMaskLevelSetImageFilterID3ID3_cast(*args):\n return _itkUnsharpMaskLevelSetImageFilterPython.itkUnsharpMaskLevelSetImageFilterID3ID3_cast(*args)", "title": "" }, { "docid": "9d537a0c4271273111e3a6542e3ce158", "score": "0.68330944", "text": "def SetMaskImage(self, _arg: 'itkImageUS2') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS2IUS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "102d728ba851ef9c010edbe920a3c723", "score": "0.682514", "text": "def SetInput(self, *args):\n return _itkImageToImageFilterAPython.itkImageToImageFilterIUC3IUC3_SetInput(self, *args)", "title": "" }, { "docid": "bf9c83eda6b2a3388fd9f6996d0020f3", "score": "0.6798655", "text": "def SetMaskImage(self, _arg: 'itkImageCVF42') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF42IUS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "2766c8b45322365478740b391a59b601", "score": "0.67941636", "text": "def SetValidInput(self, _arg: 'itkImageUC3') -> \"void\":\n return _itkComparisonImageFilterPython.itkComparisonImageFilterIUC3IUC3_SetValidInput(self, _arg)", "title": "" }, { "docid": "4487c6816c1bfee70c7d7605010c4bc4", "score": "0.6775806", "text": "def SetMaskImage(self, _arg: 'itkImageUS2') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS2ISS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "766cb3bb1f58021aeff71973a697a98f", "score": 
"0.6775328", "text": "def SetMaskImage(self, _arg: 'itkImageCVF32') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF32IUS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "8c025a09e5ed9ae368261afd0d24d66d", "score": "0.6769543", "text": "def SetMaskImage(self, _arg: 'itkImageSS2') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterISS2IUC2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "3005d5f142f30fdf9f671edc4fc5205c", "score": "0.6738593", "text": "def GetMaskImage(self) -> \"itkImageUS3 const *\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS3IUL3_GetMaskImage(self)", "title": "" }, { "docid": "ce31f155c0bde2ad5ec746ad0cc47165", "score": "0.66993785", "text": "def SetInput(self, *args):\n return _itkImageToImageFilterAPython.itkImageToImageFilterIUC3IF3_SetInput(self, *args)", "title": "" }, { "docid": "04ecc2b3c32fa52ed7625168d543679c", "score": "0.66988564", "text": "def SetMaskImage(self, _arg: 'itkImageVF42') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF42IUS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "4733b3d53e4e5ea32207989e7a6ab67c", "score": "0.66847986", "text": "def GetMaskImage(self) -> \"itkImageUS3 const *\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS3ISS3_GetMaskImage(self)", "title": "" }, { "docid": "11eec0a04516132ca56564074a3aa7e1", "score": "0.6663306", "text": "def SetMaskImage(self, _arg: 'itkImageVF32') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF32IUS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "5fbdf93bd45a2946c2d2e45f3176c3d1", "score": "0.66619605", "text": "def SetMaskImage(self, _arg: 'itkImageCVF22') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF22IUS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "517004bb72b38cb60b7d5b7aec77bbd9", "score": "0.66526675", "text": "def SetBackgroundValue(self, _arg: 'unsigned char const') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS3IUC3_SetBackgroundValue(self, _arg)", "title": "" }, { "docid": "ab7ea635965a6c2fad574794d271d607", "score": "0.663555", "text": "def SetMaskImage(self, _arg: 'itkImageSS2') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterISS2IUL2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "f5c9539006202a836b85ec220bffb300", "score": "0.66335404", "text": "def SetInput(self, *args):\n return _itkImageToImageFilterAPython.itkImageToImageFilterIF3IUC3_SetInput(self, *args)", "title": "" }, { "docid": "ab05569580c409d58c65bbe02d04e74e", "score": "0.66259235", "text": "def GetMaskImage(self) -> \"itkImageUS3 const *\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS3IUS3_GetMaskImage(self)", "title": "" }, { "docid": "d7fb4233ed0c23b6428fc3a8c1709fda", "score": "0.65917534", "text": "def SetBackgroundValue(self, _arg: 'unsigned char const') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC3IUC3_SetBackgroundValue(self, _arg)", "title": "" }, { "docid": "3d179b84efbe79b46e822d48eb34f135", "score": "0.6587733", "text": "def SetBackgroundValue(self, _arg: 'unsigned char const') -> \"void\":\n return 
_itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterISS3IUC3_SetBackgroundValue(self, _arg)", "title": "" }, { "docid": "a4ed8f61c7324693368390f378af697b", "score": "0.6584812", "text": "def SetInput(self, *args):\n return _itkImageToImageFilterAPython.itkImageToImageFilterIUC3IUL3_SetInput(self, *args)", "title": "" }, { "docid": "150574b2358930f9ee0dadc4fffb0779", "score": "0.65834576", "text": "def SetInput(self, *args):\n return _itkImageToImageFilterAPython.itkImageToImageFilterIUC3IUS3_SetInput(self, *args)", "title": "" }, { "docid": "953a00087231eacd3fc44704013c0c4f", "score": "0.6581163", "text": "def SetInput(self, *args):\n return _itkImageToImageFilterAPython.itkImageToImageFilterIUL3IUC3_SetInput(self, *args)", "title": "" }, { "docid": "409289758d91d9ae38dd8fb93b253a58", "score": "0.6563092", "text": "def SetBackgroundValue(self, _arg: 'unsigned short const') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC3IUS3_SetBackgroundValue(self, _arg)", "title": "" }, { "docid": "fe0c8f3fdac5c938ff99a17f7d2a2fe3", "score": "0.655834", "text": "def cast(*args):\n return _itkUnsharpMaskLevelSetImageFilterPython.itkUnsharpMaskLevelSetImageFilterIF3IF3_cast(*args)", "title": "" }, { "docid": "72541475e9c4ad650ac068abe4ef74e5", "score": "0.65582234", "text": "def SetMaskImage(self, _arg: 'itkImageCVF42') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF42ISS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "d224be54946fbe872504a7ebae0cc079", "score": "0.65580666", "text": "def SetMaskImage(self, _arg: 'itkImageVF42') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF42ISS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "2e15ba6a94ed9b2893e603b388d47d44", "score": "0.6557282", "text": "def GetMaskImage(self) -> \"itkImageSS3 const *\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterISS3IUC3_GetMaskImage(self)", "title": "" }, { "docid": "2bc4540454cf0e2724c4c182eda41920", "score": "0.65564764", "text": "def SetMaskImage(self, _arg: 'itkImageVF22') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF22IUS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "0fa48f5858c11e6fe2fd55412bae9c73", "score": "0.6554673", "text": "def SetInput(self, *args):\n return _itkImageToImageFilterAPython.itkImageToImageFilterIUS3IUC3_SetInput(self, *args)", "title": "" }, { "docid": "723c8fa68f3e9d183ced2c96592fba3e", "score": "0.65470934", "text": "def SetFixedImageMask(self, *args) -> \"void\":\n return _itkMattesMutualInformationImageToImageMetricv4Python.itkMattesMutualInformationImageToImageMetricv4IF3IF3_Superclass_SetFixedImageMask(self, *args)", "title": "" }, { "docid": "bdf29b2950e1cfb85e822647174df381", "score": "0.65327", "text": "def SetBackgroundValue(self, _arg: 'unsigned long const') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC3IUL3_SetBackgroundValue(self, _arg)", "title": "" }, { "docid": "69214db2bb309c2b79c31119f9992f5e", "score": "0.65259856", "text": "def SetReplaceValue(self, *args):\n return _itkIsolatedConnectedImageFilterPython.itkIsolatedConnectedImageFilterIUC3IUC3_SetReplaceValue(self, *args)", "title": "" }, { "docid": "e06c7f915ada0600202ab33b5f013d23", "score": "0.65125424", "text": "def SetMaskImage(self, _arg: 'itkImageCVF32') -> \"void\":\n return 
_itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF32ISS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "f2da0c57e9425dd162380a73f7c2e1bf", "score": "0.6502371", "text": "def SetValidInput(self, _arg: 'itkImageUS3') -> \"void\":\n return _itkComparisonImageFilterPython.itkComparisonImageFilterIUS3IUS3_SetValidInput(self, _arg)", "title": "" }, { "docid": "a354ef9c076e2d8cbf777e887a81a02f", "score": "0.6485092", "text": "def SetInput(self, *args) -> \"void\":\n return _itkImageToMeshFilterPython.itkImageToMeshFilterIUC3MUC3_SetInput(self, *args)", "title": "" }, { "docid": "8cd4aabe1c064d00e7b30bb4776e16cc", "score": "0.6476859", "text": "def SetMaskImage(self, _arg: 'itkImageVF32') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF32ISS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "9e696ac05a248551a7c4bb825975e8ea", "score": "0.64577395", "text": "def SetInput(self, *args):\n return _itkImageToImageFilterAPython.itkImageToImageFilterID3IUC3_SetInput(self, *args)", "title": "" }, { "docid": "e3e32d19ae16668fde2380d6df072a3d", "score": "0.64389354", "text": "def SetBackgroundValue(self, _arg: 'unsigned long const') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS3IUL3_SetBackgroundValue(self, _arg)", "title": "" }, { "docid": "ec5905f3455264a34092bbe364303de9", "score": "0.64359486", "text": "def SetInput(self, *args):\n return _itkImageToImageFilterAPython.itkImageToImageFilterIUC3ID3_SetInput(self, *args)", "title": "" }, { "docid": "139cd92baf95412e89b4e79d43116a30", "score": "0.64329475", "text": "def SetBackgroundValue(self, _arg: 'short const') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC3ISS3_SetBackgroundValue(self, _arg)", "title": "" }, { "docid": "d52b5566186d553ac7a6ed94bafbf1ca", "score": "0.638761", "text": "def SetBackgroundValue(self, _arg: 'unsigned long const') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterISS3IUL3_SetBackgroundValue(self, _arg)", "title": "" }, { "docid": "7711d0bf004030044d7000050c296383", "score": "0.6381677", "text": "def GetMaskImage(self) -> \"itkImageSS3 const *\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterISS3IUL3_GetMaskImage(self)", "title": "" }, { "docid": "860e670d41d503a8618da7bce1e7989f", "score": "0.63670135", "text": "def SetMaskImage(self, _arg: 'itkImageSS2') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterISS2IUS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "5db9491f167448fd1883dfe832d092ef", "score": "0.63650584", "text": "def SetInput(self, *args) -> \"void\":\n return _itkImageToMeshFilterPython.itkImageToMeshFilterIUC3PSUC3_SetInput(self, *args)", "title": "" }, { "docid": "bc1cc75781aa525743ff66d20ac2a676", "score": "0.63580173", "text": "def SetInput(self, *args) -> \"void\":\n return _itkImageToMeshFilterPython.itkImageToMeshFilterIUC3PSUS3_SetInput(self, *args)", "title": "" }, { "docid": "1d163c5e418efbb07d732e6b5229d651", "score": "0.6351435", "text": "def SetReplaceValue(self, _arg: 'unsigned char const') -> \"void\":\n return _itkIsolatedConnectedImageFilterPython.itkIsolatedConnectedImageFilterIUC3IUC3_SetReplaceValue(self, _arg)", "title": "" }, { "docid": "0083ce16b80feacc9192bab79e6d8e08", "score": "0.6348238", "text": "def SetMaskImage(self, _arg: 'itkImageCVF22') -> \"void\":\n return 
_itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF22ISS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "07fbe059dee9632466c3c216d8db212b", "score": "0.63480175", "text": "def SetMaskImage(self, _arg: 'itkImageVF22') -> \"void\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF22ISS2_SetMaskImage(self, _arg)", "title": "" }, { "docid": "63cb6006e8e8ab677ea6e048af259561", "score": "0.63180155", "text": "def GetMaskImage(self) -> \"itkImageCVF33 const *\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF33IUC3_GetMaskImage(self)", "title": "" }, { "docid": "a798bf443a7100e1230be885051fcea5", "score": "0.6314706", "text": "def itkImageToImageFilterIUC3IUC3_cast(*args):\n return _itkImageToImageFilterAPython.itkImageToImageFilterIUC3IUC3_cast(*args)", "title": "" }, { "docid": "a65744b45139c109cd4d5880bf2c6708", "score": "0.631331", "text": "def SetInput(self, *args) -> \"void\":\n return _itkImageToMeshFilterPython.itkImageToMeshFilterIUS3MUC3_SetInput(self, *args)", "title": "" }, { "docid": "a9040aea23d60f0ca4f11986cd2ce9fb", "score": "0.6305468", "text": "def SetInput(self, *args) -> \"void\":\n return _itkImageToMeshFilterPython.itkImageToMeshFilterISS3MUC3_SetInput(self, *args)", "title": "" } ]
3dbbe677964b2d559e3a5f88e09f0174
The default IPv4 gateway for the user plane interface. This should match one of the interfaces configured on your Azure Stack Edge device.
[ { "docid": "24aeb0ea7fe7434db5953e53bc3c1f16", "score": "0.73977745", "text": "def user_plane_access_ipv4_gateway(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_plane_access_ipv4_gateway\")", "title": "" } ]
[ { "docid": "94daa44747249a6620cb9ce4735d3dc6", "score": "0.74860233", "text": "def user_plane_access_ipv4_gateway(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_plane_access_ipv4_gateway\")", "title": "" }, { "docid": "94daa44747249a6620cb9ce4735d3dc6", "score": "0.74860233", "text": "def user_plane_access_ipv4_gateway(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_plane_access_ipv4_gateway\")", "title": "" }, { "docid": "6fb743742c66e913c8f28e279ee75cac", "score": "0.7049711", "text": "def gateway(self):\n return ip_address(self.ip_subnet.split('/')[0])", "title": "" }, { "docid": "21c3c79f503d366d4e821388ce29012d", "score": "0.6980946", "text": "def ip_address(self) -> IPv4Address:\n return self.sys_docker.network.gateway", "title": "" }, { "docid": "e7f4d0dd026b99d09ac0051cff313467", "score": "0.6826322", "text": "def ipGateway(self):\n return self._ipGateway.get_waarde()", "title": "" }, { "docid": "745e028bc08dd51ad30dce457c180f7c", "score": "0.6724415", "text": "def get_gateway_ip(timeout=10):\n\n return get_default_route(timeout)[0]", "title": "" }, { "docid": "fc696634ccc7b4cd800122969cc466e8", "score": "0.6697949", "text": "def gateway_ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"gateway_ip_address\")", "title": "" }, { "docid": "b89898544e4e3ab52011a01b5596d9ac", "score": "0.6502344", "text": "def gateway(self) -> Optional[str]:\n return pulumi.get(self, \"gateway\")", "title": "" }, { "docid": "5eb372d8d12403462e5e959ec0227bcd", "score": "0.64512074", "text": "def find_gateway(interface):\n\n address_family = interface.version\n output = subprocess.getoutput(\"ip -{} route\".format(address_family))\n\n pattern = re.compile(\"default\\s+via\\s+(\\S+)\\s+\")\n match = re.search(pattern, output)\n\n if match:\n gateway_ip = match.group(1)\n reverse_route_output = subprocess.getoutput(\"ip route get {}\"\n .format(gateway_ip))\n pattern = re.compile(\"{}.+src\\s+{}\".format(gateway_ip, interface.ip))\n if not re.search(pattern, reverse_route_output):\n logging.warning(\"Default route doesn't match iterface specified: {}\"\n .format(reverse_route_output))\n return None\n else:\n return gateway_ip\n else:\n logging.warning(\"Can't find gateway address on system\")\n return None", "title": "" }, { "docid": "341cf5e237ddf3cc14474f607aa54774", "score": "0.64302486", "text": "def gateway_address(self) -> str:\n return pulumi.get(self, \"gateway_address\")", "title": "" }, { "docid": "16acd6025a266b5e97aa045e85518f70", "score": "0.63469696", "text": "def gateway(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"gateway\")", "title": "" }, { "docid": "1cb432839a21ba6452e4f1060b609af1", "score": "0.63409764", "text": "def inband_ip_gateway(self):\n return self._inband_ip_gateway", "title": "" }, { "docid": "d94ce60def5c3a69efe2401c61e83b02", "score": "0.6292988", "text": "def gateway_endpoint(self) -> str:\n return pulumi.get(self, \"gateway_endpoint\")", "title": "" }, { "docid": "4a22490c202fdf8fef4f30cb4502819e", "score": "0.62562835", "text": "def default_route(node):\n#TODO: check node is a server\n if not node.is_server:\n LOG.debug(\"Only return default route for servers, %s is a %s\" % (node, node.device_type))\n return\n\n for link in node.network.links(node):\n if link.remote_host.is_router:\n return link.remote_ip", "title": "" }, { "docid": "4825606ab69855f3ad745d172ab52691", "score": "0.61729497", "text": "def local_gateway(self):\n return 
self.vpn.get(str(LogField.SECURITYGATEWAY))", "title": "" }, { "docid": "3a17156ee575bc122fc50d9f4e910d08", "score": "0.61046666", "text": "def gateway_device(self):\n return self._gateway_device", "title": "" }, { "docid": "a85ab3ece0a935804ac633627258e93e", "score": "0.60973006", "text": "def getIpmiGatewayIP(self):\n logging.debugv(\"config.py->getIpmiGatewayIP(self)\", [])\n try:\n ipmiGwIp = self.ipmi['gwip']\n return ipmiGwIp\n except KeyError:\n self.ipmi['gwip'] = \"\"\n self.ipmi.write()\n return self.ipmi['gwip']", "title": "" }, { "docid": "9c1f56b4f23ab00ff37755923298f7fe", "score": "0.609138", "text": "def gateway_default_site(self) -> Optional[pulumi.Input['SubResourceArgs']]:\n return pulumi.get(self, \"gateway_default_site\")", "title": "" }, { "docid": "fe3e632a1dbaeec2558f9ea7dd9172a5", "score": "0.6062092", "text": "def gateway(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"gateway\")", "title": "" }, { "docid": "831cc7aa966c397a5017cd2449d789b7", "score": "0.6050875", "text": "def get_default_network_id(self):\n vm_port = self._get_port_from_host_iface(self.link_iface)\n\n return vm_port['network_id']", "title": "" }, { "docid": "7d32a9264f1318498e4e430eeb620aa3", "score": "0.60454845", "text": "def _find_ip4_addresses():\n global _ip4_addresses\n proto = socket.AF_INET\n if _ip4_addresses is None:\n _ip4_addresses = []\n\n #\n # Determine the interface for the default gateway\n # (if any) and, later, prioritise the INET address on\n # that interface.\n #\n default_gateway = netifaces.gateways()['default']\n if proto in default_gateway:\n _, default_gateway_interface = default_gateway[proto]\n else:\n default_gateway_interface = None\n\n for interface in netifaces.interfaces():\n for info in netifaces.ifaddresses(interface).get(netifaces.AF_INET, []):\n if info['addr']:\n if interface == default_gateway_interface:\n _ip4_addresses.insert(0, info['addr'])\n else:\n _ip4_addresses.append(info['addr'])\n\n return _ip4_addresses", "title": "" }, { "docid": "20a42ef8ae6d1c25ca2d9b737f751068", "score": "0.6036709", "text": "def gateway(self):\n return self._gateway", "title": "" }, { "docid": "78529268e7043164cbbb2b9d9f072a73", "score": "0.60029256", "text": "def set_gateway(self, gateway):\n for l3config in self._tree.iterfind(\"ipv4Configuration\"):\n l3config.attrib['addressConfiguration'] = \"Fixed\"\n\n for gateway_obj in l3config.iterfind(\"DefaultGateway\"):\n self._set_address(gateway_obj, gateway)", "title": "" }, { "docid": "7c2d2ca72bde8ed0a9a3805d7512c135", "score": "0.59850794", "text": "def __get_gateway__(self):\n\n try:\n urllib2.urlopen(\"http://\" + self.DEFAULT_MODEM_IP + \"/RouterStatus.html\", timeout=1)\n return self.DEFAULT_MODEM_IP\n except urllib2.URLError:\n with open(\"/proc/net/route\") as fh:\n for line in fh:\n fields = line.strip().split()\n if fields[1] != '00000000' or not int(fields[3], 16) & 2:\n continue\n\n return socket.inet_ntoa(struct.pack(\"<L\", int(fields[2], 16)))", "title": "" }, { "docid": "08fb89a8cbb7e8dac686787c8aa913ce", "score": "0.598173", "text": "def findGateway(self):\n __nme__ = \"findGateway\"\n info = self.getCmdPipe('netstat -rn | grep \"^0.0.0.0\"', __name__)\n gateway = info.split()[1]\n return gateway", "title": "" }, { "docid": "25ea43cf0b0450ca955c31be9b185b8a", "score": "0.59425837", "text": "def gateway_network_interfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GatewayGatewayNetworkInterfaceArgs']]]]:\n return pulumi.get(self, \"gateway_network_interfaces\")", "title": "" }, { "docid": 
"70211bab4830add3100df1846af3398c", "score": "0.58943444", "text": "def gateway_network_interfaces(self) -> pulumi.Output[Sequence['outputs.GatewayGatewayNetworkInterface']]:\n return pulumi.get(self, \"gateway_network_interfaces\")", "title": "" }, { "docid": "d2c108f8ff01da124d93883d96865e62", "score": "0.589127", "text": "def createIpv4Ngpf(self, ethernetObj, name=None, ipv4Address='', ipv4AddressPortStep='disabled', gateway=None,\n gatewayPortStep='disabled', prefix=None, resolveGateway=True):\n ipv4Url = self.httpHeader+ethernetObj+'/ipv4'\n response = self.post(ipv4Url)\n ipv4Obj = response.json()['links'][0]['href']\n ipv4Response = self.get(self.httpHeader+ipv4Obj)\n\n if name != None:\n self.patch(self.httpHeader+ipv4Obj, data={'name': name})\n\n # Config IPv4 address\n multivalue = ipv4Response.json()['address']\n if type(ipv4Address) is dict:\n self.patch(self.httpHeader+multivalue+\"/counter\", data=ipv4Address)\n else:\n self.patch(self.httpHeader+multivalue+\"/singleValue\", data={'value': ipv4Address})\n\n # Config IPv4 port step\n portStepMultivalue = self.httpHeader+multivalue+'/nest/1'\n if ipv4AddressPortStep is not 'disabled':\n self.patch(portStepMultivalue, data={'step': ipv4AddressPortStep})\n\n if ipv4AddressPortStep == 'disabled':\n self.patch(portStepMultivalue, data={'enabled': False})\n\n # Config Gateway\n multivalue = ipv4Response.json()['gatewayIp']\n if type(gateway) is dict:\n self.patch(self.httpHeader+multivalue+\"/counter\", data=gateway)\n else:\n self.patch(self.httpHeader+multivalue+\"/singleValue\", data={'value': gateway})\n\n # Config Gateway port step\n portStepMultivalue = self.httpHeader+multivalue+'/nest/1'\n if gatewayPortStep is not 'disabled':\n self.patch(portStepMultivalue, data={'step': gatewayPortStep})\n\n if gatewayPortStep == 'disabled':\n self.patch(portStepMultivalue, data={'enabled': False})\n\n # Config resolve gateway\n multivalue = ipv4Response.json()['resolveGateway']\n self.patch(self.httpHeader+multivalue+\"/singleValue\", data={'value': resolveGateway})\n\n multivalue = ipv4Response.json()['prefix']\n self.patch(self.httpHeader+multivalue+\"/singleValue\", data={'value': prefix})\n\n self.configuredProtocols.append(ipv4Obj)\n return ipv4Obj", "title": "" }, { "docid": "ed7f91d6c364ad920b8fa5d02e4848a6", "score": "0.58804244", "text": "def get_interface_ipv4_address(device, interface):\n\n return get_interface_ip_address(device, interface, 'ipv4')", "title": "" }, { "docid": "b875b74c3e9bcaed1e538abb5c53fb20", "score": "0.5847564", "text": "def get_gateway_interface():\n with open(\"/proc/net/route\") as fh:\n for line in fh:\n fields = line.strip().split()\n if fields[1] != '00000000' or not int(fields[3], 16) & 2:\n continue\n return fields[0]", "title": "" }, { "docid": "5cbd4d0e3d1fab072b44a552a8c9813c", "score": "0.5829351", "text": "def gateway_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"gateway_id\")", "title": "" }, { "docid": "5cbd4d0e3d1fab072b44a552a8c9813c", "score": "0.5829351", "text": "def gateway_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"gateway_id\")", "title": "" }, { "docid": "4f2018747256af52f66ccdef9ff3f2eb", "score": "0.581515", "text": "def gateway_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"gateway_name\")", "title": "" }, { "docid": "2dd4cc175c1a362509bce1f42498790e", "score": "0.57755613", "text": "def gateway_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"gateway_id\")", "title": "" }, { "docid": 
"b5e3b1769c0c2471eafa2500fa18f704", "score": "0.5763135", "text": "def print_network(interface, host, subnet, gateway):\n # `gateway` is missing when it's coming from `ifconfig`\n print(f\"{interface:10} {host:15} {subnet:15} {gateway or '':18}\")", "title": "" }, { "docid": "f4c76a810471b8d34cade4051189b30d", "score": "0.5712656", "text": "def user_plane_access_ipv4_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_plane_access_ipv4_address\")", "title": "" }, { "docid": "f4c76a810471b8d34cade4051189b30d", "score": "0.5712656", "text": "def user_plane_access_ipv4_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_plane_access_ipv4_address\")", "title": "" }, { "docid": "40315953a8388c6197bfdc777e181eb2", "score": "0.56862533", "text": "def gateway_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"gateway_name\")", "title": "" }, { "docid": "140db065ab60a33978cd9631a4b2c843", "score": "0.567905", "text": "def nat_gateway_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"nat_gateway_id\")", "title": "" }, { "docid": "198da20f1293f7e1a30b06ceeec97314", "score": "0.5626424", "text": "def gateway_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"gateway_name\")", "title": "" }, { "docid": "f428b57e62cdb56e49ba9b5452987390", "score": "0.5626092", "text": "def ipv4_mapped(self):\n if self._ip >> 32 == 0xffff:\n return IPv4Address(self._ip & 0xffffffff)\n return None", "title": "" }, { "docid": "86e3914aec670c587c046e3aba9a1b7a", "score": "0.56218845", "text": "def public_ipv4_address(self) -> Optional[str]:\n return pulumi.get(self, \"public_ipv4_address\")", "title": "" }, { "docid": "006e743add69b60cc03881ff407f9da5", "score": "0.5618266", "text": "def nat_gateway_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"nat_gateway_id\")", "title": "" }, { "docid": "afc955ab5e2ec9dd9d50b5d6294e91e7", "score": "0.56127536", "text": "def public_ipv4():\n for addrinfo in socket.getaddrinfo(socket.gethostname(), None):\n if addrinfo[1] in (socket.SOCK_STREAM, socket.SOCK_DGRAM) and addrinfo[0] == socket.AF_INET:\n return addrinfo[4][0]", "title": "" }, { "docid": "10df53c4ef42c4097bc81baa1c33568b", "score": "0.5592991", "text": "def getNgpfGatewayIpMacAddress(self, gatewayIp):\n queryData = {'from': '/',\n 'nodes': [{'node': 'topology', 'properties': [], 'where': []},\n {'node': 'deviceGroup', 'properties': [], 'where': []},\n {'node': 'ethernet', 'properties': [], 'where': []},\n {'node': 'ipv4', 'properties': ['gatewayIp', 'sessionStatus'],\n 'where': [{'property': 'sessionStatus', 'regex': '.*up'}]}\n ]}\n queryResponse = self.query(data=queryData, silentMode=False)\n for topology in queryResponse.json()['result'][0]['topology']:\n for deviceGroup in topology['deviceGroup']:\n #print('\\ndeviceG:', deviceGroup)\n try:\n # Getting in here means IPv4 session status is UP.\n ipv4Href = deviceGroup['ethernet'][0]['ipv4'][0]['href']\n gatewayIpMultivalue = deviceGroup['ethernet'][0]['ipv4'][0]['gatewayIp']\n self.logInfo('\\n\\t%s' % ipv4Href)\n self.logInfo('\\tIPv4 sessionStatus: %s' % deviceGroup['ethernet'][0]['ipv4'][0]['sessionStatus'])\n self.logInfo('\\tGatewayIpMultivalue: %s' % gatewayIpMultivalue)\n\n response = self.get(self.httpHeader+gatewayIpMultivalue)\n valueList = response.json()['values']\n self.logInfo('gateway IP: %s' % valueList)\n if gatewayIp in valueList:\n gatewayIpIndex = valueList.index(gatewayIp)\n self.logInfo('\\nFound gateway: %s ; Index:%s' % (gatewayIp, 
gatewayIpIndex))\n # Get the IPv4 gateway mac address with the \"gatewayIpMultivalue\"\n queryData = {'from': deviceGroup['ethernet'][0]['href'],\n 'nodes': [{'node': 'ipv4', 'properties': ['gatewayIp', 'resolvedGatewayMac'], \n 'where': [{'property': 'gatewayIp', 'regex': gatewayIpMultivalue}]}\n ]}\n queryResponse = self.query(data=queryData, silentMode=False)\n gatewayMacAddress = queryResponse.json()['result'][0]['ipv4'][0]['resolvedGatewayMac'][gatewayIpIndex]\n self.logInfo('\\ngatewayIpMacAddress: %s' % gatewayMacAddress)\n if 'Unresolved' in gatewayMacAddress:\n raise IxNetRestApiException('Gateway Mac Address is unresolved.')\n return gatewayMacAddress\n except:\n pass\n return 0", "title": "" }, { "docid": "d9285251ba67d69f392c9a39019b2324", "score": "0.55845165", "text": "def DEVSERVER_DEFAULT_ADDR(self): # noqa\n if 'vagrant' in socket.gethostname():\n addr = '0.0.0.0'\n else:\n addr = '127.0.0.1'\n return addr", "title": "" }, { "docid": "bb1f8643cd7ed5d9da33deaed5470bb7", "score": "0.55707014", "text": "def peer_gateway(self):\n return self.vpn.get(str(LogField.PEERSECURITYGATEWAY))", "title": "" }, { "docid": "0ad33d1c1c240f477a8ea48f65d4fe49", "score": "0.55443305", "text": "def _get_next_hop_enable_default(self):\n return self.__next_hop_enable_default", "title": "" }, { "docid": "93266a436d44595c75e51348f213f604", "score": "0.55430305", "text": "def gateway_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"gateway_id\")", "title": "" }, { "docid": "e1058bef67872c0129b58a7a84fec4a7", "score": "0.55202985", "text": "def user_plane_access_ipv4_subnet(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_plane_access_ipv4_subnet\")", "title": "" }, { "docid": "e1058bef67872c0129b58a7a84fec4a7", "score": "0.55202985", "text": "def user_plane_access_ipv4_subnet(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_plane_access_ipv4_subnet\")", "title": "" }, { "docid": "4ddd3ccd3049b137686ff9df87f3619f", "score": "0.55202234", "text": "def public_ipv4_address(self) -> str:\n return pulumi.get(self, \"public_ipv4_address\")", "title": "" }, { "docid": "38d41d008e99c8874c2ea19df8ae1f88", "score": "0.55137473", "text": "def Ipv4StepPerInterface(self):\n if self.force_auto_sync:\n self.get('Ipv4StepPerInterface')\n return self._Ipv4StepPerInterface", "title": "" }, { "docid": "09280faf2ad398b5b6bdf17e3477b599", "score": "0.5501738", "text": "def default_interface(self):\n if self._dinter is None:\n self.get_interfaces()\n for ip, val in self.interfaces.iteritems():\n if val['main'] == '1':\n self._dinter = val\n break\n return self._dinter", "title": "" }, { "docid": "a6b33f0d701e39772ba70a08de5671b0", "score": "0.54977655", "text": "def gateway_fqdn(self) -> str:\n return pulumi.get(self, \"gateway_fqdn\")", "title": "" }, { "docid": "4c5485b724bf2640ccbacc4b39edb2c8", "score": "0.54920024", "text": "def _get_ipv4(self):\n return self.__ipv4", "title": "" }, { "docid": "5ed36fce07f90987e999dac191307556", "score": "0.54873306", "text": "def get_virtual_network_host_ip(self):\n temp = (\n \"docker ps -q \"\n \"--filter label=com.docker.compose.project={project} \"\n \"--filter label=com.docker.compose.service={service}\"\n )\n cmd = temp.format(project=self.name, service=\"mender-client\")\n\n output = subprocess.check_output(\n cmd + \"| head -n1 | xargs -r \"\n \"docker inspect --format='{{range .NetworkSettings.Networks}}{{.Gateway}}{{end}}'\",\n shell=True,\n )\n return output.decode().split()[0]", "title": "" }, { "docid": 
"6ac406d5f5b0494f90d5daf802b9e5df", "score": "0.5482472", "text": "def local_gateway_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"local_gateway_id\")", "title": "" }, { "docid": "e7da5cf7da974dafd4c779d2dcec2ebc", "score": "0.54802746", "text": "def get_iface():\n try:\n iface = netifaces.gateways()['default'][netifaces.AF_INET][1]\n except:\n ifaces = []\n for iface in netifaces.interfaces():\n # list of ipv4 addrinfo dicts\n ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])\n\n for entry in ipv4s:\n addr = entry.get('addr')\n if not addr:\n continue\n if not (iface.startswith('lo') or addr.startswith('127.')):\n ifaces.append(iface)\n\n iface = ifaces[0]\n\n return iface", "title": "" }, { "docid": "d980b72b885644783a3b68232e1db2f5", "score": "0.5474136", "text": "def user_plane_access_ipv4_address(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_plane_access_ipv4_address\")", "title": "" }, { "docid": "8db31706141e25cf809806ef62899b07", "score": "0.5463559", "text": "def local_gateway_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"local_gateway_id\")", "title": "" }, { "docid": "6632262dadf7d92821698afb4dcb384b", "score": "0.54538494", "text": "def GetManagementIP(self):\n return ConnectionInfo.DeviceIP", "title": "" }, { "docid": "24e5ff20e103ccbd4c440abb45777bc3", "score": "0.5433504", "text": "def out_of_band_ip_gateway(self):\n return self._out_of_band_ip_gateway", "title": "" }, { "docid": "89cdbad0a1e3f092e00f40e189d21cc0", "score": "0.54241955", "text": "def _gateway_for(self, guild_id):\n shard_count = self.shard_count\n if shard_count:\n gateway = self.gateway.gateways[(guild_id>>22)%shard_count]\n else:\n gateway = self.gateway\n return gateway", "title": "" }, { "docid": "ef5c20ba53807d603f2226cd7251c1b4", "score": "0.5403146", "text": "def transit_gateway_address(self) -> str:\n return pulumi.get(self, \"transit_gateway_address\")", "title": "" }, { "docid": "b812ab552cbb4bb4a5c66210f9aae03c", "score": "0.5375674", "text": "def getIp():\n return Ip", "title": "" }, { "docid": "9f931e2c5f5721420ec468bd8da9e003", "score": "0.5372139", "text": "def controller_address(self):\n if (self._controller_address is None or\n self._controller_address == \"auto\"):\n # Find the subnet with the lowest CIDR range.\n min_subnet = sorted(\n self.all_subnets,\n cmp=lambda x, y: cmp(IPNetwork(x.cidr_block),\n IPNetwork(y.cidr_block)))[0]\n return IPNetwork(min_subnet.cidr_block).network + 4\n return self._controller_address", "title": "" }, { "docid": "1e0a235f4e874f58b0c3d9c00cbc0cb3", "score": "0.53700894", "text": "def getNgpfGatewayIpMacAddress_backup(self, gatewayIp):\n queryData = {'from': '/',\n 'nodes': [{'node': 'topology', 'properties': [], 'where': []},\n {'node': 'deviceGroup', 'properties': [], 'where': []},\n {'node': 'ethernet', 'properties': [], 'where': []},\n {'node': 'ipv4', 'properties': ['gatewayIp'], 'where': []}\n ]}\n queryResponse = self.query(data=queryData, silentMode=False)\n for topology in queryResponse.json()['result'][0]['topology']:\n gatewayIpMultivalue = topology['deviceGroup'][0]['ethernet'][0]['ipv4'][0]['gatewayIp']\n ipv4Href = topology['deviceGroup'][0]['ethernet'][0]['ipv4'][0]\n self.logInfo('IPv4 obj:', ipv4Href)\n self.logInfo('Gateway multivalue: %s' % gatewayIpMultivalue)\n response = self.get(self.httpHeader+gatewayIpMultivalue)\n valueList = response.json()['values']\n self.logInfo(valueList)\n if gatewayIp in valueList:\n gatewayIpIndex = 
valueList.index(gatewayIp)\n self.logInfo('Found gateway: %s ; Index:%s' % (gatewayIp, gatewayIpIndex))\n queryData = {'from': '/',\n 'nodes': [{'node': 'topology', 'properties': [], 'where': []},\n {'node': 'deviceGroup', 'properties': [], 'where': []},\n {'node': 'ethernet', 'properties': [], 'where': []},\n {'node': 'ipv4', 'properties': ['gatewayIp', 'resolvedGatewayMac'], \n 'where': [{'property': 'gatewayIp', 'regex': gatewayIpMultivalue}]}\n ]}\n \n queryResponse = self.query(data=queryData, silentMode=False)\n for topologyNode in queryResponse.json()['result'][0]['topology']:\n try:\n gatewayMacAddress = topologyNode['deviceGroup'][0]['ethernet'][0]['ipv4'][0]['resolvedGatewayMac'][gatewayIpIndex]\n self.logInfo('\\ngatewayIpMacAddress: %s' % gatewayMacAddress)\n if 'Unresolved' in gatewayMacAddress:\n raise IxNetRestApiException('Gateway Mac Address is unresolved.')\n return gatewayMacAddress\n except:\n pass\n return 0", "title": "" }, { "docid": "5c47aec143342940a2116b8ae7535f6e", "score": "0.5368547", "text": "def user_plane_access_ipv4_subnet(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_plane_access_ipv4_subnet\")", "title": "" }, { "docid": "88e0b9cce5519104d09f9c3a2d722a91", "score": "0.53593224", "text": "def get_vpc_gateway_ip(cidr_block_formatting):\n vpc_gateway_ip = cidr_block_formatting.replace(\n \"\\\\\", \"\").format(0, 1)\n return vpc_gateway_ip", "title": "" }, { "docid": "3146e34ba05b88b219a79e8c80345434", "score": "0.5351517", "text": "def getGatewayIDByName(self):\n gateways = self.sitewise.list_gateways()\n\n if 'gatewaySummaries' in gateways:\n payload = gateways['gatewaySummaries']\n while 'NextToken' in gateways:\n gateways = self.sitewise.list_gateways(NextToken = gateways['NextToken'])\n payload.extend(gateways['gatewaySummaries'])\n\n for gateway in payload:\n if gateway['gatewayName'] == self.gatewayName:\n print('Gateway found: {}'.format(gateway))\n print('Gateway ID: {}'.format(gateway['gatewayId']))\n return gateway['gatewayId']\n\n return None", "title": "" }, { "docid": "ca1126fe555eb3ee9f45aa7bae60aa3f", "score": "0.5338011", "text": "def getIpmiGatewayMAC(self):\n logging.debugv(\"config.py->getIpmiGatewayMAC(self)\", [])\n try:\n ipmiGwMac = self.ipmi['gwmac']\n return ipmiGwMac\n except KeyError:\n self.ipmi['gwmac'] = \"\"\n self.ipmi.write()\n return self.ipmi['gwmac']", "title": "" }, { "docid": "e1134837a8234aba670d7267873c9fd8", "score": "0.5336309", "text": "def next_hop_ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"next_hop_ip_address\")", "title": "" }, { "docid": "e1134837a8234aba670d7267873c9fd8", "score": "0.5336309", "text": "def next_hop_ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"next_hop_ip_address\")", "title": "" }, { "docid": "b642c10a55d5ab7209cf99780d801cd2", "score": "0.5332825", "text": "def public_ip(self):\n if self.addresses['public']:\n return self.addresses['public'][0]\n else:\n return u''", "title": "" }, { "docid": "a5c979d01d7df8167fdc71d02544f075", "score": "0.5329866", "text": "def changeGateway (self, machineguid, defaultgateway = \"\", jobguid = \"\", executionparams = {}):\n\treturn self._rootobject.changeGateway(machineguid,defaultgateway,jobguid,executionparams)", "title": "" }, { "docid": "e8d67edc6b2503c8c68e1f56d955adaf", "score": "0.53179884", "text": "def gateway_bridge_source(self) -> Optional['outputs.FlowSourceGatewayBridgeSource']:\n return pulumi.get(self, \"gateway_bridge_source\")", "title": "" }, { 
"docid": "a9c21886cd770afc998cb2587c85d9ca", "score": "0.53057927", "text": "def ip_address(self):\n return self.ifconfig()[0]", "title": "" }, { "docid": "88221fd24ed229880ca5f1c3b0503fb1", "score": "0.53030473", "text": "def ipv4_address(self) -> str:\n return pulumi.get(self, \"ipv4_address\")", "title": "" }, { "docid": "88221fd24ed229880ca5f1c3b0503fb1", "score": "0.53030473", "text": "def ipv4_address(self) -> str:\n return pulumi.get(self, \"ipv4_address\")", "title": "" }, { "docid": "88221fd24ed229880ca5f1c3b0503fb1", "score": "0.53030473", "text": "def ipv4_address(self) -> str:\n return pulumi.get(self, \"ipv4_address\")", "title": "" }, { "docid": "88221fd24ed229880ca5f1c3b0503fb1", "score": "0.53030473", "text": "def ipv4_address(self) -> str:\n return pulumi.get(self, \"ipv4_address\")", "title": "" }, { "docid": "88221fd24ed229880ca5f1c3b0503fb1", "score": "0.53030473", "text": "def ipv4_address(self) -> str:\n return pulumi.get(self, \"ipv4_address\")", "title": "" }, { "docid": "bdfc13bfe1cb83b3d710d499e794c4f9", "score": "0.52971053", "text": "def cni_ipam(host_cidrv4: str, host_gateway: str):\n host = ipaddr.IPNetwork(host_cidrv4)\n subnet = host\n ip_cut = int(host.ip.__str__().split(\".\")[-1])\n if ConfigSyncSchedules.sub_ips:\n sub = ipaddr.IPNetwork(host.network).ip + (ip_cut * ConfigSyncSchedules.sub_ips)\n host = ipaddr.IPNetwork(sub)\n range_start = host.ip + ConfigSyncSchedules.skip_ips\n range_end = range_start + ConfigSyncSchedules.range_nb_ips\n ipam = {\n \"type\": \"host-local\",\n \"subnet\": \"%s/%s\" % (subnet.network.__str__(), subnet.prefixlen),\n \"rangeStart\": range_start.__str__(),\n \"rangeEnd\": range_end.__str__(),\n \"gateway\": host_gateway,\n \"routes\": [\n {\n \"dst\": \"%s\" % EC.kubernetes_service_cluster_ip_range,\n \"gw\": ipaddr.IPNetwork(host_cidrv4).ip.__str__()\n },\n {\"dst\": \"0.0.0.0/0\"},\n ],\n \"dataDir\": \"/var/lib/cni/networks\"\n }\n return ipam", "title": "" }, { "docid": "d0528a324cbd2a187c0c4bd2d2ce6c7b", "score": "0.52959645", "text": "def _get_overlay_gateway(self):\n return self.__overlay_gateway", "title": "" }, { "docid": "d0528a324cbd2a187c0c4bd2d2ce6c7b", "score": "0.52959645", "text": "def _get_overlay_gateway(self):\n return self.__overlay_gateway", "title": "" }, { "docid": "6a5c70e9a574a2cc8c378e59e130e156", "score": "0.5290637", "text": "def backup_controller_address(self):\n if self._backup_controller_address is None:\n return None\n if self._backup_controller_address == \"auto\":\n # Figure out which AZ the controller is in.\n controller_az = self.controller_subnet.availability_zone\n \n # Remove all subnets in the same AZ.\n other_az_subnets = [\n subnet for subnet in self.all_subnets\n if subnet.availability_zone != controller_az]\n\n if len(other_az_subnets) == 0:\n # No other AZ available.\n return None\n \n # Find the subnet with the lowest CIDR range.\n min_subnet = sorted(\n other_az_subnets,\n cmp=lambda x, y: cmp(IPNetwork(x.cidr_block),\n IPNetwork(y.cidr_block)))[0]\n return IPNetwork(min_subnet.cidr_block).network + 4\n \n return self._backup_controller_address", "title": "" }, { "docid": "448bf67107a2abd76cea9dd70d5b3de5", "score": "0.5290117", "text": "def get_phone_ip(ad):\n IP = ad.droid.connectivityGetIPv4Addresses('wlan0')[0]\n\n return IP", "title": "" }, { "docid": "429967fd1dc6ec6df5e65d570d51dc64", "score": "0.528519", "text": "def gateway_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"gateway_type\")", "title": "" }, { "docid": 
"6549f047e4d8689af836909e3ef05941", "score": "0.5272486", "text": "def Ipv4DestinationAddress(self):\n if self.force_auto_sync:\n self.get('Ipv4DestinationAddress')\n return self._Ipv4DestinationAddress", "title": "" }, { "docid": "ddc236de4450e4b33f98f727fa6348fa", "score": "0.5266652", "text": "def show_gw(update: Update, context: CallbackContext) -> None:\n with open(\"/proc/net/route\") as routeFile:\n for line in routeFile:\n fields = line.strip().split()\n if fields[1] != '00000000' or not int(fields[3], 16) & 2:\n # If not default route or not RTF_GATEWAY, skip it\n continue\n update.message.reply_text(f'%s: Current GW is -> %s' % \n (hostname, socket.inet_ntoa(struct.pack(\"<L\", int(fields[2], 16)))))", "title": "" }, { "docid": "f06533cff053b1af2360f8cc5bb5ba48", "score": "0.5257066", "text": "def get_ship_ip():\n\n agent_self_dict = consul_query('agent/self')\n return agent_self_dict['Config']['AdvertiseAddr']", "title": "" }, { "docid": "6f7eb8a56af85995428f72906981a0ab", "score": "0.5255129", "text": "def public_ipv4_pool(self) -> str:\n return pulumi.get(self, \"public_ipv4_pool\")", "title": "" }, { "docid": "1b5a490b8b9095bd942ff270611afa81", "score": "0.52500707", "text": "def local_endpoint(self):\n return self.vpn.get(str(LogField.ENDPOINT))", "title": "" }, { "docid": "fd60a572c83ef5499871515b2f0dd5a8", "score": "0.5242035", "text": "def address(self):\r\n return self.network_interfaces['storage'][0]", "title": "" }, { "docid": "b86de8d0bb8f83d70cd0654c71fb9729", "score": "0.52201766", "text": "def _get_src_ipv4(self):\n return self.__src_ipv4", "title": "" }, { "docid": "170308510d8a629c572b3bf520171be9", "score": "0.52121127", "text": "def Ipv4Destination(self):\n return self._get_attribute('ipv4Destination')", "title": "" }, { "docid": "92aa48115867cbe9a5e255a5e9354a2c", "score": "0.5205977", "text": "def _default_interface():\n import platform\n system = platform.system()\n if system == \"Linux\":\n return \"eth0\"\n elif system == \"Darwin\":\n return \"en0\"\n return None", "title": "" }, { "docid": "a4cc6a6528d049239fe6144fb3c0d06c", "score": "0.52058583", "text": "def get_host_ip(timeout=10):\n\n return get_default_route(timeout)[2]", "title": "" }, { "docid": "4ceddda8ed86eab634c48ae4b74b73f8", "score": "0.5201575", "text": "def neighbor_ip(request, mux_config): # noqa F811\n ip_version = request.param\n selected_intf = random.choice(list(mux_config.values()))\n neigh_ip = ip_interface(selected_intf[\"SERVER\"][ip_version]).ip\n logger.info(\"Using {} as neighbor IP\".format(neigh_ip))\n return neigh_ip", "title": "" } ]
7d793c2aaa9fc460d44b0b5ee6dc5fd8
Calculates the accuracy of prediction methods for the grouped evaluation problem. Only counts those predictions that have all groups correct in a predicted groupings. Ignores any partial correctness.
[ { "docid": "f34a4b76e309f215ed7a616666afd354", "score": "0.7314673", "text": "def calculateAccuracy(self, results, dataSet):\n\t\tmatch = 0\n\t\tfor key in dataSet.keys():\n\t\t\tpredictedSenseGroups = results[key]\n\t\t\t# Examples are in order so split into group size to find groups \n\t\t\torderedExamples = dataSet[key]\n\t\t\tsenseNum = len(predictedSenseGroups)\n\t\t\tgroupSize = len(predictedSenseGroups[0])\n\t\t\tactualSenseGroups = []\n\t\t\tfor i in range(senseNum):\n\t\t\t\tactualSenseGroups.append(orderedExamples[i*groupSize:i*groupSize+groupSize])\n\t\t\t\n\t\t\t# Compare actual groups to predicted groups\n\t\t\tgroupMatchCount = 0\n\t\t\tfor predictedGroup in predictedSenseGroups:\n\t\t\t\tfor actualGroup in actualSenseGroups:\n\t\t\t\t\tactualGroup = [example['sent'] for example in actualGroup]\n\t\t\t\t\tif set(predictedGroup) == set(actualGroup):\n\t\t\t\t\t\tgroupMatchCount += 1\n\t\t\tif groupMatchCount == senseNum:\n\t\t\t\tmatch += 1\t\t\t\t\t\n\t\treturn match / float(len(dataSet))", "title": "" } ]
[ { "docid": "0d4bfdaa5c68857e9cc98002b314587a", "score": "0.7493893", "text": "def accuracy(self):\n # Initialize key variables\n correct = {}\n prediction = 0\n cls_count = {}\n accuracy = {}\n\n # Analyze all the data\n for cls in self.pca_object.classes():\n # Get list of x values to test\n vectors = self.pca_object.xvalues(cls)\n\n # Process each vector\n for vector in vectors:\n # Get the prediction\n prediction = self.classifier(vector)\n\n # Only count definitive predictions\n if prediction is not None:\n # Count the number of correct predictions\n if prediction == cls:\n if cls in correct:\n correct[cls] += 1\n else:\n correct[cls] = 1\n\n # Increment the count\n if cls in cls_count:\n cls_count[cls] += 1\n else:\n cls_count[cls] = 1\n\n # Calculate per class accuracy\n correct[None] = 0\n cls_count[None] = 0\n for cls in cls_count.keys():\n if cls_count[cls] != 0:\n accuracy[cls] = 100 * (correct[cls] / cls_count[cls])\n\n # Keep a tally for all successes\n correct[None] = correct[None] + correct[cls]\n cls_count[None] = cls_count[None] + cls_count[cls]\n\n # Calulate overall accuracy\n accuracy[None] = 100 * (correct[None] / cls_count[None])\n\n # Return\n return accuracy", "title": "" }, { "docid": "2764bb6ff0327d68519dfd84354b4154", "score": "0.74459577", "text": "def accuracy(self):\n # Initialize key variables\n correct = {}\n prediction = 0\n cls_count = {}\n accuracy = {}\n\n # Analyze all the data\n for cls in self.pca_object.classes():\n # Get list of x values to test\n vectors = self.pca_object.xvalues(cls)\n\n # Process each vector\n for xvalue in vectors:\n # Get prediction\n prediction = self.classifier(xvalue)\n\n if prediction is not None:\n # Count the number of correct predictions\n if prediction == cls:\n if cls in correct:\n correct[cls] += 1\n else:\n correct[cls] = 1\n\n # Increment the count\n if cls in cls_count:\n cls_count[cls] += 1\n else:\n cls_count[cls] = 1\n\n # Calculate per class accuracy\n correct[None] = 0\n cls_count[None] = 0\n for cls in cls_count.keys():\n if cls_count[cls] != 0:\n accuracy[cls] = 100 * (correct[cls] / cls_count[cls])\n\n # Keep a tally for all successes\n correct[None] = correct[None] + correct[cls]\n cls_count[None] = cls_count[None] + cls_count[cls]\n\n # Calulate overall accuracy\n accuracy[None] = 100 * (correct[None] / cls_count[None])\n\n # Return\n return accuracy", "title": "" }, { "docid": "8734f1042cc2f5947b9fda607fa91019", "score": "0.7290938", "text": "def evaluate_accuracy_using_predictor(self):\n correct = 0\n count = 0\n for i in range(len(self.eval_labels)):\n # print(i)\n image = self.eval_data[i].flat\n mask = np.zeros_like(image)\n while mask.sum() < self.N_to_mask:\n a = np.random.randint(mask.shape[0])\n if image[a] > 0:\n mask[a] = 1\n input = np.stack((mask, image * mask), axis=1)\n input = np.reshape(input, self.shape)\n prediction = self.predictor({\"masked_x\": input})\n probs = prediction[\"probabilities\"][0]\n pred_label = np.argmax(probs)\n count += 1\n if pred_label == self.eval_labels[i]:\n correct += 1\n # print(correct / count)\n return correct / count", "title": "" }, { "docid": "f6c36fe94bd5a7009f027629ec2d9371", "score": "0.7272453", "text": "def accuracy(self, preds, Y):\n accuracy = ((preds == Y).sum()) / len(preds)\n return accuracy", "title": "" }, { "docid": "4586c5c7ad827a32c6e0518fbc7f43fc", "score": "0.72526646", "text": "def accuracy(data, predictions):\n total = 0\n correct = 0\n for i in range(len(data)):\n point = data[i]\n pred = predictions[i]\n total += 1\n guess = 
most_likely_class(pred)\n if guess == point.label:\n correct += 1\n return correct / total", "title": "" }, { "docid": "18635304000c26185c7ef1af2e2c1807", "score": "0.724481", "text": "def accuracy_per_class(self):\n\n class_correct = list(0. for i in range(3))\n class_total = list(0. for i in range(3))\n\n with torch.no_grad():\n for data in self.testloader:\n images, labels = data\n outputs = self.net(images)\n _, predicted = torch.max(outputs, 1)\n c = (predicted == labels).squeeze()\n\n # For each class check if the predicted output is equal to the ground truth label\n for i in range(len(labels)):\n label = labels[i]\n if c.ndimension() == 0:\n class_correct[label] += c.item()\n else:\n class_correct[label] += c[i].item()\n class_total[label] += 1\n\n for i in range(3):\n print('Accuracy of %5s : %2d %%' % (dataset.label_names().get(i),\n 100 * class_correct[i] / class_total[i]))", "title": "" }, { "docid": "48adda9e28bbfe7d6d7573d717c3b1f2", "score": "0.7200568", "text": "def accuracy(labels: List[str], predictions: List[str]) -> float:\r\n correct = sum([1 if label == prediction else 0 for label, prediction in zip(labels, predictions)])\r\n return correct / len(labels)", "title": "" }, { "docid": "ca6c35b180921015978f79cbe991b65e", "score": "0.7189987", "text": "def accuracy(self, predictions):\n\n # count the number of correct predictions\n correct = [i for i in range(len(predictions)) if predictions[i] == self.category_test[i]]\n\n # calculate accuracy of predictions\n acc = len(correct) / len(self.data_test) * 100\n\n return acc", "title": "" }, { "docid": "ed6ad4d4e5bd32f9b570fe3b06cbb69a", "score": "0.7115662", "text": "def accuracy(self, X_test, y_test):\n\n # TODO\n predictions = self.predict(X_test)\n print ('post predict')\n num_correct = 0\n for i in range(len(predictions)):\n if predictions[i] == y_test[i]:\n num_correct += 1\n return num_correct / len(predictions)", "title": "" }, { "docid": "2e60cde23cde1f570fd7b2562006304c", "score": "0.71124494", "text": "def multiclass_accuracy(prediction, ground_truth):\n correct = 0\n\n zipped = np.dstack((prediction, ground_truth))\n for p in np.rollaxis(zipped, 1):\n pair = p[0]\n if pair[0] == pair[1]:\n correct += 1\n\n return correct / prediction.shape[0]", "title": "" }, { "docid": "f7f08c74491069c42e93cf2802248ac3", "score": "0.70233655", "text": "def multiaccuracy(y_true, y_pred, normalize=False, totals=True):\n results = DataFrame({\n \"true\": y_true,\n \"pred\": y_pred,\n \"one\": 1\n }).pivot_table(index=\"true\",\n columns=\"pred\",\n values=\"one\",\n aggfunc=\"count\",\n margins=totals)\n return results / len(y_true) if normalize else results", "title": "" }, { "docid": "1613165f3e7ce4f39d36ebb0364fc8fb", "score": "0.70218706", "text": "def evaluation_accuracy(groundtruth, pred):\n true_positive_prediction = 0\n for p_key, p_value in pred.items():\n if p_key in groundtruth:\n # if prediction is no attribute values, e.g. 
[] and so is the groundtruth\n # May happen\n if not p_value and not groundtruth[p_key]:\n true_positive_prediction += 1\n # counts the number of good prediction for node p_key\n # here len(p_value)=1 but we could have tried to predict more values\n true_positive_prediction += len([c for c in p_value if c in groundtruth[p_key]])\n # no else, should not happen: train and test datasets are consistent\n return true_positive_prediction * 100 / sum(len(v) for v in pred.values())", "title": "" }, { "docid": "63bcc285d41a5b92213147921a719258", "score": "0.70072806", "text": "def accuracy(pred_list, real_list):\r\n\r\n pred_array = np.array(list(Evaluator.expand_list(pred_list)))\r\n real_array = np.array(list(Evaluator.expand_list(real_list)))\r\n return (pred_array == real_array).sum() * 1.0 / len(pred_array)", "title": "" }, { "docid": "251ee52f59b2d241d345e2eff1f0043c", "score": "0.7004327", "text": "def calculateSystemAccuracy():\n global testClassesList, predictionClassesList\n correctPredictions = 0\n totalPredictions = len(testClassesList)\n for i in range(0, totalPredictions):\n if testClassesList[i] == predictionClassesList[i]:\n correctPredictions += 1\n return float(correctPredictions)/float(totalPredictions)", "title": "" }, { "docid": "51352f6447a1e899c192bd9dee32d924", "score": "0.6989307", "text": "def accuracy(pred, actual):\n return sum(pred == actual) / len(pred)", "title": "" }, { "docid": "3ecc3df42b2cfea9f73d2b0665fd2b8b", "score": "0.6956965", "text": "def overall_accuracy(dct, num_attr, predictions, actual):\n total = len(predictions)\n correct_count = (predictions == actual).sum()\n print(\"%d out of %d\" % (correct_count, total))\n accuracy = (correct_count / total)\n print(\"%0.03f%% correctly predicted\" % (accuracy * 100))\n dct[num_attr] = accuracy\n return accuracy", "title": "" }, { "docid": "433e4ffa5a406a5f876982d5a905bd61", "score": "0.693092", "text": "def accuracy(true_labels, predictions):\n return np.array(\n [true_labels == predictions]\n ).sum()/len(true_labels)*100", "title": "" }, { "docid": "006be9273069c0248edb1aaea0203555", "score": "0.68724155", "text": "def compute_accuracy(predictions, ground_truth):\n\n\n true_predicted = 0\n \n # iterate over all instances\n for i in range(len(predictions)):\n\n if predictions[i] == ground_truth[i]:\n true_predicted += 1\n\n accuracy = true_predicted / len(predictions)\n\n return accuracy, true_predicted", "title": "" }, { "docid": "684dbf9760d04b28971b046032ddbf92", "score": "0.68517894", "text": "def compute_accuracy(true_labels, labels_pred): \n correct = (true_labels == labels_pred)\n correct_sum = correct.sum()\n return (float(correct_sum)/len(true_labels))", "title": "" }, { "docid": "0e97192e3e757bec55f5d1c4a7f01821", "score": "0.6832431", "text": "def score_accuracy(predictions, actuals):\n t = [pr for pr,act in zip(predictions, actuals) if pr==act]\n n = predictions\n return len(t)/len(n)", "title": "" }, { "docid": "bb147c189dc31df7962a4f2f77ce6d88", "score": "0.6827491", "text": "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n [batch_size, n_classes] = targets.shape\n accuracy = np.sum(targets[np.arange(0, batch_size), np.argmax(predictions, axis=1)] == 1) / batch_size\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "title": "" }, { "docid": "971ea8cc3540125f9022f4f1e5ac7bce", "score": "0.6817096", "text": "def accuracy(self, preds, labels):\n \n np_labels = np.asarray(labels)\n \n 
cor_syn = 0\n wrng_syn = 0\n cor_ant = 0\n wrng_ant = 0\n cor_irrel = 0\n wrng_irrel = 0\n\n for pred, label in zip(preds, np_labels):\n\n if label == 0: #irrels\n if pred == 0:\n cor_irrel += 1\n else:\n wrng_irrel += 1\n\n if label == 1: #syns\n if pred == 1:\n cor_syn += 1\n else:\n wrng_syn += 1\n\n if label == 2: #ants\n if pred == 2:\n cor_ant += 1\n else:\n wrng_ant += 1\n \n irrel_acc = (cor_irrel/(wrng_irrel+cor_irrel))*100\n syn_acc = (cor_syn/(wrng_syn+cor_syn))*100\n ant_acc = (cor_ant/(wrng_ant+cor_ant))*100\n \n return [irrel_acc, syn_acc, ant_acc]", "title": "" }, { "docid": "219b157026a3a04d17175edbaaa26086", "score": "0.6810572", "text": "def get_accuracy(y_pred, y_true, labels):\n\n stats = metrics.classification_report(y_true, y_pred)\n acc = metrics.accuracy_score(y_true, y_pred)\n \"\"\"\n TP = dict(zip(labels, [0] * len(labels)))\n TN = dict(zip(labels, [0] * len(labels)))\n FP = dict(zip(labels, [0] * len(labels)))\n FN = dict(zip(labels, [0] * len(labels)))\n for elem_pred, elem_true in zip(y_pred, y_true):\n if elem_pred == elem_true:\n TP[elem_pred] = TP[elem_pred] + 1\n for label in labels:\n if label != elem_pred:\n TN[label] = TN[label] + 1\n else:\n FP[elem_pred] = FP[elem_pred] + 1\n FN[elem_true] = FN[elem_true] + 1\n for label in labels:\n if label != elem_pred and label != elem_true:\n TN[label] = TN[label] + 1\n\n accuracies = dict(zip(labels, [0] * len(labels)))\n for label in labels:\n print (TP[label] + TN[label])\n print float(TP[label] + TN[label] + FP[label] + FN[label])\n accuracies[label] = (TP[label] + TN[label]) / float(TP[label] + sum(TN.values()) + FP[label] + FN[label])\n accuracy = (sum(TP.values()) + sum(TN.values())) / float(sum(TP.values()) + sum(TN.values()) + sum(FP.values()) + sum(FN.values()))\n return accuracies, accuracy\n \"\"\"\n return stats, acc", "title": "" }, { "docid": "12db0d7e390b4093b37b8cffd8cc0bb4", "score": "0.6807156", "text": "def accuracy(self, predicted, actual):\n denominator = len(actual)\n numerator = 0\n for i in range(len(predicted)):\n count = len(predicted[i])\n for j in range(len(predicted[i])):\n if predicted[i][j] != actual[i][j]:\n count -= 1\n if count is len(actual[i]):\n numerator += 1\n percent = (numerator/denominator)*100\n return percent", "title": "" }, { "docid": "73809d8bedceeab9c4c3d24c615add64", "score": "0.67835367", "text": "def accuracy(predictions, targets):\r\n\r\n pred = np.argmax(predictions, axis = 1)\r\n lab = np.argmax(targets, axis = 1)\r\n\r\n sum = 0\r\n for i in range(len(pred)):\r\n if pred[i] == lab[i]:\r\n sum += 1\r\n\r\n accuracy = sum / len(pred)\r\n\r\n return accuracy", "title": "" }, { "docid": "b0eb57545329f0696abe97a561b5317f", "score": "0.67791784", "text": "def calculate_accuracy(w, x_test, y_test):\n y_pred = predict_labels(w, x_test)\n total_number_predictions = y_pred.size\n numer_correct_predictions = 0\n for i in range(y_pred.size):\n if y_pred[i] == y_test[i]:\n numer_correct_predictions += 1\n accuracy = numer_correct_predictions / total_number_predictions\n return accuracy", "title": "" }, { "docid": "5f5ab9d013d247a48c308c77e213feed", "score": "0.6777185", "text": "def accuracy(self, data, labels):\n if self.is_fit:\n length = len(labels)\n correct_predictions = 0\n for datapoint, label in zip(data, labels):\n prediction = self.predict(datapoint)\n if prediction == label:\n correct_predictions += 1\n\n return correct_predictions / length\n else:\n raise Exception(\"Model has not been trained yet!\")", "title": "" }, { "docid": 
"3e88752fd836edc37bb1f6efd1baff13", "score": "0.6773754", "text": "def accuracy(y_true, y_pred):\n assert len(y_true) == len(y_pred)\n count = 0\n for i, _ in enumerate(y_true):\n if y_true[i] == y_pred[i]:\n count += 1\n return count / float(len(y_true))", "title": "" }, { "docid": "a32cc347fe9aec5d7152a3a9e0e5801d", "score": "0.67736137", "text": "def test_accuracy(self, test_data):\n correct = 0\n for datum in test_data:\n if datum[0] == 1 and self.predict(datum) > 0:\n correct += 1\n elif datum[0] == -1 and self.predict(datum) <= 0:\n correct += 1\n return correct / len(test_data)", "title": "" }, { "docid": "26e4cb6a09d393289ca3c4974155a25a", "score": "0.6768325", "text": "def accuracy(preds, targets):\r\n return (preds == targets).mean()", "title": "" }, { "docid": "69dae9515d594c5daf4a538d21cb04f2", "score": "0.67347634", "text": "def accuracy(train_rules, test_rules, prob_thresh):\r\n\r\n NP = Nonterminal('NP')\r\n trainAmount = 0\r\n testAmount = 0\r\n rules_train = []\r\n for rule in train_rules.productions():\r\n if NP == rule.lhs(): # and rule.lhs() != 'NNP' and rule.lhs() != 'NNPS':\r\n trainAmount += 1\r\n rules_train.append(rule.rhs())\r\n \r\n rules_test = []\r\n for rule in test_rules.productions():\r\n if NP == rule.lhs(): # and rule.lhs() != 'NNP' and rule.lhs() != 'NNPS':\r\n testAmount += 1\r\n rules_test.append(rule.rhs())\r\n \r\n rulesExclusivelyInTrain = 0\r\n for train_rule in rules_train:\r\n if train_rule not in rules_test:\r\n rulesExclusivelyInTrain += 1\r\n \r\n rulesExclusivelyInTest = 0\r\n for test_rule in rules_test:\r\n if test_rule not in rules_train:\r\n rulesExclusivelyInTest += 1\r\n\r\n return rulesExclusivelyInTrain, rulesExclusivelyInTest, trainAmount, testAmount", "title": "" }, { "docid": "100016d62cc5b2ac2eddca1383d732e8", "score": "0.6734665", "text": "def multiclass_accuracy(prediction, ground_truth):\n from sklearn.metrics import confusion_matrix\n \n conf = confusion_matrix(ground_truth, prediction)\n accuracy = np.trace(conf) / np.sum(conf)\n return accuracy", "title": "" }, { "docid": "60e24554b60c6ee6aaeadab126db797c", "score": "0.672809", "text": "def accuracy(y, pred_y):\n return float(sum(pred_y==y))/len(y)", "title": "" }, { "docid": "55a771f844c52ceefa94d6f1719db73d", "score": "0.6727731", "text": "def accuracy(self, x, actual_classes, probab_threshold=0.5):\n predicted_classes = (self.predict(x) >= probab_threshold).astype(int)\n predicted_classes = predicted_classes.flatten()\n accuracy = np.mean(predicted_classes == actual_classes)\n return accuracy * 100", "title": "" }, { "docid": "5e8e7734dda9f7655f3dec7cf24650b2", "score": "0.6709076", "text": "def accuracy(preds, targets):\n return (preds == targets).mean()", "title": "" }, { "docid": "1486e4bb7cee97f5112be123afc6a1af", "score": "0.6700241", "text": "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n n_samples = targets.shape[0]\n y_pred = np.argmax(predictions, axis=1)\n y_true = np.argmax(targets, axis=1)\n accuracy = np.sum(y_pred == y_true)/n_samples\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "title": "" }, { "docid": "de27286ddac8b59054e7ac5f1de53249", "score": "0.6700154", "text": "def accuracy(self, gold_tags, pred_tags):\n correct = 0.0\n total = 0.0\n for g_snt, pred_snt in zip(gold_tags, pred_tags):\n correct += sum([gold_tag == pred_tag for gold_tag, pred_tag in zip(g_snt, pred_snt)])\n total += len(g_snt)\n return correct / total", "title": "" 
}, { "docid": "3fc12662e7ac95a1c70d958774f8eae4", "score": "0.6696628", "text": "def accuracy_score(self, y_true, y_pred):\n \n accuracy = np.sum(y_true == y_pred, axis=0)/len(y_true)\n return accuracy", "title": "" }, { "docid": "20c8a1bdde2dfff6abbb1f2efb8dfd75", "score": "0.6680833", "text": "def accuracy_score(y_true, y_pred):\n accuracy = np.sum(y_true == y_pred, axis=0) / len(y_true)\n return accuracy", "title": "" }, { "docid": "3fec950e7e641a5f08209b64262098ee", "score": "0.6679659", "text": "def count_correct(self, predictions, labels):\n correct = None\n if self.metric == 'binary_accuracy':\n correct = np.sum(np.round(predictions) == labels)\n elif self.metric == 'accuracy':\n prediction_max = np.argmax(predictions, axis=0)\n labels_max = np.argmax(labels, axis=0)\n correct = np.sum(prediction_max == labels_max)\n\n return correct", "title": "" }, { "docid": "8fb93ba0f624526e4cabb0b97a9e59bd", "score": "0.66730374", "text": "def accuracy_score(truth, pred):\n\n # Ensure that the number of predictions matches number of outcomes\n if len(truth) == len(pred):\n\n # Calculate and return the accuracy as a percent\n return \"Predictions have an accuracy of {:.2f}%.\".format((truth == pred).mean() * 100)\n\n else:\n return \"Number of predictions does not match number of outcomes!\"", "title": "" }, { "docid": "d5da77311b93d54eacb87ce24018b89a", "score": "0.66667205", "text": "def accuracy(self, probabilities, labels):\n # TODO: calculate the batch accuracy\n count = 0\n for x in range(len(probabilities)):\n if np.argmax(labels[x]) == np.argmax(probabilities[x]):\n count += 1\n return count/len(probabilities)", "title": "" }, { "docid": "11c3ca50fb5552471a6ac94f9ce83be2", "score": "0.66666937", "text": "def accuracy(prediction, expected, labels=None):\n params = ((prediction, 'prediction'), (expected, 'expected'))\n\n prediction, expected = utils._as_2d(*params)\n utils._assert_same_dim(*params)\n\n eq = torch.eq(prediction, expected).byte()\n if labels is None:\n correct = torch.sum(eq, 1).float()\n result = correct / prediction.size(1)\n else:\n mask = filter_labels(expected, labels)\n eq &= mask\n correct = torch.sum(eq, 1).float()\n result = correct / (mask.sum(1).float())\n\n return utils._squeeze(result)", "title": "" }, { "docid": "e4a7de288f58a71822e6fd7f0151bffd", "score": "0.6663097", "text": "def getAccuracy(prediction: [int], trueResults: [int]) -> float:\n correct = 0\n wrong = 0\n for pred in range(len(prediction)):\n if prediction[pred] == trueResults[pred]:\n correct += 1\n else:\n wrong += 1\n return correct / (correct + wrong)", "title": "" }, { "docid": "550d0da0e8f472f6194ce1f3f6a32d12", "score": "0.666098", "text": "def evaluation_metric (dic):\r\n\r\n acc = np.sum(np.argmax(dic['y'], axis = 0) == np.argmax(dic['y_pred'], axis = 0))/ dic['y'].shape[-1]\r\n \r\n print(f'Test accuracy: {acc}')", "title": "" }, { "docid": "f82af6b401e0cc26a63c8794fb351d28", "score": "0.6656386", "text": "def accuracy(self, classification):\n accuracy = 0.0\n for i, c in enumerate(classification):\n if str(c) == self.classifier[i]:\n accuracy += 1\n accuracy = accuracy / (len(classification)-1)\n print(accuracy)", "title": "" }, { "docid": "75d546d9a778eb437321e3c6e6a19aeb", "score": "0.6653414", "text": "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n accuracy = (predictions.argmax(dim=1) == targets.argmax(dim=1)).type(dtype).mean().item()\n ########################\n # END OF YOUR CODE #\n 
#######################\n\n return accuracy", "title": "" }, { "docid": "1c7f508e1152b8439416d3527ea441db", "score": "0.6645023", "text": "def total_accuracy(self):\n\n # Pass on the test dataset and evaluate the model\n correct = 0\n total = 0\n with torch.no_grad():\n for data in self.testloader:\n images, labels = data\n\n outputs = self.net(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n\n # Calculate the error of the prediction output\n correct += (predicted == labels).sum().item()\n\n print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))", "title": "" }, { "docid": "ee63a873fcfa1fdca87595a6df8c915a", "score": "0.6640014", "text": "def accuracy(predicts, labels, flags):\n\n predicts = tf.reshape(predicts, [-1])\n labels = tf.reshape(labels, [-1])\n\n TP = [0] * flags['n_classes']\n FP = [0] * flags['n_classes']\n FN = [0] * flags['n_classes']\n TN = [0] * flags['n_classes']\n precision = [0] * flags['n_classes']\n recall = [0] * flags['n_classes']\n f1score = [0] * flags['n_classes']\n\n for iclass in range(flags['n_classes']):\n TP[iclass] = tf.reduce_sum(tf.to_float(tf.logical_and(tf.equal(predicts, iclass), tf.equal(labels, iclass))))\n FP[iclass] = tf.reduce_sum(tf.to_float(tf.logical_and(tf.equal(predicts, iclass), tf.not_equal(labels, iclass))))\n FN[iclass] = tf.reduce_sum(tf.to_float(tf.logical_and(tf.not_equal(predicts, iclass), tf.equal(labels, iclass))))\n TN[iclass] = tf.reduce_sum(tf.to_float(tf.logical_and(tf.not_equal(predicts, iclass), tf.not_equal(labels, iclass))))\n\n precision[iclass] = TP[iclass] / tf.to_float(TP[iclass] + FP[iclass])\n recall[iclass] = TP[iclass] / tf.to_float(TP[iclass] + FN[iclass])\n f1score[iclass] = 2 * precision[iclass] * recall[iclass] / tf.to_float(precision[iclass] + recall[iclass])\n\n return {'TP': TP,\n 'FP': FP,\n 'FN': FN,\n 'TN': TN,\n 'precision': precision,\n 'recall': recall,\n 'f1score': f1score}", "title": "" }, { "docid": "36d2625cbeecfc5659b33c55e407eb51", "score": "0.6638946", "text": "def calculateAccuracy(self, results, dataset):\n\t\tcorrect = 0\n\t\tfor key in dataset.keys():\n\t\t\tcorrectChoice = dataset[key]['options'][0]['sent']\n\t\t\tanswer = results[key]['solution']\n\t\t\tif answer == correctChoice:\n\t\t\t\tcorrect += 1\n\t\treturn correct / float(len(results))", "title": "" }, { "docid": "85b89ad7cba527ae9ac9c373f426d6ca", "score": "0.66371804", "text": "def accuracy(self, y_test, y_predicted):\r\n correct_prediction = 0\r\n wrong_prediction = 0\r\n total_prediction = 0\r\n for i in range(len(y_test)):\r\n if y_test[i] == y_predicted[i]:\r\n correct_prediction += 1\r\n else:\r\n wrong_prediction += 1\r\n total_prediction += 1\r\n accuracy = (correct_prediction / float(total_prediction)) * 100.0\r\n print(\"Accuracy :\",accuracy)", "title": "" }, { "docid": "ade4a5ad854ea424abaac61c34fe7c1a", "score": "0.66235024", "text": "def accuracy_score(truth, pred):\n \n # Ensure that the number of predictions matches number of outcomes\n if len(truth) == len(pred): \n \n # Calculate and return the accuracy as a percent\n return \"Predictions have an accuracy of {:.2f}%.\".format((truth == pred).mean()*100)\n \n else:\n return \"Number of predictions does not match number of outcomes!\"", "title": "" }, { "docid": "7cf581a52228b2d263fdfaad30d3a7f4", "score": "0.6604999", "text": "def accuracy(y_predict, y):\r\n # here, y and y_predict have already been converted to a single\r\n # column with one row for each example in the classification\r\n # 
problem.\r\n return ((y_predict.ravel() == y.ravel()) + 0).sum() \\\r\n * 1. / np.size(y_predict)", "title": "" }, { "docid": "459182279c4fef7c2b94133b8e8c65d2", "score": "0.6603623", "text": "def accuracy_score(y, y_pred):\n\n y, y_pred = convert_assert(y, y_pred)\n return np.count_nonzero(y == y_pred) / y.size", "title": "" }, { "docid": "4e15c975490aa0f15cd2efb3063d0ff7", "score": "0.66022676", "text": "def binary_classification_metrics(prediction, ground_truth):\n\n tp = 0\n tn = 0\n fp = 0\n fn = 0\n\n zipped = np.dstack((prediction, ground_truth))\n for p in np.rollaxis(zipped, 1):\n pair = p[0]\n if pair[0]:\n if pair[1]:\n tp += 1\n else:\n tn += 1\n else:\n if pair[1]:\n fn += 1\n else:\n fp += 1\n # np.apply_along_axis(resolve_case, axis=2, arr=zipped)\n\n accuracy = (tp + tn) / (tp + tn + fp + fn)\n\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n\n f1 = 2 * (precision * recall) / (precision + recall)\n\n return precision, recall, f1, accuracy", "title": "" }, { "docid": "53cbc730dc8f4910c1e8f20a83bb2cce", "score": "0.6597444", "text": "def getaccuracy(ytest, predictions):\n correct = 0\n for i in range(len(ytest)):\n if ytest[i] == predictions[i]:\n #print \"Predicted:\", predictions[i], \"Actual:\", ytest[i]\n correct += 1\n \n return (correct/float(len(ytest))) * 100.0", "title": "" }, { "docid": "a68879a49e49a625b8744525c3e70f3d", "score": "0.65922123", "text": "def evaluate_model(self):\n print \"The accuracy score is {:.2%}\".format( self.classifier.score(self.test_arrays, self.test_labels))", "title": "" }, { "docid": "26b29d44ce6506ca57a16cfece68d763", "score": "0.6572676", "text": "def compute_test_metrics(predictions, ground_truth):\n\n tp = 0\n fp = 0\n fn = 0\n \n # iterate over all instances\n for i in range(len(predictions)):\n\n for sinlge_feature in predictions[i].features:\n if sinlge_feature in ground_truth[i].features:\n # predicted feature is correct\n tp += 1\n else:\n # predicted feature is incorrect\n fp += 1\n\n for single_feature in ground_truth[i].features:\n if single_feature not in predictions[i].features:\n # feature in ground truth is missing in prediction\n fn += 1\n\n # computing the test metrics\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f1 = (2 * precision * recall)/ (precision + recall)\n\n return precision, recall, f1", "title": "" }, { "docid": "5aed2b455f31db76365b5b7187571a40", "score": "0.65718806", "text": "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "title": "" }, { "docid": "28525b249810e3bff5d1c6bb5446b243", "score": "0.65472955", "text": "def find_accuracy(real_dataset, classified_dataset):\n return 1 - NNClassifier.find_error(real_dataset, classified_dataset)", "title": "" }, { "docid": "5f8e51a8638e3ab1ad8f324aafa83e5d", "score": "0.6541366", "text": "def return_accuracy(self, test_X, test_y):\r\n\r\n correct = 0; wrong = 0\r\n for i, x in enumerate(test_X):\r\n leafnode = self.search_leaf_by_oper(x, self.root_node, [])\r\n prediction = leafnode.predict_label\r\n if prediction == test_y[i]:\r\n correct += 1\r\n else:\r\n wrong +=1\r\n return correct/(correct + wrong)", "title": "" }, { "docid": "49ec8e4b2792a1ae77094b6ee4351dad", "score": "0.65333134", "text": "def multiclass_accuracy(prediction, ground_truth):\n\n # TODO: Implement computing accuracy\n right = (prediction == ground_truth)\n accuracy = right[right == 
True].shape[0] / prediction.shape[0]\n\n return accuracy", "title": "" }, { "docid": "1b9ddcd70144cb48e43fedf094785da3", "score": "0.6529971", "text": "def calculate_accuracy(self, pred_values):\n num_correct = 0\n total_samples = len(self.true_values)\n for pred, gold in zip(pred_values, self.true_values):\n if int(pred) == int(gold):\n num_correct += 1\n acc = (num_correct / total_samples) * 100\n return acc, num_correct, total_samples", "title": "" }, { "docid": "bf25336eb089d674e4053111e666c91a", "score": "0.65267634", "text": "def batch_accuracy(self, predictions, labels):\n \n max_predictions = predictions.argmax(dim=1, keepdim=True)\n correct = max_predictions.squeeze(1).eq(labels)\n \n return correct.sum() / torch.FloatTensor([labels.shape[0]])", "title": "" }, { "docid": "42d908213c2f8fb04447e6326e412912", "score": "0.6521664", "text": "def accuracy_score(y_true, y_pred):\n\n\treturn round(float(sum(y_pred == y_true))/float(len(y_true)) * 100 ,2)", "title": "" }, { "docid": "58cf06b4cf882a82dddf154654ce9bfa", "score": "0.6516501", "text": "def getAccuracy(labels, pred_labels, method = \"class\"):\n\n n = len(labels)\n #-------- check point\n if n != len(pred_labels):\n print(\"Two vectors must have equal size!\")\n \n \n correct = 0.0\n if method == \"class\": #just count correct cases \n for i in range(n):\n if labels[i] == pred_labels[i]:\n correct += 1\n correct = correct/float(n) \n \n if method == \"reg\": #return root mean squared error\n ave = np.mean(labels)\n norm = 0.0\n correct = 0.0\n for i in range(n):\n correct = correct + (labels[i] - pred_labels[i])*(labels[i] - pred_labels[i])\n norm = norm + (labels[i] - ave)*(labels[i] - ave)\n \n acc = 1.0 - np.sqrt(correct / norm) \n correct = acc\n \n return correct", "title": "" }, { "docid": "938d2cf7927db82eba427d4016323c69", "score": "0.65147024", "text": "def accuracy(y_true: ndarray, y_pred: ndarray) -> float:\n acc = (y_true == y_pred).astype(int).mean()\n return acc", "title": "" }, { "docid": "387e4550b2a7d4e5b6f997060a572cff", "score": "0.6508172", "text": "def evaluate_accuracy(self, test_data):\n total_correct = 0\n for x, y in test_data:\n # the highest output value is the networks guess\n output = mmath.argmax(self.feed_forward_evaluation(x))\n if output == y: total_correct += 1\n\n return total_correct", "title": "" }, { "docid": "69d471ea3c4d7621ded18dff1716d02a", "score": "0.65063024", "text": "def accuracy(self, data, output_fn=np.argmax):\n results = [(output_fn(self.feed_forward(x)), output_fn(y))\n for (x, y) in data]\n return sum(int(x == y) for (x, y) in results)", "title": "" }, { "docid": "6d33ac74d54c24c3a2fa1246463e3f1c", "score": "0.64979", "text": "def summarizePrediction(prediction):\n print \"Actual types: {}\".format(fuzzyIrisesDataSet.getIrisesTypes())\n print \"Predicted types: {}\".format(list(prediction))\n\n firsTypeCounter = Counter(prediction[0:50])\n bestPredictionForFirstType = getMaxFromCounter(firsTypeCounter)\n print \"first type: {}\".format(firsTypeCounter)\n secondTypeCounter = Counter(prediction[50:100])\n bestPredictionForSecondType = getMaxFromCounter(secondTypeCounter)\n print \"second type: {}\".format(secondTypeCounter)\n thirdTypeCounter = Counter(prediction[100:150])\n bestPredictionForThirdType = getMaxFromCounter(thirdTypeCounter)\n print \"third type: {}\".format(thirdTypeCounter)\n predictions = [bestPredictionForFirstType, bestPredictionForSecondType, bestPredictionForThirdType]\n groups = set(map(lambda pair: pair[0], predictions))\n hits = sum(map(lambda pair: 
pair[1], predictions))\n if len(groups) == 3: # All types was counted.\n print \"Positive hits: {}, hit rate: {}\".format(hits, hits / float(150))\n else:\n print \"FAILED TO CALCULATE THE AVERAGE NUMBER OF HITS\"\n\n precisionCalculator = PredictionPrecisionCalculator(fuzzyIrisesDataSet.getIrisesTypes(), list(prediction))\n maxAccuracy, bestPrediction = precisionCalculator.getAccuracyOfPrediction()\n print \"Accuracy rate: {}\".format(maxAccuracy)\n print \"Actual types: {}\".format(fuzzyIrisesDataSet.getIrisesTypes())\n print \"max accuracy types: {}\".format(bestPrediction)\n return maxAccuracy, bestPrediction", "title": "" }, { "docid": "11e8885ee09947cc77f606c60d1d2568", "score": "0.6497807", "text": "def calculateTestSetAccuracy(self, testset):\n correct = 0\n total = 0\n for inst in testset:\n probs = self.predict(inst)\n if probs['rock'] > probs['paper'] and probs['rock'] > probs['scissors']:\n if inst['output'] == 'rock':\n correct += 1\n elif probs['paper'] > probs['scissors'] and probs['paper'] > probs['rock']:\n if inst['output'] == 'paper':\n correct += 1\n else:\n if inst['output'] == 'scissors':\n correct += 1\n total += 1\n return float(correct) / float(total)", "title": "" }, { "docid": "fd9ccf09b638e20aea3b592914dcba67", "score": "0.6494728", "text": "def batch_accuracy(preds, labels):\n\n pred_flat = np.argmax(preds, axis=1).flatten()\n labels_flat = labels.flatten()\n \n return np.sum(pred_flat == labels_flat) / len(labels_flat)", "title": "" }, { "docid": "dfda075c7c028cd9188cac573cb781fe", "score": "0.6486892", "text": "def evaluate(classifier_factory, k):\r\n accuracy = 0\r\n error = 0\r\n i = 1\r\n for kfold in range(k):\r\n train_features, train_labels, test_features, test_labels = k_fold_train_and_test(kfold)\r\n\r\n classifier = classifier_factory.train(train_features, train_labels)\r\n true_positive_count = 0\r\n true_negative_count = 0\r\n false_positive_count = 0\r\n false_negative_count = 0\r\n\r\n for test_feature, test_label in zip(test_features, test_labels):\r\n prediction = classifier.classify(test_feature)\r\n if prediction == 1 and test_label == 1: # True Positive\r\n true_positive_count += 1\r\n if prediction == 1 and test_label == 0: # False Positive\r\n false_positive_count += 1\r\n if prediction == 0 and test_label == 1: # False Negative\r\n false_negative_count += 1\r\n if prediction == 0 and test_label == 0: # True Negative\r\n true_negative_count += 1\r\n\r\n current_accuracy = (true_positive_count + true_negative_count) / len(test_labels)\r\n current_error = (false_positive_count + false_negative_count) / len(test_labels)\r\n\r\n # Nice trick to calculate average incrementally:\r\n # Mn = Mn-1 + (An -Mn-1)/n\r\n accuracy = accuracy + (current_accuracy - accuracy) / i\r\n error = error + (current_error-error) / i\r\n i += 1\r\n\r\n #accuracy += current_accuracy\r\n #error += current_error\r\n\r\n #accuracy = accuracy/k\r\n #error = error/k\r\n\r\n\r\n\r\n return accuracy, error", "title": "" }, { "docid": "6439b8222435635874327dca4a874a75", "score": "0.6484574", "text": "def get_accuracy(data_loader, classifier_fn, batch_size):\n predictions, actuals = [], []\n\n # use the appropriate data loader\n for (xs,lengths, ys) in data_loader:\n # use classification function to compute all predictions for each batch\n predictions.append(classifier_fn(xs,lengths))\n actuals.append(ys)\n\n # compute the number of accurate predictions\n accurate_preds = 0\n for pred, act in zip(predictions, actuals):\n for i in range(pred.size(0)):\n v = torch.sum(pred[i] 
== act[i])\n accurate_preds += (v.item() == 2)\n\n # calculate the accuracy between 0 and 1\n accuracy = (accurate_preds * 1.0) / (len(predictions) * batch_size)\n return accuracy", "title": "" }, { "docid": "55ed1711365bb81cf78000cdbe38abb4", "score": "0.6467802", "text": "def compute_metrics(eval_pred):\n labels = eval_pred.label_ids\n preds = eval_pred.predictions.argmax(-1)\n\n ## TODO: Return a dictionary containing the accuracy, f1, precision, and recall scores.\n ## You may use sklearn's precision_recall_fscore_support and accuracy_score methods.\n pass", "title": "" }, { "docid": "ce4e16a8b1ad67e1a13863210bd32785", "score": "0.64584976", "text": "def accuracy(y_true, y_pred, binaryClassMatrix=False, sample_mask=None):\n if binaryClassMatrix:\n if sample_mask is not None:\n y_true = np.argmax(y_true, axis=-1)\n y_pred = np.argmax(y_pred, axis=-1)\n y_true[sample_mask == 0] = -1\n else:\n raise ValueError('Sample masks must be provided for each target if binaryClassMatrix is True.')\n return float((y_true == y_pred).sum())/float((y_true>=0).sum())", "title": "" }, { "docid": "f6507efc88b409b1252baaba5cafc1f2", "score": "0.6458294", "text": "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n predictions = predictions.argmax(dim=-1)\n targets = targets.argmax(dim=-1)\n matches = torch.eq(predictions, targets)\n accuracy = matches.sum().item() / predictions.shape[0]\n # raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "title": "" }, { "docid": "e5d7497a0ce6a9b71f0c1e84260d322c", "score": "0.6443633", "text": "def accuracy(actual, predictions):\n if predictions.shape[0] != actual.shape[0]:\n raise ValueError(\"predictions and actual must be the same length!\")\n\n matrix = confusion_matrix(actual, predictions)\n matrix_sum = matrix.sum()\n accurate_cells = matrix[0, 0] + matrix[1, 1]\n accuracy = accurate_cells/matrix_sum\n return accuracy", "title": "" }, { "docid": "92859ee841e39ae577b6375a4573ff32", "score": "0.64426965", "text": "def accuracy(predictions, targets):\n assert len(predictions) == len(targets)\n n_correct = 0\n for predicted_tag, (word, gold_tag) in zip(predictions, targets):\n if predicted_tag == gold_tag:\n n_correct += 1\n\n return n_correct / len(targets) * 100.0", "title": "" }, { "docid": "2797c38c6597058691c679435b4ff87c", "score": "0.6433127", "text": "def check_accuracy(self, X, y):\n models = self.models\n param_files = self.param_files\n num_classes = self.num_classes\n num_models = len(models)\n N = len(y)\n scores_sum = np.zeros((N,num_classes))\n print \"There are %d models in the Ensemble.\" % len(models)\n \n for n in range(num_models):\n # Load trained parameters into model\n outfile = param_files[n]\n npzfile = np.load(outfile)\n models[n].params = npzfile['params'].item()\n models[n].bn_params = npzfile['bn_params'].item()\n npzfile.close()\n \n scores = models[n].loss(X)\n print 'Model %d Test set accuracy: %f' % (n+1,(np.argmax(scores, axis=1) == y).mean())\n \n scores_sum += scores\n \n y_test_pred = np.argmax(scores_sum/n, axis=1)\n acc = (y_test_pred == y).mean()\n return acc", "title": "" }, { "docid": "613851f0994a270bfcbe2a2e2bf938de", "score": "0.64152193", "text": "def calculateAccuracyPairs(self, results, dataSet):\n\t\tmatch = 0\n\t\tfor key in dataSet:\n\t\t\tpredictedSenseGroups = results[key]\n\t\t\torderedExamples = dataSet[key]\n\t\t\t# Examples are in order so split into group size to find 
groups \n\t\t\tsenseNum = len(predictedSenseGroups)\n\t\t\tgroupSize = len(predictedSenseGroups[0])\n\t\t\tactualSenseGroups = []\n\t\t\tfor i in range(senseNum):\n\t\t\t\tactualSenseGroups.append(orderedExamples[i*groupSize:i*groupSize+groupSize])\n\t\t\t\n\t\t\t# Find all pairs from the predicted and actual groups\n\t\t\tpredictedPairs = []\n\t\t\tactualPairs = []\n\t\t\tfor predGroup, actualGroup in zip(predictedSenseGroups, actualSenseGroups):\n\t\t\t\tactualGroup = [example['sent'] for example in actualGroup]\n\t\t\t\tpredictedPairs += combinations(predGroup, 2)\n\t\t\t\tactualPairs += combinations(actualGroup, 2)\n\n\t\t\t# Convert pairs to sets so the order does not matter i.e ab == ba\n\t\t\tpredictedPairs = [set(p) for p in predictedPairs]\n\t\t\tactualPairs = [set(a) for a in actualPairs]\n\n\t\t\t# Find number of pairs correctly predicted\n\t\t\tpairMatchCount = 0\n\t\t\tfor predPair in predictedPairs:\n\t\t\t\tif predPair in actualPairs:\n\t\t\t\t\tpairMatchCount += 1\t\t\n\t\t\tmatch += pairMatchCount / float(len(actualPairs))\n\n\t\treturn match / float(len(dataSet))", "title": "" }, { "docid": "a552fdd4a3d5123a1827cc816f99fd55", "score": "0.64145887", "text": "def accuracy(y, x, w, lower_bound, upper_bound):\n return np.mean(y == predict_labels(w, x, lower_bound, upper_bound))", "title": "" }, { "docid": "8c5c8a85a7c98bb6325ef3e04fc16fbe", "score": "0.6410469", "text": "def evaluate_prediction(predictions, answers):\n correct = sum(np.asarray(predictions) == np.asarray(answers))\n total = float(np.prod(answers.shape))\n return correct / total", "title": "" }, { "docid": "5a21b300f6a481dac143a6405c4caf47", "score": "0.64002895", "text": "def acc_score(self):\n if 0 == self.total_labels:\n return 0.0\n accuracy = float(self.correct_labels) / self.total_labels\n return accuracy", "title": "" }, { "docid": "969d935617b6d3675c2bbe6dcc2b1a7c", "score": "0.63977563", "text": "def _calculate_batch_accuracy(self, ground_truth, predicted, batch_size):\n accuracy = np.array([0., 0., 0.])\n for idx, item in enumerate(ground_truth):\n accuracy += calculate_accuracy(item.detach().cpu(),\n predicted[idx].detach().cpu())\n\n return accuracy / batch_size", "title": "" }, { "docid": "ab7b39f97bb1ecf78966d0de476a343f", "score": "0.6393955", "text": "def accuracy_for_each_class(output, target, total_vector, correct_vector):\n batch_size = target.size(0)\n _, pred = output.topk(1, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1)).float().cpu().squeeze()\n for i in range(batch_size):\n total_vector[target[i]] += 1\n correct_vector[torch.LongTensor([target[i]])] += correct[i]\n \n return total_vector, correct_vector", "title": "" }, { "docid": "571389521a62e2bbc4a289656f3ada75", "score": "0.6386048", "text": "def n_class_accuracy(y_pred, y_true):\n n = y_true.shape[0]\n y_pred = y_pred.argmax(dim=-1).view(n, -1)\n y_true = y_true.view(n, -1)\n return (y_pred == y_true).float().mean()", "title": "" }, { "docid": "92a4206e1d53b8198ebc82e9559e4fa3", "score": "0.6371015", "text": "def test(self):\n data, labels = self.__load_data()\n norm_data, ranges, min_vals = self.__auto_norm(data)\n m = norm_data.shape[0]\n test_num = int(m * self.__test_ratio)\n error_count = 0.0\n for i in range(test_num):\n result = classify(norm_data[i, :], norm_data[test_num:m, :], labels[test_num:m], 3)\n print(\"(classifier result, real answer) = ({0}, {1})\".format(result, labels[i]), end='')\n if result != labels[i]:\n print(\"\\tERROR\")\n error_count += 1\n else:\n print(\"\")\n\n print(\"the 
total error rate is: {0}\".format(error_count / test_num))\n print(\"error count = \", error_count)", "title": "" }, { "docid": "d539c32021131169a6e39596a478fb29", "score": "0.6369128", "text": "def measure_accuracy(predictions, labels):\n correct_predictions = np.equal(np.argmax(predictions,axis=1), np.argmax(labels,axis=1))\n accuracy = np.mean(correct_predictions*1.0)\n\n return accuracy", "title": "" }, { "docid": "34909f2c12422fb263e708e7841748a2", "score": "0.6368268", "text": "def accuracy(predictions, targets):\n accuracy = (predictions.argmax(axis=1) == targets.argmax(axis=1)).mean()\n\n return accuracy", "title": "" }, { "docid": "2231aafb5b99709cdc26f789e22e8aef", "score": "0.63667494", "text": "def accuracy(nn, pairs):\n\n true_positives = 0\n total = len(pairs)\n\n for (x, y) in pairs:\n nn.forward_propagate(x)\n class_prediction = nn.predict_class()\n if class_prediction != y[0]:\n true_positives += 1\n\n # outputs = nn.get_outputs()\n # print(\"y =\", y, \",class_pred =\", class_prediction, \", outputs =\", outputs)\n\n return 1 - (true_positives / total)", "title": "" }, { "docid": "d463797ce01f8512f1437c39a5eab17f", "score": "0.6365521", "text": "def test(clf, df, results=None):\n if results is None:\n results = pd.DataFrame({\n \"prediction\": clf.predict(df.drop(\"class\", axis=1)),\n \"ground\": df[\"class\"]\n }, index=df.index)\n\n summary = results.apply(pd.Series.value_counts).fillna(0)\n\n # Measure recall and precision for each class. These are defined similarly:\n # - precision: what % of objects classified as `cls` actually were\n # - recall: what % of `cls` objects were classified thus\n percent_correct = lambda s: s.fillna(0).to_dict().get(s.name, 0) / float(s.sum()) * 100\n measure = lambda s: s.value_counts().unstack().apply(percent_correct)\n summary[\"precision\"] = measure(results.groupby(\"ground\").prediction)\n summary[\"recall\"] = measure(results.groupby(\"prediction\").ground)\n\n # If nothing is classified as `cls`, the above makes it NaN instead of 0.\n summary = summary.fillna(0).sort(\"ground\", ascending=False)\n \n print summary\n return summary", "title": "" }, { "docid": "a269064d98ba4511ddc16fad52569f38", "score": "0.6356636", "text": "def get_accuracy(self, X, y):\n\n # test model\n correct = 0\n total = 0\n with torch.no_grad():\n for i, X_i in enumerate(X):\n outputs = self(X_i) # output contains labels for the whole sequence\n predictions = torch.round(outputs[-1]).item() # we only care about the last one\n total += 1\n correct += 1 if predictions == y[i].item() else 0\n return correct / total", "title": "" }, { "docid": "c68d82f508ef8f08665cec2a8f5f2c9c", "score": "0.63562196", "text": "def validation_metrics(ground_truth, bowtie2_prediction):\n metrics = {}\n for fh, predicted_IDs in bowtie2_prediction.items():\n true_IDs = ground_truth[fh]\n set_predicted_IDs = set(predicted_IDs)\n set_true_IDs = set(true_IDs)\n intersection = set_predicted_IDs.intersection(set_true_IDs)\n true_count = len(set_true_IDs)\n predicted_count = len(set_predicted_IDs)\n intersection_count = len(intersection)\n # if ground_truth, prediction and intersection is 0, recall and precision are 1\n if true_count == 0 and predicted_count == 0: # Implies intersection is also 0\n true_count = 1e-99\n predicted_count = 1e-99\n intersection = 1e-99\n try: \n specificity = intersection_count/predicted_count\n except(ZeroDivisionError):\n specificity = \"0 (ZeroDivision)\"\n try:\n recall = intersection_count/true_count\n except(ZeroDivisionError):\n recall = \"0 
(ZeroDivision)\"\n #if not recall == \"0 (ZeroDivision)\":\n metrics[fh] = [true_count, predicted_count, intersection_count, recall, specificity]\n return(metrics)", "title": "" }, { "docid": "62e0f5dd44c2b598f12be301ccb817c8", "score": "0.63552445", "text": "def accuracy_fn(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n predictions = predictions.argmax(dim=-1)\n matches = torch.eq(predictions, targets)\n accuracy = matches.sum().item() / predictions.shape[0]\n # raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "title": "" }, { "docid": "93fc0b7ec00ffc8064fd0ef880ebafce", "score": "0.63534516", "text": "def accuracy(self):\n Y=self.forward_pass()[0]\n y=np.argmax(Y,axis=1)\n return sum(y==self.y)/self.samples", "title": "" }, { "docid": "c970990905425ce6e83cb4f95c762f66", "score": "0.63459396", "text": "def get_accuracy(predictions, labels):\n\n return float(sum(predictions == labels).data[0]) / labels.size()[0]", "title": "" }, { "docid": "93976f1e92bf18e1b90248675a020e8f", "score": "0.63457423", "text": "def compute_accuracy(predictions, labels):\n\treturn labels[predictions.ravel() < 0.5].mean()", "title": "" }, { "docid": "9071c0f2ba2297ed989fba6126c408e7", "score": "0.6343984", "text": "def evaluate_accuracy(net, data_iter): #@save\n metric = Accumulator(2) # No. of correct predictions, no. of predictions\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.size)\n return metric[0] / metric[1]", "title": "" }, { "docid": "19d089351c9b7146d4a5c5644d3d6fe8", "score": "0.6337823", "text": "def compute_mean_accuracy(self):\n self.mean_accuracy = {}\n for algorithm in self.algorithms:\n accuracy = algorithm.y_pred == self.experiment.y_test\n self.mean_accuracy[algorithm.name] = np.mean(accuracy, axis=0)", "title": "" } ]
1a7d7436bc50e8f0af440046d0fba746
Returns the monospaced terminal display width of char.
[ { "docid": "380e4c610a6f1598c0941077b73c4267", "score": "0.7959377", "text": "def GetCharacterDisplayWidth(char):\n if not isinstance(char, unicode):\n # Non-unicode chars have width 1. Don't use this function on control chars.\n return 1\n\n # Normalize to avoid special cases.\n char = unicodedata.normalize('NFC', char)\n\n if unicodedata.combining(char) != 0:\n # Modifies the previous character and does not move the cursor.\n return 0\n elif unicodedata.category(char) == 'Cf':\n # Unprintable formatting char.\n return 0\n elif unicodedata.east_asian_width(char) in 'FW':\n # Fullwidth or Wide chars take 2 character positions.\n return 2\n else:\n # Don't use this function on control chars.\n return 1", "title": "" } ]
[ { "docid": "da032b13152348ef195466b6f5f197a3", "score": "0.7047037", "text": "def DisplayWidth(self, buf):\n if not isinstance(buf, basestring):\n # Handle non-string objects like Colorizer().\n return len(buf)\n width = 0\n i = 0\n while i < len(buf):\n if self._csi and buf[i:].startswith(self._csi):\n i += self.GetControlSequenceLen(buf[i:])\n else:\n width += GetCharacterDisplayWidth(buf[i])\n i += 1\n return width", "title": "" }, { "docid": "b929e66c76c9bfa63157a0e347bc9dfa", "score": "0.6874437", "text": "def get_width(s):\n global widths\n #print(s)\n def char_width(o):\n if o == 0xe or o == 0xf:\n return 0\n for num, wid in widths:\n #print(o, num)\n if o <= num:\n return wid\n return 1\n return sum(char_width(ord(c)) for c in s)", "title": "" }, { "docid": "737282d923225215e4a599b90d366834", "score": "0.67928", "text": "def terminal_width(self):\n if self._terminal_width is None:\n try:\n self._terminal_width = get_terminal_size().columns\n except ValueError:\n # sometimes seen in unit tests:\n # ValueError: underlying buffer has been detached\n # Easy enough to work around...\n self._terminal_width = 80\n if self._terminal_width <= 0:\n self._terminal_width = 80\n return self._terminal_width", "title": "" }, { "docid": "e061e39c5abd04962bbf82243d65343e", "score": "0.6713232", "text": "def get_terminal_width(self):\n import os\n rows, columns = os.popen('stty size', 'r').read().split()\n return int(columns)", "title": "" }, { "docid": "fcd280a1d358900279a722c7723b22ec", "score": "0.6675774", "text": "def get_width(self):\n _lib.caca_get_font_width.argtypes = [_Font]\n _lib.caca_get_font_width.restype = ctypes.c_int\n\n return _lib.caca_get_font_width(self)", "title": "" }, { "docid": "d82bdc6c420e1efbb718b4d674e3092f", "score": "0.6556561", "text": "def get_width(self):\n return self._tk_font.measure(self._text)", "title": "" }, { "docid": "4d4bf3b9e586b3740acd5ef96ef9824b", "score": "0.65165704", "text": "def get_console_width() -> int:\n # Assigning the value once, as frequent call to this function\n # causes a major slow down(ImportErrors + isinstance).\n global _IN_QT\n if _IN_QT is None:\n _IN_QT = _in_qtconsole()\n\n try:\n if _IN_QT:\n # QTConsole determines and handles the max line length by itself.\n width = sys.maxsize\n else:\n width = _get_windows_console_width() if os.name == 'nt' else _get_linux_console_width()\n if width <= 0:\n return 80\n return width\n except Exception:\n # Default value.\n return 80", "title": "" }, { "docid": "2e7deefe3ff7c1cd2aa3d504541f6416", "score": "0.6468597", "text": "def width(cls, value: str):\n return sum(map(cls._get_width, map(ord, value)))", "title": "" }, { "docid": "2e7deefe3ff7c1cd2aa3d504541f6416", "score": "0.6468597", "text": "def width(cls, value: str):\n return sum(map(cls._get_width, map(ord, value)))", "title": "" }, { "docid": "7efa677eaa8b02814c8b58568fbad237", "score": "0.64613163", "text": "def get_width(self):\n return len(self.__ch_range)", "title": "" }, { "docid": "790b68cd2eabbffbbaa584356a7ed2d7", "score": "0.6399068", "text": "def _width(s):\n text.set_text(s)\n return text.get_window_extent().width", "title": "" }, { "docid": "fa7b3035d5c8a15be5947bfa128fc53f", "score": "0.63633025", "text": "def compute_width(self):\n if not self._enabled:\n return 0\n number_digits = self.compute_width_digits()\n if (self._width_cache is not None and\n self._width_cache[0] == number_digits):\n return self._width_cache[1]\n\n if self._margin:\n margin = 3 + self.editor.fontMetrics().width('9' * number_digits)\n else:\n 
margin = 0\n width = margin + self.get_markers_margin()\n self._width_cache = (number_digits, width)\n return width", "title": "" }, { "docid": "42a462f4c972aa31e122e4cbb1697d03", "score": "0.63364273", "text": "def width(self):\n return int()", "title": "" }, { "docid": "42a462f4c972aa31e122e4cbb1697d03", "score": "0.63364273", "text": "def width(self):\n return int()", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": 
"2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "2a9e4690715aed5942f628bdc18f8722", "score": "0.62609124", "text": "def width(self) -> int:", "title": "" }, { "docid": "c91560845e80db60a5102d462d69ed7c", "score": "0.6217051", "text": "def _rendered_text_width(self, s):\r\n len_s = len(s)\r\n res = int(self._sprite_size[0] * len_s * self._font_scale) + self._text_spacing*len_s\r\n return res", "title": "" }, { "docid": "ba767b2ecbd9ac7df92a3e7330a99c92", "score": "0.62122214", "text": "def width(self):\n return self.xLen()", "title": "" }, { "docid": "c00e91264ceaf44861894edc1e3cdf6d", "score": "0.61802083", "text": "def string_width(string):\n # Colorclass instance.\n if hasattr(string, 'value_no_colors'):\n string = string.value_no_colors\n\n # Convert to unicode.\n try:\n decoded = string.decode('u8')\n except (AttributeError, UnicodeEncodeError):\n decoded = string\n\n width = 0\n for char in decoded:\n if unicodedata.east_asian_width(char) in ('F', 'W'):\n width += 2\n else:\n width += 1\n\n return width", "title": "" }, { "docid": "47ca20468613ab7ed1df966b79553638", "score": "0.61447465", "text": "def get_chw(self):\n \n print 'Using a channel width of %.0f' % self.chw\n return self.chw", "title": "" }, { "docid": "284c6dfc108bb3fed6931762c7e2eb72", "score": "0.6114536", "text": "def _draw_char(draw, char: str, xy: tuple, font) -> int:\n draw.text(xy, char, fill=_WHITE, font=font)\n return font.getsize(char)[0]", "title": "" }, { "docid": "ee798bc2d768c8661a320123d39a5b48", "score": "0.60607576", "text": "def get_term_width(tstream):\n\n import sys, os, termios\n from fcntl import ioctl\n from struct import unpack\n\n if hasattr(tstream, 'stream'):\n # Transparently handle a codec wrapper. 
Not sure of the best way to\n # generically check for wrapper streams, but this'll do for now.\n return get_term_width(tstream.stream)\n\n w = None\n\n if tstream.isatty():\n try:\n return unpack(b'hh', ioctl(tstream.fileno(), termios.TIOCGWINSZ, b'....'))[1]\n except:\n pass\n\n try:\n return int(os.environ['COLUMNS'])\n except:\n pass\n\n if tstream.isatty():\n return 80\n\n return -1", "title": "" }, { "docid": "13e2881dded18c1cfdc8c3e6b76aff01", "score": "0.59825003", "text": "def width(self):\n return self._effective_value('width')", "title": "" }, { "docid": "8e06a0216180039d9560469f31b11a6a", "score": "0.59482604", "text": "def width(self) -> float:\n return pulumi.get(self, \"width\")", "title": "" }, { "docid": "7b3bbc87128b7456212eeeb8c3b109d3", "score": "0.59459555", "text": "def stringWidth_kerning(font, text, size, encoding='utf-8'):\r\n if not isinstance(text, unicode_type):\r\n text = unicode_type(text, encoding or 'utf-8') # encoding defaults to utf-8\r\n face = font.face\r\n g = face.charWidths.get\r\n dw = face.defaultWidth\r\n kp = kerning_pairs(face, text)\r\n return 0.001*size*(sum([g(ord(u),dw) for u in text]) + sum(kp))", "title": "" }, { "docid": "64583f5fd29e862ef1dc6f6f2aeac8c1", "score": "0.59440553", "text": "def get_terminal_size(defaultw=80):\n if hasattr(shutil_get_terminal_size, \"__call__\"):\n return shutil_get_terminal_size()\n else:\n try:\n import fcntl, termios, struct\n\n fd = 0\n hw = struct.unpack(\"hh\", fcntl.ioctl(fd, termios.TIOCGWINSZ, \"1234\"))\n return (hw[1], hw[0])\n except:\n try:\n out = sp.check_output([\"tput\", \"cols\"])\n width = int(out.decode(\"utf-8\").strip())\n return (width, None)\n except:\n try:\n hw = (os.environ[\"LINES\"], os.environ[\"COLUMNS\"])\n return (hw[1], hw[0])\n except:\n return (defaultw, None)", "title": "" }, { "docid": "746fb634b01fb621ee9b79d139bed49b", "score": "0.59425956", "text": "def get_console_width(\n default: int = CONSOLE_WIDTH,\n shift: int = CONSOLE_WIDTH_SHIFT,\n) -> int:\n try:\n return logger.logger.handlers[0].console.width - shift\n except (AttributeError, IndexError): # pragma: no cover\n return default - shift", "title": "" }, { "docid": "739af968c118077915018c804819a02d", "score": "0.59365004", "text": "def _num_chars(char, n):\n\treturn char * n", "title": "" }, { "docid": "9d89e4dd0c35c430db24a49b014bada3", "score": "0.5924788", "text": "def strokeWidth(self):\n return self._stroke.width", "title": "" }, { "docid": "54c019b87d13c301b7a1bca847a7a939", "score": "0.59097046", "text": "def getTerminalSize():\n import ctypes # getTerminalSize() will most likely rarely be used, so don't bother importing ctypes all the time. TODO - Is this line of thinking valid? Does it really make a difference?\n if sys.platform == 'win32':\n # From http://code.activestate.com/recipes/440694-determine-size-of-console-window-on-windows/\n h = ctypes.windll.kernel32.GetStdHandle(-12)\n csbi = ctypes.create_string_buffer(22)\n res = ctypes.windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)\n\n if res:\n import struct\n (bufx, bufy, curx, cury, wattr,\n left, top, right, bottom, maxx, maxy) = struct.unpack(\"hhhhHhhhhhh\", csbi.raw)\n return right - left + 1, bottom - top + 1\n else:\n raise PyTextCanvasException('Unable to determine terminal size. 
This happens when in a non-terminal environment, such as IDLE.') #sizex, sizey = 80, 25 # can't determine actual size - return default values\n\n # TODO - finish for non windows platforms.\n # Linux:\n # sizex, sizey = os.popen('stty size', 'r').read().split()\n # return int(sizex), int(sizey)\n\n #else:\n # raise PyTextCanvasException('Cannot determine the platform')", "title": "" }, { "docid": "37efb13149465da1886e41beb2b12e05", "score": "0.59048104", "text": "def col_width(self):\n return self._col_width", "title": "" }, { "docid": "e2d8a4012b6f950b6eec12e7cb313fe8", "score": "0.5886875", "text": "def get_terminal_size():\r\n def ioctl_GWINSZ(fd):\r\n try:\r\n import fcntl\r\n import termios\r\n import struct\r\n cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,\r\n '1234'))\r\n except:\r\n return None\r\n if cr == (0, 0):\r\n return None\r\n if cr == (0, 0):\r\n return None\r\n return cr\r\n cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)\r\n if not cr:\r\n try:\r\n fd = os.open(os.ctermid(), os.O_RDONLY)\r\n cr = ioctl_GWINSZ(fd)\r\n os.close(fd)\r\n except:\r\n pass\r\n if not cr:\r\n cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))\r\n return int(cr[1]), int(cr[0])", "title": "" }, { "docid": "a0806d15256cb30ef2a3a860e63eb225", "score": "0.5874192", "text": "def printable_char_freq(char_list):\n char_count = Counter({x:0 for x in range(32,127)})\n char_count[10]=0\n for char in tqdm(char_list):\n char_count[char] += 1\n return char_count", "title": "" }, { "docid": "7499954ba7daa86cadf3b54ffe7e0daa", "score": "0.5869852", "text": "def get_width(o):\n global widths\n if o == 0xe or o == 0xf:\n return 0\n for num, wid in widths:\n if o <= num:\n return (wid/2)*1.6\n return 1", "title": "" }, { "docid": "9dfb7c2258e50220365d4ff18c127272", "score": "0.5866512", "text": "def width(self):\n return float()", "title": "" }, { "docid": "49019dee4c986dbe9c8da3285cefc2be", "score": "0.5865865", "text": "def width(self) -> int:\n return self._width", "title": "" }, { "docid": "780d774e7b6c01c4583afbf3fd04faaa", "score": "0.5865501", "text": "def _get_windows_console_width() -> int:\n from ctypes import byref, windll\n import pyreadline\n\n out = windll.kernel32.GetStdHandle(-11)\n info = pyreadline.console.CONSOLE_SCREEN_BUFFER_INFO()\n windll.kernel32.GetConsoleScreenBufferInfo(out, byref(info))\n return info.dwSize.X", "title": "" }, { "docid": "d9cdefeee7001cc19a91e319899874ca", "score": "0.58359563", "text": "def _get_cell_width(self, cell):\n\n return cell.get_ncols() + \\\n (cell.content_pad[2] if cell.borders[2] == 0 else 0) + \\\n (cell.content_pad[3] if cell.borders[3] == 0 else 0)", "title": "" }, { "docid": "d1f46bca9f9ffa6181f02487a4d4ca02", "score": "0.5833633", "text": "def compute_width_digits(self):\n number_lines = self.editor.blockCount()\n return max(1, math.ceil(math.log10(\n number_lines + 1)))", "title": "" }, { "docid": "25169a6639fcf45efee56e23eab2b44f", "score": "0.58243555", "text": "def get_width(self):\n return", "title": "" }, { "docid": "d0e989d21f7907e547497d0494b1f5b0", "score": "0.58107597", "text": "def char_count(char, text):\n ratio = round(text.count(char)/len(text), 2) #ratio is being calculated here\n return f\"{char}-{ratio}\"", "title": "" }, { "docid": "b2c379c60afb9a2b334bf0343d23c231", "score": "0.5796841", "text": "def getWidth(self, *args):\n return _fife.GuiFont_getWidth(self, *args)", "title": "" }, { "docid": "84bc9a1c7c952cdfa3048abc25a67116", "score": "0.57902116", "text": "def space_width(self) -> 
float:\n return self._space_width", "title": "" }, { "docid": "84bc9a1c7c952cdfa3048abc25a67116", "score": "0.57902116", "text": "def space_width(self) -> float:\n return self._space_width", "title": "" }, { "docid": "1f3c626e013538bf3c54fe3eb3ca92d1", "score": "0.576465", "text": "def text_width(self, text: str) -> float:\n pass", "title": "" }, { "docid": "a4ed51932802543a9f228bffd0c33dc2", "score": "0.5751721", "text": "def width(self):\n return self._width", "title": "" }, { "docid": "a4ed51932802543a9f228bffd0c33dc2", "score": "0.5751721", "text": "def width(self):\n return self._width", "title": "" }, { "docid": "a4ed51932802543a9f228bffd0c33dc2", "score": "0.5751721", "text": "def width(self):\n return self._width", "title": "" }, { "docid": "a4ed51932802543a9f228bffd0c33dc2", "score": "0.5751721", "text": "def width(self):\n return self._width", "title": "" }, { "docid": "a4ed51932802543a9f228bffd0c33dc2", "score": "0.5751721", "text": "def width(self):\n return self._width", "title": "" }, { "docid": "a4ed51932802543a9f228bffd0c33dc2", "score": "0.5751721", "text": "def width(self):\n return self._width", "title": "" }, { "docid": "a4ed51932802543a9f228bffd0c33dc2", "score": "0.5751721", "text": "def width(self):\n return self._width", "title": "" }, { "docid": "a4ed51932802543a9f228bffd0c33dc2", "score": "0.5751721", "text": "def width(self):\n return self._width", "title": "" }, { "docid": "a4ed51932802543a9f228bffd0c33dc2", "score": "0.5751721", "text": "def width(self):\n return self._width", "title": "" }, { "docid": "a4ed51932802543a9f228bffd0c33dc2", "score": "0.5751721", "text": "def width(self):\n return self._width", "title": "" }, { "docid": "a4ed51932802543a9f228bffd0c33dc2", "score": "0.5751721", "text": "def width(self):\n return self._width", "title": "" }, { "docid": "8d15e58332ff55bd872b4891d23eb226", "score": "0.5745535", "text": "def width(self):\n return self.__width", "title": "" }, { "docid": "8d15e58332ff55bd872b4891d23eb226", "score": "0.5745535", "text": "def width(self):\n return self.__width", "title": "" }, { "docid": "8d15e58332ff55bd872b4891d23eb226", "score": "0.5745535", "text": "def width(self):\n return self.__width", "title": "" }, { "docid": "8d15e58332ff55bd872b4891d23eb226", "score": "0.5745535", "text": "def width(self):\n return self.__width", "title": "" }, { "docid": "8d15e58332ff55bd872b4891d23eb226", "score": "0.5745535", "text": "def width(self):\n return self.__width", "title": "" }, { "docid": "8d15e58332ff55bd872b4891d23eb226", "score": "0.5745535", "text": "def width(self):\n return self.__width", "title": "" }, { "docid": "0bd1aea9e8975e1f519b9215121ad712", "score": "0.57454294", "text": "def width(self):\n return self.size[0]", "title": "" }, { "docid": "8e3f5ebbdd544e4b830695bb1f31c384", "score": "0.5741446", "text": "def text_width(self, text: str) -> float:\n return self.text_width_ex(text, self.cap_height, self.width_factor)", "title": "" }, { "docid": "ae4421c616333d1ee62c8905ff762843", "score": "0.57350886", "text": "def width(self):\n return self.proto.size.x", "title": "" }, { "docid": "6efa5d03b489d2e7a1d186c3bc52d6cb", "score": "0.5733685", "text": "def getWidth(self):\n return _fife.RenderBackend_getWidth(self)", "title": "" }, { "docid": "5eec9de24b38bac7d564b65badabdd41", "score": "0.5728132", "text": "def get_data_width(self):\r\n return self._cfuncs['ka_get_data_width'](self._core._get_ka())", "title": "" } ]
14ae8c9a0de3273ad2cc6908a0638b97
full path of file in same dir as this module
[ { "docid": "7241ece943ecf2933e26465a63cbc2b5", "score": "0.0", "text": "def output_file(filename):\n return os.path.join(base_dir(), \"output\", filename)", "title": "" } ]
[ { "docid": "7e4bdee811f8cd82d52a6ce59bc3cac9", "score": "0.8570931", "text": "def getFilepath(self):\n return os.path.join(self.getModuleDir(), *(self.name().split('/')))", "title": "" }, { "docid": "db89b52e56bf59ce28b7546606fd8c1e", "score": "0.7867944", "text": "def module_path():\n\n return os.path.dirname(os.path.realpath(__file__))", "title": "" }, { "docid": "b2d9fa6e68fd5fad44eefb69174a89fe", "score": "0.7794581", "text": "def get_path():\n return os.path.dirname(os.path.abspath(__file__))", "title": "" }, { "docid": "5747ca536e9b4d97a644d4ea9f5e9bb9", "score": "0.7786061", "text": "def GET_PATH():\n return _os.path.dirname(_os.path.abspath(__file__))", "title": "" }, { "docid": "cc9d7ec03d1447e70caf34b307546cae", "score": "0.7766506", "text": "def path(self):\n return os.path.join(self._dir, self._filename)", "title": "" }, { "docid": "4408279d640c4f32748a137df3e019f9", "score": "0.7735667", "text": "def path(self):\n return os.path.join(self.config.project_directory, self.filename)", "title": "" }, { "docid": "4973bb99ecdc29d2e9c6f9b521d2b487", "score": "0.7723182", "text": "def path(self):\n\n return inspect.getfile(self.__class__)", "title": "" }, { "docid": "53b64d2d057680a1972995780826b62e", "score": "0.7722313", "text": "def getPath(self):\n\t\treturn self.path_of_module", "title": "" }, { "docid": "d7a78098742c7c7b16c3d852442476be", "score": "0.771752", "text": "def get_absolute_path_script(self):\n return str(os.path.realpath(__file__))", "title": "" }, { "docid": "f121a115ba7e9ac42737db25dac7e67e", "score": "0.770823", "text": "def __file__(self):\n return __file__", "title": "" }, { "docid": "a9058f0d80b20839d7da2d6db706ef2e", "score": "0.76578945", "text": "def get_path(fname):\n return os.path.join(os.path.dirname(__file__), fname)", "title": "" }, { "docid": "3269892a78baf4d13fc04eaacbb254b8", "score": "0.7645341", "text": "def get_file_dir():\n return os.path.dirname(os.path.abspath(__file__))", "title": "" }, { "docid": "e8230d06718917178b28b7f9bee06307", "score": "0.7625085", "text": "def getpath_this():\n path = os.path.realpath(__file__)\n return '{:s}/'.format('/'.join(path.split('/')[:-1]))", "title": "" }, { "docid": "e168c6c675637f77b4537b2c8a9c41bb", "score": "0.7597234", "text": "def _get_path_to_module():\n return Path(os.path.realpath(__file__)).parent", "title": "" }, { "docid": "8d712c01d909caa078f14fcf618d5e35", "score": "0.7591371", "text": "def __file__(self):\n\t\treturn __file__", "title": "" }, { "docid": "8d712c01d909caa078f14fcf618d5e35", "score": "0.7591371", "text": "def __file__(self):\n\t\treturn __file__", "title": "" }, { "docid": "8d712c01d909caa078f14fcf618d5e35", "score": "0.7591371", "text": "def __file__(self):\n\t\treturn __file__", "title": "" }, { "docid": "8d712c01d909caa078f14fcf618d5e35", "score": "0.7591371", "text": "def __file__(self):\n\t\treturn __file__", "title": "" }, { "docid": "8d712c01d909caa078f14fcf618d5e35", "score": "0.7591371", "text": "def __file__(self):\n\t\treturn __file__", "title": "" }, { "docid": "01f0fba40357454846213bf592186e27", "score": "0.7584166", "text": "def get_file_path(self, filename):\n return os.path.join(sys.path[0], filename)", "title": "" }, { "docid": "647a7e6d202a23daf5f8ad8bd188ee6a", "score": "0.7568828", "text": "def modulePath(self):\n base_path = os.path.dirname(self.filepath())\n module_path = self.importPath()\n\n module_path = os.path.expanduser(os.path.expandvars(module_path))\n if module_path.startswith('.'):\n module_path = os.path.abspath(os.path.join(base_path, 
module_path))\n\n return module_path", "title": "" }, { "docid": "a6638c951667e07e0f5a4f1eedfa04c2", "score": "0.7562989", "text": "def fullpath(fname):\n return Path(__file__).parent.parent / 'data' / fname", "title": "" }, { "docid": "1a255e218ac06ec87d5c0f34749775bd", "score": "0.75267124", "text": "def get_abs_path(self, filename):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), filename)", "title": "" }, { "docid": "fb3a0d19872dc820c30a0b940f52ba55", "score": "0.7496199", "text": "def rel_path(filename):\n return os.path.join(os.getcwd(), os.path.dirname(__file__), filename)", "title": "" }, { "docid": "de5bb50ef78e2fd0653e103c321c9894", "score": "0.7430502", "text": "def get_fullpath(self):\n return os.path.join(self.base_directory, self.filename)", "title": "" }, { "docid": "740ecdca8928843516fcf6fc73b49810", "score": "0.74288553", "text": "def __class_module_path(self):\n\t\tmodule_path = os.path.dirname(importlib.import_module(self.__module__).__file__)\n\t\tRegressionHelper.assert_file(module_path)\n\t\treturn module_path", "title": "" }, { "docid": "400bfd99b4a94018069c73792cd04014", "score": "0.74246526", "text": "def get_file_path(self) -> str:\n return self.__file_path", "title": "" }, { "docid": "35f0398ee55b1da92c315e5a4215ca2f", "score": "0.74010575", "text": "def get_src_dir(self):\n return os.path.realpath(os.path.dirname(__file__))", "title": "" }, { "docid": "4a111ae0088de2b2a4d3671bd4520b41", "score": "0.73824054", "text": "def _script_path():\n return os.path.dirname(os.path.realpath(__file__))", "title": "" }, { "docid": "41db2abef25089a7062acefc16604f5e", "score": "0.736783", "text": "def file_path(self):\n return os.path.join(self.project.folder, self.file.name)", "title": "" }, { "docid": "93eb83ef1fd8febb39f16f9b89b4c06f", "score": "0.735329", "text": "def get_module_directory():\n return os.path.dirname(os.path.abspath(__file__))", "title": "" }, { "docid": "342f87ee7bbb899e280d31a328978a96", "score": "0.7329924", "text": "def path(__file__):\n return lambda p: os.path.abspath(os.path.join(os.path.dirname(__file__), p))", "title": "" }, { "docid": "840eb5893cfc4cd379f7e701cd35f89a", "score": "0.72990245", "text": "def base_file_path():\n parts = os.path.realpath(__file__).split(os.sep)\n return os.sep.join(parts[0 : len(parts) - 2])", "title": "" }, { "docid": "d1c5ce6d2caf3d8209b25b9021a62ca7", "score": "0.7289116", "text": "def module_dir(self):\n mod = sys.modules[self.__class__.__module__]\n return os.path.dirname(mod.__file__)", "title": "" }, { "docid": "d932c839c62f2e5bfeab06ddd021017c", "score": "0.7287436", "text": "def data_path(fname):\n return join(dirname(realpath(__file__)), fname)", "title": "" }, { "docid": "0d911ecb8dedd90d79e95a867af6e0f8", "score": "0.72872365", "text": "def filepath(self):\n return os.path.abspath(os.path.join(self.dirname, self.filename))", "title": "" }, { "docid": "e76372f46be0e190d8682b8a7ac68234", "score": "0.72728664", "text": "def real_path(self):\n return os.path.join(self.real_folder, self.get_file_name())", "title": "" }, { "docid": "788ee12b2e1d4d16083eba946bf084fe", "score": "0.7264197", "text": "def scriptpath():\n return os.path.realpath(__file__)", "title": "" }, { "docid": "375394e2bb3e3167eefbecb5004b08f3", "score": "0.725387", "text": "def path(file):\n return Path(__file__).parent / 'assets' / file", "title": "" }, { "docid": "8dffd8186b157d1d16172631eecbb7b5", "score": "0.72529376", "text": "def _get_full_path(*rel_path):\n\tpath = os.path.abspath(__file__) # 
`.../ml_subtitle_align/preprocessing/talk.py`\n\tpath = os.path.dirname(path) # `.../ml_subtitle_align/preprocessing/`\n\tpath = os.path.dirname(path) # `.../ml_subtitle_align/`\n\treturn os.path.join(path, *rel_path)", "title": "" }, { "docid": "15ff403723eecc5b919a4f9e85ac1ca5", "score": "0.71964765", "text": "def _path(relpath):\n\tcurrent_dir = os.path.dirname(__file__)\n\treturn os.path.abspath(os.path.join(current_dir, relpath))", "title": "" }, { "docid": "38a2eebd61254437cf12ad7288d4808a", "score": "0.7181906", "text": "def get_data_path(self):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"data\"))", "title": "" }, { "docid": "52359d90ff0cb9fa3e5342a4977ec5fd", "score": "0.7175491", "text": "def file_path(self):\n\t\treturn self._file_path", "title": "" }, { "docid": "2dc6b86d7f083c4a9cfcdbf0b6e05c33", "score": "0.71680117", "text": "def get_absolute_path_script_dir(self):\n return str(os.path.dirname(os.path.realpath(__file__)))", "title": "" }, { "docid": "a7c109a325582bdfdb12441b80914c0c", "score": "0.7159172", "text": "def get_module_dir(cls):\n\t\treturn os.path.dirname(inspect.getfile(cls))", "title": "" }, { "docid": "1a2be0e1ba771eb819127ce7b1959002", "score": "0.71475554", "text": "def find_module_path():\n #return os.path.dirname(os.path.realpath(os.path.join(inspect.getsourcefile(endpoints), \"..\")))\n path = os.path.dirname(inspect.getsourcefile(endpoints))\n return path", "title": "" }, { "docid": "358468dbea4a5c145e8b2e497faa1b64", "score": "0.7125148", "text": "def find_path():\n __dir_path__ = os.path.dirname(os.path.realpath(__file__))\n return __dir_path__", "title": "" }, { "docid": "12a1baf66fee21961d706bc4c3f2a6d3", "score": "0.71243006", "text": "def file_path(self):\n return self._file.file_path", "title": "" }, { "docid": "001334186a50e8fdbe89cbf4255062d6", "score": "0.71216047", "text": "def get_file_path(self):\n return self.file_path", "title": "" }, { "docid": "fba2821b46c90d361e3bff49f297b675", "score": "0.71211815", "text": "def _path(relpath):\n\tparent = os.path.join(os.path.dirname(__file__), \"..\")\n\treturn os.path.abspath(os.path.join(parent, relpath))", "title": "" }, { "docid": "7f69e95ea63ef556af6000a37e136c6a", "score": "0.710904", "text": "def file_path(self):\n return self._file_path", "title": "" }, { "docid": "7f69e95ea63ef556af6000a37e136c6a", "score": "0.710904", "text": "def file_path(self):\n return self._file_path", "title": "" }, { "docid": "7f69e95ea63ef556af6000a37e136c6a", "score": "0.710904", "text": "def file_path(self):\n return self._file_path", "title": "" }, { "docid": "7f69e95ea63ef556af6000a37e136c6a", "score": "0.710904", "text": "def file_path(self):\n return self._file_path", "title": "" }, { "docid": "7f69e95ea63ef556af6000a37e136c6a", "score": "0.710904", "text": "def file_path(self):\n return self._file_path", "title": "" }, { "docid": "7f69e95ea63ef556af6000a37e136c6a", "score": "0.710904", "text": "def file_path(self):\n return self._file_path", "title": "" }, { "docid": "7f69e95ea63ef556af6000a37e136c6a", "score": "0.710904", "text": "def file_path(self):\n return self._file_path", "title": "" }, { "docid": "e495048a409b5ef3be54de49d3d8925c", "score": "0.7100815", "text": "def fullpath(self, filename):\n\n thisdir = os.path.dirname(os.path.realpath(__file__))\n path = os.path.join(thisdir, self.catalogdir)\n return os.path.join(path, filename)", "title": "" }, { "docid": "4143df7cb07c5a17cf3ac2f1bd4ec272", "score": "0.7095701", "text": "def get_source_path() -> str:\n return 
os.path.abspath(config().get('SOURCE_PATH', './'))", "title": "" }, { "docid": "8d3435a4078f3f6134a57e306b7f296f", "score": "0.70916647", "text": "def _module_path():\r\n if _we_are_frozen():\r\n return os.path.dirname(unicode(sys.executable, sys.getfilesystemencoding( )))\r\n return os.path.dirname(unicode(__file__, sys.getfilesystemencoding( )))", "title": "" }, { "docid": "d9f523b4a9f5654bd1b7a3c8c47cd7c2", "score": "0.7088727", "text": "def file_path(file_relative):\n return os.path.join(os.path.dirname(__file__), file_relative)", "title": "" }, { "docid": "30e30b60cc120e982385fbb3ce4c4367", "score": "0.7088725", "text": "def module_dir():\n from os.path import dirname\n return dirname(module_path())", "title": "" }, { "docid": "aa9dd9babbba467cb49ebe5b4d424074", "score": "0.7079805", "text": "def get_path():\n # Theme directory is defined as our parent directory\n return os.path.abspath(os.path.dirname(os.path.dirname(__file__)))", "title": "" }, { "docid": "7eb1aaecc5d4e6f0ffc95bad1b1ea3a9", "score": "0.706917", "text": "def get_filepath(self):\n return self.path", "title": "" }, { "docid": "45d5f9b1c24722516c769e01b20bf4c3", "score": "0.70595044", "text": "def get_res_path():\n res_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'res')\n return res_path", "title": "" }, { "docid": "4c94f527e6b9a472e5c1edbf18a2f9fd", "score": "0.70537144", "text": "def get_src_file_path(self):\r\n \r\n return self.src_file_path", "title": "" }, { "docid": "83baf7ea318314a079401fde23f7de39", "score": "0.7038526", "text": "def _data_path(fname):\n return os.path.join(os.path.split(__file__)[0], fname)", "title": "" }, { "docid": "e5d57bea92546410312228567ae4fd8c", "score": "0.70334077", "text": "def absolute_file_path(self):\n return os.path.join(self.project.absolute_path, self.filename)", "title": "" }, { "docid": "be01869b7fb4dc6994d4a0b9903f3526", "score": "0.70184827", "text": "def relpath(filename):\n return os.path.join(os.path.dirname(__file__), filename)", "title": "" }, { "docid": "b57dcfaff83e82b581b1180bb522b1cc", "score": "0.70170563", "text": "def getPathData():\n strLocalPath = os.path.dirname(sys.modules[__name__].__file__)\n if strLocalPath == \"\": strLocalPath = './'\n ret = os.path.abspath(strLocalPath + \"/../data/\") + \"/\"\n #~ print(ret)\n return ret", "title": "" }, { "docid": "97e33ec91f8de3e1bec414bee534c6a2", "score": "0.70149493", "text": "def getPath(self):\r\n\t\treturn sublime.active_window().active_view().file_name()", "title": "" }, { "docid": "10e7f0b708f39ef48357cb5655a024a3", "score": "0.70129406", "text": "def get_directory_fn():\r\n return self.model_directory.get_absolute_path()", "title": "" }, { "docid": "bb5e2eaaedd7a2a08dbe2f5639e9d051", "score": "0.7008315", "text": "def path(self):\n the_file = self._first_file()\n if the_file is None:\n return None\n return the_file.path()", "title": "" }, { "docid": "897d34267f5d0739b512fc6330c6e463", "score": "0.7005799", "text": "def ctx_testfile(self) -> pathlib.Path:\n return self.path.joinpath(self._testfile.name)", "title": "" }, { "docid": "f6b77694705c95031482d84947ca6053", "score": "0.69967926", "text": "def path(self):\n return os.path.join(os.path.expanduser(self.root), self.name)", "title": "" }, { "docid": "8d67d05d68d1ff4090226499e6fbc91e", "score": "0.69951636", "text": "def file(self, relative=False):\n obj = self.origin\n try:\n file = inspect.getfile(obj)\n except TypeError:\n return\n\n if relative:\n top_module = self.get_parent(depth=-1, index=-1)\n if top_module is None:\n top_module 
= self\n split_file = file.split(top_module.name)\n file = f\"{top_module.name}{split_file[-1]}\"\n return file", "title": "" }, { "docid": "0e4ccf9095d177b3ad51741029fa888a", "score": "0.6990668", "text": "def full_path(self, filename):\n return os.path.join(self.rootdir, filename)", "title": "" }, { "docid": "a6ab15f53856e7c05ccaf476648e5354", "score": "0.6989049", "text": "def base_dir():\n return os.path.dirname(__file__)", "title": "" }, { "docid": "7745c3c8051d59a16439611407377495", "score": "0.6985478", "text": "def _find_real_path(path):\r\n return os.path.join(os.path.dirname(__file__), path)", "title": "" }, { "docid": "a2e59a4818cf37d05d6e8baef1e472f7", "score": "0.69809246", "text": "def get_config_file_path() -> str:\n current_path = os.path.abspath(__file__)\n current_dir = os.path.dirname(current_path)\n return os.path.join(current_dir, \"shared\", \"dbhelper\", config_file_name)", "title": "" }, { "docid": "4fb73cae10dad81b42a53e68403de8e9", "score": "0.69801164", "text": "def _get_R_script_path(self):\n return join(self._get_R_script_dir(), self._R_script)", "title": "" }, { "docid": "0628ced01e4d082c2075a6f7b99351c4", "score": "0.6967844", "text": "def get_file_path_stub():\n import os\n cwd = os.getcwd()\n tests_root = cwd[0:cwd.find(\"tests\") + 5]\n return tests_root + \"/problem_decomposition/dmet/\"", "title": "" }, { "docid": "9dc987acba42eec3629a4a3a0233cae6", "score": "0.69642514", "text": "def GetPath(self):\n pass", "title": "" }, { "docid": "ec7d82f7f8b454512ae79f2a83afb3e8", "score": "0.6950391", "text": "def path(self) -> str:\n\t\treturn self.__path", "title": "" }, { "docid": "ec7d82f7f8b454512ae79f2a83afb3e8", "score": "0.6950391", "text": "def path(self) -> str:\n\t\treturn self.__path", "title": "" }, { "docid": "ec7d82f7f8b454512ae79f2a83afb3e8", "score": "0.6950391", "text": "def path(self) -> str:\n\t\treturn self.__path", "title": "" }, { "docid": "ae34137d263836101fadc41d1885362a", "score": "0.69472426", "text": "def test_base_dir(self):\n return os.path.dirname(__file__)", "title": "" }, { "docid": "5385ee332eea22e2d666e92fe51fff70", "score": "0.6932925", "text": "def __script_path(self, script_module_path, script):\n\t\tRegressionHelper.assert_file(script_module_path)\n\t\tpath_script = \"%s/%s.py\" % (script_module_path, script)\n\t\tRegressionHelper.assert_file(path_script)\n\t\treturn path_script", "title": "" }, { "docid": "cd88c2de8866f63c05ce4ef37d2905bd", "score": "0.692477", "text": "def menpo_src_dir_path():\n from pathlib import Path # to avoid cluttering the menpo.base namespace\n\n return Path(os.path.abspath(__file__)).parent", "title": "" }, { "docid": "b216cf324289bfe00a656ccfd7dca5dc", "score": "0.69218504", "text": "def get_path(self) -> str:", "title": "" }, { "docid": "d27e2ff0f44bfbfad63f51f14a498956", "score": "0.69191605", "text": "def file_path(self):\n\n return self._file_path", "title": "" }, { "docid": "d110bf65fdc36d5d1f5d094323c14d9d", "score": "0.69186807", "text": "def working_dir(self) -> str:", "title": "" }, { "docid": "b3b8d8239eaa90c3c071e8e6e8760dc8", "score": "0.6915403", "text": "def filepath(self):", "title": "" }, { "docid": "e4dbc9ce2313a64fe41b76db05141729", "score": "0.6913923", "text": "def get_filepath(self):\n return self.filepath", "title": "" }, { "docid": "74ffae7fa35b480f6774134d33d8f267", "score": "0.69122493", "text": "def test_files():\n return Path(__file__).parent / \"test_files\"", "title": "" }, { "docid": "cb8238f0383357e59e6a5659fad858a4", "score": "0.69092834", "text": "def 
currentfolder(self):\n return os.path.dirname(__file__)", "title": "" }, { "docid": "20c8173b24ae80beaf01654717592978", "score": "0.6908763", "text": "def get_current_path():\n return Path(__file__).absolute().parent.parent", "title": "" }, { "docid": "fc4133bf5b89914e9cca1ec6fe2869bc", "score": "0.6907752", "text": "def get_path(filename):\n _path = os.path.join(os.path.split(__file__)[0], \"files\", filename)\n return _path", "title": "" }, { "docid": "a56c96c92e9a1f950e679ce157538d7d", "score": "0.69056046", "text": "def relpath(filename):\n\n return os.path.join(os.path.dirname(__file__), filename)", "title": "" }, { "docid": "50736f98852e23cea53fbe9a9a86d2c9", "score": "0.69030863", "text": "def module_dir():\n from inspect import getfile\n pathname = getfile(module_dir)\n from os.path import dirname\n pathname = dirname(pathname)\n from os.path import isabs, join\n from os import getcwd\n if not isabs(pathname):\n pathname = join(getcwd(), pathname)\n from os.path import realpath\n pathname = realpath(pathname)\n return pathname", "title": "" }, { "docid": "23bc53351f98064dbe0614c951092c56", "score": "0.6901978", "text": "def module_path():\n from sys import path\n from os import getcwd\n from os.path import basename,exists\n from inspect import getmodulename,getfile\n # 'getfile' retreives the source file name name compiled into the .pyc file.\n pathname = getfile(lambda x: None)\n if exists(pathname): return pathname\n # The module might have been compiled on a different machine or in a\n # different directory.\n pathname = pathname.replace(\"\\\\\",\"/\")\n filename = basename(pathname)\n dirs = [dir for dir in [getcwd()]+path if exists(dir+\"/\"+filename)]\n if len(dirs) == 0: print \"pathname of file %r not found\" % filename\n dir = dirs[0] if len(dirs) > 0 else \".\"\n pathname = dir+\"/\"+filename\n return pathname", "title": "" } ]
e8d87379e0b817bb4911cc5b1c034938
Function called when the value must be an integer.
[ { "docid": "a719345e77eb61920c2033337574f159", "score": "0.0", "text": "def checking_value(value):\n if user_language[0] == \"fr\":\n prb_text = \"La durée doit être un nombre entier.\"\n ok_text = \"La durée a bien été modifiée.\"\n\n else: #user_language[0] == \"en\":\n prb_text = \"The duration must be an integer.\"\n ok_text = \"The duration has been changed.\"\n \n try:\n int(value)\n except ValueError:\n return False, prb_text\n\n return True, ok_text", "title": "" } ]
[ { "docid": "77a2a671d460d73c24d10626bc3aea3a", "score": "0.78474224", "text": "def isInteger(self):", "title": "" }, { "docid": "77a2a671d460d73c24d10626bc3aea3a", "score": "0.78474224", "text": "def isInteger(self):", "title": "" }, { "docid": "77a2a671d460d73c24d10626bc3aea3a", "score": "0.78474224", "text": "def isInteger(self):", "title": "" }, { "docid": "77a2a671d460d73c24d10626bc3aea3a", "score": "0.78474224", "text": "def isInteger(self):", "title": "" }, { "docid": "bf75c11cc20db592d60be9828f480670", "score": "0.78153044", "text": "def is_integer(value):\n return isinstance(value, int)", "title": "" }, { "docid": "90bddf5f1f0a35d5d41368324917a6d5", "score": "0.7806367", "text": "def _validate_int(display_name, input_value):\n\n if int(input_value) is False:\n raise ValueError(display_name + \"must be a integer type\")", "title": "" }, { "docid": "427cd6dac0c87bc93a60aa3c1786332c", "score": "0.7768368", "text": "def integer_validator(self, name, value):\n if type(value) is not int:\n raise TypeError(\"{} must be an integer\".format(name))\n if value <= 0:\n raise ValueError(\"{} ust be greater than 0\".format(name))", "title": "" }, { "docid": "4eceb3cf28e15953225a33fe09e7afc8", "score": "0.7736257", "text": "def validate_int(value, column):\n if isinstance(value, str):\n print(value)\n value = int(value)\n elif value is None:\n value = None\n else:\n if not isinstance(value, int):\n raise AssertionError(\"Invalid field {0}\".format(column))\n return value", "title": "" }, { "docid": "b711254951f0971c5c2bda05adcc488f", "score": "0.7703602", "text": "def _validate_int(display_name, input_value):\r\n\r\n if input_value != int(input_value):\r\n raise ValueError(display_name + \" must be an integer type.\")", "title": "" }, { "docid": "9fe4d69daf0b1ce0e701093197f594b6", "score": "0.7680796", "text": "def is_integer(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "0443cb1ae867c94323d459d52f08bde0", "score": "0.76344156", "text": "def integer_validator(self, name, value):\n if not isinstance(value, int):\n raise TypeError(\"{:s} must be an integer\".format(name))\n if not(value > 0):\n raise ValueError(\"{:s} must be greater than 0\".format(name))", "title": "" }, { "docid": "d8da2534cfce2095c4ccc617922b1717", "score": "0.76176184", "text": "def integer_validator(self, name, value):\n if type(value) is not int:\n raise TypeError(\"{:s} must be an integer\".format(name))\n if value <= 0:\n raise ValueError(\"{:s} must be greater than 0\".format(name))", "title": "" }, { "docid": "93dac8b4d0238e4292024e2511ba38b9", "score": "0.76141006", "text": "def integer_validator(self, name, value):\n if type(value) is not int:\n raise TypeError(\"{} must be an integer\".format(name))\n if value <= 0:\n raise ValueError(\"{} must be greater than 0\".format(name))", "title": "" }, { "docid": "88322d99f3a6a7651264f2dddd91feb4", "score": "0.7608299", "text": "def integer_validator(self, name, value):\n if type(value) != int:\n raise TypeError('{} must be an integer'.format(name))\n if value <= 0:\n raise ValueError('{} must be greater than 0'.format(name))", "title": "" }, { "docid": "a4e2f8165b91affcbb0e0119cfa9acd3", "score": "0.76024914", "text": "def integer_validator(self, name, value):\n if type(value) != int:\n raise TypeError(\"{} must be an integer\".format(name))\n if value <= 0:\n raise ValueError(\"{} must be greater than 0\".format(name))", "title": "" }, { "docid": "f1db72ddfed005e64d4cd0f74e10308a", "score": "0.75464445", "text": "def 
integer_validator(self, name, value):\n\n if type(value) != int:\n raise TypeError(\"{} must be an integer\".format(name))\n if value <= 0:\n raise ValueError(\"{} must be greater than 0\".format(name))", "title": "" }, { "docid": "698f1f2e0392724cf961604585001243", "score": "0.749977", "text": "def validate_json_integer_value(cls,json_value):\n if type(json_value)==int:\n return True \n else:\n return False", "title": "" }, { "docid": "98d312d8f464df49c019a23fc5dd7e2f", "score": "0.73785394", "text": "def check_int(**kwargs):\n int_check = isinstance(kwargs['val'], int)\n val = kwargs['val']\n if int_check == False:\n try:\n val = int(kwargs['val'])\n except Exception as e:\n val = None\n log.error('Unable to convert %s to an integer due to error %s' %\n (val, e))\n return val", "title": "" }, { "docid": "36a44d89c44550cd91eb765faaf77de2", "score": "0.7312503", "text": "def isInteger(self) -> bool:", "title": "" }, { "docid": "2f805c634bc21413fe77c627c3b58682", "score": "0.7301576", "text": "def _validate_integer(self, action_result, parameter, key):\n\n if parameter is not None:\n try:\n if not float(parameter).is_integer():\n return action_result.set_status(phantom.APP_ERROR, VALID_INTEGER_MSG.format(key)), None\n\n parameter = int(parameter)\n except:\n return action_result.set_status(phantom.APP_ERROR, VALID_INTEGER_MSG.format(key)), None\n\n if parameter < 0:\n return action_result.set_status(phantom.APP_ERROR, NON_NEGATIVE_INTEGER_MSG.format(key)), None\n return phantom.APP_SUCCESS, parameter", "title": "" }, { "docid": "0f2150cb5c0da247b2b9efff17df0ef1", "score": "0.7226838", "text": "def __validate_type(self, attr_name, val):\n if not isinstance(val, int):\n raise TypeError(attr_name + \" must be an integer\")", "title": "" }, { "docid": "bbb69acbbd6b33e12665febb32674a9c", "score": "0.7169979", "text": "def _integer(self, number):\n n = str(number)\n if not re.search(r'^[0-9]{1,}$', n):\n raise ValueError(_('Value (%s) is not an integer'%(n)))\n return True", "title": "" }, { "docid": "a8076cd8e1ab436bd7689da3baf7f3d4", "score": "0.71556824", "text": "def is_int(f):\n assert isinstance(f, int)", "title": "" }, { "docid": "16511c3fb707ae891fdb5493edb1f187", "score": "0.71427435", "text": "def isInteger(self):\n return type(self.val) == int", "title": "" }, { "docid": "f380defedce0f256effa7efda8a6f5c8", "score": "0.71371055", "text": "def check_int(val):\n\n if not isinstance(val, (int, float, list, tuple, np.ndarray)):\n raise TypeError('Invalid input type.')\n if isinstance(val, float):\n val = int(val)\n elif isinstance(val, (list, tuple)):\n val = np.array(val, dtype=int)\n elif isinstance(val, np.ndarray) and (not np.issubdtype(val.dtype,\n np.integer)):\n val = val.astype(int)\n\n return val", "title": "" }, { "docid": "b1da1f71551c15e3c402bf0d12fcc012", "score": "0.7121012", "text": "def _xbrli_integer_item_type_validator(self, value):\n errors = []\n if isinstance(value, int):\n value = int(value)\n elif isinstance(value, str):\n try:\n value = int(value)\n except:\n errors.append(\"'{}' is not a valid integer value.\".format(value))\n else:\n errors.append(\"'{}' is not a valid integer value.\".format(value))\n return value, errors", "title": "" }, { "docid": "c200fd77b345506fde0e65e47088a75a", "score": "0.71104234", "text": "def isInt(x):\n return isinstance(x, int)", "title": "" }, { "docid": "a3639d692ca38d2a919e050b0183cf57", "score": "0.70197344", "text": "def is_int(self):\n return self.ntype == 'i'", "title": "" }, { "docid": "e369f5d470773a70bd25b6b595d457b9", 
"score": "0.6987555", "text": "def _validate_integer_ht_0(value):\n msg = \"must be a positive integer\"\n try:\n value = int(value)\n except ValueError:\n raise argparse.ArgumentTypeError(msg)\n if not value >= 1:\n raise argparse.ArgumentTypeError(msg)\n return value", "title": "" }, { "docid": "c3eed4064223bebfc0c8992ed61e7a8c", "score": "0.6930183", "text": "def ensureInt(value):\n\tif value is True or value is False:\n\t\traise TypeError(\"Even though int(False) and int(True) work, we disallow it.\")\n\tinted = int(value)\n\tif inted != value:\n\t\traise ValueError(\"%r cannot be converted to identical integer\" % (value,))\n\treturn inted", "title": "" }, { "docid": "876c84197e84b9ed1219f0277508b670", "score": "0.69263744", "text": "def is_integer(variable, message=None):\n if not isinstance(variable, int):\n raise AssertionException(message)", "title": "" }, { "docid": "b285f48233a6b20d7de93f0ed8a098bb", "score": "0.692629", "text": "def is_valid_integer(session):\n\n try:\n session.data = int(session.data)\n except TypeError:\n raise session.field.invalid(error_type='type_error')\n except ValueError:\n raise session.field.invalid(error_type='type_error')\n return session.data", "title": "" }, { "docid": "d08ffee1024ede4ed6dea9750fa72b88", "score": "0.69145864", "text": "def integer_validator(self, name, value):\n if type(value) is not int:\n raise TypeError(\"{} must be an integer\".format(name))\n if (name == \"width\" or name == \"height\") and value <= 0:\n raise ValueError(\"{} must be > 0\".format(name))\n if (name == \"x\" or name == \"y\") and value < 0:\n raise ValueError(\"{} must be >= 0\".format(name))", "title": "" }, { "docid": "2df26d95786962d558b4eb3ba112f552", "score": "0.68979144", "text": "def isint(value):\r\n \r\n try:\r\n int(value)\r\n return True\r\n \r\n except ValueError:\r\n return False", "title": "" }, { "docid": "0c166a5e2b726ec47657c54fb3c73d2a", "score": "0.68860674", "text": "def check_integer(key_check):\n global wrong_key\n try:\n if isinstance(int(key_check), int):\n wrong_key = False\n except:\n print(\"The number you've entered is not an integer\")", "title": "" }, { "docid": "7fa89e47ddf29e76b915971de5408e91", "score": "0.6884033", "text": "def is_int(value):\n try:\n int(value)\n return True\n except ValueError:\n return False", "title": "" }, { "docid": "7fa89e47ddf29e76b915971de5408e91", "score": "0.6884033", "text": "def is_int(value):\n try:\n int(value)\n return True\n except ValueError:\n return False", "title": "" }, { "docid": "e71bde775e2187516f3b9e6fc8b413a3", "score": "0.6873587", "text": "def __call__(self, value):\n try:\n return int(value) >= 0\n except ValueError:\n return False", "title": "" }, { "docid": "b6b5035c65f8ccd05d376d817711f819", "score": "0.68240803", "text": "def test_apply_int(x):\r\n try:\r\n return int(x)\r\n except ValueError:\r\n return None", "title": "" }, { "docid": "b1a7f65ed376a32211c1a1aadc47b424", "score": "0.68235534", "text": "def _sanitize_pos_int(arg_name: str, arg_value) -> None:\n if not isinstance(arg_value, int):\n raise TypeError(\"<{}> has to be an integer!\".format(arg_name))\n if arg_value < 1:\n raise ValueError(\"<{}> has to be > 0!\".format(arg_name))", "title": "" }, { "docid": "cef022d8959f6ad12aecf79a8d111a3d", "score": "0.6814068", "text": "def _int_converter(value: typing.Union[str, int]) -> int:\n try:\n return int(value)\n except ValueError:\n raise RequestError(3101)", "title": "" }, { "docid": "4af2bbbbb29f14e902cdae2a35a2741e", "score": "0.67951614", "text": "def 
verify_integer(value: int) -> None:\n if value < MIN_INT or value > MAX_INT:\n raise ValueError(f'Integer overflow: {value}')", "title": "" }, { "docid": "32a3f70276585831d05971f465a0e964", "score": "0.67783344", "text": "def force_int(label, value):\n if value is None:\n return None\n try:\n result = int(value)\n except ValueError:\n raise ArgumentError(\"Expected an integer for %s.\" % label)\n return result", "title": "" }, { "docid": "0e2dfbb7990b25159e58b6e6000d2cb6", "score": "0.67604357", "text": "def ensureNonNegInt(value):\n\n\tif isinstance(value, (int, long, float)) and value is not True and value is not False:\n\t\tif value < 0:\n\t\t\traise ValueError(\"%r is < 0\" % (value,))\n\t\telif isinstance(value, float):\n\t\t\treturn ensureInt(value)\n\t\telse:\n\t\t\treturn value\n\telse:\n\t\traise TypeError(\"%r is not an int/long/float\" % (value,))", "title": "" }, { "docid": "10c6f080f447cf0c29f145940470ac59", "score": "0.6752369", "text": "def setInteger(self, value):", "title": "" }, { "docid": "10c6f080f447cf0c29f145940470ac59", "score": "0.6752369", "text": "def setInteger(self, value):", "title": "" }, { "docid": "491f6fcd9b8518a32d71d6f517513641", "score": "0.6749265", "text": "def normalise_filled(self, meta, val):\n if not isinstance(val, bool) and (isinstance(val, int) or hasattr(val, \"isdigit\") and val.isdigit()):\n return int(val)\n raise BadSpecValue(\"Expected an integer\", meta=meta, got=type(val))", "title": "" }, { "docid": "7d5a392aeeb7f6b19146211df870e843", "score": "0.67487824", "text": "def in_int(self):\n x = input()\n searchObj = re.search(r'([0-9]+).', x)\n if searchObj:\n return Integer(searchObj.group(1))\n else:\n raise TypeError(\"No integer given for call to <in_int>\")", "title": "" }, { "docid": "a946516377b784cfcb4fa0b952e52750", "score": "0.67364335", "text": "def test_convert_to_int_returns_error_for_non_integers(self):\n integer_list = convert_to_int([\"something\"])\n self.assertEqual(\n integer_list[1],\n \"Please enter integers only\",\n \"Convert to int did not return an error for non int value\",\n )", "title": "" }, { "docid": "267364293e6b561ffb9be2b1bf7481ac", "score": "0.6734868", "text": "def _integer(value):\n # Try conversion\n try:\n result = int(value)\n except:\n result = None\n\n # Return\n return result", "title": "" }, { "docid": "016dd63608c11cc5c06df3cef6578740", "score": "0.6732103", "text": "def validate(self, value):\n try:\n v = int(value)\n if v not in self.allowed:\n return None\n return value\n except ValueError:\n return None", "title": "" }, { "docid": "4d4a8b2cc9ffe24e51e78c8b98d0dd2e", "score": "0.672751", "text": "def validate_int(\n value=_undefined,\n min_value=None, max_value=None,\n required=True,\n):\n validate = _int_validator(\n min_value=min_value, max_value=max_value,\n required=required,\n )\n\n if value is not _undefined:\n validate(value)\n else:\n return validate", "title": "" }, { "docid": "7bd4ccd2d70cbdc4c71b999e367c70fb", "score": "0.6721454", "text": "def int_filter(value):\n try:\n return int(value)\n except:\n return 0", "title": "" }, { "docid": "7e0031c7c261750b88acdc32d2d84c01", "score": "0.6694197", "text": "def intValue(self, *args):\n pass", "title": "" }, { "docid": "98a687628603dc3ec7af1a775105a967", "score": "0.66899264", "text": "def _validate_integers(self, action_result, parameter, key, allow_zero=False):\r\n try:\r\n parameter = int(parameter)\r\n\r\n if parameter <= 0:\r\n if allow_zero:\r\n if parameter < 0:\r\n action_result.set_status(phantom.APP_ERROR, 
GC_LIMIT_VALIDATION_ALLOW_ZERO_MSG.format(parameter=key))\r\n return None\r\n else:\r\n action_result.set_status(phantom.APP_ERROR, GC_LIMIT_VALIDATION_MSG.format(parameter=key))\r\n return None\r\n except Exception as e:\r\n self.debug_print(f\"Integer validation failed. Error occurred while validating integer value. Error: {str(e)}\")\r\n error_text = GC_LIMIT_VALIDATION_ALLOW_ZERO_MSG.format(parameter=key) if allow_zero else GC_LIMIT_VALIDATION_MSG.format(parameter=key)\r\n action_result.set_status(phantom.APP_ERROR, error_text)\r\n return None\r\n\r\n return parameter", "title": "" }, { "docid": "826644c80d281b4b1039d38a0917e8a0", "score": "0.6684262", "text": "def Integer(value):\n if value is None:\n return None\n return int(value)", "title": "" }, { "docid": "3f4c3763b796f4dabec409cfe351e053", "score": "0.66796255", "text": "def is_int(number):\n return int(number) == number", "title": "" }, { "docid": "eaee0cc863424171ff49ab5b4ae9b615", "score": "0.66750085", "text": "def _check_token_integer(self, tok):\n if tok.type is TOKEN_TYPE_INTEGER:\n return int(tok.value)\n if tok.value in _KNOWN_IDENTIFIERS:\n return _KNOWN_IDENTIFIERS[tok.value]\n self._raise_exception(\"Integer expected instead of '\" + tok.value + \"'\")", "title": "" }, { "docid": "8888deef184b9ca54742e13396720a46", "score": "0.6664299", "text": "def is_int(var):\n\n return isinstance(var, int)", "title": "" }, { "docid": "5134bbbcacadf5fc2f137e40a881d779", "score": "0.66550153", "text": "def is_int(val: object) -> bool:\n if isinstance(val, (str, int)) and (isinstance(val, int) or val.isdigit()):\n return True\n else:\n return False", "title": "" }, { "docid": "68687df124699b8ee59d861ff447758f", "score": "0.66515076", "text": "def check_int(argument: str, number: Any) -> int:\n try:\n return int(number)\n except (ValueError, TypeError) as e:\n raise ArgumentError(argument, \"Invalid integer: \"\n \"{}\".format(number)) from e", "title": "" }, { "docid": "197dd649997b974cdf13333720868ce9", "score": "0.6638447", "text": "def process(value):\n return int(value)", "title": "" }, { "docid": "ee52ea3fd1bff37c5d5a78cb42a8dfc5", "score": "0.6626976", "text": "def _check_int_input(var, input_name: str) -> int:\n\n if not isinstance(var, int) and (\n not isinstance(var, float) or not var.is_integer()):\n raise ValueError(\"Input {} must be an integer.\".format(input_name))\n\n return int(var)", "title": "" }, { "docid": "66858042f288e576a432d8852c910baf", "score": "0.662375", "text": "def validate_zero(self, attr, value):\n if not type(value) == int:\n raise TypeError(\"{} must be an integer\".format(attr))\n if value < 0:\n raise ValueError(\"{} must be >= 0\".format(attr))", "title": "" }, { "docid": "30ee3fb7ad88d260abcb4c7f39cc1505", "score": "0.66216564", "text": "def validate(self, value):\n try:\n # trap blank fields here\n if not self.blank or value:\n int(value)\n return value\n except ValueError:\n return None", "title": "" }, { "docid": "374daedd4d5f61ee165734ea51a0ea4c", "score": "0.66078705", "text": "def is_int(n__):\n return isinstance(n__, int_types)", "title": "" }, { "docid": "6fd9c9e58519b9c88b0e3fc7b6b877cf", "score": "0.65987575", "text": "def insupport(self, x):\n return isinstance(x, int)", "title": "" }, { "docid": "6fd9c9e58519b9c88b0e3fc7b6b877cf", "score": "0.65987575", "text": "def insupport(self, x):\n return isinstance(x, int)", "title": "" }, { "docid": "6fd9c9e58519b9c88b0e3fc7b6b877cf", "score": "0.65987575", "text": "def insupport(self, x):\n return isinstance(x, int)", "title": "" }, { 
"docid": "6fd9c9e58519b9c88b0e3fc7b6b877cf", "score": "0.65987575", "text": "def insupport(self, x):\n return isinstance(x, int)", "title": "" }, { "docid": "6fd9c9e58519b9c88b0e3fc7b6b877cf", "score": "0.65987575", "text": "def insupport(self, x):\n return isinstance(x, int)", "title": "" }, { "docid": "d1fee5439be6553769480486fdde071c", "score": "0.65914565", "text": "def validate(self, value):\n try:\n # trap blank fields here\n if not self.blank or value:\n v = int(value)\n if v < 0:\n return None\n return value\n except ValueError:\n return None", "title": "" }, { "docid": "06165fcc610e21c2fa16f631a16a0350", "score": "0.65887105", "text": "def validate_value(cls, value: int | str, field: ModelField) -> int:\n if isinstance(value, str):\n value = int(value)\n\n if cls.ge is not None and not value >= cls.ge:\n raise InvalidIntegerValue(\n field_name=field.name, operation='greater than or equal to', constraint=cls.ge\n )\n if cls.gt is not None and not value > cls.gt:\n raise InvalidIntegerValue(\n field_name=field.name, operation='greater than', constraint=cls.gt\n )\n\n if cls.le is not None and not value <= cls.le:\n raise InvalidIntegerValue(\n field_name=field.name, operation='less than or equal to', constraint=cls.le\n )\n if cls.lt is not None and not value < cls.lt:\n raise InvalidIntegerValue(\n field_name=field.name, operation='less than', constraint=cls.lt\n )\n return value", "title": "" }, { "docid": "718de06fe06c0c15cc5f607e981a70ec", "score": "0.6582519", "text": "def get_int(val: Any) -> int:\n return int(val)", "title": "" }, { "docid": "268e3eba5c858c0741d0da6d247025d3", "score": "0.6566592", "text": "def yices_val_is_integer(mdl, v):\n assert mdl is not None\n return libyices.yices_val_is_integer(mdl, v)", "title": "" }, { "docid": "d7282b11262c22c03a75e2d97676129b", "score": "0.65575844", "text": "def is_int(data):\n try:\n int(data)\n return True\n except ValueError:\n return False", "title": "" }, { "docid": "278bc3072da237383447aaac30152e95", "score": "0.65502083", "text": "def _is_int(x):\n if(int(x) == x):\n return(True)\n else:\n return(False)", "title": "" }, { "docid": "be323bc50494eab756fca51d9165441f", "score": "0.65471804", "text": "def check_positive_and_not_zero_int(value):\n ivalue = int(value)\n if ivalue <= 0:\n raise argparse.ArgumentTypeError(\"{} is an invalid positive\"\n \"int value\".format(value))\n return ivalue", "title": "" }, { "docid": "e5357a656ef9304df1784c194ec4c44c", "score": "0.6545656", "text": "def is_int(maybe_int):\n try:\n int(maybe_int)\n return True\n except ValueError:\n return False", "title": "" }, { "docid": "5bfa89209c7967befd96a021a04739f2", "score": "0.653456", "text": "def parameter_value_converter_int(value):\n try:\n value = int(value)\n except ValueError:\n value = None\n \n return value", "title": "" }, { "docid": "fa59c9024fca4f190667c81b0e7c7a26", "score": "0.6522566", "text": "def number(value):\n try:\n return int(value.value)\n except (AttributeError, ValueError):\n raise ValueError('%r cannot be read as a number.' 
% value)", "title": "" }, { "docid": "cb11b3a757508411bbe17d7b81d18b9e", "score": "0.65164834", "text": "def check_pos(self, name, value):\n if type(value) != int:\n raise TypeError(\"%s must be an integer\" % name)\n if value < 0:\n raise ValueError(\"%s must be >= 0\" % name)", "title": "" }, { "docid": "f07b9c69575c9d4f8b46a71dc96dba49", "score": "0.65065813", "text": "def _init_int(self, value: int):\n # In the static attribute \"INTEGER_MAP\" all possible integer values, representing a rarity value are listed as\n # the keys. If a integer is passed that does not equate to a rarity an exception is risen-\n if value in self.INTEGER_MAP.keys():\n self.value = self.INTEGER_MAP[value]\n else:\n raise ValueError('The given integer \"%s\" is not a rarity!' % value)", "title": "" }, { "docid": "52d84053bda45927b2dfe70cbe1733b0", "score": "0.65038157", "text": "def CheckNumberIsInt(userInput):\n try:\n int(userInput)\n return True\n except(ValueError):\n return False", "title": "" }, { "docid": "5915e74c15a3bee7b4a2e301b72d1dfb", "score": "0.6497391", "text": "def test_does_not_cast_to_int_if_to_int_is_False(self):\n value = 10.5\n minimum = 2\n maximum = 15\n\n output = bound_value(value, minimum=minimum, maximum=maximum, to_int=False)\n\n self.assertFalse(isinstance(output, int))", "title": "" }, { "docid": "007e5f8fc76466a1de806bb7540628d4", "score": "0.64842844", "text": "def integer(self, rule, **kwargs):\n raise NotImplementedError()", "title": "" }, { "docid": "091c1095ca5dee38d26ef902297d372a", "score": "0.6480689", "text": "def _get_int(value):\n if -2147483648 <= value <= 2147483647:\n return \"int\"\n else:\n return \"long\"", "title": "" }, { "docid": "efbbb71166317e13081e209c687574de", "score": "0.6469315", "text": "def ensure_int(value, default):\n try:\n value = int(value)\n except:\n return default\n return value", "title": "" }, { "docid": "b3251e8bd90a56e4f2775f212a065d0d", "score": "0.64663005", "text": "def eh_int_pos(univ):\n return isinstance(univ, int) and univ >= 0", "title": "" }, { "docid": "1de619294d93ef7489f6e3a6e9293511", "score": "0.64439994", "text": "def IsInteger(sel):\n try:\n int(sel)\n return True\n except ValueError:\n pass\n\n return False", "title": "" }, { "docid": "fa7b22a4e051d21900017bb72784d3e9", "score": "0.6442546", "text": "def is_int(x):\r\n try:\r\n int(x)\r\n return True\r\n except ValueError:\r\n return False", "title": "" }, { "docid": "f5f760a9a5ad61db4149d79fc34c6a05", "score": "0.6439718", "text": "def isInteger(self):\n return not self.is_list", "title": "" }, { "docid": "32f65a8d89c48f5e02d9c4771814987b", "score": "0.64355165", "text": "def is_int(testable: Any) -> bool:\n if type(testable) == int:\n return True\n elif type(testable) == str:\n try:\n int(testable)\n return True\n except ValueError:\n return False\n return False", "title": "" }, { "docid": "cf9e57e6c3e50d5c24035864dd46e5a3", "score": "0.6424417", "text": "def test_is_integer():\n\n assert rdu.is_integer(1)\n assert not rdu.is_integer(1.0)\n assert not rdu.is_integer(np.array([1]))\n assert not rdu.is_integer(np.array(1))", "title": "" }, { "docid": "62468fa17738fc593d491c923d04171e", "score": "0.64227057", "text": "def is_int(self, num):\n try:\n int(num)\n return True\n except ValueError:\n return False", "title": "" }, { "docid": "46c71b9022f0c9e9c9250bec248a7b35", "score": "0.6418765", "text": "def test_integerize_arg(self):\n # Test\n test_value = '30'\n result = uri.integerize_arg(test_value)\n self.assertEqual(result, int(test_value))\n\n # Fail for non numeric 
strings and booleans\n result = uri.integerize_arg('Test')\n self.assertEqual(result, None)\n result = uri.integerize_arg(None)\n self.assertEqual(result, None)\n result = uri.integerize_arg(False)\n self.assertEqual(result, None)", "title": "" }, { "docid": "e12434f39e5ca26488d881ab329a8868", "score": "0.6410669", "text": "def integerTest(test, *args):\n\n try:\n int(test)\n return True\n except:\n return False", "title": "" }, { "docid": "9a4aca2c805cad2cc89c8b01308ebdde", "score": "0.6400752", "text": "def is_int(value):\n try:\n if int(f'{value}') == int(value):\n return True\n except ValueError as e:\n pass\n\n return False", "title": "" }, { "docid": "ed151a8f07f3bd5c458ed57f465c3187", "score": "0.64006335", "text": "def try_cast_to_int(self, val):\n try:\n return int(val)\n except ValueError:\n return val", "title": "" }, { "docid": "bf8d5bb39a5f450d74c336fe29cb31fe", "score": "0.6392891", "text": "def is_int(a):\r\n try:\r\n int (a)\r\n return True\r\n except:\r\n return False", "title": "" }, { "docid": "aa31d4c46592b1885d3afbe89086e6d2", "score": "0.63914144", "text": "def check_choice_is_int(num):\n is_int = False\n while is_int == False:\n try:\n num = int(num)\n except:\n num = input(\"Vous devez saisir un entier .. Veuillez reessayez : \")\n else:\n is_int = True\n return num", "title": "" } ]
81d4e11178365569ca0b96520a3dfa26
Since this view will only be used by admin, just get the whole list of attendees
[ { "docid": "83ae6a1f32ba50996ae2b9b33fd5ce4c", "score": "0.60177976", "text": "def attendee_collection(self):\n return CourseAttendeeCollection(self.request.session)", "title": "" } ]
[ { "docid": "c00c53b34d7111218c8a2117bdcc1180", "score": "0.65361494", "text": "def list(self):\n return self._list(\"/tenants\", \"tenants\")", "title": "" }, { "docid": "b13fbed2332f686985f5e76691651a63", "score": "0.60792476", "text": "def get_admins(self):\n response = self.json_api_call('GET', '/admin/v1/admins', {})\n return response", "title": "" }, { "docid": "ca84725203417f6d6319aa3b14d37f5f", "score": "0.60397255", "text": "def get_admins(self):\r\n return list(db.reference('users/').order_by_child('role').equal_to(5).get())", "title": "" }, { "docid": "670ce228b0ef2a5a0b1f7641e71dce6e", "score": "0.57860947", "text": "def get_administrators():\n return dao.get_administrators()", "title": "" }, { "docid": "dbac1b70f81e843aaa29e60e626b5079", "score": "0.5780851", "text": "def get(self):\n return get_all_incidents()", "title": "" }, { "docid": "d7fa340007d2ec9199db30010d4214eb", "score": "0.5725529", "text": "def show_supervisor_accidents(self, supervisor_id):\n query = \"select accident_id from accidents_supervisor where supervisor_id = ?\"\n temp = self.cur.execute(query, [supervisor_id]).fetchall()\n temp = [list(x)[0] for x in temp]\n query2 = \"select a.id, at.accident as type, timestamp, name as created_by, location, status from accident as a \" \\\n \"inner join worker as w on w.id = a.created_by inner join accident_type as at on at.id= \" \\\n \"a.type where status='WIP' and a.id in (%s)\" % ','.join('?'*len(temp))\n return self.cur.execute(query2, temp)", "title": "" }, { "docid": "e1daf939c422b81af4ff9e9af465466d", "score": "0.57240874", "text": "def all_admins():\n db_users = User.query.filter(User.is_admin==True).all()\n print(\"---Admins---\")\n for one_user in db_users:\n print(\"-User Name: \", one_user.user_name, \", User Email: \", one_user.user_email)\n print(\"###END###\")", "title": "" }, { "docid": "cd876aa0944a2a89fca424caa7fb2fef", "score": "0.5707112", "text": "def list_tenants_request():\n\n path = 'tenants'\n params: dict = {}\n\n response = http_request('GET', path, params)\n return response", "title": "" }, { "docid": "f230d625a4ac364dfa4f4f38f1088874", "score": "0.5641145", "text": "def get_queryset(self, request):\n return super(KartegeMemberAdmin, self).get_queryset(request).kartege_members()", "title": "" }, { "docid": "bfd0ef6cfdb67854b53d7d9166f01ab1", "score": "0.56309295", "text": "def get_queryset(self):\n\n if self.action in [\"list\"]:\n return (\n super()\n .get_queryset()\n .filter(\n user_accesses__user_id=self.request.user.id,\n user_accesses__role__in=[ADMINISTRATOR, INSTRUCTOR],\n )\n )\n\n return super().get_queryset()", "title": "" }, { "docid": "f06aed25f5649c979522145a897403c9", "score": "0.56174165", "text": "def assessors(self):\n return self.assignees_by_type.get(\"Assessor\", [])", "title": "" }, { "docid": "f2a6c13236c13a6207c45e159ffbf309", "score": "0.56128323", "text": "def get_admins(self, account_id, params={}):\n url = ADMINS_API.format(account_id)\n\n admins = []\n for data in self._get_paged_resource(url, params=params):\n admins.append(CanvasAdmin(data=data))\n return admins", "title": "" }, { "docid": "dc60c937523d03daccf7193b879776c0", "score": "0.5585256", "text": "def __init__(self, *args, **kwargs):\n super(self.__class__, self).__init__(*args, **kwargs)\n self.fields['attendees'].queryset = User.objects.filter(\n is_active=True, is_staff=False).order_by('username')", "title": "" }, { "docid": "56aba714a6a09803123084b684a5dd8f", "score": "0.55329424", "text": "def paginated_attendees(request, domain):\n limit = 
int(request.GET.get('limit', 10))\n page = int(request.GET.get('page', 1))\n query = request.GET.get('query')\n\n cases, total = get_paginated_attendees(\n domain=domain,\n limit=limit,\n page=page,\n query=query\n )\n\n return JsonResponse({\n 'attendees': [{'case_id': c.case_id, 'name': c.name} for c in cases],\n 'total': total,\n })", "title": "" }, { "docid": "693439f2622ad4b52b9381101ef8009d", "score": "0.5511714", "text": "def get_admins(self):\n return FacilityUser.objects.filter(dataset_id=self.dataset_id).filter(\n roles__kind=role_kinds.ADMIN, roles__collection=self\n )", "title": "" }, { "docid": "7392ca2ee2ecb8d1485abe73fcd81c52", "score": "0.54902506", "text": "def get_all(self):\n # Get the accounts\n admin = self._get_admin()\n list_accounts = admin.list_accounts()\n\n # Return appropriately\n self.request.response.status_int = 200\n return list_accounts", "title": "" }, { "docid": "35195f937f0803e0263b1395438f3daa", "score": "0.54883796", "text": "def get_admins():\n members = client.get_all_members()\n timestamp = time()\n admins = [user for user in members if 'Staff' in [role.name for role in user.roles]]\n admins = [Admin_state(user, timestamp) for user in admins]\n return admins", "title": "" }, { "docid": "c4af4b238be3f7349d3af0a5a919e19d", "score": "0.5483893", "text": "def admin_course_list_tas():\n\n # Get all the TAs in the current course context\n tas = User.query.join(TAForCourse).filter(\n TAForCourse.course_id == course_context.id,\n ).all()\n\n # Return the list of basic user information about the tas\n return success_response({'users': [\n {\n 'id': user.id, 'netid': user.netid,\n 'name': user.name, 'github_username': user.github_username\n }\n for user in tas\n ]})", "title": "" }, { "docid": "50578a5f21d515cc2c7445087f524e82", "score": "0.54485285", "text": "def get_tenants(self):\n cmds = ['show openstack config region %s' % self.region]\n command_output = self._run_eos_cmds(cmds)\n tenants = command_output[0]['tenants']\n\n return tenants", "title": "" }, { "docid": "c059b7f507b5fb20296f4cc1da4abb9a", "score": "0.5437604", "text": "def tenants(self):\n return [Tenant(data) for data in self._current.get('tenants', [])]", "title": "" }, { "docid": "a35ae7e0cf2181261ee25d69129d6a66", "score": "0.542867", "text": "def admins(self) -> Optional[Sequence['outputs.PrincipalsResponse']]:\n return pulumi.get(self, \"admins\")", "title": "" }, { "docid": "5b419797731bf9a966df8de094e37033", "score": "0.54125917", "text": "async def _adm_show(self, ctx):\n db_admin = self.bot.database.get_admins(ctx.guild.id)\n\n if db_admin is not None:\n\n admins = \"```The admins are: \\n\"\n\n for admin in db_admin:\n admin_user = ctx.bot.get_user(int(admin.user_id))\n admins += f\"{admin_user.name}#{str(admin_user.discriminator)}\\n\"\n\n await ctx.send(admins + \"```\")\n else:\n await ctx.send(\"This server does not appear to have any registered admins, sorry.\")", "title": "" }, { "docid": "e1552b888e62918fa45f5ecbadf59639", "score": "0.5405768", "text": "def get_edit_announcements(self):\n items = AnnouncementEntity.get_announcements()\n items = AnnouncementsRights.apply_rights(self, items)\n\n main_content = self.get_template(\n 'announcement_list.html', [TEMPLATE_DIR]).render({\n 'announcements': self.format_items_for_template(items),\n 'status_xsrf_token': self.create_xsrf_token(\n AnnouncementsItemRESTHandler.STATUS_ACTION)\n })\n\n self.render_page({\n 'page_title': self.format_title('Announcements'),\n 'main_content': jinja2.utils.Markup(main_content)})", "title": "" }, 
{ "docid": "4a902027769cc230e7bf213a9cc6bf7e", "score": "0.53788435", "text": "def student_attendees(self, ):\n\n parts = self.students_state().filter(\n confirmed__gt=CONFIRM_UNSET,\n valid__gt=VALID_UNSET,\n )\n print \"attendees\", parts\n return parts", "title": "" }, { "docid": "5c1052f42882ec02d5abd39d2b052776", "score": "0.53761476", "text": "def get_attendee(host=\"http://localhost:3000\"):\n test = requests.get(host+\"/api/attendee\")\n assert test\n assert test.status_code == 200\n response = json.loads(test.text)\n assert isinstance(response, list)\n return response", "title": "" }, { "docid": "1de718977a7dd99c9bf08ddc2e976732", "score": "0.5349578", "text": "async def list_entries(self, ctx: Context) -> None:\n await ctx.send(embed=await self.get_responses_list(final=False)) # Send a list of users", "title": "" }, { "docid": "5c7a79e8c07f53e3e48515765bed49f7", "score": "0.53470033", "text": "def getAdminList(self, roomJID):\r\n return self._getAffiliationList(roomJID, 'admin')", "title": "" }, { "docid": "974ff4dbeb351d614579e0d949458f52", "score": "0.53343743", "text": "def display_accounts(cls):\n return cls.cred_list", "title": "" }, { "docid": "c6ac3335634d82cb1c5606c78c670314", "score": "0.5332542", "text": "def tournaments(self):\n return Tournaments(self)", "title": "" }, { "docid": "5a33ae1a6e02ec2dbe31d17e5c4c18b4", "score": "0.53210807", "text": "def get_queryset(self, request):\n opts = self.model._meta\n qs = super(OwnableAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(user__id=request.user.id)", "title": "" }, { "docid": "02fec8c720d232a0fff337835e2c6a43", "score": "0.532012", "text": "def get_queryset(self):\n return super().get_queryset().filter(account_id=self.request.user.account_id)", "title": "" }, { "docid": "02fec8c720d232a0fff337835e2c6a43", "score": "0.532012", "text": "def get_queryset(self):\n return super().get_queryset().filter(account_id=self.request.user.account_id)", "title": "" }, { "docid": "8adcb5e0f42ece78865e28f60f995ef4", "score": "0.5307083", "text": "def admin(self):\n for u in self.users:\n if u.is_admin:\n return u", "title": "" }, { "docid": "897ad6ffaa5e78ba4ba1eb5b5cbd06f5", "score": "0.53056246", "text": "def get_queryset(self, request):\n qs = super(MyMemberAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(priviledged=request.user)", "title": "" }, { "docid": "6df873f4b73774e86b09ba08e85780f2", "score": "0.5301933", "text": "def ListTenants(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "title": "" }, { "docid": "6c96b8023f288f6d048ab0ca0b9ef17a", "score": "0.5292755", "text": "def get_queryset(self,request):\n qs = super(VolumeAdmin,self).get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(owner=request.user)", "title": "" }, { "docid": "4248b0956f874121f9e98a818f6b0c92", "score": "0.5290618", "text": "def get_accident_members(self, accident_id):\n query = \"select supervisor_id from accidents_supervisor where accident_id = ?\"\n return self.cur.execute(query, [accident_id]).fetchall()", "title": "" }, { "docid": "18c39ee9050ca36d8bf8835406c3bff1", "score": "0.5286169", "text": "def get_user_editable(request):\n user = models.Thing.get_or_create_user_thing(request.user)\n # which things may this user administrate?\n return (models.Thing.objects.filter(\n 
relatedthing__thing=user, relatedthing__annotation=\"admin\")\n .values_list(\"id\", flat=True))", "title": "" }, { "docid": "a6977c4eca84ea147ff2e3205e51b607", "score": "0.5283136", "text": "def get_queryset(self):\n\t\tusuario = get_object_or_404(Usuario, user=self.request.user) # get usuario\n\t\tcliente = usuario.cliente # get specified cliente to usuario\n\t\trevenda = usuario.revenda # get revenda\n\t\tif cliente:\n\t\t\treturn Usuario.objects.filter(cliente=cliente, revenda=revenda).order_by('-user')\n\t\treturn Usuario.objects.filter(revenda=revenda).order_by('-user')", "title": "" }, { "docid": "a45824fdd771aadb11c9732dc3d53fb1", "score": "0.5248147", "text": "def get_queryset(self):\n user = self.request.user\n return Timeline.objects.filter(owner=user)", "title": "" }, { "docid": "54167f233df1a97e568b29681596d2ba", "score": "0.5243578", "text": "def get_admin_users(org):\n return __get_org_admin_users(org)", "title": "" }, { "docid": "18a5498aaae4c52cf63dbd752ce4f14f", "score": "0.5239641", "text": "def queryset(self, request):\n qs = super(OwnableAdmin, self).queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(user__id=request.user.id)", "title": "" }, { "docid": "c5344d0a086bd9d888d3d40f0ac42b3e", "score": "0.523745", "text": "def age_group_attendees():\n trends = Trends()\n jwt_user = get_jwt_identity()\n user = trends.database.get_item('users', jwt_user)\n\n authorized = conf.TRENDS_GROUP in user['modules']\n log_request(request, jwt_user, authorized)\n\n if not authorized:\n response = {'message': '%s does not have access to members'%(jwt_user)}\n return jsonify(response), 403\n\n group_by = request.args.get('groupBy')\n if not group_by:\n group_by = 'year'\n response = trends.get_age_group_attendees(group=group_by)\n return jsonify(response)", "title": "" }, { "docid": "e21bd5a0f8fd20dba7ccd9323c8f253a", "score": "0.5234518", "text": "def get_admins(self):\n \n result = []\n for adm_hash in self._adm_ref:\n adm = self._backend.find_user(_hash=adm_hash, parse=True)\n result.append(adm)\n return result", "title": "" }, { "docid": "b67177a10f6de04024a0ea10ffa426ec", "score": "0.52307236", "text": "def ejercitos_perdidos_atacado(self):\n\t\treturn self.atacante_victorias", "title": "" }, { "docid": "9598f8d17e6647a01a51de113eda8078", "score": "0.5228217", "text": "def user_list(cls, me):\n ulist = list()\n users = Transaction.transacting_users(me)\n for u in users:\n ulist.append(u)\n extra = AutopiaUser.objects.filter(Q(username=\"admin\") | Q(username=me));\n for u in extra:\n ulist.append(u)\n return ulist", "title": "" }, { "docid": "7a1dfc17b71869ceb3c285286fe528d8", "score": "0.522627", "text": "def get_tournaments(self):\n\n return self.model.get_tournaments()", "title": "" }, { "docid": "01b5649921f6a2e0f035c0a52cdcb7ce", "score": "0.5214182", "text": "def invitees(self):\n if self._invitees_present:\n return self._invitees_value\n else:\n raise AttributeError(\"missing required field 'invitees'\")", "title": "" }, { "docid": "220bd3064773f511e5272591cb999928", "score": "0.5211418", "text": "def queryset(self, request):\n\n # Get the full set.\n full_set = super(AbstractBrainAdmin, self).queryset(request)\n\n # The superuser can see everything.\n if request.user.is_superuser:\n return full_set\n\n # Filter it down to the owner's items.\n return full_set.filter(owner=request.user)", "title": "" }, { "docid": "c770d42a429e25cf980b17674f3afb56", "score": "0.51995474", "text": "def get_list(self):\n student = None\n user = 
self.personalize_page_and_get_user()\n transient_student = False\n if user is None:\n transient_student = True\n else:\n student = models.Student.get_enrolled_student_by_user(user)\n if not student:\n transient_student = True\n self.template_value['transient_student'] = transient_student\n locale = self.app_context.get_current_locale()\n if locale == self.app_context.default_locale:\n locale = None\n items = AnnouncementEntity.get_announcements(locale=locale)\n items = AnnouncementsRights.apply_rights(self, items)\n self.template_value['announcements'] = self.format_items_for_template(\n items)\n self._render()", "title": "" }, { "docid": "4645ea70be1a9e73c956c26b1ae64e20", "score": "0.51958144", "text": "def get_context_data(self, **kwargs):\n # get all movie object\n kwargs['movie_list'] = Movie.objects.all()\n # get all actor object\n kwargs['actor_list'] = Actor.objects.all()\n return super(AwardEditView, self).get_context_data(**kwargs)", "title": "" }, { "docid": "7f9e23e7472468230b6a95152eab577f", "score": "0.5188733", "text": "def get_all_offices(self):\n\n\t\treturn self.entries", "title": "" }, { "docid": "09e08a8d980d0eb9e49607b360def64c", "score": "0.51833844", "text": "def getAttendeeResource(self, attendee):\n cal = yield self.calendarUnderTest(name=\"calendar\", home=attendee)\n calobjs = yield cal.calendarObjects()\n self.assertEqual(len(calobjs), 1)\n returnValue(calobjs[0])", "title": "" }, { "docid": "437a43b58d3c149ab673c333ec843fec", "score": "0.51825964", "text": "def index_queryset(self, using=None):\n # TODO: Here is probably where you can implement the function of\n # users being able to keep their FullDayOfEating objects private.\n return self.get_model().objects.all()", "title": "" }, { "docid": "437a43b58d3c149ab673c333ec843fec", "score": "0.51825964", "text": "def index_queryset(self, using=None):\n # TODO: Here is probably where you can implement the function of\n # users being able to keep their FullDayOfEating objects private.\n return self.get_model().objects.all()", "title": "" }, { "docid": "647732ad84092d4a3eed8eeda5204222", "score": "0.5181885", "text": "def list_owners(self):\n return list(set([x.get(\"owner\") for x in self._get_list(\"admin.apc_owners\")]))", "title": "" }, { "docid": "33a652fd1fc00a2bc5fe564f5d6d6d31", "score": "0.5173844", "text": "def test_get_tenant_admin(self):\n\n # The expected content, sans uuids.\n EXPECTED_CONTENT = {'count': 1,\n 'next': None,\n 'previous': None,\n 'results': [{'name': 'tenant 1',\n 'owner': 'John',\n 'owner_contact': ''}],\n }\n\n # The tenants we'll create. The first one will be owned by the\n # tenant_admin, and the rest will not.\n TENANTS = [{\"name\": EXPECTED_CONTENT[\"results\"][0][\"name\"],\n \"owner\": EXPECTED_CONTENT[\"results\"][0][\"owner\"]},\n {\"name\": \"Mary Louise\", \"owner\": \"Parker\"},\n {\"name\": \"Angelina\", \"owner\": \"Jolie\"},\n {\"name\": \"Debra\", \"owner\": \"Winger\"},\n ]\n\n # Make four tenants.\n tenants = [Tenant.objects.create(**x) for x in TENANTS]\n\n # Create a tenant_admin user and log her in.\n token = create_and_login(tenant=tenants[0])\n\n # Get the list of tenants. 
Only one being adminstered by the\n # tenant_admin should be in the list.\n response = self.client.get(\n TENANTS_URL,\n HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)\n\n check_response_without_uuid(response,\n HTTP_200_OK,\n EXPECTED_CONTENT,\n True)", "title": "" }, { "docid": "b38e0a6b3f5701a939cd66e1af1a8a2f", "score": "0.5162284", "text": "def get_queryset(self, request):\n qs = super(ProjectAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n # It is mine, all mine. Just return everything.\n return qs\n # Now we just add an extra filter on the queryset and\n # we're done. Assumption: Page.owner is a foreignkey\n # to a User.\n\n # print()\n return qs.filter(partner__in=request.user.partner_set.values_list('pk'))", "title": "" }, { "docid": "5ab081e31a9510729e298699799985b3", "score": "0.5158902", "text": "def get_tournament_list(self):\n path = \"tennis-t2/en/tournaments\".format()\n print(path)\n return self._make_request(path)", "title": "" }, { "docid": "c48d1b8575b082279cb3904d4ba45982", "score": "0.51584285", "text": "def get_queryset(self):\n return super(InfoUserDetailViewSet, self).get_queryset().filter(\n user=self.request.user\n ).all()", "title": "" }, { "docid": "9c9e2c72cb5c8c86737e8f885d7607e3", "score": "0.5152186", "text": "def get_employees(self):\n\t\treturn '%s' % (', '.join(a.username for a in self.employees.all()))", "title": "" }, { "docid": "f0174e2502f750a250ab4e39401fa8d0", "score": "0.51513547", "text": "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"owners\"] = [orgowner.owner for orgowner in self.get_queryset()]\n context[\"owner_objs\"] = self.get_queryset()\n context[\"is_last_user\"] = self._is_last_user()\n context[\"invitations\"] = self._get_invitations()\n return context", "title": "" }, { "docid": "31c699f9697b04ed0bf48ca17cff6198", "score": "0.5145666", "text": "def show_amenities():\n amenities_list = []\n all_amenities = storage.all('Amenity')\n for obj in all_amenities.values():\n amenities_list.append(obj.to_dict())\n return jsonify(amenities_list)", "title": "" }, { "docid": "bf9d0b84f45f32f2b43e3e5f8bbd43d5", "score": "0.51451886", "text": "def test_try_get_all_users_as_attendant(self):\n\n access_token = self.get_token(self.test_owner)\n self.client().post(self.url + 'signup',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=self.test_user)\n access_token = self.get_token(self.test_user)\n response = self.client().get(self.url + 'users',\n headers={\"Authorization\": \"Bearer \" + access_token})\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'),\n \"Only admins can view all users\")\n self.assertEqual(response.status_code, 403)", "title": "" }, { "docid": "b1865e6580237a2db538a8d911eb3413", "score": "0.51320267", "text": "def get_user_choices(self):\n return list(pwncat.victim.users)", "title": "" }, { "docid": "b1c5eaec27d83cfccfc2cb0deb11d676", "score": "0.5114866", "text": "def agent_clients(self):\n # type: () -> List[User]\n return User.objects.filter(profile__agent_code=self.mt4_id)", "title": "" }, { "docid": "42b504bb52cb12f079e52711c74bf40d", "score": "0.51087576", "text": "def ejercitos_perdidos_atacante(self):\n\t\treturn self.atacado_victorias", "title": "" }, { "docid": "d39703d2be373470c45820c66556d106", "score": "0.51029325", "text": "def getAccounts ():\n return _accounts", "title": "" }, { "docid": "c421a46cbd6c4a3f9da948211498e394", "score": "0.5094764", "text": "def 
all(self):\r\n item_id = self.properties['listing']['itemId']\r\n url = \"%scontent/listings/%s/userEntitlements\" % (self._gis._portal.resturl, item_id)\r\n start = 1\r\n num = 100\r\n params = {\r\n 'start' : start,\r\n 'num' : num\r\n }\r\n user_entitlements = []\r\n res = self._con.get(url, params)\r\n user_entitlements += res['userEntitlements']\r\n if 'nextStart' in res:\r\n while res['nextStart'] > 0:\r\n start += num\r\n params = {\r\n 'start' : start,\r\n 'num' : num\r\n }\r\n res = self._con.get(url, params)\r\n user_entitlements += res['userEntitlements']\r\n return user_entitlements", "title": "" }, { "docid": "ddc5c215c851b549619914dab7128357", "score": "0.50858885", "text": "def users(self):\n return self.get_users()", "title": "" }, { "docid": "f26b05202eba9217d05f63c4ccb6aaa3", "score": "0.50725615", "text": "def assignees(self):\n return {self.data['fields']['assignee']['name']}", "title": "" }, { "docid": "de998a78f7685ce86b008d82d5a5b69e", "score": "0.50678325", "text": "def accounts(self):\n return self._accounts", "title": "" }, { "docid": "6b7f91e624b2f62b227657aba689718c", "score": "0.5067491", "text": "def list_tenants(page_token=None, max_results=_MAX_LIST_TENANTS_RESULTS, app=None):\n tenant_mgt_service = _get_tenant_mgt_service(app)\n def download(page_token, max_results):\n return tenant_mgt_service.list_tenants(page_token, max_results)\n return ListTenantsPage(download, page_token, max_results)", "title": "" }, { "docid": "7d2f917ad29f3107c082202ecdada22c", "score": "0.506367", "text": "def get_queryset(self):\n return self.get_organization_queryset()", "title": "" }, { "docid": "7b18e6b2f0e6bc1acf57a9797ece0f07", "score": "0.5060665", "text": "async def admins(self, ctx):\n # roles_ = ['admin', 'mod', 'moderator', 'administrator', 'owner', 'underadministrator', 'moderators', 'founder']\n admin_perms = ['administrator', 'manage_server']\n mod_perms = ['manage_messages', 'ban_members', 'kick_members']\n admin_roles = []\n mod_roles = []\n online = []\n for role in ctx.message.server.roles:\n perms = []\n for s in role.permissions:\n perms.append(s)\n for s in perms:\n for x in admin_perms:\n if s[0] == x and s[1] == True:\n admin_roles.append(role.name.lower())\n for x in mod_perms:\n if s[0] == x and s[1] == True and role.name.lower() not in admin_roles:\n mod_roles.append(role.name.lower())\n for member in ctx.message.server.members:\n if member.bot:\n continue\n roles = list((map(str, member.roles)))\n roles = list(map(lambda x: x.lower(), roles))\n if member.status != discord.Status.online:\n continue\n for x in roles:\n for s in admin_roles:\n if x == s:\n not_in_thing = True\n for idfk in online:\n if '**Admin** `{0}`'.format(member.name) == idfk or '**Mod** `{0}`'.format(member.name) == idfk:\n not_in_thing = False\n break\n if not_in_thing:\n online.append('**Admin** `{0}`'.format(member.name))\n for s in mod_roles:\n if x == s:\n not_in_thing = True\n for idfk in online:\n if '**Mod** `{0}`'.format(member.name) == idfk or '**Admin** `{0}`'.format(member.name) == idfk:\n not_in_thing = False\n break\n if not_in_thing:\n online.append('**Mod** `{0}`'.format(member.name))\n if len(online) == 0:\n await self.bot.say(\"No staff were found that are online!\")\n return\n msg = ''\n for s in online:\n msg += '{0}\\n'.format(s)\n await self.bot.say(\"**Online Staff**\\n\"+msg)", "title": "" }, { "docid": "75258304e34eb23801a3227291e4a9d4", "score": "0.50582814", "text": "def list_all(self):\n\n session = self.session\n endpoint = self.build_endpoint()\n request 
= HttpHelper(session)\n response = request.get(endpoint)\n announcement_list = response[\"announcements\"]\n return announcement_list", "title": "" }, { "docid": "eb75d773f9f66a389196e255eb936e12", "score": "0.50533485", "text": "def acl_entities(self):\n return self._acl_entities", "title": "" }, { "docid": "a5410136dbb7f8962dfb8d65e55702e9", "score": "0.50505257", "text": "def list(self, request):\n a_viewset = [\n 'get acces to the door',\n ]\n\n return Response({'a_viewset': a_viewset})", "title": "" }, { "docid": "77fdf581ecd200d5a9329ec56cd5bb59", "score": "0.50404495", "text": "def get_queryset(self):\n domain = self.request.user.domain\n return Advertisement.objects.filter(adgroup__campaign__domain=domain)", "title": "" }, { "docid": "c38ea3a4e078519f4714773b26c08020", "score": "0.5037301", "text": "def get_queryset(self):\n return User.objects.all()[:]", "title": "" }, { "docid": "ec61b9b080b476ebec609a935b729478", "score": "0.5036627", "text": "def astronauts():\n\n if current_user.role in ['Admin', 'Medic']:\n # pulls all users with the astronaut role from the database.\n all_astronauts = User.query.filter_by(role='Astronaut').all()\n # passes astronauts data and page title into the html.\n return render_template('users_list.html', posts=all_astronauts, title='Astronauts')\n\n return abort(403) # access denied error if current user has an incorrect role.", "title": "" }, { "docid": "f79bcd891f583345420aa35a2d4b50a9", "score": "0.503608", "text": "def get_users(self):\n return []", "title": "" }, { "docid": "6ff8cbf738b2d95c4635ef271e631e42", "score": "0.5034951", "text": "def list(self, request):\n player = Player.objects.get(user=request.auth.user)\n events = Event.objects.filter(attendee=player)\n\n events = EventSerializer(\n events, many=True, context={'request': request})\n player = PlayerSerializer(\n player, many=False, context={'request': request})\n\n # Manually construct the JSON structure you want in the response\n profile = {}\n profile[\"player\"] = player.data\n profile[\"events\"] = events.data\n\n return Response(profile)", "title": "" }, { "docid": "d91a8d2e0eb3271be9c13d24aec0e876", "score": "0.5031167", "text": "def list_users(self):", "title": "" }, { "docid": "234e47012e4c5c7df9a0788a33d2ff33", "score": "0.50294745", "text": "def get_queryset(self):\n return User.objects.filter(id=self.request.user.id)", "title": "" }, { "docid": "3c39bd8dc723e8876f653bcb729c9073", "score": "0.50246483", "text": "def allowedRolesAndUsers( self ):\n ob = self.__ob\n allowed = {}\n for r in rolesForPermissionOn('View', ob):\n allowed[r] = 1\n localroles = mergedLocalRoles(ob)\n for user, roles in localroles.items():\n for role in roles:\n if allowed.has_key(role):\n allowed['user:' + user] = 1\n if allowed.has_key('Owner'):\n del allowed['Owner']\n return list(allowed.keys())", "title": "" }, { "docid": "56e08f975707a98cd38a0a31cf30a196", "score": "0.5022126", "text": "def users_roles(request, kb, tid):\n\n if request.method == 'GET':\n return HttpResponse(\"all good\\n\" + kb)\n else:\n return HttpResponseForbidden()", "title": "" }, { "docid": "b4d1e79dbbbd0aa685fc8b78c4445696", "score": "0.50211686", "text": "def list(self, request, *args, **kwargs): # lint-amnesty, pylint: disable=unused-argument\n queryset = self.filter_queryset(self.get_queryset())\n user = self.request.user\n if not user.is_staff:\n with transaction.atomic():\n for entitlement in queryset:\n entitlement.update_expired_at()\n\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = 
self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)", "title": "" }, { "docid": "548d67319c9fcc9c5e9e2e44537c3426", "score": "0.50144446", "text": "def get_queryset(self):\n user = self.request.user\n return Eventgroup.objects.filter(owner=user)", "title": "" }, { "docid": "00fa8f00e93c10a2152c86bb2ee4ab18", "score": "0.50002116", "text": "def get_queryset(self):\n user = self.request.user\n if not user.is_authenticated:\n return []\n # if the user is chris then return all the feeds in the system\n if user.username == 'chris':\n return Feed.objects.all()\n return Feed.objects.filter(owner=user)", "title": "" }, { "docid": "50f80508cc938fcb7f6852db620bb991", "score": "0.49894053", "text": "def get_queryset(self):\n return Habit.objects.filter(user=self.request.user)", "title": "" }, { "docid": "463d764cceb1dce0ce89c368e531ae94", "score": "0.49878815", "text": "def assignees(self):\n if not self.can_update():\n self._handle_error(910, [self.type])\n\n return self.tc_requests.assignees(self.api_type, self.unique_id)", "title": "" }, { "docid": "f1f3c11838235fb95483f33ef7654153", "score": "0.4984394", "text": "def get_all_anon():\n st = Announcement.query.all()\n anns = [{'name': r.name, 'id': r.id, 'price': r.price, 'body': r.body, 'user_id': r.user_id} for r in st]\n return jsonify(anns)", "title": "" }, { "docid": "448d80d0cf10aacf717dc21207135976", "score": "0.49802545", "text": "def show(domain):\n print \"Domain\\t\\tAdmins\"\n if domain == \"all\":\n tmpD = Domain.query.all()\n else:\n tmpD = Domain.query.filter_by(name=domain)\n for dom in tmpD:\n admins = []\n for admin in dom.admin_maps.all():\n admins.append(admin.user.email)\n print \"{0}\\t\\t{1}\".format(dom.name, \", \".join(admins))", "title": "" }, { "docid": "c583f59bb2aba2a9a0fdef8d3ac6249b", "score": "0.4975667", "text": "def get_admin_ids(bot, chat_id):\n return [admin.user.id for admin in bot.getChatAdministrators(chat_id)]", "title": "" }, { "docid": "80db22e8f9c0dbb7b70bfaa1bffc3844", "score": "0.4974565", "text": "def get_teachers():\n teachers = Teacher.query.all()\n\n return render_template('teacher/teachers_list.html', title='Teachers List',\n teachers=teachers)", "title": "" }, { "docid": "8a4744c37def83e987b8d00024304acc", "score": "0.49738643", "text": "def json_get_all_emails(request):\n emails = [p.user.email for p in models.Player.current_players()]\n \n # json.dumps creates a string from a Python object.\n json_data = json.dumps(emails)\n\n return HttpResponse(\n json_data,\n content_type=\"application/json\"\n )", "title": "" }, { "docid": "7507535d3106fe0d8fed5d5a0d9caf9b", "score": "0.4973367", "text": "def get_users(self):\n response = self.json_api_call('GET', '/admin/v1/users', {})\n return response", "title": "" }, { "docid": "e0bb6d9f5a45204f55dada51c1c66fbb", "score": "0.4968997", "text": "async def get_aliases_for_room(\n self, requester: Requester, room_id: str\n ) -> Sequence[str]:\n # allow access to server admins and current members of the room\n is_admin = await self.auth.is_server_admin(requester)\n if not is_admin:\n await self.auth.check_user_in_room_or_world_readable(room_id, requester)\n\n return await self.store.get_aliases_for_room(room_id)", "title": "" }, { "docid": "101a69ac69628aee4f71fddb373ffe26", "score": "0.49649167", "text": "def list(self, request):\n landlord = Landlord.objects.get(user=request.auth.user)\n current_users_tenants = 
Tenant.objects.filter(landlord=landlord)\n\n # The table on the front end requires an object where the\n # id's are keys and names are values\n table = self.request.query_params.get('table', None)\n if table is not None:\n tenant_obj = {}\n for tenant in current_users_tenants:\n tenant_obj[tenant.id] = tenant.full_name\n \n to_string = json.dumps(tenant_obj, separators=None)\n\n return Response(to_string)\n\n search_term = self.request.query_params.get('search', None)\n if search_term is not None:\n current_users_tenants = current_users_tenants.filter(phone_number__icontains=search_term\n ) | current_users_tenants.filter(email__icontains=search_term\n ) | current_users_tenants.filter(full_name__icontains=search_term\n ) \n\n # Connect rented properties to tenants through the relationship table\n try:\n for tenant in current_users_tenants:\n lease = TenantPropertyRel.objects.filter(tenant=tenant)\n tenant.rented_property = lease\n\n # Check if the lease is active and add a custom property\n current_day = date.today()\n for rp in tenant.rented_property:\n if current_day > rp.lease_start and current_day < rp.lease_end:\n rp.active = True\n else: \n rp.active = False\n\n except TenantPropertyRel.DoesNotExist:\n current_users_tenants.rented_property = None\n\n # If the tenant does not have a lease, null will be \n # returned rather than an empty array\n for tenant in current_users_tenants:\n if not tenant.rented_property:\n tenant.rented_property = None\n \n serializer = TenantSerializer(\n current_users_tenants, many=True, context={'request': request}\n )\n return Response(serializer.data)", "title": "" } ]
102470a2cbd8a8384dfb0744ab966f09
Returns ``True`` if the current node does not have any siblings.
[ { "docid": "00c7acd158b6d7a7e0a1deead49129f5", "score": "0.0", "text": "def is_single_child(context: TreeContext) -> bool:\n return len(context[-2]._children) == 1", "title": "" } ]
[ { "docid": "2b2207a21da8511908fb1ad60ade9e26", "score": "0.7273685", "text": "def isSiblingOf(self, node):\n return (self in node.siblings())", "title": "" }, { "docid": "d2491c3b0f2a6d54ff005364b5d1a2dd", "score": "0.7044521", "text": "def is_root(self):\n return not self._elements", "title": "" }, { "docid": "2aa5d14438e3fe1869be4f6a0dc24a39", "score": "0.699461", "text": "def is_empty(self) -> bool:\n return not bool(self._childs)", "title": "" }, { "docid": "377c59f8c16693b669a9d694473d1ffd", "score": "0.697707", "text": "def is_root(self):\n return not self._parent", "title": "" }, { "docid": "fb1e4f73a213b1b61125d5dab53c7cd0", "score": "0.69638973", "text": "def has_children(self):\n\n return len(self.children) == 0", "title": "" }, { "docid": "22e4292b9c22d411020347781e316d2c", "score": "0.6950718", "text": "def is_child_node(self):\n return not self.is_root_node()", "title": "" }, { "docid": "1a7f17f085766d42d57f7de9132d02e3", "score": "0.6926308", "text": "def isLeaf(self):\n return len(self.children) == 0 or not any(self.children)", "title": "" }, { "docid": "b4fc0b37e187cf20daa0bbb2097879ed", "score": "0.69150645", "text": "def is_terminal(self):\n return len(self.children) == 0", "title": "" }, { "docid": "58160f612ca2974bc42190f3ba1ba7f1", "score": "0.6893699", "text": "def is_empty(self):\n return len(self.children) == 0", "title": "" }, { "docid": "58160f612ca2974bc42190f3ba1ba7f1", "score": "0.6893699", "text": "def is_empty(self):\n return len(self.children) == 0", "title": "" }, { "docid": "23b2bd29c430db82580ff6166742c8cd", "score": "0.6870679", "text": "def is_root(self):\n return len(self.parents) == 0", "title": "" }, { "docid": "36eb1bc763fd5736aa3325f7beb30725", "score": "0.6825843", "text": "def isRoot(self):\n return not self.parent", "title": "" }, { "docid": "9c183553063883ce5743ca655bb8fff7", "score": "0.68193823", "text": "def isEmpty(self):\n return self.__firstnode == None", "title": "" }, { "docid": "311bd53ae54d95948bae1ab0ce344b7c", "score": "0.68163496", "text": "def is_leaf(self):\n\t\treturn not self.children", "title": "" }, { "docid": "32e6790d322e67d612a5ae74cb7ee1aa", "score": "0.6801577", "text": "def leaf(self):\n return len(self._children) == 0", "title": "" }, { "docid": "2b162c74ddfa5a01fe1d712e00f3dcff", "score": "0.6799686", "text": "def is_leaf(self):\n return (not self.data) or \\\n (all(not bool(c) for c, p in self.children))", "title": "" }, { "docid": "2b162c74ddfa5a01fe1d712e00f3dcff", "score": "0.6799686", "text": "def is_leaf(self):\n return (not self.data) or \\\n (all(not bool(c) for c, p in self.children))", "title": "" }, { "docid": "4702591ed0f2875a6fb09facbf4bc64d", "score": "0.6790039", "text": "def isRootNode(self):\n return self.parent == None", "title": "" }, { "docid": "4583b915ae996de45ee39b83a3ff3b8d", "score": "0.6758766", "text": "def is_leaf_node(self):\n return not self.get_descendant_count()", "title": "" }, { "docid": "a52cdd044717b566d8f365c6a74a50c4", "score": "0.6744806", "text": "def is_empty(self):\n return self.head_node is None", "title": "" }, { "docid": "249e5772646519c845880d2c90fb225a", "score": "0.6729777", "text": "def isEmptyTree(self):\n return len(self.root.children) == 0", "title": "" }, { "docid": "66561f9e3cd04dc615ef58de9cce3e8c", "score": "0.6720928", "text": "def hasNext(self) -> bool:\n return self.root.left is not None or self.root.right is not None", "title": "" }, { "docid": "674838294e130ebe5b333bdc83200f9e", "score": "0.6713312", "text": "def has_children(self):\n return 
self._right_child and self._left_child", "title": "" }, { "docid": "6bda59a92fa1de5162f10142b14c04a0", "score": "0.6711664", "text": "def is_leaf(self):\n return len(self.children) == 0", "title": "" }, { "docid": "6bda59a92fa1de5162f10142b14c04a0", "score": "0.6711664", "text": "def is_leaf(self):\n return len(self.children) == 0", "title": "" }, { "docid": "6bda59a92fa1de5162f10142b14c04a0", "score": "0.6711664", "text": "def is_leaf(self):\n return len(self.children) == 0", "title": "" }, { "docid": "810e405097d9428e087c428ad00ee606", "score": "0.6703406", "text": "def is_leaf(self):\n\n return not self.has_children()", "title": "" }, { "docid": "2ad8978618d75ff4a3cfccc183bae164", "score": "0.6690123", "text": "def isEmpty(self):\n\t\treturn self.root == None", "title": "" }, { "docid": "02db821c817beb3d139f25c5f68689ef", "score": "0.6673303", "text": "def has_child(self):\n return not not self.subitems", "title": "" }, { "docid": "5d3708efe55e5cf90653e319ffc862cf", "score": "0.6662752", "text": "def is_empty(self) -> bool:\n return False if self.__root else True", "title": "" }, { "docid": "b3927183cc137359c480293ea80a1dba", "score": "0.66597563", "text": "def is_empty(self):\n if (self.root == None):\n return True\n return False", "title": "" }, { "docid": "fb7821bd27db331972e5de7f6f03a383", "score": "0.66276795", "text": "def is_leaf():\n return len(self.children) == 0", "title": "" }, { "docid": "d292aaaaf1cffa4e06c274c1adbb0467", "score": "0.66276395", "text": "def at_root(self):\n\n return self.current_item.parent_item == None", "title": "" }, { "docid": "b263de452710f2efdeb886eac2ce1301", "score": "0.6626509", "text": "def is_root(self) -> bool:\n return self.parent is None", "title": "" }, { "docid": "04b42c561a183571119baf4c50dd33e5", "score": "0.66074437", "text": "def is_empty(self):\r\n if self.root is None:\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "5db451e07bb940a6db4dba8fcdd6d11f", "score": "0.6573812", "text": "def is_empty(self):\n return self.root is None", "title": "" }, { "docid": "5db451e07bb940a6db4dba8fcdd6d11f", "score": "0.6573812", "text": "def is_empty(self):\n return self.root is None", "title": "" }, { "docid": "3ff5b928583e5efa1d69d0648362f5d7", "score": "0.6568509", "text": "def is_empty(self):\n\n # Check if the object's `header_node` attribute actually points\n # to a node (i.e., its `next_node` attribute is not equal to\n # None)\n if self.header_node.next_node is None:\n return True\n return False", "title": "" }, { "docid": "fb0f274b7c6680f93a2fcda1cbe12ec2", "score": "0.6560411", "text": "def has_children(self):\n\n return bool(self.children)", "title": "" }, { "docid": "c8479dbe91083766d1efa69997afeaa2", "score": "0.65397644", "text": "def is_empty(self):\n return self._root is None", "title": "" }, { "docid": "c8479dbe91083766d1efa69997afeaa2", "score": "0.65397644", "text": "def is_empty(self):\n return self._root is None", "title": "" }, { "docid": "c8479dbe91083766d1efa69997afeaa2", "score": "0.65397644", "text": "def is_empty(self):\n return self._root is None", "title": "" }, { "docid": "c8479dbe91083766d1efa69997afeaa2", "score": "0.65397644", "text": "def is_empty(self):\n return self._root is None", "title": "" }, { "docid": "c8479dbe91083766d1efa69997afeaa2", "score": "0.65397644", "text": "def is_empty(self):\n return self._root is None", "title": "" }, { "docid": "063bf8fb9e98b51b606c0a21d8fb4539", "score": "0.65318996", "text": "def is_terminal(self):\n return not bool(self.left_child and self.right_child)", 
"title": "" }, { "docid": "ed82cffe07338b641b5a5786d03019dd", "score": "0.6530976", "text": "def is_leaf(self):\n\t\treturn len(self.children) == 0", "title": "" }, { "docid": "28f66f82d22ff82f3901fb9225d9adf7", "score": "0.6522767", "text": "def is_leaf(self):\n return len(self.links) == 1", "title": "" }, { "docid": "03254bb2ead339f02d902f3f2757a8f9", "score": "0.6507665", "text": "def is_leaf(self) -> bool:\n return self.left_child is None and self.right_child is None", "title": "" }, { "docid": "ff04e285e66351288c0998e095bf1e67", "score": "0.6501473", "text": "def isLeaf(self):\n return not (self.rightChild or self.leftChild)", "title": "" }, { "docid": "178057ffabf7b00c080e785efb7f7877", "score": "0.6489759", "text": "def is_root(self):\n return self.parent is None", "title": "" }, { "docid": "0331bce4863226fa44e5b31f3ecf956d", "score": "0.64858955", "text": "def is_root(self):\n if self.isabs():\n head, tail = self.split()\n return head == self\n else:\n return False", "title": "" }, { "docid": "59498aa5328b4e5417491010539410b3", "score": "0.6467206", "text": "def isEmpty(self):\n return self.head == None", "title": "" }, { "docid": "9092dc292d98733ad065c7da3d9812ea", "score": "0.6464035", "text": "def isEmpty(self) -> bool:\n return self.head == None", "title": "" }, { "docid": "9092dc292d98733ad065c7da3d9812ea", "score": "0.6464035", "text": "def isEmpty(self) -> bool:\n return self.head == None", "title": "" }, { "docid": "bdc2bbc3c258a4b86e740a7954b9f0a3", "score": "0.64530855", "text": "def isEmpty(self):\n return True if self.head is None else False", "title": "" }, { "docid": "1061aeb00a478bb97dd0dd86a9aa3d96", "score": "0.6448253", "text": "def has_left_child(self: object) -> bool:\n return self.left is not None", "title": "" }, { "docid": "6bc753cf050036c665b5bc20a2e816e9", "score": "0.6419694", "text": "def isEmpty(self):\r\n return self.head == None and self.tail == None", "title": "" }, { "docid": "1f43071cbecb5206470be7ac3ee6b22f", "score": "0.641721", "text": "def isLeaf(self):\n return self.left == None or self.right == None", "title": "" }, { "docid": "33b77a74a0fef45e7cb5dd5f8f076b14", "score": "0.641617", "text": "def is_empty(self):\n return self.head == self.tail", "title": "" }, { "docid": "4a5ff7be614ea7c882940be057842af1", "score": "0.6410652", "text": "def is_root(self):\n return self.parent is Node.ROOT_PARENT", "title": "" }, { "docid": "48c0ec2c0a98ebfbc67bb30677ee10b3", "score": "0.6407648", "text": "def is_leaf(self):\r\n return not self.left and not self.right", "title": "" }, { "docid": "395ca0ad6aba9015cc03a0fa10d3ccd8", "score": "0.64050347", "text": "def isEmptyLL(self):\n if self.head.next == None:\n return True\n return False", "title": "" }, { "docid": "ef296d91a834afbe0390fc048f839ff2", "score": "0.6396883", "text": "def is_empty(self):\n if self.top == -1:\n return True\n return False", "title": "" }, { "docid": "480641014ea06c59528dc64a11d9c7a2", "score": "0.6394091", "text": "def is_non_oriented(self):\n for node in self.nodes:\n neighbors = self.adjacency_list[node]\n for neighbor in neighbors:\n if node not in self.adjacency_list[neighbor]:\n return False\n return True", "title": "" }, { "docid": "8ad4b33f7da8050ecc2b850b52f9048b", "score": "0.637921", "text": "def is_empty(self):\n return self.head.next is None", "title": "" }, { "docid": "76069d5fc18a7b1410f93cfe66c4227f", "score": "0.6364337", "text": "def is_root_node(self):\n return getattr(self, '%s_id' % self._meta.parent_attr) is None", "title": "" }, { "docid": 
"9304567c0d2ec3c5294ea0fa0d872ac7", "score": "0.6361797", "text": "def isInvalidated(self):\n\n if len(self.relations) > 0:\n return True\n\n # Check children\n if self.currentElement is None:\n return True\n\n if self.currentElement.isInvalidated():\n return True\n\n # Return false\n return False", "title": "" }, { "docid": "e02ae1ea2807bf13a6740e1804dab5dc", "score": "0.6341304", "text": "def is_empty_layout(self) -> bool:\n return not self.get_visible_children()", "title": "" }, { "docid": "0adb34782dbd1ca714788c6488f0549c", "score": "0.63385934", "text": "def is_empty(self):\n\n return not self._stack", "title": "" }, { "docid": "98a98d87d82c6f19a936779a9fe656e4", "score": "0.63358676", "text": "def hasChildren(self):\n\n return (self._children is not None and\n self._children._firstKey is not None)", "title": "" }, { "docid": "d0573da2d8c75d8a52077238226fd42a", "score": "0.63303417", "text": "def is_root(self) -> bool:\n return len(self._parts) == 1", "title": "" }, { "docid": "d7b5db6ac16dc8e246f6a013c9f81ff8", "score": "0.63263756", "text": "def is_leaf(self):\n return not (self._right_child or self._left_child)", "title": "" }, { "docid": "b21af105036d8c4b46008c90881b9e68", "score": "0.63259816", "text": "def is_root(self):\n if self.up is None:\n return True\n else:\n return False", "title": "" }, { "docid": "d6b293d914070c5f8ec03a32eb840888", "score": "0.63204545", "text": "def empty(self) -> bool:\n return not self.out_stack and not self.in_stack", "title": "" }, { "docid": "c732aa66dd5acda0606c315e0e51d977", "score": "0.63199216", "text": "def empty(self) -> bool:\r\n return not self.stack_in and not self.stack_out", "title": "" }, { "docid": "e7d8ae14004c4128a5d553b97ccf5b33", "score": "0.63184375", "text": "def _is_leaf(self, node):\n return len(node.xpath(xmlconst.XPATH_RELATIVE_CHILDREN)) == 0", "title": "" }, { "docid": "8d9db5bc8d27fb8f8e5b5a135490fbc6", "score": "0.63151616", "text": "def hasAnyChildren(self):\n return self.rightChild or self.leftChild", "title": "" }, { "docid": "a2556007fcfb6c8da696b6462be16734", "score": "0.631495", "text": "def is_empty(self):\n # TODO: Check if empty\n if self.front() is None:\n return True\n else:\n return False", "title": "" }, { "docid": "6a5e2551d0d2bb3fb022507d534378b5", "score": "0.6308982", "text": "def has_pointers_recursive(self):\n if self.linked_nodes.exists():\n return True\n for node in self.nodes_primary:\n if node.has_pointers_recursive:\n return True\n return False", "title": "" }, { "docid": "19ed9f8c6ec036974c4cd1e7be01310d", "score": "0.6292435", "text": "def is_empty(self):\n return self.head == None", "title": "" }, { "docid": "a0965ddf70d9b443852e710ad0e4fa96", "score": "0.629123", "text": "def has_next(self):\n if self._current is not None:\n return True\n else:\n return False", "title": "" }, { "docid": "37ebae89a67cce51c5d1021ae3a057d4", "score": "0.6289594", "text": "def is_leaf(self):\r\n if self.left_child is None and self.right_child is None:\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "82c38f95a4820701c8ffc8197ad8c341", "score": "0.6287733", "text": "def IsFirstChild(self):\n\n # False for root.\n if self.IsRoot():\n return False;\n\n # Check.\n return self == self.Parent().FirstChild()", "title": "" }, { "docid": "12a4b95651b595d5624f18c862925e4c", "score": "0.6287128", "text": "def is_root(self):\n return self.root is None", "title": "" }, { "docid": "2867318ea636d3039bcb7ccf40b10146", "score": "0.6283809", "text": "def is_leaf(self):\n # TODO: Check if both left child and 
right child have no value\n return self.left == None and self.right == None", "title": "" }, { "docid": "68592d38fc71b905ef22d3d460f38f82", "score": "0.6282227", "text": "def is_leaf(self):\n return not self.branches", "title": "" }, { "docid": "8b5e6f977c1052004c156aec3450e6b7", "score": "0.6281796", "text": "def is_empty(self):\n return not self.stack", "title": "" }, { "docid": "08a669159821419656899979b9cb50e7", "score": "0.62809247", "text": "def has_children (self):\n return self.check_has_relative(REL_CHILD)", "title": "" }, { "docid": "9de16c1c073e5f9f4ac38cdb8465ea61", "score": "0.627981", "text": "def hasLeftChild(self):\n return self.leftChild is not None", "title": "" }, { "docid": "8b5632925745eecfaaf54d994c1f1771", "score": "0.6277201", "text": "def hasNext(self) -> bool:\n if self.current >= len(self.tree):\n return False\n else:\n return True", "title": "" }, { "docid": "4b9d949b987f50e1cd0edf6d72c177eb", "score": "0.62757987", "text": "def isEmpty(self):\n return self.stack == []", "title": "" }, { "docid": "588c002ddc5f63b5fc0bd3f8cf877360", "score": "0.6261975", "text": "def empty(self):\n return self._head is None", "title": "" }, { "docid": "4f1e45f80e1e1872cba1723a9cab3bc6", "score": "0.6259699", "text": "def isbranch(self):\n\n if len(self.children) > 1:\n return True\n else:\n return False", "title": "" }, { "docid": "57ecf161800748dfd38297562420ea96", "score": "0.625808", "text": "def is_empty(self):\n return self.root.data is None", "title": "" }, { "docid": "a6a8e3074be5c8d1e1c6e5740306184b", "score": "0.6252382", "text": "def empty(self) -> bool:\n return len(self.stack) == 0", "title": "" }, { "docid": "06f2fa2a821ea02c74d282ef9f80f1d1", "score": "0.6245496", "text": "def is_empty(self):\n return self.head is None", "title": "" }, { "docid": "06f2fa2a821ea02c74d282ef9f80f1d1", "score": "0.6245496", "text": "def is_empty(self):\n return self.head is None", "title": "" }, { "docid": "06f2fa2a821ea02c74d282ef9f80f1d1", "score": "0.6245496", "text": "def is_empty(self):\n return self.head is None", "title": "" }, { "docid": "06f2fa2a821ea02c74d282ef9f80f1d1", "score": "0.6245496", "text": "def is_empty(self):\n return self.head is None", "title": "" }, { "docid": "06f2fa2a821ea02c74d282ef9f80f1d1", "score": "0.6245496", "text": "def is_empty(self):\n return self.head is None", "title": "" }, { "docid": "06f2fa2a821ea02c74d282ef9f80f1d1", "score": "0.6245496", "text": "def is_empty(self):\n return self.head is None", "title": "" } ]
04fa42307f6d0531171fdaf923a417ac
add column with percent of each race in geographical region
[ { "docid": "90c97e2ddff3407a870a2e2fd54867e3", "score": "0.717701", "text": "def compute_percent_each_race(self, df):\n percent_black = round(\n (df.loc[:, 'Estimate!!Total:!!Not Hispanic or Latino:!!Black or African American alone']\n / df.loc[:, 'Estimate!!Total:']) * 100,\n 1)\n df['percent_black'] = percent_black\n\n percent_white = round(\n (df.loc[:, 'Estimate!!Total:!!Not Hispanic or Latino:!!White alone']\n / df.loc[:, 'Estimate!!Total:']) * 100,\n 1)\n df['percent_white'] = percent_white\n\n percent_hispanic_or_latino = round(\n (df.loc[:, 'Estimate!!Total:!!Hispanic or Latino:']\n / df.loc[:, 'Estimate!!Total:']) * 100,\n 1)\n df['percent_hispanic_or_latino'] = percent_hispanic_or_latino\n\n return df", "title": "" } ]
[ { "docid": "d6e97377f59a15631938157378aa7106", "score": "0.6053051", "text": "def percentage(municipalities, year, parties, input):\n\n for row in range(len(input)):\n if row != 0:\n # check if the municipality existed in 2017\n if input.loc[row, 'RegioNaam'] in municipalities:\n votes = input.loc[row, 'GeldigeStemmen']\n # write the results to a dictionary and append them to the year\n results = {}\n for category in parties:\n if category != parties[len(parties) - 1]:\n value = input.loc[row, category]\n if type(input.loc[row, category]) == str:\n value = input.loc[row, category]\n value = int(value.replace(',', ''))\n\n # add parties that got more than 1 percent of the vote\n if (value/votes)*100 >= 1:\n results[category] = (value/votes)*100\n\n municipalities[input.loc[row, 'RegioNaam']][year] = results\n\n return municipalities", "title": "" }, { "docid": "f654bff0c13070c5fa5acd66f708081e", "score": "0.5935591", "text": "def stats_per_race(self, race):\n\n race = Population.cast_race_string(race)\n number_of_people = 1\n number_sexual_pref = {SexualPreference.HOMOSEXUAL: 0, SexualPreference.HETERO: 0, SexualPreference.BISEXUAL: 0}\n number_sex = {Sex.MALE: 0, Sex.FEMALE: 0, Sex.OTHER: 0}\n number_skin_tone = {SkinTone.LIGHT: 0, SkinTone.MEDIUM: 0, SkinTone.DARK: 0}\n number_hair_color = {HairColor.BROWN: 0, HairColor.DARK: 0, HairColor.RED: 0, HairColor.BLONDE: 0, HairColor.WHITE: 0}\n\n for person in self.group:\n if person.race == race:\n number_of_people += 1\n number_sexual_pref[person.sexualPreference] += 1\n number_sex[person.sex] += 1\n number_skin_tone[person.skinTone] += 1\n number_hair_color[person.hairColor] += 1\n\n population_statistics_by_race = {\n \"race\": race,\n \"percentageOfPopulation\": number_of_people / len(self.group),\n \"description\": {\n \"SexualPreference\": {\n \"Heterosexual\": 100 * number_sexual_pref[SexualPreference.HETERO] / number_of_people,\n \"Homosexual\": 100 * number_sexual_pref[SexualPreference.HOMOSEXUAL] / number_of_people,\n \"Bisexual\": 100 * number_sexual_pref[SexualPreference.BISEXUAL] / number_of_people\n },\n \"Sex\": {\n \"Male\": 100 * number_sex[Sex.MALE] / number_of_people,\n \"Female\": 100 * number_sex[Sex.FEMALE] / number_of_people,\n \"Other\": 100 * number_sex[Sex.OTHER] / number_of_people\n },\n \"SkinTone\": {\n \"Light\": 100 * number_skin_tone[SkinTone.LIGHT] / number_of_people,\n \"Medium\": 100 * number_skin_tone[SkinTone.MEDIUM] / number_of_people,\n \"Dark\": 100 * number_skin_tone[SkinTone.DARK] / number_of_people\n },\n \"HairColor\": {\n \"Brown\": 100 * number_hair_color[HairColor.BROWN] / number_of_people,\n \"Dark\": 100 * number_hair_color[HairColor.DARK] / number_of_people,\n \"Red\": 100 * number_hair_color[HairColor.RED] / number_of_people,\n \"Blonde\": 100 * number_hair_color[HairColor.BLONDE] / number_of_people,\n \"White\": 100 * number_hair_color[HairColor.WHITE] / number_of_people,\n }\n }\n }\n\n return population_statistics_by_race", "title": "" }, { "docid": "5febe70c7355636a98e1fb8367352bcb", "score": "0.5638741", "text": "def add_percentage_column(dataframe, countcolumn):\n total_sum = dataframe[countcolumn].sum()\n dataframe['percentage'] = dataframe.apply(\n lambda row: row[countcolumn] / total_sum,\n axis=1)\n return dataframe", "title": "" }, { "docid": "5b811ef4ccf4670421705374c73fb72f", "score": "0.56384796", "text": "def features_percent(self):\n \"\"\"\n feature_space=pd.read_csv(os.path.join(self.current_path,'feature_space','Uspace.name'),sep=' ',header=None).iloc[0:self.subs_sis,0]\n 
feature_percent=pd.DataFrame(columns=('feature','percent'))\n index=0\n for feature_name in self.features_name:\n percent=feature_space.str.contains(feature_name).sum()/self.subs_sis\n feature_percent.loc[index]={'feature':feature_name,'percent':percent}\n index+=1\n feature_percent.sort_values('percent',inplace=True,ascending=False)\n return feature_percent\n \"\"\"\n feature_space=pd.read_csv(os.path.join(self.current_path,'feature_space','Uspace.name'),sep=' ',header=None).iloc[0:self.subs_sis,0]\n feature_percent=pd.DataFrame(columns=self.features_name,index=('percent',))\n for feature_name in self.features_name:\n percent=feature_space.str.contains(feature_name).sum()/self.subs_sis\n feature_percent.loc['percent',feature_name]=percent\n return feature_percent", "title": "" }, { "docid": "87a47b40c4adf2dbf313757ce0fb4525", "score": "0.55725765", "text": "def stats_for_year(year):\n p = population_of_big_50.where('time', year).drop('time')\n f = fertility.where('time', year).drop('time')\n c = child_mortality.where('time', year).drop('time')\n p = p.join(\"geo\", f, \"geo\").join(\"geo\", c, \"geo\")\n return p", "title": "" }, { "docid": "56b3aac46865082b83d9bc1c903fc399", "score": "0.5555906", "text": "def stats_for_year(year):\n p = population_of_big_50.where('time', year).drop('time')\n f = fertility.where('time', year).drop('time')\n c = child_mortality.where('time', year).drop('time')\n pfc_join = p.join('geo', f).join('geo', c)\n return pfc_join", "title": "" }, { "docid": "57a556d7a9ddd91a1f9f9ecc8ba252f2", "score": "0.5491985", "text": "def region_pct(region, pctv):\n \n ewp = (region[1] - region[0]) * (pctv * .01)\n nsp = (region[3] - region[2]) * (pctv * .01)\n return((ewp + nsp) / 2)", "title": "" }, { "docid": "728898959b07075aeb5f9f652323e6f8", "score": "0.54622024", "text": "def add_region(t):\n D = t[['pickup_lon', 'pickup_lat']].values\n assert D.shape[0] == t.shape[0], 'You set D using the incorrect table'\n # Always use the same data transformation used to compute vt\n X = (D - pca_means) / np.sqrt(pca_n) \n first_pc = D @ vt[0]\n t.loc[:,'region'] = pd.qcut(first_pc, 3, labels=[0, 1, 2])", "title": "" }, { "docid": "baac2fe708bb6d19027d1565661eef5c", "score": "0.5359229", "text": "def calc_percentages(self):\n\n if 'Stockholder Equity' in self.working.columns:\n self.working['ROE'] = self.working['Net Income'] / self.working['Stockholder Equity']\n self.working['Profit Margin'] = self.working['Net Income'] / self.working['Revenue']\n else:\n print(\"Error: Must fill DataFrame first. 
Run fill_working.\")", "title": "" }, { "docid": "7cedd0d6c2c687e3f7109b63249f9c82", "score": "0.5345097", "text": "def features_percent(self):\n \"\"\"\n feature_percent=pd.DataFrame(self.features_name)\n feature_percent.insert(1,'percent',np.zeros(len(self.features_name)))\n for cv in range(0,self.cv_number):\n feature_space=pd.read_csv(os.path.join(self.current_path,'%s_cv%d'%(self.property_name,cv),'feature_space','Uspace.name'),sep=' ',header=None).iloc[0:self.subs_sis,0]\n index=0\n for feature_name in self.features_name:\n count=feature_space.str.contains(feature_name).sum()\n feature_percent.loc[index,'percent']+=count\n index+=1\n feature_percent.iloc[:,1]=feature_percent.iloc[:,1]/(self.cv_number*self.subs_sis)\n feature_percent.sort_values('percent',inplace=True,ascending=False)\n return feature_percent\n \"\"\"\n feature_percent=pd.DataFrame(columns=self.features_name,index=('percent',))\n feature_percent.iloc[0,:]=0\n for cv in range(0,self.cv_number):\n feature_space=pd.read_csv(os.path.join(self.current_path,'%s_cv%d'%(self.property_name,cv),'feature_space','Uspace.name'),sep=' ',header=None).iloc[0:self.subs_sis,0]\n for feature_name in self.features_name:\n count=feature_space.str.contains(feature_name).sum()\n feature_percent.loc['percent',feature_name]+=count\n feature_percent.iloc[0,:]=feature_percent.iloc[0,:]/(self.cv_number*self.subs_sis)\n return feature_percent", "title": "" }, { "docid": "fd63aef1f6030ad9f4f60b40647cc0af", "score": "0.5318932", "text": "def calc_perc_fires(df, time_measures):\n\n for time_measure in time_measures: \n new_col_name = 'perc_fires{}'.format(time_measure)\n nearby_count_col = 'all_nearby_count{}'.format(time_measure)\n nearby_fires_count_col = 'all_nearby_fires{}'.format(time_measure)\n df[new_col_name] = df[nearby_fires_count_col] / df[nearby_count_col]\n\n return df", "title": "" }, { "docid": "ae417303f1d66ac790d847f31eb1d64d", "score": "0.52989167", "text": "def county_referedum_table(euro_referendum_result, town_list):\n df_referendum = pd.read_csv(euro_referendum_result)\n df_town = pd.read_csv(town_list)\n df_tmp = df_town.merge(\n df_referendum[[\"Area\", \"Pct_Remain\", \"Pct_Leave\", \"Pct_Rejected\"]],\n left_on=\"Town\",\n right_on=\"Area\",\n ).groupby(\"County\")\n df_county = pd.core.groupby.generic.DataFrameGroupBy.mean(df_tmp)\n return df_town.merge(df_county, left_on=\"County\", right_on=\"County\")", "title": "" }, { "docid": "71bada6459326058b780f80803005de6", "score": "0.52879745", "text": "def get_year_stats(field, table, where=None):\n year_data = OrderedDict({})\n raw_data = get_field_data(field, table, where)\n year_data[\"sum_all\"] = get_field_sum(field, table, where)\n year_data[\"count_all\"] = get_field_count(field, table, where)\n year_data[\"mean\"] = get_field_mean(field, table, where)\n year_data[\"stdev\"] = get_field_stdev(field, table, where)\n year_data[\"median\"] = raw_data[field].median()\n year_data[\"skew\"] = raw_data[field].skew()\n year_data[\"kurtosis\"] = raw_data[field].kurtosis()\n year_data[\"max\"] = raw_data[field].map(lambda x: int(x)).max()\n year_data[\"min\"] = raw_data[field].map(lambda x: int(x)).min()\n year_data[\"pct_10\"] = get_percentile(field, table, pct=.1, where=where)\n year_data[\"pct_20\"] = get_percentile(field, table, pct=.2, where=where)\n year_data[\"pct_30\"] = get_percentile(field, table, pct=.3, where=where)\n year_data[\"pct_40\"] = get_percentile(field, table, pct=.4, where=where)\n year_data[\"pct_50\"] = get_percentile(field, table, pct=.5, where=where)\n 
year_data[\"pct_60\"] = get_percentile(field, table, pct=.6, where=where)\n year_data[\"pct_70\"] = get_percentile(field, table, pct=.7, where=where)\n year_data[\"pct_80\"] = get_percentile(field, table, pct=.8, where=where)\n year_data[\"pct_90\"] = get_percentile(field, table, pct=.9, where=where)\n #add race filters to subsets\n race_1_where = where + \"\"\" AND (race_1 = '1' OR race_2 = '1' OR race_3 = '1' OR race_4 = '1' OR race_5 = '1' OR\n co_race_1 = '1' OR co_race_2 = '1' OR co_race_3 = '1' OR co_race_4 = '1' OR co_race_5 = '1')\"\"\"\n raw_data = get_field_data(field, table,race_1_where)\n year_data[\"sum_native\"] = get_field_sum(field, table, race_1_where)\n year_data[\"count_native\"] = get_field_count(field, table, race_1_where)\n year_data[\"mean_native\"] = get_field_mean(field, table, race_1_where)\n year_data[\"stdev_native\"] = get_field_stdev(field, table, race_1_where)\n year_data[\"median_native\"] = raw_data[field].median()\n year_data[\"skew_native\"] = raw_data[field].skew()\n year_data[\"kurtosis_native\"] = raw_data[field].kurtosis()\n year_data[\"max_native\"] = raw_data[field].map(lambda x: int(x)).max()\n year_data[\"min_native\"] = raw_data[field].map(lambda x: int(x)).min()\n year_data[\"pct_10_native\"] = get_percentile(field, table, pct=.1, where=race_1_where)\n year_data[\"pct_20_native\"] = get_percentile(field, table, pct=.2, where=race_1_where)\n year_data[\"pct_30_native\"] = get_percentile(field, table, pct=.3, where=race_1_where)\n year_data[\"pct_40_native\"] = get_percentile(field, table, pct=.4, where=race_1_where)\n year_data[\"pct_50_native\"] = get_percentile(field, table, pct=.5, where=race_1_where)\n year_data[\"pct_60_native\"] = get_percentile(field, table, pct=.6, where=race_1_where)\n year_data[\"pct_70_native\"] = get_percentile(field, table, pct=.7, where=race_1_where)\n year_data[\"pct_80_native\"] = get_percentile(field, table, pct=.8, where=race_1_where)\n year_data[\"pct_90_native\"] = get_percentile(field, table, pct=.9, where=race_1_where)\n\n race_2_where = where + \"\"\" AND (race_1 = '2' OR race_2 = '2' OR race_3 = '2' OR race_4 = '2' OR race_5 = '2' OR\n co_race_1 = '2' OR co_race_2 = '2' OR co_race_3 = '2' OR co_race_4 = '2' OR co_race_5 = '2')\"\"\"\n raw_data = get_field_data(field, table,race_2_where)\n year_data[\"sum_asian\"] = get_field_sum(field, table, race_2_where)\n year_data[\"count_asian\"] = get_field_count(field, table, race_2_where)\n year_data[\"mean_asian\"] = get_field_mean(field, table, race_2_where)\n year_data[\"stdev_asian\"] = get_field_stdev(field, table, race_2_where)\n year_data[\"median_asian\"] = raw_data[field].median()\n year_data[\"skew_asian\"] = raw_data[field].skew()\n year_data[\"kurtosis_asian\"] = raw_data[field].kurtosis()\n year_data[\"max_asian\"] = raw_data[field].map(lambda x: int(x)).max()\n year_data[\"min_asian\"] = raw_data[field].map(lambda x: int(x)).min()\n year_data[\"pct_10_asian\"] = get_percentile(field, table, pct=.1, where=race_2_where)\n year_data[\"pct_20_asian\"] = get_percentile(field, table, pct=.2, where=race_2_where)\n year_data[\"pct_30_asian\"] = get_percentile(field, table, pct=.3, where=race_2_where)\n year_data[\"pct_40_asian\"] = get_percentile(field, table, pct=.4, where=race_2_where)\n year_data[\"pct_50_asian\"] = get_percentile(field, table, pct=.5, where=race_2_where)\n year_data[\"pct_60_asian\"] = get_percentile(field, table, pct=.6, where=race_2_where)\n year_data[\"pct_70_asian\"] = get_percentile(field, table, pct=.7, where=race_2_where)\n 
year_data[\"pct_80_asian\"] = get_percentile(field, table, pct=.8, where=race_2_where)\n year_data[\"pct_90_asian\"] = get_percentile(field, table, pct=.9, where=race_2_where)\n\n race_3_where = where + \"\"\" AND (race_1 = '3' OR race_2 = '3' OR race_3 = '3' OR race_4 = '3' OR race_5 = '3' OR\n co_race_1 = '3' OR co_race_2 = '3' OR co_race_3 = '3' OR co_race_4 = '3' OR co_race_5 = '3')\"\"\"\n raw_data = get_field_data(field, table,race_3_where)\n year_data[\"sum_black\"] = get_field_sum(field, table, race_3_where)\n year_data[\"count_black\"] = get_field_count(field, table, race_3_where)\n year_data[\"mean_black\"] = get_field_mean(field, table, race_3_where)\n year_data[\"stdev_black\"] = get_field_stdev(field, table, race_3_where)\n year_data[\"median_black\"] = raw_data[field].median()\n year_data[\"skew_black\"] = raw_data[field].skew()\n year_data[\"kurtosis_black\"] = raw_data[field].kurtosis()\n year_data[\"max_black\"] = raw_data[field].map(lambda x: int(x)).max()\n year_data[\"min_black\"] = raw_data[field].map(lambda x: int(x)).min()\n year_data[\"pct_10_black\"] = get_percentile(field, table, pct=.1, where=race_3_where)\n year_data[\"pct_20_black\"] = get_percentile(field, table, pct=.2, where=race_3_where)\n year_data[\"pct_30_black\"] = get_percentile(field, table, pct=.3, where=race_3_where)\n year_data[\"pct_40_black\"] = get_percentile(field, table, pct=.4, where=race_3_where)\n year_data[\"pct_50_black\"] = get_percentile(field, table, pct=.5, where=race_3_where)\n year_data[\"pct_60_black\"] = get_percentile(field, table, pct=.6, where=race_3_where)\n year_data[\"pct_70_black\"] = get_percentile(field, table, pct=.7, where=race_3_where)\n year_data[\"pct_80_black\"] = get_percentile(field, table, pct=.8, where=race_3_where)\n year_data[\"pct_90_black\"] = get_percentile(field, table, pct=.9, where=race_3_where)\n\n race_4_where = where + \"\"\" AND (race_1 = '4' OR race_2 = '4' OR race_3 = '4' OR race_4 = '4' OR race_5 = '4' OR\n co_race_1 = '4' OR co_race_2 = '4' OR co_race_3 = '4' OR co_race_4 = '4' OR co_race_5 = '4')\"\"\"\n raw_data = get_field_data(field, table,race_4_where)\n year_data[\"sum_islander\"] = get_field_sum(field, table, race_4_where)\n year_data[\"count_islander\"] = get_field_count(field, table, race_4_where)\n year_data[\"mean_islander\"] = get_field_mean(field, table, race_4_where)\n year_data[\"stdev_islander\"] = get_field_stdev(field, table, race_4_where)\n year_data[\"median_islander\"] = raw_data[field].median()\n year_data[\"skew_islander\"] = raw_data[field].skew()\n year_data[\"kurtosis_islander\"] = raw_data[field].kurtosis()\n year_data[\"max_islander\"] = raw_data[field].map(lambda x: int(x)).max()\n year_data[\"min_islander\"] = raw_data[field].map(lambda x: int(x)).min()\n year_data[\"pct_10_islander\"] = get_percentile(field, table, pct=.1, where=race_4_where)\n year_data[\"pct_20_islander\"] = get_percentile(field, table, pct=.2, where=race_4_where)\n year_data[\"pct_30_islander\"] = get_percentile(field, table, pct=.3, where=race_4_where)\n year_data[\"pct_40_islander\"] = get_percentile(field, table, pct=.4, where=race_4_where)\n year_data[\"pct_50_islander\"] = get_percentile(field, table, pct=.5, where=race_4_where)\n year_data[\"pct_60_islander\"] = get_percentile(field, table, pct=.6, where=race_4_where)\n year_data[\"pct_70_islander\"] = get_percentile(field, table, pct=.7, where=race_4_where)\n year_data[\"pct_80_islander\"] = get_percentile(field, table, pct=.8, where=race_4_where)\n year_data[\"pct_90_islander\"] = 
get_percentile(field, table, pct=.9, where=race_4_where)\n\n race_5_where = where + \"\"\" AND (race_1 = '5' OR race_2 = '5' OR race_3 = '5' OR race_4 = '5' OR race_5 = '5' OR\n co_race_1 = '5' OR co_race_2 = '5' OR co_race_3 = '5' OR co_race_4 = '5' OR co_race_5 = '5')\"\"\"\n raw_data = get_field_data(field, table,race_5_where)\n year_data[\"sum_white\"] = get_field_sum(field, table, race_5_where)\n year_data[\"count_white\"] = get_field_count(field, table, race_5_where)\n year_data[\"mean_white\"] = get_field_mean(field, table, race_5_where)\n year_data[\"stdev_white\"] = get_field_stdev(field, table, race_5_where)\n year_data[\"median_white\"] = raw_data[field].median()\n year_data[\"skew_white\"] = raw_data[field].skew()\n year_data[\"kurtosis_white\"] = raw_data[field].kurtosis()\n year_data[\"max_white\"] = raw_data[field].map(lambda x: int(x)).max()\n year_data[\"min_white\"] = raw_data[field].map(lambda x: int(x)).min()\n year_data[\"pct_10_white\"] = get_percentile(field, table, pct=.1, where=race_5_where)\n year_data[\"pct_20_white\"] = get_percentile(field, table, pct=.2, where=race_5_where)\n year_data[\"pct_30_white\"] = get_percentile(field, table, pct=.3, where=race_5_where)\n year_data[\"pct_40_white\"] = get_percentile(field, table, pct=.4, where=race_5_where)\n year_data[\"pct_50_white\"] = get_percentile(field, table, pct=.5, where=race_5_where)\n year_data[\"pct_60_white\"] = get_percentile(field, table, pct=.6, where=race_5_where)\n year_data[\"pct_70_white\"] = get_percentile(field, table, pct=.7, where=race_5_where)\n year_data[\"pct_80_white\"] = get_percentile(field, table, pct=.8, where=race_5_where)\n year_data[\"pct_90_white\"] = get_percentile(field, table, pct=.9, where=race_5_where)\n return year_data", "title": "" }, { "docid": "9fa6467a86a256143e89c1ae4b837a35", "score": "0.51998633", "text": "def diversity_calculation(df, prefix=''):\n if len(df) == 0:\n d = {\n 'pctwhite': 0,\n 'pctblack': 0,\n 'pctapi': 0,\n 'pcthispanic': 0,\n 'other': 0,\n 'total_count': 0\n }\n else:\n pct = (df[RACES].mean() * 100).map(math.ceil)\n d = dict(pct)\n for k, v in d.items():\n if v == max(pct):\n d[k] = max(pct) - sum(pct) + 100\n break\n d['total_count'] = int(len(df))\n\n if prefix:\n d = {prefix + '_' + k: v for k, v in d.items()}\n return d", "title": "" }, { "docid": "962e0d1d1866d741309415cd31053751", "score": "0.5147744", "text": "def landward_percentage():\n from process_bootstrap_results import shortening_parallel_to_section\n landward_shortening = shortening_parallel_to_section() / 1000\n\n duration = 1.04 - 0.5 # Probably too short...\n rate = plate_motion()\n total = duration * rate\n return landward_shortening / total", "title": "" }, { "docid": "1b7636baafdf012cacf3eaede091235a", "score": "0.5127066", "text": "def make_region_counts_data(profile_info) :\n\n region_names = list(profile_info['region'])\n region_name_set = list(set(region_names))\n\n #Create dataframe containing the user counts within each region:\n region_count_data = pd.DataFrame(columns=[\"region\", \"num users\"])\n for name in tqdm(region_name_set):\n df_to_append = pd.DataFrame({\"region\": [name], \"num users\": [region_names.count(name)]})\n region_count_data = region_count_data.append(df_to_append)\n\n return region_count_data", "title": "" }, { "docid": "8ef2951ecf88eff28ac58c64092a9d9d", "score": "0.5119712", "text": "def aggregate(df, metric, geo_res):\n df = df.copy()\n metric_count_name = \"_\".join([metric, \"num\"])\n metric_prop_name = \"_\".join([metric, \"prop\"])\n\n gmpr = 
GeoMapper()\n geo_key = GEO_KEY_DICT[geo_res]\n df = gmpr.add_population_column(df, \"zip\")\n df = gmpr.replace_geocode(df,\n \"zip\",\n geo_key,\n date_col=\"timestamp\",\n data_cols=[metric_count_name, \"population\"])\n\n df[metric_prop_name] = df[metric_count_name] / df[\"population\"] \\\n * INCIDENCE_BASE\n return df.rename({geo_key: \"geo_id\"}, axis=1)", "title": "" }, { "docid": "ee6a3b629dadc203d92a8b4da5253ed5", "score": "0.5073141", "text": "def format_pct(df_column):\n def format(val):\n a = np.round(val/100*sum(df_column.tolist()), 0)\n return '%d(%.2f%s)' % (int(a), val, '%')\n return format", "title": "" }, { "docid": "d7623e1ee6ce9da9d728b99db1041d8b", "score": "0.5067787", "text": "def analyze(round_result, year):\n ranking = pd.DataFrame({'ID':round_result.iloc[0],'Fortune':round_result.iloc[year+1]}).sort_values(\n by='Fortune',ascending=False).reset_index(drop=True)\n ranking['Percent'] = ranking['Fortune'] / ranking['Fortune'].sum() # the ratio of one's fortune to the whole fortune\n ranking['Cumulative_sum'] = ranking['Percent'].cumsum() # the cumulative sum value\n temp_df = pd.DataFrame({'ID': round_result.iloc[0], 'Initial_fortune': round_result.iloc[2]})\n ranking = ranking.merge(temp_df, how='outer', on='ID') # add initial fortune of each person to make comparisons\n ranking['Increased_by'] = ranking['Fortune'] / ranking['Initial_fortune'] - 1\n # ranking['Increased_by'] = ranking['Increased_by'].apply(lambda x: format(x, '.2%'))\n # the number of people who have less wealth than 45 years ago\n m = ranking[ranking['Increased_by'] < 0]['ID'].count()\n n = ranking.loc[99] # the amounts of wealth top 10% people would obtain\n q = ranking.loc[199][3] # the accumulative sum of wealth percentage that the 200th person owns\n return ranking", "title": "" }, { "docid": "c54a74ddd9aebc899c55c5b35415ae32", "score": "0.50451696", "text": "def build_race_stats(self, race_ids):\n \n race_rows = self.get_from_rows_by_id(self._race_table, race_ids, 'numentrants')\n \n #get entrant stats\n most_entrants = 0\n total_entrants = 0\n for row in race_rows:\n total_entrants += row['numentrants']\n if row['numentrants'] > most_entrants:\n most_entrants = row['numentrants']\n \n\n \n #get quit rate stats\n result_rows = self.get_from_rows_by_id(self._result_table, race_ids, 'id, place')\n\n #use id to keep track of race\n current_id = result_rows[0]['id']\n quits = 0\n joins = 0\n quit_rates = []\n for row in result_rows:\n\n #if new id, compute quit rate for prev race\n if row['id'] != current_id:\n quit_rates.append((quits/joins)*100)\n current_id = row['id']\n joins = 0\n quits = 0\n\n joins += 1\n if row['place'] >= 9998:\n quits += 1\n\n #get last race\n quit_rates.append((quits/joins)*100)\n\n #build race stats\n race_stats = []\n race_stats.append(('Total Races:', len(race_ids)))\n race_stats.append(('Most Entrants:', most_entrants))\n race_stats.append(('Average Entrants:', round(total_entrants/len(race_ids), 2)))\n race_stats.append(('Average Quit Rate:', round(sum(quit_rates)/len(race_ids),2)))\n race_stats.append(('Highest Quit Rate:', round(max(quit_rates), 2)))\n\n print(race_stats)\n return race_stats", "title": "" }, { "docid": "77c59635918a210ed12e625729be3163", "score": "0.50346667", "text": "def lc_returns(df):\n grouped = df.groupby(['grade','term']).sum()[['funded_amnt','total_pymnt']]\n grouped.reset_index(inplace=True)\n grouped['return'] = (grouped['total_pymnt'] / grouped['funded_amnt'] - 1)*100\n grouped['annualized_pct'] = ((((grouped['return']/100)+1) ** (1 / 
(grouped['term']/12))) - 1)*100\n grouped['return_formatted'] = grouped['return'].apply(lambda x: str(round(x,1)) + '%')\n grouped['annualized_return_formatted'] = grouped['annualized_pct'].apply(lambda x: str(round(x,1)) + '%')\n grouped['return'] = grouped['return'].apply(lambda x: round(x,1))\n grouped['annualized_pct'] = grouped['annualized_pct'].apply(lambda x: round(x,1))\n grouped.drop(['funded_amnt','total_pymnt'],axis=1,inplace=True)\n return grouped", "title": "" }, { "docid": "79a1d3ba767bd674e3f9c21a62b5b893", "score": "0.50326324", "text": "def _percent_in_window(df, start_time, end_time, time_column=\"time\"):\n timedelta = pd.to_timedelta(df[time_column], unit=\"s\")\n times = ((timedelta >= pd.to_timedelta(start_time))\n & (timedelta <= pd.to_timedelta(end_time)))\n cols = _get_filterd_columns_list(df)\n total_avg = _calc_avg_occupancy(df[times][cols])\n means = df[times][cols].mean(axis=0, numeric_only=True)\n res = means / means.sum()\n res[\"avg number of passengers\"] = total_avg\n return res", "title": "" }, { "docid": "7b56720725e0bb0ebecc4f4ded6fbfd3", "score": "0.49672484", "text": "def _attach_percentiles(self, ret):\r\n percentiles = self.stats.percentiles\r\n fmt = self.fmt_float\r\n ret.extend([\r\n fmt % percentiles.perc10,\r\n fmt % percentiles.perc50,\r\n fmt % percentiles.perc90,\r\n fmt % percentiles.perc95\r\n ])", "title": "" }, { "docid": "ba8f2d218fad96b28c6caaf6bab5245b", "score": "0.49640206", "text": "def compute(self):\n total_intersect = self.total_intersect[self.total_union != 0] # type: ignore (third-party)\n total_union = self.total_union[self.total_union != 0] # type: ignore (third-party)\n return 100 * (total_intersect / total_union).mean()", "title": "" }, { "docid": "814eb138c35532de249b9ea24dc01359", "score": "0.49507087", "text": "def race_approval_rate(df):\n idx = df['applicant_race_1'].value_counts().index\n d = defaultdict(list)\n for i in idx:\n mask = df['applicant_race_1'] == i\n app_stat= df['action_taken'][mask]\n d[i].append(100*sum(app_stat[app_stat==1])/len(app_stat))\n return d", "title": "" }, { "docid": "3f307cb13b1b95649388ae481141c2b2", "score": "0.49502692", "text": "def compare_gender_race_dist_ppb():\n train_labels = pd.read_csv(\"MIT_dataset/PPB-2017/PPB-2017-metadata.csv\")\n train_labels.columns = [\"id\", \"file\", \"gender\", \"numeric\", \"skin_color\", \"country\"]\n train_labels.gender = train_labels[\"gender\"].str.lower()\n train_labels.skin_color = train_labels[\"skin_color\"].str.lower()\n title = \"Comparison of gender-race distribution of PPB\"\n male_number = len(train_labels.skin_color[train_labels.gender == \"male\"])\n male_count = train_labels.skin_color[train_labels.gender == \"male\"].value_counts() / male_number * 100\n male_count = male_count.rename_axis(\"Race\").reset_index(name=\"Percentage\")\n male_count[\"Gender\"] = \"Male\"\n female_number = len(train_labels.skin_color[train_labels.gender == \"female\"])\n female_count = (\n train_labels.skin_color[train_labels.gender == \"female\"].value_counts() / female_number * 100\n )\n female_count = female_count.rename_axis(\"Race\").reset_index(name=\"Percentage\")\n female_count[\"Gender\"] = \"Female\"\n count = pd.concat([male_count, female_count])\n\n fig = plt.figure(figsize=(6, 5), num=title)\n sns.barplot(x=\"Percentage\", y=\"Race\", hue=\"Gender\", data=count, palette=\"magma\")\n plt.title(title)\n plt.tight_layout()\n plt.show()\n fig.canvas.draw()", "title": "" }, { "docid": "981ce73ac3acb35b54025a5ec149ad52", "score": "0.49178106", 
"text": "def percent_value_counts(df, feature):\n percent = pd.DataFrame(round(df.loc[:,feature].value_counts(dropna=False, normalize=True)*100,2))\n ## creating a df with the\n total = pd.DataFrame(df.loc[:,feature].value_counts(dropna=False))\n\n ## concating percent and total dataframe\n total.columns = [\"Total\"]\n percent.columns = ['Percent']\n return pd.concat([total, percent], axis = 1)", "title": "" }, { "docid": "aa45e65a6dded5f87e90e6aa13110052", "score": "0.49104086", "text": "def compare_gender_race_dist(bias_dataframe=None, our_dataframe=None):\n\n def generate_count1(data, label):\n \"\"\"\n Private function to generate percentages by race-gender\n Args:\n data: Dataframe containing the data\n label: Select a label to difference between classes counted\n\n Returns:\n New dataframe consisting in 3 columns. Unique Values in class_name, percentage for the values,\n and a colum with the label.\n \"\"\"\n train_labels = data.copy()\n only_faces = train_labels[(train_labels.image_class == \"Human Face\")]\n male_number = len(only_faces.race[only_faces.gender == \"Male\"])\n male_count = only_faces.race[only_faces.gender == \"Male\"].value_counts() / male_number * 100\n male_count = male_count.rename_axis(\"Race\").reset_index(name=\"Percentage\").reindex([0, 5, 1, 4, 2, 3, 6])\n male_count[\"Gender\"] = \"Male\"\n male_count[\"Dataset\"] = label\n female_number = len(only_faces.race[only_faces.gender == \"Female\"])\n female_count = only_faces.race[only_faces.gender == \"Female\"].value_counts() / female_number * 100\n female_count = female_count.rename_axis(\"Race\").reset_index(name=\"Percentage\").reindex([0, 5, 1, 4, 2, 3, 6])\n female_count[\"Gender\"] = \"Female\"\n female_count[\"Dataset\"] = label\n frame_count = pd.concat([male_count, female_count])\n return frame_count\n\n data_list = list()\n if bias_dataframe is not None and our_dataframe is not None:\n title = \"Comparison of race-gender distribution for the three datasets\"\n data_list.append(generate_count1(our_dataframe, \"OUR Dataset\"))\n data_list.append(generate_count1(bias_dataframe, \"OUR-B Dataset\"))\n elif bias_dataframe is not None:\n title = \"Comparison of race-gender distribution of Our artificially biased and Mit dataset\"\n data_list.append(generate_count1(bias_dataframe, \"OUR-B Dataset\"))\n else:\n data_list.append(\n generate_count1(pd.read_csv(\"our_dataset/our_dataset_label_val.csv\").copy(), \"OUR Dataset\")\n )\n title = \"Comparison of race-gender distribution of OUR and Mit dataset.\"\n\n count = pd.concat(data_list)\n\n g = sns.catplot(\n data=count,\n x=\"Race\",\n y=\"Percentage\",\n hue=\"Gender\",\n col=\"Dataset\",\n kind=\"bar\",\n height=5,\n aspect=0.8,\n palette=\"magma\",\n legend=False,\n )\n g.set_xticklabels(rotation=65)\n g.fig.suptitle(title)\n g.fig.subplots_adjust(top=0.01, bottom=0)\n plt.legend()\n plt.tight_layout()\n plt.show()\n g.fig.canvas.draw()", "title": "" }, { "docid": "2fa92f577f688d7b8d99e65612235625", "score": "0.4899189", "text": "def prot_coverage(adata, prot_key = 'prot', prot_names_key = 'prot_names', groupby = 'batch'):\n \n # get the levels\n levels = adata.obs[groupby].cat.categories\n \n # initialise dataframe\n prot_coverage = pd.DataFrame(index = adata.uns[prot_names_key][:4], columns=levels)\n \n for level in levels:\n proteins = adata[adata.obs[groupby] == level].obsm[prot_key]\n perc = np.round((np.sum((proteins == 0), axis = 0) / proteins.shape[0])[:4], 2)\n prot_coverage[level] = perc\n \n return prot_coverage", "title": "" }, { "docid": 
"0eb2266fc98e8eb0b8538376870fe8aa", "score": "0.48983917", "text": "def _fill_feature_stats(languages, feature_names, areas):\n total_num_features = len(feature_names)\n for lang_id in languages:\n language = languages[lang_id]\n # Measure of inverse feature sparsity: percentage of populated features.\n language[\"%\"] = ((len(language) - len(_LANGUAGE_COLUMN_NAMES)) *\n 100.0 / total_num_features)\n\n # Fill in area statistics.\n for area in areas:\n count = 0\n features = areas[area]\n for feature in features:\n # Following will insert a 0-valued feature if it is not found.\n if language[feature] != 0:\n count += 1\n language[area] = count / len(features)", "title": "" }, { "docid": "223212042facc45c59ca295962587576", "score": "0.48923", "text": "def split_by_all_race(df):\n dct = {}\n dct['total'] = df.copy()\n dct['native american'] = df[(df['rac1p'].isin(['3', '4', '5']) & (df['hisp'] == '01'))]\n dct['pacific islander and native hawaiian'] = df[(df['rac1p'].isin(['7']) & (df['hisp'] == '01'))]\n dct['african american'] = df[(df['rac1p'].isin(['2']) & (df['hisp'] == '01'))]\n dct['hispanic'] = df[((df['hisp'] != '01') & (df['rac1p'].isin(['8'])))] \n dct['asian'] = df[(df['rac1p'].isin(['6']) & (df['hisp'] == '01'))]\n dct['white'] = df[(df['rac1p'].isin(['1']) & (df['hisp'] == '01'))]\n dct['two or more'] = df[(df['rac1p'].isin(['9']) & (df['hisp'] == '01'))]\n dct['other'] = df[(df['rac1p'].isin(['8']))]\n return dct", "title": "" }, { "docid": "4c2a0b1e8340957d9328e372a50f045b", "score": "0.48850352", "text": "def latest_holding_num_percentage(df_holding, df_universe):\n\n if len(df_holding) < 1:\n return np.nan\n\n\n latest_holding_num_pct = (df_holding.count(axis=1).ix[-1] / df_universe.ix[df_holding.index].count(axis=1)).ix[-1]\n\n return latest_holding_num_pct", "title": "" }, { "docid": "d09d590cfb3b3744e33dcc37cb5a5b68", "score": "0.48675883", "text": "def define_age_percentage(range_percentage):\n cp = {'13_17':0, '18_24':0, '25_34':0, '35_44':0, '45_54':0, '55_65':0, '65':0}\n total = range_percentage['13_17'] + range_percentage['18_24'] + range_percentage['25_34'] + \\\n range_percentage['35_44'] + range_percentage['45_54'] + range_percentage['55_65'] + \\\n range_percentage['65']\n if total != 0:\n cp['13_17'] = (range_percentage['13_17']/total) * 100\n cp['18_24'] = (range_percentage['18_24']/total) * 100\n cp['25_34'] = (range_percentage['25_34']/total) * 100\n cp['35_44'] = (range_percentage['35_44']/total) * 100\n cp['45_54'] = (range_percentage['45_54']/total) * 100\n cp['55_65'] = (range_percentage['55_65']/total) * 100\n cp['65'] = (range_percentage['65']/total) * 100\n else:\n cp = range_percentage\n return cp", "title": "" }, { "docid": "234156c92c3943fcc0011284522b8a81", "score": "0.4845171", "text": "def compute_percent_unemployed(self, df):\n percent_unemployed = round(\n (df.loc[:, 'Estimate!!Total:!!In labor force:!!Civilian labor force:!!Unemployed']\n / df.loc[:, 'Estimate!!Total:!!In labor force:!!Civilian labor force:']) * 100,\n 1)\n df['percent_unemployed'] = percent_unemployed\n\n return df", "title": "" }, { "docid": "2a6ffd26756a3eaece59396214be4a23", "score": "0.48290104", "text": "def fertility_vs_child_mortality(year):\n with_region = stats_relabeled(year).join('geo', countries.select('country', 'world_6region'), 'country')\n with_region.scatter(2, 3, sizes=1, colors=4, s=500)\n plots.xlim(0,10)\n plots.ylim(-50, 500)\n plots.title(year)", "title": "" }, { "docid": "2a6ffd26756a3eaece59396214be4a23", "score": "0.48290104", "text": "def 
fertility_vs_child_mortality(year):\n with_region = stats_relabeled(year).join('geo', countries.select('country', 'world_6region'), 'country')\n with_region.scatter(2, 3, sizes=1, colors=4, s=500)\n plots.xlim(0,10)\n plots.ylim(-50, 500)\n plots.title(year)", "title": "" }, { "docid": "d81c2b6925bfbdf18f1994cd209f0c7c", "score": "0.4829009", "text": "def get_race_averages(self, race):\n cursor = self.query(\n \"\"\"\n SELECT\n driverId,\n (SELECT AVG(position)\n FROM\n (SELECT position\n FROM results results1\n WHERE results1.raceId <= results.raceId\n AND results.driverId = results1.driverId\n AND position IS NOT NULL\n ORDER BY raceId DESC\n LIMIT 3)\n results2)\n as avg\n FROM results\n WHERE raceId = (SELECT MAX(raceId)\n FROM races races1\n WHERE races1.raceId < %s);\n \"\"\",\n (race,)\n )\n result = cursor.fetchall()\n cursor.close()\n return result", "title": "" }, { "docid": "dc321a3e3890bc06fc192b26aac47a9d", "score": "0.48224914", "text": "def theft_coverage(self):\n self.price_total += 700\n self.extra_features += 700", "title": "" }, { "docid": "23e4d061df608d44e6ed23f872dc83b4", "score": "0.4806577", "text": "def _get_occupancy_stats_amod(alg_path):\n full_path = os.path.join(alg_path, \"output\", \"data\", \"statusDistributionNumPassengers\")\n columns = ['4 pax', '3 pax', '2 pax', '1 pax', '0 pax', 'rebalance', 'stay', 'off-service']\n df = files.read_amod_csv(full_path)\n col_num = len(df.columns)\n # choose column names according to the number of columns in the data\n df.columns = columns[-col_num:]\n df[\"0 pax\"] += df[\"rebalance\"]\n df[\"time\"] = df.index * 10\n return df", "title": "" }, { "docid": "22b7151ad76b958b6b0cafe9fc6f0ebe", "score": "0.4806471", "text": "def hist_stat(self):\n historical_dodge = pd.read_csv('./Data/historical_dodge_data.csv').set_index('Year') \n pub_inst_values = historical_dodge.loc[list(range(1919, 1925)), ['Pub&Institutional']].values\n total_1925_6 = pd.DataFrame.sum(historical_dodge.loc[list(range(1925, 1927)),], axis=0).drop(index='Commercial (million SF)')\n inst_pub_total_1925 = pd.DataFrame.sum(historical_dodge.loc[1925,].drop('Commercial (million SF)'), axis=0)\n inst_pub_total_1926 = pd.DataFrame.sum(historical_dodge.loc[1926,].drop('Commercial (million SF)'), axis=0)\n inst_pub_total_1925_6 = inst_pub_total_1925 + inst_pub_total_1926\n\n shares = total_1925_6.divide(inst_pub_total_1925_6)\n for col in list(total_1925_6.index): \n values = historical_dodge.loc[list(range(1919, 1925)), ['Pub&Institutional']].multiply(shares[col]).values\n historical_dodge.at[list(range(1919, 1925)), col] = values\n\n historical_dodge.at[list(range(1919, 1925)), ['Pub&Institutional']] = pub_inst_values\n return historical_dodge", "title": "" }, { "docid": "63e30040c6f65c49b0feb6656575437e", "score": "0.47997078", "text": "def load_percentiles():\r\n return load_coord_var('percentile')", "title": "" }, { "docid": "edf0ea8d7d0e0c71eaa20eedcf20781e", "score": "0.47944158", "text": "def get_percentage_event_wrt(df, Id=None, subId=None, tag=None, event_name='name'):\r\n df_by_event = select_from_event(df, Id, subId, tag)\r\n counter = df_by_event.groupby(['playerId', 'teamId']).size().reset_index().merge(\r\n df_by_event.groupby('teamId').size().reset_index(), on='teamId')\r\n counter.rename(columns={'0_x': 'count_by_player', '0_y': 'total'}, inplace=True)\r\n counter[event_name + '_pct_WRT'] = counter.count_by_player / counter.total\r\n return counter.drop(['count_by_player', 'total', 'teamId'], 1)", "title": "" }, { "docid": 
"8b526a93eff4e2c85dce772b3237655a", "score": "0.47875375", "text": "def get_race_averages_team(self, race):\n cursor = self.query(\n \"\"\"\n SELECT\n driverId,\n (SELECT AVG(position)\n FROM\n (SELECT position\n FROM results results1\n WHERE results1.raceId <= results.raceId\n AND results.constructorId = results1.constructorId\n AND position IS NOT NULL\n ORDER BY raceId DESC\n LIMIT 6)\n results2)\n as avg\n FROM results\n WHERE raceId = (SELECT MAX(raceId)\n FROM races races1\n WHERE races1.raceId < %s);\n \"\"\",\n (race,)\n )\n result = cursor.fetchall()\n cursor.close()\n return result", "title": "" }, { "docid": "11ada17cd72a64e53298ac8d79ca467b", "score": "0.47781482", "text": "def average_holding_num_percentage(df_holding, df_universe):\n\n if len(df_holding) < 1:\n return np.nan\n\n df_count = df_universe.loc[df_holding.index,:].count(axis=1)\n df_count = df_count[df_count > 0]\n avg_holding_num_pct = (df_holding.count(axis=1) / df_count).mean()\n\n return avg_holding_num_pct", "title": "" }, { "docid": "cc59ea56221516caa4dc4d3c1cbdfe4d", "score": "0.47758833", "text": "def get_referendum_by_regions():\n drop_cols = ['Code du département', 'Libellé du département',\n 'Code de la commune', 'Libellé de la commune']\n\n def get_summary(df_list):\n df = pd.concat(df_list).drop(columns=drop_cols).sum()\n df = df/df[\"Inscrits\"]*100\n df.drop(\"Inscrits\", inplace=True)\n return df\n\n df_referendum = read_referendum_data()\n dfgroup = list(df_referendum.groupby(\"Code du département\"))\n\n depcode = [dfg_[0] for dfg_ in dfgroup]\n mainland = get_summary(\n [dfgroup[ix_][1] for ix_ in range(len(depcode))\n if 'Z' not in depcode[ix_]])\n overseasterr = get_summary(\n [dfgroup[ix_][1] for ix_ in range(len(depcode))\n if 'Z' in depcode[ix_]])\n abroad = get_summary([dfgroup[-1][1]])\n return mainland, overseasterr, abroad", "title": "" }, { "docid": "4c9c4232ab8d4df6d311df2b47693c9a", "score": "0.4773975", "text": "def handle_main_housenr_percent(relation: helpers.Relation) -> Tuple[yattag.Doc, str]:\n url = \"/osm/missing-housenumbers/\" + relation.get_name() + \"/view-result\"\n percent = \"N/A\"\n if os.path.exists(relation.get_files().get_housenumbers_percent_path()):\n percent = helpers.get_content(relation.get_files().get_housenumbers_percent_path())\n\n doc = yattag.Doc()\n if percent != \"N/A\":\n date = get_last_modified(relation.get_files().get_housenumbers_percent_path())\n with doc.tag(\"strong\"):\n with doc.tag(\"a\", href=url, title=_(\"updated\") + \" \" + date):\n doc.text(percent + \"%\")\n return doc, percent\n\n with doc.tag(\"strong\"):\n with doc.tag(\"a\", href=url):\n doc.text(_(\"missing house numbers\"))\n return doc, \"0\"", "title": "" }, { "docid": "3092bbc6ec20cc181c6145511cfcb490", "score": "0.4773862", "text": "def calc_stats(self):", "title": "" }, { "docid": "99ddd3fde7fc93962495ef9d23291e98", "score": "0.4771182", "text": "def change_all_radius(self, percentage):\n self._validate_attrs()\n\n dictl = self._gen_dict.copy()\n for i in dictl:\n for j in dictl[i]:\n for l in dictl[i][j]: # noqa\n dictl[i][j][l] = dictl[i][j][l] + percentage / 100 * dictl[i][j][l]\n self._gen_dict = dictl\n\n #Note that for the sake of a new pao_block, the radius of polarized\n #orbitals does not matter. 
However we change it for consistency\n if self._pol_dict:\n pol = self._pol_dict.copy()\n for i in pol:\n for j in pol[i]:\n for l in pol[i][j]: # noqa\n pol[i][j][l] = pol[i][j][l] + percentage / 100 * pol[i][j][l]\n self._pol_dict = pol", "title": "" }, { "docid": "f5ea560a8e7e8b600667a322b1a83486", "score": "0.47691098", "text": "def add_unit_columns(df):\n df['Pressure_gpa_mean'] = df['Pressure_mean']*gpa\n df['Pressure_gpa_error'] = df['Pressure_error']*gpa", "title": "" }, { "docid": "5a9c699d7e4e8f636647080a2ccff95f", "score": "0.47684285", "text": "def compare_mit_our_race_dist(bias_dataframe=None, our_dataframe=None):\n\n data_list = list()\n count = pd.DataFrame()\n count = count.append(\n [\n {\"Race\": \"White\", \"Percentage\": 83, \"discriminator\": \"Mit Dataset\"},\n {\"Race\": \"Black\", \"Percentage\": 7, \"discriminator\": \"Mit Dataset\"},\n {\"Race\": \"Latino_Hispanic\", \"Percentage\": 2, \"discriminator\": \"Mit Dataset\"},\n {\"Race\": \"East Asian\", \"Percentage\": 3, \"discriminator\": \"Mit Dataset\"},\n {\"Race\": \"Indian\", \"Percentage\": 2, \"discriminator\": \"Mit Dataset\"},\n {\"Race\": \"Middle Eastern\", \"Percentage\": 2, \"discriminator\": \"Mit Dataset\"},\n {\"Race\": \"Southeast Asian\", \"Percentage\": 1, \"discriminator\": \"Mit Dataset\"},\n ],\n ignore_index=True,\n )\n if bias_dataframe is not None and our_dataframe is not None:\n title = \"Comparison of race distribution for the three datasets\"\n data_list.append(generate_count(our_dataframe, \"race\", \"OUR Dataset\"))\n data_list.append(generate_count(bias_dataframe, \"race\", \"OUR-B Dataset\"))\n elif bias_dataframe is not None:\n title = \"Comparison of race distribution of Our artificially biased and Mit dataset\"\n data_list.append(generate_count(bias_dataframe, \"race\", \"OUR-B Dataset\"))\n else:\n data_list.append(\n generate_count(pd.read_csv(\"our_dataset/our_dataset_label_val.csv\").copy(), \"race\", \"OUR Dataset\")\n )\n title = \"Comparison of race distribution of OUR and Mit dataset.\"\n\n count = count.append(pd.concat(data_list).copy())\n\n fig = plt.figure(figsize=(9.5, 6), num=title)\n sns.barplot(x=\"Percentage\", y=\"Race\", hue=\"discriminator\", data=count, palette=\"magma\")\n plt.legend()\n\n plt.title(title)\n plt.tight_layout()\n plt.show()\n fig.canvas.draw()", "title": "" }, { "docid": "730baf608e69c01602d4233d047fed96", "score": "0.47657293", "text": "def percent(self):\n return (float(self.score) / 850.0) * 100", "title": "" }, { "docid": "4bc9ce409cbc2ffb0694f7b2b814765e", "score": "0.47582862", "text": "def prop_cohort(row,cohort_df):\n yr = row['issue_d_year']\n yr_amnt = cohort_df[cohort_df['issue_d_year'] == yr]['funded_amnt'].sum()\n prop = row['funded_amnt'] / yr_amnt\n return prop", "title": "" }, { "docid": "b2292639af1a6992eea6a7bc4c43e117", "score": "0.4750848", "text": "def stats_relabeled(year):\n return stats_for_year(year).relabel(2, 'Children per woman').relabel(3, 'Child deaths per 1000 born')", "title": "" }, { "docid": "5a4d29f674e2a3f7ff33813ee184b079", "score": "0.47356734", "text": "def example_counts_by_fips():\n contributions_resource = campaignadvisor.resources.get_resource(\"contributions.csv\")\n contributions = pd.read_csv(contributions_resource.get_local_path(), dtype=str)\n contributions['FIPS'] = contributions[\"contbr_zip\"].apply(get_fips_from_zip_code)\n print contributions.groupby('FIPS').size()", "title": "" }, { "docid": "98101a55b1b7c05ab77288d7d6c12a68", "score": "0.47283968", "text": "def 
_generate_percentage_row(self, field_name: str, value: str, bar_id: str) -> Markup:\n percentage = float(value.replace('%', ''))\n percentage_class = _percentage_bar_class(percentage)\n row_head = self._table_row_head % {\"head_class\": \"\", \"head_content\": field_name}\n row_data = self._table_data_percentage % {\"percentage\": percentage, \"bar_class\": percentage_class,\n \"bar_id\": bar_id}\n return row_head + row_data", "title": "" }, { "docid": "9c8b44bec9fe01316c81b869ea9302ff", "score": "0.4727348", "text": "def get_percentage_event_wrp(df, Id=None, subId=None, tag=None, event_name='name'):\r\n df_by_event = select_from_event(df, Id, subId, tag)\r\n counter = df_by_event.groupby(['playerId', 'teamId']).size().reset_index().merge(\r\n df.groupby(['playerId', 'teamId']).size().reset_index(), on=['teamId', 'playerId'])\r\n counter.rename(columns={'0_x': 'single_ev_player', '0_y': 'total_ev_player'}, inplace=True)\r\n counter[event_name + '_pct_WRP'] = counter.single_ev_player / counter.total_ev_player\r\n return counter.drop(['single_ev_player', 'total_ev_player', 'teamId'], 1)", "title": "" }, { "docid": "e0ebdd0d4ab4bda25c9c979e46195ef1", "score": "0.47262108", "text": "def calc_section_percents(times, time_type):\n if time_type == 'comp':\n key = 'comp_times'\n elif time_type == 'comm':\n key = 'comm_times'\n else:\n raise Exception('Type not supported: ' + time_type)\n\n percents = times[time_type].div(times[time_type].sum(1), axis=0)\n\n return percents", "title": "" }, { "docid": "e34a47f2c6f6fba9709245aea3e621f7", "score": "0.47199336", "text": "def percent_most_under_18(counties):", "title": "" }, { "docid": "543a18dd5d1c5a9026c4623bc795271a", "score": "0.47137448", "text": "def calc_stats(df):\n\n # All\n df['PENT60'] = df['PENT_all'] * 60 / df['TOI_all']\n df['PEND60'] = df['PEND_all'] * 60 / df['TOI_all']\n\n # Even\n df['FF60_even'] = df['FF_even'] * 60 / df['TOI_even']\n df['FA60_even'] = df['FA_even'] * 60 / df['TOI_even']\n df['xGF60/FF60_even'] = df['xGF_even'] / df['FF_even']\n df['xGA60/FA60_even'] = df['xGA_even'] / df['FA_even']\n df['GF60/xGF60_even'] = df['GF_even'] / df['xGF_even']\n\n # PP\n df['FF60_pp'] = df['FF_pp'] * 60 / df['TOI_pp']\n df['xGF60/FF60_pp'] = df['xGF_pp'] / df['FF_pp']\n df['GF60/xGF60_pp'] = df['GF_pp'] / df['xGF_pp']\n\n # PK\n df['FA60_pk'] = df['FA_pk'] * 60 / df['TOI_pk']\n df['xGA60/FA60_pk'] = df['GA_pk'] / df['FA_pk']\n\n return df", "title": "" }, { "docid": "f19b396a6683e8df4f8104d2034e9d4c", "score": "0.47117898", "text": "def percent_hmap(self,timeRange,trialRange):\n counter = 1\n\n plt.figure(figsize = [5,15])\n for patch in [1,2,4]:\n rews = np.array(self.rew_locs[patch])/patch\n cumulative_rews = rews.copy()\n for iTime in range(timeRange[1]):\n cumulative_rews[:,iTime] = np.sum(rews[:,:iTime+1],axis = 1)\n\n max_rew = int(np.max(cumulative_rews[trialRange[0]:trialRange[1],timeRange[0]:timeRange[1]]))\n\n hmap_num = np.zeros((max_rew,timeRange[1]))\n hmap_denom = np.zeros((max_rew,timeRange[1]))\n for trial in range(trialRange[0],trialRange[1]):\n for time in range(timeRange[0],timeRange[1]):\n cumulative_rew = int(cumulative_rews[trial,time])\n hmap_num[cumulative_rew-1,time] += self.timecourses[patch][trial,time]\n hmap_denom[cumulative_rew-1,time] += 1\n # display(hmap_denom)\n hmap = np.divide(hmap_num,hmap_denom,where = hmap_denom>0)\n hmap[np.where(hmap > 1)[0]] = 0\n plt.subplot(3,1,counter)\n plt.title(str(str(patch) + 'uL Rew Size'))\n ax = sns.heatmap(hmap)\n ax.invert_yaxis()\n plt.xlabel('Time on patch (sec)')\n 
plt.ylabel('Rewards Received')\n counter += 1\n plt.suptitle('Heatmap of patch stay percentage')", "title": "" }, { "docid": "f143fed87bfcb96d4e0a92a2edcd6b6c", "score": "0.4707852", "text": "def getRegion(df):\n\n return df.Region", "title": "" }, { "docid": "e65b1a62df7034a0d6a3873a2fd28620", "score": "0.4699639", "text": "def percent_point(self, U):\n raise NotImplementedError", "title": "" }, { "docid": "95d4146d33c89c051c95317a32e9cffe", "score": "0.46971816", "text": "def hist_stat_adj(self): \n hist_data = self.hist_stat()\n west_inflation = self.west_inflation()\n hist_data = hist_data.merge(west_inflation, how='outer', left_index=True, right_index=True)\n hist_data['Final Factors'] = hist_data['Final Factors'].fillna(1)\n adjusted_for_west = hist_data.drop(columns=['Final Factors', 'Pub&Institutional']).multiply(hist_data['Final Factors'].values, axis=0)\n return adjusted_for_west.loc[list(range(1919, 1960)), :]", "title": "" }, { "docid": "972ec23eaaf94abe25dd0a303ad8c6cf", "score": "0.4696911", "text": "def state_counties(counties, state):\n count=0\n i=0\n for c in counties:\n if state == c[\"State\"]:\n i+=c[\"Age\"][\"Percent Under 18 Years\"]\n count+=1\n return i/count", "title": "" }, { "docid": "bc55944b65e374b3d9fa973a008eaf79", "score": "0.46909732", "text": "def rel(array, percentage):\n return (percentage/100.) * array", "title": "" }, { "docid": "259ba402833d5f916447ca3ed71aa226", "score": "0.46903494", "text": "def finalize_stats(consensus: Dict[str, int], num_entries_in_uniprot: int, num_entries_in_refseq: int) -> pd.DataFrame:\n stats = {category: [value[\"matches\"],\n f\"{len(value['UniProt entry'])/num_entries_in_uniprot:.2%}\",\n f\"{value['matches']/num_entries_in_refseq:.2%}\"]\n for category, value in consensus.items()}\n stats_df = pd.DataFrame.from_dict(stats, orient=\"index\", columns=[\"Number of matches\",\n \"Matching UniProt entries [%]\",\n \"Matching RefSeq entries [%]\"])\n return stats_df", "title": "" }, { "docid": "4ff645c2abf6b0009cb543707ff66f07", "score": "0.46762007", "text": "def percent_lgtm(self):\n if not self.nb_issues:\n return 0\n return self.nb_lgtmed * 100. 
/ self.nb_issues", "title": "" }, { "docid": "25dcdbc9fc28ebc6c8e7739d21c54a86", "score": "0.46760866", "text": "def LRscore(municipalities, manifestos):\n\n for municipality in municipalities:\n for year in municipalities[municipality]:\n tot_rile = 0\n tot_votes = 0\n for party in municipalities[municipality][year]:\n # calculate the weighted average of riles\n if party in (municipalities[municipality][year] and manifestos[int(year)]):\n tot_votes = tot_votes + municipalities[municipality][year][party]\n tot_rile = tot_rile + municipalities[municipality][year][party] * manifestos[int(year)][party]\n\n municipalities[municipality][year]['rile'] = tot_rile/tot_votes\n\n return municipalities", "title": "" }, { "docid": "3852b74729c813272f4718b068c01f0d", "score": "0.4668099", "text": "def get_percentage(predicted_data):\n SongPercentage = dict()\n for name_of_song, prdictions in predicted_data.items():\n predicted_values = prdictions.values()\n sum_of_predicted_values = sum(predicted_values)\n GenrePercentage = dict()\n for genre ,predicted_value in prdictions.items():\n try:\n percentage = (predicted_value / sum_of_predicted_values) * 100\n GenrePercentage.update({genre:percentage})\n except ZeroDivisionError:\n percentage = 0\n SongPercentage.update({name_of_song:GenrePercentage})\n return SongPercentage", "title": "" }, { "docid": "4f6aca608d7ceff3bec19b0d704d757d", "score": "0.4665427", "text": "def load_total_deaths_by_region(self):\n # store output as dataframe\n df = pd.read_excel(\n self.filename,\n #sheet_name='COVID19 total deaths by region', - old sheet name\n sheet_name='Tab1 Deaths by region',\n header=15, \n index_col=[1]\n )\n \n # Remove empty columns\n df.drop(df.filter(regex=\"Unname\"),axis=1, inplace=True)\n df = df.iloc[2:]\n self.df_total_deaths_by_region = df\n return self.df_total_deaths_by_region", "title": "" }, { "docid": "0c97e345f7c2f11cadef30d91450667e", "score": "0.46560842", "text": "def fix_columns_election_results(df, year, type_):\n df = df.loc[:, election_results_cols_of_interest]\n df[f'primary_votes_{type_.lower()}_{year}'] = df['PRIMARY VOTES']\n df[f'primary_votes_{type_.lower()}_{year}_pct'] = df['PRIMARY %']\n return df.drop(columns=['PRIMARY VOTES', 'PRIMARY %'])", "title": "" }, { "docid": "985799d0c986f691c6ac2acd4c5c5f38", "score": "0.46536115", "text": "def stats_relabeled(year):\n return stats_for_year(year).relabeled(2, 'Children per woman').relabeled(3, 'Child deaths per 1000 born')", "title": "" }, { "docid": "03719961aecd142aac3eda8c1c9f4310", "score": "0.46515125", "text": "def calculate_percentage(val, total):\n percent = np.divide(val, total)\n \n return percent", "title": "" }, { "docid": "6ba2d1d236939cd25c2f0f4f55f78377", "score": "0.46489426", "text": "def percentage(self):\n\t\treturn int((self._range[0] * 100) / self._range[1])", "title": "" }, { "docid": "27de73dd773df2b6fd4ef984e7215a50", "score": "0.46482435", "text": "def make_choropleth_percentOY_map(geo_df):\n\n fig_2, ax_2 = plt.subplots(1, 1)\n fig_2.set_size_inches(22, 17)\n\n ax_2.set_yticklabels([])\n ax_2.set_ylabel(\" \")\n\n ax_2.set_xticklabels([])\n ax_2.set_xlabel(\" \")\n\n ax_2.set_title(\n \"\"\"Percent Opportunity Youth \\n by PUMA in S. 
King County\"\"\",\n fontdict={\"fontsize\": \"48\"},\n pad=20,\n )\n\n divider_2 = make_axes_locatable(ax_2)\n cax_2 = divider_2.append_axes(\"right\", size=\"5%\", pad=0.1)\n\n geo_df.plot(\n column=\"OY_percent\",\n ax=ax_2,\n legend=True,\n cmap=\"YlOrBr\",\n cax=cax_2,\n edgecolor=\"black\",\n )\n\n plt.savefig(\"../../reports/figures/08_OY_percent_puma_map.png\", dpi=200)\n return fig_2, ax_2", "title": "" }, { "docid": "0b2ebf02ab457b549d2b1926b7e2b5b5", "score": "0.46411481", "text": "def tax_county_town(df, town):\n df = df[0]\n town = town.upper()\n df_county_town = df['Municipality'] == town\n tax_county = df[df_county_town]['Net County Rate'].values + df[df_county_town]['County Services'].values\n tax_town = df[df_county_town]['Net Town Rate'].values\n \n return tax_county[0], tax_town[0]", "title": "" }, { "docid": "27a5c9a562847ee7d71e67690b1b66b0", "score": "0.46375257", "text": "def get_region_numbers(df, region):\n region_numbers = df.groupby('Date').sum().reset_index()\n region_numbers['Region'] = region\n region_numbers['Countries'] = df.groupby('Date').count().reset_index()['name_short']\n return region_numbers", "title": "" }, { "docid": "a967a2993e3475256ea5c9336019745b", "score": "0.46320102", "text": "def print_df_stats(df):\n sums = df.sum()\n print(\"CPU: %f percent of values true\" % (sums[\"CPU\"] / len(df) * 100))\n print(\"GPU: %f percent of values true\" % (sums[\"GPU\"] / len(df) * 100))", "title": "" }, { "docid": "548e28896eca9def6525a47cfe2a4007", "score": "0.46202788", "text": "def goleft_norm_coverage_group(table):\n weights = table.align_len / np.sum(table.align_len)\n return np.sum(weights * table.norm_covg)", "title": "" }, { "docid": "c39d95d971a1a98dba1aa8733065c058", "score": "0.4619549", "text": "def seaward_percentage():\n # Duration in myr from Strasser, 2009\n duration = 1.95 - 1.24 \n rate = plate_motion()\n total = duration * rate\n return seaward_shortening() / total", "title": "" }, { "docid": "3c48d3a2e10a1ee17b6b6cdfb5b5d3d8", "score": "0.4608309", "text": "def collision_coverage(self):\n self.price_total += 2000\n self.extra_features += 2000", "title": "" }, { "docid": "7d7b9c7f856994ae390767cfd5c7fce9", "score": "0.46065593", "text": "def percentage(milestone):\n closed_tasks = milestone.task_set.filter(state_kind=\"Z\")\n part = closed_tasks.count()\n \n all_tasks = milestone.task_set.all()\n whole = all_tasks.count()\n \n if(part != 0 and whole != 0):\n return round(100 * float(part)/float(whole),2)\n \n return 0", "title": "" }, { "docid": "2b14fd8837302a964ed27f3ae2e2d965", "score": "0.46008554", "text": "def area_km2_per_grid(infra_dataset, df_store):\n asset_list = []\n\n for asset in infra_dataset.asset.unique():\n if not \"{}_count\".format(asset) in df_store.columns: df_store.insert(1, \"{}_count\".format(asset), \"\") #add assettype as column after first column for count calculations\n if not \"{}_km2\".format(asset) in df_store.columns: df_store.insert(1, \"{}_km2\".format(asset), \"\") #add assettype as column after first column for area calculations\n asset_list.append(asset)\n\n for grid_row in df_store.itertuples():\n grid_cell = grid_row.geometry #select grid\n try:\n asset_clip = gpd.clip(infra_dataset, grid_cell) #clip infra data using GeoPandas clip\n\n #count per asset type\n count = asset_clip.asset.value_counts() #count number of assets per asset type\n for asset_type in asset_list:\n if asset_type in count.index:\n df_store.loc[grid_row.Index, \"{}_count\".format(asset_type)] = count.get(key = asset_type)\n 
else:\n df_store.loc[grid_row.Index, \"{}_count\".format(asset_type)] = 0\n\n #calculate area for each asset in clipped infrastructure grid\n asset_clip.insert(1, \"area_km2\", \"\") #add assettype as column after first column for length calculations\n for polygon_object in asset_clip['index']:\n asset_clip.loc[polygon_object, \"area_km2\"] = polygon_area((asset_clip.loc[asset_clip['index']==polygon_object].geometry.item())) #calculate area per object and put in dataframe\n\n area_per_type = asset_clip.groupby(['asset'])['area_km2'].sum() #get total length per asset_type in grid\n for asset_type in asset_list:\n if asset_type in area_per_type.index:\n df_store.loc[grid_row.Index, \"{}_km2\".format(asset_type)] = area_per_type.get(key = asset_type)\n else:\n df_store.loc[grid_row.Index, \"{}_km2\".format(asset_type)] = 0 \n\n except: \n print(\"Grid number {} raises a ValueError, area has not been clipped\".format(grid_row.index))\n for asset_type in asset_list:\n df_store.loc[grid_row.Index, \"{}_count\".format(asset_type)] = np.nan\n df_store.loc[grid_row.Index, \"{}_km2\".format(asset_type)] = np.nan\n \n return df_store", "title": "" }, { "docid": "187346153d5efc5654bb3758c3ee2322", "score": "0.46001118", "text": "def percent(x):\n\tc = upper_bound(x) - lower_bound(x)\n\tp = c / (upper_bound(x) + lower_bound(x))\n\treturn p * 100", "title": "" }, { "docid": "d2c48f4e789f170b759d46b931e85430", "score": "0.45930678", "text": "def calculate_contributions(df, year_to_calculate):\n\n # calculate the embodied energy/emissions due to construction\n total_column = 'saver'\n ## calculate how many years before the calculation year the building was built in\n df['delta_year'] = year_to_calculate - df['YEAR']\n\n ## if it was built more than X years before, the embodied energy/emissions have been \"paid off\" and are set to 0\n df['confirm'] = df.apply(lambda x: calc_if_existing(x['delta_year'], SERVICE_LIFE_OF_BUILDINGS), axis=1)\n ## if it was built less than X years before, the contribution from each building component is calculated\n df[total_column] = ((df['capex_WALL'] * (df['area_walls_ext_ag'] + df['area_walls_ext_bg']) +\n df['capex_WIN'] * df['windows_ag'] +\n df['capex_FLOOR'] * df['floor_area_ag'] +\n df['capex_CONS'] * (df['floor_area_bg'] +df['floor_area_ag']) +\n df['capex_LEAK'] * (df['area_walls_ext_ag'] + df['area_walls_ext_bg']) +\n df['capex_hvacCS'] * df['floor_area_ag'] +\n df['capex_hvacHS'] * df['floor_area_ag'] +\n df['capex_hvacVENT'] * df['floor_area_ag'] +\n df['capex_BASE'] * df['floor_area_bg'] +\n df['capex_PART'] * (df['floor_area_ag']+df['floor_area_bg']) * CONVERSION_AREA_TO_FLOOR_AREA_RATIO +\n df['capex_ROOF'] * df['footprint']) / SERVICE_LIFE_OF_TECHNICAL_SYSTEMS) * df['confirm']\n\n # df[total_column] += (((df['floor_area_ag'] + df['floor_area_bg']) * EMISSIONS_EMBODIED_TECHNICAL_SYSTEMS) / SERVICE_LIFE_OF_TECHNICAL_SYSTEMS) * df['confirm']\n\n # the total cost intensity\n df['capex_total_cost_m2'] = df[total_column] / df['GFA_m2']\n\n # the total and specific embodied energy/emissions are returned\n # result = df[['Name', 'GHG_sys_embodied_tonCO2', 'GHG_sys_embodied_kgCO2m2', 'GFA_m2']]\n\n df['capex_building_systems'] = df[total_column]\n df['opex_building_systems'] = df['capex_building_systems'] * (.05)\n df['capex_ann_building_systems'] = df.apply(lambda x: calc_capex_annualized(x['capex_building_systems'], 5, 30),axis=1)\n df['opex_ann_building_systems'] = df.apply(lambda x: calc_opex_annualized(x['opex_building_systems'], 5, 30),axis=1)\n 
df['TAC_building_systems'] = df['capex_ann_building_systems'] + df['opex_ann_building_systems']\n\n return df", "title": "" }, { "docid": "73e66a2e1103f7c5bf4846d7eefb3fef", "score": "0.45888796", "text": "def _basic_table(df):\n \n mk_series = lambda r: pd.Series([r['Publication ID'].count(),\n r['Times cited'].mean(),\n ],\n index = [\"N Publications\", \n \"Mean Citations\",\n ])\n out = df.groupby(\"PubYear\"\n ).apply(mk_series\n ).round({\"N Publications\":0, \n \"Mean Citations\":1,\n }\n ).astype({\"N Publications\":int,\n }\n )\n return out", "title": "" }, { "docid": "32567e6c122a16a54cf17a0c3e856b5e", "score": "0.45815462", "text": "def notional_complete_pct(self) -> float:\n return self.__notional_complete_pct", "title": "" }, { "docid": "aecbc8f66cb7a2156f7980ddf5efcc6d", "score": "0.4578724", "text": "def get_percentage_dist(dist):\n N = dist[\"traffic_light\"][\"red\"] + dist[\"traffic_light\"][\"green\"]\n for dist_key in dist:\n for key, dist_val in dist[dist_key].items():\n dist[dist_key][key] = dist_val / N\n return dist", "title": "" }, { "docid": "6905b566090caa20f8996c6dab51ad56", "score": "0.45782232", "text": "def count_per_grid(infra_dataset, df_store):\n asset_list = []\n\n for asset in infra_dataset.asset.unique():\n if not \"{}_count\".format(asset) in df_store.columns: df_store.insert(1, \"{}_count\".format(asset), \"\") #add assettype as column after first column\n asset_list.append(asset)\n\n for grid_row in df_store.itertuples():\n grid_cell = grid_row.geometry #select grid\n asset_clip = gpd.clip(infra_dataset, grid_cell) #clip infra data using GeoPandas clip\n count = asset_clip.asset.value_counts() #count number of assets per asset type\n\n for asset in asset_list:\n if asset in count.index:\n df_store.loc[grid_row.Index, \"{}_count\".format(asset)] = count.get(key = asset)\n else:\n df_store.loc[grid_row.Index, \"{}_count\".format(asset)] = 0\n \n return df_store", "title": "" }, { "docid": "354aa1fc493a46d6d30febae9771e670", "score": "0.45751578", "text": "def quantity_proportion(sqlContext, f1_georeferencia):\n f1_georeferencia = f1_georeferencia.toPandas()\n\n f1_georeferencia['q_ARRIS_Group'] = f1_georeferencia.\\\n apply(lambda x: 1 if (x[\"Fabricante\"]) ==\n 'ARRIS Group, Inc.' else 0, axis=1)\n\n f1_georeferencia['q_Cisco_Systems_Inc'] = f1_georeferencia.\\\n apply(lambda x: 1 if (x[\"Fabricante\"]) ==\n 'Cisco Systems, Inc' else 0, axis=1)\n\n f1_georeferencia['q_Technicolor'] = f1_georeferencia.\\\n apply(lambda x: 1 if (x[\"Fabricante\"]) ==\n 'Technicolor CH USA Inc.' else 0, axis=1)\n\n # Suming.\n suma = f1_georeferencia['q_ARRIS_Group'].sum() +\\\n f1_georeferencia['q_Cisco_Systems_Inc'].sum() +\\\n f1_georeferencia['q_Technicolor'].sum()\n\n # Proportion.\n f1_georeferencia['p_ARRIS_Group'] = f1_georeferencia.\\\n apply(lambda x: 1/suma if (x[\"Fabricante\"]) ==\n 'ARRIS Group, Inc.' else 0, axis=1)\n\n f1_georeferencia['p_Cisco_Systems_Inc'] = f1_georeferencia.\\\n apply(lambda x: 1/suma if (x[\"Fabricante\"]) ==\n 'Cisco Systems, Inc' else 0, axis=1)\n\n f1_georeferencia['p_Technicolor'] = f1_georeferencia.\\\n apply(lambda x: 1/suma if (x[\"Fabricante\"]) ==\n 'Technicolor CH USA Inc.' 
else 0, axis=1)\n\n f2_sum_prop = sqlContext.createDataFrame(f1_georeferencia)\n print('Final df for Sprint 2:\\n')\n f2_sum_prop.show(truncate=False)\n return f2_sum_prop", "title": "" }, { "docid": "9a95b72d458a07d6531a424445f6bcd8", "score": "0.45746762", "text": "def support_resistance(df):\n pass", "title": "" }, { "docid": "25dab402f2c1cace9f116f2d953f24d6", "score": "0.45696545", "text": "def calculate_tort_local_density(filename, finalbur, yearlist, burr_attr):\n\t\n\tquarterdict={}\n\tquarterdict[1] = [11,12]\n\tquarterdict[2] = [3,4,5,6]\n\tquarterdict[3] = [7,8,9,10]\n\ttort_density={}\t\n\tfor bur in finalbur:\n\t\teasting = burr_attr[bur][3]\n\t\tnorthing = burr_attr[bur][4]\n\t\ttort_density[bur]={}\n\t\tfor year in yearlist:\n\t\t\ttort_density[bur][year]={}\n\t\t\tfor quarter in quarterdict.keys():\n\t\t\t\tcursor = db.cursor()\n\t\t\t\t# extract all torts that have easting+-50 w.r.t focal burrows in that year (March_Dec of that year)\n\t\t\t\tcursor.execute(\"\"\" select date, count(date) from \"\"\" + filename + \"\"\" where year(date) = %s and find_in_set(month(date), %s)>0 and UTM_easting>= %s and UTM_easting<=%s and UTM_northing>=%s and UTM_northing<=%s group by date;\"\"\", (year, ','.join(str(num) for num in quarterdict[quarter]), easting-50, easting+50, northing-50, northing+50))\n\t\t\t\tresults = cursor.fetchall()\n\t\t\t\t# Do a len to find out number of torts around the burrow in the particular year\n\t\t\t\ttortlist = [row[1] for row in results]\n\t\t\t\tif quarter==1:\n\t\t\t\t\tcursor = db.cursor()\n\t\t\t\t\t# In addition, extract all torts that have easting+-50 w.r.t focal burrows in that year (Jan-Feb of next calender year)\n\t\t\t\t\tcursor.execute(\"\"\" select date, count(date) from \"\"\" + filename + \"\"\" where year(date) = %s and find_in_set(month(date), %s)>0 and UTM_easting>= %s and UTM_easting<=%s and UTM_northing>=%s and UTM_northing<=%s group by date;\"\"\", (year+1, ','.join(str(num) for num in [1,2]), easting-50, easting+50, northing-50, northing+50))\n\t\t\t\t\tresults = cursor.fetchall()\n\t\t\t\t\t\n\t\t\t\t\tfor row in results:\n\t\t\t\t\t\ttortlist.append(row[1])\n\t\t\t\t\n\t\t\t\tif len(tortlist) > 0 :tort_density[bur][year][quarter] = np.mean(tortlist)\n\t\t\t\telse: tort_density[bur][year][quarter] =0\n\t\t\t\t#print bur, year, quarter, tort_density[bur][year][quarter]\n\t\t\n\treturn tort_density", "title": "" }, { "docid": "15593d89cfcd0f1f8418ed894421c988", "score": "0.45691156", "text": "def get_exp_percents(self, quarter=None, year=None):\n totals = self.get_exp_totals(quarter, year)\n total = sum(totals.itervalues())\n\n if total != 0:\n return {\n 'direct': '%.2f' % ((totals['direct'] / total) * 100),\n 'indirect': '%.2f' % ((totals['indirect'] / total) * 100),\n 'other': '%.2f' % ((totals['other'] / total) * 100)\n }\n else:\n return {\n 'direct': 0,\n 'indirect': 0,\n 'other': 0\n }", "title": "" }, { "docid": "a90e19508bd69841e0acf2446bd4c793", "score": "0.45440653", "text": "def get_circuit_averages_team(self, race):\n cursor = self.query(\n \"\"\"\n SELECT\n driverId,\n (SELECT AVG(position)\n FROM\n (SELECT position\n FROM results results1\n INNER JOIN races\n ON races.raceId=results1.raceId\n WHERE results1.raceId <= results.raceId\n AND results.constructorId = results1.constructorId\n AND position IS NOT NULL\n AND races.circuitId=(SELECT circuitId\n FROM races WHERE raceId = %s)\n ORDER BY results1.raceId DESC\n LIMIT 6)\n results2)\n as avg\n FROM results\n WHERE results.raceId = (SELECT MAX(raceId)\n FROM races 
races1\n WHERE races1.raceId < %s);\n \"\"\",\n (race, race)\n )\n result = cursor.fetchall()\n cursor.close()\n return result", "title": "" }, { "docid": "2743479179dadf3eb6e007e0524e80fe", "score": "0.45436636", "text": "def calc_percentual(pixel):\n pixel[:] = int(float(pixel[0]) * 0.3 + float(\n pixel[1]) * 0.59 + float(pixel[2]) * 0.11)", "title": "" }, { "docid": "221073915afaee0eb466978e4b109366", "score": "0.45431206", "text": "def length_km_per_grid(infra_dataset, df_store):\n asset_list = []\n\n for asset in infra_dataset.asset.unique():\n if not \"{}_count\".format(asset) in df_store.columns: df_store.insert(1, \"{}_count\".format(asset), \"\") #add assettype as column after first column for count calculations\n if not \"{}_km\".format(asset) in df_store.columns: df_store.insert(1, \"{}_km\".format(asset), \"\") #add assettype as column after first column for length calculations\n asset_list.append(asset)\n\n for grid_row in df_store.itertuples():\n grid_cell = grid_row.geometry #select grid\n try:\n asset_clip = gpd.clip(infra_dataset, grid_cell) #clip infra data using GeoPandas clip\n\n #count per asset type\n count = asset_clip.asset.value_counts() #count number of assets per asset type\n for asset_type in asset_list:\n if asset_type in count.index:\n df_store.loc[grid_row.Index, \"{}_count\".format(asset_type)] = count.get(key = asset_type)\n else:\n df_store.loc[grid_row.Index, \"{}_count\".format(asset_type)] = 0\n\n #calculate length for each asset in clipped infrastructure grid\n asset_clip.insert(1, \"length_km\", \"\") #add assettype as column after first column for length calculations\n for line_object in asset_clip['index']:\n asset_clip.loc[line_object, \"length_km\"] = line_length(asset_clip.loc[asset_clip['index']==line_object].geometry.item()) #calculate length per object and put in dataframe\n\n length_per_type = asset_clip.groupby(['asset'])['length_km'].sum() #get total length per asset_type in grid\n for asset_type in asset_list:\n if asset_type in length_per_type.index:\n df_store.loc[grid_row.Index, \"{}_km\".format(asset_type)] = length_per_type.get(key = asset_type)\n else:\n df_store.loc[grid_row.Index, \"{}_km\".format(asset_type)] = 0 \n\n except: \n print(\"Grid number {} raises a ValueError, area has not been clipped\".format(grid_row.index))\n for asset_type in asset_list:\n df_store.loc[grid_row.Index, \"{}_count\".format(asset_type)] = np.nan\n df_store.loc[grid_row.Index, \"{}_km\".format(asset_type)] = np.nan\n \n return df_store", "title": "" }, { "docid": "402f04b6db1b7b8987992294d8b6821a", "score": "0.45407778", "text": "def tax_calc(df=_DF, price=200000, municipality='Fairport', town='Perinton', school='Fairport (Village)', school_town='Fairport', districts=['PR104','PR110','PR701-B']):\n price_000 = price / 1000\n total_taxes = 0\n # calc the different types of taxes\n c, t = tax_county_town(df, town)\n total_taxes += (c * price_000 + t * price_000)\n s = tax_school_library(df, school_town, school)\n total_taxes += (s * price_000 * 0.90) #assessed against 90% of house value\n d = tax_special(df, districts, price_000)\n for tax in d:\n total_taxes += tax\n\n return (municipality + ' Taxes: ${0:,.0f} {1:.1f}%'.format(total_taxes, total_taxes / price * 100))", "title": "" }, { "docid": "717a9cd5bd69e7d23a0feb1d4fb76d42", "score": "0.45390317", "text": "def _get_occupancy_stats_drt(alg_path):\n fname = \"drt_occupancy_time_profiles_av.txt\"\n full_path = files.get_last_iter_file(alg_path, fname)\n df = pd.read_table(full_path)\n df[\"time\"] = 
df.index * 5 * 60\n return df", "title": "" }, { "docid": "5d10877541c8fd72ad1e95b8da5928c3", "score": "0.4538861", "text": "def data_preparation(self):\n # pass attributes\n dframe = self.df\n cat_x = self.feature_x\n cat_y = self.target_y\n \n # aggregate data\n data = dframe.groupby([cat_x, cat_y])[cat_x].count()\n \n # calculate percent and swap index level\n perc_df = pd.DataFrame(data.div(data.sum()).T).swaplevel().unstack().round(decimals=2)\n \n return perc_df", "title": "" }, { "docid": "07783e2e22563cd76a5686cb8aae50c0", "score": "0.4538722", "text": "def to_pct(x):\n series_to_pct = lambda z: z.divide(z.sum()) * 100 if isinstance(z, pd.Series) and z.dtype != 'object' else z\n\n if isinstance(x, pd.Series):\n return series_to_pct(x)\n elif isinstance(x, pd.DataFrame):\n return x.apply(series_to_pct, axis=0)\n else:\n total = sum(x)\n return [100 * float(z) / float(total) for z in x]", "title": "" } ]
3c9f776ede826eb32d3f3263d1126199
Get the single matching Country's ISO3166-2 code from a partial name.
[ { "docid": "7330db2871d6aa4b548ed4c6dda5f2dc", "score": "0.70137024", "text": "def getISO3166Code(self, countryName):\n logger.debug(f\"Getting country code for {countryName}\")\n # workaround a common problem: \"United States\" is the Mapbox name for US, but the algorithm\n # below will also find UM (United States Minor Outlying Islands) and fail because the\n # query is ambiguous.\n #\n # additionally the PRC is not in the iso3166 list (but China is).\n fixups = {\n 'united states': 'US',\n \"people's republic of china\": 'CN'\n }\n name = countryName.lower()\n if fixedCode := fixups.get(name):\n return fixedCode\n \n country = None\n for key in Geocoder.INDEX:\n if name in key:\n if country is not None:\n # Ambiguous countryName\n logger.error(f\"Country {countryName} is ambiguous, found both {country} and {Geocoder.INDEX[key]}\")\n return None\n country = Geocoder.INDEX[key]\n\n if country is not None:\n logger.debug(f\"code for {countryName} is {country.alpha2}\")\n return country.alpha2\n logger.error(f\"Country {countryName} not found in iso3166.countries!\")\n return None", "title": "" } ]
[ { "docid": "ab0b971edb8cd06feb6bb4c5ab5e70da", "score": "0.7358333", "text": "def get_country_code_alpha_3(name):\n for co in pc.countries:\n if name == 'XXX':\n return 'XXX'\n if name in co.alpha_2:\n return co.alpha_3\n return 'XXX'", "title": "" }, { "docid": "4755474fe0540ce8d9ed64959492b981", "score": "0.7343867", "text": "def get_country_code(name):\n for co in pc.subdivisions:\n if name in co.name:\n return co.country_code\n return 'XXX'", "title": "" }, { "docid": "98a6209c315fdeb1f21ff16dac821ad9", "score": "0.707797", "text": "def get_country_code(country_name):\n \n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n \n if country_name == 'Arab World':\n return 'ae'\n elif country_name == 'Yemen, Rep.':\n return 'ye'\n elif country_name == 'Libya':\n return 'ly'\n elif country_name == 'Egypt, Arab Rep.':\n return 'eg'\n elif country_name == 'Congo, Rep.':\n return 'cg'\n elif country_name == 'Congo, Dem. Rep.':\n return 'cd'\n elif country_name == 'Tanzania':\n return 'tz'\n elif country_name == 'Venezuela, RB':\n return 've'\n elif country_name == 'Sub-Saharan Africa':\n return 'eh'\n elif country_name == 'Bolivia':\n return 'bo'\n\n #if the country wasn't found, return none\n return None", "title": "" }, { "docid": "4bf2f25e0300b549740a56fd11c0ef6c", "score": "0.6978855", "text": "def get_country_code(country_name):\n\t# fix the country name to be fitting \n\tcountry_name = fix_country_name(country_name)\n\n\t# South Korea giving some issues\n\t\"\"\"if country_name == 'South Korea':\n\t\treturn 'kp'\"\"\"\n\tfor code, name in COUNTRIES.items():\n\t\tif name == country_name:\n\t\t\treturn code\n\t# If the country wasn't found, return None\n\treturn None", "title": "" }, { "docid": "5d1a8560e36905244893591e7be85da4", "score": "0.6944026", "text": "def get_country_code(country_name):\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n return None # Tricky! 
Indent once more and it will almost always fail", "title": "" }, { "docid": "c9152ffa9c985bbfee3d21cbaaf88e22", "score": "0.6843766", "text": "def get_country_code(country_name):\n\tfor code, name in COUNTRIES.items():\n\t\tif name == country_name:\n\t\t\treturn code\n\t#if country not found return none\n\treturn None", "title": "" }, { "docid": "95a563038ec414ac8fd382cc5f3a3bc1", "score": "0.67264116", "text": "def get_country_phone_code(country_iso):\n if not country_iso:\n return None\n country_iso = str(country_iso)\n for code, iso in phn.COUNTRY_CODE_TO_REGION_CODE.items():\n if country_iso.upper() in iso:\n return str(code)\n\n return None", "title": "" }, { "docid": "09f83b8320870b33fe9383fdb5e23612", "score": "0.6661298", "text": "def get_iso3_country_code(\n cls,\n country: str,\n use_live: bool = True,\n exception: Optional[ExceptionUpperBound] = None,\n ) -> Optional[str]:\n countriesdata = cls.countriesdata(use_live=use_live)\n countryupper = country.strip().upper()\n if countryupper.isupper():\n len_countryupper = len(countryupper)\n if len_countryupper == 3:\n if countryupper in countriesdata[\"countries\"]:\n return countryupper\n elif len_countryupper == 2:\n iso3 = countriesdata[\"iso2iso3\"].get(countryupper)\n if iso3 is not None:\n return iso3\n\n iso3 = countriesdata[\"countrynames2iso3\"].get(countryupper)\n if iso3 is not None:\n return iso3\n\n for candidate in cls.expand_countryname_abbrevs(countryupper):\n iso3 = countriesdata[\"countrynames2iso3\"].get(candidate)\n if iso3 is not None:\n return iso3\n elif re.search(r\"[\\u4e00-\\u9fff]+\", countryupper):\n for country in countriesdata[\"countries\"]:\n if (\n countriesdata[\"countries\"][country][\n \"#country+alt+i_zh+name+v_unterm\"\n ]\n == countryupper\n ):\n return country\n elif re.search(r\"[\\u0600-\\u06FF]+\", countryupper):\n for country in countriesdata[\"countries\"]:\n if (\n countriesdata[\"countries\"][country][\n \"#country+alt+i_ar+name+v_unterm\"\n ]\n == countryupper\n ):\n return country\n\n if exception is not None:\n raise exception\n return None", "title": "" }, { "docid": "b3532ffdac9a1a2869a35c8846f75858", "score": "0.64783436", "text": "def get_country(cntryCode):\n\n # Get the country string according to field-choice\n sCountry = choice_english(PROVENANCE_GEOGRAPHIC_COUNTRY, cntryCode).strip()\n sCountryAlt = sCountry + \" (the)\"\n # Walk all country codes\n for tplCountry in COUNTRY_CODES:\n # Check for country name or alternative country name\n if sCountry == tplCountry[1] or sCountryAlt == tplCountry[1]:\n # REturn the correct country name and code\n return (tplCountry[1], tplCountry[0])\n # Empty\n return (None, None)", "title": "" }, { "docid": "8de9a8b0c0d5c0e2cadf9e80132cda62", "score": "0.6430433", "text": "def extract_usa(address):\n # if last string in the address contains two isolated capital letters followed by 5 digits,\n # then the country is USA\n match = re.search(\"^[A-Z]{2}\\s\\d{5}\", address[-1].split(sep=', ')[-1])\n if match is None:\n return None\n else:\n return match.group()", "title": "" }, { "docid": "542e55cc1297a00cb0cf50724240b1a2", "score": "0.6392495", "text": "def get_iso_a3(country):\n try:\n result = pycountry.countries.search_fuzzy(country)\n return result[0].alpha_3\n except:\n return np.nan", "title": "" }, { "docid": "3fe9c19ca8e691230169e9a4bef851d2", "score": "0.6392236", "text": "def get_country_code(country_list, country):\r\n\r\n for item in country_list:\r\n \r\n if country == item[1]:\r\n \r\n return item[0]", "title": "" }, { "docid": 
"194d7b946695f324a0d5388fcb12e144", "score": "0.6362089", "text": "def get_full_country_name(country_abbrv):\n try:\n return pycountry.countries.get(alpha_3=country_abbrv).name.replace(\" \",\"\")\n except Exception as e:\n return country_abbrv", "title": "" }, { "docid": "d7719b598269ec478586b369cb16ee4e", "score": "0.6318916", "text": "def country_code(self) -> str:\n return pulumi.get(self, \"country_code\")", "title": "" }, { "docid": "d7719b598269ec478586b369cb16ee4e", "score": "0.6318916", "text": "def country_code(self) -> str:\n return pulumi.get(self, \"country_code\")", "title": "" }, { "docid": "d7719b598269ec478586b369cb16ee4e", "score": "0.6318916", "text": "def country_code(self) -> str:\n return pulumi.get(self, \"country_code\")", "title": "" }, { "docid": "d7719b598269ec478586b369cb16ee4e", "score": "0.6318916", "text": "def country_code(self) -> str:\n return pulumi.get(self, \"country_code\")", "title": "" }, { "docid": "d7719b598269ec478586b369cb16ee4e", "score": "0.6318916", "text": "def country_code(self) -> str:\n return pulumi.get(self, \"country_code\")", "title": "" }, { "docid": "24b70493f64ffa50bbe30de556b18eb4", "score": "0.63069016", "text": "def get_country_name(country_list, code):\r\n \r\n for item in country_list:\r\n \r\n if code == item[0]:\r\n \r\n return item[1]", "title": "" }, { "docid": "ccc8e06604d190b99b1a48181203cf97", "score": "0.63056284", "text": "def _get_continent_country_from_maxmind(self,):\n return self._get_key_value_from_maxmind('continent_code', 'country_iso_code')", "title": "" }, { "docid": "0d6c840b7baea51ef56b2a102d15478f", "score": "0.62866104", "text": "def province_code(name: str) -> str:\n\n if name == \"ALBERTA\":\n return \"AB\"\n elif name == \"BRITISH COLUMBIA\":\n return \"BC\"\n elif name == \"MANITOBA\":\n return \"MB\"\n elif name == \"NEW BRUNSWICK\":\n return \"NB\"\n elif name == \"NEWFOUNDLAND\":\n return \"NL\"\n elif name == \"NOVA SCOTIA\":\n return \"NS\"\n elif name == \"NORTHWEST TERRITORIES\":\n return \"NT\"\n elif name == \"NUNAVUT\":\n return \"NU\"\n elif name == \"ONTARIO\":\n return \"ON\"\n elif name == \"PRINCE EDWARD ISLAND\":\n return \"PE\"\n elif name == \"QUEBEC\":\n return \"QC\"\n elif name == \"SASKATCHEWAN\":\n return \"SK\"\n elif name == \"YUKON TERRITORY\":\n return \"YT\"\n else:\n return None", "title": "" }, { "docid": "f54ed6adb3626489e2b4b80a57812363", "score": "0.62350684", "text": "def _get_sub1code_sub1name_from_maxmind(self,):\n return self._get_key_value_from_maxmind('subdivision_1_iso_code', 'subdivision_1_name')", "title": "" }, { "docid": "37fc4ddbf432524395b28af2d2d0f58e", "score": "0.6233615", "text": "def get_country_id(*args):\n return Session.query(Country.id).filter(Country.code == args[0]).one()[0]", "title": "" }, { "docid": "a00d6601a66cd582cd7e43a9f46ad5c2", "score": "0.62115854", "text": "def country(self: Enum) -> Country:\n return Country[self.name[0:2]]", "title": "" }, { "docid": "f134b635b1578295e6487a6e75525b35", "score": "0.6193788", "text": "def country_data(code):\n for country in iban_data:\n if country.code == code:\n return country\n return None", "title": "" }, { "docid": "03aa3dea46c2233e04ac2ae675d67e7e", "score": "0.6158131", "text": "def get_country_name_from_iso3(\n cls,\n iso3: str,\n use_live: bool = True,\n exception: Optional[ExceptionUpperBound] = None,\n formal: bool = False,\n ) -> Optional[str]:\n countryinfo = cls.get_country_info_from_iso3(\n iso3, use_live=use_live, exception=exception\n )\n if countryinfo is not None:\n countryname = 
countryinfo.get(\"#country+name+override\")\n if countryname is not None:\n return countryname\n if formal:\n countryname = countryinfo.get(\n \"#country+formal+i_en+name+v_unterm\"\n )\n if countryname is None or countryname == \"\":\n countryname = countryinfo.get(\"#country+name+preferred\")\n return countryname\n else:\n return countryinfo.get(\"#country+name+preferred\")\n return None", "title": "" }, { "docid": "7b941f9056ac2d0f9130e2c3913567ba", "score": "0.6137109", "text": "def country_code(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"country_code\")", "title": "" }, { "docid": "7b941f9056ac2d0f9130e2c3913567ba", "score": "0.6137109", "text": "def country_code(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"country_code\")", "title": "" }, { "docid": "dfc4dd8bf64b26cf74eb27cd4bc62a07", "score": "0.6127986", "text": "def lookup_country_id(self):\n country_iso_code = self.vals.get('country_iso_code')\n if country_iso_code:\n country_id = self.env['res.country'].search(\n [('code', '=', str(country_iso_code))]).id\n if not country_id:\n self.error_log.append(\n ERROR_MESSAGE.get('country_error', UNKNOWN_ERROR).format(\n country_iso_code))\n else:\n self.vals.update(country_id=country_id)", "title": "" }, { "docid": "89995abe8f977feeb2f8b22ff47f6ceb", "score": "0.6098904", "text": "def getCountryID(code):\n\n\tif (code in countryCodes):\n\t\treturn countryCodes[code]\n\telse:\n\t\treturn 0", "title": "" }, { "docid": "6492dd9f74cec477d2ce7e9fe06be486", "score": "0.60827416", "text": "def country(self, code):\n if self._country.has_key(code):\n return self._country[code]\n return \"\"", "title": "" }, { "docid": "c1c2066f618516620fed6b313fd7a7ac", "score": "0.60555947", "text": "def convert_old_country_names(c: str) -> str:\n\n if c == \"Macedonia\":\n return \"North Macedonia\"\n\n if c == \"Czech Republic\":\n return \"Czechia\"\n\n return c", "title": "" }, { "docid": "5e50625973d43212c8a1af756adf5132", "score": "0.6052099", "text": "def get_country_code(self):\n return self.country_code", "title": "" }, { "docid": "b52200d0e0393b70c9a7413286ccc72c", "score": "0.6034447", "text": "def fullcode_to_partialcode(code):\n lang, country, group = BundleManager.get_locale_info_from_code(code)\n return \"%s_%s\" % (lang.lower(), country.upper())", "title": "" }, { "docid": "39f0ed3a00915c501d62b0b1d490d3ea", "score": "0.6013102", "text": "def country_name(country_code):\n locale = get_language()\n\n # product_details has no `es` regional information, so we us es-ES instead.\n if locale == 'es':\n locale = 'es-ES'\n\n try:\n return product_details.get_regions(locale)[country_code]\n except KeyError:\n return ''", "title": "" }, { "docid": "8866bc10c59354b667cda3957f16cf1b", "score": "0.5996875", "text": "def getCountry(self, country_name):\n try:\n return self.countries[country_name]\n except KeyError:\n return None", "title": "" }, { "docid": "9c717b3bc1d841fe3ff4fdec52b60668", "score": "0.59933394", "text": "def get_region_name(country_name: str):\n try:\n country_name = country_name.strip()\n country = pycountry.countries.get(name=country_name) or pycountry.countries.get(\n official_name=country_name) or pycountry.historic_countries.get(\n name=country_name)\n if country is not None:\n return convert_continent_code_to_continent_name(country_alpha2_to_continent_code(country.alpha_2))\n try:\n return countryinfo.countryinfo.CountryInfo(country_name=country_name).region()\n except KeyError:\n country = _find_country_by_iterate_all_relevant_countries(country, country_name)\n 
return convert_continent_code_to_continent_name(\n country_alpha2_to_continent_code(country.alpha_2))\n except Exception as error:\n return SPECIAL_COUNTIES_WHITE_LIST.get(country_name)", "title": "" }, { "docid": "1a33275c6e301bd8ff1bbd8b8e2aaed6", "score": "0.5982778", "text": "def get_country_code(self):\n\t\t\n\t\treturn self.country_code", "title": "" }, { "docid": "28bcb5776344b547325b891caed379cf", "score": "0.5976165", "text": "def nameForPostalCode(code):\n code = code.upper()\n for x in states:\n if x[1] == code:\n return x[0]\n return None", "title": "" }, { "docid": "cb76e2821e215bff81320d19623f924c", "score": "0.5941584", "text": "def getCountryLetters(code):\n\n\tfor key, value in countryCodes.items():\n\t\tif (value == code):\n\t\t\treturn key\n\n\treturn \"XX\"", "title": "" }, { "docid": "35452998d52ebb9758b78c8fd728624d", "score": "0.5889801", "text": "def get_country_name_from_iso2(\n cls,\n iso2: str,\n use_live: bool = True,\n exception: Optional[ExceptionUpperBound] = None,\n formal: bool = False,\n ) -> Optional[str]:\n iso3 = cls.get_iso3_from_iso2(\n iso2, use_live=use_live, exception=exception\n )\n if iso3 is not None:\n return cls.get_country_name_from_iso3(\n iso3, exception=exception, formal=formal\n )\n return None", "title": "" }, { "docid": "d3facf938338505ef4c0b314cfddb2d5", "score": "0.5888051", "text": "def map_country_alpha2_to_country_name():\n return {x.alpha_2: x.name for x in pycountry.countries}", "title": "" }, { "docid": "09ec8c105853dbf8659826a2c84a876d", "score": "0.58708787", "text": "def getAirportCountry(self,code):\n\n airport=self.airportdict[code]\n return airport.getAirportCountry()", "title": "" }, { "docid": "c6b7b1875278c405eb9905aa7bd3dd07", "score": "0.58599985", "text": "def convert_country_names_single(observation):\n countries_two_letters = ''\n country_count = 0\n try:\n countries = observation[\"object_origin\"].split(\"/\")\n for country in countries:\n c = pycountry.countries.get(name=country)\n if c == None:\n c = pycountry.countries.get(alpha_3=country)\n if c:\n if country_count == 0:\n country_count = 1\n countries_two_letters = c.alpha_2\n else:\n countries_two_letters = countries_two_letters + '/' + c.alpha_2\n else:\n if country_count == 0:\n country_count = 1\n countries_two_letters = c.alpha_2\n else:\n countries_two_letters = countries_two_letters + '/' + c.alpha_2\n observation[\"object_origin\"] = countries_two_letters\n except:\n observation[\"object_origin\"] = ''\n return", "title": "" }, { "docid": "4c578636ffe8c1c4d7ac5fc97a0e583e", "score": "0.5850871", "text": "def get_country_code(self):\n\n # return the value of country code \n return self.country_code", "title": "" }, { "docid": "da6fc7f27af71df386b205ae4246cbc9", "score": "0.5839458", "text": "def get_country_by_name(self, name):\n\n response = self.__get_collection('countries_project', 'countries').find_one({'country': name})\n\n if response is None:\n response={\"Message\": \"This country doesn't exist.\"}\n\n return response", "title": "" }, { "docid": "9097021b9604affef5cb35b64ac5a04c", "score": "0.58073705", "text": "def get_country_name_from_m49(\n cls,\n m49: int,\n use_live: bool = True,\n exception: Optional[ExceptionUpperBound] = None,\n formal: bool = False,\n ) -> Optional[str]:\n iso3 = cls.get_iso3_from_m49(\n m49, use_live=use_live, exception=exception\n )\n if iso3 is not None:\n return cls.get_country_name_from_iso3(\n iso3, exception=exception, formal=formal\n )\n return None", "title": "" }, { "docid": 
"5ab54ae5b860cfc8a0d4166bdc8bb96f", "score": "0.58042043", "text": "def get_country_index(gtin):\n assert int(gtin.replace(\"-\",\"\"))\n for icountry in countries:\n if gtin[3:].find(str(icountry)) == 0:\n return icountry\n return -1", "title": "" }, { "docid": "f7a29a30f74224dfa9e8a523a8c13784", "score": "0.5794086", "text": "def getCountryId(country_name: str):\n if os.stat(\"countrydata.json\").st_size == 0:\n scrapeCountryIds({\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8,cs;q=0.7,de;q=0.6',\n # 'cache-control': 'max-age=0',\n # 'sec-fetch-mode': 'navigate',\n # 'sec-fetch-site': 'none',\n 'referer' : \"https://www.google.com/\",\n # 'sec-fetch-user': '?1',\n # 'origin': 'https://www.solebox.com',\n # 'upgrade-insecure-requests': '1',\n # 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',\n })\n country_data = readFile(\"countrydata.json\")\n try:\n country_id = country_data[country_name]\n return country_id\n except:\n logMessage(\"ERROR\", \"Error getting country_id, check your country name in userdata.json!\")\n return None", "title": "" }, { "docid": "fa94587f6cf853561ce3770195ba457e", "score": "0.5783679", "text": "def get_language_code(self, country_name: str):\n\n country_name = country_name.lower()\n language_dict = {\n \"KR\": [\"korean\", \"korea\", \"kr\"],\n \"TW\": [\"taiwan\", \"taiwanese\", \"tw\"],\n \"CN\": [\"chinese\", \"china\", \"cn\"],\n \"JP\": [\"japanese\", \"japan\", \"jp\"],\n \"TH\": [\"thailand\", \"thai\", \"th\"],\n \"SG\": [\"singapore\", \"singa\", \"sg\", \"singaporean\"],\n \"PH\": [\"philippines\", \"filipino\", \"ph\"]\n }\n\n for code, language in language_dict.items():\n if country_name in language:\n return code\n\n return None", "title": "" }, { "docid": "8c32934000bcb4050bf74eac460aea3a", "score": "0.57641345", "text": "def profile_country_to_alpha3(cls, exam_profile):\n # Pearson requires ISO-3166 alpha3 codes, but we store as alpha2\n try:\n country = pycountry.countries.get(alpha_2=exam_profile.profile.country)\n except KeyError as exc:\n raise InvalidProfileDataException() from exc\n return country.alpha_3", "title": "" }, { "docid": "d831d9d733168d2cc380e1ac34919079", "score": "0.5741219", "text": "def get_country_code(self):\n\n return self.country_code", "title": "" }, { "docid": "d831d9d733168d2cc380e1ac34919079", "score": "0.5741219", "text": "def get_country_code(self):\n\n return self.country_code", "title": "" }, { "docid": "49000efdf488ae048460f34f1fd10314", "score": "0.57196295", "text": "def get_country_data(self, country):\r\n data = self.data['country']\r\n\r\n for content in data:\r\n if content['name'].lower() == country.lower():\r\n return content\r\n\r\n return \"0\"", "title": "" }, { "docid": "94728ba73f528365c86d4c6438312133", "score": "0.57067406", "text": "def get_country_from_ip(ip):\n ip = normalize_ip(ip)\n try:\n return reader.country(ip).country.iso_code\n # if we have disabled geoip support, reader should be None, so the\n # exception should be triggered\n except (geoip2.errors.AddressNotFoundError,\n geoip2.errors.GeoIP2Error, AttributeError):\n return '--'", "title": "" }, { "docid": "9121e11249df7a2645d0aa1880706d92", "score": "0.5689127", "text": "def revert_old_country_names(c: str) -> str:\n\n if c == \"North Macedonia\":\n return 
\"Macedonia\"\n\n if c == \"Czechia\":\n return \"Czech Republic\"\n\n return c", "title": "" }, { "docid": "868242fd5a4606b279a83a979eecfb3c", "score": "0.56876785", "text": "def libcbm_country(self):\n return libcbm_continent.countries[self.cbmcfs3_country.iso2_code]", "title": "" }, { "docid": "081db865e693d62f712e597f705a6259", "score": "0.5670446", "text": "def unidecode_country(country):\n country.name = unidecode.unidecode(country.name)\n country.capital = unidecode.unidecode(country.capital)\n country.neighbours = unidecode.unidecode(country.neighbours)\n country.language = unidecode.unidecode(country.language)\n country.time_zone = unidecode.unidecode(country.time_zone)\n country.government = unidecode.unidecode(country.government)\n\n return country", "title": "" }, { "docid": "3094bbeb4ee4f12c3af7a46a47e33330", "score": "0.5662897", "text": "def get_iso3_country_code_fuzzy(\n cls,\n country: str,\n use_live: bool = True,\n exception: Optional[ExceptionUpperBound] = None,\n min_chars: int = 5,\n ) -> Tuple[Optional[str], bool]:\n countriesdata = cls.countriesdata(use_live=use_live)\n country = country.strip()\n if not country.upper().isupper():\n return None, False\n\n iso3 = cls.get_iso3_country_code(country, use_live=use_live)\n # don't put exception param here as we don't want it to throw\n\n if iso3 is not None:\n return iso3, True\n\n # regex lookup\n for iso3, regex in countriesdata[\"aliases\"].items():\n index = re.search(regex, country.upper())\n if index is not None:\n return iso3, False\n\n if len(country) < min_chars:\n return None, False\n\n def remove_matching_from_list(wordlist, word_or_part):\n for word in wordlist:\n if word_or_part in word:\n wordlist.remove(word)\n if word_or_part == word:\n return 35\n return 17\n\n # fuzzy matching\n expanded_country_candidates = cls.expand_countryname_abbrevs(country)\n match_strength = 0\n matches = set()\n for countryname in sorted(countriesdata[\"countrynames2iso3\"]):\n for candidate in expanded_country_candidates:\n simplified_country, removed_words = cls.simplify_countryname(\n candidate\n )\n if simplified_country in countryname:\n words = get_words_in_sentence(countryname)\n new_match_strength = remove_matching_from_list(\n words, simplified_country\n )\n for word in removed_words:\n if word in countryname:\n remove_matching_from_list(words, word)\n new_match_strength += 4\n else:\n if word in cls.major_differentiators:\n new_match_strength -= 16\n else:\n new_match_strength -= 1\n for word in words:\n if word in cls.major_differentiators:\n new_match_strength -= 16\n else:\n new_match_strength -= 1\n iso3 = countriesdata[\"countrynames2iso3\"][countryname]\n if new_match_strength > match_strength:\n match_strength = new_match_strength\n matches = set()\n if new_match_strength == match_strength:\n matches.add(iso3)\n\n if len(matches) == 1 and match_strength > 16:\n return matches.pop(), False\n\n if exception is not None:\n raise exception\n return None, False", "title": "" }, { "docid": "98eb12bc7c8993d9074fd0164115f19b", "score": "0.56537086", "text": "def fix_country_name(country_name):\n\tif country_name == 'England':\n\t\treturn 'United Kingdom'\n\telif country_name == 'Wales':\n\t\treturn 'United Kingdom'\n\telif country_name == 'Scotland':\n\t\treturn 'United Kingdom'\n\telif country_name == 'Republic of Ireland':\n\t\treturn 'Ireland'\n\telif country_name == 'Northern Ireland':\n\t\treturn 'Ireland'\n\telif country_name == 'South Korea':\n\t\treturn 'Korea, Republic of'\n\telif country_name == 'Democratic 
Republic of Congo':\n\t\treturn 'Congo, the Democratic Republic of the'\n\telif country_name == 'Republic of the Congo':\n\t\treturn 'Congo'\n\telif country_name == 'Venezuela':\n\t\treturn 'Venezuela, Bolivarian Republic of'\n\telif country_name == 'The Gambia':\n\t\treturn 'Gambia'\n\telif country_name == 'Russia':\n\t\treturn 'Russian Federation'\n\telif country_name == 'Iran':\n\t\treturn 'Iran, Islamic Republic of'\n\telse:\n\t\treturn country_name", "title": "" }, { "docid": "573e0348fea533559aa462026d935c1d", "score": "0.5648368", "text": "def country_data(country_name: str):\n raw_data = _get_data(CONFIRMED_URL)\n return _country_from_df(country_name, raw_data)", "title": "" }, { "docid": "fdb3ecce33c1be74678b9dc4cec9df60", "score": "0.561259", "text": "def test_extract_exchange_code_from_full_name1(self) -> None:\n extracted_exchange = (\n iimibs.IbSymbolUniverse._extract_exchange_code_from_full_name(\n \"What a great (NAME)\"\n )\n )\n self.assert_equal(extracted_exchange, \"NAME\")", "title": "" }, { "docid": "279dd01f8b18a8b4db491930ed3b44d4", "score": "0.56002903", "text": "def country(self) -> pulumi.Output[Optional['outputs.NamedLocationCountry']]:\n return pulumi.get(self, \"country\")", "title": "" }, { "docid": "4e439dd41a929aa676f7e1c0bfaa1eb4", "score": "0.55991495", "text": "def country_subdivision(self):\n if self.state_or_territory:\n match = ISO_3166_SUBDIVISION_RE.match(self.state_or_territory)\n if match:\n return match.group(1, 2)\n return (None, None)", "title": "" }, { "docid": "3f917595f3a64827e649aebe81333d48", "score": "0.55983216", "text": "def _querycountry(self, name):\n return self.db.query(u\"\"\"SELECT a.*, 'country' as 'ltype', c.population\n FROM allcountries as a\n INNER JOIN alternatenames as b ON a.geonameid=b.geonameid\n INNER JOIN allcities as c ON a.geonameid=c.id\n WHERE\n (country=? OR b.alternatename=?) 
LIMIT 1\"\"\", (name, name))", "title": "" }, { "docid": "24fde4de8749499ebb357d3fdcfa9bba", "score": "0.5571847", "text": "def check_country(affil_text: str):\n for region in SUB_REGION:\n for sub_region in SUB_REGION[region]:\n if sub_region in affil_text.lower():\n return region\n\n return \"\"", "title": "" }, { "docid": "cd983860a5ee988d74465fa34b1c272c", "score": "0.55667764", "text": "def get_location_code(location: str, validate_location: bool = False) -> {str, None}:\n import pandas as pd\n df = pd.read_csv(\"india_country_code.csv\")\n\n df_loc = df[df.apply(lambda row: location.lower() in row[\"City name\"].lower(), axis=1)]\n if validate_location:\n if len(df_loc) != 0:\n return True\n else:\n return False\n return df_loc.iloc[0][\"Airport Code\"]", "title": "" }, { "docid": "3d92b5e7194ea135bc968fa705d134b4", "score": "0.5533316", "text": "def _find_codename(self, html):\r\n # Codenames may contain HTML escape characters, and the wordlist\r\n # contains various symbols.\r\n codename_re = r'<strong id=\"codename\">(?P<codename>[a-z0-9 &#;?:=@_.*+()\\'\"$%!-]+)</strong>'\r\n codename_match = re.search(codename_re, html)\r\n self.assertIsNotNone(codename_match)\r\n return codename_match.group('codename')", "title": "" }, { "docid": "7e04c810a8fdff4ccd21213698bc39e3", "score": "0.5528767", "text": "def issuer_country_code(self) -> str:\n return self.__issuer_country_code", "title": "" }, { "docid": "2911958df497749ec9269a0d3c0b3272", "score": "0.5523851", "text": "def country(self) -> Optional[pulumi.Input['NamedLocationCountryArgs']]:\n return pulumi.get(self, \"country\")", "title": "" }, { "docid": "2911958df497749ec9269a0d3c0b3272", "score": "0.5523851", "text": "def country(self) -> Optional[pulumi.Input['NamedLocationCountryArgs']]:\n return pulumi.get(self, \"country\")", "title": "" }, { "docid": "986dc63a7b86b62cf405c52d0e55b14f", "score": "0.55130315", "text": "def profile_phone_number_to_country_code(cls, exam_profile):\n phone_number = cls._parse_phone_number(exam_profile.profile.phone_number)\n return str(phone_number.country_code)", "title": "" }, { "docid": "58e8d16ff58b6a35401c6e4abaaf3043", "score": "0.5491906", "text": "def _get_language_code(language):\n code = language.get('alpha2')\n if not code:\n code = language.get('alpha3-b')\n return code", "title": "" }, { "docid": "75295048427a6591146b1f6ff201f651", "score": "0.54770064", "text": "def get_country_info_from_iso3(\n cls,\n iso3: str,\n use_live: bool = True,\n exception: Optional[ExceptionUpperBound] = None,\n ) -> Optional[Dict[str, str]]:\n countriesdata = cls.countriesdata(use_live=use_live)\n country = countriesdata[\"countries\"].get(iso3.upper())\n if country is not None:\n return country\n\n if exception is not None:\n raise exception\n return None", "title": "" }, { "docid": "6604ac40f3f10deffcc7748533a9eabb", "score": "0.54733974", "text": "def countryName2TwoLetter(countryName):\n \n if (countryName == 'Albania') or (countryName == 'Albanien'):\n return 'AL'\n elif (countryName == 'Austria') or (countryName == 'Oesterreich'):\n return 'AT'\n elif (countryName == 'Algeria') or (countryName == 'Algerien'):\n return 'DZ'\n elif (countryName == 'Belgium') or (countryName == 'Belgien'):\n return 'BE'\n elif (countryName == 'Bulgaria') or (countryName == 'Bulgarien'):\n return 'BG'\n elif (countryName == 'Bosnia Herzegovina') or (countryName == 'Bosnien Herzegowina'):\n return 'BA'#\n elif (countryName == 'Weissrussland') or (countryName == 'Belarus'):\n return 'BY'\n elif (countryName == 
'Switzerland') or (countryName == 'Schweiz'):\n return 'CH'\n elif (countryName == 'Cyprus') or (countryName == 'Zypern'):\n return 'CY'\n elif (countryName == 'Czech Republic') or (countryName == 'Tschechien'):\n return 'CZ'\n elif (countryName == 'Germany') or (countryName == 'Deutschland'):\n return 'DE'\n elif (countryName == 'Denmark') or (countryName == 'Daenemark'):\n return 'DK'\n elif (countryName == 'Mediteranian') or (countryName == 'Mittelmeer'):\n return 'DZ'\n elif (countryName == 'Estonia') or (countryName == 'Estland'):\n return 'EE'\n elif (countryName == 'Spain') or (countryName == 'Spanien'):\n return 'ES'\n elif (countryName == 'Finland') or (countryName == 'Finnland'):\n return 'FI'\n elif (countryName == 'France') or (countryName == 'Frankreich'):\n return 'FR'\n elif (countryName == 'Greece') or (countryName == 'Griechenland'):\n return 'GR'\n elif (countryName == 'Croatia') or (countryName == 'Kroatien'):\n return 'HR'\n elif (countryName == 'Hungary') or (countryName == 'Ungarn'):\n return 'HU'\n elif (countryName == 'Ireland') or (countryName == 'Irland'):\n return 'IE'\n elif (countryName == 'Italy') or (countryName == 'Italien'):\n return 'IT'\n elif (countryName == 'Litauen') or (countryName == 'Lithuania'):\n return 'LT'\n elif (countryName == 'Luxemburg'):\n return 'LU'\n elif (countryName == 'Lettland') or (countryName == 'Latvia'):\n return 'LV'\n elif (countryName == 'Libyen') or (countryName == 'Lybia'):\n return 'LY'\n elif (countryName == 'Malta'):\n return 'MT'\n elif (countryName == 'Moldavia') or (countryName == 'Moldavien'):\n return 'MD'\n elif (countryName == 'Nordmazedonien') or (countryName == 'Mazedonia') or (countryName == 'North Mazedonia'):\n return 'MK'\n elif (countryName == 'Holland') or (countryName == 'Netherlands'):\n return 'NL'\n elif (countryName == 'Norwegen') or (countryName == 'Norway') :\n return 'NO'\n elif (countryName == 'Polen') or (countryName == 'Poland'):\n return 'PL'\n elif (countryName == 'Portugal'):\n return 'PT'\n elif (countryName == 'Romania') or (countryName == 'Rumaenien'):\n return 'RO'\n elif (countryName == 'Serbia') or (countryName == 'Serbien'):\n return 'RS'\n elif (countryName == 'Russia') or (countryName == 'Russland'):\n return 'RU'\n elif (countryName == 'Sweeden') or (countryName == 'Schweden'):\n return 'SE'\n elif (countryName == 'Slovenia') or (countryName == 'Slowenien'):\n return 'SI'\n elif (countryName == 'Slovakia') or (countryName == 'Slovakei'):\n return 'SK'\n elif (countryName == 'Turkey') or (countryName == 'Tuerkei'):\n return 'TR'\n elif (countryName == 'Ukrain') or (countryName == 'Ukraine'):\n return 'UA'\n elif (countryName == 'United Kingdom') or (countryName == 'England'):\n return 'GB' # \n \n \n return ''", "title": "" }, { "docid": "ecb57657e3a9255ad3897b092ce95160", "score": "0.5456737", "text": "def country(self):\r\n return self._ensureSet('_country', '__substg1.0_3A26')", "title": "" }, { "docid": "4d8d1c27fb1b3f7c4d7800f733435b79", "score": "0.54486823", "text": "def test_extract_exchange_code_from_full_name3(self) -> None:\n extracted_exchange = (\n iimibs.IbSymbolUniverse._extract_exchange_code_from_full_name(\n \"What a great (Name)\"\n )\n )\n self.assertIsNone(extracted_exchange)", "title": "" }, { "docid": "afe5da8d93d697d9af12ce1c318d2b1a", "score": "0.54436314", "text": "def partialcode_to_fullcode(code, group):\n lang, country = BundleManager.get_locale_info_from_code(code)\n return \"%s_%s_%s\" % (lang.lower(), country.upper(), group.upper())", "title": "" }, { 
"docid": "b1ce1faef47db2b41ace0316dfec6341", "score": "0.543231", "text": "def get_country(self):\n data = Country.objects.get(country_id=self.country_id)\n if data:\n return data\n return None", "title": "" }, { "docid": "a1a0ce3e1bca397c004ad0460db9b8e9", "score": "0.53906894", "text": "def get_city_country(city, country):\n city_country = city + \", \" + country\n return city_country.title()", "title": "" }, { "docid": "18069c783ee4056ee54762b303d21f37", "score": "0.5388367", "text": "def get_iso(wiki_code: str) -> str:\n assert wiki2iso is not None, \"call 'initialize' first\"\n iso = wiki2iso.get(wiki_code, wiki_code)\n if len(iso) == 0:\n iso = wiki_code\n return iso", "title": "" }, { "docid": "5c3cea168af80e056b99f803b6ccea5d", "score": "0.53834003", "text": "def lookup(input_code: str) -> dict or None:\n # force convert input_code to string\n input_code = str(input_code)\n\n # select first 3 characters of input code\n prefix = input_code[:3]\n data = city_codes_data()\n\n # return a dict if the first 3 characters exist in our data\n if prefix in data.keys():\n return data[prefix]\n\n return None", "title": "" }, { "docid": "703dde5b3edbbd79c07511157625350e", "score": "0.5382228", "text": "def country(self):\n return elem_of_lst(self['countryid'],\n '{}/{}'.format(LIST_LOCATION, COUNTRY))", "title": "" }, { "docid": "ae6c2cd5dc574f9a351835640792b7d4", "score": "0.5372337", "text": "def GetOrderingCustomerCountryCode(settlement):\n return settlement.Counterparty().JurisdictionCountryCode()", "title": "" }, { "docid": "265fac2580dd7ae48541e358c79ba0a1", "score": "0.5371836", "text": "def acquirer_country_code(self):\n return self._acquirer_country_code", "title": "" }, { "docid": "a98bb7a76f1d703fe14f38bcc23b2648", "score": "0.5369186", "text": "def get_country_name(df,location):\n d = df[df.location == location]\n return d.country.values[0]", "title": "" }, { "docid": "05d6c0ccb4fff9d9e03658fda20bc914", "score": "0.5333277", "text": "def codeForState(stateName):\n snl = stateName.lower()\n for x in states:\n if x[0].lower() == snl:\n return x[1]\n return None", "title": "" }, { "docid": "09d02c769b1452078a0580790df08ad1", "score": "0.53276575", "text": "def get_language_iso_code(language_string):\n if 'ko' in language_string:\n language_code = 'ko'\n elif 'cn' in language_string:\n language_code = 'cn'\n elif 'jp' in language_string:\n language_code = 'jp'\n # Use English as default\n else:\n language_code = 'en'\n\n return language_code", "title": "" }, { "docid": "2ea040515fd3e62ac161a79e8e3fd55b", "score": "0.5326127", "text": "def get_country(self, country):\n if self.indexate:\n if self._by_country.has_key(country.lower()):\n return self._by_country[country.lower()]\n else:\n return Addresses([])\n else:\n result = [address for address in self._addresses\n if address.country.lower() == country.lower()]\n return Addresses(result)", "title": "" }, { "docid": "b64244d1fa5f7a350319839f25403ffc", "score": "0.532033", "text": "def get_short_code(party):\n return party.Free5()", "title": "" }, { "docid": "8cd39194ee5947b50a281cf41f68e042", "score": "0.53122747", "text": "def get_country_code(self, ip, default_country=None):\n\n ipnum = self._geoip_redis_aton(ip)\n if not ipnum:\n return default_country\n result = self.redis.zrangebyscore(\"geoip\", ipnum, 'inf',\n 0, 1, withscores=True)\n if not result:\n return default_country\n\n res, score = result[0]\n parts = res.split(\":\")\n if len(parts) != 3:\n return default_country\n country_code = parts[0]\n start_end = parts[2]\n if 
start_end == \"s\":\n if float(score) > ipnum:\n # We have the start of a new block and IP actually is not found\n return default_country\n return country_code", "title": "" }, { "docid": "7d83a9710fd337dad0153dffcc92dc50", "score": "0.5312024", "text": "def _get_code(self, code: str):\n # Get first 3 digits\n return code[:3]", "title": "" }, { "docid": "c85d298800688133d18a0c5556c923df", "score": "0.52908623", "text": "def countryCode(self) -> str:\n return self.__countryCode", "title": "" }, { "docid": "b01ee4e1186cd081331019bd118c6e85", "score": "0.52858514", "text": "def abbrev(self):\n return self._code", "title": "" }, { "docid": "d7be02a46a3007021b3ffd736b9826ad", "score": "0.52721494", "text": "def countryName(self) -> str:\n return self.__countryName", "title": "" }, { "docid": "14100c40363c41e530aa2f362089e4ad", "score": "0.5261032", "text": "def test_extract_exchange_code_from_full_name2(self) -> None:\n extracted_exchange = (\n iimibs.IbSymbolUniverse._extract_exchange_code_from_full_name(\"NAME\")\n )\n self.assert_equal(extracted_exchange, \"NAME\")", "title": "" }, { "docid": "b92023732442a5e124c8c81603c80431", "score": "0.52607465", "text": "def get_initial(name):\n initial = name[0]\n initial = unicodedata.normalize('NFD', initial).encode('ascii', 'ignore')\n return initial", "title": "" }, { "docid": "a9d0bcbe1ffc429cc091fc2e4bd704a7", "score": "0.5258602", "text": "def ca_postal_code():\r\n return bothify('?#? #?#').upper()", "title": "" }, { "docid": "fcf63cb0a2bfb858426913270925f33e", "score": "0.52446043", "text": "def primary_country_ric(self) -> str:\n return self.__primary_country_ric", "title": "" }, { "docid": "923f800d7ac556d0cb46013e1e62cbfa", "score": "0.5241262", "text": "def load_country_mask(country_name='United Kingdom'):\n # loop through the countries and extract the appropriate shapefile\n countries_shp = shpreader.natural_earth(resolution='10m',\n category='cultural',\n name='admin_0_countries')\n\n # search for matching shapefile\n country_shapely = None\n for country in shpreader.Reader(countries_shp).records():\n if country.attributes['NAME_LONG'] == country_name:\n logging.info(f'Found country mask: {country_name}')\n country_shapely = country.geometry\n return country_shapely", "title": "" } ]
2507fb0156b112837b29dfc6d8f88fb9
Return dict of key/value pairs vs. list of key/value dicts.
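The query above contrasts two shapes for the same mapping: a single dict of key/value pairs versus a list of one-pair dicts (the [{"Key": ..., "Value": ...}] layout that several passages below convert). The following minimal sketch shows both shapes and a round trip between them; the helper names dict_to_kv_list and kv_list_to_dict are illustrative assumptions, not taken from any passage.

def dict_to_kv_list(pairs: dict) -> list:
    # {"env": "prod"} -> [{"Key": "env", "Value": "prod"}]
    return [{"Key": k, "Value": v} for k, v in pairs.items()]

def kv_list_to_dict(items: list) -> dict:
    # [{"Key": "env", "Value": "prod"}] -> {"env": "prod"}
    return {item["Key"]: item["Value"] for item in items}

if __name__ == "__main__":
    tags = {"env": "prod", "team": "data"}
    assert kv_list_to_dict(dict_to_kv_list(tags)) == tags

The round trip is lossless as long as keys are unique, which is why the flat dict is usually the more convenient working form.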
[ { "docid": "894e77ddacf4f1ae3f5e7a164877e7a2", "score": "0.0", "text": "def get_tag_dict_for_resource(self, arn: str) -> Dict[str, str]:\n result = {}\n if self.has_tags(arn):\n for key, val in self.tags[arn].items():\n result[key] = val\n return result # type: ignore", "title": "" } ]
[ { "docid": "cd12463d85324a2d7adbeaab7ab1b347", "score": "0.6692818", "text": "def get_dicts(self) -> List[Dict]:\n return deepcopy(self.value.dicts)", "title": "" }, { "docid": "af9a8f1e84754210a207676b1019ed6c", "score": "0.63920045", "text": "def list_of_dicts_gen_singleval(d):\n return [{key_header : k, value_headers: v} for k, v in d.items()]", "title": "" }, { "docid": "2061a779ac018c973e827b6def827f2a", "score": "0.63196343", "text": "def to_std_dicts(value):\n if isinstance(value, dict):\n return {k: to_std_dicts(v) for k, v in value.items()}\n elif isinstance(value, list):\n return [to_std_dicts(v) for v in value]\n else:\n return value", "title": "" }, { "docid": "4f8046df6376bcc859138972e1e27819", "score": "0.62178016", "text": "def as_dict(self):\n return {item: self[item] for item in self.as_list}", "title": "" }, { "docid": "02e7be841a35975c572374e6082adb10", "score": "0.6194739", "text": "def listtodict(keys_list, values_list):\n \n if len(keys_list)!= len(values_list):\n print('Error: keys_list and values_list must have the same length!\\n')\n raise Exception(ValueError)\n \n return {keys_list[i]:values_list[i] for i in range(len(keys_list))}", "title": "" }, { "docid": "c6bda85400261921634131b6b255447e", "score": "0.61923033", "text": "def dictdiff(first, second):\r\n\toutput = {}\r\n\tall_keys = first.keys() | second.keys()\r\n\r\n\tfor key in all_keys:\r\n\t\tif first.get(key)!= second.get(key):\r\n\t\t\toutput[key] = [first.get(key), second.get(key)] # storing the values as list\r\n\treturn output", "title": "" }, { "docid": "66b32eb7fd159bafe4343fd6e53f8029", "score": "0.6150648", "text": "def serialize_to_list_of_dicts(values):\n return [dict(value) for value in values]", "title": "" }, { "docid": "6a3cee4ad72fc19ad0b7dff57c824356", "score": "0.6149655", "text": "def t_dict(d: Union[List, Dict]) -> Union[Dict, List]:\n if isinstance(d, list):\n return [t_dict(i) if isinstance(i, (dict, list)) else i for i in d]\n return {\n _no_camels(a): t_dict(b) if isinstance(b, (dict, list)) else b\n for a, b in d.items()\n }", "title": "" }, { "docid": "debd0c8649e879e43b2051fce4fafece", "score": "0.61416364", "text": "def _get_updated_values(before_values, after_values):\n assert before_values.keys() == after_values.keys()\n return dict([(k, [before_values[k], after_values[k]])\n for k in before_values.keys()\n if before_values[k] != after_values[k]])", "title": "" }, { "docid": "5a01f1d5c13fd84c7a1fc28d14a1d787", "score": "0.6119714", "text": "def dict_of_lists(self):\r\n result = {}\r\n for key, value in self.iteritems():\r\n if key in result:\r\n result[key].append(value)\r\n else:\r\n result[key] = [value]\r\n return result", "title": "" }, { "docid": "cc8b9135dfc188474349935246bbf6ef", "score": "0.6092104", "text": "def list_of_dicts_gen_dict(d):\n out_dcts = []\n for k, v in d.items():\n dct = {key_header : k}\n dct.update(v)\n out_dcts.append(dct)\n return out_dcts", "title": "" }, { "docid": "85832f93b473f6b7b474463d22931fad", "score": "0.60554206", "text": "def _list2dict(kv):\n\treturn dict((k.upper(), v) for k, v in imaplib2.group(kv))", "title": "" }, { "docid": "def844735a26c2c2358663516a50e265", "score": "0.6032115", "text": "def toDict(self):\n return dict((k, v.toDict()) for k, v in self.iteritems())", "title": "" }, { "docid": "a3d35f1869662bf81288b485338bfc72", "score": "0.60200787", "text": "def format_dict_diff(old_dict: dict, updated_values: dict) -> dict(tuple()):\n return [\n f\"{str(key).title()}: {old_dict[key]} -> {updated_values[key]}\"\n for key in 
updated_values.keys()\n ]", "title": "" }, { "docid": "d1a71cfe4342438ab59d2ee0d82d009e", "score": "0.6018386", "text": "def list_of_dicts_gen_list(d):\n out_dcts = []\n for k, v in d.items():\n dct = dict(((vh, val) for vh, val in izip(value_headers, v)))\n dct[key_header] = k\n out_dcts.append(dct)\n return out_dcts", "title": "" }, { "docid": "7a664af6082c8efa7d8dafcfc54cc777", "score": "0.6016508", "text": "def __call__(self, values):\n return [\n {self.key: val}\n for val in values\n ]", "title": "" }, { "docid": "278df0112e73be4d39318a69de66412f", "score": "0.60144085", "text": "def dict_of_lists(self):\n result = {}\n for key, value in self._items:\n if key in result:\n result[key].append(value)\n else:\n result[key] = [value]\n return result", "title": "" }, { "docid": "b95aa8ce75e483a4c113e64b00850fbb", "score": "0.5996946", "text": "def mixed(self):\r\n result = {}\r\n multi = {}\r\n for key, value in self.iteritems():\r\n if key in result:\r\n # We do this to not clobber any lists that are\r\n # *actual* values in this dictionary:\r\n if key in multi:\r\n result[key].append(value)\r\n else:\r\n result[key] = [result[key], value]\r\n multi[key] = None\r\n else:\r\n result[key] = value\r\n return result", "title": "" }, { "docid": "f37bbda9afe6be321d99c4b9c0852d4f", "score": "0.5957955", "text": "def from_pairs(pairs):\n return dict(pairs)", "title": "" }, { "docid": "298afc87ee9f964a22494ba7f0c1ce19", "score": "0.5954348", "text": "def dict(self):\n return {key: value.dict for key, value in self.items()}", "title": "" }, { "docid": "a4b8672c94bad56e485a4c0b0643600f", "score": "0.5944411", "text": "def constant_dict(\n keys: typing.Iterable[Key], value: Value) -> typing.Dict[Key, Value]:\n return {key: value for key in keys}", "title": "" }, { "docid": "d8a9f8c85ddabbde5a6b885f3e70700e", "score": "0.59292924", "text": "def convert_tags_list_to_dict(tags_list):\n return {tag['Key']: tag['Value'] for (tag) in tags_list}", "title": "" }, { "docid": "485aec385e2897116d132770b34c3cb3", "score": "0.59226596", "text": "def to_dict(self):\n return self._simplify_keyvalue(self.items(), dict)", "title": "" }, { "docid": "c48aa6793480d4854703577c77b27af9", "score": "0.5919699", "text": "def return_dictionary_list(lst_of_tuples):\r\n orDict = defaultdict(list)\r\n # iterating over list of tuples\r\n for key, val in lst_of_tuples:\r\n orDict[key].append(val)\r\n return orDict", "title": "" }, { "docid": "2f1fe0c93709cd613cbd19550e42aa5a", "score": "0.591416", "text": "def dict_raise_on_duplicates(ordered_pairs):\n d = {}\n for k, v in ordered_pairs:\n if k in d:\n if type(d[k]) is list:\n d[k].append(v)\n else:\n d[k] = [d[k],v]\n else:\n d[k] = v\n return d", "title": "" }, { "docid": "1ca17e0e8b86e901543b76e926492dbd", "score": "0.5896904", "text": "def from_two_lists_to_dict(self, list_key, list_value):\n dict_result = {}\n for pos, k in enumerate(list_key):\n dict_result[k] = list_value[pos]\n return dict_result", "title": "" }, { "docid": "2f762b423229d6767a3f9c673dd91fce", "score": "0.5892955", "text": "def return_dictionary_list(lst_of_tuples):\n orDict = defaultdict(list)\n # iterating over list of tuples\n for key, val in lst_of_tuples:\n orDict[key].append(val)\n return orDict", "title": "" }, { "docid": "51db00ee6b380fa0ad5ab1a0fe180d78", "score": "0.58852553", "text": "def lists(self):\r\n return super(MultiValueDict, self).items()", "title": "" }, { "docid": "fbf2a11e1dba94c2c15bfc2a4a7be85a", "score": "0.58467925", "text": "def mixed(self):\n result = {}\n multi = {}\n for key, 
value in self._items:\n if key in result:\n # We do this to not clobber any lists that are\n # *actual* values in this dictionary:\n if key in multi:\n result[key].append(value)\n else:\n result[key] = [result[key], value]\n multi[key] = None\n else:\n result[key] = value\n return result", "title": "" }, { "docid": "f004e6165845a7c8236ef48eae747294", "score": "0.5814294", "text": "def item_list(self) -> List:\n return [(key, AttrDict(value) if isinstance(value, dict) else value) for key, value in super().items()]", "title": "" }, { "docid": "7bf8e996066fa312372efb90ad9535bf", "score": "0.58106565", "text": "def make_dict(keys,values):\n return {k : v for (k, v) in zip(keys, values)}", "title": "" }, { "docid": "61ed7c1c6106d4ed3684cf2bfa3b49f0", "score": "0.5796273", "text": "def _items_to_dict(items):\n return dict((key, value) for key, value in items)", "title": "" }, { "docid": "d776b3d513a81232be4caf7928d1efc1", "score": "0.57576513", "text": "def build_data_dict(self):\n return {d.key: d.value for d in self.data}", "title": "" }, { "docid": "32202f234a3241eb806ae1e5b52f95e0", "score": "0.57471585", "text": "def __call__(self, *args, **kwargs):\n\n keys = [key(*args, **kwargs) for key in self._keys]\n values = [value(*args, **kwargs) for value in self._values]\n return dict(zip(keys, values))", "title": "" }, { "docid": "68ec943032a888079b79b4227894d08f", "score": "0.57368404", "text": "def _mt_asdict(self):\n newdict = {}\n for key in self._fields:\n value = getattr(self, key)\n if ismutabletuple(value):\n newdict[key] = value._asdict()\n else:\n newdict[key] = value\n return newdict", "title": "" }, { "docid": "0e8d69e62f5f6411a24719b562ef1c50", "score": "0.5727279", "text": "def to_dict(self):\n return {key: value for (key, value) in self.items()}", "title": "" }, { "docid": "0e8d69e62f5f6411a24719b562ef1c50", "score": "0.5727279", "text": "def to_dict(self):\n return {key: value for (key, value) in self.items()}", "title": "" }, { "docid": "5ba1219d2fdcee28635666a70115c096", "score": "0.57261115", "text": "def list_to_multivalue_dict(seq: Iterable, key: Callable[[Any], Any]) -> Dict[Any, List[Any]]:\n dict = defaultdict(list)\n for item in seq:\n dict[key(item)].append(item)\n return dict", "title": "" }, { "docid": "0f0f7cfa7941d2db5f68eb3a5618ab52", "score": "0.57165176", "text": "def to_dict(res, keys):\n if type(res) == dict:\n return res\n return [dict(zip(keys, r)) for r in res]", "title": "" }, { "docid": "9f5eb5c16ae9063e577c9f239b1d8991", "score": "0.5715124", "text": "def values(dct_or_lst):\n if type(dct_or_lst) is dict:\n return dct_or_lst.values()\n else:\n return list(dct_or_lst)", "title": "" }, { "docid": "a3d25f98fbc3761ba8aaa92668b79714", "score": "0.5712102", "text": "def _switch_keys_and_values(\n key_value: Dict[Hashable, int]\n) -> Dict[int, List[Hashable]]:\n value_key = {}\n for key, value in key_value.items():\n if value not in value_key:\n value_key[value] = [key]\n else:\n value_key[value].append(key)\n return value_key", "title": "" }, { "docid": "74353022f470aa52d07a9ed87c03ef97", "score": "0.57056844", "text": "def invert_and_sort(key_to_value: Dict[object, object]) -> Dict[object, list]:\n value = []\n for key in key_to_value:\n for item in key_to_value[key]:\n value.append(item) \n invert = {}\n for item in value:\n invert[item] = []\n for key in key_to_value:\n if item in key_to_value[key]:\n invert[item].append(key)\n for key in invert:\n invert[key].sort()\n \n return invert", "title": "" }, { "docid": "b47f3c4cfd9a03172d1817055dd82ee4", "score": 
"0.57044697", "text": "def listdict2dictlist(LD):\n\n # Take intersection of keys\n keys = reduce(lambda x, y: x & y, (map(lambda d: d.keys(), LD)))\n return AttrDict({k: [dic[k] for dic in LD] for k in keys})", "title": "" }, { "docid": "66d350b7399ccdb0facb48b44060f974", "score": "0.5695348", "text": "def values_dict(self) -> typing.Dict:\n resp = {}\n for value in self._latest_values.values():\n resp.update(\n {\n value.param.name: {\n \"value\": value.value,\n \"unit\": value.param.unit,\n \"description\": value.param.description,\n }\n }\n )\n return resp", "title": "" }, { "docid": "9b9e27b5d140bd931221bc21cbbc9d42", "score": "0.5691249", "text": "def listDict_to_dictList(listDict):\n dictList = {}\n for cli in listDict:\n for k,v in cli.items():\n if k not in dictList.keys():\n dictList[k] = [v]\n else:\n dictList[k].append(v)\n return dictList", "title": "" }, { "docid": "0e7c117f82785556c325ccbfe83cbe2a", "score": "0.56904286", "text": "def _mt_orderedDict(self):\n newdict = OrderedDict()\n for key in self._fields:\n value = getattr(self, key)\n if ismutabletuple(value):\n newdict[key] = value.orderedDict()\n else:\n newdict[key] = value\n return newdict", "title": "" }, { "docid": "d44b9d5acb02e6bc052a7b94ba06afd2", "score": "0.56899625", "text": "def get_dict_values_list(d):\n return list(d.values())", "title": "" }, { "docid": "bfd804dcb75145cc49bfd73996b59711", "score": "0.5689046", "text": "def to_dict_list(t:[tuple]):\n d = c.defaultdict(list)\n for k, v in t:\n d[k].append(v)\n return d", "title": "" }, { "docid": "d6b79333732dade1c275501a4b4cae76", "score": "0.5686211", "text": "def values(self):\n keys = self.list\n values = extract(self.dict, keys)\n return values", "title": "" }, { "docid": "5a6fe74b5872b337e30c3cc38ffc18c6", "score": "0.5681003", "text": "def dict_list_to_list_dict(dict_list):\n list_dict = {}\n key_list = set(key for d in dict_list for key in d)\n for dictionary in dict_list:\n for key in key_list:\n val = dictionary.get(key)\n if key in list_dict:\n list_dict[key].append(val)\n else:\n list_dict[key] = [val]\n\n return list_dict", "title": "" }, { "docid": "0eb2fe96853669e9f179a3d0e31b62d2", "score": "0.5667635", "text": "def list_to_dictionary(team_value_list):\n\n dictionary = {}\n\n for team_value in team_value_list:\n dictionary[team_value.getname()] = team_value.getvalue()\n\n return dictionary", "title": "" }, { "docid": "06a72cde1c183ec1b56130015a50fe42", "score": "0.56625795", "text": "def transfer_dict(d1):\n values = []\n keys = d1.keys()\n for key in keys:\n data = d1[key]\n if isinstance(data, list):\n newlst = []\n for entry in data:\n if isinstance(entry, (int, float)):\n val = entry\n else:\n val = set()\n val.update(entry)\n # val = entry.copy()\n newlst.append(val)\n values = newlst\n elif isinstance(data, dict):\n newdic = transfer_dict(data)\n values = newdic\n\n ret = dict.fromkeys(keys, values)\n return ret", "title": "" }, { "docid": "e3126a3e6406b6b816b92d54ed2f45fe", "score": "0.5662438", "text": "def _(\n x: Mapping[str, Any],\n new: Optional[Iterable[str]] = None,\n _nested: bool = True\n) -> Union[List[str], Mapping[str, Any]]:\n if new is None:\n return list(x)\n return dict(zip(new, x.values()))", "title": "" }, { "docid": "66c5f48bc9b84ebc24b99588ca56ee67", "score": "0.5659523", "text": "def make_dict(key, values):\n return {k: v for k, v in zip(key, values)}", "title": "" }, { "docid": "b108eac24cdc17b02ada5dd3eae13c24", "score": "0.56474644", "text": "def list_of_dicts_to_dict_of_lists(list_):\n rv = dict()\n for item 
in list_:\n for key, value in item.items():\n if key not in rv:\n rv[key] = list()\n rv[key].append(value)\n return rv", "title": "" }, { "docid": "318c21c662f8baef00aff13837d4ef09", "score": "0.56413466", "text": "def values(self):\n d = {}\n for k, v in self.__dict__.items():\n if k[0] != '_':\n d[k] = v\n return d", "title": "" }, { "docid": "63ef0eb0ff629be87489503b0e6afbf5", "score": "0.5631166", "text": "def test_baseConvertToListOfPyDicts(self):\n actual = hf.convertToListOfPyDicts(self.inputList)\n expected = [\n {\n \"key1\": \"key1Val1\",\n \"key2\": \"key2Val1\",\n \"key3\": \"key3Val1\",\n \"key4\": \"key4Val1\"\n },\n {\n \"key1\": \"key1Val2\",\n \"key2\": \"key2Val2\",\n \"key3\": \"key3Val2\",\n \"key4\": \"key4Val2\"\n },\n {\n \"key1\": \"key1Val3\",\n \"key2\": \"key2Val3\",\n \"key3\": \"key3Val3\",\n \"key4\": \"key4Val3\"\n }\n ]\n self.assertEqual(actual, expected)", "title": "" }, { "docid": "dea874ce2a24ddfdf47332ccacddfd42", "score": "0.56237674", "text": "def check_for_duplicate_keys(\n ordered_pairs: typing.List[typing.Tuple[typing.Hashable, typing.Any]]\n) -> typing.Dict:\n dict_out: typing.Dict = {}\n for key, val in ordered_pairs:\n if key in dict_out:\n raise ValueError(f\"Duplicate key: {key}\")\n else:\n dict_out[key] = val\n return dict_out", "title": "" }, { "docid": "b3f4fd944541e4e02a9135dfaf5c914d", "score": "0.5620396", "text": "def dict(self) -> Dict[str, Any]:\n return {\"name\": self.name, \"items\": [i.dict() for i in self.items]}", "title": "" }, { "docid": "81340bed0e76ecf26b932093b1cbf72e", "score": "0.56170446", "text": "def get_difference(previous_list: List[T], current_list: List[T]) -> Dict[int, T]:\n diff_dict: Dict[int, T] = {i: new_value for i, new_value in\n enumerate(current_list) if new_value != previous_list[i]}\n return diff_dict", "title": "" }, { "docid": "357de452f77dc06eaa028bfc99f923ab", "score": "0.56157535", "text": "def test_baseGetValuesFromListOfDicts(self):\n actual = hf.getValuesFromListOfDicts(self.inputList, \"key1\")\n expected = [\"key1Val1\", \"key1Val2\", \"key1Val3\"]\n\n self.assertEqual(actual, expected)", "title": "" }, { "docid": "d9899773ee5670fc8c63d882e24d4e1a", "score": "0.5593165", "text": "def to_dict(self, flat=True):\r\n if flat:\r\n return dict(self.iteritems())\r\n return dict(self.lists())", "title": "" }, { "docid": "71602b483e538e6ef2aa741394f163c3", "score": "0.5586385", "text": "def pmutt_list_to_dict(pmutt_list, key='name'):\n return {getattr(obj, key): obj for obj in pmutt_list}", "title": "" }, { "docid": "2bdd50bd980b64fcdd44da540d68ccad", "score": "0.5583508", "text": "def zipdict(keys, vals):\n return dict(zip(keys, vals))", "title": "" }, { "docid": "122ee708610e5ff3e8a59f2ecb6807c1", "score": "0.5575299", "text": "def test_list_dictionaries(self):\n pass", "title": "" }, { "docid": "1a7b9077955e1da77420f9f581a137cc", "score": "0.5572506", "text": "def _dict(self, k=\"\", v=[]):\n # For example: \"<% for $i, $x in enumerate([1, 2, 3]): %>\",\n # \"$i, $x\" is mapped to {\"i\": 0, \"x\": 1}, {\"i\": 1, \"x\": 2}, ...\n # Nested tuples are not supported (e.g., \"($i, ($k, $v))\").\n k = [k.strip(\"$ \") for k in k.strip(\"()\").split(\",\")]\n return dict(zip(k, v if len(k) > 1 else [v]))", "title": "" }, { "docid": "b6bad2cfa980234bccc74b6d86663c7e", "score": "0.5563758", "text": "def compareDicts(new, old):\n diffs = {}\n for key, value in new.items():\n if key not in old:\n diffs[key] = (ADDED, value, None)\n elif old[key] != value:\n diffs[key] = (CHANGED, value, old[key])\n for key, 
value in old.items():\n if key not in new:\n diffs[key] = (REMOVED, None, value)\n return diffs", "title": "" }, { "docid": "12cc2f7767503d7e9691e60932556901", "score": "0.55629146", "text": "def as_dict(self):\n return self.entries(as_dict=True)", "title": "" }, { "docid": "438b694301da83a0cd2ed1fe24493a6c", "score": "0.55539", "text": "def to_map(self):\n return self._simplify_keyvalue(self.items(), self._new_empty_basic_map)", "title": "" }, { "docid": "86997b75a36ac04f356585def2ecb772", "score": "0.55533403", "text": "def items(self):\n return list(zip(self.keys(), self.values()))", "title": "" }, { "docid": "f02dd748df1629b0b76a6369367b7a04", "score": "0.5552461", "text": "def run(self):\n d = OrderedDict()\n d['keya'] = 'value1'\n d['keyb'] = 'value2'\n return [d]", "title": "" }, { "docid": "e9bf8f22c0cb3361c51aacfe78750e13", "score": "0.5551675", "text": "def serialize_dict(self) -> List[JsonDict]:\n return [d for _, d in self._serialize_pairs()]", "title": "" }, { "docid": "e67192f8769907af3da540b9dc610ea7", "score": "0.55410826", "text": "def dicts(\n keys: SearchStrategy[A],\n values: SearchStrategy[B],\n min_size: int = 0,\n max_size: int = None\n) -> SearchStrategy[Dict[A, B]]:\n return builds(\n Dict, dictionaries(keys, values, min_size=min_size, max_size=max_size)\n )", "title": "" }, { "docid": "ab9b70d5ce4b82d41f142c51a7d8561f", "score": "0.5534267", "text": "def _attrs_to_dict(self, attrs_list):\n attrs_dict = {}\n\n for key, value in attrs_list:\n attrs_dict[key] = value\n\n return attrs_dict", "title": "" }, { "docid": "6af20e0715f25d35353d6b86c4ae61fe", "score": "0.5531687", "text": "def items(self):\n return [{item['Key']: item['Value']} for item in self._get_all_items()]", "title": "" }, { "docid": "883feaec2bfb57dbc3bd26f2f6169f90", "score": "0.5527669", "text": "def lists2dict(list1, list2):\n zipped_list = zip(list1, list2)\n dicted_zip = dict(zipped_list)\n return dicted_zip", "title": "" }, { "docid": "680af2626e1952ab5deb58801a8a806d", "score": "0.55201685", "text": "def items(self):\n keys = self.list\n values = extract(self.dict,keys)\n return tuples(keys,values)", "title": "" }, { "docid": "ab1853623a4ae012160cdf5ad5043e99", "score": "0.5497273", "text": "def diff_dicts(original: Dict[str, Any], new: Dict[str, Any]) -> Dict[str, Any]:\n if not isinstance(original, Dict) or not isinstance(new, Dict):\n print(\"ERROR: %s or %s not Dict\" % (original, new))\n return {}\n\n diff = {}\n for key in new:\n # Values got from the GUI tend to be converted to strings.\n # Safest to presume they are floats.\n try:\n new[key] = float(new[key])\n except ValueError:\n pass\n except TypeError:\n pass\n\n value = new[key]\n if key in original:\n if value != original[key]:\n diff[key] = value\n else:\n # New key:value.\n # key did not exist in original.\n diff[key] = value\n\n return diff", "title": "" }, { "docid": "442baab701513f7158fdeb1daef6751a", "score": "0.5489727", "text": "def convert_facet_list_to_dict(facet_list: list, reverse: bool = False) -> dict:\n facet_dict = {}\n for i in range(0, len(facet_list)):\n if i % 2 == 0:\n facet_dict[facet_list[i]] = facet_list[i + 1]\n if reverse:\n rkeys = sorted(facet_dict, reverse=True)\n facet_dict_r = {}\n for k in rkeys:\n facet_dict_r[k] = facet_dict[k]\n return facet_dict_r\n else:\n return facet_dict", "title": "" }, { "docid": "8d2fa23421308f3d2af9857cb9a282d9", "score": "0.54879355", "text": "def to_dictionary(self, key=lambda x: x, value=lambda x: x):\n result = {}\n for i, e in enumerate(self):\n result[key(e)] = 
value(e)\n return result", "title": "" }, { "docid": "97d2afebde411f5edeade5d075805ae9", "score": "0.548658", "text": "def extract_keyvals(dictin, dictout):\n for key, value in dictin.iteritems():\n if isinstance(value, dict): # If value itself is dictionary\n extract_keyvals(value, dictout)\n elif isinstance(value, list): # If value itself is list\n for i in value:\n extract_keyvals(i, dictout)\n else:\n dictout[key] = value", "title": "" }, { "docid": "50c96eea35228a529bfc65717b388b13", "score": "0.5473614", "text": "def list2dict(keys, List):\n return mapl(lambda obj: dict(zip(keys, obj)), List)", "title": "" }, { "docid": "70f652cf34509245cdcce82cf6215f8b", "score": "0.54706407", "text": "def _get_dict_repr(self):\n dict_list = [ obj.get_dict_repr() for obj in self.req_objects ]\n\n return {self.url: dict_list}", "title": "" }, { "docid": "ce9d1449992917e9b8a805bd620ea96b", "score": "0.5458804", "text": "def make_dict(keys,values):\r\n dict = {}\r\n for key, value in zip(keys, values):\r\n dict[key] = value\r\n return dict", "title": "" }, { "docid": "eea2d38b942a4efa9b8d02ba5784e845", "score": "0.5454677", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in iteritems(self.types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result", "title": "" }, { "docid": "eea2d38b942a4efa9b8d02ba5784e845", "score": "0.5454677", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in iteritems(self.types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result", "title": "" }, { "docid": "eea2d38b942a4efa9b8d02ba5784e845", "score": "0.5454677", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in iteritems(self.types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result", "title": "" }, { "docid": "eea2d38b942a4efa9b8d02ba5784e845", "score": "0.5454677", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in iteritems(self.types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result", "title": "" }, { "docid": 
"0ee3b6ac875ac0f31aa1a743cec62081", "score": "0.5452132", "text": "def dict_from_har(j):\n return {x[\"name\"]: x[\"value\"] for x in j}", "title": "" }, { "docid": "7798eb2b7a2780953e8a6bb53af9cbaf", "score": "0.5448114", "text": "def __getstate__(self):\n values = {}\n for field, value in self._values.iteritems():\n values[field.name] = value\n \n return values", "title": "" }, { "docid": "71668e4f6fabeff229cf3619a05ebc7c", "score": "0.5443344", "text": "def standard_dict_group(data):\n d = {}\n for key, value in data:\n d.setdefault(key, []).append(value)\n return d", "title": "" }, { "docid": "85856d293ffa3d96d92368e4ba893282", "score": "0.54359466", "text": "def unpack_value_lists(some_dict):\n return ((key, v) for key in some_dict for v in some_dict[key])", "title": "" }, { "docid": "c04fb03cef999813df9873d8004031d5", "score": "0.5435412", "text": "def iterdicts(rec):\n return (dict(zip(*(rec.dtype.names, ii))) for ii in rec.ravel())", "title": "" }, { "docid": "9dd4d5bead344783d07e51954a3d915e", "score": "0.5433879", "text": "def lists2dict(list1, list2):\n\n # Zip lists: zipped_lists\n zipped_lists = zip(list1, list2)\n\n # Create a dictionary: rs_dict\n rs_dict = dict(zipped_lists)\n\n # Return the dictionary\n return rs_dict", "title": "" }, { "docid": "9dd4d5bead344783d07e51954a3d915e", "score": "0.5433879", "text": "def lists2dict(list1, list2):\n\n # Zip lists: zipped_lists\n zipped_lists = zip(list1, list2)\n\n # Create a dictionary: rs_dict\n rs_dict = dict(zipped_lists)\n\n # Return the dictionary\n return rs_dict", "title": "" }, { "docid": "e2cb9f67921fc074a7329b139d6cbff6", "score": "0.5432335", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.attr_types):\n correct_attr = self.attr_map[attr]\n value = getattr(self, correct_attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result", "title": "" }, { "docid": "2ecb393bfb573579986c1473fd1d00f4", "score": "0.54319715", "text": "def dict(self):\n newdict = OrderedDict()\n for entry in self:\n this_entry = self[entry]\n if isinstance(this_entry, Section):\n this_entry = this_entry.dict()\n elif isinstance(this_entry, list):\n # create a copy rather than a reference\n this_entry = list(this_entry)\n elif isinstance(this_entry, tuple):\n # create a copy rather than a reference\n this_entry = tuple(this_entry)\n newdict[entry] = this_entry\n return newdict", "title": "" }, { "docid": "9819a816467254e2daa7de868f96555d", "score": "0.5429884", "text": "def to_dict(self):\n\n list_ = self._state.get_names('all')\n\n data = {}\n for key in list_:\n value = self.attr_to_dict(key)\n if hasattr(value, 'to_dict'):\n value = value.to_dict() # recursive call\n elif (key in [f.name for f in self._state.get_field_by_attribute('iscollection')]):\n #if self.key is a list, this needs special attention. 
It does\n #not have a to_dict like OrderedCollection does!\n vals = []\n for obj in value:\n try:\n obj_type = '{0.__module__}.{0.__class__.__name__}'.format(obj)\n except AttributeError:\n obj_type = '{0.__class__.__name__}'.format(obj)\n _id=None\n if hasattr(obj, 'id'):\n _id= str(obj.id)\n else:\n _id= str(id(obj))\n val = {'obj_type': obj_type, 'id': _id}\n vals.append(val)\n\n value = vals\n\n if value is not None:\n # some issue in colander monkey patch and the Wind schema\n # if None values are not pruned - take them out for now\n # this also means the default values will not be applied\n # on serialized -- that's ok though since we don't define\n # defaults in colander\n data[key] = value\n\n return data", "title": "" }, { "docid": "5d8024c2d285d0e15fc77a6947180495", "score": "0.54261655", "text": "def annotate_values(dictlike):\n return dict((k, annotate(v)) for k, v in six.iteritems(dictlike))", "title": "" }, { "docid": "180fd5688f56853a1d754b6a47614e06", "score": "0.54182684", "text": "def _asdict(t):\r\n return {'youngest_rev': t[0], 'oldest_rev': t[1],\r\n 'rev_dict': t[2], 'tag_set': t[3], 'srev_dict': t[4],\r\n 'branch_dict': t[5]}", "title": "" }, { "docid": "525994c8ed7e05f5017b814ceb2a71d6", "score": "0.5416977", "text": "def as_dict(self) -> dict:\n\n def to_item(obj):\n if isinstance(obj, ParseResults):\n return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj]\n else:\n return obj\n\n return dict((k, to_item(v)) for k, v in self.items())", "title": "" } ]
2441abb23bc7ca6c80f06ed0f1e59133
Subclassed by the implementor to react to the ball being completely over; automatically invoked by end_ball(). At this point the ball is over.
[ { "docid": "a35f042629cafca6e894e2ee30de33ac", "score": "0.6899138", "text": "def ball_ended(self):\n self.log(\"Skel: BALL ENDED\")\n\n # turn off the flippers\n self.enable_flippers(False)\n # self.enable_alphanumeric_flippers(False)\n if(self.use_ballsearch_mode):\n self.ball_search.disable() # possibly redundant if ball ends normally, but not redundant when slam tilted\n\n super(SkeletonGame, self).ball_ended()\n for m in self.known_modes[AdvancedMode.Ball]:\n self.modes.remove(m)", "title": "" } ]
[ { "docid": "3e6fea8386528e150f2c5d464b07144a", "score": "0.70871973", "text": "def updateBall(self):\n \n self._ball.moveBall(self._paddle.collides(self._ball),\\\n self.collisionWithBricks())\n self._offScreen = self._ball.checkOffScreen()", "title": "" }, { "docid": "58f7f36a3d0df3da4bd83e2ab0233241", "score": "0.6973468", "text": "def update(self):\n\t\tself._check_screen_edges()\n\t\tself._bottom_hit()\n\t\t# Moves the ball\n\t\tself.x += self.settings.ball_speed * self.change_x\n\t\tself.y += self.settings.ball_speed * self.change_y\n\t\tself.rect.center = (self.x, self.y)", "title": "" }, { "docid": "e21bc0891bd68732675a88ef108edf9f", "score": "0.69657475", "text": "def evt_ball_ending(self, (shoot_again, last_ball)):\n self.logger.info(\"base game mode trough changed notification ('ball_ending - again=%s, last=%s')\" % (shoot_again,last_ball))\n\n # stop any music as appropriate\n # self.game.sound.fadeout_music()\n self.game.sound.play('ball_drain')\n self.game.sound.play_music('sonic')\n self.game.displayText('End of ball')\n return 2.0", "title": "" }, { "docid": "190402009eec6f516cb574e662d85d6c", "score": "0.68922037", "text": "def move(self, screen: pygame.display, ball) -> None:\n ym = self.get_new_y()\n if ym + 150 <= HEIGHT:\n self.y_corr = ym\n pygame.draw.rect(screen, self.color,\n (self.x_corr, self.y_corr, 15, 150))\n else:\n pygame.draw.rect(screen, self.color,\n (self.x_corr, HEIGHT - 150, 15, 150))\n\n # Ball Collision\n if ball.x == self.x_corr + self.mul*15 and self.y_corr - 15 <= \\\n ball.y <= self.y_corr + 50 + 15:\n ball.speed[0] = -ball.speed[0]\n ball.speed[1] = -1\n elif ball.x == self.x_corr + self.mul*15 and self.y_corr + 50 <= \\\n ball.y <= self.y_corr + 100:\n ball.speed[0] = -ball.speed[0]\n ball.speed[1] = 0\n elif ball.x == self.x_corr + self.mul*15 and self.y_corr + 100 \\\n <= ball.y <= self.y_corr + 150:\n ball.speed[0] = -ball.speed[0]\n ball.speed[1] = 1", "title": "" }, { "docid": "33f3b26f69fe55317233ca23031eea5f", "score": "0.6883499", "text": "def updateBall(self):\n self._ball.step()\n self._ball.bounce()\n if self._paddle.collides(self._ball):\n self._ball.setVY(-self._ball.getVY())\n for x in self._bricks:\n if x.collides(self._ball):\n self._ball.setVY(-self._ball.getVY())\n self._bricks.remove(x)\n seq=[Sound('bounce.wav'),Sound('saucer1.wav'),Sound('cup1.wav')\\\n ,Sound('saucer1.wav')]\n random.choice(seq).play() \n return self._ball.bottom()", "title": "" }, { "docid": "380c658046520bab257c2fb2f178a890", "score": "0.68821573", "text": "def paddle_b_check():\n if (350 < ball.xcor() < 360) and (\n paddle_b.ycor() + 50 > ball.ycor() > paddle_b.ycor() - 50):\n ball.setx(350)\n ball.dx = -ball.dx\n winsound.PlaySound(\"Sounds/ping-pong-ball-hit-paddle.wav\", winsound.SND_ASYNC)", "title": "" }, { "docid": "689f07fecf30ad80eae0201c5ca749d9", "score": "0.6811001", "text": "def out_of_bounds(self):\n if self.rect.centery > self.settings.screen_height:\n self.kill()\n sounds.play_sound(\"dropped_ball\", self.settings.sound)\n Ball._total_balls += 1", "title": "" }, { "docid": "a468899577f853af6b8235f12829c881", "score": "0.6791254", "text": "def check_ball_fall(self, player):\n if self.y + self.radius >= SCREEN_HEIGHT + 2 * BALL_RADIUS:\n self.x = player.x + 30\n self.y = BALL_START_POS[1]\n self.vx = 0\n self.vy = BALL_START_SPEED[1]\n Ball.BALLS_LEFT -= 1\n pygame.draw.circle(main.DISPLAY, self.color, [self.x, self.y], self.radius)\n pygame.display.update()\n pygame.time.wait(1000)", "title": "" }, { "docid": 
"86583b8f085d5f3cb9fbffce4f277020", "score": "0.67608166", "text": "def _bottom_hit(self):\n\t\tif self.rect.bottom >= self.screen_rect.bottom:\n\t\t\tif self.settings.lives_left > 1:\n\t\t\t\tself._start_ball()\n\t\t\t\t# Decreases number of lives left\n\t\t\t\tself.settings.lives_left -= 1\n\t\t\t\t# Decrease number of lives left to update in display_lives_left()\n\t\t\t\tself.lives -= 1\n\t\t\t\t# Update the number of lives left on screen\n\t\t\t\tself.scoreboard.display_lives_left(self.lives)\n\t\t\t\t# Add delay after the ball reaches the bottom of the screen\n\t\t\t\tsleep(0.5)\n\t\t\telse:\n\t\t\t\tself.settings.game_active = False", "title": "" }, { "docid": "adc6341df54facb81d8096c4feba512c", "score": "0.67361003", "text": "def ball_hit_brick(self):\n obj_tl = self.window.get_object_at(self.ball.x, self.ball.y)\n obj_tr = self.window.get_object_at(self.ball.x + self.ball.width, self.ball.y)\n obj_bl = self.window.get_object_at(self.ball.x, self.ball.y + self.ball.height)\n obj_br = self.window.get_object_at(self.ball.x + self.ball.width, self.ball.y + self.ball.height)\n is_hit_obj = (obj_tl is not None or obj_tr is not None or obj_bl is not None or obj_br is not None)\n is_beyond_paddle = (self.ball.y + self.ball.height) < self.paddle.y\n return is_hit_obj and is_beyond_paddle", "title": "" }, { "docid": "be258ce5e31016084e5fe5c5d0cddaf2", "score": "0.6733398", "text": "def manageBall(self):\n\n # if isReset is true, we are waiting to serve the ball so keep it on the paddle\n if self.isReset:\n self.ball.rect.centerx = self.paddle.rect.centerx\n return\n\n # if ball isn't below the walls\n if self.ball.rect.top < 550:\n # bounce ball off the ceiling\n if self.ball.rect.top <= 10:\n self.ball.rect.top = 11\n\n # reverse y velocity so it 'bounces'\n self.ball.vely *= -1\n\n # bounce ball off the left wall\n if self.ball.rect.left <= 10:\n self.ball.rect.left = 11\n\n # reverse X velocity so it 'bounces'\n self.ball.velx *= -1\n\n # bounce ball off the right wall\n elif self.ball.rect.right > 510:\n self.ball.rect.right = 509\n\n # reverse X velocity so it 'bounces'\n self.ball.velx *= -1\n\n # ball IS below the walls\n else:\n # if ball hits the bottom, reset the board\n if self.ball.rect.bottom > 600:\n self.reset()", "title": "" }, { "docid": "359a0dd3b50374e7023cec2d0ded8db5", "score": "0.6721197", "text": "def run(self):\n MAX_ANGULAR_VELOCITY = 3.14/2 * 0.5\n\n # After 1.5 meters, we don't care about how far the ball is. 
It doesn't make us\n # approach it any faster.\n DISTANCE_THRESHOLD = 1.5\n\n # Factor to multiply thresholded distance by to get a maximum value equal to one\n DISTANCE_CONSTANT = 2/3.\n\n # Ball pursing thresholds\n MAX_FORWARD_VELOCITY = .75\n MIN_FORWARD_VELOCITY = 0.50\n\n if self.getTime() > 2.0:\n self.postSignal(\"restart\")\n\n ball = memory.world_objects.getObjPtr(core.WO_BALL)\n if not ball.seen:\n return\n\n # Reset the timer to act as a failsafe against losing the ball\n self.reset()\n\n # Ball in the bottom frame?\n if not ball.fromTopCamera:\n self.finish()\n\n # Ball coordinates\n ball_x, ball_y = ball.imageCenterX, ball.imageCenterY\n\n # Calculate forward velocity\n ball_distance = ball.visionDistance / 1000\n# print('Ball distance: {}'.format(ball_distance))\n ball_distance = min(ball_distance, DISTANCE_THRESHOLD)\n\n # Cache the ball distances\n PursueBall.ball_distances = (PursueBall.ball_distances + [ball_distance])[-30:]\n# print('Ball distances: {}'.format(PursueBall.ball_distances))\n slope = sum(PursueBall.ball_distances[-10:])/10 - sum(PursueBall.ball_distances[:10])/10\n# print('Slope: {} - {} = {}'.format(sum(PursueBall.ball_distances[-10:]) / 10,\n# sum(PursueBall.ball_distances[:10]) / 10,\n# slope))\n# print('Input: {}'.format(1 / slope if slope else 1))\n\n\n # Get the maximum velocity to be 1\n forward_vel = ball_distance * DISTANCE_CONSTANT\n forward_vel *= MAX_FORWARD_VELOCITY\n forward_vel = max(MIN_FORWARD_VELOCITY, forward_vel)\n# print('forward velocity: {}'.format(forward_vel))\n\n # Calculate sideways velocity\n angular_vel = -(ball_x-160.0) / 160.0 * MAX_ANGULAR_VELOCITY\n# print('Sideways Amount: {}'.format(angular_vel))\n\n commands.setWalkVelocity(forward_vel, 0, angular_vel)", "title": "" }, { "docid": "f7c56f1d6a94527a35de6f23a2a44aff", "score": "0.67078227", "text": "def been_hit(ball: Ball, paddle: Paddle):\n collision_threshold = 15\n if ball.rect.colliderect(paddle.rect) and ball.velocity.y > 0.0:\n ontop = abs(ball.rect.bottom - paddle.rect.top)\n if ontop < collision_threshold:\n ball.velocity.y = -ball.velocity.y\n else:\n ball.velocity.x = -ball.velocity.x\n return True\n return False", "title": "" }, { "docid": "829edbf602edd307b728e7779ac6ec08", "score": "0.67038006", "text": "def game_over(self):\n self.bird.die()\n self.is_over = True\n for pipe in self.pipes:\n pipe.speed = 0\n self.base.speed = 0", "title": "" }, { "docid": "7ee60151b3e5be96911517277151c5f1", "score": "0.6700794", "text": "def update(self):\n if self._moving:\n new_pos = self.calc_new_pos(self._rect, self.angle)\n if not self._area.contains(new_pos):\n if new_pos.y < 0:\n self.ball_velocity.y *= -1\n if new_pos.x < 0 or new_pos.x > HEIGHT - 10:\n self.ball_velocity.x *= -1\n new_pos = self.calc_new_pos(self._rect, self.angle)\n else:\n collide_bricks = pygame.sprite.spritecollide(self, self._bricks_sprites, False)\n paddle = pygame.sprite.spritecollide(self, self._paddle_sprites, False)\n if collide_bricks:\n first_brick = collide_bricks[0]\n edge = first_brick.collision_edge(self)\n self.ball_velocity.reflect_ip(edge)\n self.brick_collide(first_brick)\n\n elif paddle:\n pad = paddle[0]\n self.rect.bottom = pad.rect.top\n self.ball_velocity.y *= -1\n if self.rect.centerx > pad.rect.centerx:\n if self.ball_velocity.x == -1:\n self.ball_velocity.x *= -1\n else:\n if self.ball_velocity.x == 1:\n self.ball_velocity.x *= -1\n\n new_pos = self.calc_new_pos(self._rect, self.angle)\n\n self._rect = new_pos\n\n else:\n self.angle = 70\n self.ball_velocity[:] = 1, -1\n 
paddle = pygame.sprite.spritecollide(self, self._paddle_sprites, False)\n self._rect.x = paddle[0].rect.x + 45", "title": "" }, { "docid": "4d664aba6e845e0909c21b6bc17cfabc", "score": "0.66927105", "text": "def collisionBall(self,\n ball,\n ball2,\n ):\n ballNumber = ball.number\n ball2Number = ball2.number\n ballCollh = ball.ballCollh\n \n if ballCollh[ball2Number-1]:\n return # Already interacted\n \n ball2.ballCollh[ballNumber-1] = True # Record we have delt with this ball\n \n x = ball.x\n y = ball.y\n r = ball.radius # Assuming equal\n vx = ball.vx\n vy = ball.vy\n vMag = sqrt(vx*vx + vy*vy)\n \n x2 = ball2.x\n y2 = ball2.y\n r2 = ball2.radius\n v2x = ball2.vx\n v2y = ball2.vy\n v2Mag = sqrt(v2x*v2x + v2y*v2y)\n \n dx = x-x2\n dy = y-y2\n dsepsq = dx*dx + dy*dy\n sep = ball.ballSep(ball2)\n \n if (sep >= 0):\n return # No collision\n \n \n locTheta = atan2(y2-y, x2-x)\n tanTheta = locTheta + PI/2\n loc2Theta = atan2(y-y2, x-x2) # Going from ball 2\n tan2Theta = loc2Theta + PI/2\n \n if (self.trace & 0x01):\n print(\"vx,vy = (\", vx, vy, \"), v2x,v2y = (\", v2x, v2y, \")\")\n print(\"x,y = (\", x, y, \"),\", x2, y2, \" = (\", x2, y2, \"), sep =\", sep)\n # Adjustment to avoid overlap\n # Backoff fastest moving ball amount of separation\n if (sep < 0):\n msep = -sep # Absolute separation\n msep += 1e-5 # Small fudge\n if (vMag >= v2Mag):\n if (x < x2):\n ball.x -= abs(cos(locTheta)*msep)\n else:\n ball.x += abs(cos(locTheta)*msep)\n if (y < y2):\n ball.y -= abs(sin(locTheta)*msep)\n ball.y += abs(sin(locTheta)*msep)\n else:\n if (x2 < x):\n ball2.x -= abs(cos(loc2Theta)*msep)\n else:\n ball2.x += abs(cos(loc2Theta)*msep)\n if (y2 < y):\n ball2.y -= abs(sin(loc2Theta)*msep)\n else:\n ball2.y += abs(sin(loc2Theta)*msep)\n \n sep_A = ball.ballSep(ball2)\n if (self.trace & 0x01):\n print(\"After backoff: sep=\", sep_A,)\n # TBD \" x1,y1:(\", ball.x, ball.y,\n # \" x2,y2:(\", ball2.x, ball2.y\n \n if (sep_A < 0):\n print(\"We're overlapping by \", sep_A)\n print(\"cos(\", loc2Theta, cos(loc2Theta), )\n print(\"sin(\\loc2Theta):\", sin(loc2Theta))\n if (self.trace & 1):\n print(\"Collision ball.number=ball2.number\")\n # Do ball pair, updating then and keeping\n # record in each ball indicating the pair has been\n # delt with\n # Calculate line of centers\n # center to center point normalized\n # sine of center to center\n \n \n \n \n \n loc = r*2\n vTheta = atan2(vy, vx)\n locVTheta = locTheta - vTheta # Angle v to loc\n (vN, vT) = xyrot(locVTheta, vx, vy)\n if (self.trace & 0x01):\n print(\"vTheta=vTheta, locTheta=locTheta, locVTheta=locVTheta\")\n \n print(\"vN=\",vN, \"vT=\",vT)\n # Ball 2\n v2Theta = atan2(v2y, v2x)\n locV2Theta = locTheta - v2Theta # Angle v to loc(ball 1)\n \n if (self.trace & 0x01):\n print(\"loc2Theta=loc2Theta, v2Theta=v2Theta, locV2Theta=locV2Theta, v2Mag=v2Mag\")\n (v2N, v2T) = xyrot(locV2Theta, v2x, v2y)\n \n # v1AN = v2N, v2AT = v1T\n vN_A = v2N\n vT_A = vT\n v2N_A = vN\n v2T_A = v2T\n \n if (self.trace & 0x01):\n print(\"vN =vN, vT = vT, v2N = v2N, v2T = v2T\")\n print(\"vN_A=vN_A, vT_A=vT_A, v2N_A=v2N_A, v2T_A = v2T_A\")\n # Add new components\n # To get resulting \n (vx_A, vy_A) = vRsum(locTheta, vN_A, tanTheta, vT_A)\n \n (v2x_A, v2y_A) = vRsum(locTheta, v2N_A, tanTheta, v2T_A)\n \n t = {}\n if (self.trace):\n t['sep'] = sep\n t['time'] = time.time\n t['ball'] = ball # Can't do deepcopy\n t['ball2'] = ball2 # Can't do deepcopy\n t['vN'] = vN\n t['vT'] = vT\n t['v2N'] = v2N\n t['v2T'] = v2T\n t['vN_A'] = vN_A\n t['vT_A'] = vT_A\n t['v2N_A'] = v2N_A\n 
t['v2T_A'] = v2T_A\n ball.vx = vx_A\n ball.vy = vy_A\n ball.ballCollh[ball2Number-1] = True # Record we have\n # delt with this ball\n \n ball2.vx = v2x_A\n ball2.vy = v2y_A\n # delt with this ball\n if (self.trace):\n t['ball_A'] = ball # Can't do deepcopy\n t['ball2_A'] = ball # Can't do deepcopy\n tracecollisions.append(t)\n if (self.trace & 0x01):\n print(\"vx,vy[after] = (vx_A,vy_A), v2x,v2y = (v2x_A,v2y_A)\")\n if (self.trace & 0x01):\n print(\"Ball collision updated\")", "title": "" }, { "docid": "adce2fc2012727cc53f5034fd0682ec7", "score": "0.6682006", "text": "def paddle_a_check():\n if (-360 > ball.xcor() > -370) and (\n paddle_a.ycor() + 50 > ball.ycor() > paddle_a.ycor() - 50):\n ball.setx(-360)\n ball.dx = -ball.dx\n winsound.PlaySound(\"Sounds/ping-pong-ball-hit-paddle.wav\", winsound.SND_ASYNC)", "title": "" }, { "docid": "ff3b8914303aa34f50c9afdfa95640e5", "score": "0.6668007", "text": "def object_check(self):\n if self.ball.y <= (self.window.height - PADDLE_OFFSET): # prevention of the ball touching to the label\n if self.window.get_object_at(self.ball.x, self.ball.y) is not None:\n self.__dy = -self.__dy\n elif self.window.get_object_at(self.ball.x + BALL_RADIUS*2, self.ball.y) is not None:\n self.__dy = -self.__dy\n elif self.window.get_object_at(self.ball.x, self.ball.y + BALL_RADIUS*2) is not None:\n self.__dy = -self.__dy\n elif self.window.get_object_at(self.ball.x + BALL_RADIUS*2, self.ball.y + BALL_RADIUS*2) is not None:\n self.__dy = -self.__dy", "title": "" }, { "docid": "738df075ad98574bd74db04b96372cd1", "score": "0.6653563", "text": "def ballchase(self):\n\n # check if we are goalside\n goalside = False\n if self.team == 0:\n if self.pos.y < self.ball_pos.y:\n goalside = True\n else:\n if self.pos.y > self.ball_pos.y:\n goalside = True\n\n # choose next action based on how far away from the ball we are\n dist_to_ball = self.pos.dist(self.ball_pos)\n if dist_to_ball < self.DODGE_THRESHOLD and goalside:\n # dodge into ball\n self.action_display = \"shooting\"\n self.dodge(self.ball_pos)\n elif dist_to_ball <= self.DODGE_THRESHOLD * 2 and goalside:\n # face ball before dodging\n self.action_display = \"setting up to shoot\"\n self.aim(self.ball_pos)\n self.controller_state.throttle = 0.5\n else:\n # we are either too far away from the ball or not goalside\n boost = False\n if dist_to_ball > self.BALL_FAR_AWAY_DISTANCE:\n boost = True\n ball_angle_to_goal = math.atan2(\n self.offensive_goal.y - self.ball_pos.y,\n self.offensive_goal.x - self.ball_pos.x)\n ball_distance_to_goal = self.ball_pos.dist(self.offensive_goal)\n dist_plus = ball_distance_to_goal + (self.DODGE_THRESHOLD * 2)\n x = self.offensive_goal.x - \\\n (math.cos(ball_angle_to_goal) * dist_plus)\n y = self.offensive_goal.y - \\\n (math.sin(ball_angle_to_goal) * dist_plus)\n goalside_position = vec3(x, y, 0)\n location = self.check_for_boost_detour(goalside_position)\n if location == goalside_position:\n self.action_display = \"ballchasing\"\n else:\n self.action_display = \"boost > ball\"\n location = normalize_location(location)\n self.go_to_location(location, 0, boost)", "title": "" }, { "docid": "970dfb388de647c4a7fe588d257b9b0a", "score": "0.66514236", "text": "def updateBall(self,gamewidth,gameheight):\n \n self._ball.leftRightBounds(gamewidth)\n self._ball.topBound(gameheight)\n self._ball.stepX()\n self._ball.stepY()\n for x in self._bricks:\n if(x.collides(self._ball)):\n cling = Sound('cup1.wav')\n cling.play()\n self._ball.fillcolor = GREEN\n self._ball.negateDir();\n 
self._bricks.remove(x)\n if(self._paddle.collides(self._ball)):\n boing = Sound('bounce.wav')\n boing.play()\n self._ball.fillcolor = RED\n self._ball.negateDir()", "title": "" }, { "docid": "cd440964bcf66f4f84554b4154825a73", "score": "0.66356105", "text": "def collision_wall(self, ball, bat1, p1_score, sound):\n if pygame.sprite.collide_rect(ball, bat1) and ball.rect.x >= 15:\n y_acc = random.randint(-2, 2)\n xvel = ball.get_x_velocity() - random.randint(-1, 2)\n yvel = ball.get_y_velocity() - (1 + y_acc)\n if sound is not None:\n sound.bat_sound()\n if ball.rect.x >= 15:\n ball.set_position(26, ball.rect.y)\n ball.set_velocity(-xvel, yvel)\n p1_score += 1\n elif ball.rect.x > 0.75 * self.scr_width - 45:\n ball.set_position(0.75 * self.scr_width - 45, ball.rect.y)\n xvel = ball.get_x_velocity() * -1\n ball.set_velocity(xvel, ball.get_y_velocity())\n if sound is not None:\n sound.bat_sound()\n return p1_score", "title": "" }, { "docid": "2ca73da84c8a124fead610fc95079efb", "score": "0.66265655", "text": "def ballBarrier(self):\n if self.rect.right > SCREEN_WIDTH:\n self.xmove = self.xmove*-1\n if self.rect.left < 0:\n self.xmove = self.xmove*-1\n if self.rect.bottom > SCREEN_HEIGHT:\n self.ymove = self.ymove*-1\n if self.rect.top < 0:\n self.ymove = self.ymove*-1", "title": "" }, { "docid": "975bb45b20b8c7e90bfc74b800044fd5", "score": "0.6588626", "text": "def borderCheck():\n borderX = 380\n borderY = 280\n global score_a, score_b\n\n if ball.ycor() > borderY + 10:\n ball.sety(borderY + 10)\n ball.dy = 300\n ball.dy = -ball.dy\n winsound.PlaySound(\"Sounds/ping-pong-ball-hit-wall.wav\", winsound.SND_ASYNC)\n\n if ball.ycor() < -borderY:\n ball.sety(-borderY)\n ball.dy = -300\n ball.dy = -ball.dy\n winsound.PlaySound(\"Sounds/ping-pong-ball-hit-wall.wav\", winsound.SND_ASYNC)\n\n if ball.xcor() < -borderX - 10:\n ball.goto(-250, 0)\n ball.dx = -ball.dx\n score_b += 1\n win.ontimer(score_update(), 500)\n ball.dx = 250\n ball.dy = random.choice(speed)\n\n if ball.xcor() > borderX:\n ball.goto(250, 0)\n ball.dx = -ball.dx\n score_a += 1\n win.ontimer(score_update(), 500)\n ball.dx = -250\n ball.dy = random.choice(speed)", "title": "" }, { "docid": "138c0f888d00afd8a48cf55026a61172", "score": "0.65778637", "text": "def _holdBall(self):\n if self.fieldBall == None:\n print (\"ERROR: trying to hold ball without actually having ball\") \n return", "title": "" }, { "docid": "16bb454670bd1f38449277ff9934c2a5", "score": "0.65774995", "text": "def collision(self, all_balls):\n from all_functions import *\n show_table()\n all_other_except_me = [my_ball for my_ball in all_balls if (my_ball != self) & (my_ball.pocketed == 0)]\n # This is list of all balls except me, which are not pocketed :D\n\n for my_ball in all_other_except_me:\n dx = self.x - my_ball.x\n dy = self.y - my_ball.y\n tangent = atan2(dy, dx)\n dist_of_separation = hypot(dx, dy)\n\n if dist_of_separation < (self.size + my_ball.size + 2): # Collision\n if SOUNDS:\n hit.play()\n my_ball.angle = get_angle((my_ball.x, my_ball.y), self)\n dist_correction_angle = my_ball.angle - self.angle\n self.angle = 2*tangent - self.angle\n self.angle = self.angle % (2*pi)\n my_ball.angle = my_ball.angle % (2*pi)\n\n # This is bouncing of balls before hitting each other,\n # so they will not be trapped inside the\n # internal bouncing, and leanding nowhere\n # NOTE: THIS IS VERY IMPORTANT NEVER REMOVE THIS\n k = 10\n self.x += int(round(k*sin(self.angle)))\n self.y -= int(round(k*cos(self.angle)))\n self.boundary()\n\n my_ball.x += 
int(round(k*sin(my_ball.angle)))\n my_ball.y -= int(round(k*cos(my_ball.angle)))\n my_ball.boundary()\n # I am saving this dist_correction_angle as object parameter.\n # This has been initialized in the __init__ of Balls class\n self.correction_angle = abs(dist_correction_angle*180/pi) % 360\n\n sine_correction = sin(abs(dist_correction_angle))\n cosine_correction = cos(abs(dist_correction_angle))\n\n new_self_dist = int(self.dist * sine_correction)\n new_my_ball_dist = int(self.dist * cosine_correction)\n\n self.dist = new_self_dist\n my_ball.dist = new_my_ball_dist\n\n new_self_speed = self.speed * sine_correction\n new_my_ball_speed = self.speed * cosine_correction\n\n self.speed = new_self_speed\n my_ball.speed = new_my_ball_speed\n\n if (self.dist > 0) & (self.speed > 0):\n self.offset_speed = float(self.speed - self.default_speed) / self.dist\n\n if (my_ball.dist > 0) & (my_ball.speed > 0):\n my_ball.offset_speed = float(my_ball.speed - my_ball.default_speed) / my_ball.dist", "title": "" }, { "docid": "733f3ad7980018f2f393002b2a0ded14", "score": "0.6550568", "text": "def check_ball(self, ball):\n # if self.collide_widget(ball):\n # self.parent.score += 1\n # return True", "title": "" }, { "docid": "a4d3724a67a5d12b94d592f0e3529cc3", "score": "0.65469337", "text": "def make_ball():\n\t # --- Logic", "title": "" }, { "docid": "0f30df83bacc6de8f2cfff08cfaa8737", "score": "0.65436476", "text": "def make_ball():\n ball = Ball()\n # Starting position of the ball.\n # Take into account the ball size so we don't spawn on the edge.\n ball.x = random.randrange(BALL_SIZE, SCREEN_WIDTH - BALL_SIZE)\n ball.y = random.randrange(BALL_SIZE, SCREEN_HEIGHT - BALL_SIZE)\n \n # Speed and direction of rectangle\n ball.change_x = random.randrange(-2, 3)\n ball.change_y = random.randrange(-2, 3)\n \n return ball\n\n \n \n def collison():\n if ball.y > SCREEN_HEIGHT - BALL_SIZE or ball.y < BALL_SIZE:\n ball.change_y *= -1\n if ball.x > SCREEN_WIDTH - BALL_SIZE or ball.x < BALL_SIZE:\n ball.change_x *= -1", "title": "" }, { "docid": "da7a2c55202bae2785b7cca74ad1b8c5", "score": "0.6540864", "text": "def __on_collision_end__(self,obj):\n self.on_collision_end(obj)\n pass", "title": "" }, { "docid": "ab812f149d5e5f94598de9cb1de84d1d", "score": "0.65392005", "text": "def on_collision_end(self,obj):\n pass", "title": "" }, { "docid": "3619bed3d1e9a0cdfc43c4a589eca3fa", "score": "0.6535007", "text": "def bouncing_ball(e):\r\n # global variables that show the status of the program and the number of runs\r\n global running, num_run\r\n # return and do nothing if the user clicks when (1) a ball is still bouncing (2) bouncing ball has run 3 times\r\n if running or num_run >= 3:\r\n return\r\n # check if there is a ball at the starting position (which happens if it is not first run)\r\n check_ball = window.get_object_at(START_X+SIZE/2, START_Y+SIZE/2)\r\n if check_ball is not None:\r\n window.remove(check_ball) # remove the ball at the starting position (created in last call)\r\n # a new ball is created every time for bouncing\r\n ball = create_ball()\r\n window.add(ball)\r\n # change current status to 'running' and start the animation\r\n running = True\r\n # assign initial speed in y-direction\r\n vy = 0\r\n\r\n # animation part inside the below while loop\r\n while True:\r\n # if the ball hit the bottom horizontal line of the window, change the direction of vy\r\n if ball.y+SIZE >= window.height and vy > 0: # vy > 0 makes sure the direction of vy changes only once\r\n vy = -REDUCE*vy # REDUCE is a coefficient that 
simulates the effect of non-elastic collisions\r\n # if the ball hit the right vertical line of the window, end the animation\r\n if ball.x >= window.width:\r\n num_run += 1 # number of runs +1\r\n count.text = 'No. of runs: ' + str(num_run) # update the counting label\r\n running = False # change current status to 'NOT running'\r\n window.remove(ball) # remove the ball\r\n ball = create_ball() # create a new ball and put it at starting position\r\n window.add(ball)\r\n break\r\n ball.move(VX, vy+GRAVITY) # normally, the ball moves when the ball has not hit the boundaries of the window\r\n vy += GRAVITY # speed in y-direction accelerates by GRAVITY every time\r\n pause(DELAY) # slow down the while-loop to make it visible to human eyes\r", "title": "" }, { "docid": "3d7da934b37d508a84572f3ff3e136d4", "score": "0.65317893", "text": "def update(self):\n self.rect.move_ip(self.x_speed, self.y_speed)\n \n if self.rect.top < self.screen_rect.top or self.rect.bottom > self.screen_rect.bottom:\n self.rect.clamp(self.screen_rect)\n self.y_speed *= -1\n \n if self.rect.left < self.screen_rect.left:\n self.spawn_ball(1)\n if self.rect.right > self.screen_rect.right:\n self.spawn_ball(-1)", "title": "" }, { "docid": "0e2739bcbb410acbf44b10e524e89ed7", "score": "0.65032166", "text": "def roll(self): \n #calculates positions to be based on speed\n next_x_position = int(math.floor(self.x_position + self.x_speed))\n next_y_position = int(math.floor(self.y_position + self.y_speed))\n\n #checks if the ball would be out of the field.\n if(next_x_position >= self.field.width):\n next_x_position = self.field.width - 1\n if(next_y_position >= self.field.height):\n next_y_position = self.field.height - 1\n if(next_x_position < 0):\n next_x_position = 0\n if(next_y_position < 0):\n next_y_position = 0\n\n #checks if the place that the ball would move is occuped by a grass tile\n if(isinstance(self.field[next_y_position][next_x_position], Classes.Field.Grass)):\n prev_grass = self.field[next_y_position][next_x_position]\n\n self.field[next_y_position][next_x_position] = self\n self.field[self.y_position][self.x_position] = prev_grass\n \n #updates positions\n self.x_position = next_x_position\n self.y_position = next_y_position\n\n #de-accelerate the balls\n self.x_speed = max(0, self.x_speed - 1) if self.x_speed > 0 else min(0, self.x_speed + 1)\n self.y_speed = max(0, self.y_speed - 1) if self.y_speed > 0 else min(0, self.y_speed + 1)", "title": "" }, { "docid": "5961589d2e260c4d760a6000d8aa6a54", "score": "0.65016717", "text": "def bouncing_ball():\n # Detect collision with wall\n if ball.ycor() > 280 or ball.ycor() < -280:\n ball.bounce_y()\n # Detect collision with paddle\n if ball.distance(right_paddle) < 50 and ball.xcor() > 320 or ball.distance(\n left_paddle) < 50 and ball.xcor() < -320:\n ball.bounce_x()", "title": "" }, { "docid": "fd22179f387ac4493649a812bd3fa87d", "score": "0.6498927", "text": "def on_collision_start(self, obj):\n pass", "title": "" }, { "docid": "c85cc74f7b20a9007dd2bdbceea9c4f2", "score": "0.6491543", "text": "def step(self):\n # self.send('(check_ball)')\n self.removeNonHFOPlayers()\n self._teamHoldingBall, self._playerHoldingBall = self.calcBallHolder()\n if self._teamHoldingBall is not None:\n self._lastFrameBallTouched = self._frame\n if self.trialOver():\n self.updateResults()\n self._lastFrameBallTouched = self._frame\n self.reset()", "title": "" }, { "docid": "d361a7c67405fec78fa2f44e79ada3ef", "score": "0.64879787", "text": "def demo_kick_a_ball(self, time, interval):\n # 
Variables for ball and positioning\n diam_ball = 0.5\n\n if time - self.last >= interval:\n if not self.simulation_done:\n self.last = time\n floor_pos = self.floor.getPosition()\n ball_pos = agx.Vec3(1, 0, floor_pos[2] + diam_ball / 2 + 0.1)\n end_effector_current_pos = self.get_end_effector_current_pos()\n\n if not self.ball_shot:\n\n if not self.ball_active:\n self.ball_active, ball = self.add_ball(ball_pos, diam_ball)\n oneLegRobotApp.sim().add(ball)\n\n if not self.ready_to_simulate:\n print(\"Not ready to simulate!\")\n self.x = 0\n self.y = 0\n self.z = 0\n\n if end_effector_current_pos[0] > self.x - self.error_margin and end_effector_current_pos[\n 0] < self.x + self.error_margin:\n print(\"Init x-position reached.\")\n\n if end_effector_current_pos[2] > 4.1 and end_effector_current_pos[2] < 4.3:\n print(\"Init z-position reached.\")\n self.ready_to_simulate = True\n else:\n if not self.ready_to_shoot:\n print(\"Not ready to shoot.\")\n print(\"X: \", self.x, \"Y: \", self.y, \"Z: \", self.z)\n self.x = ball_pos[0] * 50 - 75\n self.y = ball_pos[1] * 50\n self.z = ball_pos[2] * 50 - 2 * 90\n\n if end_effector_current_pos[0] > self.x / 50 - self.error_margin and end_effector_current_pos[0] < self.x / 50 + self.error_margin:\n print(end_effector_current_pos[2])\n print(ball_pos[2] - diam_ball)\n print(ball_pos[2] + diam_ball)\n if end_effector_current_pos[2] > ball_pos[2] - diam_ball - self.error_margin and end_effector_current_pos[2] < ball_pos[2] + diam_ball + self.error_margin:\n self.ready_to_shoot = True\n print(\"Ready to shoot now!\")\n else:\n self.x = ball_pos[0] * 50 - 10\n self.speed = 3\n self.ball_shot = True\n\n else:\n if end_effector_current_pos[0] > self.x / 50 - self.error_margin and end_effector_current_pos[0] < self.x / 50 + self.error_margin:\n self.x = 0\n self.speed = 1\n\n if end_effector_current_pos[0] > self.x - self.error_margin and end_effector_current_pos[0] < self.x + self.error_margin:\n print(\"Init x-position reached.\")\n self.z = 0\n\n if end_effector_current_pos[2] > 4.1 and end_effector_current_pos[2] < 4.3:\n print(\"Init z-position reached.\")\n self.simulation_done = True", "title": "" }, { "docid": "da8b857b14c71966d8fabd88eb8983cb", "score": "0.64591396", "text": "def circle_bounce(circle, ball):\n pass", "title": "" }, { "docid": "da32d2838a3116138329c266cf0eb0e9", "score": "0.64514107", "text": "def handle_collisions(self):\n collisions = pygame.sprite.spritecollide(self.ball, self.pads, False)\n if len(collisions):\n # in the future there will be generic collision test\n # so the ball will bounce of extra walls etc\n self.ball.movement_vector[0] = -self.ball.movement_vector[0] \n self.ball.image = pygame.transform.flip(self.ball.image, 1, 0)\n ball_mid = self.ball.rect.midright[1]\n pad = collisions[0]\n pad_mid = pad.rect.midright[1]\n angle_speed = (pad_mid - ball_mid)/10\n self.ball.movement_vector[1] -= angle_speed", "title": "" }, { "docid": "390bb571d71a1382b894b2b6ce0a9e7e", "score": "0.6448613", "text": "def check_ball_hits_wall(self):\n for ball in self.balls:\n if ball.x > self.WIDTH or ball.x < 0:\n sys.exit(1)\n\n if ball.y > self.HEIGHT - self.BALL_WIDTH or ball.y < 0:\n ball.angle = -ball.angle", "title": "" }, { "docid": "aa4b00f796b104acc8832910a0ca6570", "score": "0.64484155", "text": "def collision_paddle(self, paddle):\n r_left = pg.mouse.get_pos()[0] - paddle.width\n r_right = pg.mouse.get_pos()[0] + paddle.width\n r_top = paddle.coords.y\n\n # Doing nothing if ball is further away from the top left of the rectangle than 
the sum of its radius and\n # the width of the rectangle. Reduces computation times.\n if r_top - self.radius - paddle.buffer - 30 > self.coords.y:\n return\n\n elif r_top - self.radius - paddle.buffer - 5 <= self.coords.y:\n if r_left <= self.coords.x <= r_right:\n self.velocity.y = -self.velocity.y\n return\n else:\n pg.time.wait(1)\n self.velocity = Vector(0, 0)\n self.coords = self.initial_coords\n self.restarts += 1\n self.moving = False\n return\n\n return", "title": "" }, { "docid": "a3a2f134012465fe2cb2565842fef075", "score": "0.6428308", "text": "def ball_restart(self):\n self.__dx = 0\n self.__dy = 0", "title": "" }, { "docid": "935f5f37fb35e8a901fdb9618ed29c37", "score": "0.64279497", "text": "def update(self):\n self.rect.y += self.speedy\n ## kill the sprite after it moves over the top border\n if self.rect.top > Commons.HEIGHT:\n self.kill()", "title": "" }, { "docid": "48515b1b303ffacbbfb6c0c26559acdc", "score": "0.6409898", "text": "def collision(self, brickGroup):\n \n #Creates list of bricks that were collided with\n bricksHit = pygame.sprite.spritecollide(self.ball, brickGroup, False)\n \n #If any bricks were collided with, change direction of ball and remove brick from \n #window\n if bricksHit:\n \n self.brickSound.play()\n \n for brick in bricksHit:\n \n #6 is needed to account for the ball moving slightly inside the brick\n #Change y-direction\n if (brick.rect.left < (self.ball.rect.right -6)< brick.rect.right) or (brick.rect.left < (self.ball.rect.left + 6) < brick.rect.right):\n self.ball.speed[1] *= -1\n brick.kill()\n \n #3 is needed to account for the ball moving slightly inside the brick\n #Change x-direction\n elif (brick.rect.top < (self.ball.rect.top - 3)< brick.rect.bottom) or (brick.rect.top < (self.ball.rect.bottom + 3) < brick.rect.bottom):\n self.ball.speed[0] *= -1\n brick.kill()\n \n #Creates list containing paddle if paddle is collided with \n paddleHit = pygame.sprite.spritecollide(self.ball, self.paddleGroup, False)\n \n #If the paddle is collided with, change the direction of the ball\n if paddleHit:\n \n self.paddleSound.play()\n\n \n #6 is needed to account for the ball moving slightly inside the paddle\n #Change y-direction\n if ((paddleHit[0].rect.left < (self.ball.rect.right-6)< paddleHit[0].rect.right) or (paddleHit[0].rect.left < (self.ball.rect.left+6) < paddleHit[0].rect.right)) and self.ball.speed[1] >= 0 and self.ball.rect.bottom-3 < self.paddle.rect.top:\n self.ball.speed[1] *= -1\n \n #Check where ball hits the paddle\n xChanged = False\n for i in range(6):\n if self.ball.rect.centerx < self.paddle.rect.left + self.paddle.sec * (i+1) and not xChanged:\n #Change x-direction according to location of collision\n self.ball.speed[0] += i - 3\n xChanged = True\n \n if not xChanged:\n self.ball.speed[0] += 3\n xChanged = True\n\n \n #Set maximum speed\n if self.ball.speed[0] > 6:\n self.ball.speed[0] = 6\n \n #Set minimum speed\n elif self.ball.speed[0] < -6:\n self.ball.speed[0] = -6\n \n #3 is needed to account for the ball moving slightly inside the paddle\n #Change x-direction\n elif (paddleHit[0].rect.top < (self.ball.rect.top - 3)< paddleHit[0].rect.bottom) or (paddleHit[0].rect.top < (self.ball.rect.bottom + 3) < paddleHit[0].rect.bottom):\n self.ball.speed[0] *= -1", "title": "" }, { "docid": "ef1920f68f0d6de4fc1cc94d94068461", "score": "0.6409801", "text": "def __on_collision__(self, obj):\n self.on_collision(obj)\n pass", "title": "" }, { "docid": "3e6f7f330512639b91f1f3d577db4d08", "score": "0.6405823", "text": "def 
check_ball_hits_paddle(self):\n for ball in self.balls:\n for paddle in self.paddles:\n if ball.colliderect(paddle):\n ball.velocity = -ball.velocity\n ball.angle = random.randint(-10, 10)\n break", "title": "" }, { "docid": "8192cf9982ca6b0492b845666341b2cc", "score": "0.6404141", "text": "def move_ball():\n # determine ball position\n ball_vel[0], ball_vel[1] = max(ball_vel[0],ball_min_vel), max(ball_vel[1],ball_min_vel)\n ball_vel[0], ball_vel[1] = min(ball_vel[0],ball_max_vel), min(ball_vel[1],ball_max_vel)\n ball_pos[0] += ball_vel[0] * ball_dir[0]\n ball_pos[1] += ball_vel[1] * ball_dir[1]\n # check for intersection with paddle\n isect = ball_intersect_paddle()\n # check for intersection with screen edge (top/bot first)\n if not isect:\n ball_intersect_screen()\n # check for scoring position\n if ((ball_pos[0] / ball_vel_scale) > (canvas_width - 1)):\n score('player')\n elif ball_pos[0] <= 0:\n score('computer')\n return", "title": "" }, { "docid": "967d27a6100c3cb1b2ff64416b779ce2", "score": "0.64000756", "text": "def collision(self):\n\n jay = self.root.ids.jay\n is_colliding = False\n for pipe in self.amount_pipes:\n if pipe.collide_widget(jay):\n is_colliding = True\n if jay.y < (pipe.pipe_center - pipe.pipe_gap / 2.0):\n self.game_over()\n if jay.top > (pipe.pipe_center + pipe.pipe_gap / 2.0):\n self.game_over()\n if jay.y < 112:\n self.game_over()\n if jay.top > Window.height:\n self.game_over()\n\n if self.was_colliding and not is_colliding:\n self.root.ids.player_score.text = str(int(self.root.ids.player_score.text) + 1)\n\n self.was_colliding = is_colliding", "title": "" }, { "docid": "553a3cad68175bc3b137cf8233a7aaf5", "score": "0.6396666", "text": "def __on_collision_start__(self,obj):\n self.on_collision_start(obj)\n pass", "title": "" }, { "docid": "098cc060b05420de850d0883cc41fdae", "score": "0.6393968", "text": "def on_collision(self, obj):\n pass", "title": "" }, { "docid": "1da3e3c29f95a754e8c8f4bdc9eb3c68", "score": "0.6388546", "text": "def update(self):\n\n # Move the ball\n self.position_y += self.change_y\n self.position_x += self.change_x\n\n # See if the ball hit the edge of the screen. 
If so, change direction\n if self.position_x < self.radius:\n self.change_x *= -1\n\n if self.position_x > SCREEN_WIDTH - self.radius:\n self.change_x *= -1\n\n if self.position_y < self.radius:\n self.change_y *= -1\n\n if self.position_y > SCREEN_HEIGHT - self.radius:\n self.change_y *= -1", "title": "" }, { "docid": "bd8c0c448af1e9e951ff86f9e7bea4cc", "score": "0.6386796", "text": "def bounce1(self):\n if self.ball[0].ycor() >= 380:\n self.ball[0].forward(0)\n self.ball[0].right(self.ball[0].heading() * 2)", "title": "" }, { "docid": "c1daa7234fc39e9de2e2a95849f13a21", "score": "0.6384125", "text": "def game_over(self):\n raise NotImplementedError(\"Please override this method\")", "title": "" }, { "docid": "c9bdeeb176cacf2441a96b4e720d5947", "score": "0.6383261", "text": "def run(self):\n ball = memory.world_objects.getObjPtr(core.WO_BALL)\n\n if ball.seen:\n ballX = ball.imageCenterX\n ballY = ball.imageCenterY\n \n pan = core.joint_values[core.HeadYaw]\n if abs(ballX - self.midX) > self.thresh:\n if ballX > self.midX:\n pan -= self.delta_pan\n else:\n pan += self.delta_pan\n \n pan = self.clip(pan, -75.0*core.DEG_T_RAD, 75.0*core.DEG_T_RAD) \n if abs(ball.visionBearing) > self.ball_b_thresh:\n omega = self.sgn(ball.visionBearing) * self.omega\n else:\n omega = 0.0\n\n commands.setHeadPan(pan, target_time=self.duration)\n commands.setWalkVelocity(0.0, 0.0, omega)\n\n if abs(ballX - self.midX) <= self.thresh and abs(ball.visionBearing) <= self.ball_b_thresh:\n print('===> LookAtBall: looking at ball! Finishing')\n self.finish()\n self.last_seen = self.getTime()\n else:\n if self.getTime() - self.last_seen > 2.0:\n commands.setWalkVelocity(0.0, 0.0, 0.0)\n self.postFailure()", "title": "" }, { "docid": "56d42ec32f26c24300bb871d17fa3591", "score": "0.6370684", "text": "def MoveBall():\n pass", "title": "" }, { "docid": "9a98141111c24a421cdf36686a449980", "score": "0.6360561", "text": "def move(self, board, ball):\n if self.racket.rect.top < ball.rect.y - 50:\n self.racket.rect.top += self.speed\n if self.racket.rect.bottom > ball.rect.y + 50:\n self.racket.rect.bottom -= self.speed\n if self.racket.rect.top <= 0:\n self.racket.rect.top = 0\n if self.racket.rect.bottom >= board.height:\n self.racket.rect.bottom = board.height", "title": "" }, { "docid": "880872aed5e799253a27e4e6d13d01b2", "score": "0.6356767", "text": "def bounce_paddle(self):\n self.x_move *= -1\n # slightly increase ball speed with each successful save\n self.move_speed *= MOVE_INCREASE", "title": "" }, { "docid": "bcbfe372cab417560d4f7a440b2d596e", "score": "0.6348128", "text": "def update(self):\n self.rect.y += self.speedy\n ## kill the sprite after it moves over the top border\n if self.rect.top > HEIGHT:\n self.kill()", "title": "" }, { "docid": "bcbfe372cab417560d4f7a440b2d596e", "score": "0.6348128", "text": "def update(self):\n self.rect.y += self.speedy\n ## kill the sprite after it moves over the top border\n if self.rect.top > HEIGHT:\n self.kill()", "title": "" }, { "docid": "bcbfe372cab417560d4f7a440b2d596e", "score": "0.6348128", "text": "def update(self):\n self.rect.y += self.speedy\n ## kill the sprite after it moves over the top border\n if self.rect.top > HEIGHT:\n self.kill()", "title": "" }, { "docid": "4d54aacdc89550a75f66e4a3a1646583", "score": "0.6341871", "text": "def update(self, delta_time):\n self.ball.update()", "title": "" }, { "docid": "9ba97eca933b5beb86ccdddb490b2432", "score": "0.6323229", "text": "def main():\n graphics = BreakoutGraphics()\n brick_num = 0\n lives_used = 0\n dy2 = 0\n 
# Add animation loop here!\n while True:\n graphics.set_ball_velocity()\n dx = graphics.get_dx()\n dy = graphics.get_dy()\n lock = graphics.lock\n pause(1)\n while lock:\n if brick_num > graphics.bricks/2:\n graphics.ball.move(dx, dy2)\n else:\n graphics.ball.move(dx, dy)\n pause(FRAME_RATE)\n # ball moves in the window\n if graphics.ball.x <= 0 or graphics.ball.x >= graphics.window.width-graphics.ball.width:\n dx = -dx\n if graphics.ball.y <= 0 or graphics.ball.y >= graphics.window.height-graphics.ball.height:\n dy = -dy\n dy2 = -dy2\n # ball bounces when touches bricks and paddle; and remove the bricks\n obj = graphics.check()\n if obj is graphics.paddle:\n if dy > 0 or dy2 > 0:\n dy = -dy\n dy2 = -dy2\n elif obj is graphics.l1 or obj is graphics.l2 or obj is graphics.l3:\n pass\n elif obj:\n graphics.window.remove(obj)\n dy = -dy\n dy2 = -dy2\n brick_num += 1\n graphics.score_label.text = f'scores: {brick_num}'\n # win the game\n if brick_num == graphics.bricks:\n break\n if brick_num > graphics.bricks/2:\n dy2 = dy*1.5\n # ball moves out of the window, try again\n if graphics.ball.y+graphics.ball.height >= graphics.window.height:\n graphics.set_ball()\n graphics.lock = False\n lives_used += 1\n break\n # you win\n if brick_num == graphics.bricks:\n # graphics.set_ball()\n graphics.window.remove(graphics.ball)\n graphics.win()\n break\n # game over\n if lives_used == NUM_LIVES:\n graphics.window.remove(graphics.ball)\n graphics.game_over()\n break", "title": "" }, { "docid": "0b7cda8168bd46b9049e6d968f638400", "score": "0.6320346", "text": "def tilted_ball_end(self):\n self.game_tilted = False\n if(not self.b_slam_tilted):\n self.notifyModes('evt_tilt_ball_ending', args=None, event_complete_fn=self.end_ball)\n else:\n self.notifyModes('evt_tilt_ball_ending', args=None, event_complete_fn=self.slam_tilt_complete)", "title": "" }, { "docid": "ba66b1a5e1a0af5bba50d18b4fc88f84", "score": "0.6295893", "text": "def bounce2(self):\n if self.ball[0].ycor() <= -380:\n self.ball[0].forward(0)\n self.ball[0].left((180 - (self.ball[0].heading() / 2)) * 4)", "title": "" }, { "docid": "72142001035cd36af95b011a1b786296", "score": "0.6288525", "text": "def reset_ball(self):\n self.ball.x = (self.window.width - self.ball.width) / 2\n self.ball.y = (self.window.height-self.ball.height)/2\n self.set_ball_velocity()\n self.window.add(self.ball)", "title": "" }, { "docid": "ce2777a7593c6c123d7e33ef0d42064e", "score": "0.6282805", "text": "def shoot_again(self):\n self.log(\"Skel: SHOOT AGAIN\")\n self.notifyModes('evt_shoot_again', args=None, event_complete_fn=self.ball_starting, only_active_modes=False)", "title": "" }, { "docid": "6e504cb4f4a0babc68702d2f1750ae15", "score": "0.62814546", "text": "def check_bounce(self):\n if self.ball.center.x < 0 and self.ball.velocity.dx < 0:\n self.ball.bounce_horizontal()\n\n if self.ball.center.y < 0 and self.ball.velocity.dy < 0:\n self.ball.bounce_vertical()\n\n if self.ball.center.y > SCREEN_HEIGHT and self.ball.velocity.dy > 0:\n self.ball.bounce_vertical()", "title": "" }, { "docid": "83fa164884469ecb5eb9ab731db6e641", "score": "0.62771213", "text": "def update(self, dt):\n self.ball.move()\n\n # bounce of paddles\n self.player1.paddle.bounce_ball(self.ball)\n self.player2.paddle.bounce_ball(self.ball)\n\n # bounce off top and bottom\n if (self.ball.y < 0) or (self.ball.top > self.height):\n self.ball.velocity_y *= -1\n\n # bounce off left and right\n if (self.ball.x < 0) or (self.ball.right > self.width):\n self.ball.velocity_x *= -1\n\n # went of to a side to 
score point?\n if (\n self.player1.gate.check_ball(self.ball) or\n self.player2.gate.check_ball(self.ball)\n ):\n self.serve_ball()", "title": "" }, { "docid": "8b579af2c8a48b9df73da0f90ef900dd", "score": "0.62751836", "text": "def _handle_ball_constraints(self, ball):\n position = ball.get_position()\n x = position.get_x()\n y = position.get_y()\n if x == 0 or x == constants.MAX_X:\n ball.set_velocity(ball.get_velocity().reverse_x())\n if y == 0:\n ball.set_velocity(ball.get_velocity().reverse_y())\n if y == constants.MAX_Y:\n sys.exit()", "title": "" }, { "docid": "a3c0f633f8fe0fcf31a420ed8c793f8c", "score": "0.627461", "text": "def crossing(self):\n if self.rect.top < 0:\n self.rect.top = 0\n self.speed_y = -self.speed_y\n self.can_damage = True\n if self.rect.bottom > system.WIN_HEIGHT:\n self.rect.bottom = system.WIN_HEIGHT\n self.speed_y = -self.speed_y\n self.can_damage = True\n if self.rect.right < 0:\n self.rect.right = 0\n self.speed_x = -self.speed_x\n self.can_damage = True\n if self.rect.left > system.WIN_WIDTH:\n self.rect.left = system.WIN_WIDTH\n self.speed_x = -self.speed_x\n self.can_damage = True", "title": "" }, { "docid": "07cbcbd64fa07ffa0606fb2127821e92", "score": "0.62731504", "text": "def update(self):\n self.rect.y += self.speedy\n if self.rect.bottom < 0:\n self.kill()", "title": "" }, { "docid": "07cbcbd64fa07ffa0606fb2127821e92", "score": "0.62731504", "text": "def update(self):\n self.rect.y += self.speedy\n if self.rect.bottom < 0:\n self.kill()", "title": "" }, { "docid": "abf1bd6e145439aa4aa5988631949188", "score": "0.6256542", "text": "def bounce_wall(self):\n self.y_move *= -1", "title": "" }, { "docid": "7f2338dffaa94c2e865eb1d9904df1f4", "score": "0.6254501", "text": "def _resolve_ball_physics(self, ball):\n assert isinstance(ball, Ball)\n\n bx, by = ball.position\n vx, vy = self.index_to_velocity[ball.velocity_index]\n orig_vx, orig_vy = vx, vy\n\n # Address the corner case mentioned in the docstring: if the ball\n # starts in the paddle, ignore everything and make it travel in a\n # straight line.\n if self.paddle.contains_position((bx, by)):\n self.debugprint_line('ball inside paddle')\n ball.position = vx + bx, vy + by\n return\n\n # Step A: Update ball position.\n #######################################################################\n vx_after_paddle_bounce = \\\n self.get_ball_vx_after_paddle_bounce(bx, by, vx, vy)\n\n intangible = [obj for obj in self.walls\n if isinstance(obj, WallOfPunishment)] + self.balls\n occupied_positions = self.occupied_by(exclude=intangible)\n\n self.hit_objects |= self.get_collision_elements((bx + vx, by + vy))\n\n # [Emptiness]\n if (bx + vx, by + vy) not in occupied_positions and \\\n vx_after_paddle_bounce is None:\n\n self.debugprint_line('ball physics', 0, vx_after_paddle_bounce)\n\n # Easiest case! Destination is empty. Notice that this assumes\n # there are no walls that are 1 pixel thick. 
If that was the case,\n # this suddenly becomes a nightmare.\n ball.position = vx + bx, vy + by\n # Velocity remains the same.\n\n # [Bounce, paddle]\n elif vx_after_paddle_bounce is not None:\n self.debugprint_line('ball physics', 1, vx_after_paddle_bounce)\n\n vx = vx_after_paddle_bounce\n vy *= -1\n\n # Don't let the ball slow down by a paddle bounce!\n if abs(vy) < self.ball_movement_radius and abs(vx) <= 1:\n vy = self.ball_movement_radius\n\n ball.velocity_index = self.velocity_to_index[(vx, vy)]\n ball.position = vx + bx, vy + by\n\n if (vx + bx, vy + by) in occupied_positions:\n vx *= -1\n ball.velocity_index = self.velocity_to_index[(vx, vy)]\n ball.position = vx + bx, vy + by\n\n # [Bounce, brick or wall]\n elif (bx + vx, by) in occupied_positions:\n self.debugprint_line('ball physics', 2, vx_after_paddle_bounce)\n\n vx *= -1\n ball.velocity_index = self.velocity_to_index[(vx, vy)]\n ball.velocity_index = self.randomize_velocity(ball.velocity_index)\n vx, vy = self.index_to_velocity[ball.velocity_index]\n ball.position = vx + bx, vy + by\n\n # [Bounce, brick or wall]\n elif (bx, by + vy) in occupied_positions:\n self.debugprint_line('ball physics', 3, vx_after_paddle_bounce)\n\n vy *= -1\n ball.velocity_index = self.velocity_to_index[(vx, vy)]\n ball.velocity_index = self.randomize_velocity(ball.velocity_index)\n vx, vy = self.index_to_velocity[ball.velocity_index]\n ball.position = vx + bx, vy + by\n\n else:\n self.debugprint_line('ball physics', 4, vx_after_paddle_bounce)\n\n vx, vy = -vx, -vy\n ball.velocity_index = self.velocity_to_index[(vx, vy)]\n ball.position = bx + vx, by + vy\n\n # Step B: Check where we landed, manage any higher-order collisions\n #######################################################################\n vx, vy = self.index_to_velocity[ball.velocity_index]\n if tuple(ball.position) in occupied_positions:\n self.debugprint_line('higher-order collision')\n\n # -----------------------------------------------------------------\n # Here we flag this as an indirect collision effect as some objects\n # (like bricks) only trigger effects, e.g., yield rewards, after a\n # direct collision. 
This exception is in order to avoid multiple\n # brick destructions in one timestep, which would mess with\n # learning when the destroyed bricks yield opposite sign rewards.\n # -----------------------------------------------------------------\n self.hit_objects |= self.get_collision_elements(ball.position,\n is_indirect=True)\n\n vx, vy = -orig_vx, -orig_vy\n ball.position = bx + vx, by + vy\n ball.velocity_index = self.velocity_to_index[(vx, vy)]", "title": "" }, { "docid": "d02c85735a2bb9b023949b1a6f802608", "score": "0.625311", "text": "def check_hit(self):\n for ball in self.overlapping_sprites:\n self.score.value += 10\n self.score.right = games.screen.width - 10\n ball.handle_hit()", "title": "" }, { "docid": "6443da240aba4f4a284e377835d62224", "score": "0.62422913", "text": "def draw_ball(self):\n pygame.draw.rect(self.screen, self.color, self.rect)", "title": "" }, { "docid": "25f11172a74aa15df86347d60de56ea5", "score": "0.62415546", "text": "def collide_Ball(self, spriteGroup):\n if pygame.sprite.spritecollide(self, spriteGroup, False):\n self.vy = -self.vy", "title": "" }, { "docid": "ad99bce6b22f52aeebc3d8f3d3b3486a", "score": "0.6237298", "text": "def ball_hit_paddle(self):\n obj_tl = self.window.get_object_at(self.ball.x, self.ball.y)\n obj_tr = self.window.get_object_at(self.ball.x + self.ball.width, self.ball.y)\n obj_bl = self.window.get_object_at(self.ball.x, self.ball.y + self.ball.height)\n obj_br = self.window.get_object_at(self.ball.x + self.ball.width, self.ball.y + self.ball.height)\n is_hit_paddle = (obj_tl is self.paddle or obj_tr is self.paddle\n or obj_bl is self.paddle or obj_br is self.paddle)\n return is_hit_paddle", "title": "" }, { "docid": "8a063e1bf99f557defa8017608a92733", "score": "0.62319815", "text": "def execute(self):\n this=self.owner\n # NOTE: Players could be over-lapping and close to ball, making who ends up with it random(ish).\n # BUT, if collision between players have been resolved first, maybe this is okay?\n # NOTE: Add check of handling skill (or somesuch) to possibly fumble pickup?\n\n for p in this.pitch.players.values():\n dist = (p.pos - this.pos).mag2()\n if dist < p.size**2:\n this.state = BallHeld(this,p)", "title": "" }, { "docid": "31b0a0386b4877cc5caef741669d30fc", "score": "0.62225455", "text": "def reflect_ball(self):\n ball = self.ball\n\n if (ball.direction.x > 0 and ball.right > self.frame.right) or \\\n (ball.direction.x < 0 and ball.x < self.frame.left):\n ball.direction.x = -ball.direction.x\n\n if ball.direction.y < 0 and ball.y < self.frame.top + 0.1:\n ball.direction.y = -ball.direction.y", "title": "" }, { "docid": "31b0a0386b4877cc5caef741669d30fc", "score": "0.62225455", "text": "def reflect_ball(self):\n ball = self.ball\n\n if (ball.direction.x > 0 and ball.right > self.frame.right) or \\\n (ball.direction.x < 0 and ball.x < self.frame.left):\n ball.direction.x = -ball.direction.x\n\n if ball.direction.y < 0 and ball.y < self.frame.top + 0.1:\n ball.direction.y = -ball.direction.y", "title": "" }, { "docid": "86f2d442a166fed2a7c07d07ea904a84", "score": "0.6222311", "text": "def bounceBall(self):\n \n if self.getVy()>=0:\n if self.top>=GAME_HEIGHT:\n self._vy=-1*self._vy\n \n if self.getVx()>=0:\n if self.left>=GAME_WIDTH:\n self._vx=-1*self._vx\n else:\n if self.right<=0:\n self._vx=-1*self._vx", "title": "" }, { "docid": "033980294af6ce674da4c6eddb3ad940", "score": "0.6219676", "text": "def on_collide(self, obj=None):\n if not self.dead:\n # temp = obj.on_collide()\n # if self.owner is not None:\n # 
self.owner.score += temp\n _ = obj\n self.dead = True", "title": "" }, { "docid": "8410ca68bcedf3c488aeed75d1d36156", "score": "0.6216519", "text": "def collision_begin(space, arbiter, *args, **kwargs):\n ball_shape = arbiter.shapes[0]\n ball_shape.ball.freeze()\n return True", "title": "" }, { "docid": "7a777ddf4a2b179c2c93e9f04b1daad6", "score": "0.6210555", "text": "def movement(self):\r\n self.x += self.speed_x\r\n self.y += self.speed_y\r\n \r\n #wall col\r\n if self.y <= 0:\r\n # bounce off ceiling\r\n self.speed_y *= -1\r\n elif self.y >= self.graphic.WINDOWHEIGHT - self.size:\r\n # bounce off floor (compensates for ball size so it stays on screen)\r\n self.speed_y *= -1\r\n\r\n if self.x <= 0:\r\n # resets ball if hits dragon wall, makes sure it doesn't touch the dragon or dragon loses health\r\n self.x, self.y = self.dragon.size + 1, self.dragon.y + random.randint(150, 200)\r\n self.speed_x *= -1\r\n\r\n elif self.x >= self.graphic.WINDOWWIDTH - self.size:\r\n # resets ball if hits village, makes sure it doesn't touch the dragon or dragon loses health\r\n self.village.updateHealth(self.dragon.attackOther(\"fire\", self.village.health))\r\n self.x, self.y = self.dragon.size + 1, self.dragon.y + random.randint(150, self.dragon.size) \r\n \r\n if self.rect.colliderect(self.player.playerRect):\r\n self.graphic.fireballSound.play()\r\n # deflects ball if hits player\r\n if \"shield\" in self.player.invPlayer:\r\n # must bounce ball back player width or it gets stuck on player. Reverse speed so it returns.\r\n self.x -= (self.player.playerWidth + self.size + self.speed_x)\r\n self.speed_x *= -1\r\n self.speed_y *= -1\r\n else:\r\n # resets ball if hits player, makes sure it doesn't touch the dragon or dragon loses health\r\n self.player.updateHealth(self.dragon.attackOther(\"fire\", self.player.health))\r\n self.x, self.y = self.dragon.size + 1, self.dragon.y + random.randint(150, self.dragon.size)\r\n self.speed_x += 1\r\n self.speed_x *= -1\r\n self.speed_y *= -1\r\n\r\n if self.rect.colliderect(self.dragon.dragonRect):\r\n self.graphic.fireballSound.play()\r\n # resets ball when it hits the dragon \r\n self.dragon.updateHealth(self.player.attackOther(\"fire\", self.dragon.health))\r\n self.x, self.y = self.dragon.size, self.dragon.y + random.randint(150, self.dragon.size)\r\n self.speed_x *= -1\r\n self.speed_y *= -1\r\n self.speed_x += 1", "title": "" }, { "docid": "93ea58528d84e62fdd37d41839b9cfd5", "score": "0.62065727", "text": "def update(self):\n # Update the decimal position of the ball.\n self.x += self.settings.ball_speed\n # Update the rect position\n self.rect.x = self.x", "title": "" }, { "docid": "2f0159a5cf672ab1468f6475e2a60c6f", "score": "0.6197469", "text": "def main() -> None:\r\n # Annotate and initialize variables\r\n WIDTH: int = 640\r\n HEIGHT: int = 480\r\n screen: pygame.Surface\r\n background: pygame.Surface\r\n wrapping_ball: WrappingBall\r\n ball_group: pygame.sprite.Group = pygame.sprite.Group()\r\n user_quit: bool = False\r\n e: pygame.event.Event\r\n caption: str = \"Add a window caption here\"\r\n x: int = 0\r\n y: int = 200\r\n dx: int = 5\r\n i: int\r\n\r\n # Set up assets.\r\n screen = make_window(WIDTH, HEIGHT, caption)\r\n background = pygame.Surface((WIDTH, HEIGHT))\r\n background.fill((255, 255, 255))\r\n screen.blit(background, (0, 0))\r\n ball_image: pygame.Surface = pygame.image.load(\"ball.gif\").convert()\r\n for i in range(8):\r\n wrapping_ball = WrappingBall(ball_image,\r\n random.randint(0, WIDTH),\r\n random.randint(0, HEIGHT))\r\n 
ball_group.add(wrapping_ball)\r\n\r\n clock: pygame.time.Clock = pygame.time.Clock()\r\n\r\n # Process events until the user chooses to quit.\r\n while not user_quit:\r\n # Loop 30 times per second\r\n clock.tick(30)\r\n for e in pygame.event.get():\r\n # Process a quit choice.\r\n if e.type == pygame.QUIT:\r\n user_quit = True\r\n elif e.type == pygame.MOUSEMOTION:\r\n pass\r\n elif e.type == pygame.MOUSEBUTTONDOWN:\r\n pass\r\n elif e.type == pygame.MOUSEBUTTONUP:\r\n pass\r\n elif e.type == pygame.KEYDOWN:\r\n pass\r\n elif e.type == pygame.KEYUP:\r\n pass\r\n elif e.type == pygame.ACTIVEEVENT:\r\n pass\r\n\r\n \r\n # Update and draw.\r\n ball_group.clear(screen, background)\r\n ball_group.update(screen)\r\n ball_group.draw(screen)\r\n pygame.display.flip()\r\n pygame.quit()", "title": "" }, { "docid": "1bd5c84689d95592b4798d0126e66baf", "score": "0.61930984", "text": "def _update_balls(self):\n self._ticks_to_next_ball -= 1\n if self._ticks_to_next_ball <= 0:\n self._create_ball()\n self._ticks_to_next_ball = 100\n # Remove balls that fall below 100 vertically\n balls_to_remove = [ball for ball in self._balls if ball.body.position.y < 100]\n for ball in balls_to_remove:\n self._space.remove(ball, ball.body)\n self._balls.remove(ball)", "title": "" }, { "docid": "228328bbf806d7c6755171ea0074c9fa", "score": "0.61855316", "text": "def resetBall(self):\n self._ball = Ball(GEllipse())", "title": "" }, { "docid": "a154f9083c4c2630dc1efe71f7d72472", "score": "0.61816347", "text": "def draw_ball(self):\n\t\tpygame.draw.circle(self.screen, self.ball_config[3],\n\t\t (self.ball.left, self.ball.top), self.ball_config[0])", "title": "" }, { "docid": "0989cf17b4cb50ea2e1abe8328623471", "score": "0.6180225", "text": "def moveBall(self, sound):\n self._ball.changePosition()\n s = self._ball.checkWall()\n if s == 'end':\n self._playerlives -= 1\n x = self._getCollidingObject()\n if x == self._paddle:\n self._hitPaddle(sound)\n elif len(x) != 0:\n self._ball.negate_vy()\n for i in x:\n self._updateScore()\n self._wall.removeBrick(i)\n if sound == True: \n piano = Sound('Subsynth-modfilter.ogg')\n piano.play()\n if self._wall.getBricks() == []:\n self._playerlives = 0", "title": "" }, { "docid": "5e4d8ddf08bd28fc780e23cdf08d38b8", "score": "0.6180004", "text": "def on_draw(self):\n arcade.start_render()\n self.ball.draw()", "title": "" }, { "docid": "9622056dfba74e09cc74cef6616209ec", "score": "0.6169122", "text": "def over(self, px, py):\n if self.collide(px, py):\n if self.click_time == 0:\n self.is_over = True\n else:\n if self.click_time == 0:\n self.is_over = False\n return self.is_over", "title": "" }, { "docid": "1c80e3f9ddce911274b5b2e2e71beba0", "score": "0.61671346", "text": "def game_over(self):\n return", "title": "" }, { "docid": "febbb3aa642879d05d0ea57c4c4228cc", "score": "0.6164663", "text": "def collide(self, ball1, ball2):\n normal = ball2.pos - ball1.pos\n normal = normal / np.sqrt(np.sum(normal**2))\n if self.ndim == 1:\n tangent = np.array([0])\n else:\n tangent = np.copy(normal[::-1])\n tangent[0] = -tangent[0]\n\n vel1old = np.copy(ball1.vel)\n vel2old = np.copy(ball2.vel)\n\n ball1_vel_temp = normal * (normal.dot(ball2.vel)) + \\\n tangent * (tangent.dot(ball1.vel))\n ball2.vel = normal * (normal.dot(ball1.vel)) + \\\n tangent * (tangent.dot(ball2.vel))\n ball1.vel = ball1_vel_temp\n\n # assert momentum and energy conserved\n if not np.all((ball1.vel + ball2.vel - (vel1old + vel2old)) < 1e-5):\n warnings.warn(f'Violation of conservation of momentum. 
'\n 'Try running with smaller dt to avoid, '\n 'or with fewer balls of larger radii',\n category=RuntimeWarning)\n # if not np.sum(ball1.vel**2 + ball2.vel**2) - \\\n # np.sum(vel1old**2 + vel2old**2) < 1e-5:\n # warnings.warn(f'Violation of conservation of energy. '\n # 'Try running with smaller dt to avoid, '\n # 'or with fewer balls of larger radii',\n # category=RuntimeWarning)", "title": "" }, { "docid": "835d557857686f2bf26b6650a73e4cd0", "score": "0.6161421", "text": "def gameloop(self):\n # Initiate clock object\n clock = pygame.time.Clock()\n\n # Initialize pygame\n pygame.init()\n\n # set the pygame window name \n pygame.display.set_caption('Breakout') \n\n # FONTS\n pygame.font.init() # module for adding text\n win_text = pygame.font.SysFont('Comic Sans MS', 30)\n\n # Initiate the objects used in the game\n Game_ball = Ball()\n Player = Platform()\n # Spawns bricks on top of screen\n Bricks.initiate_bricks(AMOUNT_OF_BRICKS)\n\n # Set start value to playing\n Game = 'Playing'\n\n # Make timer and frames for FPS\n time1 = time.clock()\n frames = 0\n lastframe = 0\n\n # Start the playing game loop\n while Game == 'Playing':\n self.Event()\n\n # Limit to 60FPS this is to contain the speed of the ball on all computers (unless their FPS will stagnate below 60)\n TIME_PASSED = clock.tick(60)\n # Time in seconds\n TIME_PASSED_SECONDS = TIME_PASSED/1000.0\n\n Game_ball.move(TIME_PASSED_SECONDS)\n Player.move(TIME_PASSED_SECONDS)\n Game_ball.check_platform_collision(Player)\n Lost_check = Game_ball.check_wall_collisions()\n Bricks.update_bricks(Game_ball)\n\n \n # Update screen\n SCREEN.fill(BLACK)\n # Draw objects to screen\n for bricks in Bricks.bricks_list:\n bricks.draw()\n\n # FPS logic\n time1, frames, lastframe = self.fps_check(time1, frames, lastframe)\n Game_ball.draw()\n Player.draw()\n frames_text = win_text.render('FPS: %s'%lastframe, False, GRAY)\n SCREEN.blit(frames_text,(50,50))\n pygame.display.update()\n\n # Check if won on lost game.\n if len(Bricks.bricks_list) == 0:\n Game = 'Won'\n\n if Lost_check:\n Game = 'Lost'\n \n # Print text on screen if won\n while Game == 'Won':\n self.Event()\n won_text = win_text.render('You won!', False, GRAY)\n SCREEN.blit(won_text,(600,700))\n pygame.display.update()\n \n # Print text on screen if lost\n while Game == 'Lost':\n self.Event()\n lost_text = win_text.render('You Lost!', False, GRAY)\n SCREEN.blit(lost_text,(600,700))\n pygame.display.update()", "title": "" }, { "docid": "723b240993cf77afe193739dac9b3b31", "score": "0.6159168", "text": "def kick(self,ball,mag):\n print(\"Ball kicked\")\n ball.xd = self.xd + mag*m.cos(self.theta)\n ball.yd = self.yd + mag*m.sin(self.theta)\n ball.speed = m.sqrt(ball.xd*ball.xd +ball.yd*ball.yd)\n self.dribble = 0\n kc = (self.x-ball.x)/(self.r+ball.r)\n ks = (self.y-ball.y)/(self.r+ball.r)\n if kc>0:\n ball.x = self.x-(self.r+ball.r)*kc-2\n elif kc < 0:\n ball.x = self.x-(self.r+ball.r)*kc+2\n if ks > 0:\n ball.y = self.y-(self.r+ball.r)*ks+2\n elif ks < 0:\n ball.y = self.y-(self.r+self.r)*ks-2", "title": "" }, { "docid": "b9a4e4e5d34b864eea1d45d9531e61c0", "score": "0.61435264", "text": "def _shoot(self):\n #print 'SHOOT BULLET'\n if self.side == 'top':\n self.yposc += self.speed\n if self.side == 'bottom':\n self.yposc -= self.speed\n soundPellet.play()", "title": "" }, { "docid": "03710effdf0406422898727eb8576835", "score": "0.613754", "text": "def __init__(self, winWidth, winHeight):\n \n #Calls the Sprite init function \n super().__init__()\n \n #Set sound for the wall\n self.wallSound = 
pygame.mixer.Sound(\"switch36.wav\")\n \n #Set window width and height\n self.winWidth = winWidth\n self.winHeight = winHeight\n \n #Set ball's speed and direction\n self.speed = [0, 0]\n \n #Import ball image\n self.image = pygame.image.load(\"ball.png\").convert()\n \n #Set all white pixels in self.image to transparent\n self.image.set_colorkey((0,0,0))\n \n #Set ball initial position\n self.rect = self.image.get_rect() \n self.rect.centerx = self.rect.width // 2\n self.rect.centery = self.winHeight // 2", "title": "" }, { "docid": "8008bc2f34aa1494431b785888d7a26a", "score": "0.61183393", "text": "def _start_ball(self):\n\t\t# Set the ball's center to a random position on the screen. x goes from 0 to the screen width, y goes from 0 to 1/3 screen height\n\t\tself.x, self.y = (randint(0, self.settings.screen_width), randint(0, self.settings.screen_width / 3))\n\t\tself.rect.center = (self.x, self.y)", "title": "" } ]
a53f33dc7e4342805ba6d0e3d6122bd4
Create the config and optionally a htdigest file
[ { "docid": "bbb350e8c0d33e081ef6e2150de58605", "score": "0.70987415", "text": "def trac_config_create(adminuser, adminpass, realm, digestfile, trac_ini):\n config_str = ''\n config_str += BASE_CONFIG\n \n htdigest_create(digestfile, adminuser, realm, adminpass)\n # adding appropriate configuration to use the digestfile with the\n # account manager plugin\n config_str += ACCOUNTMGRSTR % {'htdigest':digestfile,\n 'realm':realm}\n with open(trac_ini, 'a') as inifile:\n inifile.write(config_str)\n return adminpass", "title": "" } ]
[ { "docid": "a74ae68dbc7a73070c813da713290bcf", "score": "0.67188954", "text": "def generate_config(config):\n config['HASHBENCH'] = {'location_john': './john/run/john',\n 'location_hashcat': './hashcat/hashcat'}\n with open(\"hashbench.conf\", \"w\") as configuration_file:\n config.write(configuration_file)\n\n print(\"No configuration file was found, generated one. For more info see doc/config.md\")", "title": "" }, { "docid": "4b2947d5d32a1f860c70b250712e1578", "score": "0.64631593", "text": "def createConfigFile():\n configFile = os.path.join(os.path.expanduser('~'), '.wakatime.cfg')\n try:\n with open(configFile) as fh:\n pass\n except IOError:\n try:\n with open(configFile, 'w') as fh:\n fh.write(\"[settings]\\n\")\n fh.write(\"debug = false\\n\")\n fh.write(\"hidefilenames = false\\n\")\n except IOError:\n pass", "title": "" }, { "docid": "df78e771769193a24aeb817df8d3cdb3", "score": "0.6417892", "text": "def _create_configure_file(ctx):\n import pipes\n\n configure = sys.argv[2:]\n if not ctx.options.secret_key:\n configure.append(\"--secret-key=%s\" % ctx.env.SECRET_KEY)\n\n configure = (['%s' % ' '.join(sys.argv[:2])] +\n [pipes.quote(c) for c in configure])\n\n f = open('configure', 'w')\n f.write('#!/bin/sh\\n')\n f.write(' \\\\\\n '.join(configure) + '\\n')\n f.close()\n\n os.chmod('configure', 0700)", "title": "" }, { "docid": "c87affc7c2e1af22ac1c696ebc220fde", "score": "0.63475615", "text": "def _createConfig(self):\n with open(\"grab.conf\", \"w\") as cfgfile:\n cfg = ConfigParser.ConfigParser()\n # start adding the parameters and its values\n cfg.add_section(\"Paths\")\n cfg.set(\"Paths\", \"savePath\", \"ebooks/\")\n cfg.set(\"Paths\", \"visitedUrls\", \"visited.txt\")\n\n cfg.add_section(\"Connection\")\n cfg.set(\"Connection\", \"sleep\", \"60\")\n cfg.set(\"Connection\", \"userAgent\", \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:26.0) Gecko/20100101 Firefox/26.0\")\n\n # write and close the file\n cfg.write(cfgfile)\n return", "title": "" }, { "docid": "b7bc86bb01edb829a2a2abdfa9348131", "score": "0.633114", "text": "def create(args):\n setting_file = args.setting\n output_file = args.output\n Configer.create_from_file(Path(setting_file), Path(output_file))", "title": "" }, { "docid": "aeb43259d5e0d22764587661442b8752", "score": "0.61817783", "text": "def htdigest_create(filename, user, realm, password, path=''):\n\n user_realm = ':'.join((user, realm))\n digest = md5(':'.join((user_realm, password))).hexdigest()\n data = ':'.join((user_realm, digest)) + '\\n'\n\n filepath = os.path.join(path, filename)\n temp, tempfilepath = mkstemp()\n with open(tempfilepath,'w') as tempdigestfile:\n if os.path.exists(filepath):\n with open(filepath) as origdigestfile:\n for line in origdigestfile:\n if line.strip().startswith(user_realm + ':'):\n tempdigestfile.write('#' + line)\n else:\n tempdigestfile.write(line)\n tempdigestfile.write(data)\n os.close(temp)\n if os.path.exists(filepath):\n os.remove(filepath)\n shutil.move(tempfilepath, filepath)", "title": "" }, { "docid": "1bcae83a5ceda9789e10bb754528feab", "score": "0.61424434", "text": "def generate_config_file(): \n\n config = configparser.ConfigParser()\n\n # General config details\n config['general'] = {\n \"root\" : CURRENT_LOCATION,\n \"drive_status\" : False,\n \"populated\" : False\n }\n\n # User config details\n config['user'] = {\n \"folder_name\" : CURRENT_LOCATION,\n \"folder_id\" : \"\"\n }\n\n\n path = os.path.join(os.getcwd(), '.sink', 'config', 'config.ini')\n with open(path, \"w\") as configfile:\n 
config.write(configfile)", "title": "" }, { "docid": "04cf2a9ba7cf591e82d9ad79af8b8814", "score": "0.6114724", "text": "def create_configuration_file(parsers, args):\n logger.info(\"Writing configuration file %s\", args.create_configuration_file)\n import datetime\n executables = bob.extension.find_executable(os.path.basename(sys.argv[0]),\n prefixes=[os.path.dirname(sys.argv[0]), 'bin'])\n if not executables:\n executables = [sys.argv[0]]\n\n parser = parsers['main']\n\n bob.io.base.create_directories_safe(os.path.dirname(args.create_configuration_file))\n\n required = \"# Configuration file automatically generated at %s for %s.\\n\\n\" % (\n datetime.date.today(), executables[0])\n required += \"##################################################\\n\" \\\n \"############### REQUIRED ARGUMENTS ###############\\n\" \\\n \"##################################################\\n\\n\"\n required += \"# These arguments need to be set.\\n\\n\\n\"\n common = \"##################################################\\n\" \\\n \"################ COMMON ARGUMENTS ################\\n\" \\\n \"##################################################\\n\\n\"\n common += \"# These arguments are commonly changed.\\n\\n\\n\"\n optional = \"##################################################\\n\" \\\n \"############### OPTIONAL ARGUMENTS ###############\\n\" \\\n \"##################################################\\n\\n\"\n optional += \"# Files and directories might commonly be specified with absolute paths or \" \\\n \"relative to the temp_directory.\\n# Change these options, e.g., to reuse parts \" \\\n \"of other experiments.\\n\\n\\n\"\n rare = \"##################################################\\n\" \\\n \"############ RARELY CHANGED ARGUMENTS ############\\n\" \\\n \"##################################################\\n\\n\\n\"\n\n with open(args.create_configuration_file, 'w') as f:\n\n for action in parser._actions[3:]:\n if action.help == \"==SUPPRESS==\":\n continue\n\n tmp = \"# %s\\n\\n\" % action.help\n if action.nargs is None and action.type is None and action.default is not None:\n tmp += \"#%s = '%s'\\n\\n\\n\" % (action.dest, action.default)\n else:\n tmp += \"#%s = %s\\n\\n\\n\" % (action.dest, action.default)\n\n if action.dest in _required_list:\n required += tmp\n elif action.dest in _common_list:\n common += tmp\n elif action.dest in _optional_list:\n optional += tmp\n else:\n rare += tmp\n\n f.write(required)\n f.write(common)\n f.write(optional)\n f.write(rare)\n\n parser.exit(1, \"Configuration file '%s' was written; exiting\\n\" % args.create_configuration_file)", "title": "" }, { "docid": "7de91ca71a1de535abed84257e91e56d", "score": "0.6112091", "text": "def createConfig(path):\n config = configparser.ConfigParser()\n parser = create_parser()\n args = parser.parse_args(sys.argv[1:])\n\n config.add_section(\"TG\")\n config[\"TG\"][\"token\"] = args.token\n\n config.add_section(\"Mongo\")\n config[\"Mongo\"][\"db\"] = args.database\n\n with open(path, \"w\") as config_file:\n config.write(config_file)", "title": "" }, { "docid": "5096c2c47a9541a6a7fe4c90c9f6eed5", "score": "0.6060391", "text": "def _create_agent_config(node: CephNode, config: Dict) -> None:\n node.exec_command(sudo=True, cmd=\"mkdir -p /usr/local/etc/vault/\")\n\n _write_remote_file(\n node=node,\n file_name=\"/usr/local/etc/vault/.app-role-id\",\n content=config[\"agent\"][\"role-id\"],\n )\n _write_remote_file(\n node=node,\n file_name=\"/usr/local/etc/vault/.app-secret-id\",\n content=config[\"agent\"][\"secret-id\"],\n 
)\n # hcl file\n agent_conf = {\"url\": config[\"url\"], \"auth\": config[\"agent\"][\"auth\"]}\n tmpl = Template(AGENT_HCL)\n data = tmpl.render(data=agent_conf)\n _write_remote_file(\n node=node,\n file_name=\"/usr/local/etc/vault/agent.hcl\",\n content=data,\n )", "title": "" }, { "docid": "00b6e477796bc229bc53bb47cd4efcce", "score": "0.6038291", "text": "def make_minimum_configuration():\n\tSECRET_KEY = ''.join('%02x' % ord(x) for x in os.urandom(16))\n\twith open(\"config.py\", \"w\") as configfile:\n\t\tconfigfile.write(\"SECRET_KEY='%s'\\n\"%SECRET_KEY)", "title": "" }, { "docid": "c0de652537e29cb175f34a7b76059150", "score": "0.60380405", "text": "def new_config_file():\n if os.path.exists(config_filename):\n os.remove(config_filename)\n\n config = {\n # rF2 items\n '# %ProgramFiles(x86)% will be expanded to your Windows setting but you can write it explicitly if you want': \"\",\n '# Same for %LOCALAPPDATA%': \"\",\n '# Use / not backslash': \"\",\n 'rF2root': '%ProgramFiles(x86)%/Steam/steamapps/common/rFactor 2',\n 'SteamExe': \"%ProgramFiles(x86)%/Steam/steam.exe\",\n 'SteamDelaySeconds': 10,\n '#SteamDelaySeconds: How long it takes Steam to start up before we can start rF2': \"\",\n # 'DiscordExe' : '\"%APPDATA%/Microsoft/Windows/Start Menu/Programs/Discord Inc/Discord.lnk\"',\n # '#DiscordExe: had to use short cut as the command wouldn\\'t work' : '',\n 'DiscordExe': '%LOCALAPPDATA%/Discord/Update.exe',\n 'DiscordArgs': '--processStart Discord.exe',\n 'CrewChiefExe': \"%ProgramFiles(x86)%/Britton IT Ltd/CrewChiefV4/CrewChiefV4.exe\",\n 'CrewChiefArgs': 'RF2_64BIT',\n 'VolumeControlExe': \"%ProgramFiles(x86)%/VolumeControl/VolumeControl.exe\",\n 'TeamSpeakExe': \"%ProgramFiles(x86)%/TeamSpeak 3 Client/ts3client_win64.exe\",\n '#MyPreCommand: use this call a program or batch file before rF2 runs': \"\",\n 'MyPreCommand': '',\n 'MyPreCommandArgs': '',\n '#MyPostCommand: use this call a program or batch file after rF2 runs': \"\",\n 'MyPostCommand': '',\n 'MyPostCommandArgs': '',\n 'UserData player': 'player'\n }\n\n # -- Add auto detected items\n config.update(auto_detect_apps())\n\n _text = json.dumps(config, sort_keys=True, indent=4)\n writeFile(config_filename, _text)\n return config", "title": "" }, { "docid": "38bf19fab42aef4e888e8eff6d91354a", "score": "0.60374755", "text": "def create_config():\n for filename in ['settings.yaml', 'logging.yaml']:\n src = pkg_resources.resource_stream('dominator.utils', filename)\n dstpath = os.path.join(utils.settings.dirpath, filename)\n if os.path.exists(dstpath):\n if not click.confirm(\"File {} exists. 
Are you sure you want to overwrite it?\".format(dstpath)):\n continue\n getlogger().debug(\"writing config to {}\".format(dstpath))\n with open(dstpath, 'w+') as dst:\n dst.write(src.read())", "title": "" }, { "docid": "67f60ca7a235be68387ca12c0a00bb12", "score": "0.59808147", "text": "def _setupConfigFile(self, configParser):\n system = configParser.addSection('system')\n system.exePath = '../exe/exe'\n system.exeDir = '../exe'\n system.webDir = '../exe/webui'\n system.port = 8081\n tmpDir = Path('tmp')\n if not tmpDir.exists(): tmpDir.mkdir()\n dataDir = tmpDir/'data'\n if not dataDir.exists():\n dataDir.mkdir()\n system.dataDir = dataDir\n system.browserPath = 'not really used in tests so far'\n logging = configParser.addSection('logging')\n logging.root = 'DEBUG'", "title": "" }, { "docid": "1e392da9b7a9e8c6038ad0fbb3b69165", "score": "0.5919076", "text": "def _set_config(fpath, kwargs):\n try:\n with open(os.path.join(fpath, \".tccfg\"), \"w\") as f:\n if \"tpam_host\" in kwargs:\n f.write(\"%s=%s\\n\" % (\"tpam_host\", kwargs[\"tpam_host\"]))\n if \"tpam_user\" in kwargs:\n f.write(\"%s=%s\\n\" % (\"tpam_user\", kwargs[\"tpam_user\"]))\n if \"tpam_key\" in kwargs:\n kwargs[\"tpam_key\"] = expand_user_path(kwargs[\"tpam_key\"])\n f.write(\"%s=%s\\n\" % (\"tpam_key\", os.path.normpath(kwargs[\"tpam_key\"])))\n except IOError as e:\n print(\"Could not write .tccfg file: %s\" % e)", "title": "" }, { "docid": "36ee2705a7b0bc4a7bd3df0a6f979e38", "score": "0.5892735", "text": "def write_settings():\n try:\n with open('%s/%s' % (credential_path,credential_file), 'w') as f:\n config.write(f)\n except IOError:\n print('ERROR: unable to write to %s/%s' % (credential_path,credential_file))\n sys.exit(1)", "title": "" }, { "docid": "818d6ebf6a7dff4e352b85877f58a9ee", "score": "0.5839846", "text": "def make_wn_cfg(self):\n\n # write each line to config\n self._logger.info('Creating file {}'.format(self.wn_cfg_file))\n with open(self.wn_cfg_file, 'w') as f:\n for k, v in self.config.items():\n f.write('{} = {}\\n'.format(k, v))", "title": "" }, { "docid": "8ab328fbb4768b18454df3500f14d4ee", "score": "0.5829965", "text": "def generate_initial_config():\n\n if get_config_file().is_file() is False:\n config_path = get_config_file()\n with open(config_path, \"w\", encoding=\"utf-8\") as config_file:\n json.dump(DEFAULT_CONFIG, config_file, indent=4)", "title": "" }, { "docid": "ab7d231ec370916032d12122b84678a0", "score": "0.58137786", "text": "def createDevFiles(properties):\n #generate the config file for nginx\n createNGINXConf(properties)\n\n #generate the config file for apache\n createApacheConf(properties)\n \n #generate the run_app script\n createRunAppScript(properties)", "title": "" }, { "docid": "02882a303b96767d758327bd085dcce1", "score": "0.58039546", "text": "def build_config():\n config['BASIC'] = {'input': '',\n 'output': '',\n 'chapterlabel': 'chapter',\n 'perseriesconfig': ''}\n config['ADVANCED'] = {'splitchar': ',',\n 'regex': ''}\n config['FEATURES'] = {'preprocess': 'True',\n 'decorations': 'True',\n 'panels': 'True',\n 'nonlatin': 'True',\n 'speakers': 'True',\n 'tildes': 'True',\n 'ellipses': 'True',\n 'blanklines': 'True',\n 'inputisoutput': 'False'}\n with open(configfile, 'w') as fout:\n config.write(fout)", "title": "" }, { "docid": "5f67becb09737df74ea37de7b05e959c", "score": "0.58006686", "text": "def config_man(args):\n\n file_check()\n f_cfg, cfg_tmp = mkstemp()\n\n with open(cfg_tmp, 'w') as cfg:\n cfg.write(\n \"database=\"+args.db+'\\n'\n +\"db_os=\"+args.db_os+'\\n'\n 
+\"rel=\"+args.rel+'\\n'\n +\"icat_os=\"+args.cat_os+'\\n'\n +\"ires_os=\"+args.res_os+'\\n'\n +\"box_mode=\"+args.box+'\\n'\n +\"ires_nodes=\"+args.nodes+'\\n'\n )\n credentials = 0\n\n #if a redhat box is needed\n if ('redhat7' in args.db_os) or ('redhat7' in args.cat_os):\n if os.path.isfile('./settings/settings.cfg'):\n with open('./settings/settings.cfg', 'r') as settings:\n for line in settings:\n if 'username' in line or 'password' in line:\n cfg.write(line)\n credentials = credentials + 1\n if credentials != 2:\n cfg.write(\n \"rh_username=\"+raw_input('Enter your RedHat username: ')+'\\n'\n +\"rh_password=\"+getpass.getpass()+'\\n'\n )\n\n shutil.move(cfg_tmp, './settings/settings.cfg')\n\n\n\n os.close(f_cfg)", "title": "" }, { "docid": "11cb71e26dec113c83a414c059df55fc", "score": "0.5791446", "text": "def createConfiguration():\n writeConfigurationScript(domainName,configurationName,listenerPort,serverName,originServer,scriptFile)\n printFlag('Begin Creating Configuration')\n execWLSTCommand(scriptFile)\n printFlag('Done Creating Configuration')", "title": "" }, { "docid": "8c1f2c7bde3dd130002d131dd9e45fe6", "score": "0.5790035", "text": "def init():\n\n if os.path.exists(CONFIG_FILENAME):\n raise click.FileError(CONFIG_FILENAME, \"I won't overwrite an existing file\")\n\n click.echo(f\"generating new secret key into {CONFIG_FILENAME}\")\n sk = NUMS.random()\n write_config({\"sk\": sk, \"claims\": {}})", "title": "" }, { "docid": "ce876714808d39dcd1dc0a2778bb17f4", "score": "0.57492286", "text": "def generate_example_config():\n generate_example_config_file()", "title": "" }, { "docid": "eaa23c4e2a2a370704c5146b65cb5bf5", "score": "0.57367414", "text": "def generate_config(self):\n if self.config_available():\n raise FileExistsAlready(\"File {0} already exists.\"\n .format(self.filename))\n else:\n content = \"\"\"\\\n# email the results should be send to\nemail = {email}\n\n# category in which should be searched (space separated list)\ncategory = {category}\n\n# keywords in the title (space separated list)\ntitle = {title}\n\n# authors that should be searched (space separated list)\nauthors = {author}\n\n# keywords in abstract that should be searched (space separated list)\nabstract = {abstract}\n\n# whether to suppress empty emails or not (True/False)\nsuppress = {suppress}\n\n# the amount of days the you want your results to go back\nlastNDays = {lastNDays}\n\"\"\"\n email = raw_input(\"What is your email address? \"\n \"(Has to be from uni-mainz.de) \")\n category = raw_input(\"Which categories should be searched? \"\n \"(cond-mat, cond-mat:soft) \")\n title = raw_input(\"Which keywords should be in the title? \")\n author = raw_input(\"Who are the authors? \")\n abstract = raw_input(\"Which keywords should be in the abstract? \")\n suppress = raw_input(\"Should empty email be suppressed? \"\n \"(True/False) \")\n lastNDays = raw_input(\"How many days from today should we search \"\n \"back? 
\")\n\n with open(self.filename, 'w') as conf:\n conf.write(content.format(email=email, category=category,\n title=title, author=author,\n abstract=abstract, suppress=suppress,\n lastNDays=lastNDays))", "title": "" }, { "docid": "ad7a056358b0b4ad71b9412b965d9d57", "score": "0.5702699", "text": "def _prepare_config(separate, resources, flavor_ref,\n git_command, zip_patch,\n directory, image_ref, architecture, use_arestor):\n\n conf = six.moves.configparser.SafeConfigParser()\n conf.add_section(\"argus\")\n conf.add_section(\"openstack\")\n\n conf.set(\"argus\", \"output_directory\", os.path.join(directory, \"output\"))\n conf.set(\"argus\", \"argus_log_file\", os.path.join(directory, \"argus.log\"))\n conf.set(\"argus\", \"git_command\", str(git_command))\n conf.set(\"argus\", \"patch_install\", str(zip_patch))\n conf.set(\"argus\", \"log_each_scenario\", str(separate))\n conf.set(\"argus\", \"arch\", str(architecture))\n conf.set(\"argus\", \"use_arestor\", str(use_arestor))\n conf.set(\"openstack\", \"image_ref\", str(image_ref))\n\n if resources:\n conf.set(\"argus\", \"resources\", str(resources))\n\n if flavor_ref:\n conf.set(\"openstack\", \"flavor_ref\", str(flavor_ref))\n\n config_path = os.path.join(directory, \"argus.conf\")\n with open(config_path, 'w') as file_handle:\n conf.write(file_handle)\n\n return config_path", "title": "" }, { "docid": "efd4faa87c61c837caea3135529f2418", "score": "0.5682569", "text": "def save_config(self):\n if not self.parser:\n self.parser = ConfigParser.SafeConfigParser()\n\n #try:\n #fp = open('pyh3c.conf', 'r+')\n #except IOError:\n #fp = open('pyh3c.conf', 'w')\n\n if not self.parser.has_section('sys_conf'):\n self.parser.add_section('sys_conf')\n if not self.parser.has_section('account'):\n self.parser.add_section('account')\n\n self.parser.set('sys_conf', 'dev', self.dev)\n self.parser.set('sys_conf', 'dhcp_command', self.dhcp_command)\n self.parser.set('sys_conf', 'ping_target', self.ping_target)\n self.parser.set('sys_conf', 'ping_interval', str(self.ping_interval))\n self.parser.set('sys_conf', 'ping_tolerence', str(self.ping_tolerence))\n self.parser.set('sys_conf', 'ping_after_reauth', str(self.ping_after_reauth))\n self.parser.set('account', 'user_name', self.user_name)\n self.parser.set('account', 'user_pass', self.user_pass)\n \n #ConfigParser module will delete all comments, here is a dirty hack\n #@TODO@: fix the ConfigParser module, or use cfgparse module\n try:\n os.unlink('pyh3c.conf')\n except OSError:\n pass\n fp = open('pyh3c.conf', 'w')\n self.parser.write(fp)\n fp = fp.close()\n return", "title": "" }, { "docid": "f5248dbf724c1b034a2747b749fbc015", "score": "0.5678581", "text": "def test_digest_file(tmp_path):\n with tempconfig({}):\n tmp_cfg = tempfile.NamedTemporaryFile(\"w\", dir=tmp_path, delete=False)\n tmp_cfg.write(\n \"\"\"\n [CLI]\n media_dir = this_is_my_favorite_path\n video_dir = {media_dir}/videos\n sections_dir = {media_dir}/{scene_name}/prepare_for_unforeseen_consequences\n frame_height = 10\n \"\"\",\n )\n tmp_cfg.close()\n config.digest_file(tmp_cfg.name)\n\n assert config.get_dir(\"media_dir\") == Path(\"this_is_my_favorite_path\")\n assert config.get_dir(\"video_dir\") == Path(\"this_is_my_favorite_path/videos\")\n assert config.get_dir(\"sections_dir\", scene_name=\"test\") == Path(\n \"this_is_my_favorite_path/test/prepare_for_unforeseen_consequences\"\n )", "title": "" }, { "docid": "54e09766abafaea09a5e222b7415e620", "score": "0.56763697", "text": "def build_config(user, assignment, archive):\n assert 
assignment in config.config.sections(), (\n 'No such assigment `%s\\'.' % assignment)\n\n # the upload time is the system's current time\n upload_time = time.strftime(config.DATE_FORMAT)\n\n # repository's path\n repository = vmcheckerpaths.repository\n\n # creates a temporary directory to store homework\n location = join(repository, assignment)\n try:\n os.makedirs(location)\n _logger.info(\"created `%s'\", location)\n except OSError, e:\n if e.errno != errno.EEXIST: raise\n _logger.info(\"`%s' already exists\", location)\n\n with _Locker(assignment):\n location = join(location, user)\n _logger.info(\"Storing student's files at `%s'\", location)\n\n try:\n shutil.rmtree(location)\n _logger.info(\"Removed old `%s'\", location)\n except OSError, e:\n if e.errno != errno.ENOENT: raise\n _logger.info(\"Ignoring missing `%s'\", location)\n\n os.makedirs(location)\n\n # brings necessary files\n check_call(['unzip', archive, '-d',\n os.path.join(location, 'archive')])\n\n # writes assignment configuration file\n assignment_config = join(location, 'config')\n\n with open(assignment_config, 'w') as handle:\n handle.write('[Assignment]\\n')\n handle.write('User=%s\\n' % user)\n handle.write('Assignment=%s\\n' % assignment)\n handle.write('UploadTime=%s\\n' % upload_time)\n # these should go to `callback'\n handle.write('ResultsDest=%s\\n' % join(location, 'results'))\n handle.write('RemoteUsername=%s\\n' % getpass.getuser())\n handle.write('RemoteHostname=%s\\n' % 'cs.pub.ro')\n\n _logger.info('stored homework files. overwriting old homework')\n\n # commits all new files from 'location' that are not ignored by .gitignore\n _call_git(repository, 'add', '--all', location)\n _call_git(repository, 'commit', '--allow-empty', location, '-m',\n \"Updated `%s''s submition for `%s'.\" % (user, assignment))\n\n # XXX should create a clean zip from repository\n shutil.copy(archive, join(location, 'archive.zip'))\n return assignment_config", "title": "" }, { "docid": "1cfba7dfbb478d860c1b43324aa72827", "score": "0.5673882", "text": "def make_user_config():\n user_config_path = prefect.config.get(\"user_config_path\")\n if not user_config_path:\n raise ValueError(\"No user config path set!\")\n elif os.path.isfile(user_config_path):\n raise ValueError(\"A file already exists at {}\".format(user_config_path))\n\n os.makedirs(os.path.dirname(user_config_path), exist_ok=True)\n with open(user_config_path, \"w\") as user_config:\n user_config.write(\n \"# This is a user configuration file.\\n\"\n \"# Settings placed here will overwrite Prefect's defaults.\"\n )\n\n click.secho(\"Config created at {}\".format(user_config_path), fg=\"green\")", "title": "" }, { "docid": "4f24ebe7ac0b8a169f6ff3bd7b3cdaea", "score": "0.56690925", "text": "def generate(config, path):\n f = open(path, 'w+')\n f.write('''####################################\n# Evergreen configuration\n#\n# Generated with evergreen_config_generator from\n# github.com/mongodb-labs/drivers-evergreen-tools\n#\n# DO NOT EDIT THIS FILE\n#\n####################################\n''')\n f.write(yaml_dump(config))", "title": "" }, { "docid": "0d7031f96c8e20bb6ced26d5d6d2ac41", "score": "0.5646327", "text": "def main():\n\n parser = OptionParser()\n parser.add_option('-f', '--digestfile', dest='digestfile',\n help='htdigest filename')\n parser.add_option('-r', '--realm', dest='realm',\n help='authentication realm')\n parser.add_option('-u', '--user', dest='user',\n help='user name')\n parser.add_option('-p', '--password', dest='password',\n help='password for 
USER')\n\n (opts, args) = parser.parse_args()\n\n if not opts.digestfile:\n input_file = raw_input('Enter the file [%s]: ' % DEFAULT_FILE)\n opts.digestfile = input_file if input_file else DEFAULT_FILE\n path, filename = os.path.split(opts.digestfile)\n\n if not opts.user:\n input_user = raw_input('Enter the user [%s]: ' % DEFAULT_USER)\n opts.user = input_user if input_user else DEFAULT_USER\n\n if not opts.password:\n attempts = 3\n for attempt in range(attempts):\n if attempt > 0:\n print \"Passwords empty or did not match. Please try again\",\n print \"(attempt %d/%d)\"\"\" % (attempt+1, attempts)\n password1 = getpass('Enter a new password for \"%s\": ' % opts.user)\n password2 = getpass('Please reenter the password: ')\n if password1 and password1 == password2:\n opts.password = password1\n break\n if not opts.password:\n print \"Passwords did not match. Quitting.\"\n sys.exit(1)\n\n if not opts.realm:\n input_realm = raw_input('Enter the auth realm [%s]: ' % DEFAULT_REALM)\n opts.realm = input_realm if input_realm else DEFAULT_REALM\n\n htdigest_create(filename, opts.user, opts.realm, opts.password, path)", "title": "" }, { "docid": "9d75bcef5602562c8cb6b1e4b8071549", "score": "0.56388336", "text": "def configfile_create(self, sfilename=None):\n if sfilename is None:\n sfilename = self.configfile\n else:\n self.configfile = sfilename\n \n if self.configfile is None:\n return False\n else:\n if self.name is None:\n smsg = \"\"\n else:\n smsg = \" for \" + self.name\n fconfig = open(self.configfile, 'w')\n fconfig.write(\"# configuration\" + smsg + \"\\n\")\n fconfig.close()\n\n return True", "title": "" }, { "docid": "d9e952d36b6b88eae66f3a5ca8f5cecc", "score": "0.56281424", "text": "def create_config(self):\n raise NotImplementedError()", "title": "" }, { "docid": "1c9570b59e2bd41e575cf799450fc70a", "score": "0.56199384", "text": "def createConfig(self, path):\n config = configparser.ConfigParser()\n config.add_section(\"Settings\")\n config.set(\"Settings\", \"DB_NAME\", \"db_001.db\")\n config.set(\"Settings\", \"table_name\", \"T_Question_Answer\")\n # change it !!!\n config.set(\"Settings\", \"token\", \"461661232:AAExDNSsp3zQfL3oAovRhi3TVQKZWEJr7aI\")\n \n config.set(\"Settings\", \"test2\", \"You are using %(font)s at %(font_size)s pt\")\n \n with open(path, \"w\") as config_file:\n config.write(config_file)", "title": "" }, { "docid": "8d2958317f441fe72162014f78bcba06", "score": "0.5604351", "text": "def ensure_config_file(self, kind, template, openvpn_service, defaults=None):\n config_str = self._gen_config_content(template, openvpn_service, defaults=defaults)\n config_file_name = self._get_config_filename(kind)\n LOG.info(_(\"openvpn config file:%s\" % config_str))\n utils.replace_file(config_file_name, unicode(config_str), mode=0o600)", "title": "" }, { "docid": "1fa22948e606935b2b2d4d2c2fb06687", "score": "0.56013197", "text": "def create_init_file():\n _create_yml_file()", "title": "" }, { "docid": "790d8aed7fe2f560751712b19f4063a0", "score": "0.55845845", "text": "def generateConfigFile(cop_density, agent_density, vision, legitimacy, max_jail_term, repeat_number):\n # Convert parameters to string.\n cop_density_str = str(cop_density)\n agent_density_str = str(agent_density)\n vision_str = str(vision)\n\n # Times legitimacy with 100 and use int() to avoid period (.) 
in file name.\n legitimacy_str_in_file_name = (str(int(legitimacy * 100)))\n legitimacy_str = str(legitimacy)\n max_jail_term_str = str(max_jail_term)\n\n # Define fle names.\n parameters_list = [cop_density_str, agent_density_str, vision_str,\n legitimacy_str_in_file_name, max_jail_term_str,\n str(repeat_number)]\n file_name_pattern = \"_\".join(parameters_list)\n file_name = \"config\" + file_name_pattern + \".properties\"\n output_file_name = \"out\" + file_name_pattern\n\n # Create the file.\n with open(file_name, \"w+\") as f:\n # Write the configuration data.\n f.write(make_config_line(AGENT_DENSITY_LABEL,\n agent_density_str))\n f.write(make_config_line(COP_DENSITY_LABEL, cop_density_str))\n f.write(make_config_line(VISION_LABEL, vision_str))\n f.write(make_config_line(\n GOVERNMENT_LEGITIMACY_LABEL, legitimacy_str))\n f.write(make_config_line(MAX_JAIL_TERM_LABEL, max_jail_term_str))\n f.write(make_config_line(MOVEMENT_SWITCH_LABEL,\n MOVEMENT_SWITCH))\n f.write(make_config_line(ITERATION_TIMES_LABEL,\n ITERATION_TIMES))\n f.write(make_config_line(OUTPUT_FILE_NAME_LABEL,\n output_file_name))\n\n f.close()\n\n return file_name", "title": "" }, { "docid": "5241738812d21e62ee7d41e24a815074", "score": "0.5574989", "text": "def generate_config(start, stop, file_name, tag, daily):\n\n\n repo = get_repo_obj()\n\n name = tag\n image = LATEST_IMAGE\n command = [\"python\", ALPHA_COMMAND, \"--file\", file_name]\n if daily:\n command.append('--daily')\n\n task_gen = {\n \"type\": 'month', \\\n \"params\": {\"start_date\": start.strftime('%Y-%m'), \\\n \"end_date\":stop.strftime('%Y-%m')},\n \"requests\": {\"cpu\":\"1\", \"memory\":\"500Mi\"}\n }\n\n json_command = dict(name=tag, image=image, command=command, taskgen=task_gen)\n \n ### always put in root directory to keep clean \n fh = file(repo.working_dir + '/' + OUTNAME, 'w')\n fh.write(json.dumps(json_command, sort_keys=True, indent=4))\n fh.close()\n\n repo_fname = get_file_dirs(repo.working_dir, file_name)\n file_list = [OUTNAME, repo_fname]\n git_commiter(repo, file_list)", "title": "" }, { "docid": "0656940ea8abc6702a1d1be3de1478bd", "score": "0.55614644", "text": "def create_initial_config(self):\n self.kernel_dict = self.choose_kernel()\n ceph_hash = self.choose_ceph_hash()\n # We don't store ceph_version because we don't use it yet outside of\n # logging.\n self.choose_ceph_version(ceph_hash)\n suite_branch = self.choose_suite_branch()\n suite_hash = self.choose_suite_hash(suite_branch)\n if self.args.suite_dir:\n self.suite_repo_path = self.args.suite_dir\n else:\n self.suite_repo_path = util.fetch_repos(\n suite_branch, test_name=self.name)\n teuthology_branch, teuthology_sha1 = self.choose_teuthology_branch()\n\n\n if self.args.distro_version:\n self.args.distro_version, _ = \\\n OS.version_codename(self.args.distro, self.args.distro_version)\n self.config_input = dict(\n suite=self.args.suite,\n suite_branch=suite_branch,\n suite_hash=suite_hash,\n ceph_branch=self.args.ceph_branch,\n ceph_hash=ceph_hash,\n ceph_repo=config.get_ceph_git_url(),\n teuthology_branch=teuthology_branch,\n teuthology_sha1=teuthology_sha1,\n machine_type=self.args.machine_type,\n distro=self.args.distro,\n distro_version=self.args.distro_version,\n archive_upload=config.archive_upload,\n archive_upload_key=config.archive_upload_key,\n suite_repo=config.get_ceph_qa_suite_git_url(),\n suite_relpath=self.args.suite_relpath,\n )\n return self.build_base_config()", "title": "" }, { "docid": "e3cc8e8ed1beb32be77bfb3208a7ffa6", "score": "0.55504453", "text": 
"def create_env_file(self):\n click.echo(info(\"...generating the .env file\"))\n env_path = self.service_dir / \".env_template\"\n env_text = (\n env_path.read_text()\n .replace(\"__SECRETKEY__\", secrets.token_urlsafe(40))\n .replace(\"__PASSWORD__\", secrets.token_urlsafe(8))\n )\n (self.service_dir / \".env\").write_text(env_text)", "title": "" }, { "docid": "47297580f962f743443fd663de636998", "score": "0.5549758", "text": "def create(self):\n os.makedirs(self.path)\n with open(os.path.join(self.path, 'README'), 'w') as fd:\n fd.write('This is a Borg cache')\n config = configparser.RawConfigParser()\n config.add_section('cache')\n config.set('cache', 'version', '1')\n config.set('cache', 'repository', hexlify(self.repository.id).decode('ascii'))\n config.set('cache', 'manifest', '')\n with open(os.path.join(self.path, 'config'), 'w') as fd:\n config.write(fd)\n ChunkIndex().write(os.path.join(self.path, 'chunks').encode('utf-8'))\n os.makedirs(os.path.join(self.path, 'chunks.archive.d'))\n with open(os.path.join(self.path, 'files'), 'wb') as fd:\n pass # empty file", "title": "" }, { "docid": "23adc991d431a42981e47a73c1c89217", "score": "0.55386466", "text": "def setup(**kwargs):\n cfg = hiraeth.Config(root, kwargs)\n cfg.setup()", "title": "" }, { "docid": "58376985ff9f572d7a3c1738f108be52", "score": "0.5529481", "text": "def _write_berks_config(self):\n\n command = ('mkdir -p .berkshelf; cd .berkshelf; '\n 'echo \"{\\\\\"ssl\\\\\":{\\\\\"verify\\\\\":false}}\" > config.json')\n\n self.node.run_cmd(command)", "title": "" }, { "docid": "ec0234c87d9668d4a99e24481d7b12cc", "score": "0.552877", "text": "def generate_config():\n\n config_path = get_config_file()\n if config_path.exists():\n overwrite_config = input(\"Config file already exists. Overwrite? 
(y/N): \")\n\n if overwrite_config.lower() != \"y\":\n print(\"Exiting...\")\n return None\n\n with open(config_path, \"w\", encoding=\"utf-8\") as config_file:\n json.dump(DEFAULT_CONFIG, config_file, indent=4)\n\n print(f\"Config file generated at {config_path}\")\n\n return None", "title": "" }, { "docid": "c655013355b5ffa56bdf14e807af814e", "score": "0.5520197", "text": "def initialize_experiment_file() -> None:\n os.system('mkdir -p \"{}\"'.format(dp.DAF_CONFIGS))", "title": "" }, { "docid": "ad9ed2a9205c9693e7597c599205f92c", "score": "0.55161107", "text": "def create_project_configuration(filename):\n home = os.path.expanduser(\"~\")\n project_root_folder = os.path.join(home, \"hwr-experiments\")\n config = {\n \"root\": project_root_folder,\n \"nntoolkit\": None,\n \"dropbox_app_key\": None,\n \"dropbox_app_secret\": None,\n \"dbconfig\": os.path.join(home, \"hwrt-config/db.config.yml\"),\n \"data_analyzation_queue\": [{\"Creator\": None}],\n \"worker_api_key\": \"1234567890abc\",\n \"environment\": \"development\",\n }\n with open(filename, \"w\") as fp:\n yaml.dump(config, fp, default_flow_style=False)", "title": "" }, { "docid": "e985882a0bbaf00bfc39343e2e94cd50", "score": "0.55128807", "text": "def create(self):\n\n if not exists(self._current['config']):\n with open(self._current['config'], 'w') as f:\n dump(self._current, f)", "title": "" }, { "docid": "44ba7584b2d467cf7f1ab2010c0d438c", "score": "0.5505745", "text": "def createNGINXConf(properties):\n consts.NGINX = os.path.join(consts.ENV, 'nginx')\n try:\n os.mkdir(consts.NGINX)\n except:\n pass #already existed I guess.\n\n with open(os.path.join(consts.RESOURCES,'nginx.conf'), 'r') as f:\n conf = f.read()\n \n name = properties['website']['name']\n\n with open(os.path.join(consts.NGINX,'%s.nginxconf'%name), 'w') as f:\n f.write(conf % (name,consts.WEBAPPS,name,name,consts.WEBAPPS,name,consts.WEBAPPS,name,consts.WEBAPPS,name,consts.WEBAPPS,name,name,consts.WEBAPPS,name))", "title": "" }, { "docid": "f6c8efebefafc8b2a8bf196bdc70ecd7", "score": "0.5497902", "text": "def _setup():\n\n configfile = os.path.expanduser(\"~/.notoconfig\")\n if os.path.exists(configfile):\n with open(configfile, \"r\") as f:\n for line in f:\n line = line.strip()\n if not line or line.startswith('#'):\n continue\n k, v = line.split('=', 1)\n values[k.strip()] = v.strip()", "title": "" }, { "docid": "dd045aa121d778a3430c1a018b015f0a", "score": "0.54961216", "text": "def setup_experiment(config: Dict[str, Any]) -> Tuple[Path, Path, Dict[str, str], Dict[str, Any]]:\n jconfig = json.dumps(config, indent=4)\n log.info('Running Config:')\n log.info(jconfig)\n\n # 1 Delete the keys for this protocol and only pass on other parameters.\n privileged_config = {'protocol': '', 'workdir': '', 'harness': ''}\n for pk in privileged_config.keys():\n if pk not in config:\n raise AttributeError(f'Please set \"{pk}\" in the config files')\n privileged_config[pk] = config[pk]\n del config[pk]\n\n # 2 Validate config being sent to a protocol.\n if privileged_config['protocol'] == 'condda':\n config = ConddaConfig(config).asdict()\n elif privileged_config['protocol'] == 'ond':\n config = OndConfig(config).asdict()\n else:\n raise AttributeError(f'Please set protocol to either \"ond\" or \"condda\". 
'\n f'\"{privileged_config[\"protocol\"]}\" in the config files')\n\n # 3 hash the dictionary to create temp name create the folder\n ub.util_hash._HASHABLE_EXTENSIONS._register_agressive_extensions()\n name = ub.hash_data(config)\n\n log.info(f'Folder {name} created for the following config')\n working_folder = Path(privileged_config['workdir'], name).expanduser().resolve()\n working_folder.mkdir(exist_ok=True, parents=True)\n if 'save_dir' in config.keys() and config['save_dir'] == '{workdir.id}':\n config['save_dir'] = str(working_folder)\n config['detectors']['csv_folder'] = str(\n working_folder / config['detectors']['csv_folder']\n )\n\n Path(config['detectors']['csv_folder']).mkdir(exist_ok=True, parents=True)\n jconfig = json.dumps(config, indent=4)\n\n # 4 save the config into the new folder.\n working_config_fp = working_folder / 'config.json'\n with open(working_config_fp, 'w') as f:\n f.write(jconfig)\n log.debug(f'Config: \\n{jconfig}')\n return working_folder, working_config_fp, privileged_config, config", "title": "" }, { "docid": "51cd94753901ce1927acd148dfe097c7", "score": "0.5495441", "text": "def create_cfg ( image_save_dir=None,\n url_timeout=None,\n max_dl_attempt=None,\n sys_proxy=None,\n log_dir=None,\n log_lvl=None ):\n new_cfg = cfg.APP_CFG\n new_cfg[ IMAGE_SAVE_DIR ] = image_save_dir\n new_cfg[ URL_TIMEOUT ] = url_timeout\n new_cfg[ MAX_DOWNLOAD_REATTEMPTS ] = max_dl_attempt\n new_cfg[ SYSTEM_PROXY ] = sys_proxy\n new_cfg[ LOG_DIR ] = log_dir\n new_cfg[ LOG_LEVEL ] = log_lvl\n\n return new_cfg", "title": "" }, { "docid": "6c497d08a4669790a5defdc73f44142c", "score": "0.54878944", "text": "def create_config_if_dne(raw_config):\n if not exists(CONFIG_FILE):\n with open(CONFIG_FILE, 'w+') as config_file:\n config_file.write(raw_config)", "title": "" }, { "docid": "31cdd3aabb6dbd7c679975ec4acd478d", "score": "0.5487095", "text": "def nxgen_configure(self,scenariofile,datafile=None):\n fdesc = open('/etc/hosts','r')\n fcont = fdesc.readlines()\n fdesc.close()\n public_rsa1 = None\n private_rsa1 = None\n ind = 0\n for cont in fcont:\n ind += 1\n if not cont.__contains__('#'):\n if cont.__contains__('pub_rsa'):\n public_rsa1 = cont\n pubind = ind\n if private_rsa1:\n break\n if cont.__contains__('prv_rsa'):\n private_rsa1 = cont\n prvind = ind\n if public_rsa1:\n break\n\n pub_rsaList = public_rsa1.split(' ')\n pub_realm = pub_rsaList[0]\n prv_rsaList = private_rsa1.split(' ')\n prv_realm = prv_rsaList[0]\n\n usrcfg = Context()\n userconfig_value = UserConfig(usrcfg)\n public_interface = usrcfg['userConfig.eth_pub_iface']\n private_interface = usrcfg['userConfig.eth_pri_iface']\n if self.eth=='eth%s'% public_interface:\n realm_rsa=pub_realm\n elif self.eth=='eth%s'% private_interface:\n realm_rsa=prv_realm\n\n if not datafile:\n self.configure('nxgen',\n calls='1',\n gateway=realm_rsa,\n xmlfile=scenariofile,\n )\n else:\n self.configure('nxgen',\n calls='1',\n gateway=realm_rsa,\n xmlfile=scenariofile,\n csvfile=datafile\n )", "title": "" }, { "docid": "094b6b8bd7ae71b0d1d947a46a86565a", "score": "0.5481455", "text": "def _write_config(\n account_id: str = '123412341234',\n region: str = 'us-test-1',\n prefix: str = 'test_prefix',\n enable_downloader: bool = True,\n cb_url: str = 'https://cb-example.com',\n encrypted_api_token: str = 'A'*100):\n with open(CONFIG_FILE, 'w') as config_file:\n config_file.write('\\n'.join([\n '// comment1',\n 'aws_account_id = \"{}\"'.format(account_id),\n 'aws_region = \"{}\" // comment2'.format(region),\n 'name_prefix = \"{}\" // 
comment3'.format(prefix),\n 'enable_carbon_black_downloader = {}'.format(str(enable_downloader).lower()),\n 'carbon_black_url = \"{}\" //comment4'.format(cb_url),\n 'encrypted_carbon_black_api_token = \"{}\"'.format(encrypted_api_token),\n 'force_destroy = false',\n 'objects_per_retro_message = 5',\n '// comment5'\n ]))", "title": "" }, { "docid": "584458fcb92414fabb5037fd70d3329c", "score": "0.5470951", "text": "def create_config(fp=None): # pragma: no cover\n if fp is None:\n fp = INI_FILE\n\n conf_file = read_or_make(fp).filename\n click.echo('Wrote configfile to %s' % conf_file)\n return conf_file", "title": "" }, { "docid": "62421df81bc4272975807559d05a98a1", "score": "0.54707575", "text": "def write_config_file(config: PytketConfig) -> None:\n config.write_file(get_config_file_path())", "title": "" }, { "docid": "9735e54306e32e64f34221d13eff2644", "score": "0.54566604", "text": "def first_time_setup(self, config_root_path):\n from toyz.utils import file_access\n from toyz.utils import third_party\n \n # Set default settings\n default_settings['web']['third_party'] = third_party.get_defaults()\n for key, val in default_settings.items():\n setattr(self, key, ToyzClass(val))\n \n # Create config directory if it does not exist\n print(\"\\nToyz: First Time Setup\\n----------------------\\n\")\n self.config.root_path = normalize_path(os.getcwd())\n while not get_bool(\n \"Create new Toyz configuration in '{0}'? \".format(self.config.root_path)):\n self.config.root_path = normalize_path(raw_input(\"new path: \"))\n self.config.path = os.path.join(\n self.config.root_path,\n self.config.config_path\n )\n create_paths([self.config.root_path, os.path.join(self.config.root_path, 'config')])\n \n # Create database for toyz settings and permissions\n self.db.path = os.path.join(self.config.root_path, \n self.db.db_path)\n create_paths(os.path.dirname(self.db.path))\n db_utils.create_toyz_database(self.db)\n \n # Create default users and groups\n admin_pwd = 'admin'\n if self.security.user_login:\n admin_pwd = encrypt_pwd(self, admin_pwd)\n db_utils.update_param(self.db, 'pwd', user_id='admin', pwd=admin_pwd)\n db_utils.update_param(self.db, 'pwd', group_id='all', pwd='')\n db_utils.update_param(self.db, 'pwd', group_id='admin', pwd='')\n db_utils.update_param(self.db, 'pwd', group_id='modify_toyz', pwd='')\n db_utils.update_param(self.db, 'paths', group_id='all', paths={\n os.path.join(ROOT_DIR, 'web','static'): 'fr',\n os.path.join(ROOT_DIR, 'web','templates'): 'fr',\n os.path.join(ROOT_DIR, 'third_party'): 'fr',\n })\n \n self.save_settings()\n print(\"\\nFirst Time Setup completed\")", "title": "" }, { "docid": "88ade7ca311517f462201c70a3428619", "score": "0.5445182", "text": "def criarConfig(self):\n with open(self.arquivo_config, 'w', encoding='utf-8', errors='ignore') as arq:\n arq.write(f\"{str(self.json)}\")", "title": "" }, { "docid": "bfe998fea6e9d59d8e0c2eef8f85a6a9", "score": "0.54447424", "text": "def config():\n config_file = os.path.expanduser('~/.weather.cfg')\n api_key = click.prompt(\n \"Please enter your API key\",\n )\n\n with open(config_file, 'w') as cfg:\n cfg.write(api_key)", "title": "" }, { "docid": "4b494c0c412087da352d96580f2eca73", "score": "0.5428089", "text": "def produce_config_file(form: Dict[str, Any], bundle_directory: Path) -> None:\n engine = form.get('engine')\n engine_yaml = {\"engine\": f\"chalkbox.engines.{engine}\"}\n\n default = deep_update(DEFAULT, {\"courseCode\": form.get(\"course_code\"),\n \"assignment\": form.get(\"assignment_id\")})\n\n if engine 
== \"JavaEngine\":\n dependencies = get_dependencies(form.get(\"dependencies\"))\n default = deep_update(default, {\"dependencies\": dependencies})\n java = deep_update(JAVA, form.get(\"java_stages\"))\n settings = {**default, **java}\n reformat_test_classes(settings, form.get(\"session_directory\"))\n elif engine == \"PythonEngine\":\n python = deep_update(PYTHON, {\n \"fileName\": form.get(\"fileName\"),\n \"runner\": form.get(\"runner\"),\n })\n settings = {**default, **python}\n else:\n raise NotImplementedError\n\n config_yaml = dump_all([engine_yaml, settings], sort_keys=False)\n\n with open(f\"{bundle_directory / 'config.yml'}\", \"w\") as config_file:\n config_file.write(config_yaml)", "title": "" }, { "docid": "6e3bb5a5f5a8f10e4e2ddf18cde2a1dd", "score": "0.54249233", "text": "def _genconfig(args):\n\n fdir = args['--fencepy-root']\n cfile = os.path.join(fdir, 'fencepy.conf')\n\n if os.path.exists(cfile):\n l.info('backing up {0}'.format(cfile))\n i = 0\n while True:\n bak = '{0}.bak.{1}'.format(cfile, i)\n if not os.path.exists(bak):\n shutil.move(cfile, bak)\n break\n l.info('generating {0}'.format(cfile))\n shutil.copy(_get_default_config_file(), cfile)\n\n return 0", "title": "" }, { "docid": "02bc3b10d06035c758f13262ddb5107f", "score": "0.54136133", "text": "def createApacheConf(properties):\n consts.APACHE = os.path.join(consts.ENV, 'apache')\n try:\n os.mkdir(consts.APACHE)\n except:\n pass #already existed\n\n with open(os.path.join(consts.RESOURCES, 'apache.conf'),'r') as f:\n conf = f.read()\n \n name = properties['website']['name']\n\n with open(os.path.join(consts.APACHE,'%s.apacheconf'%name), 'w') as f:\n f.write(conf % (consts.HOSTNAME,\\\n os.path.join(consts.WEBAPPS,globs.ENVNAME,name),name,\\\n os.path.join(consts.WEBAPPS,globs.ENVNAME,name),name,\\\n os.path.join(consts.WEBAPPS,globs.ENVNAME,name),name,\\\n os.path.join(consts.WEBAPPS,globs.ENVNAME,name),name,name,\n \"\"))#last piece will be favicon eventually", "title": "" }, { "docid": "05a6194c8ab412616342c9651d27903b", "score": "0.54055816", "text": "def __writeConfigFile(self, **kwargs) -> None:\n\n def makeHeader(label: str) -> str:\n \"\"\"Makes a nice header for the config file sections.\"\"\"\n return \"#\" * 80 + \"\\n# {0:<77}#\\n\".format(label) + \"#\" * 80\n\n # calculate some parameters which are defined differently in OpenNMT\n saveCheckpointSteps = max(kwargs[\"numTrainingSteps\"] // kwargs[\"numCheckpoints\"], 1)\n validSteps = max(kwargs[\"numTrainingSteps\"] // kwargs[\"numValidations\"], 1)\n\n lines = [\n '# AUTOGENERATED',\n '',\n makeHeader(\"GENERAL OPTIONS\"),\n '',\n '# Base path for objects that will be saved, e.g. vocab, embeddings, etc.',\n 'save_data: \"{}\"'.format(self.__SAVE_DATA_PATH.replace(os.path.sep, \"/\")),\n '',\n '# Base path for saved model checkpoints',\n 'save_model: \"{}\"'.format(self.__SAVE_MODEL_PATH.replace(os.path.sep, \"/\")),\n '',\n '# Save a model checkpoint after X number of training steps',\n 'save_checkpoint_steps: {}'.format(saveCheckpointSteps),\n '',\n '# Allow overwriting existing files in the model directory',\n 'overwrite: true',\n '',\n makeHeader(\"VOCABULARY AND DATA\"),\n '',\n '# Vocabularies will be written to these files',\n 'src_vocab: \"{}\"'.format(self.__SOURCE_VOCAB_PATH.replace(os.path.sep, \"/\")),\n 'tgt_vocab: \"{}\"'.format(self.__TARGET_VOCAB_PATH.replace(os.path.sep, \"/\")),\n '',\n '# Defines training and validation datasets. 
Data is already in the correct',\n '# format, so no need for transforms.',\n 'data:',\n ' corpus_1:',\n ' path_src: \"{}\"'.format(kwargs[\"trainSource\"].replace(os.path.sep, \"/\")),\n ' path_tgt: \"{}\"'.format(kwargs[\"trainTarget\"].replace(os.path.sep, \"/\")),\n ' transforms: []',\n ' weight: 1',\n ' valid:',\n ' path_src: \"{}\"'.format(kwargs[\"validSource\"].replace(os.path.sep, \"/\")),\n ' path_tgt: \"{}\"'.format(kwargs[\"validTarget\"].replace(os.path.sep, \"/\")),\n ' transforms: []',\n '',\n makeHeader(\"MODEL\"),\n '',\n '# Overall type of model, here we use seq2seq',\n 'model_task: seq2seq',\n '',\n '# Attention method to use in encoder and decoder, mlp means Bahdanau',\n 'global_attention: mlp',\n '',\n '# Do not use an additional layer between the encoder and decoder',\n 'bridge: false',\n '',\n '# Word embedding size for source and target',\n 'word_vec_size: {}'.format(kwargs[\"embeddingSize\"]),\n '',\n makeHeader(\"ENCODER / DECODER\"),\n '',\n '# Gate type to use in RNN encoder and decoder',\n 'rnn_type: {}'.format(kwargs[\"rnnType\"]),\n '',\n '# Encoder and decoder are always RNNs',\n 'encoder_type: rnn',\n 'decoder_type: rnn',\n '',\n '# Size of encoder and decoder RNN hidden states',\n 'rnn_size: {}'.format(kwargs[\"rnnSize\"]),\n '',\n '# Number of layers in each the encoder and decoder',\n 'layers: {}'.format(kwargs[\"numLayers\"]),\n '',\n makeHeader(\"LEARNING AND OPTIMIZATION\"),\n '',\n '# Number of training steps to perform',\n 'train_steps: {}'.format(kwargs[\"numTrainingSteps\"]),\n '',\n '# Perform validation every X number of training steps',\n 'valid_steps: {}'.format(validSteps),\n '',\n '# Dropout probability',\n 'dropout: {}'.format(kwargs[\"dropout\"]),\n '',\n '# Use the Adam optimization method',\n 'optim: adam',\n '',\n '# Starting learning rate -- Tufano et al. 
use 0.0001',\n 'learning_rate: 0.0001',\n ''\n ]\n\n numGPUs = kwargs[\"numGPUs\"]\n gpuLines = []\n if numGPUs > 0:\n gpuLines += [\n '# Train using {} GPU{}'.format(numGPUs, \"s\" if numGPUs > 1 else \"\"),\n 'world_size: {}'.format(numGPUs),\n 'gpu_ranks:',\n *[\"- {}\".format(i) for i in range(numGPUs)],\n ''\n ]\n else:\n gpuLines += [\"# Train using the CPU, so no world_size parameter is provided\", \"\"]\n\n lines = lines[:16] + gpuLines + lines[16:]\n\n with open(self.__CONFIG_PATH, \"w\") as file:\n file.write(\"\\n\".join(lines))", "title": "" }, { "docid": "c04fa2aa31d5afc6657511cc7fe57376", "score": "0.539547", "text": "def create_default_config() -> dict:\n\n default_values = {\n 'results_path': f'{os.environ[\"HOME\"]}/netbench'\n }\n Path(os.path.dirname(config_file)).mkdir(parents=True, exist_ok=True)\n with open(config_file, 'w') as f:\n json.dump(default_values, f)\n return default_values", "title": "" }, { "docid": "f12ec37032b2a736d09fa39ff260944e", "score": "0.5394129", "text": "def app_init_create_config(self, args, output_variables, tcex_testing_context):\n args['tc_playbook_out_variables'] = ','.join(output_variables)\n args['tcex_testing_context'] = tcex_testing_context\n\n # update path args\n self._update_path_args(args)\n\n # merge default and app args\n app_args = dict(self.default_args)\n app_args.update(args)\n\n # service Apps will get their args/params from encrypted file in the \"in\" directory\n data = json.dumps(app_args, sort_keys=True).encode('utf-8')\n key = ''.join(random.choice(string.ascii_lowercase) for i in range(16))\n encrypted_data = self._encrypt_file_contents(key, data)\n\n # create files necessary to run Service App\n if not os.path.exists(app_args.get('tc_in_path')):\n os.mkdir(app_args.get('tc_in_path'))\n\n app_params_json = os.path.join(app_args.get('tc_in_path'), '.app_params.json')\n with open(app_params_json, 'wb') as fh:\n fh.write(encrypted_data)\n\n # create environment variable for tcex inputs method to pick up to read encrypted file\n os.environ['TC_APP_PARAM_KEY'] = key\n os.environ['TC_APP_PARAM_FILE'] = app_params_json", "title": "" }, { "docid": "62c4328b718802786262bd859b0a29a0", "score": "0.5394111", "text": "def add_config():\n\n genius_key = input('Enter Genius key : ')\n bing_key = input('Enter Bing key : ')\n\n CONFIG['keys']['bing_key'] = bing_key\n CONFIG['keys']['genius_key'] = genius_key\n\n with open(config_path, 'w') as configfile:\n CONFIG.write(configfile)", "title": "" }, { "docid": "3f982ae0cdc92d8e70480dd61c907b03", "score": "0.5390414", "text": "def generate_pyry_inconfig(self, filename):\n self.create_config_file(str(filename))\n self.add_default_data()", "title": "" }, { "docid": "3ab75f3091aac666d27eb2888a28a99a", "score": "0.5388321", "text": "def generate_config(services):\n\n template = env.get_template('haproxy.cfg.tmpl')\n with open(HAPROXY_CONFIG, \"w\") as f:\n f.write(template.render(services=services))", "title": "" }, { "docid": "8ce7c60dabf359a25123043983b10828", "score": "0.5379549", "text": "def create(experiment_path, config) -> Experiment:\n exp = Experiment()\n exp.paths = create_file_structure(experiment_path)\n new_config = check_config(config)\n for key, value in new_config.items():\n exp.config[key] = value\n return save(exp)", "title": "" }, { "docid": "0457ecc9f39b40c6870c390625b2f6bc", "score": "0.5377216", "text": "def config(args):\n # split up the jumble of time to set the hr and min correctly\n args['CYCLEHR'], args['CYCLEMIN']=Config.format_time(args['CYCLETIME'])\n # convert 
NSFW from on/off to True/False \n args['NSFW'] = Config.convert_NSFW(args['NSFW'])\n\n config = configparser.ConfigParser()\n config['Statusbar'] = OrderedDict([('Statusbar Text',\n args['STATUSBAR'])])\n config['Save Location'] = OrderedDict([('Directory',\n args['DWNLDLOC'])])\n config['Options'] = OrderedDict([('Minwidth', args['MINWIDTH']),\n ('Minheight', args['MINHEIGHT']),\n ('Subreddits', args['SUBREDDITS']),\n ('Category', args['CATEGORY']),\n ('Maxposts', args['MAXPOSTS'])])\n config['Cycletime'] = OrderedDict([('Hours', args['CYCLEHR']),\n ('Minutes', args['CYCLEMIN'])])\n config['Adult Content'] = OrderedDict([('NSFW', args['NSFW'])])\n # this try/except is used because the cmdline args come through here\n # and doesn't contain the wallpaper argument, so it is not always\n # provided\n try: \n config['Last Wallpaper'] = OrderedDict([('Wallpaper',\n args['WALLPAPER'])])\n except KeyError:\n configParser = configparser.ConfigParser()\n # this is cyclical because we must read the setting in order\n # to reset it with the same value, as this file is rewritten\n # each call to this function. This is so we can pass one of\n # two dictionarys to it without rewriting similar code.\n # ^^CLArgs or DefaultValues\n args['WALLPAPER'] = Config.lastImg()\n config['Last Wallpaper'] = OrderedDict([('Wallpaper',\n args['WALLPAPER'])])\n \n with open('settings.conf', 'w') as configfile:\n config.write(configfile)\n\n log.debug(\"Set config file\")", "title": "" }, { "docid": "a1cc04649d6a58ac642ae48e27d4c8ab", "score": "0.5357817", "text": "def createConfig(path):\r\n config = configparser.ConfigParser()\r\n config.add_section(\"Path\")\r\n config.set(\"Path\", \"query_file\", \"\")\r\n config.set(\"Path\", \"answer_file\", \"\")\r\n\r\n with open(path, \"w\") as config_file:\r\n config.write(config_file)", "title": "" }, { "docid": "d58b183444dbadcaadeeed73cd72158e", "score": "0.5350892", "text": "def generate_file(hv_info, config):\n err_dic = {}\n\n # add config scenario name\n (err_dic, scenario_name) = acrn_config_utilities.get_scenario_name()\n (err_dic, board_name) = acrn_config_utilities.get_board_name()\n\n print(\"{}\".format(DESC), file=config)\n if hv_info.log.release == 'y':\n print(\"CONFIG_RELEASE=y\", file=config)\n print('CONFIG_BOARD=\"{}\"'.format(board_name), file=config)\n\n get_memory(hv_info, config)\n get_features(hv_info, config)\n get_capacities(hv_info, config)\n get_serial_console(config)\n get_log_opt(hv_info, config)\n\n return err_dic", "title": "" }, { "docid": "dfdc9006775ce3c437470ad53e3013b7", "score": "0.5343266", "text": "def generate_config(config_dir, filename):\n config_file = \"{}/{}\".format(config_dir, filename.split('/')[1].split('.example')[0])\n env = Environment()\n template_source = open(filename, 'r').read()\n template = env.from_string(template_source)\n with open(config_file, 'w') as config:\n config.write(\n template.render(log_dir=LOG_DIR)\n )\n config.close()", "title": "" }, { "docid": "96e601d777625ccd659ae887840ff50b", "score": "0.53428394", "text": "def config_init(app):\n\n create_path(_INSTANCE_FOLDER)\n create_path(_TMP_FOLDER)", "title": "" }, { "docid": "ca386bc12e741823e95636860cd5a89e", "score": "0.5325287", "text": "def create_conf_dir():\n # Create logentries config\n try:\n os.makedirs(config.config_dir_name)\n except OSError, e:\n if e.errno != errno.EEXIST:\n if e.errno == errno.EACCES:\n die(\"You don't have permission to create logentries config file. 
Please run logentries agent as root.\")\n die('Error: %s' % e)", "title": "" }, { "docid": "244af63a68c4c8195bfa010102eaa893", "score": "0.5319997", "text": "def config(self):\n for dir_ in [self.download_dir, self.output_dir]:\n dir_.mkdir(parents=True, exist_ok=True)", "title": "" }, { "docid": "46f1b35950a12c70906d0041e7555221", "score": "0.53185004", "text": "def _write_openssl_config(self):\n ext_key_usage = 'critical, cRLSign, keyCertSign'\n cert_policies = '1.3.6.1.4.1.34998.1.6'\n key_id = ''\n pathlen = ''\n\n ext_contents = \"\"\"%ssubjectAltName=DNS:%s\nkeyUsage=critical,digitalSignature,keyEncipherment,dataEncipherment\nextendedKeyUsage=serverAuth,clientAuth\ncertificatePolicies=%s\nbasicConstraints=critical,CA:false\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid:always,issuer\n\"\"\" % (key_id, _get_hostname(), cert_policies)\n\n openssl_config = open('/etc/pki/tls/openssl.cnf', 'rb')\n config_contents = to_str(openssl_config.read())\n openssl_config.close()\n replace_text = [(\"# crl_extensions\t= crl_ext\", \"crl_extensions\t= crl_ext\"),\n (\"basicConstraints = CA:true\", \"basicConstraints = critical, CA:true%s\" % pathlen),\n (\"# keyUsage = cRLSign, keyCertSign\",\n \"keyUsage = %s\" % ext_key_usage),\n (\"dir\t\t= ../../CA\t\t# Where everything is kept\",\n \"dir\t\t= %s\t\t# Where everything is kept\" % self._OPENSSL_CA_DIR)]\n for (old, new) in replace_text:\n config_contents = config_contents.replace(old, new)\n\n _write_file(self._CONFIG_PATH, config_contents)\n _write_file(self._EXT_CONFIG_PATH, ext_contents)\n _write_file(self._OPENSSL_CA_DIR + \"index.txt\", \"\")\n _write_file(self._OPENSSL_CA_DIR + \"index.txt.attr\", \"unique_subject = no\\n\") # TODO: Implement cert revocation instead\n _write_file(self._OPENSSL_CA_DIR + \"serial\", self._SERIAL_NUM)\n _write_file(self._OPENSSL_CA_DIR + \"crlnumber\", \"01\\n\")\n\n # openssl 0.x doesn't create this for us\n _safe_makedirs(os.path.join(self._OPENSSL_CA_DIR, 'newcerts'), 0o755)", "title": "" }, { "docid": "d137755e39f030331458a3255c7caa41", "score": "0.5310389", "text": "def config(config_ctx, file, mode, view):\n\n if file:\n\n os.remove(config_ctx.config_path)\n\n abs_path = os.path.abspath(file)\n click.echo(\"Absolute path for provided file: {}\".format(abs_path))\n\n new_config = TurmyxConfig()\n new_config.read(abs_path)\n\n # TODO: validate this config file.\n\n if not mode:\n with open(config_ctx.config_path, \"w\") as config_f:\n new_config.write(config_f)\n click.echo(\"Succesfully saved into {}.\".format(config_ctx.config_path))\n elif mode == \"merge\":\n # First attempt, only overriding partials:\n\n config_ctx.read(abs_path)\n with open(config_ctx.config_path, \"w\") as config_f:\n config_ctx.write(config_f)\n click.echo(\"Succesfully merged: {} \\n into: {} \\n and saved.\".format(abs_path, config_ctx.config_path))\n\n elif mode == \"symlink\":\n os.symlink(abs_path, config_ctx.config_path)\n click.echo(\"Succesfully linked: {} \\n to: {}.\".format(config_ctx.config_path, abs_path))\n\n if view:\n with open(config_ctx.config_path, 'r') as config_f:\n click.echo(config_f.read())", "title": "" }, { "docid": "6dad25a53715d462bc3c2a025d311cb8", "score": "0.5308391", "text": "def init_production_config(self):\n\n chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'\n\n # See if we need to initialize the production configuration file\n if os.path.exists('proj/settings/production.py'):\n\n with open('proj/settings/production.py') as handle:\n lines = handle.readlines()\n\n # Loop 
through lines and create the build archive file\n for x in range(0, len(lines)):\n\n # Always replace the secret key\n if lines[x].startswith('SECRET_KEY'):\n lines[x] = \"SECRET_KEY = '{0}'\\n\".format(''.join(\n [random.SystemRandom().choice(chars) for i in range(50)]\n ))\n\n # only replace the fernet key if it hasn't been initialized\n if lines[x].startswith('FERNET_KEY') and '%%fernetkey%%' in lines[x]:\n lines[x] = \"FERNET_KEY = {0}\\n\".format(Fernet.generate_key())\n\n with open('proj/settings/production.py', 'w') as handle:\n handle.writelines(lines)", "title": "" }, { "docid": "ac2b408e475e9882d342210bac7b6e9f", "score": "0.5306355", "text": "def create_invalid_cfg ( self ):\n new_cfg = self.create_cfg (\n image_save_dir=\"/mantosh_downloaded\",\n url_timeout=-1,\n max_dl_attempt=-1,\n sys_proxy=(),\n log_dir=\"/mantosh_logs\",\n log_lvl=\"invalid\"\n )\n\n return new_cfg", "title": "" }, { "docid": "4d7d313a121ff702175d7835e769b6a1", "score": "0.5298721", "text": "def setup_config(command, filename, section, vars):\n conf = appconfig('config:' + filename)\n load_environment(conf.global_conf, conf.local_conf)\n log.info(\"Creating tables %s\" % meta.engine)\n meta.metadata.drop_all(bind=meta.engine, checkfirst=True)\n meta.metadata.create_all(bind=meta.engine)\n log.info(\"Successfully setup\")", "title": "" }, { "docid": "c52a15cb378bf0fa815f37af6975ee5e", "score": "0.5297652", "text": "def createNBconfig(self):\n with open(os.path.join(self.nbgrader_course_directory,\"nbgrader_config.py\"),\"w\") as NBC:\n NBC.write(self.nbgrader_config_template)\n logging.debug(f\"created {self.nbgrader_course_directory}/nbgrader_config.py:\\n{self.logging_separator}\\n{self.nbgrader_config_template}\\n{self.logging_separator}\")", "title": "" }, { "docid": "04eb00e0307ef582acb61a25bd88bae6", "score": "0.528714", "text": "def generate_defaults():\n\n cf = SafeConfigParser()\n cf.add_section('basics')\n ls = ['user', 'id', 'time', 'species', 'factor', 'treat', 'cont', 'output']\n for fld in ls:\n cf.set('basics', fld, '$'+fld.upper())\n #SET the chilin version number\n cf.set('basics', \"version\", _CHILIN_VERSION)\n\n #read in the chilin.conf file--look first for developer defaults\n if os.path.exists('chilin.conf.filled'):\n cf.read('chilin.conf.filled')\n else:\n cf.read('chilin.conf')\n\n #write the template file!\n f = open(os.path.join('chilin2','modules','config','chilin.conf'),'w')\n cf.write(f)\n f.close()", "title": "" }, { "docid": "aa5630f016fd85981506c6eac7f1b5cc", "score": "0.5285307", "text": "def make_config(dataset_id=None):\n config = DefaultSettings()\n if not dataset_id:\n import uuid\n dataset_id = str(uuid.uuid4())\n api_port = get_open_port()\n pub_port = get_open_port()\n num, loc = tempfile.mkstemp(suffix='.hdf5', prefix='tmp_test_')\n import h5py\n f = h5py.File(loc)\n f.close()\n config.update({\n 'DS_ID': dataset_id,\n 'API_PORT': api_port, # API port. 
Application will listen for incoming requests\n 'PUB_PORT': pub_port, # Will publish all the news on this port.\n 'PEERS': [], # ('127.0.0.1', 9201) Name of peers to subscribe to their publisher port.\n 'HDF5_REPO': loc,\n 'LOG_LEVEL': 'DEBUG',\n 'LOG_FILE': tempfile.mktemp(suffix='.log')})\n return config", "title": "" }, { "docid": "415a1609825ff339b56303081d5de145", "score": "0.5279528", "text": "def create_manifest_runtime_config(filename, config):\n if not config:\n return\n try:\n with open(filename, 'w') as f:\n yaml.dump(config, f, default_flow_style=False)\n except Exception:\n LOG.exception(\"failed to write config file: %s\" % filename)\n raise", "title": "" }, { "docid": "836e66c36b4e6093dabd94f070beefc8", "score": "0.5264505", "text": "def generate_conf(self):\n\n config_dic = {}\n\n config_dic[\"references\"] = self.references\n config_dic[\"institution\"] = self.institution\n config_dic[\"instrument_name\"] = self.instrument\n config_dic[\"site_name\"] = self.site\n config_dic[\"comments\"] = self.comments\n config_dic[\"contact_person\"] = self.contact\n config_dic[\"email\"] = self.email\n\n config_js = json.dumps(config_dic)\n config_file = open(\"config.json\", \"w\")\n config_file.write(config_js)\n config_file.close()", "title": "" }, { "docid": "f5652b9894203aec35e976add5d306b9", "score": "0.52580357", "text": "def populate_shed_conf_file( shed_conf_file, tool_path, xml_elems=None ):\n if xml_elems is None:\n tool_conf_template_parser = string.Template( shed_tool_conf_xml_template )\n xml_elems = tool_conf_template_parser.safe_substitute( shed_tool_path=tool_path )\n file( shed_conf_file, 'w' ).write( xml_elems )", "title": "" }, { "docid": "e96d1281f6ee9d84804461c159bf4efa", "score": "0.5252121", "text": "def createConfig(self):\n config = self.testInit.getConfiguration()\n self.testInit.generateWorkDir(config)\n\n config.section_(\"ACDC\")\n config.ACDC.couchurl = os.getenv(\"COUCHURL\")\n config.ACDC.database = \"jobaccountant_acdc_t\"\n\n config.section_(\"JobStateMachine\")\n config.JobStateMachine.couchurl = os.getenv(\"COUCHURL\")\n config.JobStateMachine.couchDBName = \"jobaccountant_t\"\n config.JobStateMachine.jobSummaryDBName = \"jobaccountant_wmstats_t\"\n\n config.component_(\"JobAccountant\")\n config.JobAccountant.pollInterval = 60\n config.JobAccountant.componentDir = os.getcwd()\n config.JobAccountant.logLevel = 'SQLDEBUG'\n config.JobAccountant.specDir = self.testDir\n\n config.component_(\"TaskArchiver\")\n config.TaskArchiver.localWMStatsURL = \"%s/%s\" % (\n config.JobStateMachine.couchurl, config.JobStateMachine.jobSummaryDBName)\n\n return config", "title": "" }, { "docid": "271515ee13264c9c219b2b8d39ac3665", "score": "0.52517396", "text": "def generate_settings():\r\n output = CONFIG_TEMPLATE % dict(\r\n default_key=base64.b64encode(os.urandom(KEY_LENGTH)),\r\n )\r\n\r\n return output", "title": "" }, { "docid": "410f9d6dacf191edc68dc5feaa01b7fb", "score": "0.5246027", "text": "def create_launch_config(self):\n pass", "title": "" }, { "docid": "58a804ea9dcc7dc52d1dd6c2d8df5918", "score": "0.52421796", "text": "def _setupConfigFile(self, configParser):\n utils.SuperTestCase._setupConfigFile(self, configParser)\n tmp = Path('tmp')\n if not tmp.exists():\n tmp.mkdir()\n logfn = tmp/'exe.log'\n if logfn.exists():\n try:\n logfn.remove()\n except OSError:\n pass\n configParser.system.configDir = tmp\n configParser.logging.root = 'ERROR'\n configParser.logging.foo = 'DEBUG'\n configParser.logging.foo = 'DEBUG'", "title": "" }, { "docid": 
"7d047255f2af5444f00a5af85ff7926f", "score": "0.52410924", "text": "def create_hyperparams_file(hyperparams_group, config_dir):\n if not tf.gfile.Exists(config_dir):\n tf.gfile.MakeDirs(config_dir)\n \n for i in range(len(hyperparams_group)):\n config_file = os.path.join(config_dir, \"config_hyperparams_{0}.json\".format(i))\n with codecs.getwriter(\"utf-8\")(tf.gfile.GFile(config_file, \"w\")) as file:\n hyperparam_dict = hyperparams_group[i].values()\n hyperparams_json = json.dumps(hyperparam_dict, indent=4)\n file.write(hyperparams_json)", "title": "" }, { "docid": "2fe11d569e62df8d20e9d6fef0609ab7", "score": "0.52387726", "text": "def configure():\n ensure()\n\n copy_site_config()\n copy_gral_config()\n copy_proxy_config()", "title": "" }, { "docid": "067d64efef597c860213ba08f6ed1b35", "score": "0.5238356", "text": "def ensure_configs(self):\n #self.openvpn_service['openvpn_file'] = self.openvpn_file\n self.ensure_config_dir(self.openvpn_service)\n self.ensure_config_file(\n 'openvpn.conf',\n self.conf.openvpn.openvpn_config_template,\n self.openvpn_service,\n defaults = default)", "title": "" }, { "docid": "26f2177244a4d8ce7c60dd153745c158", "score": "0.5224977", "text": "def create_config(path):\n logger = logging.getLogger(\"json_to_report_portal.create_config\")\n config = configparser.ConfigParser()\n config.add_section(\"Settings\")\n config.set(\"Settings\", \"rp_api_url\", \"http://[YOUR RP SERVER IP]/api\")\n config.set(\"Settings\", \"rp_project_name\", \"[YOUR RP PROJECT NAME]\")\n config.set(\"Settings\", \"rp_uuid\", \"[from RP user settings]\")\n config.set(\"Settings\", \"rp_launch\", \"[YOUR RP LAUNCH NAME]\")\n config.set(\"Settings\", \"rp_info\",\n \"You are reporting to RP: %(rp_api_url)s \"\n \"to project: %(rp_project_name)s\")\n\n with open(path, \"w\") as config_file:\n config.write(config_file)\n logger.info('Config created')", "title": "" }, { "docid": "aa615e266e79b7140cfc386344377b30", "score": "0.522446", "text": "def run_configure():\n cfg = config.Config()\n options = [\n (\n \"user_access_key\",\n \"Access Key\",\n \"Access key and Secret key are your identifiers for object storage service\",\n ),\n (\"user_secret_key\", \"Secret Key\"),\n (\n \"user_url\",\n \"Object Storage Endpoint\",\n f'Use \"{cfg.user_url}\" for Neo Object Storage.',\n ),\n (\n \"user_gmt_policy\",\n \"Gmt Policy Path\",\n f\"Path to your gmt policy file, Leave as is 'notset' if you don't want to use Cloudian extension\",\n ),\n (\n \"admin_username\",\n \"Admin Username\",\n f\"\"\"Admin username and password are your identifiers for your admin panel.\"\"\",\n ),\n (\"admin_password\", \"Admin Password\"),\n (\n \"admin_url\",\n \"Admin URL\",\n f\"Admin url and port are your url and port location to your admin panel.\",\n ),\n (\"admin_port\", \"Admin port\"),\n (\n \"use_https\",\n \"Use HTTPS protocol\",\n \"All communication is protected when enabled, but it's slower than plain HTTP.\",\n ),\n (\n \"use_neo\",\n \"Use NEO compatibility\",\n \"Use NEO compatibility for adding specific policy id in Object Storage.\",\n ),\n ]\n try:\n while True:\n click.secho(\"Put in new values or accept defaults.\")\n click.secho(\"See user manual for complete description of options.\")\n for option in options:\n prompt = option[1]\n val = getattr(cfg, option[0])\n if val not in (None, \"\"):\n prompt += f\" [{val}]\"\n\n if len(option) >= 3:\n click.secho(f\"\\n{option[2]}\")\n\n val = input(prompt + \": \")\n # only set new value if user provide one\n if val != \"\":\n setattr(cfg, 
option[0], val)\n\n val = input(\"\\nSave settings? [y/N] \")\n if val.lower().startswith(\"y\"):\n break\n\n val = input(\"Retry configuration? [Y/n] \")\n if val.lower().startswith(\"n\"):\n raise EOFError()\n\n cfg.dump_config(options, cfg)\n\n except (EOFError, KeyboardInterrupt):\n click.secho(\n \"\\nConfiguration aborted. Changes were NOT saved.\",\n fg=\"yellow\",\n bold=True,\n err=True,\n )\n return", "title": "" }, { "docid": "098c7289d4cfb0922d29179e5a726bc0", "score": "0.5223362", "text": "def config():", "title": "" }, { "docid": "098c7289d4cfb0922d29179e5a726bc0", "score": "0.5223362", "text": "def config():", "title": "" }, { "docid": "26f5f1ecbc0ed594b3e378b2d8cd0278", "score": "0.5219096", "text": "def setup_class(cls):\n with open(CONFIGFILE, \"w\") as config:\n config.write(CONFIG_YAML)", "title": "" } ]
76f1e124c63d081a7ac8be64e178f2f8
Loads the grid profile required for the run.
[ { "docid": "d74062a4691924661853becce801b7e2", "score": "0.67323035", "text": "def load_grid_profile(\n auto_generated_files_directory: str, logger: Logger, scenario: Scenario\n) -> Optional[pd.DataFrame]:\n\n grid_profile: Optional[pd.DataFrame] = None\n if scenario.grid:\n try:\n with open(\n os.path.join(\n auto_generated_files_directory,\n \"grid\",\n f\"{scenario.grid_type}_grid_status.csv\",\n ),\n \"r\",\n ) as f:\n grid_profile = pd.read_csv(\n f,\n index_col=0,\n )\n except FileNotFoundError as e:\n logger.error(\n \"%sGrid profile file for profile '%s' could not be found: %s%s\",\n BColours.fail,\n scenario.grid_type,\n str(e),\n BColours.endc,\n )\n raise\n\n return grid_profile", "title": "" } ]
[ { "docid": "a9fef0df39a0ea62e69b7433b2580ad4", "score": "0.6204795", "text": "def test_profile_loading(self):\n self.click('empty_profile')\n self.find('no_plugins_enabled')\n\n self.click('owasp_top_10')\n self.find('audit_plugins_enabled')\n\n self.click('empty_profile')\n self.find('no_plugins_enabled')", "title": "" }, { "docid": "e7198e544990178e3198a9f655705534", "score": "0.6195963", "text": "def __init__(self, profile_path):\n self.profile = None\n self.__load_profile(profile_path)", "title": "" }, { "docid": "5f9b07c74ad5836adcf72d8df1d839b9", "score": "0.6041172", "text": "def load_profile(ctx, load, variant_file, update, stats, profile_threshold, check_vcf):\n\n adapter = ctx.obj[\"adapter\"]\n\n LOG.info(\"Running loqusdb profile\")\n\n if check_vcf:\n LOG.info(f\"Check if profile in {check_vcf} has match in database\")\n vcf_file = check_vcf\n profiles = get_profiles(adapter, vcf_file)\n duplicate = check_duplicates(adapter, profiles, profile_threshold)\n\n if duplicate is not None:\n duplicate = json.dumps(duplicate)\n click.echo(duplicate)\n else:\n LOG.info(\"No duplicates found in the database\")\n\n if load:\n genome_build = ctx.obj[\"genome_build\"]\n vcf_path = MAF_PATH[genome_build]\n if variant_file is not None:\n vcf_path = variant_file\n LOG.info(f\"Loads variants in {vcf_path} to be used in profiling\")\n load_profile_variants(adapter, vcf_path)\n\n if update:\n LOG.info(\"Updates profiles in database\")\n update_profiles(adapter)\n\n if stats:\n LOG.info(\"Prints profile stats\")\n distance_dict = profile_stats(adapter, threshold=profile_threshold)\n click.echo(table_from_dict(distance_dict))", "title": "" }, { "docid": "4215c37bcabbc97c10c294d35d97b837", "score": "0.6027754", "text": "def loads(self, profile_name: Optional[str] = \"default\"):\n super().__init__(profile_name=profile_name)\n self.iam = self.boto_client.client(\"iam\")", "title": "" }, { "docid": "23809db789b858e60be48e3ce9e87990", "score": "0.573305", "text": "def userenv_LoadUserProfile(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hToken\", \"lpProfileInfo\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "title": "" }, { "docid": "03dc1d833574c207ed3bd356f2396f39", "score": "0.57289255", "text": "def load_profile_collection(path, patch_profiles=True):\n\n # Create the list of files to load\n path = os.path.expanduser(path)\n path = os.path.abspath(path)\n\n if not os.path.exists(path):\n raise IOError(f\"Path '{path}' does not exist.\")\n if not os.path.isdir(path):\n raise IOError(f\"Failed to load the profile collection. 
Path '{path}' is not a directory.\")\n\n file_pattern = os.path.join(path, \"[0-9][0-9]*.py\")\n file_list = glob.glob(file_pattern)\n file_list.sort() # Sort in alphabetical order\n\n # Add original path to the profile collection to allow local imports\n # from the patched temporary file.\n if path not in sys.path:\n # We don't want to add/remove the path if it is already in `sys.path` for some reason.\n sys.path.append(path)\n path_is_set = True\n else:\n path_is_set = False\n\n # Load the files into the namespace 'nspace'.\n try:\n nspace = None\n for file in file_list:\n logger.info(f\"Loading startup file '{file}' ...\")\n fln_tmp = _patch_profile(file) if patch_profiles else file\n nspace = runpy.run_path(fln_tmp, nspace)\n\n # Discard RE and db from the profile namespace (if they exist).\n nspace.pop(\"RE\", None)\n nspace.pop(\"db\", None)\n finally:\n try:\n if path_is_set:\n sys.path.remove(path)\n except Exception:\n pass\n\n return nspace", "title": "" }, { "docid": "9a5587b9eeac928283d13a14c68d01ee", "score": "0.5680726", "text": "def load_basic_profile(info=None):\n url = urls.basic_profile()\n data = helper.request_get(url)\n return helper.data_filter(data, info)", "title": "" }, { "docid": "b9da5280e7bf12bb2e1ff238c6a738a7", "score": "0.5662868", "text": "def setUp(self):\r\n\r\n self.config = Configuration.objects.get()\r\n activate_profile(self.config, 1, 1)", "title": "" }, { "docid": "b82dcfe0df06aaab27325f6a353e1f72", "score": "0.56569266", "text": "def _loadprofile(i):\n switcher={\n #Residentail \n 1:'examples/Winter_EV_Profiles.csv',\n #Commercial\n 2:'examples/Commercial.csv',\n #Office\n 3:'examples/Office.csv',\n #Shopping mall\n 4:'examples/ShoppingMall.csv',\n #Factory\n 5:'examples/Factory.csv',\n #Parking\n 6:'examples/Parking.csv',\n }\n \n return switcher.get(i,\"Invalid switch\")", "title": "" }, { "docid": "a87ad9b519f7f20cb56d41411bf60ebc", "score": "0.5616937", "text": "def ipfix_load_profile_from_cfg_file(self, line):\n parser = parsing_opts.gen_parser(self,\n \"ipfix_load_profile_from_cfg_file\",\n self.ipfix_load_profile_from_cfg_file.__doc__,\n parsing_opts.IPFIX_PROFILE_CFG_FILE,\n )\n opts = parser.parse_args(line.split())\n\n try:\n config = IpfixProfileJsonConfig(opts.profile_cfg_file)\n profile = config.get_profile()\n except ValueError as error:\n print(\"Failed to create profile from config file, err:\", error)\n return\n\n if DEBUG:\n print(config.dump_profile_json())\n\n self.emu_c.remove_profile()\n self.emu_c.load_profile(profile)", "title": "" }, { "docid": "b99a5f95da25a9f7c6e8da2a261ebef6", "score": "0.5610454", "text": "def on_actionLoad_profile_triggered(self):\n statPath = str(QFileDialog.getOpenFileName(self.dock,\n \"Open profile dump\", path.expanduser(\"~\"), \"Profile file (*)\"))\n if statPath:\n self.clearContent()\n print(' INFO: OK: Loading profiling from ' + statPath)\n self.setStat(statPath)", "title": "" }, { "docid": "2afc5192089e407ff1fd898b57e79fb2", "score": "0.5566305", "text": "def _load_profile_urls(self):\n logging.info(f\"checkpoint read: ({self.checkpoint['index']},{self.checkpoint['profile_name']})\")\n self._read_profile_urls()\n self._format_profile_urls()\n logging.info(\"Done loading up \" + str(len(self.profile_urls)) + \" profile URLs. 
\"\n \"First profile is \" + self.profile_urls[0])", "title": "" }, { "docid": "f6697d8df36b7f6acb67b2cb83718721", "score": "0.5558902", "text": "def read_profile(self, filepath):\n profile = None\n with io.open(filepath, \"r\", encoding=\"utf-8\") as handle:\n profile = json.load(handle)\n self.parse_profile(profile)", "title": "" }, { "docid": "77439763646261f920d4c22e0edd4316", "score": "0.5551296", "text": "def profile(self, name):\r\n \r\n if 'profiles' in self:\r\n indict = self['profiles'][name]\r\n profile = Profile(self['profiles'], self['profiles'].depth, \r\n self, indict, name)\r\n # Inject the _profile object in the configobj dict.\r\n self['profiles'][name] = profile\r\n return profile\r\n raise KeyError(\"Unable to find _profile '%s'\" % name)", "title": "" }, { "docid": "964e20d5221668743c75dac070075070", "score": "0.553972", "text": "def cuda_profile_start():\n # check if initialization has occurred\n if not hoomd.init.is_initialized():\n raise RuntimeError(\"Cannot start profiling before initialization\\n\");\n\n if hoomd.context.current.device.cpp_exec_conf.isCUDAEnabled():\n hoomd.context.current.device.cpp_exec_conf.cudaProfileStart();", "title": "" }, { "docid": "6f0c87f99ca44228b4c7ae05cecd3bec", "score": "0.5534869", "text": "def __init__(self, profile_name):\n self.profile_name = profile_name\n\n try:\n with open('resources/user_profiles/' + self.profile_name + '.json', 'r') as f:\n self.profile = json.load(f)\n except IOError as error:\n print(error)", "title": "" }, { "docid": "a7d3d39f06ef4803e84c5a2d6cb4290b", "score": "0.55205774", "text": "def CoreProfile(self):", "title": "" }, { "docid": "a5b6475f80210038f2f0dc8f3b6774d4", "score": "0.54559183", "text": "def test_destiny2_get_profile(self):\n pass", "title": "" }, { "docid": "3e2757ed240f8e57be97efddcd33c223", "score": "0.54460895", "text": "def profile():\n raise NotImplementedError(\"clpy does not support this\")", "title": "" }, { "docid": "3e2ba34fc943a597d36acf20b5f9c068", "score": "0.5445082", "text": "def test_profiles_py(self):\n filename: str = os.path.join(self.profile.feature_directory, 'test_profiles.py')\n variables = {\n 'class_name': self.app_class,\n 'runtime_level': self.profile.ij.runtime_level.lower(),\n }\n self.render(f'{self.url}/tests/test_profiles.py.tpl', filename, variables, True)", "title": "" }, { "docid": "2ae03d5b26cb5a3edf7482b946bb2fec", "score": "0.5430509", "text": "def load_profiles(self, profiles):\n # This prevents python from blowing up when the Key does not exist :)\n self.profiles = defaultdict(list)\n\n # Now override with User-provided profiles, if present.\n self.profiles[\"GENERAL_PROFILE\"] = profiles.get('g', None) or self.get_val(\"DEFAULT_GENERAL_PROFILE\")\n # Resources profile\n self.profiles[\"RESOURCES_PROFILE\"] = profiles.get('r', None) or \\\n self.get_val(\"DEFAULT_RESOURCES_PROFILE\")\n # web plugin order\n self.profiles[\"WEB_PLUGIN_ORDER_PROFILE\"] = profiles.get('w', None) or \\\n self.get_val(\"DEFAULT_WEB_PLUGIN_ORDER_PROFILE\")\n # network plugin order\n self.profiles[\"NET_PLUGIN_ORDER_PROFILE\"] = profiles.get('n', None) or \\\n self.get_val(\"DEFAULT_NET_PLUGIN_ORDER_PROFILE\")\n # mapping\n self.profiles[\"MAPPING_PROFILE\"] = profiles.get('m', None) or self.get_val(\"DEFAULT_MAPPING_PROFILE\")", "title": "" }, { "docid": "d342e21b6c67e6f3da4d777cab209bbb", "score": "0.5407954", "text": "def getprofile(self):\n\n #import re\n #import numpy as np\n\n #first check if everything is fine\n status=self.checkrun()\n\n #if so proceed\n if 
status:\n\n #open safe\n filname=self.path+'/'+self.root+'.ovr'\n try:\n file1=open(filname,\"r\")\n except:\n print(\"Overview not found\")\n return\n\n #parse header\n header=file1.readline()\n header=header.replace(\"#\",\"\")\n header=header.split()\n\n #init dictionary\n for ff in header:\n self.prof[ff]=np.empty(0)\n\n #now parse\n for line in file1:\n line=line.split()\n\n ii=0\n for ff in header:\n self.prof[ff]=np.append(self.prof[ff],float(line[ii]))\n ii=ii+1\n\n #close and return\n file1.close()\n\n else:\n print(\"Check model... Something is wrong!\")\n return", "title": "" }, { "docid": "0cf414a0e604628744a5022674d55b0b", "score": "0.5382241", "text": "def __init__(self, profile_name):\n\n\t\tprofile = read_json(\n\t\t\tfile_name = profile_name,\n\t\t\tfile_type = 'profile_p'\n\t\t)\n\n\t\ttry:\n\t\t\tself.tree = profile['tree']\n\t\t\tself.colors = profile['colors']\n\n\t\t\tself.__load_clf(self.tree)\n\n\t\texcept KeyError:\n\t\t\texit('Invalid JSON keys')", "title": "" }, { "docid": "a6e41192f3ae36e782fc150feac920f9", "score": "0.5379902", "text": "def profile_module(self):\n raise NotImplementedError", "title": "" }, { "docid": "295684adbddbe3cddb637e09ff08183b", "score": "0.5368172", "text": "def load_profile_from_xlr_repo(self, profileName):\n sp = SearchParameters()\n sp.setType(Type.valueOf('rel.ReleaseProfile'))\n\n for p in self.__repositoryService.listEntities(sp):\n if str(p.getTitle()) == profileName:\n return json.loads(p.getProperty('profileJson'))", "title": "" }, { "docid": "2ee615e3943f24b7164c578deb87bc05", "score": "0.5349806", "text": "def _execute_profile_data_gatherer(self):\n with FecTimer(\"Profile data gatherer\", TimerWork.EXTRACTING) as timer:\n if timer.skip_if_cfg_false(\"Reports\", \"read_provenance_data\"):\n return\n if timer.skip_if_virtual_board():\n return\n profile_data_gatherer()", "title": "" }, { "docid": "5f5c528982ac6e4561203acb738c7aea", "score": "0.53437304", "text": "def profile(self):\n if issubclass_(self.profile_model, profiles.Profile):\n return self.profile_model(cm_relation=self.cm,\n mean_dens=self.mean_density0,\n delta_halo=self.delta_halo, z=self.z,\n **self.profile_params)\n else:\n print self.profile_model\n return get_model(self.profile_model, \"halomod.profiles\",\n cm_relation=self.cm,\n mean_dens=self.mean_density0,\n delta_halo=self.delta_halo, z=self.z,\n **self.profile_params)", "title": "" }, { "docid": "a0ce9e8731f847c0da1a115ce59d819b", "score": "0.53365576", "text": "def load(self):\r\n self.get_environment()\r\n self.read_config()\r\n self.load_dict()", "title": "" }, { "docid": "e1db045ba5475e135596f2ba4d9bfd46", "score": "0.5334687", "text": "def fullprofile():", "title": "" }, { "docid": "b1fed4d53b7612df5bb55d174c5a2966", "score": "0.53009284", "text": "def __init__(self, config, fname, dt, preload=True): \n \n self.config = config\n self.fname = fname\n self.dt = dt\n self.xvar = config.get('profiles', 'xvar')\n self.yvar = config.get('profiles', 'yvar')\n self.zvar = config.get('profiles', 'zvar')\n self.qcvar = config.get('profiles', 'qcvar')\n self.posqcvar = config.get('profiles', 'posqcvar')\n self.datavar = config.get('profiles', 'datavar')\n \n if preload: \n self.load_data()\n self.load_x()\n self.load_y()\n self.load_z()\n self.load_qc()\n self.load_posqc()", "title": "" }, { "docid": "aae20cab30b6370fe530b71decaa053b", "score": "0.5297457", "text": "def read_Tprofile(run='current'):\n params = load_params(run)\n Temp_file = get_path(run) + ('TempProfile.%d.%d.dat'%(params['Ndepths'], 
num_iterations(run)))\n return read_datfile(Temp_file,columns=(0,1))", "title": "" }, { "docid": "97287f917a6a66b1ed81083a8a1bdbbe", "score": "0.5287345", "text": "def profile():\n pass", "title": "" }, { "docid": "450546a196a9eb4a61c06ac9c5f95f2e", "score": "0.528554", "text": "def __init__(\n self,\n profiles: Profiles = Profiles(),\n cl_args: list[str] = None\n ):\n self._profiles = profiles\n\n self._args = Args(profiles, cl_args)\n self.profile = self._args.profile\n\n self._config = self._read_config_file(\n self._profiles.get_config_file_path(\n self.profile\n )\n )", "title": "" }, { "docid": "0faeedf76ddd2ee774417d85597d6b6d", "score": "0.52617246", "text": "def load_tile_properties(filename):\n pass", "title": "" }, { "docid": "035ca04248a596cbd593a0f55d98a29a", "score": "0.5261314", "text": "def getProfile(self):\n print 'Getting Profile...'\n self.profile = Empty()\n self.profile.xmin = self.info['gPROF_LIMITSxmin']\n self.profile.ymin = self.info['gPROF_LIMITSymin']\n self.profile.xmax = self.info['gPROF_LIMITSxmax']\n self.profile.ymax = self.info['gPROF_LIMITSymax']\n self.profile.xsize = self.profile.xmax - self.profile.xmin\n self.profile.ysize = self.profile.ymax - self.profile.ymin\n self.profile.xcenter = self.profile.xmin + self.profile.xsize / 2\n self.profile.ycenter = self.profile.ymin + self.profile.ysize / 2\n return self.profile", "title": "" }, { "docid": "795da4cdad377656dfd0ce6108958351", "score": "0.524635", "text": "def load_map(self):\n #import cProfile\n #cProfile.runctx('self.load_map_actual()', globals(), locals())\n self.load_map_actual()", "title": "" }, { "docid": "c313b0f40a080582fce473aa051b2357", "score": "0.52428627", "text": "def experiment_init(self):\n self.logger.info(\"%s Loaded Behavior Profile: %s\" % (self.robot_name, self.behavior_profile))", "title": "" }, { "docid": "3c399a35340488bf9ad7ba75239de74b", "score": "0.5237826", "text": "def load_extra_state(self, profile):\n return []", "title": "" }, { "docid": "6600c1d763c9ea74566a17e156ef89eb", "score": "0.5209816", "text": "def generate_profile_pool(path=None, runs=100, timestep=60):\n\n if path is None:\n path = os.path.dirname(os.path.abspath(__file__))\n path = os.path.join(path, 'profiles')\n else: # path is given by user\n # Create path, if not existent\n create_path_if_not_exist(path)\n\n timesteps_total = 365 * 24 * 3600 / timestep\n\n # Generate environment\n timer = pycity_base.classes.timer.Timer(time_discretization=timestep,\n timesteps_total=timesteps_total)\n weather = pycity_base.classes.weather.Weather(timer, use_TRY=True)\n prices = pycity_base.classes.prices.Prices()\n env = pycity_base.classes.environment.Environment(timer, weather, prices)\n\n for occ_index in range(1, 6): # Loop from 1 to 5 occupants\n\n print('Number of occupants: ', occ_index)\n print('#####################################################')\n\n # Filenames (Files are going to store 3 arrays ('occ', 'app', 'lig'))\n file_name = str(occ_index) + '_person_profiles.npz'\n path_profile_file = os.path.join(path, file_name)\n\n occupancy_profiles = None\n el_profiles = None\n dhw_profiles = None\n\n for i in range(runs): # Loop over desired number of profiles\n\n print('Run number: ', i)\n\n # Generate occupancy object\n occupancy = occ.Occupancy(environment=env, number_occupants=occ_index)\n\n # Get profile\n occ_profile = occupancy.occupancy\n\n if occupancy_profiles is None:\n occupancy_profiles = occ_profile\n else:\n occupancy_profiles = np.vstack(\n (occupancy_profiles, occ_profile))\n\n # Generate 
el. load profile\n el_dem_stochastic = \\\n ed.ElectricalDemand(environment=env,\n method=2,\n total_nb_occupants=occ_index,\n randomize_appliances=True,\n light_configuration=10,\n occupancy=occupancy.occupancy)\n\n # Get el. load profile\n el_profile = el_dem_stochastic.loadcurve\n\n if el_profiles is None:\n el_profiles = el_profile\n else:\n el_profiles = np.vstack((el_profiles, el_profile))\n\n # Generate hot water profile\n dhw_stochastical = \\\n dhw.DomesticHotWater(environment=env,\n t_flow=60,\n thermal=True,\n method=2,\n supply_temperature=20,\n occupancy=occ_profile)\n\n # Get dhw curve\n dhw_profile = dhw_stochastical.loadcurve\n\n if dhw_profiles is None:\n dhw_profiles = dhw_profile\n else:\n dhw_profiles = np.vstack((dhw_profiles, dhw_profile))\n\n # Save as npz file (3 arrays ('occ', 'el', 'dhw'))\n np.savez(path_profile_file, occ=occupancy_profiles,\n el=el_profiles, dhw=dhw_profiles)\n print('#####################################################')\n print()", "title": "" }, { "docid": "d8fc22a63423882e79305e866d7396ca", "score": "0.5200476", "text": "def _config_profile_get(self, thisprofile):\n url = self._cfg_profile_get_url % (thisprofile)\n payload = {}\n\n res = self._send_request('GET', url, payload, 'config-profile')\n return res.json()", "title": "" }, { "docid": "6268e25c04e63739716d9060dc84384f", "score": "0.5197894", "text": "def test_profile_load_balancing(self):\n query = \"select release_version from system.local\"\n node1 = ExecutionProfile(\n load_balancing_policy=HostFilterPolicy(\n RoundRobinPolicy(), lambda host: host.address == CASSANDRA_IP\n )\n )\n with TestCluster(execution_profiles={'node1': node1}, monitor_reporting_enabled=False) as cluster:\n session = cluster.connect(wait_for_all_pools=True)\n\n # default is DCA RR for all hosts\n expected_hosts = set(cluster.metadata.all_hosts())\n queried_hosts = set()\n for _ in expected_hosts:\n rs = session.execute(query)\n queried_hosts.add(rs.response_future._current_host)\n self.assertEqual(queried_hosts, expected_hosts)\n\n # by name we should only hit the one\n expected_hosts = set(h for h in cluster.metadata.all_hosts() if h.address == CASSANDRA_IP)\n queried_hosts = set()\n for _ in cluster.metadata.all_hosts():\n rs = session.execute(query, execution_profile='node1')\n queried_hosts.add(rs.response_future._current_host)\n self.assertEqual(queried_hosts, expected_hosts)\n\n # use a copied instance and override the row factory\n # assert last returned value can be accessed as a namedtuple so we can prove something different\n named_tuple_row = rs[0]\n self.assertIsInstance(named_tuple_row, tuple)\n self.assertTrue(named_tuple_row.release_version)\n\n tmp_profile = copy(node1)\n tmp_profile.row_factory = tuple_factory\n queried_hosts = set()\n for _ in cluster.metadata.all_hosts():\n rs = session.execute(query, execution_profile=tmp_profile)\n queried_hosts.add(rs.response_future._current_host)\n self.assertEqual(queried_hosts, expected_hosts)\n tuple_row = rs[0]\n self.assertIsInstance(tuple_row, tuple)\n with self.assertRaises(AttributeError):\n tuple_row.release_version\n\n # make sure original profile is not impacted\n self.assertTrue(session.execute(query, execution_profile='node1')[0].release_version)", "title": "" }, { "docid": "735614db6e92c0c4fe1f4b5972e888dd", "score": "0.51901066", "text": "def profile(self) -> Optional[str]:\n return __config__.get('profile') or _utilities.get_env('ALICLOUD_PROFILE')", "title": "" }, { "docid": "1d7c55c6a566166260594d3b1430debc", "score": "0.51897997", 
"text": "def __init__(self, profile: Profile):\n self.profile = profile", "title": "" }, { "docid": "bbad5b1e0c699715448b70294c210fbb", "score": "0.5189331", "text": "def load_config(self):\n pass", "title": "" }, { "docid": "1b4b3f87fd88dfdf42596392498630a8", "score": "0.51783156", "text": "def run_all_profile(self):\n self.getImages()\n self.getCollection()\n self.getJournal()\n self.getProfile()", "title": "" }, { "docid": "8fd280311cde39a9824b55ff36b9c02a", "score": "0.5177492", "text": "def load_config(self):\n config_file = 'client.ini'\n\n # read config from a file\n config = ConfigParser.ConfigParser()\n config.read(config_file)\n\n self.access_key = config.get('AWS', 'keyid')\n self.secret_key = config.get('AWS', 'secret')\n self.key_pair = config.get('AWS', 'key_pair')\n\n self.username = config.get('Client', 'username')\n self.ssh_key = config.get('Client', 'ssh_key')\n\n # self.input_dir = config.get('Profile', 'input_dir')\n # self.executable = config.get('Profile', 'executable')\n # self.instances = config.get('Profile', 'instances')\n # self.name = config.get('Profile', 'name')\n\n\n self.profiler_address = config.get('Profiler', 'address')\n self.profiler_port = config.get('Profiler', 'port')\n\n\n self.job_file = config.get('Job', 'description')\n with open(self.job_file) as data_file: \n self.job_desc = json.load(data_file)", "title": "" }, { "docid": "dbe4189987025a580acb6c54308a5a4d", "score": "0.5175147", "text": "def load_game(game_board: GameBoard) -> None:", "title": "" }, { "docid": "66ee69d08ee29b19bdd43190f974d8f0", "score": "0.5172243", "text": "def load_profiles(self,filename,valid_split=0.2):\n with open(filename, 'rb') as f:\n myattrs = pd.read_pickle(f)\n for key in myattrs:\n setattr(self, key, myattrs[key])\n \n # processing\n self.test_profiles = np.array(self.test_profiles)\n test_linear_initial = np.transpose([self.test_scalars[\"m1\"],\n self.test_scalars[\"m2\"],\n self.test_scalars[\"p\"]]) \n self.test_initial = np.log10(np.array(test_linear_initial))\n \n linear_initial = np.transpose([self.scalars[\"m1\"],\n self.scalars[\"m2\"],\n self.scalars[\"p\"]])\n \n # random split for training and validation data (default 80/20)\n binaries = np.arange(len(self.profiles))\n np.random.shuffle(binaries) \n split = int(len(self.profiles)*valid_split) # index at which to split data\n \n self.valid_profiles = np.array(self.profiles)[binaries[:split]] \n self.valid_initial = np.log10(linear_initial)[binaries[:split]] \n self.valid_scalars = self.scalars.iloc[binaries[:split]]\n \n self.profiles = np.array(self.profiles)[binaries[split:]]\n self.initial = np.log10(linear_initial)[binaries[split:]]\n self.scalars = self.scalars.iloc[binaries[split:]]", "title": "" }, { "docid": "af91def9d439180337b641381772136f", "score": "0.516826", "text": "def load_investment_profile(info=None):\n url = urls.investment_profile()\n data = helper.request_get(url)\n return helper.data_filter(data, info)", "title": "" }, { "docid": "b22286923554a89aa809815a072faa42", "score": "0.5165221", "text": "def load_profiles(filename, projection, flip):\n profiles = []\n for (\n lat,\n lon,\n id,\n name,\n clat,\n clon,\n flightline,\n glaciertype,\n flowtype,\n ) in read_shapefile(filename):\n p = Profile(\n id,\n name,\n lat,\n lon,\n clat,\n clon,\n flightline,\n glaciertype,\n flowtype,\n projection,\n flip,\n )\n profiles.append(p)\n\n return profiles", "title": "" }, { "docid": "edad233efe4249e15c1ea4617d0db8b9", "score": "0.51636237", "text": "def load():\n\n check_for_name_loops()\n 
check_for_team_loops()\n clear_all()\n\n load_metadata()\n\n #load_usmntstats()\n #load_soccerstatsus()\n\n load_garberbucks()\n\n #load_early()\n #load_socceroutsider()\n \n #load_advanced()", "title": "" }, { "docid": "af8eefbde66e99be0d60510ed15d55d5", "score": "0.516154", "text": "def test_config_load__profile_first_section(self):\n myconfig = \"\"\"\n [first]\n solver = DW_2000Q_1\n \"\"\"\n with mock.patch(\"dwave.cloud.config.load_config_from_files\",\n partial(self._load_config_from_files,\n provided=None, data=myconfig)):\n profile = load_config()\n self.assertIn('solver', profile)\n self.assertEqual(profile['solver'], 'DW_2000Q_1')", "title": "" }, { "docid": "b2f3c1b413fb717295ee998b751bc356", "score": "0.5158999", "text": "def profile():\n\n check_program(\"py-spy\", \"pip install py-spy\")\n\n with project_env():\n run(\"py-spy record -o /tmp/profile.svg -- transom render config input output\")", "title": "" }, { "docid": "5ebd4c2eac21cd64d7eb326bcf301652", "score": "0.5158006", "text": "def test_get_profiles_filepath(self):\n\t\tinput_directory = os.path.join(self.data_directory, \"simple_tree\")\n\t\tprofile = optimise.get_profiles(input_directory)\n\t\tself.assertEqual(profile.filepath, os.path.join(input_directory, \"simple_tree.inst.cfg\"), \"The file is in simple_tree/simple_tree.inst.cfg.\")", "title": "" }, { "docid": "277b21a0b002843393d548579ba5b414", "score": "0.51569873", "text": "def loadProject(self, spec_file, exp_cfg_name=None):\n\n self.spec_data = self.loadSpecFile(spec_file)\n\n # Figure out the name of the current experiment config if not specified\n if exp_cfg_name is None:\n exp_cfg_name = self.spec_data['SETTINGS']['currentExperimentName'][0]\n\n self.regionMapping = self.loadRegionMapping(self.spec_data)\n self.exp_cfg_data = self.getExperimentConfig(exp_cfg_name)\n self.lab_data = self.loadLabData(self.exp_cfg_data)\n self.robot_data = self.loadRobotFile(self.exp_cfg_data)\n self.rfi = self.loadRegionFile()\n self.coordmap_map2lab, self.coordmap_lab2map = self.getCoordMaps(self.exp_cfg_data)\n self.determineEnabledPropositions()", "title": "" }, { "docid": "0989f7dd06d3bc08c0141dad02bdbaff", "score": "0.51496035", "text": "def profile(self):\n return img_profiles.get(self._name_, {})", "title": "" }, { "docid": "7832c285b89533471a850f8890b58a5a", "score": "0.5147002", "text": "def load_parameters(self):\n\t\t# measurements of gridworld\n\t\tself.sim_height = int(rospy.get_param(\"pred/sim_height\"))\n\t\tself.sim_width = int(rospy.get_param(\"pred/sim_width\"))\n\n\t\t# simulation forward prediction parameters\n\t\tself.fwd_tsteps = rospy.get_param(\"pred/fwd_tsteps\")\n\n\t\tself.human_height = rospy.get_param(\"pred/human_height\")\n\t\tself.prob_thresh = rospy.get_param(\"pred/prob_thresh\")\n\n\t\t# resolution (m/cell)\n\t\tself.res = rospy.get_param(\"pred/resolution\")\n\n\t\t# simulation forward prediction parameters\n\t\tself.fwd_tsteps = rospy.get_param(\"pred/fwd_tsteps\")\n\n\t\t# --- real-world params ---# \n\n\t\tlow = rospy.get_param(\"state/lower\")\n\t\tup = rospy.get_param(\"state/upper\")\n\n\t\t# get real-world measurements of experimental space\n\t\tself.real_height = up[1] - low[1] \n\t\tself.real_width = up[0] - low[0] \n\t\tself.real_z = up[2] - low[2]\n\n\t\t# store the lower and upper measurements\n\t\tself.real_lower = low\n\t\tself.real_upper = up\n\n\t\t# store the occupancy grid\n\t\tself.occupancy_grids = None\n\n\t\t# visualize every 0.1 seconds\n\t\tself.visualization_delta = 0.05\n\t\tself.prev_t = 
rospy.Time().now()\n\n\t\t# visualize \n\t\tworld_xy = [low[0] + self.real_width/2.0, low[1] + self.real_height/2.0]\n\t\tworld_color = [0.1, 0, 0.9]\n\t\tworld_scale = [self.real_width, self.real_height, self.real_z]\n\t\tself.world_marker = self.state_to_marker(xy=world_xy, color=world_color, alpha=0.5, scale=world_scale)", "title": "" }, { "docid": "54953b1258926b54884eaa60bdb5fe92", "score": "0.51437354", "text": "def get_profile(self):\n return self.profile", "title": "" }, { "docid": "cea897119445f9fb6d43288b08366139", "score": "0.5142587", "text": "def _load_all(self):\n file_based_profiles = PrinterProfileManager._load_all(self)\n device_type = deviceInfo().get_type()\n mrbeam_generated_profiles = {device_type: self.get(device_type)}\n mrbeam_profiles = dict_merge(LASER_PROFILE_MAP, mrbeam_generated_profiles)\n return dict_merge(mrbeam_profiles, file_based_profiles)", "title": "" }, { "docid": "5666b4eb2b8e612b60546aa33db3a091", "score": "0.51361114", "text": "def loadConfig( self ):\n pass", "title": "" }, { "docid": "a85cddf2c39cadf2ce95f36d66a45077", "score": "0.51292026", "text": "def get_profile():\n profile_dir = sys.path[0]\n profile_path = os.path.join(profile_dir, '0kajmvjw.TestBot')\n profile = FirefoxProfile(profile_path)\n return profile", "title": "" }, { "docid": "3d7fbeaa888b3c0edbf38302a6f002a4", "score": "0.51286435", "text": "def load():\n ...", "title": "" }, { "docid": "66e88e4aa22123f476447a21353615fd", "score": "0.5113762", "text": "def profile(self):\n profile = AerisProfilePlaces(self.data[\"profile\"])\n return profile", "title": "" }, { "docid": "553184b9e1438ee055525cd1d82a97b6", "score": "0.5111401", "text": "def load(self):\n self.log_level = self.settings.get('log_level')\n setup_logging(self.log_level)\n\n plug_path = sublime.packages_path()\n default_css = os.path.join(plug_path, 'SublimeKickAssemblerC64/css/default.css')\n\n KickAssTooltip.css_file = self.settings.get('css_file', default_css)\n\n KickAssTooltip.scopes = self.settings.get('scopes', [])\n\n self.help_directories = self.settings.get('help_directories', [])\n self._load_definition()", "title": "" }, { "docid": "c728f96ac3692dabfc538c796a7f4439", "score": "0.5111237", "text": "def load_user_profile(info=None):\n url = urls.user_profile()\n data = helper.request_get(url)\n return helper.data_filter(data, info)", "title": "" }, { "docid": "778361d1d4af6c04b3738002b3663e65", "score": "0.5110366", "text": "def handle_profile_imported_event(event):\n qi = api.portal.get_tool(name='portal_quickinstaller')\n setup = api.portal.get_tool(name='portal_setup')\n\n if not qi.isProductInstalled(config.PROJECT_NAME):\n return\n\n if event.profile_id == 'profile-plone.app.upgrade.v50:to50alpha3':\n setup.runAllImportStepsFromProfile(config.INSTALL_PROFILE)\n\n if event.profile_id == 'profile-plone.app.mosaic:default':\n setup.runAllImportStepsFromProfile(config.MOSAIC_SUPPORT_PROFILE)\n\n if event.profile_id == 'profile-collective.cover:default':\n setup.runAllImportStepsFromProfile(config.COVER_SUPPORT_PROFILE)", "title": "" }, { "docid": "7091bec233cadcac6551d21d8bcaffb9", "score": "0.51041114", "text": "def profiled(self, profile: dict) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "de1d9094b320ff2f0f52ace4c912375e", "score": "0.5101679", "text": "def test_load_ModelGrid():\n path = 'data/models/atmospheric/Filippazzo2016.p'\n filepath = resource_filename('sedkit', path)\n\n lmg = mg.load_ModelGrid(filepath)\n assert isinstance(lmg, mg.ModelGrid)", "title": "" }, { "docid": 
"5f1d78e520822b82c3b8c0990b4a3674", "score": "0.5101431", "text": "def LoadInterface():\n with open(\"interface.json\",\"r\") as file:\n profile = json.load(file)\n return profile", "title": "" }, { "docid": "76eb8c07267cbf1825d03139087d648c", "score": "0.50891364", "text": "def init(self):\n test_cmd = \"profile_server_facade.ProfileServerInit\"\n test_args = {}\n test_id = self.build_id(self.test_counter)\n self.test_counter += 1\n\n return self.send_command(test_id, test_cmd, test_args)", "title": "" }, { "docid": "7fec972b1a3e5c57a893fb284be5522d", "score": "0.50841707", "text": "def load_profile(profile_directory, name):\n possible_profile_paths = [\n name,\n os.path.join(profile_directory, name)\n ]\n\n try:\n profile_path = next(\n path for path in possible_profile_paths if os.path.isfile(path)\n )\n except StopIteration:\n raise ConfigurationError(\"profile '{}' not found\".format(name))\n else:\n return yaml.load(open(profile_path))", "title": "" }, { "docid": "63b990170b479c50a5409b695f964cd4", "score": "0.508033", "text": "def _refresh_profiles(self, change):\n if self.driver:\n core = self.plugin.workbench.get_plugin('enaml.workbench.core')\n cmd = 'hqc_meas.instr_manager.matching_profiles'\n self.profiles = core.invoke_command(cmd,\n {'drivers': [self.driver]},\n self)", "title": "" }, { "docid": "73d631af8b6597666764177665c9b0fa", "score": "0.5077805", "text": "def __init__(self, configuration_file_location):\n self.profile_settings = ProfileSettings()\n self._load_settings(configuration_file_location)\n self.iamadmin_client_pool = utils.initialize_iamadmin_client_pool(self.profile_settings.PROFILE_SERVICE_HOST,\n self.profile_settings.PROFILE_SERVICE_PORT)", "title": "" }, { "docid": "3db06b64ffbe7202f1f251112aaa0896", "score": "0.5075461", "text": "def j1713_profile():\n path = 'psrsigsim/data/J1713+0747_profile.npy'\n return np.load(path)", "title": "" }, { "docid": "e8120188c97ba0eec87c99e260370381", "score": "0.5070061", "text": "def load_state(self, render_context=None):\n if self.state_loaded:\n return\n\n profile_sort_list = None\n profile_columns_list = None\n profile = None\n profile_dirty_fields = []\n profile_dirty_fields_all = False\n\n # Get the saved settings for this grid in the profile. These will\n # work as defaults and allow us to determine if we need to save\n # the profile.\n if self.request.user.is_authenticated:\n profile = self.get_user_profile()\n\n if profile:\n if self.profile_sort_field:\n profile_sort_list = \\\n getattr(profile, self.profile_sort_field, None)\n\n if self.profile_columns_field:\n profile_columns_list = \\\n getattr(profile, self.profile_columns_field, None)\n\n # Figure out the columns we're going to display\n # We're also going to calculate the column widths based on the\n # shrink and expand values.\n colnames = self.request.GET.get('columns', profile_columns_list) or ''\n\n columns = list(filter(None, [\n self.get_column(colname)\n for colname in colnames.split(',')\n ]))\n\n if not columns:\n colnames = ','.join(self.default_columns)\n columns = [\n self.get_column(colname)\n for colname in self.default_columns\n ]\n\n expand_columns = []\n normal_columns = []\n\n for column_def in columns:\n column = self.get_stateful_column(column_def)\n self.columns.append(column)\n column.active = True\n\n if column.expand:\n # This column is requesting all remaining space. Save it for\n # later so we can tell how much to give it. 
Each expanded\n # column will count as two normal columns when calculating\n # the normal sized columns.\n expand_columns.append(column)\n elif column.shrink:\n # Make this as small as possible.\n column.width = 0\n else:\n # We'll divide the column widths equally after we've built\n # up the lists of expanded and normal sized columns.\n normal_columns.append(column)\n\n self.columns[-1].last = True\n\n # Try to figure out the column widths for each column.\n # We'll start with the normal sized columns.\n total_pct = 100\n\n # Each expanded column counts as two normal columns.\n normal_column_width = total_pct / (len(self.columns) +\n len(expand_columns))\n\n for column in normal_columns:\n column.width = normal_column_width\n total_pct -= normal_column_width\n\n if len(expand_columns) > 0:\n expanded_column_width = total_pct / len(expand_columns)\n else:\n expanded_column_width = 0\n\n for column in expand_columns:\n column.width = expanded_column_width\n\n # Now get the sorting order for the columns.\n sort_str = self.request.GET.get('sort', profile_sort_list)\n self.sort_list = []\n\n if sort_str:\n for sort_item in sort_str.split(','):\n if not sort_item:\n continue\n\n if sort_item[0] == '-':\n base_sort_item = sort_item[1:]\n else:\n base_sort_item = sort_item\n\n column = self.get_column(base_sort_item)\n\n if column and column.sortable:\n self.sort_list.append(sort_item)\n\n if not self.sort_list:\n self.sort_list = self.default_sort\n sort_str = \",\".join(self.sort_list)\n\n # A subclass might have some work to do for loading and saving\n # as well.\n load_state_result = self.load_extra_state(profile)\n\n assert isinstance(load_state_result, list)\n profile_dirty_fields += load_state_result\n\n # Now that we have all that, figure out if we need to save new\n # settings back to the profile.\n if profile:\n if (self.profile_columns_field and\n colnames != profile_columns_list):\n setattr(profile, self.profile_columns_field, colnames)\n profile_dirty_fields.append(self.profile_columns_field)\n\n if self.profile_sort_field and sort_str != profile_sort_list:\n setattr(profile, self.profile_sort_field, sort_str)\n profile_dirty_fields.append(self.profile_sort_field)\n\n if profile_dirty_fields_all:\n # This can be removed in Djblets 4.\n profile.save()\n elif profile_dirty_fields:\n profile.save(update_fields=profile_dirty_fields)\n\n self.state_loaded = True\n\n # Fetch the list of objects and have it ready.\n self.precompute_objects(render_context)", "title": "" }, { "docid": "781acfeea9a8989dbdfbf3c150089178", "score": "0.5068962", "text": "def load(self):\r\n self.d_threshold=np.load(r\"results/models/CRGAN/threshold.npy\")\r\n self.discriminator = load_model(os.path.join('results','models','CRGAN', 'discriminator.h5'))\r\n try:\r\n self.combined = load_model(os.path.join('results','models','CRGAN', 'combined_model.h5'))\r\n self.generator = load_model(os.path.join('results','models','CRGAN', 'generator.h5'))\r\n except:\r\n pass\r\n self.discriminator.summary()", "title": "" }, { "docid": "879eb7b9046410ae2cc2dc1913b8cb7b", "score": "0.5065788", "text": "def initialize_profile(self):\n\n prefs = []\n\n part = \"\"\"\\\nuser_pref(\"browser.dom.window.dump.enabled\", true);\nuser_pref(\"dom.allow_scripts_to_close_windows\", true);\nuser_pref(\"dom.disable_open_during_load\", false);\nuser_pref(\"dom.max_script_run_time\", 0); // no slow script dialogs\nuser_pref(\"dom.max_chrome_script_run_time\", 0);\nuser_pref(\"dom.popup_maximum\", 
-1);\nuser_pref(\"signed.applets.codebase_principal_support\", true);\nuser_pref(\"security.warn_submit_insecure\", false);\nuser_pref(\"browser.shell.checkDefaultBrowser\", false);\nuser_pref(\"shell.checkDefaultClient\", false);\nuser_pref(\"browser.warnOnQuit\", false);\nuser_pref(\"accessibility.typeaheadfind.autostart\", false);\nuser_pref(\"javascript.options.showInConsole\", true);\nuser_pref(\"layout.debug.enable_data_xbl\", true);\nuser_pref(\"browser.EULA.override\", true);\nuser_pref(\"javascript.options.jit.content\", true);\nuser_pref(\"gfx.color_management.force_srgb\", true);\nuser_pref(\"network.manage-offline-status\", false);\nuser_pref(\"test.mousescroll\", true);\nuser_pref(\"security.default_personal_cert\", \"Select Automatically\"); // Need to client auth test be w/o any dialogs\nuser_pref(\"network.http.prompt-temp-redirect\", false);\nuser_pref(\"svg.smil.enabled\", true); // Needed for SMIL mochitests until bug 482402 lands\nuser_pref(\"media.cache_size\", 100);\nuser_pref(\"security.warn_viewing_mixed\", false);\n\nuser_pref(\"geo.wifi.uri\", \"http://localhost:8888/tests/dom/tests/mochitest/geolocation/network_geolocation.sjs\");\nuser_pref(\"geo.wifi.testing\", true);\n\nuser_pref(\"camino.warn_when_closing\", false); // Camino-only, harmless to others\n\n// Make url-classifier updates so rare that they won't affect tests\nuser_pref(\"urlclassifier.updateinterval\", 172800);\n// Point the url-classifier to the local testing server for fast failures\nuser_pref(\"browser.safebrowsing.provider.0.gethashURL\", \"http://localhost:8888/safebrowsing-dummy/gethash\");\nuser_pref(\"browser.safebrowsing.provider.0.keyURL\", \"http://localhost:8888/safebrowsing-dummy/newkey\");\nuser_pref(\"browser.safebrowsing.provider.0.lookupURL\", \"http://localhost:8888/safebrowsing-dummy/lookup\");\nuser_pref(\"browser.safebrowsing.provider.0.updateURL\", \"http://localhost:8888/safebrowsing-dummy/update\");\n\"\"\"\n prefs.append(part)\n\n # write the preferences\n prefsFile = open(self.profile_dir + \"/\" + \"user.js\", \"a\")\n prefsFile.write(\"\".join(prefs))\n prefsFile.close()", "title": "" }, { "docid": "b9d647b5d80bf7fae6f292abaa9432e1", "score": "0.5055109", "text": "def _init_profiler_info(self):\n mode = \"graph\"\n if context.get_context(\"mode\") == context.PYNATIVE_MODE:\n mode = \"pynative\"\n store_id = self._dev_id if self._device_target == DeviceTarget.GPU.value else self._rank_id\n ProfilerInfo.init_info(mode, store_id)", "title": "" }, { "docid": "9646422e7f026abc287db0715eabbf3c", "score": "0.5049698", "text": "def load():\n global _config\n _config = make_config()\n _config.load()\n globals().update(_config.dict())", "title": "" }, { "docid": "fc62e5104b3fefa6622109d76ca71cdb", "score": "0.5049358", "text": "def Load(self, config):", "title": "" }, { "docid": "002399d05897e5b54318b562d040594e", "score": "0.50491345", "text": "def get_profile(self):\r\n result = self.get_attribute(\"profile\")\r\n return result['catalog']", "title": "" }, { "docid": "e489b52cc96b78fcf23f9b8216c22d5d", "score": "0.50480455", "text": "def profile(self):\n temp_param = 'profile'\n return self._get_return_value(temp_param, self._state_hue)", "title": "" }, { "docid": "17ac344fb8e5b64acd8f3c9b14f35ca1", "score": "0.504215", "text": "def load_profile_from_h5(h5file):\n import h5py\n from .geometry import Profile\n\n with h5py.File(h5file, 'r') as fid:\n u, h, b, x = [fid[key][()] for key in ('u', 'h', 'b', 'x')]\n profile = Profile(x, h, b, u)\n if 't' in fid.keys():\n profile.t = 
fid['t'][()]\n else:\n profile.t = 0.0\n return profile", "title": "" }, { "docid": "83165a3c2b67b1c56de8de93da089f7d", "score": "0.5041468", "text": "def loadPConf(self):\n\n\n with open(self.confPath+self.confName,\"r\") as f_in:\n dct = json.loads(f_in.readlines()[0])\n\n self.squareCol = dct[\"squareCol\"]\n self.squareMinSize = dct[\"squareMinSize\"]\n self.useRoot = dct[\"useRoot\"]\n self.debug = dct[\"debug\"]\n self.useNumpy = dct[\"useNumpy\"]\n\n if self.debug:\n print \"loaded Pconf from: \"\n print self.confPath+self.confName", "title": "" }, { "docid": "df0b81aab75cae1b99285c607ec74771", "score": "0.50412744", "text": "def test_fail_if_profile_not_found(self):\n test_ui = MockUserInterface(argv=[])\n with open(test_ui.HOME + \"/.okta_aws_login_config\", \"w\") as config_file:\n config_file.write(\"\"\"\n [myprofile]\n client_id = foo\n \"\"\")\n config = Config(gac_ui=test_ui, create_config=False)\n config.conf_profile = \"DEFAULT\"\n with self.assertRaises(errors.GimmeAWSCredsError) as context:\n config.get_config_dict()\n self.assertTrue('DEFAULT profile is missing! This is profile is required when not using --profile' == context.exception.message)", "title": "" }, { "docid": "32c028a28ecaaedc84676d2a158e1173", "score": "0.5034164", "text": "def _populate():\n global _loaded\n\n with _load_lock:\n _do_populate()", "title": "" }, { "docid": "cec50284ed91ddd2e6617e0275cf537e", "score": "0.50337565", "text": "def load_settings(self):\n pass", "title": "" }, { "docid": "d5d13d7c593b98fde2e117bac4ef4ae1", "score": "0.50299525", "text": "def load_scenarios(self):\n pass", "title": "" }, { "docid": "6b261042f133a23f91af9c35d33d2165", "score": "0.50252926", "text": "def init(self):\n # update profile\n profile_data = self.update()\n\n # replace all variable references\n profile_data = self.replace_env_variables(profile_data)\n\n # replace all staged variable\n profile_data = self.replace_tc_variables(profile_data)\n\n # set update profile data\n self._data = profile_data", "title": "" }, { "docid": "f80d35957fd3bf92dc24f656863949a1", "score": "0.5024173", "text": "def source_profile():\n if(path.isfile('~/.profile')):\n subprocess.run([\"source\", \"~/.profile\"])", "title": "" }, { "docid": "095a983a81bb0556814bae8a5c9522eb", "score": "0.5021008", "text": "def load(self):\n self.dataset = read_dataset(self.data_config[\"training_path\"])", "title": "" }, { "docid": "0f2923a50676734f3bce5ee6fea738bb", "score": "0.50174224", "text": "def test_config_load__profile_arg_nonexisting(self):\n with mock.patch(\"dwave.cloud.config.load_config_from_files\",\n partial(self._load_config_from_files, provided=None)):\n self.assertRaises(ValueError, load_config, profile=\"nonexisting\")\n with mock.patch.dict(os.environ, {'DWAVE_PROFILE': 'nonexisting'}):\n self.assertRaises(ValueError, load_config)", "title": "" }, { "docid": "59b308cce48249eca8b59bc410867c29", "score": "0.5012932", "text": "def profile(self):\r\n return self._profile", "title": "" }, { "docid": "59b308cce48249eca8b59bc410867c29", "score": "0.5012932", "text": "def profile(self):\r\n return self._profile", "title": "" }, { "docid": "f71b700298e9590caddb74f31af99e34", "score": "0.50073826", "text": "def load_pngs():\n load_stone_pngs()\n load_field_pngs()\n load_dice_pngs()\n load_text_pngs()", "title": "" }, { "docid": "0f644d1e2c6fe7dcf8c23dfc0363b888", "score": "0.49931526", "text": "def test_profile_dataset(self):\n profile = DS.profile_3.load()\n assert profile.profile.shape == (7385, 1948)\n print(profile.activity.shape)\n 
assert profile.activity.shape == (7385,)\n assert profile.smiles.shape == (7385,)\n subset = [119, 79, 83, 7, 37, 99, 129, 59, 41]\n cluster_0 = profile.get_subprofile(subset)\n assert cluster_0.profile.shape == (7385, 9)", "title": "" }, { "docid": "35daa429190ab46cbd42079b7fc42186", "score": "0.49905598", "text": "def load(self):\n print(f\"Loading data into {self.config_dict.get('remote_url')}\"\n f\" to {self.config_dict.get('proj_dir')}\")", "title": "" }, { "docid": "d0ac5d9ca2236db7e04dd0d665a1147c", "score": "0.49894038", "text": "def load_security_profile(info=None):\n url = urls.security_profile()\n data = helper.request_get(url)\n return helper.data_filter(data, info)", "title": "" }, { "docid": "2698039f06cd2c89e04e2b715b07dce5", "score": "0.49817234", "text": "def test_eager_sync_source_profile_should_skip(self):\n cli.core.profiles = [\"default\"]\n self.assertIsNone(cli.core.eager_sync_source_profile(\"default\", {}))", "title": "" }, { "docid": "5a89f575864fecd0aa9d90e187f587fb", "score": "0.49796197", "text": "def profile(self):\n return self._profile", "title": "" } ]
6deaeac49a5e00cf85b672ee7e6c9014
Test serialization/deserialization for CreatePeerBodyStorage
[ { "docid": "0eb0102d88ceb9abf14e0f30a62bd4bc", "score": "0.8516777", "text": "def test_create_peer_body_storage_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n storage_object_model = {} # StorageObject\n storage_object_model['size'] = '4GiB'\n storage_object_model['class'] = 'default'\n\n # Construct a json representation of a CreatePeerBodyStorage model\n create_peer_body_storage_model_json = {}\n create_peer_body_storage_model_json['peer'] = storage_object_model\n create_peer_body_storage_model_json['statedb'] = storage_object_model\n\n # Construct a model instance of CreatePeerBodyStorage by calling from_dict on the json representation\n create_peer_body_storage_model = CreatePeerBodyStorage.from_dict(create_peer_body_storage_model_json)\n assert create_peer_body_storage_model != False\n\n # Construct a model instance of CreatePeerBodyStorage by calling from_dict on the json representation\n create_peer_body_storage_model_dict = CreatePeerBodyStorage.from_dict(create_peer_body_storage_model_json).__dict__\n create_peer_body_storage_model2 = CreatePeerBodyStorage(**create_peer_body_storage_model_dict)\n\n # Verify the model instances are equivalent\n assert create_peer_body_storage_model == create_peer_body_storage_model2\n\n # Convert model instance back to dict and verify no loss of data\n create_peer_body_storage_model_json2 = create_peer_body_storage_model.to_dict()\n assert create_peer_body_storage_model_json2 == create_peer_body_storage_model_json", "title": "" } ]
[ { "docid": "4a37b93496932e4910b45d05b5ebc111", "score": "0.7728197", "text": "def test_peer_response_storage_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n storage_object_model = {} # StorageObject\n storage_object_model['size'] = '4GiB'\n storage_object_model['class'] = 'default'\n\n # Construct a json representation of a PeerResponseStorage model\n peer_response_storage_model_json = {}\n peer_response_storage_model_json['peer'] = storage_object_model\n peer_response_storage_model_json['statedb'] = storage_object_model\n\n # Construct a model instance of PeerResponseStorage by calling from_dict on the json representation\n peer_response_storage_model = PeerResponseStorage.from_dict(peer_response_storage_model_json)\n assert peer_response_storage_model != False\n\n # Construct a model instance of PeerResponseStorage by calling from_dict on the json representation\n peer_response_storage_model_dict = PeerResponseStorage.from_dict(peer_response_storage_model_json).__dict__\n peer_response_storage_model2 = PeerResponseStorage(**peer_response_storage_model_dict)\n\n # Verify the model instances are equivalent\n assert peer_response_storage_model == peer_response_storage_model2\n\n # Convert model instance back to dict and verify no loss of data\n peer_response_storage_model_json2 = peer_response_storage_model.to_dict()\n assert peer_response_storage_model_json2 == peer_response_storage_model_json", "title": "" }, { "docid": "84a87479cbf27268e12171bac46599cb", "score": "0.7353251", "text": "def test_create_orderer_raft_body_storage_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n storage_object_model = {} # StorageObject\n storage_object_model['size'] = '4GiB'\n storage_object_model['class'] = 'default'\n\n # Construct a json representation of a CreateOrdererRaftBodyStorage model\n create_orderer_raft_body_storage_model_json = {}\n create_orderer_raft_body_storage_model_json['orderer'] = storage_object_model\n\n # Construct a model instance of CreateOrdererRaftBodyStorage by calling from_dict on the json representation\n create_orderer_raft_body_storage_model = CreateOrdererRaftBodyStorage.from_dict(create_orderer_raft_body_storage_model_json)\n assert create_orderer_raft_body_storage_model != False\n\n # Construct a model instance of CreateOrdererRaftBodyStorage by calling from_dict on the json representation\n create_orderer_raft_body_storage_model_dict = CreateOrdererRaftBodyStorage.from_dict(create_orderer_raft_body_storage_model_json).__dict__\n create_orderer_raft_body_storage_model2 = CreateOrdererRaftBodyStorage(**create_orderer_raft_body_storage_model_dict)\n\n # Verify the model instances are equivalent\n assert create_orderer_raft_body_storage_model == create_orderer_raft_body_storage_model2\n\n # Convert model instance back to dict and verify no loss of data\n create_orderer_raft_body_storage_model_json2 = create_orderer_raft_body_storage_model.to_dict()\n assert create_orderer_raft_body_storage_model_json2 == create_orderer_raft_body_storage_model_json", "title": "" }, { "docid": "dada7f8adf7906e72c00a2756cef93f2", "score": "0.71289426", "text": "def test_peer_resources_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n resource_requests_model = {} # ResourceRequests\n resource_requests_model['cpu'] = '100m'\n resource_requests_model['memory'] = '256MiB'\n\n resource_limits_model = {} # 
ResourceLimits\n resource_limits_model['cpu'] = '100m'\n resource_limits_model['memory'] = '256MiB'\n\n resource_object_fab_v2_model = {} # ResourceObjectFabV2\n resource_object_fab_v2_model['requests'] = resource_requests_model\n resource_object_fab_v2_model['limits'] = resource_limits_model\n\n resource_object_couch_db_model = {} # ResourceObjectCouchDb\n resource_object_couch_db_model['requests'] = resource_requests_model\n resource_object_couch_db_model['limits'] = resource_limits_model\n\n resource_object_model = {} # ResourceObject\n resource_object_model['requests'] = resource_requests_model\n resource_object_model['limits'] = resource_limits_model\n\n resource_object_fab_v1_model = {} # ResourceObjectFabV1\n resource_object_fab_v1_model['requests'] = resource_requests_model\n resource_object_fab_v1_model['limits'] = resource_limits_model\n\n # Construct a json representation of a PeerResources model\n peer_resources_model_json = {}\n peer_resources_model_json['chaincodelauncher'] = resource_object_fab_v2_model\n peer_resources_model_json['couchdb'] = resource_object_couch_db_model\n peer_resources_model_json['statedb'] = resource_object_model\n peer_resources_model_json['dind'] = resource_object_fab_v1_model\n peer_resources_model_json['fluentd'] = resource_object_fab_v1_model\n peer_resources_model_json['peer'] = resource_object_model\n peer_resources_model_json['proxy'] = resource_object_model\n\n # Construct a model instance of PeerResources by calling from_dict on the json representation\n peer_resources_model = PeerResources.from_dict(peer_resources_model_json)\n assert peer_resources_model != False\n\n # Construct a model instance of PeerResources by calling from_dict on the json representation\n peer_resources_model_dict = PeerResources.from_dict(peer_resources_model_json).__dict__\n peer_resources_model2 = PeerResources(**peer_resources_model_dict)\n\n # Verify the model instances are equivalent\n assert peer_resources_model == peer_resources_model2\n\n # Convert model instance back to dict and verify no loss of data\n peer_resources_model_json2 = peer_resources_model.to_dict()\n assert peer_resources_model_json2 == peer_resources_model_json", "title": "" }, { "docid": "850f8e963afad61a8067882af011b837", "score": "0.6983533", "text": "def test_generic_component_response_storage_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n storage_object_model = {} # StorageObject\n storage_object_model['size'] = '4GiB'\n storage_object_model['class'] = 'default'\n\n # Construct a json representation of a GenericComponentResponseStorage model\n generic_component_response_storage_model_json = {}\n generic_component_response_storage_model_json['ca'] = storage_object_model\n generic_component_response_storage_model_json['peer'] = storage_object_model\n generic_component_response_storage_model_json['orderer'] = storage_object_model\n generic_component_response_storage_model_json['statedb'] = storage_object_model\n\n # Construct a model instance of GenericComponentResponseStorage by calling from_dict on the json representation\n generic_component_response_storage_model = GenericComponentResponseStorage.from_dict(generic_component_response_storage_model_json)\n assert generic_component_response_storage_model != False\n\n # Construct a model instance of GenericComponentResponseStorage by calling from_dict on the json representation\n generic_component_response_storage_model_dict = 
GenericComponentResponseStorage.from_dict(generic_component_response_storage_model_json).__dict__\n generic_component_response_storage_model2 = GenericComponentResponseStorage(**generic_component_response_storage_model_dict)\n\n # Verify the model instances are equivalent\n assert generic_component_response_storage_model == generic_component_response_storage_model2\n\n # Convert model instance back to dict and verify no loss of data\n generic_component_response_storage_model_json2 = generic_component_response_storage_model.to_dict()\n assert generic_component_response_storage_model_json2 == generic_component_response_storage_model_json", "title": "" }, { "docid": "5c737dcd803373959dac62c9ceee86cf", "score": "0.6907085", "text": "def test_peer_response_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n node_ou_model = {} # NodeOu\n node_ou_model['enabled'] = True\n\n msp_crypto_field_ca_model = {} # MspCryptoFieldCa\n msp_crypto_field_ca_model['name'] = 'ca'\n msp_crypto_field_ca_model['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n msp_crypto_field_tlsca_model = {} # MspCryptoFieldTlsca\n msp_crypto_field_tlsca_model['name'] = 'tlsca'\n msp_crypto_field_tlsca_model['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n msp_crypto_field_component_model = {} # MspCryptoFieldComponent\n msp_crypto_field_component_model['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n msp_crypto_field_component_model['ecert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n msp_crypto_field_component_model['admin_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n msp_crypto_field_model = {} # MspCryptoField\n msp_crypto_field_model['ca'] = msp_crypto_field_ca_model\n msp_crypto_field_model['tlsca'] = msp_crypto_field_tlsca_model\n msp_crypto_field_model['component'] = msp_crypto_field_component_model\n\n generic_resources_requests_model = {} # GenericResourcesRequests\n generic_resources_requests_model['cpu'] = '100m'\n generic_resources_requests_model['memory'] = '256M'\n\n generic_resource_limits_model = {} # GenericResourceLimits\n generic_resource_limits_model['cpu'] = '8000m'\n generic_resource_limits_model['memory'] = '16384M'\n\n generic_resources_model = {} # GenericResources\n generic_resources_model['requests'] = generic_resources_requests_model\n generic_resources_model['limits'] = generic_resource_limits_model\n\n peer_response_resources_model = {} # PeerResponseResources\n peer_response_resources_model['peer'] = generic_resources_model\n peer_response_resources_model['proxy'] = generic_resources_model\n peer_response_resources_model['statedb'] = generic_resources_model\n\n storage_object_model = {} # StorageObject\n storage_object_model['size'] = '4GiB'\n storage_object_model['class'] = 'default'\n\n peer_response_storage_model = {} # PeerResponseStorage\n peer_response_storage_model['peer'] = storage_object_model\n peer_response_storage_model['statedb'] = storage_object_model\n\n # Construct a json representation of a PeerResponse model\n peer_response_model_json = 
{}\n peer_response_model_json['id'] = 'component1'\n peer_response_model_json['dep_component_id'] = 'admin'\n peer_response_model_json['api_url'] = 'grpcs://n3a3ec3-mypeer.ibp.us-south.containers.appdomain.cloud:7051'\n peer_response_model_json['display_name'] = 'My Peer'\n peer_response_model_json['grpcwp_url'] = 'https://n3a3ec3-mypeer-proxy.ibp.us-south.containers.appdomain.cloud:8084'\n peer_response_model_json['location'] = 'ibmcloud'\n peer_response_model_json['operations_url'] = 'https://n3a3ec3-mypeer.ibp.us-south.containers.appdomain.cloud:9443'\n peer_response_model_json['config_override'] = { 'foo': 'bar' }\n peer_response_model_json['node_ou'] = node_ou_model\n peer_response_model_json['msp'] = msp_crypto_field_model\n peer_response_model_json['msp_id'] = 'Org1'\n peer_response_model_json['resources'] = peer_response_resources_model\n peer_response_model_json['scheme_version'] = 'v1'\n peer_response_model_json['state_db'] = 'couchdb'\n peer_response_model_json['storage'] = peer_response_storage_model\n peer_response_model_json['tags'] = ['fabric-ca']\n peer_response_model_json['timestamp'] = 1537262855753\n peer_response_model_json['type'] = 'fabric-peer'\n peer_response_model_json['version'] = '1.4.6-1'\n peer_response_model_json['zone'] = '-'\n\n # Construct a model instance of PeerResponse by calling from_dict on the json representation\n peer_response_model = PeerResponse.from_dict(peer_response_model_json)\n assert peer_response_model != False\n\n # Construct a model instance of PeerResponse by calling from_dict on the json representation\n peer_response_model_dict = PeerResponse.from_dict(peer_response_model_json).__dict__\n peer_response_model2 = PeerResponse(**peer_response_model_dict)\n\n # Verify the model instances are equivalent\n assert peer_response_model == peer_response_model2\n\n # Convert model instance back to dict and verify no loss of data\n peer_response_model_json2 = peer_response_model.to_dict()\n assert peer_response_model_json2 == peer_response_model_json", "title": "" }, { "docid": "f96f03e95d903f293499856eb7e6f009", "score": "0.6906553", "text": "def test_peer_response_resources_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n generic_resources_requests_model = {} # GenericResourcesRequests\n generic_resources_requests_model['cpu'] = '100m'\n generic_resources_requests_model['memory'] = '256M'\n\n generic_resource_limits_model = {} # GenericResourceLimits\n generic_resource_limits_model['cpu'] = '8000m'\n generic_resource_limits_model['memory'] = '16384M'\n\n generic_resources_model = {} # GenericResources\n generic_resources_model['requests'] = generic_resources_requests_model\n generic_resources_model['limits'] = generic_resource_limits_model\n\n # Construct a json representation of a PeerResponseResources model\n peer_response_resources_model_json = {}\n peer_response_resources_model_json['peer'] = generic_resources_model\n peer_response_resources_model_json['proxy'] = generic_resources_model\n peer_response_resources_model_json['statedb'] = generic_resources_model\n\n # Construct a model instance of PeerResponseResources by calling from_dict on the json representation\n peer_response_resources_model = PeerResponseResources.from_dict(peer_response_resources_model_json)\n assert peer_response_resources_model != False\n\n # Construct a model instance of PeerResponseResources by calling from_dict on the json representation\n peer_response_resources_model_dict = 
PeerResponseResources.from_dict(peer_response_resources_model_json).__dict__\n peer_response_resources_model2 = PeerResponseResources(**peer_response_resources_model_dict)\n\n # Verify the model instances are equivalent\n assert peer_response_resources_model == peer_response_resources_model2\n\n # Convert model instance back to dict and verify no loss of data\n peer_response_resources_model_json2 = peer_response_resources_model.to_dict()\n assert peer_response_resources_model_json2 == peer_response_resources_model_json", "title": "" }, { "docid": "11f924fef3d6991aaf4ad8620f79b93c", "score": "0.6900543", "text": "def test_definse_serialize_deserialize(self):\n self.assertEqual(self._class().serialize({}), \"{}\")\n self.assertEqual(self._class().deserialize(\"{}\"), {})", "title": "" }, { "docid": "da57c3fdb4225b0efc4053584c60b39b", "score": "0.6858918", "text": "def test_create_ca_body_storage_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n storage_object_model = {} # StorageObject\n storage_object_model['size'] = '4GiB'\n storage_object_model['class'] = 'default'\n\n # Construct a json representation of a CreateCaBodyStorage model\n create_ca_body_storage_model_json = {}\n create_ca_body_storage_model_json['ca'] = storage_object_model\n\n # Construct a model instance of CreateCaBodyStorage by calling from_dict on the json representation\n create_ca_body_storage_model = CreateCaBodyStorage.from_dict(create_ca_body_storage_model_json)\n assert create_ca_body_storage_model != False\n\n # Construct a model instance of CreateCaBodyStorage by calling from_dict on the json representation\n create_ca_body_storage_model_dict = CreateCaBodyStorage.from_dict(create_ca_body_storage_model_json).__dict__\n create_ca_body_storage_model2 = CreateCaBodyStorage(**create_ca_body_storage_model_dict)\n\n # Verify the model instances are equivalent\n assert create_ca_body_storage_model == create_ca_body_storage_model2\n\n # Convert model instance back to dict and verify no loss of data\n create_ca_body_storage_model_json2 = create_ca_body_storage_model.to_dict()\n assert create_ca_body_storage_model_json2 == create_ca_body_storage_model_json", "title": "" }, { "docid": "a71f00cee8267147e0037d45bbd2483e", "score": "0.6794682", "text": "def test_config_peer_create_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n config_peer_keepalive_client_model = {} # ConfigPeerKeepaliveClient\n config_peer_keepalive_client_model['interval'] = '60s'\n config_peer_keepalive_client_model['timeout'] = '20s'\n\n config_peer_keepalive_delivery_client_model = {} # ConfigPeerKeepaliveDeliveryClient\n config_peer_keepalive_delivery_client_model['interval'] = '60s'\n config_peer_keepalive_delivery_client_model['timeout'] = '20s'\n\n config_peer_keepalive_model = {} # ConfigPeerKeepalive\n config_peer_keepalive_model['minInterval'] = '60s'\n config_peer_keepalive_model['client'] = config_peer_keepalive_client_model\n config_peer_keepalive_model['deliveryClient'] = config_peer_keepalive_delivery_client_model\n\n config_peer_gossip_election_model = {} # ConfigPeerGossipElection\n config_peer_gossip_election_model['startupGracePeriod'] = '15s'\n config_peer_gossip_election_model['membershipSampleInterval'] = '1s'\n config_peer_gossip_election_model['leaderAliveThreshold'] = '10s'\n config_peer_gossip_election_model['leaderElectionDuration'] = '5s'\n\n 
config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model = {} # ConfigPeerGossipPvtDataImplicitCollectionDisseminationPolicy\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model['requiredPeerCount'] = 0\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model['maxPeerCount'] = 1\n\n config_peer_gossip_pvt_data_model = {} # ConfigPeerGossipPvtData\n config_peer_gossip_pvt_data_model['pullRetryThreshold'] = '60s'\n config_peer_gossip_pvt_data_model['transientstoreMaxBlockRetention'] = 1000\n config_peer_gossip_pvt_data_model['pushAckTimeout'] = '3s'\n config_peer_gossip_pvt_data_model['btlPullMargin'] = 10\n config_peer_gossip_pvt_data_model['reconcileBatchSize'] = 10\n config_peer_gossip_pvt_data_model['reconcileSleepInterval'] = '1m'\n config_peer_gossip_pvt_data_model['reconciliationEnabled'] = True\n config_peer_gossip_pvt_data_model['skipPullingInvalidTransactionsDuringCommit'] = False\n config_peer_gossip_pvt_data_model['implicitCollectionDisseminationPolicy'] = config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model\n\n config_peer_gossip_state_model = {} # ConfigPeerGossipState\n config_peer_gossip_state_model['enabled'] = True\n config_peer_gossip_state_model['checkInterval'] = '10s'\n config_peer_gossip_state_model['responseTimeout'] = '3s'\n config_peer_gossip_state_model['batchSize'] = 10\n config_peer_gossip_state_model['blockBufferSize'] = 100\n config_peer_gossip_state_model['maxRetries'] = 3\n\n config_peer_gossip_model = {} # ConfigPeerGossip\n config_peer_gossip_model['useLeaderElection'] = True\n config_peer_gossip_model['orgLeader'] = False\n config_peer_gossip_model['membershipTrackerInterval'] = '5s'\n config_peer_gossip_model['maxBlockCountToStore'] = 100\n config_peer_gossip_model['maxPropagationBurstLatency'] = '10ms'\n config_peer_gossip_model['maxPropagationBurstSize'] = 10\n config_peer_gossip_model['propagateIterations'] = 3\n config_peer_gossip_model['pullInterval'] = '4s'\n config_peer_gossip_model['pullPeerNum'] = 3\n config_peer_gossip_model['requestStateInfoInterval'] = '4s'\n config_peer_gossip_model['publishStateInfoInterval'] = '4s'\n config_peer_gossip_model['stateInfoRetentionInterval'] = '0s'\n config_peer_gossip_model['publishCertPeriod'] = '10s'\n config_peer_gossip_model['skipBlockVerification'] = False\n config_peer_gossip_model['dialTimeout'] = '3s'\n config_peer_gossip_model['connTimeout'] = '2s'\n config_peer_gossip_model['recvBuffSize'] = 20\n config_peer_gossip_model['sendBuffSize'] = 200\n config_peer_gossip_model['digestWaitTime'] = '1s'\n config_peer_gossip_model['requestWaitTime'] = '1500ms'\n config_peer_gossip_model['responseWaitTime'] = '2s'\n config_peer_gossip_model['aliveTimeInterval'] = '5s'\n config_peer_gossip_model['aliveExpirationTimeout'] = '25s'\n config_peer_gossip_model['reconnectInterval'] = '25s'\n config_peer_gossip_model['election'] = config_peer_gossip_election_model\n config_peer_gossip_model['pvtData'] = config_peer_gossip_pvt_data_model\n config_peer_gossip_model['state'] = config_peer_gossip_state_model\n\n config_peer_authentication_model = {} # ConfigPeerAuthentication\n config_peer_authentication_model['timewindow'] = '15m'\n\n bccsp_sw_model = {} # BccspSW\n bccsp_sw_model['Hash'] = 'SHA2'\n bccsp_sw_model['Security'] = 256\n\n bccsp_pkc_s11_model = {} # BccspPKCS11\n bccsp_pkc_s11_model['Label'] = 'testString'\n bccsp_pkc_s11_model['Pin'] = 'testString'\n bccsp_pkc_s11_model['Hash'] = 'SHA2'\n bccsp_pkc_s11_model['Security'] = 
256\n\n bccsp_model = {} # Bccsp\n bccsp_model['Default'] = 'SW'\n bccsp_model['SW'] = bccsp_sw_model\n bccsp_model['PKCS11'] = bccsp_pkc_s11_model\n\n config_peer_client_model = {} # ConfigPeerClient\n config_peer_client_model['connTimeout'] = '2s'\n\n config_peer_deliveryclient_address_overrides_item_model = {} # ConfigPeerDeliveryclientAddressOverridesItem\n config_peer_deliveryclient_address_overrides_item_model['from'] = 'n3a3ec3-myorderer.ibp.us-south.containers.appdomain.cloud:7050'\n config_peer_deliveryclient_address_overrides_item_model['to'] = 'n3a3ec3-myorderer2.ibp.us-south.containers.appdomain.cloud:7050'\n config_peer_deliveryclient_address_overrides_item_model['caCertsFile'] = 'my-data/cert.pem'\n\n config_peer_deliveryclient_model = {} # ConfigPeerDeliveryclient\n config_peer_deliveryclient_model['reconnectTotalTimeThreshold'] = '60m'\n config_peer_deliveryclient_model['connTimeout'] = '2s'\n config_peer_deliveryclient_model['reConnectBackoffThreshold'] = '60m'\n config_peer_deliveryclient_model['addressOverrides'] = [config_peer_deliveryclient_address_overrides_item_model]\n\n config_peer_admin_service_model = {} # ConfigPeerAdminService\n config_peer_admin_service_model['listenAddress'] = '0.0.0.0:7051'\n\n config_peer_discovery_model = {} # ConfigPeerDiscovery\n config_peer_discovery_model['enabled'] = True\n config_peer_discovery_model['authCacheEnabled'] = True\n config_peer_discovery_model['authCacheMaxSize'] = 1000\n config_peer_discovery_model['authCachePurgeRetentionRatio'] = 0.75\n config_peer_discovery_model['orgMembersAllowedAccess'] = False\n\n config_peer_limits_concurrency_model = {} # ConfigPeerLimitsConcurrency\n config_peer_limits_concurrency_model['endorserService'] = 2500\n config_peer_limits_concurrency_model['deliverService'] = 2500\n\n config_peer_limits_model = {} # ConfigPeerLimits\n config_peer_limits_model['concurrency'] = config_peer_limits_concurrency_model\n\n config_peer_gateway_model = {} # ConfigPeerGateway\n config_peer_gateway_model['enabled'] = True\n\n config_peer_create_peer_model = {} # ConfigPeerCreatePeer\n config_peer_create_peer_model['id'] = 'john-doe'\n config_peer_create_peer_model['networkId'] = 'dev'\n config_peer_create_peer_model['keepalive'] = config_peer_keepalive_model\n config_peer_create_peer_model['gossip'] = config_peer_gossip_model\n config_peer_create_peer_model['authentication'] = config_peer_authentication_model\n config_peer_create_peer_model['BCCSP'] = bccsp_model\n config_peer_create_peer_model['client'] = config_peer_client_model\n config_peer_create_peer_model['deliveryclient'] = config_peer_deliveryclient_model\n config_peer_create_peer_model['adminService'] = config_peer_admin_service_model\n config_peer_create_peer_model['validatorPoolSize'] = 8\n config_peer_create_peer_model['discovery'] = config_peer_discovery_model\n config_peer_create_peer_model['limits'] = config_peer_limits_model\n config_peer_create_peer_model['gateway'] = config_peer_gateway_model\n\n config_peer_chaincode_golang_model = {} # ConfigPeerChaincodeGolang\n config_peer_chaincode_golang_model['dynamicLink'] = False\n\n config_peer_chaincode_external_builders_item_model = {} # ConfigPeerChaincodeExternalBuildersItem\n config_peer_chaincode_external_builders_item_model['path'] = '/path/to/directory'\n config_peer_chaincode_external_builders_item_model['name'] = 'descriptive-build-name'\n config_peer_chaincode_external_builders_item_model['environmentWhitelist'] = ['GOPROXY']\n\n config_peer_chaincode_system_model = {} # 
ConfigPeerChaincodeSystem\n config_peer_chaincode_system_model['cscc'] = True\n config_peer_chaincode_system_model['lscc'] = True\n config_peer_chaincode_system_model['escc'] = True\n config_peer_chaincode_system_model['vscc'] = True\n config_peer_chaincode_system_model['qscc'] = True\n\n config_peer_chaincode_logging_model = {} # ConfigPeerChaincodeLogging\n config_peer_chaincode_logging_model['level'] = 'info'\n config_peer_chaincode_logging_model['shim'] = 'warning'\n config_peer_chaincode_logging_model['format'] = '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}'\n\n config_peer_chaincode_model = {} # ConfigPeerChaincode\n config_peer_chaincode_model['golang'] = config_peer_chaincode_golang_model\n config_peer_chaincode_model['externalBuilders'] = [config_peer_chaincode_external_builders_item_model]\n config_peer_chaincode_model['installTimeout'] = '300s'\n config_peer_chaincode_model['startuptimeout'] = '300s'\n config_peer_chaincode_model['executetimeout'] = '30s'\n config_peer_chaincode_model['system'] = config_peer_chaincode_system_model\n config_peer_chaincode_model['logging'] = config_peer_chaincode_logging_model\n\n metrics_statsd_model = {} # MetricsStatsd\n metrics_statsd_model['network'] = 'udp'\n metrics_statsd_model['address'] = '127.0.0.1:8125'\n metrics_statsd_model['writeInterval'] = '10s'\n metrics_statsd_model['prefix'] = 'server'\n\n metrics_model = {} # Metrics\n metrics_model['provider'] = 'prometheus'\n metrics_model['statsd'] = metrics_statsd_model\n\n # Construct a json representation of a ConfigPeerCreate model\n config_peer_create_model_json = {}\n config_peer_create_model_json['peer'] = config_peer_create_peer_model\n config_peer_create_model_json['chaincode'] = config_peer_chaincode_model\n config_peer_create_model_json['metrics'] = metrics_model\n\n # Construct a model instance of ConfigPeerCreate by calling from_dict on the json representation\n config_peer_create_model = ConfigPeerCreate.from_dict(config_peer_create_model_json)\n assert config_peer_create_model != False\n\n # Construct a model instance of ConfigPeerCreate by calling from_dict on the json representation\n config_peer_create_model_dict = ConfigPeerCreate.from_dict(config_peer_create_model_json).__dict__\n config_peer_create_model2 = ConfigPeerCreate(**config_peer_create_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_create_model == config_peer_create_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_create_model_json2 = config_peer_create_model.to_dict()\n assert config_peer_create_model_json2 == config_peer_create_model_json", "title": "" }, { "docid": "108189a07518b9e54654db30043d0d99", "score": "0.6781152", "text": "def test_storage_object_serialization(self):\n\n # Construct a json representation of a StorageObject model\n storage_object_model_json = {}\n storage_object_model_json['size'] = '4GiB'\n storage_object_model_json['class'] = 'default'\n\n # Construct a model instance of StorageObject by calling from_dict on the json representation\n storage_object_model = StorageObject.from_dict(storage_object_model_json)\n assert storage_object_model != False\n\n # Construct a model instance of StorageObject by calling from_dict on the json representation\n storage_object_model_dict = StorageObject.from_dict(storage_object_model_json).__dict__\n storage_object_model2 = StorageObject(**storage_object_model_dict)\n\n # Verify the model instances are 
equivalent\n assert storage_object_model == storage_object_model2\n\n # Convert model instance back to dict and verify no loss of data\n storage_object_model_json2 = storage_object_model.to_dict()\n assert storage_object_model_json2 == storage_object_model_json", "title": "" }, { "docid": "0124b3c9e254490bfed8f3b1b2fba249", "score": "0.67791045", "text": "def test_config_peer_create_peer_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n config_peer_keepalive_client_model = {} # ConfigPeerKeepaliveClient\n config_peer_keepalive_client_model['interval'] = '60s'\n config_peer_keepalive_client_model['timeout'] = '20s'\n\n config_peer_keepalive_delivery_client_model = {} # ConfigPeerKeepaliveDeliveryClient\n config_peer_keepalive_delivery_client_model['interval'] = '60s'\n config_peer_keepalive_delivery_client_model['timeout'] = '20s'\n\n config_peer_keepalive_model = {} # ConfigPeerKeepalive\n config_peer_keepalive_model['minInterval'] = '60s'\n config_peer_keepalive_model['client'] = config_peer_keepalive_client_model\n config_peer_keepalive_model['deliveryClient'] = config_peer_keepalive_delivery_client_model\n\n config_peer_gossip_election_model = {} # ConfigPeerGossipElection\n config_peer_gossip_election_model['startupGracePeriod'] = '15s'\n config_peer_gossip_election_model['membershipSampleInterval'] = '1s'\n config_peer_gossip_election_model['leaderAliveThreshold'] = '10s'\n config_peer_gossip_election_model['leaderElectionDuration'] = '5s'\n\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model = {} # ConfigPeerGossipPvtDataImplicitCollectionDisseminationPolicy\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model['requiredPeerCount'] = 0\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model['maxPeerCount'] = 1\n\n config_peer_gossip_pvt_data_model = {} # ConfigPeerGossipPvtData\n config_peer_gossip_pvt_data_model['pullRetryThreshold'] = '60s'\n config_peer_gossip_pvt_data_model['transientstoreMaxBlockRetention'] = 1000\n config_peer_gossip_pvt_data_model['pushAckTimeout'] = '3s'\n config_peer_gossip_pvt_data_model['btlPullMargin'] = 10\n config_peer_gossip_pvt_data_model['reconcileBatchSize'] = 10\n config_peer_gossip_pvt_data_model['reconcileSleepInterval'] = '1m'\n config_peer_gossip_pvt_data_model['reconciliationEnabled'] = True\n config_peer_gossip_pvt_data_model['skipPullingInvalidTransactionsDuringCommit'] = False\n config_peer_gossip_pvt_data_model['implicitCollectionDisseminationPolicy'] = config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model\n\n config_peer_gossip_state_model = {} # ConfigPeerGossipState\n config_peer_gossip_state_model['enabled'] = True\n config_peer_gossip_state_model['checkInterval'] = '10s'\n config_peer_gossip_state_model['responseTimeout'] = '3s'\n config_peer_gossip_state_model['batchSize'] = 10\n config_peer_gossip_state_model['blockBufferSize'] = 100\n config_peer_gossip_state_model['maxRetries'] = 3\n\n config_peer_gossip_model = {} # ConfigPeerGossip\n config_peer_gossip_model['useLeaderElection'] = True\n config_peer_gossip_model['orgLeader'] = False\n config_peer_gossip_model['membershipTrackerInterval'] = '5s'\n config_peer_gossip_model['maxBlockCountToStore'] = 100\n config_peer_gossip_model['maxPropagationBurstLatency'] = '10ms'\n config_peer_gossip_model['maxPropagationBurstSize'] = 10\n config_peer_gossip_model['propagateIterations'] = 3\n config_peer_gossip_model['pullInterval'] = '4s'\n 
config_peer_gossip_model['pullPeerNum'] = 3\n config_peer_gossip_model['requestStateInfoInterval'] = '4s'\n config_peer_gossip_model['publishStateInfoInterval'] = '4s'\n config_peer_gossip_model['stateInfoRetentionInterval'] = '0s'\n config_peer_gossip_model['publishCertPeriod'] = '10s'\n config_peer_gossip_model['skipBlockVerification'] = False\n config_peer_gossip_model['dialTimeout'] = '3s'\n config_peer_gossip_model['connTimeout'] = '2s'\n config_peer_gossip_model['recvBuffSize'] = 20\n config_peer_gossip_model['sendBuffSize'] = 200\n config_peer_gossip_model['digestWaitTime'] = '1s'\n config_peer_gossip_model['requestWaitTime'] = '1500ms'\n config_peer_gossip_model['responseWaitTime'] = '2s'\n config_peer_gossip_model['aliveTimeInterval'] = '5s'\n config_peer_gossip_model['aliveExpirationTimeout'] = '25s'\n config_peer_gossip_model['reconnectInterval'] = '25s'\n config_peer_gossip_model['election'] = config_peer_gossip_election_model\n config_peer_gossip_model['pvtData'] = config_peer_gossip_pvt_data_model\n config_peer_gossip_model['state'] = config_peer_gossip_state_model\n\n config_peer_authentication_model = {} # ConfigPeerAuthentication\n config_peer_authentication_model['timewindow'] = '15m'\n\n bccsp_sw_model = {} # BccspSW\n bccsp_sw_model['Hash'] = 'SHA2'\n bccsp_sw_model['Security'] = 256\n\n bccsp_pkc_s11_model = {} # BccspPKCS11\n bccsp_pkc_s11_model['Label'] = 'testString'\n bccsp_pkc_s11_model['Pin'] = 'testString'\n bccsp_pkc_s11_model['Hash'] = 'SHA2'\n bccsp_pkc_s11_model['Security'] = 256\n\n bccsp_model = {} # Bccsp\n bccsp_model['Default'] = 'SW'\n bccsp_model['SW'] = bccsp_sw_model\n bccsp_model['PKCS11'] = bccsp_pkc_s11_model\n\n config_peer_client_model = {} # ConfigPeerClient\n config_peer_client_model['connTimeout'] = '2s'\n\n config_peer_deliveryclient_address_overrides_item_model = {} # ConfigPeerDeliveryclientAddressOverridesItem\n config_peer_deliveryclient_address_overrides_item_model['from'] = 'n3a3ec3-myorderer.ibp.us-south.containers.appdomain.cloud:7050'\n config_peer_deliveryclient_address_overrides_item_model['to'] = 'n3a3ec3-myorderer2.ibp.us-south.containers.appdomain.cloud:7050'\n config_peer_deliveryclient_address_overrides_item_model['caCertsFile'] = 'my-data/cert.pem'\n\n config_peer_deliveryclient_model = {} # ConfigPeerDeliveryclient\n config_peer_deliveryclient_model['reconnectTotalTimeThreshold'] = '60m'\n config_peer_deliveryclient_model['connTimeout'] = '2s'\n config_peer_deliveryclient_model['reConnectBackoffThreshold'] = '60m'\n config_peer_deliveryclient_model['addressOverrides'] = [config_peer_deliveryclient_address_overrides_item_model]\n\n config_peer_admin_service_model = {} # ConfigPeerAdminService\n config_peer_admin_service_model['listenAddress'] = '0.0.0.0:7051'\n\n config_peer_discovery_model = {} # ConfigPeerDiscovery\n config_peer_discovery_model['enabled'] = True\n config_peer_discovery_model['authCacheEnabled'] = True\n config_peer_discovery_model['authCacheMaxSize'] = 1000\n config_peer_discovery_model['authCachePurgeRetentionRatio'] = 0.75\n config_peer_discovery_model['orgMembersAllowedAccess'] = False\n\n config_peer_limits_concurrency_model = {} # ConfigPeerLimitsConcurrency\n config_peer_limits_concurrency_model['endorserService'] = 2500\n config_peer_limits_concurrency_model['deliverService'] = 2500\n\n config_peer_limits_model = {} # ConfigPeerLimits\n config_peer_limits_model['concurrency'] = config_peer_limits_concurrency_model\n\n config_peer_gateway_model = {} # ConfigPeerGateway\n 
config_peer_gateway_model['enabled'] = True\n\n # Construct a json representation of a ConfigPeerCreatePeer model\n config_peer_create_peer_model_json = {}\n config_peer_create_peer_model_json['id'] = 'john-doe'\n config_peer_create_peer_model_json['networkId'] = 'dev'\n config_peer_create_peer_model_json['keepalive'] = config_peer_keepalive_model\n config_peer_create_peer_model_json['gossip'] = config_peer_gossip_model\n config_peer_create_peer_model_json['authentication'] = config_peer_authentication_model\n config_peer_create_peer_model_json['BCCSP'] = bccsp_model\n config_peer_create_peer_model_json['client'] = config_peer_client_model\n config_peer_create_peer_model_json['deliveryclient'] = config_peer_deliveryclient_model\n config_peer_create_peer_model_json['adminService'] = config_peer_admin_service_model\n config_peer_create_peer_model_json['validatorPoolSize'] = 8\n config_peer_create_peer_model_json['discovery'] = config_peer_discovery_model\n config_peer_create_peer_model_json['limits'] = config_peer_limits_model\n config_peer_create_peer_model_json['gateway'] = config_peer_gateway_model\n\n # Construct a model instance of ConfigPeerCreatePeer by calling from_dict on the json representation\n config_peer_create_peer_model = ConfigPeerCreatePeer.from_dict(config_peer_create_peer_model_json)\n assert config_peer_create_peer_model != False\n\n # Construct a model instance of ConfigPeerCreatePeer by calling from_dict on the json representation\n config_peer_create_peer_model_dict = ConfigPeerCreatePeer.from_dict(config_peer_create_peer_model_json).__dict__\n config_peer_create_peer_model2 = ConfigPeerCreatePeer(**config_peer_create_peer_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_create_peer_model == config_peer_create_peer_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_create_peer_model_json2 = config_peer_create_peer_model.to_dict()\n assert config_peer_create_peer_model_json2 == config_peer_create_peer_model_json", "title": "" }, { "docid": "72889cc15f5d925a3d59ba8854f4071b", "score": "0.67574406", "text": "def test_msp_public_data_serialization(self):\n\n # Construct a json representation of a MspPublicData model\n msp_public_data_model_json = {}\n msp_public_data_model_json['msp_id'] = 'Org1'\n msp_public_data_model_json['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n msp_public_data_model_json['admins'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n msp_public_data_model_json['tls_root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n # Construct a model instance of MspPublicData by calling from_dict on the json representation\n msp_public_data_model = MspPublicData.from_dict(msp_public_data_model_json)\n assert msp_public_data_model != False\n\n # Construct a model instance of MspPublicData by calling from_dict on the json representation\n msp_public_data_model_dict = MspPublicData.from_dict(msp_public_data_model_json).__dict__\n msp_public_data_model2 = MspPublicData(**msp_public_data_model_dict)\n\n # Verify the model instances are equivalent\n assert msp_public_data_model == msp_public_data_model2\n\n # Convert model instance back to dict and verify no loss of data\n msp_public_data_model_json2 = 
msp_public_data_model.to_dict()\n assert msp_public_data_model_json2 == msp_public_data_model_json", "title": "" }, { "docid": "3171dbe832783f844ec76eb944254ff4", "score": "0.67234355", "text": "def test_config_peer_deliveryclient_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n config_peer_deliveryclient_address_overrides_item_model = {} # ConfigPeerDeliveryclientAddressOverridesItem\n config_peer_deliveryclient_address_overrides_item_model['from'] = 'n3a3ec3-myorderer.ibp.us-south.containers.appdomain.cloud:7050'\n config_peer_deliveryclient_address_overrides_item_model['to'] = 'n3a3ec3-myorderer2.ibp.us-south.containers.appdomain.cloud:7050'\n config_peer_deliveryclient_address_overrides_item_model['caCertsFile'] = 'my-data/cert.pem'\n\n # Construct a json representation of a ConfigPeerDeliveryclient model\n config_peer_deliveryclient_model_json = {}\n config_peer_deliveryclient_model_json['reconnectTotalTimeThreshold'] = '60m'\n config_peer_deliveryclient_model_json['connTimeout'] = '2s'\n config_peer_deliveryclient_model_json['reConnectBackoffThreshold'] = '60m'\n config_peer_deliveryclient_model_json['addressOverrides'] = [config_peer_deliveryclient_address_overrides_item_model]\n\n # Construct a model instance of ConfigPeerDeliveryclient by calling from_dict on the json representation\n config_peer_deliveryclient_model = ConfigPeerDeliveryclient.from_dict(config_peer_deliveryclient_model_json)\n assert config_peer_deliveryclient_model != False\n\n # Construct a model instance of ConfigPeerDeliveryclient by calling from_dict on the json representation\n config_peer_deliveryclient_model_dict = ConfigPeerDeliveryclient.from_dict(config_peer_deliveryclient_model_json).__dict__\n config_peer_deliveryclient_model2 = ConfigPeerDeliveryclient(**config_peer_deliveryclient_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_deliveryclient_model == config_peer_deliveryclient_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_deliveryclient_model_json2 = config_peer_deliveryclient_model.to_dict()\n assert config_peer_deliveryclient_model_json2 == config_peer_deliveryclient_model_json", "title": "" }, { "docid": "9d027e32c0d8503790d8475ff21a6f3d", "score": "0.6721263", "text": "def test_create_orderer_raft_body_resources_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n resource_requests_model = {} # ResourceRequests\n resource_requests_model['cpu'] = '100m'\n resource_requests_model['memory'] = '256MiB'\n\n resource_limits_model = {} # ResourceLimits\n resource_limits_model['cpu'] = '100m'\n resource_limits_model['memory'] = '256MiB'\n\n resource_object_model = {} # ResourceObject\n resource_object_model['requests'] = resource_requests_model\n resource_object_model['limits'] = resource_limits_model\n\n # Construct a json representation of a CreateOrdererRaftBodyResources model\n create_orderer_raft_body_resources_model_json = {}\n create_orderer_raft_body_resources_model_json['orderer'] = resource_object_model\n create_orderer_raft_body_resources_model_json['proxy'] = resource_object_model\n\n # Construct a model instance of CreateOrdererRaftBodyResources by calling from_dict on the json representation\n create_orderer_raft_body_resources_model = CreateOrdererRaftBodyResources.from_dict(create_orderer_raft_body_resources_model_json)\n assert create_orderer_raft_body_resources_model != False\n\n # Construct a 
model instance of CreateOrdererRaftBodyResources by calling from_dict on the json representation\n create_orderer_raft_body_resources_model_dict = CreateOrdererRaftBodyResources.from_dict(create_orderer_raft_body_resources_model_json).__dict__\n create_orderer_raft_body_resources_model2 = CreateOrdererRaftBodyResources(**create_orderer_raft_body_resources_model_dict)\n\n # Verify the model instances are equivalent\n assert create_orderer_raft_body_resources_model == create_orderer_raft_body_resources_model2\n\n # Convert model instance back to dict and verify no loss of data\n create_orderer_raft_body_resources_model_json2 = create_orderer_raft_body_resources_model.to_dict()\n assert create_orderer_raft_body_resources_model_json2 == create_orderer_raft_body_resources_model_json", "title": "" }, { "docid": "e15b7d7e4dbe787d9a96c20bdefa1cd4", "score": "0.6670905", "text": "def test_msp_response_serialization(self):\n\n # Construct a json representation of a MspResponse model\n msp_response_model_json = {}\n msp_response_model_json['id'] = 'component1'\n msp_response_model_json['type'] = 'fabric-peer'\n msp_response_model_json['display_name'] = 'My Peer'\n msp_response_model_json['msp_id'] = 'Org1'\n msp_response_model_json['timestamp'] = 1537262855753\n msp_response_model_json['tags'] = ['fabric-ca']\n msp_response_model_json['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n msp_response_model_json['intermediate_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkRhdGEgaGVyZSBpZiB0aGlzIHdhcyByZWFsCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K']\n msp_response_model_json['admins'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n msp_response_model_json['scheme_version'] = 'v1'\n msp_response_model_json['tls_root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n # Construct a model instance of MspResponse by calling from_dict on the json representation\n msp_response_model = MspResponse.from_dict(msp_response_model_json)\n assert msp_response_model != False\n\n # Construct a model instance of MspResponse by calling from_dict on the json representation\n msp_response_model_dict = MspResponse.from_dict(msp_response_model_json).__dict__\n msp_response_model2 = MspResponse(**msp_response_model_dict)\n\n # Verify the model instances are equivalent\n assert msp_response_model == msp_response_model2\n\n # Convert model instance back to dict and verify no loss of data\n msp_response_model_json2 = msp_response_model.to_dict()\n assert msp_response_model_json2 == msp_response_model_json", "title": "" }, { "docid": "6d560eb4be660683d4e7e47b97b60aeb", "score": "0.6656393", "text": "def test_v2_serialize_deserialize(self):\n # Build a transformer network to use within the BERT trainer. (Here, we use\n # a short sequence_length for convenience.)\n test_network = networks.BertEncoder(vocab_size=100, num_layers=2)\n\n # Create a BERT trainer with the created network. 
(Note that all the args\n # are different, so we can catch any serialization mismatches.)\n bert_trainer_model = pretrainer.MobileBERTEdgeTPUPretrainer(\n encoder_network=test_network)\n\n # Create another BERT trainer via serialization and deserialization.\n config = bert_trainer_model.get_config()\n new_bert_trainer_model = pretrainer.MobileBERTEdgeTPUPretrainer.from_config(\n config)\n\n # Validate that the config can be forced to JSON.\n _ = new_bert_trainer_model.to_json()\n\n # If the serialization was successful, the new config should match the old.\n self.assertAllEqual(bert_trainer_model.get_config(),\n new_bert_trainer_model.get_config())", "title": "" }, { "docid": "8b30fa722b7e9baec1a821404086a9cd", "score": "0.6609555", "text": "def test_orderer_response_storage_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n storage_object_model = {} # StorageObject\n storage_object_model['size'] = '4GiB'\n storage_object_model['class'] = 'default'\n\n # Construct a json representation of a OrdererResponseStorage model\n orderer_response_storage_model_json = {}\n orderer_response_storage_model_json['orderer'] = storage_object_model\n\n # Construct a model instance of OrdererResponseStorage by calling from_dict on the json representation\n orderer_response_storage_model = OrdererResponseStorage.from_dict(orderer_response_storage_model_json)\n assert orderer_response_storage_model != False\n\n # Construct a model instance of OrdererResponseStorage by calling from_dict on the json representation\n orderer_response_storage_model_dict = OrdererResponseStorage.from_dict(orderer_response_storage_model_json).__dict__\n orderer_response_storage_model2 = OrdererResponseStorage(**orderer_response_storage_model_dict)\n\n # Verify the model instances are equivalent\n assert orderer_response_storage_model == orderer_response_storage_model2\n\n # Convert model instance back to dict and verify no loss of data\n orderer_response_storage_model_json2 = orderer_response_storage_model.to_dict()\n assert orderer_response_storage_model_json2 == orderer_response_storage_model_json", "title": "" }, { "docid": "f481617fffe6cc1aec35574b7b5054cf", "score": "0.66069424", "text": "def test_config_peer_deliveryclient_address_overrides_item_serialization(self):\n\n # Construct a json representation of a ConfigPeerDeliveryclientAddressOverridesItem model\n config_peer_deliveryclient_address_overrides_item_model_json = {}\n config_peer_deliveryclient_address_overrides_item_model_json['from'] = 'n3a3ec3-myorderer.ibp.us-south.containers.appdomain.cloud:7050'\n config_peer_deliveryclient_address_overrides_item_model_json['to'] = 'n3a3ec3-myorderer2.ibp.us-south.containers.appdomain.cloud:7050'\n config_peer_deliveryclient_address_overrides_item_model_json['caCertsFile'] = 'my-data/cert.pem'\n\n # Construct a model instance of ConfigPeerDeliveryclientAddressOverridesItem by calling from_dict on the json representation\n config_peer_deliveryclient_address_overrides_item_model = ConfigPeerDeliveryclientAddressOverridesItem.from_dict(config_peer_deliveryclient_address_overrides_item_model_json)\n assert config_peer_deliveryclient_address_overrides_item_model != False\n\n # Construct a model instance of ConfigPeerDeliveryclientAddressOverridesItem by calling from_dict on the json representation\n config_peer_deliveryclient_address_overrides_item_model_dict = 
ConfigPeerDeliveryclientAddressOverridesItem.from_dict(config_peer_deliveryclient_address_overrides_item_model_json).__dict__\n config_peer_deliveryclient_address_overrides_item_model2 = ConfigPeerDeliveryclientAddressOverridesItem(**config_peer_deliveryclient_address_overrides_item_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_deliveryclient_address_overrides_item_model == config_peer_deliveryclient_address_overrides_item_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_deliveryclient_address_overrides_item_model_json2 = config_peer_deliveryclient_address_overrides_item_model.to_dict()\n assert config_peer_deliveryclient_address_overrides_item_model_json2 == config_peer_deliveryclient_address_overrides_item_model_json", "title": "" }, { "docid": "0ebc71efc2920dfe64b9f77a0eef7af8", "score": "0.66046816", "text": "def test_serialize_deserialize(self):\n tempOut = 'temp.p'\n sys.setrecursionlimit(10000) # this is necessary for serialisation\n cryptoGraph.serialize(tempOut, self.g)\n\n readInObject = cryptoGraph.deserialize(tempOut)\n\n # Just check the type\n # Would be useful to implement the comparison methods __eq__() and __ne__() in the CryptoGraph class\n self.assertIsInstance(readInObject, cryptoGraph.CryptoGraph)\n\n # delete the temp file and reset the recursion limit\n os.remove(tempOut)\n sys.setrecursionlimit(1500)", "title": "" }, { "docid": "da928dfe12b8e7779c114b85ca870f53", "score": "0.6520344", "text": "def test_config_peer_update_peer_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n config_peer_keepalive_client_model = {} # ConfigPeerKeepaliveClient\n config_peer_keepalive_client_model['interval'] = '60s'\n config_peer_keepalive_client_model['timeout'] = '20s'\n\n config_peer_keepalive_delivery_client_model = {} # ConfigPeerKeepaliveDeliveryClient\n config_peer_keepalive_delivery_client_model['interval'] = '60s'\n config_peer_keepalive_delivery_client_model['timeout'] = '20s'\n\n config_peer_keepalive_model = {} # ConfigPeerKeepalive\n config_peer_keepalive_model['minInterval'] = '60s'\n config_peer_keepalive_model['client'] = config_peer_keepalive_client_model\n config_peer_keepalive_model['deliveryClient'] = config_peer_keepalive_delivery_client_model\n\n config_peer_gossip_election_model = {} # ConfigPeerGossipElection\n config_peer_gossip_election_model['startupGracePeriod'] = '15s'\n config_peer_gossip_election_model['membershipSampleInterval'] = '1s'\n config_peer_gossip_election_model['leaderAliveThreshold'] = '10s'\n config_peer_gossip_election_model['leaderElectionDuration'] = '5s'\n\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model = {} # ConfigPeerGossipPvtDataImplicitCollectionDisseminationPolicy\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model['requiredPeerCount'] = 0\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model['maxPeerCount'] = 1\n\n config_peer_gossip_pvt_data_model = {} # ConfigPeerGossipPvtData\n config_peer_gossip_pvt_data_model['pullRetryThreshold'] = '60s'\n config_peer_gossip_pvt_data_model['transientstoreMaxBlockRetention'] = 1000\n config_peer_gossip_pvt_data_model['pushAckTimeout'] = '3s'\n config_peer_gossip_pvt_data_model['btlPullMargin'] = 10\n config_peer_gossip_pvt_data_model['reconcileBatchSize'] = 10\n config_peer_gossip_pvt_data_model['reconcileSleepInterval'] = '1m'\n 
config_peer_gossip_pvt_data_model['reconciliationEnabled'] = True\n config_peer_gossip_pvt_data_model['skipPullingInvalidTransactionsDuringCommit'] = False\n config_peer_gossip_pvt_data_model['implicitCollectionDisseminationPolicy'] = config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model\n\n config_peer_gossip_state_model = {} # ConfigPeerGossipState\n config_peer_gossip_state_model['enabled'] = True\n config_peer_gossip_state_model['checkInterval'] = '10s'\n config_peer_gossip_state_model['responseTimeout'] = '3s'\n config_peer_gossip_state_model['batchSize'] = 10\n config_peer_gossip_state_model['blockBufferSize'] = 100\n config_peer_gossip_state_model['maxRetries'] = 3\n\n config_peer_gossip_model = {} # ConfigPeerGossip\n config_peer_gossip_model['useLeaderElection'] = True\n config_peer_gossip_model['orgLeader'] = False\n config_peer_gossip_model['membershipTrackerInterval'] = '5s'\n config_peer_gossip_model['maxBlockCountToStore'] = 100\n config_peer_gossip_model['maxPropagationBurstLatency'] = '10ms'\n config_peer_gossip_model['maxPropagationBurstSize'] = 10\n config_peer_gossip_model['propagateIterations'] = 3\n config_peer_gossip_model['pullInterval'] = '4s'\n config_peer_gossip_model['pullPeerNum'] = 3\n config_peer_gossip_model['requestStateInfoInterval'] = '4s'\n config_peer_gossip_model['publishStateInfoInterval'] = '4s'\n config_peer_gossip_model['stateInfoRetentionInterval'] = '0s'\n config_peer_gossip_model['publishCertPeriod'] = '10s'\n config_peer_gossip_model['skipBlockVerification'] = False\n config_peer_gossip_model['dialTimeout'] = '3s'\n config_peer_gossip_model['connTimeout'] = '2s'\n config_peer_gossip_model['recvBuffSize'] = 20\n config_peer_gossip_model['sendBuffSize'] = 200\n config_peer_gossip_model['digestWaitTime'] = '1s'\n config_peer_gossip_model['requestWaitTime'] = '1500ms'\n config_peer_gossip_model['responseWaitTime'] = '2s'\n config_peer_gossip_model['aliveTimeInterval'] = '5s'\n config_peer_gossip_model['aliveExpirationTimeout'] = '25s'\n config_peer_gossip_model['reconnectInterval'] = '25s'\n config_peer_gossip_model['election'] = config_peer_gossip_election_model\n config_peer_gossip_model['pvtData'] = config_peer_gossip_pvt_data_model\n config_peer_gossip_model['state'] = config_peer_gossip_state_model\n\n config_peer_authentication_model = {} # ConfigPeerAuthentication\n config_peer_authentication_model['timewindow'] = '15m'\n\n config_peer_client_model = {} # ConfigPeerClient\n config_peer_client_model['connTimeout'] = '2s'\n\n config_peer_deliveryclient_address_overrides_item_model = {} # ConfigPeerDeliveryclientAddressOverridesItem\n config_peer_deliveryclient_address_overrides_item_model['from'] = 'n3a3ec3-myorderer.ibp.us-south.containers.appdomain.cloud:7050'\n config_peer_deliveryclient_address_overrides_item_model['to'] = 'n3a3ec3-myorderer2.ibp.us-south.containers.appdomain.cloud:7050'\n config_peer_deliveryclient_address_overrides_item_model['caCertsFile'] = 'my-data/cert.pem'\n\n config_peer_deliveryclient_model = {} # ConfigPeerDeliveryclient\n config_peer_deliveryclient_model['reconnectTotalTimeThreshold'] = '60m'\n config_peer_deliveryclient_model['connTimeout'] = '2s'\n config_peer_deliveryclient_model['reConnectBackoffThreshold'] = '60m'\n config_peer_deliveryclient_model['addressOverrides'] = [config_peer_deliveryclient_address_overrides_item_model]\n\n config_peer_admin_service_model = {} # ConfigPeerAdminService\n config_peer_admin_service_model['listenAddress'] = '0.0.0.0:7051'\n\n 
config_peer_discovery_model = {} # ConfigPeerDiscovery\n config_peer_discovery_model['enabled'] = True\n config_peer_discovery_model['authCacheEnabled'] = True\n config_peer_discovery_model['authCacheMaxSize'] = 1000\n config_peer_discovery_model['authCachePurgeRetentionRatio'] = 0.75\n config_peer_discovery_model['orgMembersAllowedAccess'] = False\n\n config_peer_limits_concurrency_model = {} # ConfigPeerLimitsConcurrency\n config_peer_limits_concurrency_model['endorserService'] = 2500\n config_peer_limits_concurrency_model['deliverService'] = 2500\n\n config_peer_limits_model = {} # ConfigPeerLimits\n config_peer_limits_model['concurrency'] = config_peer_limits_concurrency_model\n\n config_peer_gateway_model = {} # ConfigPeerGateway\n config_peer_gateway_model['enabled'] = True\n\n # Construct a json representation of a ConfigPeerUpdatePeer model\n config_peer_update_peer_model_json = {}\n config_peer_update_peer_model_json['id'] = 'john-doe'\n config_peer_update_peer_model_json['networkId'] = 'dev'\n config_peer_update_peer_model_json['keepalive'] = config_peer_keepalive_model\n config_peer_update_peer_model_json['gossip'] = config_peer_gossip_model\n config_peer_update_peer_model_json['authentication'] = config_peer_authentication_model\n config_peer_update_peer_model_json['client'] = config_peer_client_model\n config_peer_update_peer_model_json['deliveryclient'] = config_peer_deliveryclient_model\n config_peer_update_peer_model_json['adminService'] = config_peer_admin_service_model\n config_peer_update_peer_model_json['validatorPoolSize'] = 8\n config_peer_update_peer_model_json['discovery'] = config_peer_discovery_model\n config_peer_update_peer_model_json['limits'] = config_peer_limits_model\n config_peer_update_peer_model_json['gateway'] = config_peer_gateway_model\n\n # Construct a model instance of ConfigPeerUpdatePeer by calling from_dict on the json representation\n config_peer_update_peer_model = ConfigPeerUpdatePeer.from_dict(config_peer_update_peer_model_json)\n assert config_peer_update_peer_model != False\n\n # Construct a model instance of ConfigPeerUpdatePeer by calling from_dict on the json representation\n config_peer_update_peer_model_dict = ConfigPeerUpdatePeer.from_dict(config_peer_update_peer_model_json).__dict__\n config_peer_update_peer_model2 = ConfigPeerUpdatePeer(**config_peer_update_peer_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_update_peer_model == config_peer_update_peer_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_update_peer_model_json2 = config_peer_update_peer_model.to_dict()\n assert config_peer_update_peer_model_json2 == config_peer_update_peer_model_json", "title": "" }, { "docid": "48fee40096bb0c8f86b4b9b55470b5f6", "score": "0.6500846", "text": "def test_config_peer_discovery_serialization(self):\n\n # Construct a json representation of a ConfigPeerDiscovery model\n config_peer_discovery_model_json = {}\n config_peer_discovery_model_json['enabled'] = True\n config_peer_discovery_model_json['authCacheEnabled'] = True\n config_peer_discovery_model_json['authCacheMaxSize'] = 1000\n config_peer_discovery_model_json['authCachePurgeRetentionRatio'] = 0.75\n config_peer_discovery_model_json['orgMembersAllowedAccess'] = False\n\n # Construct a model instance of ConfigPeerDiscovery by calling from_dict on the json representation\n config_peer_discovery_model = ConfigPeerDiscovery.from_dict(config_peer_discovery_model_json)\n assert config_peer_discovery_model != False\n\n # 
Construct a model instance of ConfigPeerDiscovery by calling from_dict on the json representation\n config_peer_discovery_model_dict = ConfigPeerDiscovery.from_dict(config_peer_discovery_model_json).__dict__\n config_peer_discovery_model2 = ConfigPeerDiscovery(**config_peer_discovery_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_discovery_model == config_peer_discovery_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_discovery_model_json2 = config_peer_discovery_model.to_dict()\n assert config_peer_discovery_model_json2 == config_peer_discovery_model_json", "title": "" }, { "docid": "001ad3d2db0ac259c39a74910e202c35", "score": "0.64913267", "text": "def test_config_peer_chaincode_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n config_peer_chaincode_golang_model = {} # ConfigPeerChaincodeGolang\n config_peer_chaincode_golang_model['dynamicLink'] = False\n\n config_peer_chaincode_external_builders_item_model = {} # ConfigPeerChaincodeExternalBuildersItem\n config_peer_chaincode_external_builders_item_model['path'] = '/path/to/directory'\n config_peer_chaincode_external_builders_item_model['name'] = 'descriptive-build-name'\n config_peer_chaincode_external_builders_item_model['environmentWhitelist'] = ['GOPROXY']\n\n config_peer_chaincode_system_model = {} # ConfigPeerChaincodeSystem\n config_peer_chaincode_system_model['cscc'] = True\n config_peer_chaincode_system_model['lscc'] = True\n config_peer_chaincode_system_model['escc'] = True\n config_peer_chaincode_system_model['vscc'] = True\n config_peer_chaincode_system_model['qscc'] = True\n\n config_peer_chaincode_logging_model = {} # ConfigPeerChaincodeLogging\n config_peer_chaincode_logging_model['level'] = 'info'\n config_peer_chaincode_logging_model['shim'] = 'warning'\n config_peer_chaincode_logging_model['format'] = '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}'\n\n # Construct a json representation of a ConfigPeerChaincode model\n config_peer_chaincode_model_json = {}\n config_peer_chaincode_model_json['golang'] = config_peer_chaincode_golang_model\n config_peer_chaincode_model_json['externalBuilders'] = [config_peer_chaincode_external_builders_item_model]\n config_peer_chaincode_model_json['installTimeout'] = '300s'\n config_peer_chaincode_model_json['startuptimeout'] = '300s'\n config_peer_chaincode_model_json['executetimeout'] = '30s'\n config_peer_chaincode_model_json['system'] = config_peer_chaincode_system_model\n config_peer_chaincode_model_json['logging'] = config_peer_chaincode_logging_model\n\n # Construct a model instance of ConfigPeerChaincode by calling from_dict on the json representation\n config_peer_chaincode_model = ConfigPeerChaincode.from_dict(config_peer_chaincode_model_json)\n assert config_peer_chaincode_model != False\n\n # Construct a model instance of ConfigPeerChaincode by calling from_dict on the json representation\n config_peer_chaincode_model_dict = ConfigPeerChaincode.from_dict(config_peer_chaincode_model_json).__dict__\n config_peer_chaincode_model2 = ConfigPeerChaincode(**config_peer_chaincode_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_chaincode_model == config_peer_chaincode_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_chaincode_model_json2 = config_peer_chaincode_model.to_dict()\n assert 
config_peer_chaincode_model_json2 == config_peer_chaincode_model_json", "title": "" }, { "docid": "3e6152544a3b3348436a9613b150cb03", "score": "0.6489201", "text": "def test_config_peer_update_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n config_peer_keepalive_client_model = {} # ConfigPeerKeepaliveClient\n config_peer_keepalive_client_model['interval'] = '60s'\n config_peer_keepalive_client_model['timeout'] = '20s'\n\n config_peer_keepalive_delivery_client_model = {} # ConfigPeerKeepaliveDeliveryClient\n config_peer_keepalive_delivery_client_model['interval'] = '60s'\n config_peer_keepalive_delivery_client_model['timeout'] = '20s'\n\n config_peer_keepalive_model = {} # ConfigPeerKeepalive\n config_peer_keepalive_model['minInterval'] = '60s'\n config_peer_keepalive_model['client'] = config_peer_keepalive_client_model\n config_peer_keepalive_model['deliveryClient'] = config_peer_keepalive_delivery_client_model\n\n config_peer_gossip_election_model = {} # ConfigPeerGossipElection\n config_peer_gossip_election_model['startupGracePeriod'] = '15s'\n config_peer_gossip_election_model['membershipSampleInterval'] = '1s'\n config_peer_gossip_election_model['leaderAliveThreshold'] = '10s'\n config_peer_gossip_election_model['leaderElectionDuration'] = '5s'\n\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model = {} # ConfigPeerGossipPvtDataImplicitCollectionDisseminationPolicy\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model['requiredPeerCount'] = 0\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model['maxPeerCount'] = 1\n\n config_peer_gossip_pvt_data_model = {} # ConfigPeerGossipPvtData\n config_peer_gossip_pvt_data_model['pullRetryThreshold'] = '60s'\n config_peer_gossip_pvt_data_model['transientstoreMaxBlockRetention'] = 1000\n config_peer_gossip_pvt_data_model['pushAckTimeout'] = '3s'\n config_peer_gossip_pvt_data_model['btlPullMargin'] = 10\n config_peer_gossip_pvt_data_model['reconcileBatchSize'] = 10\n config_peer_gossip_pvt_data_model['reconcileSleepInterval'] = '1m'\n config_peer_gossip_pvt_data_model['reconciliationEnabled'] = True\n config_peer_gossip_pvt_data_model['skipPullingInvalidTransactionsDuringCommit'] = False\n config_peer_gossip_pvt_data_model['implicitCollectionDisseminationPolicy'] = config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model\n\n config_peer_gossip_state_model = {} # ConfigPeerGossipState\n config_peer_gossip_state_model['enabled'] = True\n config_peer_gossip_state_model['checkInterval'] = '10s'\n config_peer_gossip_state_model['responseTimeout'] = '3s'\n config_peer_gossip_state_model['batchSize'] = 10\n config_peer_gossip_state_model['blockBufferSize'] = 100\n config_peer_gossip_state_model['maxRetries'] = 3\n\n config_peer_gossip_model = {} # ConfigPeerGossip\n config_peer_gossip_model['useLeaderElection'] = True\n config_peer_gossip_model['orgLeader'] = False\n config_peer_gossip_model['membershipTrackerInterval'] = '5s'\n config_peer_gossip_model['maxBlockCountToStore'] = 100\n config_peer_gossip_model['maxPropagationBurstLatency'] = '10ms'\n config_peer_gossip_model['maxPropagationBurstSize'] = 10\n config_peer_gossip_model['propagateIterations'] = 3\n config_peer_gossip_model['pullInterval'] = '4s'\n config_peer_gossip_model['pullPeerNum'] = 3\n config_peer_gossip_model['requestStateInfoInterval'] = '4s'\n config_peer_gossip_model['publishStateInfoInterval'] = '4s'\n 
config_peer_gossip_model['stateInfoRetentionInterval'] = '0s'\n config_peer_gossip_model['publishCertPeriod'] = '10s'\n config_peer_gossip_model['skipBlockVerification'] = False\n config_peer_gossip_model['dialTimeout'] = '3s'\n config_peer_gossip_model['connTimeout'] = '2s'\n config_peer_gossip_model['recvBuffSize'] = 20\n config_peer_gossip_model['sendBuffSize'] = 200\n config_peer_gossip_model['digestWaitTime'] = '1s'\n config_peer_gossip_model['requestWaitTime'] = '1500ms'\n config_peer_gossip_model['responseWaitTime'] = '2s'\n config_peer_gossip_model['aliveTimeInterval'] = '5s'\n config_peer_gossip_model['aliveExpirationTimeout'] = '25s'\n config_peer_gossip_model['reconnectInterval'] = '25s'\n config_peer_gossip_model['election'] = config_peer_gossip_election_model\n config_peer_gossip_model['pvtData'] = config_peer_gossip_pvt_data_model\n config_peer_gossip_model['state'] = config_peer_gossip_state_model\n\n config_peer_authentication_model = {} # ConfigPeerAuthentication\n config_peer_authentication_model['timewindow'] = '15m'\n\n config_peer_client_model = {} # ConfigPeerClient\n config_peer_client_model['connTimeout'] = '2s'\n\n config_peer_deliveryclient_address_overrides_item_model = {} # ConfigPeerDeliveryclientAddressOverridesItem\n config_peer_deliveryclient_address_overrides_item_model['from'] = 'n3a3ec3-myorderer.ibp.us-south.containers.appdomain.cloud:7050'\n config_peer_deliveryclient_address_overrides_item_model['to'] = 'n3a3ec3-myorderer2.ibp.us-south.containers.appdomain.cloud:7050'\n config_peer_deliveryclient_address_overrides_item_model['caCertsFile'] = 'my-data/cert.pem'\n\n config_peer_deliveryclient_model = {} # ConfigPeerDeliveryclient\n config_peer_deliveryclient_model['reconnectTotalTimeThreshold'] = '60m'\n config_peer_deliveryclient_model['connTimeout'] = '2s'\n config_peer_deliveryclient_model['reConnectBackoffThreshold'] = '60m'\n config_peer_deliveryclient_model['addressOverrides'] = [config_peer_deliveryclient_address_overrides_item_model]\n\n config_peer_admin_service_model = {} # ConfigPeerAdminService\n config_peer_admin_service_model['listenAddress'] = '0.0.0.0:7051'\n\n config_peer_discovery_model = {} # ConfigPeerDiscovery\n config_peer_discovery_model['enabled'] = True\n config_peer_discovery_model['authCacheEnabled'] = True\n config_peer_discovery_model['authCacheMaxSize'] = 1000\n config_peer_discovery_model['authCachePurgeRetentionRatio'] = 0.75\n config_peer_discovery_model['orgMembersAllowedAccess'] = False\n\n config_peer_limits_concurrency_model = {} # ConfigPeerLimitsConcurrency\n config_peer_limits_concurrency_model['endorserService'] = 2500\n config_peer_limits_concurrency_model['deliverService'] = 2500\n\n config_peer_limits_model = {} # ConfigPeerLimits\n config_peer_limits_model['concurrency'] = config_peer_limits_concurrency_model\n\n config_peer_gateway_model = {} # ConfigPeerGateway\n config_peer_gateway_model['enabled'] = True\n\n config_peer_update_peer_model = {} # ConfigPeerUpdatePeer\n config_peer_update_peer_model['id'] = 'john-doe'\n config_peer_update_peer_model['networkId'] = 'dev'\n config_peer_update_peer_model['keepalive'] = config_peer_keepalive_model\n config_peer_update_peer_model['gossip'] = config_peer_gossip_model\n config_peer_update_peer_model['authentication'] = config_peer_authentication_model\n config_peer_update_peer_model['client'] = config_peer_client_model\n config_peer_update_peer_model['deliveryclient'] = config_peer_deliveryclient_model\n config_peer_update_peer_model['adminService'] = 
config_peer_admin_service_model\n config_peer_update_peer_model['validatorPoolSize'] = 8\n config_peer_update_peer_model['discovery'] = config_peer_discovery_model\n config_peer_update_peer_model['limits'] = config_peer_limits_model\n config_peer_update_peer_model['gateway'] = config_peer_gateway_model\n\n config_peer_chaincode_golang_model = {} # ConfigPeerChaincodeGolang\n config_peer_chaincode_golang_model['dynamicLink'] = False\n\n config_peer_chaincode_external_builders_item_model = {} # ConfigPeerChaincodeExternalBuildersItem\n config_peer_chaincode_external_builders_item_model['path'] = '/path/to/directory'\n config_peer_chaincode_external_builders_item_model['name'] = 'descriptive-build-name'\n config_peer_chaincode_external_builders_item_model['environmentWhitelist'] = ['GOPROXY']\n\n config_peer_chaincode_system_model = {} # ConfigPeerChaincodeSystem\n config_peer_chaincode_system_model['cscc'] = True\n config_peer_chaincode_system_model['lscc'] = True\n config_peer_chaincode_system_model['escc'] = True\n config_peer_chaincode_system_model['vscc'] = True\n config_peer_chaincode_system_model['qscc'] = True\n\n config_peer_chaincode_logging_model = {} # ConfigPeerChaincodeLogging\n config_peer_chaincode_logging_model['level'] = 'info'\n config_peer_chaincode_logging_model['shim'] = 'warning'\n config_peer_chaincode_logging_model['format'] = '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}'\n\n config_peer_chaincode_model = {} # ConfigPeerChaincode\n config_peer_chaincode_model['golang'] = config_peer_chaincode_golang_model\n config_peer_chaincode_model['externalBuilders'] = [config_peer_chaincode_external_builders_item_model]\n config_peer_chaincode_model['installTimeout'] = '300s'\n config_peer_chaincode_model['startuptimeout'] = '300s'\n config_peer_chaincode_model['executetimeout'] = '30s'\n config_peer_chaincode_model['system'] = config_peer_chaincode_system_model\n config_peer_chaincode_model['logging'] = config_peer_chaincode_logging_model\n\n metrics_statsd_model = {} # MetricsStatsd\n metrics_statsd_model['network'] = 'udp'\n metrics_statsd_model['address'] = '127.0.0.1:8125'\n metrics_statsd_model['writeInterval'] = '10s'\n metrics_statsd_model['prefix'] = 'server'\n\n metrics_model = {} # Metrics\n metrics_model['provider'] = 'prometheus'\n metrics_model['statsd'] = metrics_statsd_model\n\n # Construct a json representation of a ConfigPeerUpdate model\n config_peer_update_model_json = {}\n config_peer_update_model_json['peer'] = config_peer_update_peer_model\n config_peer_update_model_json['chaincode'] = config_peer_chaincode_model\n config_peer_update_model_json['metrics'] = metrics_model\n\n # Construct a model instance of ConfigPeerUpdate by calling from_dict on the json representation\n config_peer_update_model = ConfigPeerUpdate.from_dict(config_peer_update_model_json)\n assert config_peer_update_model != False\n\n # Construct a model instance of ConfigPeerUpdate by calling from_dict on the json representation\n config_peer_update_model_dict = ConfigPeerUpdate.from_dict(config_peer_update_model_json).__dict__\n config_peer_update_model2 = ConfigPeerUpdate(**config_peer_update_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_update_model == config_peer_update_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_update_model_json2 = config_peer_update_model.to_dict()\n assert config_peer_update_model_json2 == 
config_peer_update_model_json", "title": "" }, { "docid": "1f0a20c47a1ffdc462f8e0d6d6dc85d6", "score": "0.64313537", "text": "def test_config_peer_chaincode_system_serialization(self):\n\n # Construct a json representation of a ConfigPeerChaincodeSystem model\n config_peer_chaincode_system_model_json = {}\n config_peer_chaincode_system_model_json['cscc'] = True\n config_peer_chaincode_system_model_json['lscc'] = True\n config_peer_chaincode_system_model_json['escc'] = True\n config_peer_chaincode_system_model_json['vscc'] = True\n config_peer_chaincode_system_model_json['qscc'] = True\n\n # Construct a model instance of ConfigPeerChaincodeSystem by calling from_dict on the json representation\n config_peer_chaincode_system_model = ConfigPeerChaincodeSystem.from_dict(config_peer_chaincode_system_model_json)\n assert config_peer_chaincode_system_model != False\n\n # Construct a model instance of ConfigPeerChaincodeSystem by calling from_dict on the json representation\n config_peer_chaincode_system_model_dict = ConfigPeerChaincodeSystem.from_dict(config_peer_chaincode_system_model_json).__dict__\n config_peer_chaincode_system_model2 = ConfigPeerChaincodeSystem(**config_peer_chaincode_system_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_chaincode_system_model == config_peer_chaincode_system_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_chaincode_system_model_json2 = config_peer_chaincode_system_model.to_dict()\n assert config_peer_chaincode_system_model_json2 == config_peer_chaincode_system_model_json", "title": "" }, { "docid": "5a71963835027f6bae7a11cca3fb4476", "score": "0.6378603", "text": "def test_config_peer_chaincode_external_builders_item_serialization(self):\n\n # Construct a json representation of a ConfigPeerChaincodeExternalBuildersItem model\n config_peer_chaincode_external_builders_item_model_json = {}\n config_peer_chaincode_external_builders_item_model_json['path'] = '/path/to/directory'\n config_peer_chaincode_external_builders_item_model_json['name'] = 'descriptive-build-name'\n config_peer_chaincode_external_builders_item_model_json['environmentWhitelist'] = ['GOPROXY']\n\n # Construct a model instance of ConfigPeerChaincodeExternalBuildersItem by calling from_dict on the json representation\n config_peer_chaincode_external_builders_item_model = ConfigPeerChaincodeExternalBuildersItem.from_dict(config_peer_chaincode_external_builders_item_model_json)\n assert config_peer_chaincode_external_builders_item_model != False\n\n # Construct a model instance of ConfigPeerChaincodeExternalBuildersItem by calling from_dict on the json representation\n config_peer_chaincode_external_builders_item_model_dict = ConfigPeerChaincodeExternalBuildersItem.from_dict(config_peer_chaincode_external_builders_item_model_json).__dict__\n config_peer_chaincode_external_builders_item_model2 = ConfigPeerChaincodeExternalBuildersItem(**config_peer_chaincode_external_builders_item_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_chaincode_external_builders_item_model == config_peer_chaincode_external_builders_item_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_chaincode_external_builders_item_model_json2 = config_peer_chaincode_external_builders_item_model.to_dict()\n assert config_peer_chaincode_external_builders_item_model_json2 == config_peer_chaincode_external_builders_item_model_json", "title": "" }, { "docid": 
"62b3d8274b2d7936950b9d665d4921ea", "score": "0.6372804", "text": "def test_roundtrip_serializable(self):\n exp = Tphi([0], [1], [2])\n self.assertRoundTripSerializable(exp)\n exp = Tphi([0], [1], [2], \"hahn\", 3)\n self.assertRoundTripSerializable(exp)\n exp = Tphi([0], [1], [2], \"ramsey\", 0)\n self.assertRoundTripSerializable(exp)", "title": "" }, { "docid": "92a819a06d9e7ce14f182ac6b10bd22b", "score": "0.63707054", "text": "def test_config_peer_gossip_pvt_data_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model = {} # ConfigPeerGossipPvtDataImplicitCollectionDisseminationPolicy\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model['requiredPeerCount'] = 0\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model['maxPeerCount'] = 1\n\n # Construct a json representation of a ConfigPeerGossipPvtData model\n config_peer_gossip_pvt_data_model_json = {}\n config_peer_gossip_pvt_data_model_json['pullRetryThreshold'] = '60s'\n config_peer_gossip_pvt_data_model_json['transientstoreMaxBlockRetention'] = 1000\n config_peer_gossip_pvt_data_model_json['pushAckTimeout'] = '3s'\n config_peer_gossip_pvt_data_model_json['btlPullMargin'] = 10\n config_peer_gossip_pvt_data_model_json['reconcileBatchSize'] = 10\n config_peer_gossip_pvt_data_model_json['reconcileSleepInterval'] = '1m'\n config_peer_gossip_pvt_data_model_json['reconciliationEnabled'] = True\n config_peer_gossip_pvt_data_model_json['skipPullingInvalidTransactionsDuringCommit'] = False\n config_peer_gossip_pvt_data_model_json['implicitCollectionDisseminationPolicy'] = config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model\n\n # Construct a model instance of ConfigPeerGossipPvtData by calling from_dict on the json representation\n config_peer_gossip_pvt_data_model = ConfigPeerGossipPvtData.from_dict(config_peer_gossip_pvt_data_model_json)\n assert config_peer_gossip_pvt_data_model != False\n\n # Construct a model instance of ConfigPeerGossipPvtData by calling from_dict on the json representation\n config_peer_gossip_pvt_data_model_dict = ConfigPeerGossipPvtData.from_dict(config_peer_gossip_pvt_data_model_json).__dict__\n config_peer_gossip_pvt_data_model2 = ConfigPeerGossipPvtData(**config_peer_gossip_pvt_data_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_gossip_pvt_data_model == config_peer_gossip_pvt_data_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_gossip_pvt_data_model_json2 = config_peer_gossip_pvt_data_model.to_dict()\n assert config_peer_gossip_pvt_data_model_json2 == config_peer_gossip_pvt_data_model_json", "title": "" }, { "docid": "4951da8865eece617f72e5d84a9b323f", "score": "0.6342068", "text": "def test_config_peer_chaincode_golang_serialization(self):\n\n # Construct a json representation of a ConfigPeerChaincodeGolang model\n config_peer_chaincode_golang_model_json = {}\n config_peer_chaincode_golang_model_json['dynamicLink'] = False\n\n # Construct a model instance of ConfigPeerChaincodeGolang by calling from_dict on the json representation\n config_peer_chaincode_golang_model = ConfigPeerChaincodeGolang.from_dict(config_peer_chaincode_golang_model_json)\n assert config_peer_chaincode_golang_model != False\n\n # Construct a model instance of ConfigPeerChaincodeGolang by calling from_dict on the json representation\n config_peer_chaincode_golang_model_dict 
= ConfigPeerChaincodeGolang.from_dict(config_peer_chaincode_golang_model_json).__dict__\n config_peer_chaincode_golang_model2 = ConfigPeerChaincodeGolang(**config_peer_chaincode_golang_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_chaincode_golang_model == config_peer_chaincode_golang_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_chaincode_golang_model_json2 = config_peer_chaincode_golang_model.to_dict()\n assert config_peer_chaincode_golang_model_json2 == config_peer_chaincode_golang_model_json", "title": "" }, { "docid": "60ab384dabe02dea980356c4121795dc", "score": "0.6337152", "text": "def test_import_response_metadata_serialization(self):\n\n # Construct a json representation of a ImportResponseMetadata model\n import_response_metadata_model_json = {}\n import_response_metadata_model_json['catalog_id'] = 'testString'\n import_response_metadata_model_json['created_at'] = \"2019-01-01T12:00:00Z\"\n import_response_metadata_model_json['created_by'] = '[email protected]'\n import_response_metadata_model_json['id'] = 'testString'\n import_response_metadata_model_json['modified_at'] = \"2019-01-01T12:00:00Z\"\n import_response_metadata_model_json['name'] = 'testString'\n import_response_metadata_model_json['project_id'] = 'testString'\n import_response_metadata_model_json['project_name'] = 'testString'\n import_response_metadata_model_json['url'] = 'testString'\n\n # Construct a model instance of ImportResponseMetadata by calling from_dict on the json representation\n import_response_metadata_model = ImportResponseMetadata.from_dict(import_response_metadata_model_json)\n assert import_response_metadata_model != False\n\n # Construct a model instance of ImportResponseMetadata by calling from_dict on the json representation\n import_response_metadata_model_dict = ImportResponseMetadata.from_dict(import_response_metadata_model_json).__dict__\n import_response_metadata_model2 = ImportResponseMetadata(**import_response_metadata_model_dict)\n\n # Verify the model instances are equivalent\n assert import_response_metadata_model == import_response_metadata_model2\n\n # Convert model instance back to dict and verify no loss of data\n import_response_metadata_model_json2 = import_response_metadata_model.to_dict()\n assert import_response_metadata_model_json2 == import_response_metadata_model_json", "title": "" }, { "docid": "c3b0fbdee3963b49211673fc64634b20", "score": "0.6323107", "text": "def test_config_peer_gossip_election_serialization(self):\n\n # Construct a json representation of a ConfigPeerGossipElection model\n config_peer_gossip_election_model_json = {}\n config_peer_gossip_election_model_json['startupGracePeriod'] = '15s'\n config_peer_gossip_election_model_json['membershipSampleInterval'] = '1s'\n config_peer_gossip_election_model_json['leaderAliveThreshold'] = '10s'\n config_peer_gossip_election_model_json['leaderElectionDuration'] = '5s'\n\n # Construct a model instance of ConfigPeerGossipElection by calling from_dict on the json representation\n config_peer_gossip_election_model = ConfigPeerGossipElection.from_dict(config_peer_gossip_election_model_json)\n assert config_peer_gossip_election_model != False\n\n # Construct a model instance of ConfigPeerGossipElection by calling from_dict on the json representation\n config_peer_gossip_election_model_dict = ConfigPeerGossipElection.from_dict(config_peer_gossip_election_model_json).__dict__\n config_peer_gossip_election_model2 = 
ConfigPeerGossipElection(**config_peer_gossip_election_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_gossip_election_model == config_peer_gossip_election_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_gossip_election_model_json2 = config_peer_gossip_election_model.to_dict()\n assert config_peer_gossip_election_model_json2 == config_peer_gossip_election_model_json", "title": "" }, { "docid": "683485e3b8b7ded6353a4e04f3a3e154", "score": "0.6322229", "text": "def test_get_public_settings_response_crn_serialization(self):\n\n # Construct a json representation of a GetPublicSettingsResponseCRN model\n get_public_settings_response_crn_model_json = {}\n get_public_settings_response_crn_model_json['account_id'] = 'a/abcd'\n get_public_settings_response_crn_model_json['c_name'] = 'staging'\n get_public_settings_response_crn_model_json['c_type'] = 'public'\n get_public_settings_response_crn_model_json['instance_id'] = 'abc123'\n get_public_settings_response_crn_model_json['location'] = 'us-south'\n get_public_settings_response_crn_model_json['resource_id'] = '-'\n get_public_settings_response_crn_model_json['resource_type'] = '-'\n get_public_settings_response_crn_model_json['service_name'] = 'blockchain'\n get_public_settings_response_crn_model_json['version'] = 'v1'\n\n # Construct a model instance of GetPublicSettingsResponseCRN by calling from_dict on the json representation\n get_public_settings_response_crn_model = GetPublicSettingsResponseCRN.from_dict(get_public_settings_response_crn_model_json)\n assert get_public_settings_response_crn_model != False\n\n # Construct a model instance of GetPublicSettingsResponseCRN by calling from_dict on the json representation\n get_public_settings_response_crn_model_dict = GetPublicSettingsResponseCRN.from_dict(get_public_settings_response_crn_model_json).__dict__\n get_public_settings_response_crn_model2 = GetPublicSettingsResponseCRN(**get_public_settings_response_crn_model_dict)\n\n # Verify the model instances are equivalent\n assert get_public_settings_response_crn_model == get_public_settings_response_crn_model2\n\n # Convert model instance back to dict and verify no loss of data\n get_public_settings_response_crn_model_json2 = get_public_settings_response_crn_model.to_dict()\n assert get_public_settings_response_crn_model_json2 == get_public_settings_response_crn_model_json", "title": "" }, { "docid": "13b00f9eaa64a2dfce90359fbc2278b2", "score": "0.6312941", "text": "def test_import_ca_body_msp_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n import_ca_body_msp_ca_model = {} # ImportCaBodyMspCa\n import_ca_body_msp_ca_model['name'] = 'org1CA'\n import_ca_body_msp_ca_model['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n import_ca_body_msp_tlsca_model = {} # ImportCaBodyMspTlsca\n import_ca_body_msp_tlsca_model['name'] = 'org1tlsCA'\n import_ca_body_msp_tlsca_model['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n import_ca_body_msp_component_model = {} # ImportCaBodyMspComponent\n import_ca_body_msp_component_model['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n\n # Construct a json representation of a 
ImportCaBodyMsp model\n import_ca_body_msp_model_json = {}\n import_ca_body_msp_model_json['ca'] = import_ca_body_msp_ca_model\n import_ca_body_msp_model_json['tlsca'] = import_ca_body_msp_tlsca_model\n import_ca_body_msp_model_json['component'] = import_ca_body_msp_component_model\n\n # Construct a model instance of ImportCaBodyMsp by calling from_dict on the json representation\n import_ca_body_msp_model = ImportCaBodyMsp.from_dict(import_ca_body_msp_model_json)\n assert import_ca_body_msp_model != False\n\n # Construct a model instance of ImportCaBodyMsp by calling from_dict on the json representation\n import_ca_body_msp_model_dict = ImportCaBodyMsp.from_dict(import_ca_body_msp_model_json).__dict__\n import_ca_body_msp_model2 = ImportCaBodyMsp(**import_ca_body_msp_model_dict)\n\n # Verify the model instances are equivalent\n assert import_ca_body_msp_model == import_ca_body_msp_model2\n\n # Convert model instance back to dict and verify no loss of data\n import_ca_body_msp_model_json2 = import_ca_body_msp_model.to_dict()\n assert import_ca_body_msp_model_json2 == import_ca_body_msp_model_json", "title": "" }, { "docid": "a8e3894f883dd9ed0d89a326ec27b0ee", "score": "0.63086194", "text": "def test_config_peer_client_serialization(self):\n\n # Construct a json representation of a ConfigPeerClient model\n config_peer_client_model_json = {}\n config_peer_client_model_json['connTimeout'] = '2s'\n\n # Construct a model instance of ConfigPeerClient by calling from_dict on the json representation\n config_peer_client_model = ConfigPeerClient.from_dict(config_peer_client_model_json)\n assert config_peer_client_model != False\n\n # Construct a model instance of ConfigPeerClient by calling from_dict on the json representation\n config_peer_client_model_dict = ConfigPeerClient.from_dict(config_peer_client_model_json).__dict__\n config_peer_client_model2 = ConfigPeerClient(**config_peer_client_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_client_model == config_peer_client_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_client_model_json2 = config_peer_client_model.to_dict()\n assert config_peer_client_model_json2 == config_peer_client_model_json", "title": "" }, { "docid": "92d861e4179d1940b3359490adebaca5", "score": "0.6307818", "text": "def test_update_peer_body_crypto_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n crypto_enrollment_component_model = {} # CryptoEnrollmentComponent\n crypto_enrollment_component_model['admincerts'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n update_enrollment_crypto_field_ca_model = {} # UpdateEnrollmentCryptoFieldCa\n update_enrollment_crypto_field_ca_model['host'] = 'n3a3ec3-myca.ibp.us-south.containers.appdomain.cloud'\n update_enrollment_crypto_field_ca_model['port'] = 7054\n update_enrollment_crypto_field_ca_model['name'] = 'ca'\n update_enrollment_crypto_field_ca_model['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n update_enrollment_crypto_field_ca_model['enroll_id'] = 'admin'\n update_enrollment_crypto_field_ca_model['enroll_secret'] = 'password'\n\n update_enrollment_crypto_field_tlsca_model = {} # UpdateEnrollmentCryptoFieldTlsca\n update_enrollment_crypto_field_tlsca_model['host'] = 
'n3a3ec3-myca.ibp.us-south.containers.appdomain.cloud'\n update_enrollment_crypto_field_tlsca_model['port'] = 7054\n update_enrollment_crypto_field_tlsca_model['name'] = 'tlsca'\n update_enrollment_crypto_field_tlsca_model['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n update_enrollment_crypto_field_tlsca_model['enroll_id'] = 'admin'\n update_enrollment_crypto_field_tlsca_model['enroll_secret'] = 'password'\n update_enrollment_crypto_field_tlsca_model['csr_hosts'] = ['testString']\n\n update_enrollment_crypto_field_model = {} # UpdateEnrollmentCryptoField\n update_enrollment_crypto_field_model['component'] = crypto_enrollment_component_model\n update_enrollment_crypto_field_model['ca'] = update_enrollment_crypto_field_ca_model\n update_enrollment_crypto_field_model['tlsca'] = update_enrollment_crypto_field_tlsca_model\n\n update_msp_crypto_field_ca_model = {} # UpdateMspCryptoFieldCa\n update_msp_crypto_field_ca_model['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n update_msp_crypto_field_ca_model['ca_intermediate_certs'] = ['testString']\n\n update_msp_crypto_field_tlsca_model = {} # UpdateMspCryptoFieldTlsca\n update_msp_crypto_field_tlsca_model['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n update_msp_crypto_field_tlsca_model['ca_intermediate_certs'] = ['testString']\n\n client_auth_model = {} # ClientAuth\n client_auth_model['type'] = 'noclientcert'\n client_auth_model['tls_certs'] = ['testString']\n\n update_msp_crypto_field_component_model = {} # UpdateMspCryptoFieldComponent\n update_msp_crypto_field_component_model['ekey'] = 'testString'\n update_msp_crypto_field_component_model['ecert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n update_msp_crypto_field_component_model['admin_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n update_msp_crypto_field_component_model['tls_key'] = 'testString'\n update_msp_crypto_field_component_model['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n update_msp_crypto_field_component_model['client_auth'] = client_auth_model\n\n update_msp_crypto_field_model = {} # UpdateMspCryptoField\n update_msp_crypto_field_model['ca'] = update_msp_crypto_field_ca_model\n update_msp_crypto_field_model['tlsca'] = update_msp_crypto_field_tlsca_model\n update_msp_crypto_field_model['component'] = update_msp_crypto_field_component_model\n\n # Construct a json representation of a UpdatePeerBodyCrypto model\n update_peer_body_crypto_model_json = {}\n update_peer_body_crypto_model_json['enrollment'] = update_enrollment_crypto_field_model\n update_peer_body_crypto_model_json['msp'] = update_msp_crypto_field_model\n\n # Construct a model instance of UpdatePeerBodyCrypto by calling from_dict on the json representation\n update_peer_body_crypto_model = UpdatePeerBodyCrypto.from_dict(update_peer_body_crypto_model_json)\n assert update_peer_body_crypto_model != False\n\n # Construct a model instance of UpdatePeerBodyCrypto by calling from_dict on the json representation\n 
update_peer_body_crypto_model_dict = UpdatePeerBodyCrypto.from_dict(update_peer_body_crypto_model_json).__dict__\n update_peer_body_crypto_model2 = UpdatePeerBodyCrypto(**update_peer_body_crypto_model_dict)\n\n # Verify the model instances are equivalent\n assert update_peer_body_crypto_model == update_peer_body_crypto_model2\n\n # Convert model instance back to dict and verify no loss of data\n update_peer_body_crypto_model_json2 = update_peer_body_crypto_model.to_dict()\n assert update_peer_body_crypto_model_json2 == update_peer_body_crypto_model_json", "title": "" }, { "docid": "673dc652e76fe41bc230ed5c8afb81b1", "score": "0.6304075", "text": "def test_config_peer_gossip_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n config_peer_gossip_election_model = {} # ConfigPeerGossipElection\n config_peer_gossip_election_model['startupGracePeriod'] = '15s'\n config_peer_gossip_election_model['membershipSampleInterval'] = '1s'\n config_peer_gossip_election_model['leaderAliveThreshold'] = '10s'\n config_peer_gossip_election_model['leaderElectionDuration'] = '5s'\n\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model = {} # ConfigPeerGossipPvtDataImplicitCollectionDisseminationPolicy\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model['requiredPeerCount'] = 0\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model['maxPeerCount'] = 1\n\n config_peer_gossip_pvt_data_model = {} # ConfigPeerGossipPvtData\n config_peer_gossip_pvt_data_model['pullRetryThreshold'] = '60s'\n config_peer_gossip_pvt_data_model['transientstoreMaxBlockRetention'] = 1000\n config_peer_gossip_pvt_data_model['pushAckTimeout'] = '3s'\n config_peer_gossip_pvt_data_model['btlPullMargin'] = 10\n config_peer_gossip_pvt_data_model['reconcileBatchSize'] = 10\n config_peer_gossip_pvt_data_model['reconcileSleepInterval'] = '1m'\n config_peer_gossip_pvt_data_model['reconciliationEnabled'] = True\n config_peer_gossip_pvt_data_model['skipPullingInvalidTransactionsDuringCommit'] = False\n config_peer_gossip_pvt_data_model['implicitCollectionDisseminationPolicy'] = config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model\n\n config_peer_gossip_state_model = {} # ConfigPeerGossipState\n config_peer_gossip_state_model['enabled'] = True\n config_peer_gossip_state_model['checkInterval'] = '10s'\n config_peer_gossip_state_model['responseTimeout'] = '3s'\n config_peer_gossip_state_model['batchSize'] = 10\n config_peer_gossip_state_model['blockBufferSize'] = 100\n config_peer_gossip_state_model['maxRetries'] = 3\n\n # Construct a json representation of a ConfigPeerGossip model\n config_peer_gossip_model_json = {}\n config_peer_gossip_model_json['useLeaderElection'] = True\n config_peer_gossip_model_json['orgLeader'] = False\n config_peer_gossip_model_json['membershipTrackerInterval'] = '5s'\n config_peer_gossip_model_json['maxBlockCountToStore'] = 100\n config_peer_gossip_model_json['maxPropagationBurstLatency'] = '10ms'\n config_peer_gossip_model_json['maxPropagationBurstSize'] = 10\n config_peer_gossip_model_json['propagateIterations'] = 3\n config_peer_gossip_model_json['pullInterval'] = '4s'\n config_peer_gossip_model_json['pullPeerNum'] = 3\n config_peer_gossip_model_json['requestStateInfoInterval'] = '4s'\n config_peer_gossip_model_json['publishStateInfoInterval'] = '4s'\n config_peer_gossip_model_json['stateInfoRetentionInterval'] = '0s'\n config_peer_gossip_model_json['publishCertPeriod'] = 
'10s'\n config_peer_gossip_model_json['skipBlockVerification'] = False\n config_peer_gossip_model_json['dialTimeout'] = '3s'\n config_peer_gossip_model_json['connTimeout'] = '2s'\n config_peer_gossip_model_json['recvBuffSize'] = 20\n config_peer_gossip_model_json['sendBuffSize'] = 200\n config_peer_gossip_model_json['digestWaitTime'] = '1s'\n config_peer_gossip_model_json['requestWaitTime'] = '1500ms'\n config_peer_gossip_model_json['responseWaitTime'] = '2s'\n config_peer_gossip_model_json['aliveTimeInterval'] = '5s'\n config_peer_gossip_model_json['aliveExpirationTimeout'] = '25s'\n config_peer_gossip_model_json['reconnectInterval'] = '25s'\n config_peer_gossip_model_json['election'] = config_peer_gossip_election_model\n config_peer_gossip_model_json['pvtData'] = config_peer_gossip_pvt_data_model\n config_peer_gossip_model_json['state'] = config_peer_gossip_state_model\n\n # Construct a model instance of ConfigPeerGossip by calling from_dict on the json representation\n config_peer_gossip_model = ConfigPeerGossip.from_dict(config_peer_gossip_model_json)\n assert config_peer_gossip_model != False\n\n # Construct a model instance of ConfigPeerGossip by calling from_dict on the json representation\n config_peer_gossip_model_dict = ConfigPeerGossip.from_dict(config_peer_gossip_model_json).__dict__\n config_peer_gossip_model2 = ConfigPeerGossip(**config_peer_gossip_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_gossip_model == config_peer_gossip_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_gossip_model_json2 = config_peer_gossip_model.to_dict()\n assert config_peer_gossip_model_json2 == config_peer_gossip_model_json", "title": "" }, { "docid": "0fe2d10ff4da07bcc210f6a8a15913e0", "score": "0.6294839", "text": "def test_config_peer_gateway_serialization(self):\n\n # Construct a json representation of a ConfigPeerGateway model\n config_peer_gateway_model_json = {}\n config_peer_gateway_model_json['enabled'] = True\n\n # Construct a model instance of ConfigPeerGateway by calling from_dict on the json representation\n config_peer_gateway_model = ConfigPeerGateway.from_dict(config_peer_gateway_model_json)\n assert config_peer_gateway_model != False\n\n # Construct a model instance of ConfigPeerGateway by calling from_dict on the json representation\n config_peer_gateway_model_dict = ConfigPeerGateway.from_dict(config_peer_gateway_model_json).__dict__\n config_peer_gateway_model2 = ConfigPeerGateway(**config_peer_gateway_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_gateway_model == config_peer_gateway_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_gateway_model_json2 = config_peer_gateway_model.to_dict()\n assert config_peer_gateway_model_json2 == config_peer_gateway_model_json", "title": "" }, { "docid": "67e335aca03fbd2142e10219946f8e5e", "score": "0.62886584", "text": "def test_config_peer_gossip_state_serialization(self):\n\n # Construct a json representation of a ConfigPeerGossipState model\n config_peer_gossip_state_model_json = {}\n config_peer_gossip_state_model_json['enabled'] = True\n config_peer_gossip_state_model_json['checkInterval'] = '10s'\n config_peer_gossip_state_model_json['responseTimeout'] = '3s'\n config_peer_gossip_state_model_json['batchSize'] = 10\n config_peer_gossip_state_model_json['blockBufferSize'] = 100\n config_peer_gossip_state_model_json['maxRetries'] = 3\n\n # Construct a model instance of 
ConfigPeerGossipState by calling from_dict on the json representation\n config_peer_gossip_state_model = ConfigPeerGossipState.from_dict(config_peer_gossip_state_model_json)\n assert config_peer_gossip_state_model != False\n\n # Construct a model instance of ConfigPeerGossipState by calling from_dict on the json representation\n config_peer_gossip_state_model_dict = ConfigPeerGossipState.from_dict(config_peer_gossip_state_model_json).__dict__\n config_peer_gossip_state_model2 = ConfigPeerGossipState(**config_peer_gossip_state_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_gossip_state_model == config_peer_gossip_state_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_gossip_state_model_json2 = config_peer_gossip_state_model.to_dict()\n assert config_peer_gossip_state_model_json2 == config_peer_gossip_state_model_json", "title": "" }, { "docid": "bf6be4237b0ccd7a8e46fea81c5f5e98", "score": "0.62838554", "text": "def test_config_peer_keepalive_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n config_peer_keepalive_client_model = {} # ConfigPeerKeepaliveClient\n config_peer_keepalive_client_model['interval'] = '60s'\n config_peer_keepalive_client_model['timeout'] = '20s'\n\n config_peer_keepalive_delivery_client_model = {} # ConfigPeerKeepaliveDeliveryClient\n config_peer_keepalive_delivery_client_model['interval'] = '60s'\n config_peer_keepalive_delivery_client_model['timeout'] = '20s'\n\n # Construct a json representation of a ConfigPeerKeepalive model\n config_peer_keepalive_model_json = {}\n config_peer_keepalive_model_json['minInterval'] = '60s'\n config_peer_keepalive_model_json['client'] = config_peer_keepalive_client_model\n config_peer_keepalive_model_json['deliveryClient'] = config_peer_keepalive_delivery_client_model\n\n # Construct a model instance of ConfigPeerKeepalive by calling from_dict on the json representation\n config_peer_keepalive_model = ConfigPeerKeepalive.from_dict(config_peer_keepalive_model_json)\n assert config_peer_keepalive_model != False\n\n # Construct a model instance of ConfigPeerKeepalive by calling from_dict on the json representation\n config_peer_keepalive_model_dict = ConfigPeerKeepalive.from_dict(config_peer_keepalive_model_json).__dict__\n config_peer_keepalive_model2 = ConfigPeerKeepalive(**config_peer_keepalive_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_keepalive_model == config_peer_keepalive_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_keepalive_model_json2 = config_peer_keepalive_model.to_dict()\n assert config_peer_keepalive_model_json2 == config_peer_keepalive_model_json", "title": "" }, { "docid": "018d53ac8a6ec70faac6b88a8b586660", "score": "0.6220824", "text": "def test_config_peer_chaincode_logging_serialization(self):\n\n # Construct a json representation of a ConfigPeerChaincodeLogging model\n config_peer_chaincode_logging_model_json = {}\n config_peer_chaincode_logging_model_json['level'] = 'info'\n config_peer_chaincode_logging_model_json['shim'] = 'warning'\n config_peer_chaincode_logging_model_json['format'] = '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}'\n\n # Construct a model instance of ConfigPeerChaincodeLogging by calling from_dict on the json representation\n config_peer_chaincode_logging_model = 
ConfigPeerChaincodeLogging.from_dict(config_peer_chaincode_logging_model_json)\n assert config_peer_chaincode_logging_model != False\n\n # Construct a model instance of ConfigPeerChaincodeLogging by calling from_dict on the json representation\n config_peer_chaincode_logging_model_dict = ConfigPeerChaincodeLogging.from_dict(config_peer_chaincode_logging_model_json).__dict__\n config_peer_chaincode_logging_model2 = ConfigPeerChaincodeLogging(**config_peer_chaincode_logging_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_chaincode_logging_model == config_peer_chaincode_logging_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_chaincode_logging_model_json2 = config_peer_chaincode_logging_model.to_dict()\n assert config_peer_chaincode_logging_model_json2 == config_peer_chaincode_logging_model_json", "title": "" }, { "docid": "558a8c1762b26cc23312e3905c285b0c", "score": "0.61989766", "text": "def test_serialize_deserialize():\n current = GridCurrent.from_netCDF(curr_file2)\n py_current = CurrentMover(current=current)\n\n serial = py_current.serialize()\n assert validate_serialize_json(serial, py_current)\n\n # check our CurrentMover attributes\n\n deser = CurrentMover.deserialize(serial)\n\n assert deser == py_current", "title": "" }, { "docid": "58fbfa84a42bea89b2d2176bc50b092d", "score": "0.619188", "text": "def test_edit_log_settings_body_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n logging_settings_client_model = {} # LoggingSettingsClient\n logging_settings_client_model['enabled'] = True\n logging_settings_client_model['level'] = 'silly'\n logging_settings_client_model['unique_name'] = False\n\n logging_settings_server_model = {} # LoggingSettingsServer\n logging_settings_server_model['enabled'] = True\n logging_settings_server_model['level'] = 'silly'\n logging_settings_server_model['unique_name'] = False\n\n # Construct a json representation of a EditLogSettingsBody model\n edit_log_settings_body_model_json = {}\n edit_log_settings_body_model_json['client'] = logging_settings_client_model\n edit_log_settings_body_model_json['server'] = logging_settings_server_model\n\n # Construct a model instance of EditLogSettingsBody by calling from_dict on the json representation\n edit_log_settings_body_model = EditLogSettingsBody.from_dict(edit_log_settings_body_model_json)\n assert edit_log_settings_body_model != False\n\n # Construct a model instance of EditLogSettingsBody by calling from_dict on the json representation\n edit_log_settings_body_model_dict = EditLogSettingsBody.from_dict(edit_log_settings_body_model_json).__dict__\n edit_log_settings_body_model2 = EditLogSettingsBody(**edit_log_settings_body_model_dict)\n\n # Verify the model instances are equivalent\n assert edit_log_settings_body_model == edit_log_settings_body_model2\n\n # Convert model instance back to dict and verify no loss of data\n edit_log_settings_body_model_json2 = edit_log_settings_body_model.to_dict()\n assert edit_log_settings_body_model_json2 == edit_log_settings_body_model_json", "title": "" }, { "docid": "52a7b21d77b68aebc864df3f1f3cba75", "score": "0.6189044", "text": "def test_import_ca_body_msp_component_serialization(self):\n\n # Construct a json representation of a ImportCaBodyMspComponent model\n import_ca_body_msp_component_model_json = {}\n import_ca_body_msp_component_model_json['tls_cert'] = 
'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n\n # Construct a model instance of ImportCaBodyMspComponent by calling from_dict on the json representation\n import_ca_body_msp_component_model = ImportCaBodyMspComponent.from_dict(import_ca_body_msp_component_model_json)\n assert import_ca_body_msp_component_model != False\n\n # Construct a model instance of ImportCaBodyMspComponent by calling from_dict on the json representation\n import_ca_body_msp_component_model_dict = ImportCaBodyMspComponent.from_dict(import_ca_body_msp_component_model_json).__dict__\n import_ca_body_msp_component_model2 = ImportCaBodyMspComponent(**import_ca_body_msp_component_model_dict)\n\n # Verify the model instances are equivalent\n assert import_ca_body_msp_component_model == import_ca_body_msp_component_model2\n\n # Convert model instance back to dict and verify no loss of data\n import_ca_body_msp_component_model_json2 = import_ca_body_msp_component_model.to_dict()\n assert import_ca_body_msp_component_model_json2 == import_ca_body_msp_component_model_json", "title": "" }, { "docid": "cb5bf2747519c7645e6e92fb58525bc4", "score": "0.6181668", "text": "def test_ca_response_storage_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n storage_object_model = {} # StorageObject\n storage_object_model['size'] = '4GiB'\n storage_object_model['class'] = 'default'\n\n # Construct a json representation of a CaResponseStorage model\n ca_response_storage_model_json = {}\n ca_response_storage_model_json['ca'] = storage_object_model\n\n # Construct a model instance of CaResponseStorage by calling from_dict on the json representation\n ca_response_storage_model = CaResponseStorage.from_dict(ca_response_storage_model_json)\n assert ca_response_storage_model != False\n\n # Construct a model instance of CaResponseStorage by calling from_dict on the json representation\n ca_response_storage_model_dict = CaResponseStorage.from_dict(ca_response_storage_model_json).__dict__\n ca_response_storage_model2 = CaResponseStorage(**ca_response_storage_model_dict)\n\n # Verify the model instances are equivalent\n assert ca_response_storage_model == ca_response_storage_model2\n\n # Convert model instance back to dict and verify no loss of data\n ca_response_storage_model_json2 = ca_response_storage_model.to_dict()\n assert ca_response_storage_model_json2 == ca_response_storage_model_json", "title": "" }, { "docid": "447321cb11d9111f83522a0dbecf304a", "score": "0.6164679", "text": "def test_update_orderer_body_resources_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n resource_requests_model = {} # ResourceRequests\n resource_requests_model['cpu'] = '100m'\n resource_requests_model['memory'] = '256MiB'\n\n resource_limits_model = {} # ResourceLimits\n resource_limits_model['cpu'] = '100m'\n resource_limits_model['memory'] = '256MiB'\n\n resource_object_model = {} # ResourceObject\n resource_object_model['requests'] = resource_requests_model\n resource_object_model['limits'] = resource_limits_model\n\n # Construct a json representation of a UpdateOrdererBodyResources model\n update_orderer_body_resources_model_json = {}\n update_orderer_body_resources_model_json['orderer'] = resource_object_model\n update_orderer_body_resources_model_json['proxy'] = resource_object_model\n\n # Construct a model instance of UpdateOrdererBodyResources by calling 
from_dict on the json representation\n update_orderer_body_resources_model = UpdateOrdererBodyResources.from_dict(update_orderer_body_resources_model_json)\n assert update_orderer_body_resources_model != False\n\n # Construct a model instance of UpdateOrdererBodyResources by calling from_dict on the json representation\n update_orderer_body_resources_model_dict = UpdateOrdererBodyResources.from_dict(update_orderer_body_resources_model_json).__dict__\n update_orderer_body_resources_model2 = UpdateOrdererBodyResources(**update_orderer_body_resources_model_dict)\n\n # Verify the model instances are equivalent\n assert update_orderer_body_resources_model == update_orderer_body_resources_model2\n\n # Convert model instance back to dict and verify no loss of data\n update_orderer_body_resources_model_json2 = update_orderer_body_resources_model.to_dict()\n assert update_orderer_body_resources_model_json2 == update_orderer_body_resources_model_json", "title": "" }, { "docid": "e21787462c08ded96d3f5b61d4ecc3a7", "score": "0.61455345", "text": "def test_msp_crypto_field_component_serialization(self):\n\n # Construct a json representation of a MspCryptoFieldComponent model\n msp_crypto_field_component_model_json = {}\n msp_crypto_field_component_model_json['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n msp_crypto_field_component_model_json['ecert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n msp_crypto_field_component_model_json['admin_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n # Construct a model instance of MspCryptoFieldComponent by calling from_dict on the json representation\n msp_crypto_field_component_model = MspCryptoFieldComponent.from_dict(msp_crypto_field_component_model_json)\n assert msp_crypto_field_component_model != False\n\n # Construct a model instance of MspCryptoFieldComponent by calling from_dict on the json representation\n msp_crypto_field_component_model_dict = MspCryptoFieldComponent.from_dict(msp_crypto_field_component_model_json).__dict__\n msp_crypto_field_component_model2 = MspCryptoFieldComponent(**msp_crypto_field_component_model_dict)\n\n # Verify the model instances are equivalent\n assert msp_crypto_field_component_model == msp_crypto_field_component_model2\n\n # Convert model instance back to dict and verify no loss of data\n msp_crypto_field_component_model_json2 = msp_crypto_field_component_model.to_dict()\n assert msp_crypto_field_component_model_json2 == msp_crypto_field_component_model_json", "title": "" }, { "docid": "f30ed682e247df597ac9dc0781baf1b5", "score": "0.61408573", "text": "def serialize():", "title": "" }, { "docid": "576464eaf4fff862ab5a12ec0dc87272", "score": "0.6130203", "text": "def test_serialize_from_json(self): \n e = orders.TypedPhone.serialize_from_json(self.sample_dict) \n self.assertEqual(e.json_data, self.sample_dict)", "title": "" }, { "docid": "1b9c984917b46415b91f75faa4d0824e", "score": "0.6118753", "text": "def test_create_orderer_response_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n node_ou_model = {} # NodeOu\n node_ou_model['enabled'] = True\n\n msp_crypto_field_ca_model = {} # MspCryptoFieldCa\n msp_crypto_field_ca_model['name'] = 'ca'\n msp_crypto_field_ca_model['root_certs'] 
= ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n msp_crypto_field_tlsca_model = {} # MspCryptoFieldTlsca\n msp_crypto_field_tlsca_model['name'] = 'tlsca'\n msp_crypto_field_tlsca_model['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n msp_crypto_field_component_model = {} # MspCryptoFieldComponent\n msp_crypto_field_component_model['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n msp_crypto_field_component_model['ecert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n msp_crypto_field_component_model['admin_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n msp_crypto_field_model = {} # MspCryptoField\n msp_crypto_field_model['ca'] = msp_crypto_field_ca_model\n msp_crypto_field_model['tlsca'] = msp_crypto_field_tlsca_model\n msp_crypto_field_model['component'] = msp_crypto_field_component_model\n\n generic_resources_requests_model = {} # GenericResourcesRequests\n generic_resources_requests_model['cpu'] = '100m'\n generic_resources_requests_model['memory'] = '256M'\n\n generic_resource_limits_model = {} # GenericResourceLimits\n generic_resource_limits_model['cpu'] = '8000m'\n generic_resource_limits_model['memory'] = '16384M'\n\n generic_resources_model = {} # GenericResources\n generic_resources_model['requests'] = generic_resources_requests_model\n generic_resources_model['limits'] = generic_resource_limits_model\n\n orderer_response_resources_model = {} # OrdererResponseResources\n orderer_response_resources_model['orderer'] = generic_resources_model\n orderer_response_resources_model['proxy'] = generic_resources_model\n\n storage_object_model = {} # StorageObject\n storage_object_model['size'] = '4GiB'\n storage_object_model['class'] = 'default'\n\n orderer_response_storage_model = {} # OrdererResponseStorage\n orderer_response_storage_model['orderer'] = storage_object_model\n\n orderer_response_model = {} # OrdererResponse\n orderer_response_model['id'] = 'component1'\n orderer_response_model['dep_component_id'] = 'admin'\n orderer_response_model['api_url'] = 'grpcs://n3a3ec3-myorderer.ibp.us-south.containers.appdomain.cloud:7050'\n orderer_response_model['display_name'] = 'orderer'\n orderer_response_model['cluster_id'] = 'mzdqhdifnl'\n orderer_response_model['cluster_name'] = 'ordering service 1'\n orderer_response_model['grpcwp_url'] = 'https://n3a3ec3-myorderer-proxy.ibp.us-south.containers.appdomain.cloud:443'\n orderer_response_model['location'] = 'ibmcloud'\n orderer_response_model['operations_url'] = 'https://n3a3ec3-myorderer.ibp.us-south.containers.appdomain.cloud:8443'\n orderer_response_model['orderer_type'] = 'raft'\n orderer_response_model['config_override'] = { 'foo': 'bar' }\n orderer_response_model['consenter_proposal_fin'] = True\n orderer_response_model['node_ou'] = node_ou_model\n orderer_response_model['msp'] = msp_crypto_field_model\n orderer_response_model['msp_id'] = 'Org1'\n orderer_response_model['resources'] = orderer_response_resources_model\n orderer_response_model['scheme_version'] = 'v1'\n orderer_response_model['storage'] = orderer_response_storage_model\n 
orderer_response_model['system_channel_id'] = 'testchainid'\n orderer_response_model['tags'] = ['fabric-ca']\n orderer_response_model['timestamp'] = 1537262855753\n orderer_response_model['type'] = 'fabric-peer'\n orderer_response_model['version'] = '1.4.6-1'\n orderer_response_model['zone'] = '-'\n\n # Construct a json representation of a CreateOrdererResponse model\n create_orderer_response_model_json = {}\n create_orderer_response_model_json['created'] = [orderer_response_model]\n\n # Construct a model instance of CreateOrdererResponse by calling from_dict on the json representation\n create_orderer_response_model = CreateOrdererResponse.from_dict(create_orderer_response_model_json)\n assert create_orderer_response_model != False\n\n # Construct a model instance of CreateOrdererResponse by calling from_dict on the json representation\n create_orderer_response_model_dict = CreateOrdererResponse.from_dict(create_orderer_response_model_json).__dict__\n create_orderer_response_model2 = CreateOrdererResponse(**create_orderer_response_model_dict)\n\n # Verify the model instances are equivalent\n assert create_orderer_response_model == create_orderer_response_model2\n\n # Convert model instance back to dict and verify no loss of data\n create_orderer_response_model_json2 = create_orderer_response_model.to_dict()\n assert create_orderer_response_model_json2 == create_orderer_response_model_json", "title": "" }, { "docid": "f3cc57ed2d5009b20301fd426f56eef3", "score": "0.6074169", "text": "def test_serialization(self):\n message = 'Hello, world.'\n time = timezone.now()\n\n info = ServerInfo(timestamp=time, team_msg=message)\n json_data = info.json()\n\n self.assertTrue('message' in json_data)\n self.assertEqual(json_data['message'], message)\n self.assertTrue('message_timestamp' in json_data)\n self.assertEqual(json_data['message_timestamp'], time.isoformat())", "title": "" }, { "docid": "b783f7ff3e5350973139e86ff194abe4", "score": "0.60716045", "text": "def test_resource_object_fab_v2_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n resource_requests_model = {} # ResourceRequests\n resource_requests_model['cpu'] = '100m'\n resource_requests_model['memory'] = '256MiB'\n\n resource_limits_model = {} # ResourceLimits\n resource_limits_model['cpu'] = '100m'\n resource_limits_model['memory'] = '256MiB'\n\n # Construct a json representation of a ResourceObjectFabV2 model\n resource_object_fab_v2_model_json = {}\n resource_object_fab_v2_model_json['requests'] = resource_requests_model\n resource_object_fab_v2_model_json['limits'] = resource_limits_model\n\n # Construct a model instance of ResourceObjectFabV2 by calling from_dict on the json representation\n resource_object_fab_v2_model = ResourceObjectFabV2.from_dict(resource_object_fab_v2_model_json)\n assert resource_object_fab_v2_model != False\n\n # Construct a model instance of ResourceObjectFabV2 by calling from_dict on the json representation\n resource_object_fab_v2_model_dict = ResourceObjectFabV2.from_dict(resource_object_fab_v2_model_json).__dict__\n resource_object_fab_v2_model2 = ResourceObjectFabV2(**resource_object_fab_v2_model_dict)\n\n # Verify the model instances are equivalent\n assert resource_object_fab_v2_model == resource_object_fab_v2_model2\n\n # Convert model instance back to dict and verify no loss of data\n resource_object_fab_v2_model_json2 = resource_object_fab_v2_model.to_dict()\n assert resource_object_fab_v2_model_json2 == resource_object_fab_v2_model_json", 
"title": "" }, { "docid": "fabb73b055cd8e6612aa5f571e043a9b", "score": "0.60690576", "text": "def test_serialization():\n valid_data = FileLinkDataFactory(url='www.citrine.io', filename='materials.txt')\n file_link = FileLink.build(valid_data)\n assert file_link.dump() == valid_data", "title": "" }, { "docid": "1bf3ddab7aca5f03737cdf2b2cd7bebb", "score": "0.6055065", "text": "def test_node_serialization(_node, _serialization):\n assert _node.serialize() == _serialization", "title": "" }, { "docid": "a9936b62bbe48a81fa2461e1d7edc4d9", "score": "0.60489976", "text": "def test_deserialize(self):\n block = blocks.PageChooserBlock()\n christmas_page = Page.objects.get(slug=\"christmas\")\n\n self.assertEqual(block.to_python(christmas_page.id), christmas_page)\n\n # None should deserialize to None\n self.assertIsNone(block.to_python(None))", "title": "" }, { "docid": "71b71b4c628181ff09521d6f478e1c2d", "score": "0.6040876", "text": "def test_config_peer_authentication_serialization(self):\n\n # Construct a json representation of a ConfigPeerAuthentication model\n config_peer_authentication_model_json = {}\n config_peer_authentication_model_json['timewindow'] = '15m'\n\n # Construct a model instance of ConfigPeerAuthentication by calling from_dict on the json representation\n config_peer_authentication_model = ConfigPeerAuthentication.from_dict(config_peer_authentication_model_json)\n assert config_peer_authentication_model != False\n\n # Construct a model instance of ConfigPeerAuthentication by calling from_dict on the json representation\n config_peer_authentication_model_dict = ConfigPeerAuthentication.from_dict(config_peer_authentication_model_json).__dict__\n config_peer_authentication_model2 = ConfigPeerAuthentication(**config_peer_authentication_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_authentication_model == config_peer_authentication_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_authentication_model_json2 = config_peer_authentication_model.to_dict()\n assert config_peer_authentication_model_json2 == config_peer_authentication_model_json", "title": "" }, { "docid": "088ebdfd39cfe9694ff07496790368fa", "score": "0.60268164", "text": "def test_config_peer_admin_service_serialization(self):\n\n # Construct a json representation of a ConfigPeerAdminService model\n config_peer_admin_service_model_json = {}\n config_peer_admin_service_model_json['listenAddress'] = '0.0.0.0:7051'\n\n # Construct a model instance of ConfigPeerAdminService by calling from_dict on the json representation\n config_peer_admin_service_model = ConfigPeerAdminService.from_dict(config_peer_admin_service_model_json)\n assert config_peer_admin_service_model != False\n\n # Construct a model instance of ConfigPeerAdminService by calling from_dict on the json representation\n config_peer_admin_service_model_dict = ConfigPeerAdminService.from_dict(config_peer_admin_service_model_json).__dict__\n config_peer_admin_service_model2 = ConfigPeerAdminService(**config_peer_admin_service_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_admin_service_model == config_peer_admin_service_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_admin_service_model_json2 = config_peer_admin_service_model.to_dict()\n assert config_peer_admin_service_model_json2 == config_peer_admin_service_model_json", "title": "" }, { "docid": "3fd51c731e2cc9b5bf96f89c4a11d9f4", "score": "0.6020229", "text": 
"def test_config_peer_keepalive_delivery_client_serialization(self):\n\n # Construct a json representation of a ConfigPeerKeepaliveDeliveryClient model\n config_peer_keepalive_delivery_client_model_json = {}\n config_peer_keepalive_delivery_client_model_json['interval'] = '60s'\n config_peer_keepalive_delivery_client_model_json['timeout'] = '20s'\n\n # Construct a model instance of ConfigPeerKeepaliveDeliveryClient by calling from_dict on the json representation\n config_peer_keepalive_delivery_client_model = ConfigPeerKeepaliveDeliveryClient.from_dict(config_peer_keepalive_delivery_client_model_json)\n assert config_peer_keepalive_delivery_client_model != False\n\n # Construct a model instance of ConfigPeerKeepaliveDeliveryClient by calling from_dict on the json representation\n config_peer_keepalive_delivery_client_model_dict = ConfigPeerKeepaliveDeliveryClient.from_dict(config_peer_keepalive_delivery_client_model_json).__dict__\n config_peer_keepalive_delivery_client_model2 = ConfigPeerKeepaliveDeliveryClient(**config_peer_keepalive_delivery_client_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_keepalive_delivery_client_model == config_peer_keepalive_delivery_client_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_keepalive_delivery_client_model_json2 = config_peer_keepalive_delivery_client_model.to_dict()\n assert config_peer_keepalive_delivery_client_model_json2 == config_peer_keepalive_delivery_client_model_json", "title": "" }, { "docid": "792e9aa5f404b45b5a1ffa149b51e267", "score": "0.60137916", "text": "def test_get_fabric_versions_response_versions_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n fab_version_object_model = {} # FabVersionObject\n fab_version_object_model['default'] = True\n fab_version_object_model['version'] = '1.4.6-2'\n fab_version_object_model['image'] = { 'foo': 'bar' }\n\n fabric_version_dictionary_model = {} # FabricVersionDictionary\n fabric_version_dictionary_model['1.4.6-2'] = fab_version_object_model\n fabric_version_dictionary_model['2.1.0-0'] = fab_version_object_model\n fabric_version_dictionary_model['foo'] = { 'foo': 'bar' }\n\n # Construct a json representation of a GetFabricVersionsResponseVersions model\n get_fabric_versions_response_versions_model_json = {}\n get_fabric_versions_response_versions_model_json['ca'] = fabric_version_dictionary_model\n get_fabric_versions_response_versions_model_json['peer'] = fabric_version_dictionary_model\n get_fabric_versions_response_versions_model_json['orderer'] = fabric_version_dictionary_model\n\n # Construct a model instance of GetFabricVersionsResponseVersions by calling from_dict on the json representation\n get_fabric_versions_response_versions_model = GetFabricVersionsResponseVersions.from_dict(get_fabric_versions_response_versions_model_json)\n assert get_fabric_versions_response_versions_model != False\n\n # Construct a model instance of GetFabricVersionsResponseVersions by calling from_dict on the json representation\n get_fabric_versions_response_versions_model_dict = GetFabricVersionsResponseVersions.from_dict(get_fabric_versions_response_versions_model_json).__dict__\n get_fabric_versions_response_versions_model2 = GetFabricVersionsResponseVersions(**get_fabric_versions_response_versions_model_dict)\n\n # Verify the model instances are equivalent\n assert get_fabric_versions_response_versions_model == get_fabric_versions_response_versions_model2\n\n # Convert 
model instance back to dict and verify no loss of data\n get_fabric_versions_response_versions_model_json2 = get_fabric_versions_response_versions_model.to_dict()\n assert get_fabric_versions_response_versions_model_json2 == get_fabric_versions_response_versions_model_json", "title": "" }, { "docid": "ded60a009597c3a4534bc9cf5adab5a7", "score": "0.60026693", "text": "def test_orderer_response_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n node_ou_model = {} # NodeOu\n node_ou_model['enabled'] = True\n\n msp_crypto_field_ca_model = {} # MspCryptoFieldCa\n msp_crypto_field_ca_model['name'] = 'ca'\n msp_crypto_field_ca_model['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n msp_crypto_field_tlsca_model = {} # MspCryptoFieldTlsca\n msp_crypto_field_tlsca_model['name'] = 'tlsca'\n msp_crypto_field_tlsca_model['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n msp_crypto_field_component_model = {} # MspCryptoFieldComponent\n msp_crypto_field_component_model['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n msp_crypto_field_component_model['ecert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n msp_crypto_field_component_model['admin_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n msp_crypto_field_model = {} # MspCryptoField\n msp_crypto_field_model['ca'] = msp_crypto_field_ca_model\n msp_crypto_field_model['tlsca'] = msp_crypto_field_tlsca_model\n msp_crypto_field_model['component'] = msp_crypto_field_component_model\n\n generic_resources_requests_model = {} # GenericResourcesRequests\n generic_resources_requests_model['cpu'] = '100m'\n generic_resources_requests_model['memory'] = '256M'\n\n generic_resource_limits_model = {} # GenericResourceLimits\n generic_resource_limits_model['cpu'] = '8000m'\n generic_resource_limits_model['memory'] = '16384M'\n\n generic_resources_model = {} # GenericResources\n generic_resources_model['requests'] = generic_resources_requests_model\n generic_resources_model['limits'] = generic_resource_limits_model\n\n orderer_response_resources_model = {} # OrdererResponseResources\n orderer_response_resources_model['orderer'] = generic_resources_model\n orderer_response_resources_model['proxy'] = generic_resources_model\n\n storage_object_model = {} # StorageObject\n storage_object_model['size'] = '4GiB'\n storage_object_model['class'] = 'default'\n\n orderer_response_storage_model = {} # OrdererResponseStorage\n orderer_response_storage_model['orderer'] = storage_object_model\n\n # Construct a json representation of a OrdererResponse model\n orderer_response_model_json = {}\n orderer_response_model_json['id'] = 'component1'\n orderer_response_model_json['dep_component_id'] = 'admin'\n orderer_response_model_json['api_url'] = 'grpcs://n3a3ec3-myorderer.ibp.us-south.containers.appdomain.cloud:7050'\n orderer_response_model_json['display_name'] = 'orderer'\n orderer_response_model_json['cluster_id'] = 'mzdqhdifnl'\n orderer_response_model_json['cluster_name'] = 'ordering service 1'\n orderer_response_model_json['grpcwp_url'] = 
'https://n3a3ec3-myorderer-proxy.ibp.us-south.containers.appdomain.cloud:443'\n orderer_response_model_json['location'] = 'ibmcloud'\n orderer_response_model_json['operations_url'] = 'https://n3a3ec3-myorderer.ibp.us-south.containers.appdomain.cloud:8443'\n orderer_response_model_json['orderer_type'] = 'raft'\n orderer_response_model_json['config_override'] = { 'foo': 'bar' }\n orderer_response_model_json['consenter_proposal_fin'] = True\n orderer_response_model_json['node_ou'] = node_ou_model\n orderer_response_model_json['msp'] = msp_crypto_field_model\n orderer_response_model_json['msp_id'] = 'Org1'\n orderer_response_model_json['resources'] = orderer_response_resources_model\n orderer_response_model_json['scheme_version'] = 'v1'\n orderer_response_model_json['storage'] = orderer_response_storage_model\n orderer_response_model_json['system_channel_id'] = 'testchainid'\n orderer_response_model_json['tags'] = ['fabric-ca']\n orderer_response_model_json['timestamp'] = 1537262855753\n orderer_response_model_json['type'] = 'fabric-peer'\n orderer_response_model_json['version'] = '1.4.6-1'\n orderer_response_model_json['zone'] = '-'\n\n # Construct a model instance of OrdererResponse by calling from_dict on the json representation\n orderer_response_model = OrdererResponse.from_dict(orderer_response_model_json)\n assert orderer_response_model != False\n\n # Construct a model instance of OrdererResponse by calling from_dict on the json representation\n orderer_response_model_dict = OrdererResponse.from_dict(orderer_response_model_json).__dict__\n orderer_response_model2 = OrdererResponse(**orderer_response_model_dict)\n\n # Verify the model instances are equivalent\n assert orderer_response_model == orderer_response_model2\n\n # Convert model instance back to dict and verify no loss of data\n orderer_response_model_json2 = orderer_response_model.to_dict()\n assert orderer_response_model_json2 == orderer_response_model_json", "title": "" }, { "docid": "6a15242ad0657803dbfeb6831cbebb51", "score": "0.59983855", "text": "def test_import_notification_serialization(self):\n\n # Construct a json representation of a ImportNotification model\n import_notification_model_json = {}\n import_notification_model_json['created_at'] = \"2019-01-01T12:00:00Z\"\n import_notification_model_json['id'] = 'testString'\n import_notification_model_json['status'] = 'testString'\n\n # Construct a model instance of ImportNotification by calling from_dict on the json representation\n import_notification_model = ImportNotification.from_dict(import_notification_model_json)\n assert import_notification_model != False\n\n # Construct a model instance of ImportNotification by calling from_dict on the json representation\n import_notification_model_dict = ImportNotification.from_dict(import_notification_model_json).__dict__\n import_notification_model2 = ImportNotification(**import_notification_model_dict)\n\n # Verify the model instances are equivalent\n assert import_notification_model == import_notification_model2\n\n # Convert model instance back to dict and verify no loss of data\n import_notification_model_json2 = import_notification_model.to_dict()\n assert import_notification_model_json2 == import_notification_model_json", "title": "" }, { "docid": "ff80af460edad112eb15915613192045", "score": "0.599103", "text": "def test_get_public_settings_response_versions_serialization(self):\n\n # Construct a json representation of a GetPublicSettingsResponseVERSIONS model\n get_public_settings_response_versions_model_json = {}\n 
get_public_settings_response_versions_model_json['apollo'] = '65f3cbfd'\n get_public_settings_response_versions_model_json['athena'] = '1198f94'\n get_public_settings_response_versions_model_json['stitch'] = '0f1a0c6'\n get_public_settings_response_versions_model_json['tag'] = 'v0.4.31'\n\n # Construct a model instance of GetPublicSettingsResponseVERSIONS by calling from_dict on the json representation\n get_public_settings_response_versions_model = GetPublicSettingsResponseVERSIONS.from_dict(get_public_settings_response_versions_model_json)\n assert get_public_settings_response_versions_model != False\n\n # Construct a model instance of GetPublicSettingsResponseVERSIONS by calling from_dict on the json representation\n get_public_settings_response_versions_model_dict = GetPublicSettingsResponseVERSIONS.from_dict(get_public_settings_response_versions_model_json).__dict__\n get_public_settings_response_versions_model2 = GetPublicSettingsResponseVERSIONS(**get_public_settings_response_versions_model_dict)\n\n # Verify the model instances are equivalent\n assert get_public_settings_response_versions_model == get_public_settings_response_versions_model2\n\n # Convert model instance back to dict and verify no loss of data\n get_public_settings_response_versions_model_json2 = get_public_settings_response_versions_model.to_dict()\n assert get_public_settings_response_versions_model_json2 == get_public_settings_response_versions_model_json", "title": "" }, { "docid": "168a6d732f804558e33873651f837759", "score": "0.59885293", "text": "def test_crypto_object_msp_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n client_auth_model = {} # ClientAuth\n client_auth_model['type'] = 'noclientcert'\n client_auth_model['tls_certs'] = ['testString']\n\n msp_crypto_comp_model = {} # MspCryptoComp\n msp_crypto_comp_model['ekey'] = 'testString'\n msp_crypto_comp_model['ecert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n msp_crypto_comp_model['admin_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n msp_crypto_comp_model['tls_key'] = 'testString'\n msp_crypto_comp_model['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n msp_crypto_comp_model['client_auth'] = client_auth_model\n\n msp_crypto_ca_model = {} # MspCryptoCa\n msp_crypto_ca_model['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n msp_crypto_ca_model['ca_intermediate_certs'] = ['testString']\n\n # Construct a json representation of a CryptoObjectMsp model\n crypto_object_msp_model_json = {}\n crypto_object_msp_model_json['component'] = msp_crypto_comp_model\n crypto_object_msp_model_json['ca'] = msp_crypto_ca_model\n crypto_object_msp_model_json['tlsca'] = msp_crypto_ca_model\n\n # Construct a model instance of CryptoObjectMsp by calling from_dict on the json representation\n crypto_object_msp_model = CryptoObjectMsp.from_dict(crypto_object_msp_model_json)\n assert crypto_object_msp_model != False\n\n # Construct a model instance of CryptoObjectMsp by calling from_dict on the json representation\n crypto_object_msp_model_dict = CryptoObjectMsp.from_dict(crypto_object_msp_model_json).__dict__\n crypto_object_msp_model2 = 
CryptoObjectMsp(**crypto_object_msp_model_dict)\n\n # Verify the model instances are equivalent\n assert crypto_object_msp_model == crypto_object_msp_model2\n\n # Convert model instance back to dict and verify no loss of data\n crypto_object_msp_model_json2 = crypto_object_msp_model.to_dict()\n assert crypto_object_msp_model_json2 == crypto_object_msp_model_json", "title": "" }, { "docid": "b68df1b503f89cee18366885d7e5b53f", "score": "0.59722114", "text": "def test_serialize_deserialize(self):\n num_classes = 3\n min_level = 3\n max_level = 7\n num_scales = 3\n aspect_ratios = [1.0]\n num_anchors_per_location = num_scales * len(aspect_ratios)\n\n backbone = resnet.ResNet(model_id=50)\n decoder = fpn.FPN(\n input_specs=backbone.output_specs,\n min_level=min_level,\n max_level=max_level)\n head = dense_prediction_heads.RetinaNetHead(\n min_level=min_level,\n max_level=max_level,\n num_classes=num_classes,\n num_anchors_per_location=num_anchors_per_location)\n generator = detection_generator.MultilevelDetectionGenerator(\n max_num_detections=10)\n model = retinanet_model.RetinaNetModel(\n backbone=backbone,\n decoder=decoder,\n head=head,\n detection_generator=generator)\n\n config = model.get_config()\n new_model = retinanet_model.RetinaNetModel.from_config(config)\n\n # Validate that the config can be forced to JSON.\n _ = new_model.to_json()\n\n # If the serialization was successful, the new config should match the old.\n self.assertAllEqual(model.get_config(), new_model.get_config())", "title": "" }, { "docid": "65b1e81969db419d45569575f4855159", "score": "0.5965127", "text": "def test_import_ca_body_msp_ca_serialization(self):\n\n # Construct a json representation of a ImportCaBodyMspCa model\n import_ca_body_msp_ca_model_json = {}\n import_ca_body_msp_ca_model_json['name'] = 'org1CA'\n import_ca_body_msp_ca_model_json['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n # Construct a model instance of ImportCaBodyMspCa by calling from_dict on the json representation\n import_ca_body_msp_ca_model = ImportCaBodyMspCa.from_dict(import_ca_body_msp_ca_model_json)\n assert import_ca_body_msp_ca_model != False\n\n # Construct a model instance of ImportCaBodyMspCa by calling from_dict on the json representation\n import_ca_body_msp_ca_model_dict = ImportCaBodyMspCa.from_dict(import_ca_body_msp_ca_model_json).__dict__\n import_ca_body_msp_ca_model2 = ImportCaBodyMspCa(**import_ca_body_msp_ca_model_dict)\n\n # Verify the model instances are equivalent\n assert import_ca_body_msp_ca_model == import_ca_body_msp_ca_model2\n\n # Convert model instance back to dict and verify no loss of data\n import_ca_body_msp_ca_model_json2 = import_ca_body_msp_ca_model.to_dict()\n assert import_ca_body_msp_ca_model_json2 == import_ca_body_msp_ca_model_json", "title": "" }, { "docid": "e54b558064389304c411eb399e51d23b", "score": "0.59571123", "text": "def test_get_public_settings_response_filelogging_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n logging_settings_client_model = {} # LoggingSettingsClient\n logging_settings_client_model['enabled'] = True\n logging_settings_client_model['level'] = 'silly'\n logging_settings_client_model['unique_name'] = False\n\n logging_settings_server_model = {} # LoggingSettingsServer\n logging_settings_server_model['enabled'] = True\n logging_settings_server_model['level'] = 'silly'\n 
logging_settings_server_model['unique_name'] = False\n\n log_settings_response_model = {} # LogSettingsResponse\n log_settings_response_model['client'] = logging_settings_client_model\n log_settings_response_model['server'] = logging_settings_server_model\n\n # Construct a json representation of a GetPublicSettingsResponseFILELOGGING model\n get_public_settings_response_filelogging_model_json = {}\n get_public_settings_response_filelogging_model_json['server'] = log_settings_response_model\n get_public_settings_response_filelogging_model_json['client'] = log_settings_response_model\n\n # Construct a model instance of GetPublicSettingsResponseFILELOGGING by calling from_dict on the json representation\n get_public_settings_response_filelogging_model = GetPublicSettingsResponseFILELOGGING.from_dict(get_public_settings_response_filelogging_model_json)\n assert get_public_settings_response_filelogging_model != False\n\n # Construct a model instance of GetPublicSettingsResponseFILELOGGING by calling from_dict on the json representation\n get_public_settings_response_filelogging_model_dict = GetPublicSettingsResponseFILELOGGING.from_dict(get_public_settings_response_filelogging_model_json).__dict__\n get_public_settings_response_filelogging_model2 = GetPublicSettingsResponseFILELOGGING(**get_public_settings_response_filelogging_model_dict)\n\n # Verify the model instances are equivalent\n assert get_public_settings_response_filelogging_model == get_public_settings_response_filelogging_model2\n\n # Convert model instance back to dict and verify no loss of data\n get_public_settings_response_filelogging_model_json2 = get_public_settings_response_filelogging_model.to_dict()\n assert get_public_settings_response_filelogging_model_json2 == get_public_settings_response_filelogging_model_json", "title": "" }, { "docid": "9d34664b48257114756f30dcf016eae3", "score": "0.59541357", "text": "def test_crypto_object_enrollment_tlsca_serialization(self):\n\n # Construct a json representation of a CryptoObjectEnrollmentTlsca model\n crypto_object_enrollment_tlsca_model_json = {}\n crypto_object_enrollment_tlsca_model_json['host'] = 'n3a3ec3-myca.ibp.us-south.containers.appdomain.cloud'\n crypto_object_enrollment_tlsca_model_json['port'] = 7054\n crypto_object_enrollment_tlsca_model_json['name'] = 'tlsca'\n crypto_object_enrollment_tlsca_model_json['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n crypto_object_enrollment_tlsca_model_json['enroll_id'] = 'admin'\n crypto_object_enrollment_tlsca_model_json['enroll_secret'] = 'password'\n crypto_object_enrollment_tlsca_model_json['csr_hosts'] = ['testString']\n\n # Construct a model instance of CryptoObjectEnrollmentTlsca by calling from_dict on the json representation\n crypto_object_enrollment_tlsca_model = CryptoObjectEnrollmentTlsca.from_dict(crypto_object_enrollment_tlsca_model_json)\n assert crypto_object_enrollment_tlsca_model != False\n\n # Construct a model instance of CryptoObjectEnrollmentTlsca by calling from_dict on the json representation\n crypto_object_enrollment_tlsca_model_dict = CryptoObjectEnrollmentTlsca.from_dict(crypto_object_enrollment_tlsca_model_json).__dict__\n crypto_object_enrollment_tlsca_model2 = CryptoObjectEnrollmentTlsca(**crypto_object_enrollment_tlsca_model_dict)\n\n # Verify the model instances are equivalent\n assert crypto_object_enrollment_tlsca_model == crypto_object_enrollment_tlsca_model2\n\n # Convert model instance back to 
dict and verify no loss of data\n crypto_object_enrollment_tlsca_model_json2 = crypto_object_enrollment_tlsca_model.to_dict()\n assert crypto_object_enrollment_tlsca_model_json2 == crypto_object_enrollment_tlsca_model_json", "title": "" }, { "docid": "3680c12a7ed5bf7eea96612e73ce50af", "score": "0.5950333", "text": "def test_msp_crypto_field_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n msp_crypto_field_ca_model = {} # MspCryptoFieldCa\n msp_crypto_field_ca_model['name'] = 'ca'\n msp_crypto_field_ca_model['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n msp_crypto_field_tlsca_model = {} # MspCryptoFieldTlsca\n msp_crypto_field_tlsca_model['name'] = 'tlsca'\n msp_crypto_field_tlsca_model['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n msp_crypto_field_component_model = {} # MspCryptoFieldComponent\n msp_crypto_field_component_model['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n msp_crypto_field_component_model['ecert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n msp_crypto_field_component_model['admin_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n # Construct a json representation of a MspCryptoField model\n msp_crypto_field_model_json = {}\n msp_crypto_field_model_json['ca'] = msp_crypto_field_ca_model\n msp_crypto_field_model_json['tlsca'] = msp_crypto_field_tlsca_model\n msp_crypto_field_model_json['component'] = msp_crypto_field_component_model\n\n # Construct a model instance of MspCryptoField by calling from_dict on the json representation\n msp_crypto_field_model = MspCryptoField.from_dict(msp_crypto_field_model_json)\n assert msp_crypto_field_model != False\n\n # Construct a model instance of MspCryptoField by calling from_dict on the json representation\n msp_crypto_field_model_dict = MspCryptoField.from_dict(msp_crypto_field_model_json).__dict__\n msp_crypto_field_model2 = MspCryptoField(**msp_crypto_field_model_dict)\n\n # Verify the model instances are equivalent\n assert msp_crypto_field_model == msp_crypto_field_model2\n\n # Convert model instance back to dict and verify no loss of data\n msp_crypto_field_model_json2 = msp_crypto_field_model.to_dict()\n assert msp_crypto_field_model_json2 == msp_crypto_field_model_json", "title": "" }, { "docid": "9a9a509f4ca17ecbd7d9921d54c78b1d", "score": "0.59470433", "text": "def test_uid_proto_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n\n blob = _serialize(obj=uid, to_bytes=True)\n\n assert sy.serialize(uid, to_bytes=True) == blob\n assert sy.serialize(uid, to_bytes=True) == blob\n assert sy.serialize(uid, to_bytes=True) == blob", "title": "" }, { "docid": "4e1cc5d3838d6292e7d5ee5a186d2733", "score": "0.59424794", "text": "def test_delete_component_response_serialization(self):\n\n # Construct a json representation of a DeleteComponentResponse model\n delete_component_response_model_json = {}\n delete_component_response_model_json['message'] = 'deleted'\n delete_component_response_model_json['type'] = 'fabric-peer'\n 
delete_component_response_model_json['id'] = 'component1'\n delete_component_response_model_json['display_name'] = 'My Peer'\n\n # Construct a model instance of DeleteComponentResponse by calling from_dict on the json representation\n delete_component_response_model = DeleteComponentResponse.from_dict(delete_component_response_model_json)\n assert delete_component_response_model != False\n\n # Construct a model instance of DeleteComponentResponse by calling from_dict on the json representation\n delete_component_response_model_dict = DeleteComponentResponse.from_dict(delete_component_response_model_json).__dict__\n delete_component_response_model2 = DeleteComponentResponse(**delete_component_response_model_dict)\n\n # Verify the model instances are equivalent\n assert delete_component_response_model == delete_component_response_model2\n\n # Convert model instance back to dict and verify no loss of data\n delete_component_response_model_json2 = delete_component_response_model.to_dict()\n assert delete_component_response_model_json2 == delete_component_response_model_json", "title": "" }, { "docid": "0ed4295a1231e9b1f57ddfedcee4329d", "score": "0.5941814", "text": "def test_serialize_from_json(self): \n e = orders.Payer.serialize_from_json(self.sample_dict) \n self.assertEqual(e.json_data, self.sample_dict)", "title": "" }, { "docid": "a8b50eb6fc6e15a2b8e536661e12bc60", "score": "0.594008", "text": "def test_data_intg_flow_entity_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n data_intg_flow_lock_entity_model = {} # DataIntgFlowLockEntity\n data_intg_flow_lock_entity_model['data_intg_flow_id'] = 'testString'\n data_intg_flow_lock_entity_model['requester'] = 'testString'\n\n data_intg_flow_lock_metadata_model = {} # DataIntgFlowLockMetadata\n data_intg_flow_lock_metadata_model['alive'] = True\n\n data_intg_flow_lock_model = {} # DataIntgFlowLock\n data_intg_flow_lock_model['entity'] = data_intg_flow_lock_entity_model\n data_intg_flow_lock_model['metadata'] = data_intg_flow_lock_metadata_model\n\n asset_entity_rov_model = {} # AssetEntityROV\n asset_entity_rov_model['members'] = ['testString']\n asset_entity_rov_model['mode'] = 38\n\n # Construct a json representation of a DataIntgFlowEntity model\n data_intg_flow_entity_model_json = {}\n data_intg_flow_entity_model_json['data_intg_flow'] = { 'foo': 'bar' }\n data_intg_flow_entity_model_json['data_intg_subflow'] = { 'foo': 'bar' }\n data_intg_flow_entity_model_json['description'] = 'testString'\n data_intg_flow_entity_model_json['lock'] = data_intg_flow_lock_model\n data_intg_flow_entity_model_json['name'] = 'testString'\n data_intg_flow_entity_model_json['rov'] = asset_entity_rov_model\n data_intg_flow_entity_model_json['sub_type'] = 'testString'\n\n # Construct a model instance of DataIntgFlowEntity by calling from_dict on the json representation\n data_intg_flow_entity_model = DataIntgFlowEntity.from_dict(data_intg_flow_entity_model_json)\n assert data_intg_flow_entity_model != False\n\n # Construct a model instance of DataIntgFlowEntity by calling from_dict on the json representation\n data_intg_flow_entity_model_dict = DataIntgFlowEntity.from_dict(data_intg_flow_entity_model_json).__dict__\n data_intg_flow_entity_model2 = DataIntgFlowEntity(**data_intg_flow_entity_model_dict)\n\n # Verify the model instances are equivalent\n assert data_intg_flow_entity_model == data_intg_flow_entity_model2\n\n # Convert model instance back to dict and verify no loss of data\n 
data_intg_flow_entity_model_json2 = data_intg_flow_entity_model.to_dict()\n assert data_intg_flow_entity_model_json2 == data_intg_flow_entity_model_json", "title": "" }, { "docid": "71889627214a2878c0c9741143701cb7", "score": "0.5921036", "text": "def test_serialize_and_unserialize(self):\n import pickle\n import cPickle\n\n def serialize_and_unserialize_a_task_instance(cls_name, serialize):\n task_cls = manager.load_a_task_by_name(cls_name)\n task_instance = task_cls(day_arrow)\n\n task_instance_2 = serialize.loads(serialize.dumps(task_instance))\n # already set when in serialize.laod\n package_name_2 = getattr(task_instance_2, \"package_name\")\n self.assertEqual(package_name_2, \"project_A\")\n\n self.assertEqual(hash(task_instance), hash(task_instance_2))\n\n for ref_task_name_3 in task_cls._ref_tasks:\n self.assertEqual(\n getattr(task_instance, ref_task_name_3),\n getattr(task_instance_2, ref_task_name_3))\n self.assertEqual(\n hash(getattr(task_instance, ref_task_name_3 + \"_task\")),\n hash(getattr(task_instance_2, ref_task_name_3 + \"_task\")))\n\n serialize_and_unserialize_a_task_instance('ADay', pickle)\n serialize_and_unserialize_a_task_instance('ADay', cPickle)\n serialize_and_unserialize_a_task_instance('DDay', pickle)\n serialize_and_unserialize_a_task_instance('DDay', cPickle)", "title": "" }, { "docid": "42fab4ae2c65aed79caeb429ef109cc4", "score": "0.5913759", "text": "def test_notification_data_serialization(self):\n\n # Construct a json representation of a NotificationData model\n notification_data_model_json = {}\n notification_data_model_json['id'] = '60d84819bfa17adb4174ff3a1c52b5d6'\n notification_data_model_json['type'] = 'notification'\n notification_data_model_json['status'] = 'pending'\n notification_data_model_json['by'] = 'd******[email protected]'\n notification_data_model_json['message'] = 'Restarting application'\n notification_data_model_json['ts_display'] = 1537262855753\n\n # Construct a model instance of NotificationData by calling from_dict on the json representation\n notification_data_model = NotificationData.from_dict(notification_data_model_json)\n assert notification_data_model != False\n\n # Construct a model instance of NotificationData by calling from_dict on the json representation\n notification_data_model_dict = NotificationData.from_dict(notification_data_model_json).__dict__\n notification_data_model2 = NotificationData(**notification_data_model_dict)\n\n # Verify the model instances are equivalent\n assert notification_data_model == notification_data_model2\n\n # Convert model instance back to dict and verify no loss of data\n notification_data_model_json2 = notification_data_model.to_dict()\n assert notification_data_model_json2 == notification_data_model_json", "title": "" }, { "docid": "a17d0fe4e73054a8760f21f0906779ed", "score": "0.59084064", "text": "def test_crypto_object_enrollment_ca_serialization(self):\n\n # Construct a json representation of a CryptoObjectEnrollmentCa model\n crypto_object_enrollment_ca_model_json = {}\n crypto_object_enrollment_ca_model_json['host'] = 'n3a3ec3-myca.ibp.us-south.containers.appdomain.cloud'\n crypto_object_enrollment_ca_model_json['port'] = 7054\n crypto_object_enrollment_ca_model_json['name'] = 'ca'\n crypto_object_enrollment_ca_model_json['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n crypto_object_enrollment_ca_model_json['enroll_id'] = 'admin'\n 
crypto_object_enrollment_ca_model_json['enroll_secret'] = 'password'\n\n # Construct a model instance of CryptoObjectEnrollmentCa by calling from_dict on the json representation\n crypto_object_enrollment_ca_model = CryptoObjectEnrollmentCa.from_dict(crypto_object_enrollment_ca_model_json)\n assert crypto_object_enrollment_ca_model != False\n\n # Construct a model instance of CryptoObjectEnrollmentCa by calling from_dict on the json representation\n crypto_object_enrollment_ca_model_dict = CryptoObjectEnrollmentCa.from_dict(crypto_object_enrollment_ca_model_json).__dict__\n crypto_object_enrollment_ca_model2 = CryptoObjectEnrollmentCa(**crypto_object_enrollment_ca_model_dict)\n\n # Verify the model instances are equivalent\n assert crypto_object_enrollment_ca_model == crypto_object_enrollment_ca_model2\n\n # Convert model instance back to dict and verify no loss of data\n crypto_object_enrollment_ca_model_json2 = crypto_object_enrollment_ca_model.to_dict()\n assert crypto_object_enrollment_ca_model_json2 == crypto_object_enrollment_ca_model_json", "title": "" }, { "docid": "9674e9bb6782a15c6de33e3cc089a439", "score": "0.5903156", "text": "def test_fab_version_object_serialization(self):\n\n # Construct a json representation of a FabVersionObject model\n fab_version_object_model_json = {}\n fab_version_object_model_json['default'] = True\n fab_version_object_model_json['version'] = '1.4.6-2'\n fab_version_object_model_json['image'] = { 'foo': 'bar' }\n\n # Construct a model instance of FabVersionObject by calling from_dict on the json representation\n fab_version_object_model = FabVersionObject.from_dict(fab_version_object_model_json)\n assert fab_version_object_model != False\n\n # Construct a model instance of FabVersionObject by calling from_dict on the json representation\n fab_version_object_model_dict = FabVersionObject.from_dict(fab_version_object_model_json).__dict__\n fab_version_object_model2 = FabVersionObject(**fab_version_object_model_dict)\n\n # Verify the model instances are equivalent\n assert fab_version_object_model == fab_version_object_model2\n\n # Convert model instance back to dict and verify no loss of data\n fab_version_object_model_json2 = fab_version_object_model.to_dict()\n assert fab_version_object_model_json2 == fab_version_object_model_json", "title": "" }, { "docid": "5101cb767c9a918c0864933e86ae1016", "score": "0.58949256", "text": "def test_simple_deserialization():\n valid_data = FileLinkDataFactory(url='www.citrine.io', filename='materials.txt')\n file_link = FileLink.build(valid_data)\n assert file_link.url == 'www.citrine.io'\n assert file_link.filename == 'materials.txt'", "title": "" }, { "docid": "ce25f590e34c81b2fdb74ccd4d036999", "score": "0.58924353", "text": "def test_import_flow_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n data_import_error_model = {} # DataImportError\n data_import_error_model['description'] = 'testString'\n data_import_error_model['name'] = 'testString'\n data_import_error_model['stage_type'] = 'testString'\n data_import_error_model['type'] = 'unsupported_stage_type'\n\n import_flow_warning_model = {} # ImportFlowWarning\n import_flow_warning_model['description'] = 'testString'\n import_flow_warning_model['name'] = 'testString'\n import_flow_warning_model['type'] = 'unreleased_stage_type'\n\n # Construct a json representation of a ImportFlow model\n import_flow_model_json = {}\n import_flow_model_json['conflict_resolution_status'] = 'import_flow_renamed'\n 
import_flow_model_json['end_time'] = \"2019-01-01T12:00:00Z\"\n import_flow_model_json['errors'] = [data_import_error_model]\n import_flow_model_json['id'] = 'ccfdbbfd-810d-4f0e-b0a9-228c328a0136'\n import_flow_model_json['job_id'] = 'ccfaaafd-810d-4f0e-b0a9-228c328a0136'\n import_flow_model_json['job_name'] = 'Aggregator12_DataStage_1'\n import_flow_model_json['job_type'] = 'px_job'\n import_flow_model_json['name'] = 'cancel-reservation-job'\n import_flow_model_json['original_name'] = 'cancel-reservation-job'\n import_flow_model_json['ref_asset_id'] = 'ccfdbbfd-810d-4f0e-b0a9-228c328a0136'\n import_flow_model_json['status'] = 'completed'\n import_flow_model_json['type'] = 'px_job'\n import_flow_model_json['warnings'] = [import_flow_warning_model]\n\n # Construct a model instance of ImportFlow by calling from_dict on the json representation\n import_flow_model = ImportFlow.from_dict(import_flow_model_json)\n assert import_flow_model != False\n\n # Construct a model instance of ImportFlow by calling from_dict on the json representation\n import_flow_model_dict = ImportFlow.from_dict(import_flow_model_json).__dict__\n import_flow_model2 = ImportFlow(**import_flow_model_dict)\n\n # Verify the model instances are equivalent\n assert import_flow_model == import_flow_model2\n\n # Convert model instance back to dict and verify no loss of data\n import_flow_model_json2 = import_flow_model.to_dict()\n assert import_flow_model_json2 == import_flow_model_json", "title": "" }, { "docid": "45f071b5c297d8856f347a6bd896a8db", "score": "0.587788", "text": "def test_get_fabric_versions_response_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n fab_version_object_model = {} # FabVersionObject\n fab_version_object_model['default'] = True\n fab_version_object_model['version'] = '1.4.6-2'\n fab_version_object_model['image'] = { 'foo': 'bar' }\n\n fabric_version_dictionary_model = {} # FabricVersionDictionary\n fabric_version_dictionary_model['1.4.6-2'] = fab_version_object_model\n fabric_version_dictionary_model['2.1.0-0'] = fab_version_object_model\n fabric_version_dictionary_model['foo'] = { 'foo': 'bar' }\n\n get_fabric_versions_response_versions_model = {} # GetFabricVersionsResponseVersions\n get_fabric_versions_response_versions_model['ca'] = fabric_version_dictionary_model\n get_fabric_versions_response_versions_model['peer'] = fabric_version_dictionary_model\n get_fabric_versions_response_versions_model['orderer'] = fabric_version_dictionary_model\n\n # Construct a json representation of a GetFabricVersionsResponse model\n get_fabric_versions_response_model_json = {}\n get_fabric_versions_response_model_json['versions'] = get_fabric_versions_response_versions_model\n\n # Construct a model instance of GetFabricVersionsResponse by calling from_dict on the json representation\n get_fabric_versions_response_model = GetFabricVersionsResponse.from_dict(get_fabric_versions_response_model_json)\n assert get_fabric_versions_response_model != False\n\n # Construct a model instance of GetFabricVersionsResponse by calling from_dict on the json representation\n get_fabric_versions_response_model_dict = GetFabricVersionsResponse.from_dict(get_fabric_versions_response_model_json).__dict__\n get_fabric_versions_response_model2 = GetFabricVersionsResponse(**get_fabric_versions_response_model_dict)\n\n # Verify the model instances are equivalent\n assert get_fabric_versions_response_model == get_fabric_versions_response_model2\n\n # Convert model instance back 
to dict and verify no loss of data\n get_fabric_versions_response_model_json2 = get_fabric_versions_response_model.to_dict()\n assert get_fabric_versions_response_model_json2 == get_fabric_versions_response_model_json", "title": "" }, { "docid": "4b2b94cf7fdae608dcd26becaa62ef14", "score": "0.5877282", "text": "def test_generic_component_response_msp_component_serialization(self):\n\n # Construct a json representation of a GenericComponentResponseMspComponent model\n generic_component_response_msp_component_model_json = {}\n generic_component_response_msp_component_model_json['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n generic_component_response_msp_component_model_json['ecert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n generic_component_response_msp_component_model_json['admin_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n # Construct a model instance of GenericComponentResponseMspComponent by calling from_dict on the json representation\n generic_component_response_msp_component_model = GenericComponentResponseMspComponent.from_dict(generic_component_response_msp_component_model_json)\n assert generic_component_response_msp_component_model != False\n\n # Construct a model instance of GenericComponentResponseMspComponent by calling from_dict on the json representation\n generic_component_response_msp_component_model_dict = GenericComponentResponseMspComponent.from_dict(generic_component_response_msp_component_model_json).__dict__\n generic_component_response_msp_component_model2 = GenericComponentResponseMspComponent(**generic_component_response_msp_component_model_dict)\n\n # Verify the model instances are equivalent\n assert generic_component_response_msp_component_model == generic_component_response_msp_component_model2\n\n # Convert model instance back to dict and verify no loss of data\n generic_component_response_msp_component_model_json2 = generic_component_response_msp_component_model.to_dict()\n assert generic_component_response_msp_component_model_json2 == generic_component_response_msp_component_model_json", "title": "" }, { "docid": "a66903e2a94464362bbe91654d3d4e24", "score": "0.58767885", "text": "def test_serialization(valid_data):\n process_run: ProcessRun = ProcessRun.build(valid_data)\n serialized = process_run.dump()\n assert serialized == valid_data", "title": "" }, { "docid": "5dd20ebd9ac1ba9c2e79d6178abf3709", "score": "0.5876077", "text": "def test_pipelines_serialization(self):\n\n # Construct a json representation of a Pipelines model\n pipelines_model_json = {}\n pipelines_model_json['app_data'] = { 'foo': 'bar' }\n pipelines_model_json['description'] = 'A test DataStage flow.'\n pipelines_model_json['id'] = 'fa1b859a-d592-474d-b56c-2137e4efa4bc'\n pipelines_model_json['name'] = 'ContainerC1'\n pipelines_model_json['nodes'] = [{'app_data':{'ui_data':{'description':'Produce a set of mock data based on the specified metadata', 'image':'/data-intg/flows/graphics/palette/PxRowGenerator.svg', 'label':'Row_Generator_1', 'x_pos':108, 'y_pos':162}}, 'id':'9fc2ec49-87ed-49c7-bdfc-abb06a46af37', 'op':'PxRowGenerator', 'outputs':[{'app_data':{'datastage':{'is_source_of_link':'73a5fb2c-f499-4c75-a8a7-71cea90f5105'}, 'ui_data':{'label':'outPort'}}, 
'id':'3d01fe66-e675-4e7f-ad7b-3ba9a9cff30d', 'parameters':{'records':10}, 'schema_ref':'0e04b1b8-60c2-4b36-bae6-d0c7ae03dd8d'}], 'parameters':{'input_count':0, 'output_count':1}, 'type':'binding'}, {'app_data':{'ui_data':{'description':'Print row column values to either the job log or to a separate output link', 'image':'/data-intg/flows/graphics/palette/PxPeek.svg', 'label':'Peek_1', 'x_pos':342, 'y_pos':162}}, 'id':'4195b012-d3e7-4f74-8099-e7b23ec6ebb9', 'inputs':[{'app_data':{'ui_data':{'label':'inPort'}}, 'id':'c4195b34-8b4a-473f-b987-fa6d028f3968', 'links':[{'app_data':{'ui_data':{'decorations':[{'class_name':'', 'hotspot':False, 'id':'Link_1', 'label':'Link_1', 'outline':True, 'path':'', 'position':'middle'}]}}, 'id':'73a5fb2c-f499-4c75-a8a7-71cea90f5105', 'link_name':'Link_1', 'node_id_ref':'9fc2ec49-87ed-49c7-bdfc-abb06a46af37', 'port_id_ref':'3d01fe66-e675-4e7f-ad7b-3ba9a9cff30d', 'type_attr':'PRIMARY'}], 'schema_ref':'0e04b1b8-60c2-4b36-bae6-d0c7ae03dd8d'}], 'op':'PxPeek', 'outputs':[{'app_data':{'ui_data':{'label':'outPort'}}, 'id':''}], 'parameters':{'all':' ', 'columns':' ', 'dataset':' ', 'input_count':1, 'name':'name', 'nrecs':10, 'output_count':0, 'selection':' '}, 'type':'execution_node'}]\n pipelines_model_json['runtime_ref'] = 'pxOsh'\n\n # Construct a model instance of Pipelines by calling from_dict on the json representation\n pipelines_model = Pipelines.from_dict(pipelines_model_json)\n assert pipelines_model != False\n\n # Construct a model instance of Pipelines by calling from_dict on the json representation\n pipelines_model_dict = Pipelines.from_dict(pipelines_model_json).__dict__\n pipelines_model2 = Pipelines(**pipelines_model_dict)\n\n # Verify the model instances are equivalent\n assert pipelines_model == pipelines_model2\n\n # Convert model instance back to dict and verify no loss of data\n pipelines_model_json2 = pipelines_model.to_dict()\n assert pipelines_model_json2 == pipelines_model_json", "title": "" }, { "docid": "97f67a89f57d91f17061d2bcf9b4d56d", "score": "0.5868087", "text": "def test_get_public_settings_response_fabriccapabilities_serialization(self):\n\n # Construct a json representation of a GetPublicSettingsResponseFABRICCAPABILITIES model\n get_public_settings_response_fabriccapabilities_model_json = {}\n get_public_settings_response_fabriccapabilities_model_json['application'] = ['V1_1']\n get_public_settings_response_fabriccapabilities_model_json['channel'] = ['V1_1']\n get_public_settings_response_fabriccapabilities_model_json['orderer'] = ['V1_1']\n\n # Construct a model instance of GetPublicSettingsResponseFABRICCAPABILITIES by calling from_dict on the json representation\n get_public_settings_response_fabriccapabilities_model = GetPublicSettingsResponseFABRICCAPABILITIES.from_dict(get_public_settings_response_fabriccapabilities_model_json)\n assert get_public_settings_response_fabriccapabilities_model != False\n\n # Construct a model instance of GetPublicSettingsResponseFABRICCAPABILITIES by calling from_dict on the json representation\n get_public_settings_response_fabriccapabilities_model_dict = GetPublicSettingsResponseFABRICCAPABILITIES.from_dict(get_public_settings_response_fabriccapabilities_model_json).__dict__\n get_public_settings_response_fabriccapabilities_model2 = GetPublicSettingsResponseFABRICCAPABILITIES(**get_public_settings_response_fabriccapabilities_model_dict)\n\n # Verify the model instances are equivalent\n assert get_public_settings_response_fabriccapabilities_model == 
get_public_settings_response_fabriccapabilities_model2\n\n # Convert model instance back to dict and verify no loss of data\n get_public_settings_response_fabriccapabilities_model_json2 = get_public_settings_response_fabriccapabilities_model.to_dict()\n assert get_public_settings_response_fabriccapabilities_model_json2 == get_public_settings_response_fabriccapabilities_model_json", "title": "" }, { "docid": "429f286bfc6c94f924a77326c5a98e37", "score": "0.5865179", "text": "def test_orderer_response_resources_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n generic_resources_requests_model = {} # GenericResourcesRequests\n generic_resources_requests_model['cpu'] = '100m'\n generic_resources_requests_model['memory'] = '256M'\n\n generic_resource_limits_model = {} # GenericResourceLimits\n generic_resource_limits_model['cpu'] = '8000m'\n generic_resource_limits_model['memory'] = '16384M'\n\n generic_resources_model = {} # GenericResources\n generic_resources_model['requests'] = generic_resources_requests_model\n generic_resources_model['limits'] = generic_resource_limits_model\n\n # Construct a json representation of a OrdererResponseResources model\n orderer_response_resources_model_json = {}\n orderer_response_resources_model_json['orderer'] = generic_resources_model\n orderer_response_resources_model_json['proxy'] = generic_resources_model\n\n # Construct a model instance of OrdererResponseResources by calling from_dict on the json representation\n orderer_response_resources_model = OrdererResponseResources.from_dict(orderer_response_resources_model_json)\n assert orderer_response_resources_model != False\n\n # Construct a model instance of OrdererResponseResources by calling from_dict on the json representation\n orderer_response_resources_model_dict = OrdererResponseResources.from_dict(orderer_response_resources_model_json).__dict__\n orderer_response_resources_model2 = OrdererResponseResources(**orderer_response_resources_model_dict)\n\n # Verify the model instances are equivalent\n assert orderer_response_resources_model == orderer_response_resources_model2\n\n # Convert model instance back to dict and verify no loss of data\n orderer_response_resources_model_json2 = orderer_response_resources_model.to_dict()\n assert orderer_response_resources_model_json2 == orderer_response_resources_model_json", "title": "" }, { "docid": "ad592fa24f8fd69fa7cab43de9a1feef", "score": "0.58461756", "text": "async def test_serializer() -> None:\n client = MockRegistryApi(body=json.dumps({\"id\": 1}).encode(\"utf-8\"))\n schema1 = {\n \"type\": \"record\",\n \"name\": \"schema1\",\n \"namespace\": \"test-schemas\",\n \"fields\": [\n {\"name\": \"a\", \"type\": \"int\"},\n {\"name\": \"b\", \"type\": \"string\"},\n ],\n }\n serializer = await Serializer.register(registry=client, schema=schema1)\n assert serializer.id == 1\n\n message = {\"a\": 1, \"b\": \"helloworld\"}\n data = serializer(message)\n assert isinstance(data, bytes)\n\n # Check that the message can be deserialized\n # First, the wire format prefix\n unpacked_id, unpacked_body = unpack_wire_format_data(data)\n assert unpacked_id == serializer.id\n # Second, the message\n unpacked_schema = client.schema_cache[unpacked_id]\n message_fh = BytesIO(unpacked_body)\n message_fh.seek(0)\n unpacked_message = fastavro.schemaless_reader(message_fh, unpacked_schema)\n assert unpacked_message == message", "title": "" }, { "docid": "e6370326a0e4aebb4a8c87ab5c66bdea", "score": "0.5837393", "text": 
"def deserialize(self, data):", "title": "" }, { "docid": "dc8fd18d8d8b5be8b931e1b35c321a8f", "score": "0.5837044", "text": "def test_encode_decode_with_out_schema(self):\n message = {\n \"test\": \"body\"\n }\n schmea = {\n \"type\": \"record\",\n \"namespace\": \"betacore\",\n \"name\": \"Test entity\",\n \"fields\": [\n {\"name\": \"test\", \"type\": \"string\"}\n ]\n }\n\n encode: bytes = self.serializer.encode(message, schema=schmea)\n actual: dict = self.serializer.decode(encode, schema=schmea)\n self.assertDictEqual(message, actual)", "title": "" }, { "docid": "adbab9c0d38a957e62cb8041e89be17a", "score": "0.58251446", "text": "def test_deserialize_true(self):\n self.assert_deserialize_equals(True, True)", "title": "" }, { "docid": "11e2ef9320d46fc9e7c96eeb70649038", "score": "0.5821848", "text": "def test_crypto_object_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n crypto_enrollment_component_model = {} # CryptoEnrollmentComponent\n crypto_enrollment_component_model['admincerts'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n crypto_object_enrollment_ca_model = {} # CryptoObjectEnrollmentCa\n crypto_object_enrollment_ca_model['host'] = 'n3a3ec3-myca.ibp.us-south.containers.appdomain.cloud'\n crypto_object_enrollment_ca_model['port'] = 7054\n crypto_object_enrollment_ca_model['name'] = 'ca'\n crypto_object_enrollment_ca_model['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n crypto_object_enrollment_ca_model['enroll_id'] = 'admin'\n crypto_object_enrollment_ca_model['enroll_secret'] = 'password'\n\n crypto_object_enrollment_tlsca_model = {} # CryptoObjectEnrollmentTlsca\n crypto_object_enrollment_tlsca_model['host'] = 'n3a3ec3-myca.ibp.us-south.containers.appdomain.cloud'\n crypto_object_enrollment_tlsca_model['port'] = 7054\n crypto_object_enrollment_tlsca_model['name'] = 'tlsca'\n crypto_object_enrollment_tlsca_model['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n crypto_object_enrollment_tlsca_model['enroll_id'] = 'admin'\n crypto_object_enrollment_tlsca_model['enroll_secret'] = 'password'\n crypto_object_enrollment_tlsca_model['csr_hosts'] = ['testString']\n\n crypto_object_enrollment_model = {} # CryptoObjectEnrollment\n crypto_object_enrollment_model['component'] = crypto_enrollment_component_model\n crypto_object_enrollment_model['ca'] = crypto_object_enrollment_ca_model\n crypto_object_enrollment_model['tlsca'] = crypto_object_enrollment_tlsca_model\n\n client_auth_model = {} # ClientAuth\n client_auth_model['type'] = 'noclientcert'\n client_auth_model['tls_certs'] = ['testString']\n\n msp_crypto_comp_model = {} # MspCryptoComp\n msp_crypto_comp_model['ekey'] = 'testString'\n msp_crypto_comp_model['ecert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n msp_crypto_comp_model['admin_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n msp_crypto_comp_model['tls_key'] = 'testString'\n msp_crypto_comp_model['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n 
msp_crypto_comp_model['client_auth'] = client_auth_model\n\n msp_crypto_ca_model = {} # MspCryptoCa\n msp_crypto_ca_model['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n msp_crypto_ca_model['ca_intermediate_certs'] = ['testString']\n\n crypto_object_msp_model = {} # CryptoObjectMsp\n crypto_object_msp_model['component'] = msp_crypto_comp_model\n crypto_object_msp_model['ca'] = msp_crypto_ca_model\n crypto_object_msp_model['tlsca'] = msp_crypto_ca_model\n\n # Construct a json representation of a CryptoObject model\n crypto_object_model_json = {}\n crypto_object_model_json['enrollment'] = crypto_object_enrollment_model\n crypto_object_model_json['msp'] = crypto_object_msp_model\n\n # Construct a model instance of CryptoObject by calling from_dict on the json representation\n crypto_object_model = CryptoObject.from_dict(crypto_object_model_json)\n assert crypto_object_model != False\n\n # Construct a model instance of CryptoObject by calling from_dict on the json representation\n crypto_object_model_dict = CryptoObject.from_dict(crypto_object_model_json).__dict__\n crypto_object_model2 = CryptoObject(**crypto_object_model_dict)\n\n # Verify the model instances are equivalent\n assert crypto_object_model == crypto_object_model2\n\n # Convert model instance back to dict and verify no loss of data\n crypto_object_model_json2 = crypto_object_model.to_dict()\n assert crypto_object_model_json2 == crypto_object_model_json", "title": "" }, { "docid": "0ebdcc801b3eb09d6f48569b2493145a", "score": "0.5820966", "text": "def test_import_ca_body_msp_tlsca_serialization(self):\n\n # Construct a json representation of a ImportCaBodyMspTlsca model\n import_ca_body_msp_tlsca_model_json = {}\n import_ca_body_msp_tlsca_model_json['name'] = 'org1tlsCA'\n import_ca_body_msp_tlsca_model_json['root_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n\n # Construct a model instance of ImportCaBodyMspTlsca by calling from_dict on the json representation\n import_ca_body_msp_tlsca_model = ImportCaBodyMspTlsca.from_dict(import_ca_body_msp_tlsca_model_json)\n assert import_ca_body_msp_tlsca_model != False\n\n # Construct a model instance of ImportCaBodyMspTlsca by calling from_dict on the json representation\n import_ca_body_msp_tlsca_model_dict = ImportCaBodyMspTlsca.from_dict(import_ca_body_msp_tlsca_model_json).__dict__\n import_ca_body_msp_tlsca_model2 = ImportCaBodyMspTlsca(**import_ca_body_msp_tlsca_model_dict)\n\n # Verify the model instances are equivalent\n assert import_ca_body_msp_tlsca_model == import_ca_body_msp_tlsca_model2\n\n # Convert model instance back to dict and verify no loss of data\n import_ca_body_msp_tlsca_model_json2 = import_ca_body_msp_tlsca_model.to_dict()\n assert import_ca_body_msp_tlsca_model_json2 == import_ca_body_msp_tlsca_model_json", "title": "" }, { "docid": "b03e11fbcbe2bc07b385fd607d9b8d59", "score": "0.5820653", "text": "def test_delete_multi_components_response_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n delete_component_response_model = {} # DeleteComponentResponse\n delete_component_response_model['message'] = 'deleted'\n delete_component_response_model['type'] = 'fabric-peer'\n delete_component_response_model['id'] = 'component1'\n delete_component_response_model['display_name'] = 'My Peer'\n\n # Construct a json 
representation of a DeleteMultiComponentsResponse model\n delete_multi_components_response_model_json = {}\n delete_multi_components_response_model_json['deleted'] = [delete_component_response_model]\n\n # Construct a model instance of DeleteMultiComponentsResponse by calling from_dict on the json representation\n delete_multi_components_response_model = DeleteMultiComponentsResponse.from_dict(delete_multi_components_response_model_json)\n assert delete_multi_components_response_model != False\n\n # Construct a model instance of DeleteMultiComponentsResponse by calling from_dict on the json representation\n delete_multi_components_response_model_dict = DeleteMultiComponentsResponse.from_dict(delete_multi_components_response_model_json).__dict__\n delete_multi_components_response_model2 = DeleteMultiComponentsResponse(**delete_multi_components_response_model_dict)\n\n # Verify the model instances are equivalent\n assert delete_multi_components_response_model == delete_multi_components_response_model2\n\n # Convert model instance back to dict and verify no loss of data\n delete_multi_components_response_model_json2 = delete_multi_components_response_model.to_dict()\n assert delete_multi_components_response_model_json2 == delete_multi_components_response_model_json", "title": "" }, { "docid": "8e54f752625184eb105c0eacb3b725a3", "score": "0.58183885", "text": "def test_generic_component_response_resources_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n generic_resources_requests_model = {} # GenericResourcesRequests\n generic_resources_requests_model['cpu'] = '100m'\n generic_resources_requests_model['memory'] = '256M'\n\n generic_resource_limits_model = {} # GenericResourceLimits\n generic_resource_limits_model['cpu'] = '8000m'\n generic_resource_limits_model['memory'] = '16384M'\n\n generic_resources_model = {} # GenericResources\n generic_resources_model['requests'] = generic_resources_requests_model\n generic_resources_model['limits'] = generic_resource_limits_model\n\n # Construct a json representation of a GenericComponentResponseResources model\n generic_component_response_resources_model_json = {}\n generic_component_response_resources_model_json['ca'] = generic_resources_model\n generic_component_response_resources_model_json['peer'] = generic_resources_model\n generic_component_response_resources_model_json['orderer'] = generic_resources_model\n generic_component_response_resources_model_json['proxy'] = generic_resources_model\n generic_component_response_resources_model_json['statedb'] = generic_resources_model\n\n # Construct a model instance of GenericComponentResponseResources by calling from_dict on the json representation\n generic_component_response_resources_model = GenericComponentResponseResources.from_dict(generic_component_response_resources_model_json)\n assert generic_component_response_resources_model != False\n\n # Construct a model instance of GenericComponentResponseResources by calling from_dict on the json representation\n generic_component_response_resources_model_dict = GenericComponentResponseResources.from_dict(generic_component_response_resources_model_json).__dict__\n generic_component_response_resources_model2 = GenericComponentResponseResources(**generic_component_response_resources_model_dict)\n\n # Verify the model instances are equivalent\n assert generic_component_response_resources_model == generic_component_response_resources_model2\n\n # Convert model instance back to dict and verify no 
loss of data\n generic_component_response_resources_model_json2 = generic_component_response_resources_model.to_dict()\n assert generic_component_response_resources_model_json2 == generic_component_response_resources_model_json", "title": "" }, { "docid": "93e5d1d72c36abd0bf2064bbcc78baf8", "score": "0.581821", "text": "def test_nested_serialization(self):\n expected = {\n 'first_name': 'john',\n 'last_name': 'doe',\n 'age': 42,\n 'siblings': [\n {\n 'first_name': 'jane',\n 'last_name': 'doe',\n 'age': 44,\n 'partner': {\n 'first_name': 'fred',\n 'last_name': 'bloggs',\n 'age': 41,\n }\n },\n {\n 'first_name': 'emily',\n 'last_name': 'doe',\n 'age': 37,\n }\n ]\n }\n self.assertEquals(NestedObjectSerializer().serialize('python', self.obj), expected)", "title": "" }, { "docid": "d1e823d12bcb667a91806ed72067a76c", "score": "0.5812898", "text": "def test_create_ca_body_resources_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n resource_requests_model = {} # ResourceRequests\n resource_requests_model['cpu'] = '100m'\n resource_requests_model['memory'] = '256MiB'\n\n resource_limits_model = {} # ResourceLimits\n resource_limits_model['cpu'] = '100m'\n resource_limits_model['memory'] = '256MiB'\n\n resource_object_model = {} # ResourceObject\n resource_object_model['requests'] = resource_requests_model\n resource_object_model['limits'] = resource_limits_model\n\n # Construct a json representation of a CreateCaBodyResources model\n create_ca_body_resources_model_json = {}\n create_ca_body_resources_model_json['ca'] = resource_object_model\n\n # Construct a model instance of CreateCaBodyResources by calling from_dict on the json representation\n create_ca_body_resources_model = CreateCaBodyResources.from_dict(create_ca_body_resources_model_json)\n assert create_ca_body_resources_model != False\n\n # Construct a model instance of CreateCaBodyResources by calling from_dict on the json representation\n create_ca_body_resources_model_dict = CreateCaBodyResources.from_dict(create_ca_body_resources_model_json).__dict__\n create_ca_body_resources_model2 = CreateCaBodyResources(**create_ca_body_resources_model_dict)\n\n # Verify the model instances are equivalent\n assert create_ca_body_resources_model == create_ca_body_resources_model2\n\n # Convert model instance back to dict and verify no loss of data\n create_ca_body_resources_model_json2 = create_ca_body_resources_model.to_dict()\n assert create_ca_body_resources_model_json2 == create_ca_body_resources_model_json", "title": "" }, { "docid": "056c6f59e74389a6c72b1f2797034d52", "score": "0.5811671", "text": "def test_config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_serialization(self):\n\n # Construct a json representation of a ConfigPeerGossipPvtDataImplicitCollectionDisseminationPolicy model\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model_json = {}\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model_json['requiredPeerCount'] = 0\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model_json['maxPeerCount'] = 1\n\n # Construct a model instance of ConfigPeerGossipPvtDataImplicitCollectionDisseminationPolicy by calling from_dict on the json representation\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model = ConfigPeerGossipPvtDataImplicitCollectionDisseminationPolicy.from_dict(config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model_json)\n assert 
config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model != False\n\n # Construct a model instance of ConfigPeerGossipPvtDataImplicitCollectionDisseminationPolicy by calling from_dict on the json representation\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model_dict = ConfigPeerGossipPvtDataImplicitCollectionDisseminationPolicy.from_dict(config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model_json).__dict__\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model2 = ConfigPeerGossipPvtDataImplicitCollectionDisseminationPolicy(**config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model_dict)\n\n # Verify the model instances are equivalent\n assert config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model == config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model2\n\n # Convert model instance back to dict and verify no loss of data\n config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model_json2 = config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model.to_dict()\n assert config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model_json2 == config_peer_gossip_pvt_data_implicit_collection_dissemination_policy_model_json", "title": "" }, { "docid": "d0098bd05e5b55ca058156127aa54948", "score": "0.579568", "text": "def test_msp_crypto_comp_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n client_auth_model = {} # ClientAuth\n client_auth_model['type'] = 'noclientcert'\n client_auth_model['tls_certs'] = ['testString']\n\n # Construct a json representation of a MspCryptoComp model\n msp_crypto_comp_model_json = {}\n msp_crypto_comp_model_json['ekey'] = 'testString'\n msp_crypto_comp_model_json['ecert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n msp_crypto_comp_model_json['admin_certs'] = ['LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=']\n msp_crypto_comp_model_json['tls_key'] = 'testString'\n msp_crypto_comp_model_json['tls_cert'] = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkNlcnQgZGF0YSB3b3VsZCBiZSBoZXJlIGlmIHRoaXMgd2FzIHJlYWwKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo='\n msp_crypto_comp_model_json['client_auth'] = client_auth_model\n\n # Construct a model instance of MspCryptoComp by calling from_dict on the json representation\n msp_crypto_comp_model = MspCryptoComp.from_dict(msp_crypto_comp_model_json)\n assert msp_crypto_comp_model != False\n\n # Construct a model instance of MspCryptoComp by calling from_dict on the json representation\n msp_crypto_comp_model_dict = MspCryptoComp.from_dict(msp_crypto_comp_model_json).__dict__\n msp_crypto_comp_model2 = MspCryptoComp(**msp_crypto_comp_model_dict)\n\n # Verify the model instances are equivalent\n assert msp_crypto_comp_model == msp_crypto_comp_model2\n\n # Convert model instance back to dict and verify no loss of data\n msp_crypto_comp_model_json2 = msp_crypto_comp_model.to_dict()\n assert msp_crypto_comp_model_json2 == msp_crypto_comp_model_json", "title": "" }, { "docid": "16daa14993b95c83e232d9c4a59ffb08", "score": "0.5784885", "text": "def test_serialize_from_json(self): \n e = orders.OrderPayment.serialize_from_json(self.sample_dict) \n self.assertEqual(e.json_data, self.sample_dict)", "title": "" }, { "docid": 
"7932a197b0f520ffbe7a6282fb15f35a", "score": "0.5784277", "text": "def test_serialize_from_json(self): \n e = orders.Card.serialize_from_json(self.sample_dict) \n self.assertEqual(e.json_data, self.sample_dict)", "title": "" }, { "docid": "5a7310d015817deba49fda3e2d132bf1", "score": "0.57719886", "text": "def test_data_intg_flow_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n data_intg_flow_lock_entity_model = {} # DataIntgFlowLockEntity\n data_intg_flow_lock_entity_model['data_intg_flow_id'] = 'testString'\n data_intg_flow_lock_entity_model['requester'] = 'testString'\n\n data_intg_flow_lock_metadata_model = {} # DataIntgFlowLockMetadata\n data_intg_flow_lock_metadata_model['alive'] = True\n\n data_intg_flow_lock_model = {} # DataIntgFlowLock\n data_intg_flow_lock_model['entity'] = data_intg_flow_lock_entity_model\n data_intg_flow_lock_model['metadata'] = data_intg_flow_lock_metadata_model\n\n asset_entity_rov_model = {} # AssetEntityROV\n asset_entity_rov_model['members'] = ['testString']\n asset_entity_rov_model['mode'] = 38\n\n data_intg_flow_entity_model = {} # DataIntgFlowEntity\n data_intg_flow_entity_model['data_intg_flow'] = { 'foo': 'bar' }\n data_intg_flow_entity_model['data_intg_subflow'] = { 'foo': 'bar' }\n data_intg_flow_entity_model['description'] = 'testString'\n data_intg_flow_entity_model['lock'] = data_intg_flow_lock_model\n data_intg_flow_entity_model['name'] = 'testString'\n data_intg_flow_entity_model['rov'] = asset_entity_rov_model\n data_intg_flow_entity_model['sub_type'] = 'testString'\n\n asset_system_metadata_usage_model = {} # AssetSystemMetadataUsage\n asset_system_metadata_usage_model['access_count'] = 38\n asset_system_metadata_usage_model['last_access_time'] = \"2019-01-01T12:00:00Z\"\n asset_system_metadata_usage_model['last_accessor_id'] = 'testString'\n asset_system_metadata_usage_model['last_modification_time'] = \"2019-01-01T12:00:00Z\"\n asset_system_metadata_usage_model['last_modifier_id'] = 'testString'\n\n asset_system_metadata_model = {} # AssetSystemMetadata\n asset_system_metadata_model['asset_id'] = 'testString'\n asset_system_metadata_model['asset_type'] = 'testString'\n asset_system_metadata_model['catalog_id'] = 'testString'\n asset_system_metadata_model['create_time'] = \"2019-01-01T12:00:00Z\"\n asset_system_metadata_model['creator_id'] = 'testString'\n asset_system_metadata_model['description'] = 'testString'\n asset_system_metadata_model['href'] = 'testString'\n asset_system_metadata_model['name'] = 'testString'\n asset_system_metadata_model['origin_country'] = 'testString'\n asset_system_metadata_model['project_id'] = 'testString'\n asset_system_metadata_model['resource_key'] = 'testString'\n asset_system_metadata_model['size'] = 38\n asset_system_metadata_model['source_system'] = {}\n asset_system_metadata_model['tags'] = ['testString']\n asset_system_metadata_model['usage'] = asset_system_metadata_usage_model\n\n # Construct a json representation of a DataIntgFlow model\n data_intg_flow_model_json = {}\n data_intg_flow_model_json['attachments'] = [{ 'foo': 'bar' }]\n data_intg_flow_model_json['entity'] = data_intg_flow_entity_model\n data_intg_flow_model_json['metadata'] = asset_system_metadata_model\n\n # Construct a model instance of DataIntgFlow by calling from_dict on the json representation\n data_intg_flow_model = DataIntgFlow.from_dict(data_intg_flow_model_json)\n assert data_intg_flow_model != False\n\n # Construct a model instance of DataIntgFlow by calling 
from_dict on the json representation\n data_intg_flow_model_dict = DataIntgFlow.from_dict(data_intg_flow_model_json).__dict__\n data_intg_flow_model2 = DataIntgFlow(**data_intg_flow_model_dict)\n\n # Verify the model instances are equivalent\n assert data_intg_flow_model == data_intg_flow_model2\n\n # Convert model instance back to dict and verify no loss of data\n data_intg_flow_model_json2 = data_intg_flow_model.to_dict()\n assert data_intg_flow_model_json2 == data_intg_flow_model_json", "title": "" }, { "docid": "6c00b09856dd1d4a614f8f59b1fc08aa", "score": "0.57696956", "text": "def test_returns_serializable(self, configuration):\n storage_server_deferred = storage_server.get_storage_server(\n configuration,\n get_anonymous_storage_server,\n )\n broker = Broker(None)\n broker.makeConnection(StringTransport())\n self.expectThat(\n storage_server_deferred,\n succeeded(\n AfterPreprocessing(\n lambda ann: broker.send(ann.storage_server),\n Always(),\n ),\n ),\n )", "title": "" }, { "docid": "83db0b4729eab2a970cc214ead2a63f6", "score": "0.57668537", "text": "def test_pickle():", "title": "" }, { "docid": "b58309796b229e76b8714338d947df29", "score": "0.5760416", "text": "def test_serialize_from_json(self): \n e = orders.Item.serialize_from_json(self.sample_dict) \n self.assertEqual(e.json_data, self.sample_dict)", "title": "" } ]
db63fbbf0ad44a9f2cdc9d0ff9e6bcac
return a directory path
[ { "docid": "05b8149e481899b172e6077f2c01d5a0", "score": "0.6978848", "text": "def get_dir(path):\n return Dirpath.get_instance(path)", "title": "" } ]
[ { "docid": "3deae8a2787265d785ffa279f672b4d5", "score": "0.8039122", "text": "def dirPath(self):\n\t\treturn os.path.dirname( self.path ) + '/'", "title": "" }, { "docid": "21032099a19e8ee44262d34373fb2be1", "score": "0.7886094", "text": "def get_directory_path(self):\n return self._dir_path", "title": "" }, { "docid": "ecb6c1fd9c72eb7421f1d5f4e0c5f9c7", "score": "0.78383726", "text": "def directory(self):\n # The built-in function #\n directory = os.path.dirname(os.path.dirname(self.path))\n # Maybe we need to go the absolute path way #\n if not directory:\n directory = os.path.dirname(os.path.dirname(self.absolute_path))\n # Return #\n return autopaths.dir_path.DirectoryPath(directory)", "title": "" }, { "docid": "96228f1fc6ddb9d6dc922cfcd23dac7e", "score": "0.7569742", "text": "def get_directory():\n return datapath", "title": "" }, { "docid": "02bc2770b5e492921ad00b049225e266", "score": "0.7501923", "text": "def get_file_directory(path):\n\n destination = path.split(os.path.sep)\n del (destination[len(destination) - 1])\n return os.path.sep.join(destination)", "title": "" }, { "docid": "cec6cbdfd4e1556340402e6d0404ad1d", "score": "0.7468503", "text": "def getDir(self):\n path = self.dir+(\"%4d\"%self.runnumber)+\"/\"\n if not os.path.exists(path):\n os.makedirs(path)\n return path", "title": "" }, { "docid": "320812860c47b829510543b7744ee2b0", "score": "0.7449165", "text": "def getDirectory(filePath):\n if os.path.isdir(filePath):\n return filePath\n return os.path.dirname(filePath)", "title": "" }, { "docid": "7d54eb8328afdaaf21a5ddc95ad7acc6", "score": "0.739863", "text": "def dirpath(self):\n return self._dirpath", "title": "" }, { "docid": "d5cbd1617b02d9e89fb50076f8024a20", "score": "0.7291728", "text": "def make_path(self, directory=None):\n\n if not directory:\n directory = ''\n\n return os.path.join(directory, self.filename)", "title": "" }, { "docid": "5ccaa59755a0d040b7479a0de6d97a83", "score": "0.72545344", "text": "def get_path(self, dir=None):\n if dir == None:\n dir = self.out_dir\n\n if self.day != None:\n out_dir = \"%s/%s\" % (dir, ARCHIVE_DIR)\n return \"%s/%s/%s/%s/\" % (out_dir, self.year, self.month, self.day)\n else:\n return \"%s/\" % dir", "title": "" }, { "docid": "10926a40b73c32ebe911bd110a30b34a", "score": "0.7237556", "text": "def getDirectory(self):\n pass", "title": "" }, { "docid": "56aeb838c2f02d3c4d43022fffd8ecc5", "score": "0.7222282", "text": "def dirname(self) -> str:\n pass", "title": "" }, { "docid": "44fe6cda97c0aa10621c62de97c5e8a5", "score": "0.71947753", "text": "def directory(path):\n if not os.path.exists(path):\n raise TypeError(\"%s does not exist\" % path)\n if not os.path.isdir(path):\n raise TypeError(\"%s is not a directory\" % path)\n return path", "title": "" }, { "docid": "1b69254d17dd84e4a8642b31bd68c7a8", "score": "0.7192619", "text": "def dir_path():\n abs_path = os.path.abspath(__file__)\n _dir = os.path.dirname(abs_path)\n return _dir", "title": "" }, { "docid": "65ce6f2357e48280f0b7dbcb217f3993", "score": "0.7131573", "text": "def get_dir(self):\r\n return self.dir", "title": "" }, { "docid": "bcc7cc5dbd90edd4b9789091302629e3", "score": "0.709419", "text": "def get_directory_name(self):\n head, tail = os.path.split(self.__fullPath)\n return head", "title": "" }, { "docid": "975ef7fd8e2cdc8f66edec893c370272", "score": "0.7085314", "text": "def path(self):\n return self._dirn", "title": "" }, { "docid": "d7ef346b239723c265a4bfd73ee8f6b1", "score": "0.70637393", "text": "def getPath(self):\n return self.dir", "title": "" }, { 
"docid": "ea394f254cc9a2f21c888428656310a2", "score": "0.70620775", "text": "def path(self) -> str:\n return self._dir", "title": "" }, { "docid": "d0ab0cdf3556155bc515c162660ddc9a", "score": "0.701713", "text": "def _directory_path_for_file(path):\n if path.real_file and path.is_dir():\n # If this is a directory on disk, put the metadata for the directory itself inside\n # the directory instead of in the parent directory. Don't do this for ZIPs (which\n # are \"directories\" but we don't write files into them).\n directory_path = path\n else:\n # Put data about the file in the containing directory.\n directory_path = path.filesystem_parent\n\n filename = path.relative_to(directory_path)\n return directory_path, filename", "title": "" }, { "docid": "21ac18d04eb1e42e3b020a8cdfa07e2d", "score": "0.7013715", "text": "def dirname(self):\n dir = self.path_.rsplit(\"/\", 1)[0]\n if dir == \"\": dir = \"/\"\n return dir", "title": "" }, { "docid": "4b12a055150b932227312296af2a9ddb", "score": "0.6968477", "text": "def file_directory_path(instance, filename):\n return '{0}/files/{1}'.format(instance.id, filename)", "title": "" }, { "docid": "913ca1a7d1fb6b9ad1e65cd55a4ca9f2", "score": "0.6939276", "text": "def getDirectory(self):\n return self.directory", "title": "" }, { "docid": "509873094829ffaca02cf33402aaecb3", "score": "0.69266725", "text": "def GetDirectory(self):\n return self._directory", "title": "" }, { "docid": "9416ecd4fe5b51ceaf0e675f8cfdefbe", "score": "0.6891418", "text": "def directory(root: Optional[str], name: Optional[str]) -> str:\n if name is None or not os.path.isdir(name):\n name = os.getcwd()\n result = os.path.abspath(name)\n if root is not None:\n if not result.startswith(root):\n result = root\n return result", "title": "" }, { "docid": "5e7ff20aa86b17e4aea70f2f1f165da4", "score": "0.6857955", "text": "def path(self):\n\n if not self.active:\n return None\n\n # check to see if path property is set\n if not getattr(self, '_parent_directory', None):\n # default path is simply the value of the name field\n # inside the subdirectory_of path\n path = os.path.join(self.subdirectory_of, self._filesystem_name)\n\n # try to create the directory\n try:\n os.makedirs(path)\n except Exception as e:\n logger.exception(\"Failed create directory: {}\".format(path))\n raise e\n else:\n # set the value of the directory path field\n self._parent_directory = self.subdirectory_of\n\n return os.path.join(self._parent_directory, self._filesystem_name)", "title": "" }, { "docid": "095d9f44e5b0910c11771c37f6e7f742", "score": "0.6843596", "text": "def get_download_dir() -> str:\n\n return download_directory", "title": "" }, { "docid": "4f362cf13302255c648d47392b63b10b", "score": "0.6842783", "text": "def dirname(self):\n return os.path.dirname(self.path)", "title": "" }, { "docid": "37b3b298c05993010f34617f3074b7c1", "score": "0.6822054", "text": "def DirnamePath(self, path):\n if path.endswith(self.PATH_SEPARATOR):\n path = path[:-1]\n if not path:\n return None\n\n dirname, _, _ = path.rpartition(self.PATH_SEPARATOR)\n return dirname", "title": "" }, { "docid": "68b4e808124b04d244e0a2e9f44f79b5", "score": "0.6804708", "text": "def target_directory():\n\n # Sets Empty String Variable\n src = \"\"\n\n # For 0 ... 
length of PT_PATH - 1\n for index in range(len(g_NAME_OF_PT_PATH.split(\"/\")) - 1):\n\n # Append to src with the element at index and \"/\"\n src += g_NAME_OF_PT_PATH.split(\"/\")[index] + \"/\"\n\n # Return src\n return src", "title": "" }, { "docid": "c22ba337c67aa939a27133c35e875e35", "score": "0.67676723", "text": "def path(self):\n\t\trepository, version, directory = self._parse_repo()\n\t\tmsg = \"Location of repository {0} [version: {1}] is {2}.\".format(repository, version, directory)\n\t\tself.log_system(msg)\n\t\treturn directory", "title": "" }, { "docid": "1a69eb8149293f738dbb0ca053bc2ba9", "score": "0.67605925", "text": "def make_path(self, directory=None, extension=None):\n\n if not directory:\n directory = ''\n if not extension:\n extension = ''\n\n return str(os.path.join(directory, self.path + extension))", "title": "" }, { "docid": "64da0771ddcbb0dddc2dfb66d8036515", "score": "0.6754327", "text": "def dirfile(filename, dir=\"\"):\n if filename[0:2] == '~/':\n filename = os.path.join(coetools.home, filename[2:])\n else:\n if dir[0:2] == '~/':\n dir = os.path.join(coetools.home, dir[2:])\n filename = os.path.join(dir, filename)\n return filename", "title": "" }, { "docid": "8b4107379c4389a9fb55befbe1355f8d", "score": "0.675213", "text": "def get_dir_from_arg(directory, end=''):\n if end.endswith('.'):\n end = end[:-1]\n\n if directory.endswith('/'):\n directory = directory[:-1]\n\n return os.path.abspath(os.path.expanduser(directory)) + end", "title": "" }, { "docid": "f53c34d365ffa44aeaff5cd671dcc269", "score": "0.6744242", "text": "def get_dir(self):\n\n # The directory name is based on the agents position, so we just use that\n return self.position.lower().replace(\"agent_\", \"\")", "title": "" }, { "docid": "76354f2ca6f09da414e92ee32ace1d56", "score": "0.67340124", "text": "def getdir(self):\r\n tokens = []\r\n curdir = 1\r\n parents = iter(self._dirparents)\r\n\r\n while curdir != self._dircluster:\r\n nextdir = next(parents)\r\n dcluster, offset = self._resolvedirbycluster(nextdir, curdir)\r\n assert dcluster is not None\r\n tokens.append(self._parsedirentry(\r\n self._readdirentry(dcluster, offset))[1])\r\n curdir = nextdir\r\n\r\n return \"/\" + \"/\".join(tokens)", "title": "" }, { "docid": "0008cc15ec83045edad80703590c5921", "score": "0.6710896", "text": "def get_dir(self):\n return os.getcwd()", "title": "" }, { "docid": "d7d086aba6dc9e9f706d14f6f93607d4", "score": "0.6705724", "text": "def get_dir_name(path: str) -> str:\n return str(pathlib.Path(path).parent)", "title": "" }, { "docid": "bf86dc1a54e6846aee12802b52ca63dc", "score": "0.6682323", "text": "def getdir(db: Database, key: str, date: datetime.date) -> str:\n return path.join(db.directory, key, '{:%Y/%m/%d}'.format(date))", "title": "" }, { "docid": "3e6351252ebf07e8ce08fa2216e8969a", "score": "0.66407317", "text": "def _get_path(subdir, filename):\n path = Path('files', 'images', subdir)\n path.mkdir(exist_ok=True, parents=True)\n\n return str(path / filename)", "title": "" }, { "docid": "0ef79fde313661e78d48717935ab9cd6", "score": "0.6639183", "text": "def capture_dir():\n\timport os\n\tconfig = load_cfg_basic()\n\tfolder = str(config['Directory'].get('capture_folder'))\n\tpath = os.path.join(program_path(), folder)\n\treturn path", "title": "" }, { "docid": "8ed169f67e126f00d69e465e572ff80c", "score": "0.6605068", "text": "def directory(self):\n return self._directory", "title": "" }, { "docid": "1f832b808f047d9f0620ad9c7e7f14a8", "score": "0.6604913", "text": "def file_directory(file):\n this_file 
= Path(file)\n global basename, directory, extension\n basename = str(this_file.stem)\n directory = str(this_file.parent)\n extension = str(this_file.suffix)\n print(directory)", "title": "" }, { "docid": "5ead1798d54252ca7c0e3447a4a8d2b4", "score": "0.65940464", "text": "def path_dirname(self, path):\n return self.path_separator.join(self._path(path).split(self.path_separator)[:-1])", "title": "" }, { "docid": "9b865d008523b7ecbd57b5e35b749e68", "score": "0.65918", "text": "def dirname(cls, path):\n return cls.split(path)[0]", "title": "" }, { "docid": "d98c7d3d430d8d833b3f9efc7bea1781", "score": "0.65841115", "text": "def getDirectory(fileName):\n dirName = os.path.dirname(fileName)\n return os.path.basename(dirName)", "title": "" }, { "docid": "c3adb0d0bef954ae677ad2649f8282e8", "score": "0.6581391", "text": "def fs_dir ( val ):\n retval = fs_abs ( val )\n if retval:\n if os.path.isdir ( retval ) or not os.path.exists ( retval ):\n return retval\n\n return None", "title": "" }, { "docid": "a00c3bdb5ad2f1252517b17bc52afa5d", "score": "0.65725446", "text": "def get_dirpath(self, key):\n dirpath = self[key]\n if dirpath:\n if os.path.isdir(dirpath):\n return dirpath\n log.warning('Invalid %s: %s is not a directory', key, dirpath)\n return None", "title": "" }, { "docid": "907faa7dc280989d7b7b421e68598228", "score": "0.6571422", "text": "def startdir(self,rom):\r\n return os.path.dirname(self.location)", "title": "" }, { "docid": "a8a11afbb6a69902bff82b2b231b7c00", "score": "0.65464", "text": "def get_dir_name(file):\n return os.path.dirname(os.path.realpath(file))", "title": "" }, { "docid": "f278164bf33683975d2e36613d8e8104", "score": "0.65300214", "text": "def get_path(subdir):\n if subdir is None or len(subdir) == 0:\n return get_fow_root()\n\n if len(subdir) > 1 and subdir[-1] == '/':\n subdir = subdir[0:-1]\n\n return get_fow_root() + subdir", "title": "" }, { "docid": "9c5828231874aac467ce3da64c0b4703", "score": "0.6524166", "text": "def get_dir_and_base_name(self):\n s = self.name.get_unicode()\n pos = s.rfind(u'/')\n if pos != -1:\n dir_name = s[:pos]\n file_name = s[pos+1:]\n if len(file_name) == 0:\n return FSString(dir_name), None\n else:\n return FSString(dir_name), FSString(file_name)\n else:\n return None, self.name", "title": "" }, { "docid": "88c97d2f3571c6f0bdcfb0960480505a", "score": "0.652381", "text": "def log_dir():\n\timport os\n\tpath = os.path.join(program_path(), log_folder_name())\n\treturn path", "title": "" }, { "docid": "711b5d9a1cca18c2670ea0b1f003eaa2", "score": "0.65238", "text": "def get_dir():\n result = userdir()\n if result:\n result = os.path.join(result, '.asciidoc', Plugin.type+'s')\n return result", "title": "" }, { "docid": "f6db128957337c1b656ee91e4c4dd5b4", "score": "0.65233403", "text": "def path():\n file_path = pathlib.Path('./')\n file_path.is_dir()\n file_path.absolute()\n for file in file_path.iterdir():\n print(file)", "title": "" }, { "docid": "7bcef80b0cda2ae3a39f7480b1a78d0f", "score": "0.6512198", "text": "def dirname(path, **kwargs):\n import os.path\n return os.path.dirname(path, **kwargs)", "title": "" }, { "docid": "39ee5520dbdf3150daf68ac5d3b4db11", "score": "0.651151", "text": "def get_data_dir(self, subdir = ''):\n directory = os.path.join(settings.imagesavepath,\n time.strftime(\"%Y/%m/%d/\"),\n subdir)\n if not os.access(directory, os.F_OK):\n try:#try to create dir\n os.makedirs(directory)\n except OSError:\n print \"cannot create data dir\"\n return os.getcwd()\n return directory", "title": "" }, { "docid": 
"ab6d2bf8ca03a086e8de982a7c255482", "score": "0.64984685", "text": "def get_dir_name(folder: Optional[str] = None) -> str:\n if not folder:\n folder = os.getcwd()\n return folder.replace(\"/\", os.sep).rstrip(os.sep).split(os.sep)[-1]", "title": "" }, { "docid": "325902be4cc0842e9e08a7e142c71a77", "score": "0.64968425", "text": "def get_dir(self):\n return os.path.join(LANGUAGE_DIR, self.abbrev)", "title": "" }, { "docid": "97e33b9c37d7a33ae48d032fa5b9b5a0", "score": "0.64962626", "text": "def getOutputDirectory():\n directory = \"/home/pi/\"\n now = datetime.datetime.now()\n directory_format = \"%Y%m%d\"\n directory += \"testing_files\" + now.strftime(directory_format) + \"/\"\n # if directory doesn't exist, create it\n if not os.path.exists(directory):\n os.mkdir(directory)\n return directory", "title": "" }, { "docid": "3ff63b351f22bc6ec16526932f863141", "score": "0.6481579", "text": "def get_directory_path():\n path = []\n frame = sys._getframe()\n while frame:\n if frame.f_code.co_name == '_q_traverse':\n self = frame.f_locals.get('self', None)\n if path[:1] != [self]:\n path.insert(0, self)\n frame = frame.f_back\n return path", "title": "" }, { "docid": "cd4cd78dd0e4928be97a3f10346a7ab1", "score": "0.64803946", "text": "def get_path(self, path):\n \n return os.path.join(self.workdir, path)", "title": "" }, { "docid": "839fc1676fe4b427e3dd04bf932e87cc", "score": "0.64749247", "text": "def final_path(self):\n if not self.destination:\n return None\n if not self.nature:\n return None\n p = [self.destination.path] + [ str(s) for s in self.subdirs ]\n return os.path.join(*p)", "title": "" }, { "docid": "ffa40ce3dfb1a31cba5ded045e09a240", "score": "0.6467492", "text": "def directory_fmt(directory):\n return directory.rstrip('/') + '/'", "title": "" }, { "docid": "73179f34d108ba8870ad89a23df5c46d", "score": "0.6465151", "text": "def pwd():\n return ROOT.gDirectory.GetPath()", "title": "" }, { "docid": "683eeeb7546b3ae7902b0ecd3e010c69", "score": "0.6463621", "text": "def get_user_directory(ns_type: int) -> pathlib.Path:\n # Note NSSearchPathForDirectoriesInDomains returns an autoreleased object,\n # so we need to push an autorelease pool and then pop an autorelease pool\n # to make sure this function never leaks. 
This requires that at least one\n # top-level autorelease pool exists already (which should always be the\n # case inside a Python interpreter thread).\n pool = objc.objc_autoreleasePoolPush()\n if not pool:\n raise RuntimeError(\"Could not push an autorelease pool\")\n try:\n array = foundation.NSSearchPathForDirectoriesInDomains(\n ns_type, NSUserDomainMask, c_bool(True)\n )\n if not array:\n raise ValueError(\n \"Unexpected ns_type or unable to retrieve directory from os\"\n )\n result = CFArrayGetIndex(array, 0, None)\n if result is not None:\n return pathlib.Path(CFString2Str(cast(result, CFString_p)))\n finally:\n objc.objc_autoreleasePoolPop(pool)", "title": "" }, { "docid": "48f6b6a5b5db2c48f5e90c60aeddddd7", "score": "0.64471567", "text": "def sub_dir(self, *args):\n return os.path.join(self.path,*args)", "title": "" }, { "docid": "bf4d0b5b1d530575286600a164dc3045", "score": "0.64460903", "text": "def dirname():\n return tempfile.gettempdir()", "title": "" }, { "docid": "9ceb49158fcd703cd62566394a7f6c2e", "score": "0.6443109", "text": "def get_path_str(path, pardir):\n if not os.path.isabs(path):\n return os.path.join(pardir, path)\n else:\n return path", "title": "" }, { "docid": "3f0ec1ed22c4241bfd466c70afcbee26", "score": "0.6430381", "text": "def directory_name(self) -> str:\n return self._values.get('directory_name')", "title": "" }, { "docid": "da5d5c6c740d4e71e2c7de37ea0d2963", "score": "0.6426648", "text": "def get_user_path():\r\n\r\n dir_name_input = \"Gallery path. Created on Desktop. [py_gallery_dist] \"\r\n gallery_dir_name = input(dir_name_input)\r\n if gallery_dir_name == \"\":\r\n gallery_dir_name = copier.default_dist_path\r\n else:\r\n user_def_path = os.path.join(copier.default_dist_path, gallery_dir_name)\r\n gallery_dir_name = os.path.join(user_def_path)\r\n return gallery_dir_name", "title": "" }, { "docid": "8373a678d7f09584ca5cacdf49467b76", "score": "0.6423563", "text": "def workspace_path(workspace_path: str, item_path: str, is_file=True) -> str:\n full_path = os.path.join(workspace_path, item_path)\n dir_path = os.path.dirname(full_path) if is_file else full_path\n create_directory(dir_path)\n return full_path", "title": "" }, { "docid": "b4a0d232e14a2d8ea67ce74b4127d9d0", "score": "0.6423036", "text": "def __check_dir(self, directory):\n if (directory[-1] != '/'):\n directory = directory+'/'\n\n return directory", "title": "" }, { "docid": "5e6ec7ea63aeb06672847bd73a077887", "score": "0.64205664", "text": "def add_dir_to_path(a):\n if os.path.isdir(a):\n os.environ['PATH'] += PATHSEP + a\n else:\n a = None\n return a", "title": "" }, { "docid": "63025b4febcc14f3ddb1e584e25a7847", "score": "0.641634", "text": "def get_dataset_directory():\n curr_dir = os.getcwd() # gets the current working directory\n dataset_dir = os.path.join(curr_dir, \"dataset\") # concatenates\n return dataset_dir", "title": "" }, { "docid": "3e9f39b4485f7aab7bb9c9d06461c0be", "score": "0.6415723", "text": "def cur_file_dir():\n path = sys.path[0]\n if os.path.isdir(path):\n return path\n elif os.path.isfile(path):\n return os.path.dirname(path)", "title": "" }, { "docid": "9000c56563709f3a821a750b3753605d", "score": "0.6413828", "text": "def dirname(path):\n return pathsplit(path)[0]", "title": "" }, { "docid": "5f188d33095ef420a2ed0cf2f058b8ca", "score": "0.6413725", "text": "def dirpath(self, *args, **kwargs):\n if not kwargs:\n path = object.__new__(self.__class__)\n path.strpath = dirname(self.strpath)\n if args:\n path = path.join(*args)\n return path\n return super(LocalPath, 
self).dirpath(*args, **kwargs)", "title": "" }, { "docid": "4f8fff4f558b5b1b930d3aaf0bc33f75", "score": "0.64134675", "text": "def home_directory():\n return str(Path.home())", "title": "" }, { "docid": "f10ebf5678379f7c6a7f06a5b1325278", "score": "0.64125293", "text": "def get_path(path):\n parent_dir = os.path.abspath(os.path.join(path, os.pardir))\n get_dir(parent_dir)\n\n return path", "title": "" }, { "docid": "b84afe97955e92a2064f332db335f1f7", "score": "0.64118785", "text": "def output_dir(self) -> str:\n return self.file_helper.dir_output", "title": "" }, { "docid": "e942b9c5dd5c948123af39b4fb1b6a29", "score": "0.6406466", "text": "def directory(self):\n return home + \"proj/%s/INBOX/%s/\" % (self.account, self.label)", "title": "" }, { "docid": "0a5ac4fed577a1293e8e8da4f2ca7add", "score": "0.64028484", "text": "def pkg2dir(package):\n name: List[str] = []\n while package:\n name.insert(0, package.name)\n package = package.package\n return \"/\".join(name)", "title": "" }, { "docid": "e0f4b7b7b396f3ed3bcc31d36071adf6", "score": "0.6401018", "text": "def dir(self, filetype, **kwargs):\n full = kwargs.get('full', self.full(filetype, **kwargs))\n return os.path.dirname(full)", "title": "" }, { "docid": "517df8af8f407f58bc20e1494cf5602a", "score": "0.639901", "text": "def __make_path(self):\n return os.path.dirname(self.original_pdf)", "title": "" }, { "docid": "08e266ee1d2ce53c1af05ff62d356d2f", "score": "0.639766", "text": "def getDir(dirName):\n return os.path.join(os.path.dirname(__file__), dirName)", "title": "" }, { "docid": "203928afa179ee5205ff049c56127855", "score": "0.6395717", "text": "def path(cls):\n\n return os.path.join(\n os.path.realpath(os.environ['DEEPSTAR_PATH']),\n 'files'\n )", "title": "" }, { "docid": "f59a08237e83be6681c6a9c2eeeb3959", "score": "0.6392548", "text": "def path(args) -> pathlib.Path:\n if args.directory is not None:\n return pathlib.Path(args.directory) / args.source\n return pathlib.Path(args.source)", "title": "" }, { "docid": "37bbc38b7b1b2b81391bfa6ca469d608", "score": "0.6379756", "text": "def data_directory(self):\n directory = join(data_directory(), 'dannet')\n return directory", "title": "" }, { "docid": "7f982df8c86af66f20c5a22e903800b1", "score": "0.637949", "text": "def get_path(self, path):\n # just in case there are query arguments\n # there shouldn't be for basic filesystem ops\n path = self.request.path\n path = path.split('?')[0]\n # remove any url encodings like %xx\n path = parse.unquote(path)\n\n # trailing slashes?\n # join request path to server directory\n sub_dirs = path.split('/')\n path = self.directory\n for d in sub_dirs:\n if d != '':\n path = os.path.join(path, d)\n\n return path", "title": "" }, { "docid": "694e6863064b9e293e3f02233b1bafe2", "score": "0.63785934", "text": "def _generate_output_dir(path):\n relpath = os.path.relpath(os.getcwd(),path)\n count = relpath.count(os.sep) + 1\n\n return relpath+os.path.sep, count", "title": "" }, { "docid": "1aaa7d5812e3cd434b2467ba243542a7", "score": "0.6377628", "text": "def set_letter_directory_path_path(self):\n location = input(f'Please enter the full path of the directory\\n'\n f'where you want to save your letters.\\n'\n f'Hit <Enter> to save to the current working directory.')\n\n if not location:\n location = os.getcwd()\n else:\n location = Path(f'{location}')\n if not location.exists():\n create_directory_answer = input('Path does\\'t exist. 
Do you want to create it?')\n # accept any version of yes, yep, etc.\n if create_directory_answer.lower() == 'yes':\n location.mkdir()\n location = os.chdir(location)\n else:\n location = os.getcwd()\n else:\n location = os.chdir(location)\n return location", "title": "" }, { "docid": "eef76c08607a873c3c1d47ffc999a929", "score": "0.63706815", "text": "def configuration_directory(self) -> str:\n return os.path.dirname(self.config_file_path)", "title": "" }, { "docid": "7ea7b2b524be978b67d6737a65fb7c6f", "score": "0.6367933", "text": "def storage_directory() -> str:\n return _storage_directory", "title": "" }, { "docid": "6574c0a7135ad89c028f80c3fac4efa2", "score": "0.6366917", "text": "def get_directory(cls, sys_dir, dir):\n dir.value = gxapi_cy.WrapSYS._get_directory(GXContext._get_tls_geo(), sys_dir, dir.value.encode())", "title": "" }, { "docid": "81b85488a01682f8641cff9ee4c5c12a", "score": "0.63664454", "text": "def real_dirname(path):\n return os.path.realpath(path)", "title": "" }, { "docid": "77d774839010849c81a3547f4ebdc538", "score": "0.6361326", "text": "def blogger_directory_path(instance, filename):\n return f'blogger_{instance.blogger.pk}/{filename}'", "title": "" }, { "docid": "5e056aec6d85e6740160b48749165184", "score": "0.6358075", "text": "def get_dir(path):\n if not path.startswith('/'):\n path = os.path.abspath(os.path.join(BASE_DIR, path))\n\n if path.startswith(BASE_DIR) and not os.path.exists(path):\n os.makedirs(path)\n\n return path", "title": "" }, { "docid": "46878b833f836398e4bf1f41f7c1d3ac", "score": "0.6353226", "text": "def short_path(file_path):\n if not os.path.exists(file_path):\n raise NameError(('%s does not exist' % file_path))\n\n full_path = os.path.abspath(file_path)\n my_dir = os.path.basename(os.path.dirname(full_path))\n return os.path.join(my_dir, os.path.sep, os.path.basename(full_path))", "title": "" }, { "docid": "466c54ac869afe994d0805b6a2a4d465", "score": "0.63509154", "text": "def _get_download_path():\n music_path = os.path.join(os.path.expanduser('~'), 'Music')\n if not os.path.exists(music_path):\n music_path = os.path.join(os.path.expanduser('~'), 'music')\n if not os.path.exists(music_path):\n os.mkdir(music_path)\n\n download_path = os.path.join(music_path, 'Instrumentals')\n if not os.path.exists(download_path):\n os.mkdir(download_path)\n\n return download_path", "title": "" } ]
6155a505416f858b45399811e7f617f1
Instantiate RequestFactory and User objects to pass POST requests to the TripCreate view.
[ { "docid": "15690371abf4a887ae5e3b44df8eb149", "score": "0.5762043", "text": "def setUp(self):\n self.factory = RequestFactory()\n self.user = User.objects.create(username='Abdullah',\n email='[email protected]',\n password=\"Abdullah's passwd\")", "title": "" } ]
[ { "docid": "59a348e0c16bfb59d077cf3914961971", "score": "0.6906684", "text": "def setUp(self):\n self.factory = RequestFactory()\n self.user = User.objects.create(username='Abdullah',\n email='[email protected]',\n password=\"Abdullah's passwd\")\n self.trip = Trip.objects.create(title=\"Summer Break\",\n passenger=self.user, arrive_at=\"BOS\",\n terminal='G')", "title": "" }, { "docid": "59a348e0c16bfb59d077cf3914961971", "score": "0.6906684", "text": "def setUp(self):\n self.factory = RequestFactory()\n self.user = User.objects.create(username='Abdullah',\n email='[email protected]',\n password=\"Abdullah's passwd\")\n self.trip = Trip.objects.create(title=\"Summer Break\",\n passenger=self.user, arrive_at=\"BOS\",\n terminal='G')", "title": "" }, { "docid": "c6c2e310752824baba279d7d8e74c338", "score": "0.6833409", "text": "def setUp(self):\n self.factory = RequestFactory()\n self.user = User.objects.create(username='Abdullah',\n email='[email protected]',\n password=\"Abdullah's passwd\")\n self.trip = Trip.objects.create(title=\"Summer Break\",\n passenger=self.user, arrive_at=\"BOS\",\n terminal='G')\n self.url = 'trips/4/change-details/'", "title": "" }, { "docid": "849764b5aa44e0d5146a4e5f9063e4f6", "score": "0.6529689", "text": "def create(self, request):\n pass", "title": "" }, { "docid": "849764b5aa44e0d5146a4e5f9063e4f6", "score": "0.6529689", "text": "def create(self, request):\n pass", "title": "" }, { "docid": "c6aca465eb2d89703eccef063570982d", "score": "0.6492873", "text": "def setUp(self):\n self.factory = RequestFactory()\n self.user = User.objects.create(username='Abdullah',\n email='[email protected]',\n password=\"Abdullah's passwd\")\n self.trip = Trip.objects.create(title=\"Summer Break\",\n passenger=self.user, arrive_at=\"BOS\",\n terminal='G')\n self.url = f'trips/{self.trip.id}/delete-trip/'", "title": "" }, { "docid": "08bac9b25577dd16051ac07a3539b1e7", "score": "0.63319004", "text": "def create(self, request):\n # pass request through to the ObtainAuthToken APIView and called the post function\n return ObtainAuthToken().post(request)", "title": "" }, { "docid": "19ed015740eadbdc21f073ab178e660d", "score": "0.6242039", "text": "def get_create_request(self, data):\n return self.request_factory.post(self.get_url(), data=data, **self.request_kwargs)", "title": "" }, { "docid": "a84f04f3b4c0394846fe76f4b6e0cbce", "score": "0.62326753", "text": "def __init__(self) -> None:\n # Create User (Post request parser)\n self.post_reqparser = reqparse.RequestParser()\n self.post_reqparser.add_argument('email', required=True, help=\"email field is required\",\n location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('fullname', required=False, location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('password', required=False, location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('admin', type=inputs.boolean, required=False, location=['form', 'json'],\n store_missing=False)\n self.post_reqparser.add_argument('activated', type=inputs.boolean, required=False, location=['form', 'json'],\n store_missing=False)", "title": "" }, { "docid": "87f69ae7b78c2bdc2ec1257bf7d00b37", "score": "0.6189332", "text": "def setUp(self):\n # Se crea el Request factory pars simular peticiones\n self.factory = RequestFactory()\n # Se crea el User que realiza las peticiones\n self.user = User.objects.create_user(username='testuser', email='[email protected]', password='test')", "title": "" }, { "docid": 
"a47abe4d1d4ec5e0ae1a2fd0e22400a0", "score": "0.6181593", "text": "def create(self,request):\n return ObtainAuthToken().post(request)", "title": "" }, { "docid": "df3334d483796d4b5c8e2e6c9c0682ee", "score": "0.61510116", "text": "def post(self):\n data = request.json\n return create_user(data=data)", "title": "" }, { "docid": "a0ac5ed5adf44f58be104b7fbfe28db0", "score": "0.61239094", "text": "def post(self):\n client = create(request.json)\n return client", "title": "" }, { "docid": "e49d55c1cddf54b286a1c89e3346c371", "score": "0.6089216", "text": "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "e49d55c1cddf54b286a1c89e3346c371", "score": "0.6089216", "text": "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "e49d55c1cddf54b286a1c89e3346c371", "score": "0.6089216", "text": "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "e49d55c1cddf54b286a1c89e3346c371", "score": "0.6089216", "text": "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "e49d55c1cddf54b286a1c89e3346c371", "score": "0.6089216", "text": "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "e49d55c1cddf54b286a1c89e3346c371", "score": "0.6089216", "text": "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "e49d55c1cddf54b286a1c89e3346c371", "score": "0.6089216", "text": "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "e49d55c1cddf54b286a1c89e3346c371", "score": "0.6089216", "text": "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "e49d55c1cddf54b286a1c89e3346c371", "score": "0.6089216", "text": "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "e49d55c1cddf54b286a1c89e3346c371", "score": "0.6089216", "text": "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "e49d55c1cddf54b286a1c89e3346c371", "score": "0.6089216", "text": "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "e49d55c1cddf54b286a1c89e3346c371", "score": "0.6089216", "text": "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "e49d55c1cddf54b286a1c89e3346c371", "score": "0.6089216", "text": "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "e49d55c1cddf54b286a1c89e3346c371", "score": "0.6089216", "text": "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "1d3c98aee083c8b3264cf1c7274ebc69", "score": "0.60653144", "text": "def perform_create(self, user=None, data=None):\n if data is None:\n data = self.data\n\n if user:\n self.response = self.client.post(\n self.url,\n json.dumps(data, cls=DjangoJSONEncoder),\n HTTP_AUTHORIZATION=\"JWT {0}\".format(user.get_jwt_token())\n )\n else:\n self.response = self.client.post(\n self.url,\n json.dumps(data, cls=DjangoJSONEncoder),\n )\n\n if (\n self.response.status_code == status.HTTP_201_CREATED 
and\n hasattr(self.serializer.Meta, 'model')\n ):\n self.model = self.serializer.Meta.model.objects.get(pk=self.response.json()['data']['id'])", "title": "" }, { "docid": "d242f86e3145542cbb404a275188ce5a", "score": "0.60634327", "text": "def create(self, data):\n passenger = data['user']\n car = Car.objects.filter(is_available=True).first()\n # import pdb; pdb.set_trace()\n trip = Trip.objects.create(\n passenger=passenger,\n car=car,\n travel_time=data['travel_time'],\n distance = data['distance'],\n departure_location = data['departure_location'],\n arrival_location = data['arrival_location'],\n )\n return trip", "title": "" }, { "docid": "db8e78b1ec56e48be8ede02046c4618a", "score": "0.60454214", "text": "def post(self, request):\n request.data['user'] = request.user.id\n serializer = NewTaskSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n\n return Response({'message': 'New task added', 'task': serializer.data}, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "f614e5608c81860141edc88522dff0e8", "score": "0.5988133", "text": "def __init__(self, request, user=None):\n self.request = request\n self.user = user or request.user", "title": "" }, { "docid": "ef8fbe7ae873d54bf7f7f33e4633c260", "score": "0.59658784", "text": "def create(self, request):\n\n return ObtainAuthToken().post(request)", "title": "" }, { "docid": "ef8fbe7ae873d54bf7f7f33e4633c260", "score": "0.59658784", "text": "def create(self, request):\n\n return ObtainAuthToken().post(request)", "title": "" }, { "docid": "ef8fbe7ae873d54bf7f7f33e4633c260", "score": "0.59658784", "text": "def create(self, request):\n\n return ObtainAuthToken().post(request)", "title": "" }, { "docid": "0563620cdff87295eb1915b65185565c", "score": "0.5902255", "text": "def __init__(self, request):\n self.request = request\n self.factory = RequestFactory()\n self.schema = request.user.customer.schema_name", "title": "" }, { "docid": "dabe37ee3cc2de0c3c86ee65e7d2db45", "score": "0.59021556", "text": "def create(self, request, *args, **kwargs):\n return super().create(request, *args, **kwargs)", "title": "" }, { "docid": "dabe37ee3cc2de0c3c86ee65e7d2db45", "score": "0.59021556", "text": "def create(self, request, *args, **kwargs):\n return super().create(request, *args, **kwargs)", "title": "" }, { "docid": "dabe37ee3cc2de0c3c86ee65e7d2db45", "score": "0.59021556", "text": "def create(self, request, *args, **kwargs):\n return super().create(request, *args, **kwargs)", "title": "" }, { "docid": "dabe37ee3cc2de0c3c86ee65e7d2db45", "score": "0.59021556", "text": "def create(self, request, *args, **kwargs):\n return super().create(request, *args, **kwargs)", "title": "" }, { "docid": "57fe6e51098e330a28d43a46c0ff2237", "score": "0.5881789", "text": "def create(self, request):\n return ObtainAuthToken().post(request)", "title": "" }, { "docid": "57fe6e51098e330a28d43a46c0ff2237", "score": "0.5881789", "text": "def create(self, request):\n return ObtainAuthToken().post(request)", "title": "" }, { "docid": "4ff61a7f2c04f9f842b36b4de81b1712", "score": "0.5857664", "text": "def __init__(self, request=None, *args, **kwargs):\n self.request = request\n self.user_cache = None\n super(UserCreationForm, self).__init__(*args, **kwargs)", "title": "" }, { "docid": "c1df436de26848219b566c18b6eb88a4", "score": "0.582708", "text": "def create_post(self, request, pk):\n if request.method == 'POST':\n data = JSONParser().parse(request)\n try:\n queryset = 
Tour.objects.get(id=data[\"tour_id\"])\n except Tour.DoesNotExist:\n return HttpResponse(\"El tour no existe.\",status=404)\n except:\n return HttpResponse(\"No veo el id tour\",status=404)\n if data[\"tour_id\"]:\n serializer = PlaceSerializers(data=data)\n if serializer.is_valid():\n serializer.save()\n route = Route(tour_id = data['tour_id'], place_id = serializer.data['id'])\n route.save()\n return JsonResponse(serializer.data, status=201)\n return JsonResponse(serializer.errors, status=400)\n return JsonResponse(\"No hay tour para este lugar.\", status=400)", "title": "" }, { "docid": "3abdcccefc50ede747b93467276bd415", "score": "0.57768524", "text": "def post(self, request, *args, **kwargs):\n user = get_user_model().objects.get(username=request.user.username)\n self.json_data['user'] = user\n return create_or_update_post(self.json_data)", "title": "" }, { "docid": "95ee40452c485f90a580683e4413a04a", "score": "0.575281", "text": "def create(self, validated_data):\n request = self.context['request']\n validated_data.update({\n 'reviewer': request.user.reviewer,\n 'ip_address': get_client_ip(request),\n })\n return super().create(validated_data)", "title": "" }, { "docid": "a226b2bb1a077e7bddaca6669a02a233", "score": "0.5728823", "text": "def test_request_creation(self):\n self.assertEqual('http://localhos:8000/test', self.request.url_priority.path)\n self.assertEqual(1, self.request.url_priority.priority)\n self.assertEqual('POST', self.request.method)\n self.assertEqual('utf-8', self.request.encoding)\n self.assertEqual('text/html', self.request.content_type)\n self.assertEqual(self.user_instance, self.request.user)", "title": "" }, { "docid": "e1c070248d69c2f53776bb3dc7d13345", "score": "0.5717556", "text": "def create_request(request, pk):\n\n requested_user = User.objects.get(pk=pk)\n \n try:\n PartnerRequest.objects.get(from_user=request.user, to_user=requested_user)\n except:\n PartnerRequest.objects.create(from_user=request.user, to_user=requested_user)\n\n\n return redirect('add_partner')", "title": "" }, { "docid": "f32f65d15baa92b8af53bc21cedcff98", "score": "0.5703788", "text": "def create(self, data={}, **kwargs):\n url = self.base_url\n return self.post_url(url, data, **kwargs)", "title": "" }, { "docid": "d1d2a43b66163b4cd597bb7b03654447", "score": "0.5700428", "text": "def post(self, request, username):\n try:\n user = User.objects.get(username=username)\n due_date = request.POST['due_date']\n new_task = Task(\n name=request.POST['name'],\n note=request.POST['note'],\n due_date=datetime.strptime(due_date, DATE_FMT) if due_date else None,\n completed=request.POST['completed'],\n user=user,\n )\n new_task.save()\n return JsonResponse({'msg': 'posted'}, status=status.HTTP_201_CREATED)\n except User.DoesNotExist:\n return JsonResponse({'error': 'The profile does not exist'}, status=status.HTTP_404_NOT_FOUND)\n except KeyError:\n return JsonResponse({'error': 'Some fields are missing'}, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "5ffdcd5407208e96500389fc49ad747f", "score": "0.566035", "text": "def post(self):\n data = request.json\n return save_new_user(data=data)", "title": "" }, { "docid": "5ffdcd5407208e96500389fc49ad747f", "score": "0.566035", "text": "def post(self):\n data = request.json\n return save_new_user(data=data)", "title": "" }, { "docid": "5ffdcd5407208e96500389fc49ad747f", "score": "0.566035", "text": "def post(self):\n data = request.json\n return save_new_user(data=data)", "title": "" }, { "docid": "5ffdcd5407208e96500389fc49ad747f", 
"score": "0.566035", "text": "def post(self):\n data = request.json\n return save_new_user(data=data)", "title": "" }, { "docid": "933779abe19a9a2f131c63de0357a963", "score": "0.56556076", "text": "def setUp(self):\n self.request = HttpRequest()\n self.request.POST['text'] = 'new list item'\n self.request.user = Mock()", "title": "" }, { "docid": "ed3da3ff737ed40d25d1e0bc8780a649", "score": "0.5645764", "text": "def __init__(self, request):\n self.request = request\n self.data = request.POST\n self._build_params()", "title": "" }, { "docid": "e8f18799c4b438bfdc91a8fc6deb82ed", "score": "0.5641471", "text": "def create(self, request, *args, **kwargs):\n make_mutable = not getattr(request.data, '_mutable', True)\n if make_mutable:\n request.data._mutable = True # pylint: disable=protected-access\n request.data['user_id'] = request.user.id\n if make_mutable:\n request.data._mutable = False # pylint: disable=protected-access\n return super().create(request, *args, **kwargs)", "title": "" }, { "docid": "11a0a948815379eeed8b27312251eef4", "score": "0.5635967", "text": "def post(self):\n item = create(request.json)\n return item", "title": "" }, { "docid": "0e159654045a36aeb667828f55b8100c", "score": "0.5633995", "text": "def test_create_user(self):\n # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.\n pass", "title": "" }, { "docid": "fba646af29763624cf5f5118564e59f7", "score": "0.5630597", "text": "def create(self, request):\n auth_serializer = self.serializer_class(data=request.data)\n auth_serializer.is_valid(raise_exception=True)\n client = request.client\n remember_me = bool(request.data.get('remember_me', False))\n id = auth_serializer.validated_data['user'].pk\n return self.response_login(id=id, client=client, remember_me=remember_me)", "title": "" }, { "docid": "1c844ed15335e6b2ddc4840ed0cbafd4", "score": "0.56294274", "text": "def tweet_create_view_pure_django(request, *args, **kwargs):\n\n #* Store the user from the request data\n user = request.user\n\n # handles authentication for a json request and a regular http request.\n # This will execute if a tweet creation is attempted without a user attached to it.\n if not request.user.is_authenticated:\n # user = None is to make sure the tweet without an authenitcated user will show as None\n user = None\n if request.is_ajax():\n return JsonResponse({}, status=401)\n return redirect(settings.LOGIN_URL)\n\n # the TweetForm class can be initialized with data\n # in this case, request.POST would be the post data that the form collected\n # OR\n # it could be initialized with no data, hense 'None'\n form = TweetForm(request.POST or None)\n\n # This will return the next url when the form is POSTed.\n # This can be used to redirect the user.\n # This gets the HTML element with the name \"next\" and returns its value which is \"/\"\n next_url = request.POST.get(\"next\") or None\n \n # if the form is valid, it saves the form\n # else, the page will render an invalid form\n if form.is_valid():\n obj = form.save(commit=False)\n\n #* This is added to associate the tweet obj with a user upon form submission\n obj.user = user\n\n # save the form to the database\n obj.save()\n\n if request.is_ajax():\n return JsonResponse(obj.serialize(), status=201) # 201 == created items\n\n # this will redirect the user to next_url which is \"/\" if the form was submitted with a POST\n if next_url != None and is_safe_url(next_url, ALLOWED_HOSTS):\n return redirect(next_url)\n\n # reinitialize a new 
blank form\n # This is then passed to the component in render\n form = TweetForm()\n if form.errors:\n if request.is_ajax():\n return JsonResponse(form.errors, status=400)\n\n return render(request, \"components/form.html\", context={\"form\": form})", "title": "" }, { "docid": "7000d7d8274c8a772216f8b7e3b4de20", "score": "0.56266785", "text": "def post(self, request, *args, **kwargs) -> Response:\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "7000d7d8274c8a772216f8b7e3b4de20", "score": "0.56266785", "text": "def post(self, request, *args, **kwargs) -> Response:\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "b17f32abf97cbad3c173194897936f0d", "score": "0.5618075", "text": "def create_visit(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "b5f5df206c64c290e0557ad68171f63f", "score": "0.56066465", "text": "def on_post(self, request, response):\n try:\n validator = Validator(**CREATION_FIELDS)\n arguments = validator(request)\n except ValueError as error:\n bad_request(response, str(error))\n return\n create_user(self.api, arguments, response)", "title": "" }, { "docid": "dbb326c039e81c400c0d7b09def19bfa", "score": "0.5606281", "text": "def post(self):\n return create_user(auth_api.payload)", "title": "" }, { "docid": "5fc8293e4d72243d1d6dacf84c984194", "score": "0.56041765", "text": "def getRequestFactory(self):\n pass", "title": "" }, { "docid": "b212ac58c224eb7c4fb761d6f6648c7a", "score": "0.5598002", "text": "def add_user_request(request):\n if request.method == 'GET':\n User = get_user_model()\n users = User.objects.all()\n serializer = UserSerializer(users, many=True)\n return JsonResponse(serializer.data, status=200, safe=False)\n\n elif request.method == 'POST':\n User = get_user_model()\n userDict = json.loads(request.body)\n \n user = User(username=userDict['username'], email=userDict['email'], is_superuser=userDict['is_superuser'], is_staff=userDict['is_superuser'])\n user.set_password(userDict['password'])\n user.save()\n return JsonResponse({}, status=200)", "title": "" }, { "docid": "75f212563ace59fb5781793f5f46354e", "score": "0.55978566", "text": "def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n k = {\"view\": \"create\", \"user\": self.request.user}\n kwargs.update(k)\n return kwargs", "title": "" }, { "docid": "c3db7f51cb8c13925106362130b70959", "score": "0.5558388", "text": "def setUp(self):\n self.api_client = APIClient()\n self.user, self.username, self.password = self.create_user()\n self.client = self.create_client()\n self.order = self.create_order(self.client)\n self.employee = self.create_employee()\n self.task = self.create_task(self.order, self.employee)", "title": "" }, { "docid": "cf3ae1916cb07f7e8362778051c9e2aa", "score": "0.55454546", "text": "def create_user_trip(user_id, trip_id):\n user_trip = User_trip(user_id=user_id, trip_id=trip_id)\n\n db.session.add(user_trip)\n db.session.commit()\n\n return user_trip", "title": "" }, { "docid": "32267576d12ddf0d5223b86558766053", "score": "0.55428165", "text": "def create_allergy(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "ae3dddf57bf06ccbecb4b2db02d6e339", "score": "0.5533168", "text": "def setUp(self):\n user = User(username=\"admin\", dob=\"1988-01-01\")\n user.save()\n\n # Initialize client and force it to use authentication\n self.client = APIClient()\n self.client.force_authenticate(user=user)\n\n # Post user\n 
self.user_data = {\n \"first_name\": \"f\",\n \"last_name\": \"l\",\n \"email\": \"[email protected]\",\n \"username\": \"g\",\n \"password\": \"password123\",\n \"owner_id\": \"1\",\n \"dob\": \"1988-01-01\",\n }\n self.response = self.client.post(\n reverse('user_create'),\n self.user_data,\n format=\"json\")", "title": "" }, { "docid": "da7f91fc55cbf05008b39cda8ed7beec", "score": "0.55289155", "text": "def setUp(self):\n self.user = User.objects.create_user('ellen',\n first_name='Ellen',\n last_name='Cohen',\n password='mamas&papas',\n email='[email protected]')\n self.st = ServiceTicket.objects.create_ticket(service=self.service_url,\n user=self.user)\n self.pgt = ProxyGrantingTicket.objects.create_ticket(self.pgt_url,\n validate=False,\n user=self.user,\n granted_by_st=self.st)\n self.pt = ProxyTicket.objects.create_ticket(service=self.service_url,\n user=self.user,\n granted_by_pgt=self.pgt)", "title": "" }, { "docid": "535e2e5f974df00b41321e9b0c5dfdb0", "score": "0.5527267", "text": "def create(self, user_ip=IP_ADDR, user_agent=USER_AGENT, origin=ORIGIN):\n url = _get_url(self.base_url, 'Create', self.api_credentials, user_ip=user_ip, user_agent=user_agent, origin=origin)\n return _get(self.http, url)", "title": "" }, { "docid": "ea7734536f9e11668e23a271b6a8327c", "score": "0.5521935", "text": "def setUp(self):\n self.user = User.objects.create_user('ellen',\n first_name='Ellen',\n last_name='Cohen',\n password='mamas&papas',\n email='[email protected]')\n self.st = ServiceTicket.objects.create_ticket(service=self.service_url,\n user=self.user)\n self.pgt = ProxyGrantingTicket.objects.create_ticket(self.pgt_url,\n granted_by_st=self.st,\n validate=False,\n user=self.user)", "title": "" }, { "docid": "aec5422c72c62ebcbe87c0f2e9514229", "score": "0.55097204", "text": "def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=self.clean_data(request.data))\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)", "title": "" }, { "docid": "54520681629437c22b4241c5e16aebc1", "score": "0.55031466", "text": "def user_creation(request):\n if request.method == 'POST':\n user = User()\n serializer = UserSerializer(user, data=request.data)\n if serializer.is_valid():\n serializer.save()\n serializer.instance.set_password(request.data[\"password\"])\n serializer.instance.first_name = request.data[\"firstName\"]\n serializer.instance.last_name = request.data[\"lastName\"]\n serializer.instance.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "ab6f1b27063f6a434273a9a743f98a21", "score": "0.5493822", "text": "def __init__(self, obj=None, user=None, *args, **kwargs):\n self.obj = obj\n super(PostForm, self).__init__(*args, **kwargs)", "title": "" }, { "docid": "f22307bb7342637335bd37fb9a1d419e", "score": "0.5490413", "text": "def create(self, **kwargs):\n pass", "title": "" }, { "docid": "f22307bb7342637335bd37fb9a1d419e", "score": "0.5490413", "text": "def create(self, **kwargs):\n pass", "title": "" }, { "docid": "f22307bb7342637335bd37fb9a1d419e", "score": "0.5490413", "text": "def create(self, **kwargs):\n pass", "title": "" }, { "docid": "1a46c96b8e453f2ab85c3657ae85ba61", "score": "0.5481602", "text": "def setUp(self):\r\n self.client = Client()\r\n self.data = {\r\n \"email\": 
\"[email protected]\",\r\n \"username\": \"Slash\",\r\n \"password\": \"gunsnroses\"\r\n }\r\n self.response = self.client.post(\r\n reverse(\"signup\"),\r\n data=json.dumps(self.data),\r\n content_type=\"application/json\"\r\n )", "title": "" }, { "docid": "6c3ba3d8053dd6c4a262cfb3cf765b0c", "score": "0.546442", "text": "def setUp(self):\n self.user1 = User(first_name='Aslan', password=make_password(\n 'old_magic'), username=\"Aslan\", email=\"[email protected]\")\n self.user2 = User(first_name='White', last_name='Witch', password=make_password(\n 'new_magic'), username=\"whitewitch\", email=\"[email protected]\")\n self.user1.save()\n self.user2.save()\n self.users = [self.user1, self.user2]\n self.formData = {'title': 'CreationTest', 'address': 'war drobe',\n 'email': '[email protected]', 'accessCode': 'turkishdelight'}", "title": "" }, { "docid": "de9953a1e5086ce0ee45d0b60a2fd5ee", "score": "0.54573196", "text": "def create(self):\n \n res = register_user(self)\n self.assertTrue(res.status_code, 201)\n res = login_user(self)\n access_token = res.get_json()['token']\n\n headers = {\n \"x-access-token\": access_token,\n \"content-type\": \"application/json\"\n }\n \n return self.client.post('/api/v1/parcels',\n data=json.dumps(self.parcel),\n headers=headers,\n content_type='application/json')", "title": "" }, { "docid": "fdb5d7062c6ef73ca89a11753f6f7ced", "score": "0.54463786", "text": "def create_user(request):\n create_username = json.loads(request.body.decode(\"utf-8\"))['username']\n create_first_name = json.loads(request.body.decode(\"utf-8\"))['first_name']\n create_last_name = json.loads(request.body.decode(\"utf-8\"))['last_name']\n create_email = json.loads(request.body.decode(\"utf-8\"))['email']\n create_password = json.loads(request.body.decode(\"utf-8\"))['password']\n\n new_user = User(username=create_username, first_name=create_first_name, last_name=create_last_name, email=create_email, password=create_password)\n new_user.set_password(new_user.password)\n\n # user_type = json.loads(request.body.decode(\"utf-8\"))['user_type']\n\n new_user.save()\n new_profile = Profile(user=new_user, reports=0, user_type=0)\n new_profile.save()\n\n return HttpResponse(new_user.username)", "title": "" }, { "docid": "0804e476a54f659d8b063916006af64e", "score": "0.5445163", "text": "def create(self, request, *args, **kwargs):\n if not request.user.is_authenticated:\n raise exceptions.NotFound\n\n return super().create(request, *args, **kwargs)", "title": "" }, { "docid": "7dcca8caa500cf0aa4371bc876172b12", "score": "0.54434365", "text": "def perform_create(self, serializer):\n # Set user value to current authenticated user when model is saved to database.\n # Without this, there would be no user associated with recipe because by default\n # ModelViewSet simply creates the object with the details we provide and we did not provide the user\n # But the user exists in the request\n serializer.save(user=self.request.user)", "title": "" }, { "docid": "9058fadd655119be3ae71cc3d7a64afc", "score": "0.54374516", "text": "def perform_create(self, serializer):\n serializer.save(user=self.request.user)", "title": "" }, { "docid": "9058fadd655119be3ae71cc3d7a64afc", "score": "0.54374516", "text": "def perform_create(self, serializer):\n serializer.save(user=self.request.user)", "title": "" }, { "docid": "9058fadd655119be3ae71cc3d7a64afc", "score": "0.54374516", "text": "def perform_create(self, serializer):\n serializer.save(user=self.request.user)", "title": "" }, { "docid": 
"9058fadd655119be3ae71cc3d7a64afc", "score": "0.54374516", "text": "def perform_create(self, serializer):\n serializer.save(user=self.request.user)", "title": "" }, { "docid": "9058fadd655119be3ae71cc3d7a64afc", "score": "0.54374516", "text": "def perform_create(self, serializer):\n serializer.save(user=self.request.user)", "title": "" }, { "docid": "9058fadd655119be3ae71cc3d7a64afc", "score": "0.54374516", "text": "def perform_create(self, serializer):\n serializer.save(user=self.request.user)", "title": "" }, { "docid": "9058fadd655119be3ae71cc3d7a64afc", "score": "0.54374516", "text": "def perform_create(self, serializer):\n serializer.save(user=self.request.user)", "title": "" }, { "docid": "9058fadd655119be3ae71cc3d7a64afc", "score": "0.54374516", "text": "def perform_create(self, serializer):\n serializer.save(user=self.request.user)", "title": "" }, { "docid": "9058fadd655119be3ae71cc3d7a64afc", "score": "0.54374516", "text": "def perform_create(self, serializer):\n serializer.save(user=self.request.user)", "title": "" }, { "docid": "9058fadd655119be3ae71cc3d7a64afc", "score": "0.54374516", "text": "def perform_create(self, serializer):\n serializer.save(user=self.request.user)", "title": "" }, { "docid": "9058fadd655119be3ae71cc3d7a64afc", "score": "0.54374516", "text": "def perform_create(self, serializer):\n serializer.save(user=self.request.user)", "title": "" }, { "docid": "9058fadd655119be3ae71cc3d7a64afc", "score": "0.54374516", "text": "def perform_create(self, serializer):\n serializer.save(user=self.request.user)", "title": "" }, { "docid": "9058fadd655119be3ae71cc3d7a64afc", "score": "0.54374516", "text": "def perform_create(self, serializer):\n serializer.save(user=self.request.user)", "title": "" } ]
c691eca7ff52e62e7604da056a14a9d9
Load a jpeg file and return a numpy array
[ { "docid": "b68780fd1872766229f304d6227d324d", "score": "0.84246373", "text": "def load_jpeg(path_to_image, jpeg_file):\n jpeg_img = Image.open(os.path.join(path_to_image, jpeg_file))\n return np.array(jpeg_img)", "title": "" } ]
[ { "docid": "d65110329748b38ae8ef93b25a9cd423", "score": "0.81133324", "text": "def load_image(filename):\n im = Image.open(filename)\n return np.array(im)", "title": "" }, { "docid": "ff4b677912360981dede603dbda45afa", "score": "0.7762997", "text": "def load_image(path: str) -> np.ndarray:\n return np.asarray(mpimg.imread(path))", "title": "" }, { "docid": "9bb231b9c003311d6df3532839c1f985", "score": "0.7751375", "text": "def read_image(image_path):\n return np.array(PIL.Image.open(image_path))", "title": "" }, { "docid": "b52ebf0f5e69fad2df4e551d3dde3733", "score": "0.77199143", "text": "def readImage(fname, dtype='float'):\n\treturn np.array(PIL.Image.open(fname), dtype=dtype)", "title": "" }, { "docid": "ba770bbe86fc14234c1e57518d05eb91", "score": "0.7691442", "text": "def openImage(fname):\n return np.array(Image.open(fname))", "title": "" }, { "docid": "e9032edc4209d0e85978c745c9ef3826", "score": "0.7617544", "text": "def get_image(fname):\n return np.array(Image.open(fname))", "title": "" }, { "docid": "c304c27b1fbe11fd27177075fc81aab5", "score": "0.76114434", "text": "def load_img(filepath):\n img = np.asarray(Image.open(filepath).convert(\"RGB\"))\n return img", "title": "" }, { "docid": "8b72d337cef0273a0dfc78bbcd6a22f7", "score": "0.7563599", "text": "def np_img_load_file(path):\n return io.imread(path)", "title": "" }, { "docid": "1a459df8a1c4f99c2f9c96cc0f3acafd", "score": "0.7531709", "text": "def load_image_into_numpy_array(path):\r\n im =Image.open(path)\r\n (im_width, im_height) = im.size\r\n #print(im_width)\r\n #return np.array(im.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)\r\n return np.array(Image.open(path)),im_width,im_height", "title": "" }, { "docid": "c158344db36b761584cdf85c98d6d283", "score": "0.73625606", "text": "def load_image_into_numpy_array(path):\r\n img_data = tf.io.gfile.GFile(path, 'rb').read()\r\n image = Image.open(BytesIO(img_data))\r\n (im_width, im_height) = image.size\r\n return np.array(image.getdata()).reshape(\r\n (im_height, im_width, 3)).astype(np.uint8)", "title": "" }, { "docid": "94f20aaa027aa216ce1e309b2fe6b571", "score": "0.7362323", "text": "def load_image_into_numpy_array(path):\n img_data = tf.io.gfile.GFile(path, 'rb').read()\n image = Image.open(BytesIO(img_data))\n (im_width, im_height) = image.size\n del img_data\n gc.collect()\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)", "title": "" }, { "docid": "e93ea5f10dc6fd407ae4830b10b0f9b3", "score": "0.7327138", "text": "def load_image_as_array(file_path):\n path = os.path.join(DATA_PATH, file_path)\n img = Image.open(path)\n img.load()\n data = np.asarray(img, dtype=np.uint8)\n return data", "title": "" }, { "docid": "4d904ff93e7ed68f16df706c95a16fd4", "score": "0.7316088", "text": "def load_image_into_numpy_array(path):\n img_data = tf.io.gfile.GFile(path, 'rb').read()\n image = Image.open(BytesIO(img_data))\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)", "title": "" }, { "docid": "30ce4a36221e9fc5310f980771d8e223", "score": "0.73150194", "text": "def load_image_into_numpy_array(path):\n img_data = tf.io.gfile.GFile(path, 'rb').read()\n image = Image.open(BytesIO(img_data))\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)", "title": "" }, { "docid": "4845e82f053d55bd0d098507b01a4161", "score": "0.731111", "text": "def read(path):\n i = Image.open(path)\n return 
np.asarray(i, dtype=np.uint8)", "title": "" }, { "docid": "5b0bc528ac27c2deb606fc0fc189b774", "score": "0.72956896", "text": "def load_image_into_numpy_array(path):\n img_data = tf.io.gfile.GFile(path, 'rb').read()\n image = Image.open(BytesIO(img_data))\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)", "title": "" }, { "docid": "ae19ade44b9eb240257bcbff95f925b2", "score": "0.72827923", "text": "def load_image_into_numpy_array(path):\n img_data = tf.io.gfile.GFile(path, 'rb').read()\n image = Image.open(io.BytesIO(img_data))\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)", "title": "" }, { "docid": "5a54d29eedd6ea2f24b17099a981658c", "score": "0.726662", "text": "def load_image_into_numpy_array(path):\n img = cv2.imread(path)\n # convert from OpenCV's BGR to RGB format\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return np.array(img)", "title": "" }, { "docid": "481c9d9f03039ce50044916d3c1dfb74", "score": "0.7212853", "text": "def get_image(filename):\n return np.asarray(Image.open(filename), np.uint8)", "title": "" }, { "docid": "9a30a7a122e382907dc804d84d97b05a", "score": "0.72012335", "text": "def image_file_to_array(path):\n # Check if path is relative to project's image directory\n if P.image_dir and os.path.isfile(os.path.join(P.image_dir, path)):\n path = os.path.join(P.image_dir, path)\n elif not os.path.isfile(path):\n raise IOError(\"Unable to locate image file at ({0})\".format(path))\n\n img = Image.open(path)\n if img.mode != 'RGBA':\n if img.mode == 'RGB':\n return add_alpha(np.array(img))\n img = img.convert('RGBA')\n return np.array(img)", "title": "" }, { "docid": "2185ed997fa31eaea65884b91318883e", "score": "0.7156243", "text": "def read_img(path: str) -> np.ndarray:\n image = cv2.imread(path, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.\n # Ensure that the image is either RGB or RGBA\n assert len(image.shape) in [3, 4], \\\n print('[FATAL] %s It seems that there is something wrong with the image file %s' % (dt.now(), path))\n return image", "title": "" }, { "docid": "9bc79c14c3da04c6ef6d49c55071c691", "score": "0.71130323", "text": "def load_image(bytes) -> ndarray:\n image = io.imread(bytes, plugin='imageio')\n # If grayscale. 
Convert to RGB for consistency.\n if image.ndim != 3:\n image = color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "title": "" }, { "docid": "b55a982407f4abaaa81d9eb1c19b2933", "score": "0.71129745", "text": "def load(img_path):\n pil_image = Image.open(img_path).convert(\"RGB\")\n # convert to BGR format\n image = np.array(pil_image)[:, :, [2, 1, 0]]\n return image", "title": "" }, { "docid": "11245ce8cfe82d2721340581439f1527", "score": "0.71101373", "text": "def read_image_data(fname):\n assert os.path.exists(fname), warnings.warn(\"File must exist to read: {}\".format(fname))\n\n method_lookup = {\n 'cr2': lambda fn: read_pgm(cr2_to_pgm(fn), remove_after=True),\n 'fits': lambda fn: fits.open(fn)[0].data,\n 'new': lambda fn: fits.open(fn)[0].data,\n 'pgm': lambda fn: read_pgm(fn),\n }\n\n file_type = fname.split('.')[-1]\n method = method_lookup.get(file_type, None)\n\n d = np.array([])\n if method is not None:\n d = method(fname)\n\n return d", "title": "" }, { "docid": "770fa51e51fea3d47d8dbb3a645530dd", "score": "0.7089521", "text": "def load_image(img_file):\r\n return np.array(Image.open(img_file)).astype(np.uint8)", "title": "" }, { "docid": "0b03d960c4f9634dbf4a6957b06a5746", "score": "0.7088389", "text": "def loadImage(imageName):\n np_img = np.asanyarray(Image.open(imageName).convert('L'))\n return np_img", "title": "" }, { "docid": "d11338b38aa8b591120950da8052074a", "score": "0.70715696", "text": "def img_loader(file_name):\n try:\n from skimage import img_as_ubyte\n from skimage.io import imread\n return np.asarray(img_as_ubyte(imread(file_name)))\n except ImportError:\n pass\n\n try:\n from PIL import Image\n return np.asarray(Image.open(file_name))\n except ImportError:\n raise ImportError(\"Reading %s requires PIL or scikit-image\" %\n file_name)", "title": "" }, { "docid": "2ff85e86929cc6d21b724491f7788348", "score": "0.7041294", "text": "def read_img(file_path, size):\n img = image.load_img(os.path.join(PATH, file_path), target_size=size)\n img = image.img_to_array(img)\n return img", "title": "" }, { "docid": "edb5a70218a8b7e23e64b82aa7503508", "score": "0.70371336", "text": "def load_img_array(fname, target_size=None, dim_ordering='default'):\n img = Image.open(fname)\n img = img.resize(target_size)\n img.load()\n x = image.img_to_array(img, dim_ordering=dim_ordering)\n img.close()\n return x", "title": "" }, { "docid": "be23f7784b2a155f1c8356a1684d9c86", "score": "0.7032323", "text": "def load_image(img_path):\n img = Image.open(img_path).convert('RGB')\n # img = np.array(img)\n # print(\"Loaded image: \", image_path)\n return img", "title": "" }, { "docid": "6c03b97179577b905608a48c1b2e7aab", "score": "0.7027618", "text": "def read_image(filepath: str, mode: str = \"RGB\") -> np.array:\n if not os.path.isfile(filepath):\n raise ValueError(f'Invalid file \"{filepath}\".')\n return Image.open(filepath).convert(mode)", "title": "" }, { "docid": "b6a017eed23cd8fb5be6e4e89bdea4a3", "score": "0.7000131", "text": "def load_jpeg(\n filename: str,\n resize: Optional[Tuple[int, int]] = None,\n) -> np.ndarray:\n img = Image.open(filename)\n if resize is not None:\n # PIL expects a (width, height) tuple.\n img = img.resize((resize[1], resize[0]))\n return np.asarray(img)", "title": "" }, { "docid": "6f1ac7a3ed49ecc2e92b5df1985ce0e5", "score": "0.699344", "text": "def load_data(filename):\n return skio.imread(filename)", "title": "" }, { "docid": "6abbceb8c8a486f6ab8ac0bc86bc3153", 
"score": "0.69635385", "text": "def load_img(img_path: Path) -> np.ndarray:\n img = cv2.imread(str(img_path))\n if img is None:\n raise FileNotFoundError(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img", "title": "" }, { "docid": "f832c55ec7e4005dd85769012c7eff8e", "score": "0.6947549", "text": "def read_image(file_path):\n if file_path[-3:] == 'npy':\n im = np.load(file_path)\n else:\n try:\n im = cv2.imread(file_path, cv2.IMREAD_ANYDEPTH)\n except IOError as e:\n print(e)\n raise\n return im", "title": "" }, { "docid": "27a63e36e8b20e6f7202960f55a05c58", "score": "0.69388497", "text": "def read_image(path: str) -> np.ndarray:\n\n try:\n input_image_np = cv2.imread(path)\n except:\n raise ValueError('Inputted path does not reference an image that can be read by OpenCV.')\n if input_image_np is None:\n raise ValueError('Inputted path does not reference an image that can be read by OpenCV.')\n\n height, width, num_channels = input_image_np.shape\n\n assert num_channels == 3, f'num_channels = {num_channels} != 3'\n\n return input_image_np", "title": "" }, { "docid": "d9b51a858bd2200baa415a6a4fadea8c", "score": "0.6918416", "text": "def load_image(image_path: Path, image_type: str = 'L') -> np.ndarray:\n return np.array(Image.open(image_path).convert(image_type))", "title": "" }, { "docid": "4dde10039a1f3576e35df37ff24869cf", "score": "0.69088495", "text": "def readFile(name):\n\tim = cv2.imread(name)\n\tim = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n\treturn np.asarray(im)", "title": "" }, { "docid": "2343dad38754f26b5f118407fed6187f", "score": "0.68999183", "text": "def load_img(path):\n sitk_t1 = sitk.ReadImage(path)\n img = sitk.GetArrayFromImage(sitk_t1)\n\n return img", "title": "" }, { "docid": "8474a959e8d264a37d095e0394b90b7b", "score": "0.6886924", "text": "def image_reader(filename):\n img = scipy.misc.imread(filename).astype(np.float)\n if len(img.shape) == 2:\n img = np.dstack((img, img, img))\n elif img.shape[2] == 4:\n img = img[:, :, :3]\n return img", "title": "" }, { "docid": "e88357f59235ce7631a8d74dd67b1008", "score": "0.68787694", "text": "def load_image(src_path):\n img = np.array(Image.open(src_path))\n if len(img.shape) == 3:\n img = np.dstack((np.zeros(img.shape[:2]+(1,)), img))\n img = img[:,:, ::-1]\n img = img.astype(np.uint8).view(np.uint32)\n img = img.reshape(img.shape[:2])\n return img.astype(np.uint32)\n else:\n return img.astype(np.uint8)", "title": "" }, { "docid": "ff05f0d64ea8e4bb30a51b25783c710a", "score": "0.6876938", "text": "def pic_to_numpy(temp_file):\n np_array = misc.imread(temp_file)\n return np_array", "title": "" }, { "docid": "67986e7bff68f4d4c1e0a54551127859", "score": "0.68033546", "text": "def tiff_loader(f):\n with open(f, 'rb') as f:\n image = Image.open(f)\n return np.array(image)", "title": "" }, { "docid": "64d6f1bffc333c276c334137ab8cca1c", "score": "0.6719803", "text": "def img2array(data_path, desired_size=None, expand=False, view=False):\r\n # Good Code !!!!!\r\n img = Image.open(data_path)\r\n img = img.convert('RGB')\r\n if desired_size:\r\n img = img.resize((desired_size[1], desired_size[0]))\r\n if view:\r\n img.show()\r\n x = np.asarray(img, dtype='float32')\r\n if expand:\r\n x = np.expand_dims(x, axis=0)\r\n x /= 255.0\r\n return x", "title": "" }, { "docid": "c7f5d510104d51179a203087c3543ba0", "score": "0.6714524", "text": "def image_reader(path):\n image_data = img.imread(path + \".png\",'png')\n \n return image_data", "title": "" }, { "docid": "57c6f446892c23d99faafe0002e63f12", "score": "0.6669547", "text": 
"def np_loader(filepath: str) -> np.array:\n return np.load(filepath)", "title": "" }, { "docid": "afb2ce4d2b13d37915977f9005eaa8c1", "score": "0.66443336", "text": "def imgRead(fileName):\n imgIn = plt.imread(fileName)\n return imgIn", "title": "" }, { "docid": "73a6e28e7925bb5c689bce845d64bbd6", "score": "0.66328", "text": "def _images(path):\n with gzip.open(path) as f:\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\n pixels = np.frombuffer(f.read(), '>B', offset=16)\n return pixels.reshape(-1, 784).astype('float64')", "title": "" }, { "docid": "35753a9479d1377e8b8dc9113841d691", "score": "0.6592134", "text": "def tif_read(file_name):\n im = io.imread(file_name)\n im_array = np.zeros((im.shape[1],im.shape[2],im.shape[0]), dtype=im.dtype)\n for i in range(im.shape[0]):\n im_array[:,:,i] = im[i]\n return im_array", "title": "" }, { "docid": "6a82930fa750c6888a839c36d7d42c52", "score": "0.65896726", "text": "def read_image_into_array(file_name,input_cols,input_rows):\r\n\r\n input_image= open(file_name) #open image file\r\n input_image_array = np.fromfile(input_image, dtype = np.uint8, count = input_rows*input_cols) #image is read into array. \r\n input_image_array.shape = (input_image_array.size//input_cols,input_cols) #1D to 2D array\r\n original_image = input_image_array #copy the read image into new variable\r\n\r\n\r\n print(\"\\n \\n original image \\n\",original_image)\r\n return original_image", "title": "" }, { "docid": "fbd32575f300a534f90acb9181013e8a", "score": "0.65761304", "text": "def read_file(file_path):\n return cv2.imread(file_path)", "title": "" }, { "docid": "17c407911eca4b730ab4059ece330009", "score": "0.6572319", "text": "def read_image(image_path):\n return imageio.imread(image_path)", "title": "" }, { "docid": "56ae6efdc7b971ef1edb1ae753c728a8", "score": "0.656753", "text": "def load_array_img(path, is_int=False):\n\tif not isfile(path):\n\t\traise IOError('image array file \"{0:s}\" not found'.format(path))\n\timg = open(path)\n\tif 'dtype' not in img.info:\n\t\twarning('png metadata missing or corrupted, cannot determine original data type, using float64')\n\tdatatype = dtype(img.info.get('dtype', 'float64'))\n\tsz = datatype.itemsize\n\tif 'padding' not in img.info and sz < 4:\n\t\traise IOError('png metadata missing or corrupted, making assumptions about the shape of the data, this may lead to errors')\n\tpad_len = int(img.info.get('padding', 0))\n\tmat_width = img.size[0]\n\tmat_height = int((img.size[1] * 4 - pad_len / mat_width) / sz)\n\tmat = frombuffer(img.tobytes(), dtype=datatype, count=mat_width * mat_height)\n\tmat = mat.reshape((mat_width, mat_height))\n\treturn mat", "title": "" }, { "docid": "54a951cd6d31de96055ad26ded8ec7dc", "score": "0.656026", "text": "def image_file_to_array(filename, dim_input):\n\timage = misc.imread(filename)\n\timage = image.reshape([dim_input])\n\timage = image.astype(np.float32) / 255.0\n\timage = 1.0 - image\n\treturn image", "title": "" }, { "docid": "1fd791f34793c65fed9c8b444fac6c36", "score": "0.655456", "text": "def read_img(path):\n return cv2.imread(path)", "title": "" }, { "docid": "b6b95bc41367b4f90858cab02682c334", "score": "0.65485877", "text": "def load_img(path):\n img = image.load_img(path, target_size=(224, 224))\n arr = np.expand_dims(image.img_to_array(img), axis=0)\n arr = preprocess_input(arr)\n return arr", "title": "" }, { "docid": "1cf506a3f0f1859dbe4be47c2e76868c", "score": "0.6543414", "text": "def load_np_image(image_file):\n return np.float32(load_np_image_uint8(image_file) / 
255.0)", "title": "" }, { "docid": "90f49dbf9bc42dd5146dd06a650f7d92", "score": "0.65397507", "text": "def __read_image__(self, image_path: str) -> np.ndarray:\n\n image = cv2.imread(image_path)\n\n image = self.__preprocess_image__(image=image)\n\n return image", "title": "" }, { "docid": "b8b0ddc8dd54b0165163ef931e4c9f51", "score": "0.6536804", "text": "def read_jpg(filename):\n width = int()\n height = int()\n img = list()\n\n return width,height,img", "title": "" }, { "docid": "d03c238242a14abd6679a66a2f575593", "score": "0.65366685", "text": "def loadImage(self, image_name):\n original_image_path = os.path.join(self.path_to_image_dir, image_name)\n image = Image.open(original_image_path)\n image_np = self.load_image_into_numpy_array(image)\n return image_np", "title": "" }, { "docid": "1bd963ece6a54439e7e5d225514e9be2", "score": "0.65332514", "text": "def read_file(path):\n data = cv2.imread(path, -1).astype('float32')\n result = np.empty(data.shape, dtype='float32')\n result[:,:,0] = (data[:,:,2] - 2**15) / 64\n result[:,:,1] = (data[:,:,1] - 2**15) / 64\n result[:,:,2] = data[:,:,0]\n\n return result", "title": "" }, { "docid": "5d258321298e6e34649ea48c8b9ab308", "score": "0.6506893", "text": "def read_image(filename, verbose=False):\n\n pil_image = Image.open(filename)\n if verbose:\n pil_image.show()\n\n np_image = np.array(pil_image, dtype=np.uint8)\n\n #some grayscale images was readed as 3D with three equal color channels, implicitly in this app we want grascale image as 2D\n if len(np_image.shape) == 3:\n if np_image.shape[2] == 3:\n if np.array_equiv(np_image[:,:,0],np_image[:,:,1]) and np.array_equiv(np_image[:,:,2],np_image[:,:,1]):\n np_image = np_image[:,:,0]\n np_image = grayTo2D(np_image)\n else:\n #we must convert images which is read as 2 dimensional grayscale images\n pil_image = Image.open(filename).convert('L')\n np_image = np.array(pil_image, dtype=np.uint8)\n\n np_image.setflags(write=1)\n \n return np_image", "title": "" }, { "docid": "46473b3de34ab6b5b2ddef63ac208982", "score": "0.65033466", "text": "def data_import(file_path):\n\n # Import image\n image = cv2.imread(file_path, 1)\n\n return image", "title": "" }, { "docid": "d91e0905ceb3376c145441b906907cc3", "score": "0.6492261", "text": "def image_file(self, filename):\n img = cv2.imread(filename, 0)\n if type(img) == np.ndarray:\n return self.image(img)\n else:\n return None", "title": "" }, { "docid": "713a321d7a0210e13d138d34de404678", "score": "0.64895076", "text": "def parse_dataset(filepath):\n\n # open the dataset\n with open(filepath, \"rb\") as dataset:\n # read the magic number and the number of images\n magic_number, number_of_images = struct.unpack(\">II\", dataset.read(8))\n # read the number of rows and number of columns per image\n rows, columns = struct.unpack(\">II\", dataset.read(8))\n # now read the rest of the file using numpy.fromfile()\n images = np.fromfile(dataset, dtype=np.dtype(np.uint8).newbyteorder(\">\"))\n # reshape so that the final shape is (number_of_images, rows, columns)\n images = images.reshape((number_of_images, rows, columns))\n\n # return the images\n return images", "title": "" }, { "docid": "a1946cd64806a9e2b064671235ee8d0f", "score": "0.6488699", "text": "def read_image(filename):\n image_array_rgb = misc.imread(filename, mode='RGB')\n # image_array_grey = misc.imread(filename, flatten=True, mode='F')\n image_array_grey = color.rgb2grey(image_array_rgb)*255\n image_array_luv = color.rgb2luv(image_array_rgb)\n return image_array_rgb, image_array_grey, 
image_array_luv", "title": "" }, { "docid": "b88ca739c86e327e16354299490b5cc0", "score": "0.64793986", "text": "def read_image_file(filename):\n if not os.path.isfile(filename):\n return None\n data = cv2.imread(filename)\n if data is not None and len(data.shape) >= 3 and data.shape[2] > 1:\n return data[:, :, ::-1] # fix the channel order to RGB if we have a colour image\n return data", "title": "" }, { "docid": "821a6fd15f9389697e0cccb9f53d1670", "score": "0.64774567", "text": "def _read_image(filename):\n bgr = cv2.imread(filename)\n rgb = bgr[:, :, ::-1]\n return rgb", "title": "" }, { "docid": "20e98e741ddb35065b3bab0b01839a1b", "score": "0.646708", "text": "def read_image(path):\r\n image = cv2.imread(path)\r\n\r\n return image", "title": "" }, { "docid": "6c7348e72154ee40026ea25d8e6708b2", "score": "0.6455969", "text": "def read_image(path: str, opencv_color_flag: int) -> np.ndarray:\n my_image = cv2.imread(filename = path, flags = opencv_color_flag)\n return my_image", "title": "" }, { "docid": "68db2173a93170c7c741de840fc28389", "score": "0.64479125", "text": "def hdr_read(filename: str) -> np.ndarray:\r\n data = cv2.imread(filename, cv2.IMREAD_ANYDEPTH)\r\n assert data is not None, \"File {0} not exist\".format(filename)\r\n assert len(data.shape) == 3 and data.shape[2] == 3, \"Input should be a 3-channel color hdr image\"\r\n return data", "title": "" }, { "docid": "31ef93c0980d0038313edafa942ac686", "score": "0.6441795", "text": "def load_image(self, path):\n with open(path, \"rb\") as f:\n return self.load_image_fobj(f)", "title": "" }, { "docid": "9195e106790ffc11a2b443cb43e9e819", "score": "0.64403856", "text": "def image_reader_as_array(file_name):\n raster = gdal.Open(file_name)\n band_num = raster.RasterCount\n band = raster.GetRasterBand(1)\n rows, columns = (band.XSize, band.YSize)\n\n np_array = np.empty([columns, rows, band_num], dtype=np.float32)\n\n for i in range(0, band_num):\n band = raster.GetRasterBand(i + 1)\n arr = band.ReadAsArray()\n np_array[:, :, i] = arr\n\n return np_array", "title": "" }, { "docid": "3f7f0396b6180220d7539c3b577878a0", "score": "0.6438198", "text": "def read_image(path, dtype=np.float32, color=True):\n\n f = Image.open(path)\n try:\n if color:\n img = f.convert('RGB')\n else:\n img = f.convert('P')\n img = np.asarray(img, dtype=dtype)\n finally:\n if hasattr(f, 'close'):\n f.close()\n\n if img.ndim == 2:\n # reshape (H, W) -> (1, H, W)\n return img[np.newaxis]\n else:\n # transpose (H, W, C) -> (C, H, W)\n return img.transpose((2, 0, 1))", "title": "" }, { "docid": "50f3b1d0c71daadcf1c7c09502e53714", "score": "0.64193684", "text": "def LoadImage(filename):\n #img is loaded in bgr colorspace\n return cv2.imread(filename)", "title": "" }, { "docid": "6aa99da53f7864a4e2478c8432e55b54", "score": "0.64178014", "text": "def read_image(file, image_width, image_height):\n image = bytearray(file.read(image_width * image_height))\n image = np.array(image)\n if image.size == image_width * image_height:\n image = np.reshape(image, (image_height, image_width))\n else:\n image = np.array([])\n\n return image", "title": "" }, { "docid": "771f7ed3f076c5a824656d0fdf57f479", "score": "0.6408233", "text": "def image_to_np_array(img_filename: str, float_cols: bool = True) -> np.ndarray:\n img = Image.open(img_filename)\n img.load()\n if float_cols:\n data = np.asarray(img, dtype=\"float32\") / 255.0\n else:\n data = np.asarray(img, dtype=\"uint8\")\n return data", "title": "" }, { "docid": "6e8492e37cd71761323252371bed9e7b", "score": "0.63990724", "text": 
"def jpg2np(jpg):\n import cv2\n jpgimg = jpg[~np.isnan(jpg)].astype(np.uint8)\n return cv2.imdecode(jpgimg,cv2.IMREAD_COLOR)", "title": "" }, { "docid": "4df05b29b114d865aa0ba2e65b099a9a", "score": "0.6372704", "text": "def load_image(infilename):\n return mpimg.imread(infilename)", "title": "" }, { "docid": "92d3d7b55b3938dca7362479417d4062", "score": "0.6371372", "text": "def load_image_into_numpy_array(self, img):\n\n (im_height, im_width, im_chan) = img.shape\n\n return (\n np.array(img.data)\n .reshape((im_height, im_width, 3))\n .astype(np.uint8)\n )", "title": "" }, { "docid": "47281666ed153195fe492787d1491564", "score": "0.6367422", "text": "def PIL_image_to_array(image):\n a = numpy.fromstring(image.tostring(),'b')\n a.shape = image.im.size[1], image.im.size[0]\n\n return a", "title": "" }, { "docid": "5f40584d46f0a5c5aa25c2121b0db001", "score": "0.6324254", "text": "def process_image_from_observation(observation):\n\n # Decode image bytes into a numpy array.\n pil_image = Image.open(BytesIO(observation.image_bytes)).convert('RGB')\n\n image_arr = np.array(pil_image)\n\n return image_arr", "title": "" }, { "docid": "c7d572c588323cca90c477225a9cd29d", "score": "0.63112205", "text": "def load_image(path_to_image):\n img_array = imread(path_to_image)\n img_array_as_rgb = rgba2rgb(img_array).astype('float')\n\n return img_array_as_rgb", "title": "" }, { "docid": "93fdcb3ca68886a810ea035d0d6380fb", "score": "0.6299629", "text": "def import_mgh(filename):\n mgh_file=nb.load(filename)\n mmap_data=mgh_file.get_data()\n array_data=np.ndarray.flatten(mmap_data)\n return array_data;", "title": "" }, { "docid": "9a0905ec1c10fb3f040f61daab220a6a", "score": "0.62966007", "text": "def load_from_numpy(filename):\n with tf.io.gfile.Open(filename, \"rb\") as fp:\n return np.load(fp)", "title": "" }, { "docid": "20e8d927def01d2a68eba2f3e842e90e", "score": "0.6292146", "text": "def imReadAndConvert(filename: str, representation: int) -> np.ndarray:\r\n img=cv2.imread(filename)\r\n if(representation==LOAD_GRAY_SCALE):\r\n img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n else:\r\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\r\n # normalized the mat\r\n img = img * (1 / 255)\r\n # mat in float\r\n img=np.array(img,dtype=float)\r\n return img", "title": "" }, { "docid": "6ad8826e9e814705d6941426a6fff51e", "score": "0.6289625", "text": "def read_data(self,filename):\n data = np.genfromtxt(self.image_dir+filename,delimiter=',')\n data = data.reshape(data.shape[0],data.shape[1],1)\n\n return data", "title": "" }, { "docid": "894eb5c1bf764b11ab507e398421e6bb", "score": "0.6288511", "text": "def load(url):\n #response = requests.get(url)\n #pil_image = Image.open(BytesIO(response.content)).convert(\"RGB\")\n pil_image = Image.open(url).convert(\"RGB\")\n # convert to BGR format\n image = np.array(pil_image)[:, :, [2, 1, 0]]\n return image", "title": "" }, { "docid": "9a83e8030df41f7297984591ded8b272", "score": "0.628804", "text": "def load_lab_image(filename):\n path = str(IMAGE_DIRECTORY / filename)\n return np.asarray(Image.open(path).convert('L'))", "title": "" }, { "docid": "0370dbda5ba697b8457d8fa525ea268e", "score": "0.6286716", "text": "def _read_images(filename):\n assert(os.path.isfile(filename))\n\n print('Reading images from %s.' 
% filename)\n with gzip.open(filename, 'rb') as f:\n # Skip magic number\n f.read(4)\n # Get number of images in this file.\n num = int.from_bytes(f.read(4), byteorder='big')\n print('Number of images in current file: %d' % num)\n # Get number of rows and columns.\n rows = int.from_bytes(f.read(4), byteorder='big')\n cols = int.from_bytes(f.read(4), byteorder='big')\n\n # The rest of the file consists of pure image data, each pixel\n # value encoded as a byte.\n num_rem_bytes = num * rows * cols\n images = np.array(struct.unpack('%dB' % num_rem_bytes,\n f.read(num_rem_bytes)))\n\n images = np.reshape(images, (-1, rows * cols))\n\n return images", "title": "" }, { "docid": "9f217d1fd000ab523a7c9f06470cc063", "score": "0.62844366", "text": "def load_image(image):\n image = io.imread(image)\n image = resize(image,(512,512))\n image = np.array(image)\n image = image[:,:,0]\n print('Image shape:',image.shape)\n return image", "title": "" }, { "docid": "6beceab71a88a65d5e03c3d40888bef6", "score": "0.62801206", "text": "def load_image(image_path):\n return skimage.io.imread(image_path)", "title": "" }, { "docid": "0a5cfaaa4a1a4b7babcece81acb5d06c", "score": "0.6266097", "text": "def load_sample(self, path):\n if not os.path.isfile(path):\n raise NotADirectoryError(\"Sample not found at {}\".format(path))\n return np.array(Image.open(path).convert(self.colormode.value).resize((self.img_size, self.img_size)))", "title": "" }, { "docid": "49097913d6cfa62e7c517e73f0b1cebc", "score": "0.62592906", "text": "def read_test(filepath):\n f = open(filepath)\n X = []\n for line in f:\n data = line.strip()\n img = Image.open(data).convert(\"L\")\n img.thumbnail((64, 64), Image.ANTIALIAS)\n img = np.array(img)\n X.append(img.flatten())\n X = np.array(X)\n return X", "title": "" }, { "docid": "87cb27f4f927713c3c2c9b39842ec748", "score": "0.62525284", "text": "def get_data(PATH):\n X = []\n for img in glob.glob(PATH+\"/*\"):\n x = np.array(Image.open(img).convert('RGB'))\n X.append(x)\n return np.array(X)", "title": "" }, { "docid": "311cae0d718b1bb5ee1118813579d2c9", "score": "0.62461364", "text": "def from_file(cls, filepath: str):\n\n def binary_to_data(binary: bytes, image_format: int, width: int, height: int) -> List[List[Any]]:\n # Convert bytes to binary string\n binary_str = \"\"\n for byte in binary:\n binary_str += \"{0:08b}\".format(byte)\n\n result = [[0 for _ in range(width)] for _ in range(height)] # Creates array with specified dimensions\n if image_format == cls.Format_BW:\n # B & W, each pixel is a single bit of 1 or 0\n i = 0\n for row in range(len(result)):\n for col in range(len(result[0])):\n result[row][col] = int(binary_str[i])\n i += 1\n return result\n\n # Check that file exists\n if not os.path.isfile(filepath):\n raise ValueError(filepath + \" doesn't exist.\")\n\n file = open(filepath, 'rb')\n\n # Check signature\n signature = file.read(4)\n if signature != cls._FILE_SIGNATURE:\n raise ValueError(filepath + \" doesn't seem to point to a limg file.\")\n\n # Read dimensions\n width = int.from_bytes(file.read(2), \"big\")\n height = int.from_bytes(file.read(2), \"big\")\n\n # Read image format\n image_format = int.from_bytes(file.read(1), \"big\")\n\n # Read data\n data = file.read()\n file.close()\n\n # Convert data from binary to pixel array\n data = binary_to_data(data, image_format, width, height)\n return Image(data, image_format)", "title": "" }, { "docid": "e2c4e289b34a9e39412afbfbc319d974", "score": "0.6216517", "text": "def load_image(image_path):\n return 
cv2.imread(image_path)", "title": "" }, { "docid": "fec2681bd0b3da7e786326926a030100", "score": "0.6212545", "text": "def load_jpeg_registry(jpeg_registry_path):\n with open(jpeg_registry_path) as f:\n return json.load(f)", "title": "" }, { "docid": "6d262e6987811540d1fb5eafc7152830", "score": "0.620837", "text": "def get_image(size):\n img_filename = _resource_loader.get_path_to_datafile(\n \"testdata/grace_hopper.jpg\")\n img = image.load_img(img_filename, target_size=(size, size))\n img_array = image.img_to_array(img)\n img_array = np.expand_dims(img_array, axis=0)\n return img_array", "title": "" }, { "docid": "975decf523851c9762499f35a58ea9d8", "score": "0.6198865", "text": "def load(url):\n #response = requests.get(url)\n #pil_image = Image.open(BytesIO(response.content)).convert(\"RGB\")\n pil_image = Image.open(BytesIO(url)).convert(\"RGB\")\n # convert to BGR format\n image = np.array(pil_image)[:, :, [2, 1, 0]]\n return image", "title": "" } ]
28746434125195c7f3b81818344debe7
Change the image for better generalization. Add rotation. Add translation.
[ { "docid": "53e7a6f6e35b21cffcef47162f10178b", "score": "0.0", "text": "def data_oversampling_pipe(image, random_factor=False):\n if random_factor:\n flip = 1 if random.randint(0, 1) == 1 else -1\n r = random.random() * flip\n else:\n r = 1\n # rotation\n img = rotate_image(image, 10 * r)\n\n # Translation\n img = translate_image(img, 3 * r, 6 * r)\n\n # we dont flip because the signs also flip make it harder to learn letters and numbers on signs\n # flip image horizontal\n # if random_factor and (flip == -1):\n # img = cv2.flip(image, flipCode=0)\n\n return img", "title": "" } ]
[ { "docid": "61b489a5f85b9c63ab37d3f7fd126d12", "score": "0.7310402", "text": "def update_rotation_img(self, context):\n \n cobj = get_object(context, self.lightname)\n world = context.scene.world\n mapping = world.node_tree.nodes['Mapping']\n mapping2 = world.node_tree.nodes['Mapping.001']\n if cobj.Lumiere.rotation_lock_img:\n mapping.rotation[2] += -(mapping2.rotation[2] - -math.radians(cobj.Lumiere.img_rotation))\n mapping2.rotation[2] = -math.radians(cobj.Lumiere.img_rotation)", "title": "" }, { "docid": "81b4f02078b106c0733267ebbf0e308a", "score": "0.7154416", "text": "def update(self):# this may work idk yet\n\n self.image_dom = rot_center(self.image, self.angle)", "title": "" }, { "docid": "48e19272dc7467feb989ff76e99d180a", "score": "0.71493363", "text": "def rotate_image(self, direction):\n logg = getMyLogger(f\"c.{__name__}.__init__\", \"INFO\")\n\n # self.image = rotate(self.orig_image, 360 - direction)\n self.image = rotate(self.orig_image, direction)\n self.rect = self.image.get_rect(center=(self.cx, self.cy))\n\n logg.debug(f\"lefttop {self.rect.topleft} rightbottom {self.rect.bottomright}\")\n logg.debug(f\"width {self.rect.width} height {self.rect.height}\")", "title": "" }, { "docid": "4d421b50529ff07188e04ef49d8be720", "score": "0.7141685", "text": "def rot(self, angle):\r\n cent = self.pos\r\n self.image = pygame.transform.rotate(self.image_clean, angle)\r\n self.rect = self.image.get_rect(center=cent)", "title": "" }, { "docid": "e3bc44619e856ca7d84e877dbe7d00a5", "score": "0.71191597", "text": "def rotate(self):\n # `rotozoom` usually looks nicer than `rotate`. Pygame's rotation\n # functions return new images and don't modify the originals.\n self.image = pg.transform.rotozoom(self.orig_image, self.angle, 1)\n # Create a new rect with the center of the old rect.\n self.rect = self.image.get_rect(center=self.rect.center)", "title": "" }, { "docid": "164e1822b3e3b376370008b6a041cdbb", "score": "0.7042807", "text": "def __rotate__(self,image : np.ndarray, \n angle : np.float, \n center : np.float =None, \n scale :np.float =1.0) -> np.ndarray:\n \n # grab the dimensions of the image\n (h, w) = image.shape[:2];\n \n # if the center is None, initialize it as the center of\n # the image\n if center is None:\n center = (w // 2, h // 2);\n \n # perform the rotation\n M = cv2.getRotationMatrix2D(center, angle, scale);\n rotated = cv2.warpAffine(image, M, (w, h));\n \n # return the rotated image\n return (rotated)", "title": "" }, { "docid": "9c5d7b0174a59ce9cb0c1638c21d532f", "score": "0.7011833", "text": "def translateImage(path,output):\n img = Image.open(path)\n xyTranslate = np.random.uniform(-10,10,2)\n a = 1; b = 0;\n c = xyTranslate[0] #left/right\n d = 0; e = 1;\n f = xyTranslate[1] #up/down\n img = img.transform(img.size, Image.AFFINE, (a, b, c, d, e, f))\n img.save(output)\n print(xyTranslate)", "title": "" }, { "docid": "cc4013c46e818c77d6ab34598885cbb7", "score": "0.69508266", "text": "def rotate_image(self, image, rotation):\n return pygame.transform.rotate(image, rotation)", "title": "" }, { "docid": "0606aad190d2418b881d00fa21ba38f0", "score": "0.69327295", "text": "def _rotate_image(self, angle):\n orig_rect = self._image.get_rect()\n rot_image = pygame.transform.rotate(self._image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "title": "" }, { "docid": "9f2d27825000fe267f9f2c0c9ed37975", "score": "0.6926178", "text": "def set_angle(self, degree):\n 
self.angle = degree\n oldcenter = self.rect.center\n self.image = pygame.transform.rotate(self.image0, self.angle)\n self.image.convert_alpha()\n self.rect = self.image.get_rect()\n self.rect.center = oldcenter\n #print(\"rotated to !\")", "title": "" }, { "docid": "36efa4a8261699a3b2c19d26bdb1e37a", "score": "0.6921291", "text": "def rotate(self, new_dir: float):\r\n self.img = rotated(self.org_img, new_dir)\r\n self.direction = new_dir", "title": "" }, { "docid": "9578890bc0306dd84ab0fc85ab3816d1", "score": "0.6888735", "text": "def rot_center(self, image, angle):\n\n loc = image.get_rect().center #rot_image is not defined \n rot_sprite = pygame.transform.rotate(image, angle)\n rot_sprite.get_rect().center = loc\n return rot_sprite", "title": "" }, { "docid": "6fe7917cf54182c4d69a571f113bec4a", "score": "0.6822434", "text": "def rotate(self):\n # Rotate the image.\n if self.active == True:\n random_angle = self.angle\n else:\n random_angle = self.angle + random.random() * 9 * (3720 - self.seconds) / 3720\n self.image = pygame.transform.rotozoom(self.orig_image, -random_angle, 1)\n # Rotate the offset vector.\n offset_rotated = self.offset.rotate(random_angle)\n # Create a new rect with the center of the sprite + the offset.\n self.rect = self.image.get_rect(center=self.pos+offset_rotated)", "title": "" }, { "docid": "2fc3f9b1520f597b03e04c96669158e8", "score": "0.67809975", "text": "def myRotate(image, rect, angle):\n # Rotate the original image without modifying it.\n new_image = pg.transform.rotozoom(image, angle,1)\n # Get a new rect with the center of the old rect.\n rect = new_image.get_rect(center=rect.center)\n return new_image, rect", "title": "" }, { "docid": "1ef86f2df6203d82dd57e4d7bc57bbf7", "score": "0.6730643", "text": "def rot_center(image, angle):\n\n loc = image.get_rect().center #rot_image is not defined \n rot_sprite = pygame.transform.rotate(image, angle)\n rot_sprite.get_rect().center = loc\n return rot_sprite", "title": "" }, { "docid": "73774881aa92780bad457c9d84ac1c11", "score": "0.67134476", "text": "def SetImageOrientation(self, , ):\n ...", "title": "" }, { "docid": "92589805a9067d04a6c390d24c207a1c", "score": "0.6690327", "text": "def transform(self, image, mode):", "title": "" }, { "docid": "53318af104cf0271331ac3acbfa85fd4", "score": "0.6678217", "text": "def _spin(self):\n center = self.rect.center\n self.rotation_angle += 12\n if self.rotation_angle >= 360:\n self.rotation_angle = 0\n self.image = self.original_image\n else:\n self.image = pygame.transform.rotate(self.original_image, self.rotation_angle)\n self.rect = self.image.get_rect(center=center)", "title": "" }, { "docid": "5d807d11532d083b7d033491ab018483", "score": "0.6655987", "text": "def rotated(img, new_dir):\r\n return new_dir_func[new_dir](img)", "title": "" }, { "docid": "b82ba770fb117d475ab884b2bf00dbc8", "score": "0.66183615", "text": "def modimage():", "title": "" }, { "docid": "fd0c32e5a803411f4d998e51d075f6ff", "score": "0.6591307", "text": "def rotateImage(path,output):\n im = Image.open(path)\n rotation = np.random.uniform(-10,10)\n im = im.rotate(rotation)\n im.save(output)", "title": "" }, { "docid": "5f612dd3aa4ee8875e2b04050670bfcc", "score": "0.6581421", "text": "def rotate(self, filename):\n self.imgLast = self.imgObj.copy()\n self.imgFilename = \"./images/testPic.tiff\"\n rotate = self.imgObj.rotate(90)\n rotate.save(self.imgFilename)\n self.displayImage()", "title": "" }, { "docid": "c92dc055dfb9b7d4975b71e92b89fc15", "score": "0.65759426", "text": "def transform(self, rot, 
tran): # -> None:\n ...", "title": "" }, { "docid": "c92dc055dfb9b7d4975b71e92b89fc15", "score": "0.65759426", "text": "def transform(self, rot, tran): # -> None:\n ...", "title": "" }, { "docid": "3a55ea09c570a8a0ed65ced602bcb6ed", "score": "0.6547076", "text": "def rot_center(self, angle):\r\n self.image = pygame.transform.rotate(self.image_clean, angle)\r\n self.rect = self.image.get_rect(center=self.rect.center)", "title": "" }, { "docid": "4d6d52ca61abe9c947b01a0bdc268ed9", "score": "0.65281606", "text": "def rot_center(self, image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "title": "" }, { "docid": "f1220301a8875c4c86da842ee8fad233", "score": "0.64462095", "text": "def _rotate(self, im, meta):\n if self.request.kwargs.get(\"exifrotate\", None) == 2:\n try:\n ori = meta[\"EXIF_MAIN\"][\"Orientation\"]\n except KeyError: # pragma: no cover\n pass # Orientation not available\n else: # pragma: no cover - we cannot touch all cases\n # www.impulseadventure.com/photo/exif-orientation.html\n if ori in [1, 2]:\n pass\n if ori in [3, 4]:\n im = np.rot90(im, 2)\n if ori in [5, 6]:\n im = np.rot90(im, 3)\n if ori in [7, 8]:\n im = np.rot90(im)\n if ori in [2, 4, 5, 7]: # Flipped cases (rare)\n im = np.fliplr(im)\n return im", "title": "" }, { "docid": "f1220301a8875c4c86da842ee8fad233", "score": "0.64462095", "text": "def _rotate(self, im, meta):\n if self.request.kwargs.get(\"exifrotate\", None) == 2:\n try:\n ori = meta[\"EXIF_MAIN\"][\"Orientation\"]\n except KeyError: # pragma: no cover\n pass # Orientation not available\n else: # pragma: no cover - we cannot touch all cases\n # www.impulseadventure.com/photo/exif-orientation.html\n if ori in [1, 2]:\n pass\n if ori in [3, 4]:\n im = np.rot90(im, 2)\n if ori in [5, 6]:\n im = np.rot90(im, 3)\n if ori in [7, 8]:\n im = np.rot90(im)\n if ori in [2, 4, 5, 7]: # Flipped cases (rare)\n im = np.fliplr(im)\n return im", "title": "" }, { "docid": "d469efbe104644a8b714b2a23269a158", "score": "0.643348", "text": "def rot_center(self, image, rect, angle):\n self.rot_image = pygame.transform.rotate(image, angle)\n self.rot_rect = self.rot_image.get_rect(center=rect.center)\n return self.rot_image, self.rot_rect", "title": "" }, { "docid": "d469efbe104644a8b714b2a23269a158", "score": "0.643348", "text": "def rot_center(self, image, rect, angle):\n self.rot_image = pygame.transform.rotate(image, angle)\n self.rot_rect = self.rot_image.get_rect(center=rect.center)\n return self.rot_image, self.rot_rect", "title": "" }, { "docid": "98b699441cff8533063e86857eb3243c", "score": "0.6417123", "text": "def rotate():\r\n\tglobal image\r\n\r\n\t# make sure that an image is uploaded\r\n\tif type(image) == type(False):\r\n\t\treturn\r\n\t\r\n\t# rotate \r\n\timage = cv2.rotate(image, cv2.cv2.ROTATE_90_CLOCKWISE)\r\n\tupdate()", "title": "" }, { "docid": "fd70c17ee89cbeeff52c3c5e9437dce0", "score": "0.6416791", "text": "def rotate(self, by_degree):\n self.angle += by_degree\n oldcenter = self.rect.center\n self.image = pygame.transform.rotate(self.image0, self.angle)\n self.image.convert_alpha()\n self.rect = self.image.get_rect()\n self.rect.center = oldcenter\n #print(\"rotated by !\")", "title": "" }, { "docid": "8dff1bc457299eee16ff64de62245ea1", "score": "0.6416645", "text": "def translateDemo(img):\n cv2.imshow(\"Original\", img)\n (hgt, wid, dep) = img.shape\n for 
(xDist, yDist) in [(50, 50), (50, 200), (-10, 50), (-50, -50)]:\n transMatrix = np.float32([[1, 0, xDist], [0, 1, yDist]])\n newIm = cv2.warpAffine(img, transMatrix, (2*wid, 2*hgt))\n cv2.imshow(\"Translated\", newIm)\n cv2.waitKey()\n\n newIm = img.copy()\n for (xDist, yDist) in [(50, 50), (50, 200), (-10, 50), (-50, -50)]:\n transMatrix = np.float32([[1, 0, xDist], [0, 1, yDist]])\n newIm = cv2.warpAffine(newIm, transMatrix, (wid, hgt))\n cv2.imshow(\"Translated\", newIm)\n cv2.waitKey()", "title": "" }, { "docid": "791dbe6555a3ef50b42c50df6d9f54cc", "score": "0.64146084", "text": "def rotate(self, image):\n image = image.reshape([HEIGHT, WIDTH])\n image = np.fliplr(image)\n image = np.rot90(image)\n return image", "title": "" }, { "docid": "7a294d81dae12b30b5553788c268b508", "score": "0.6412585", "text": "def rotate(img):\r\n try:\r\n print(\"1.90\\n2.180\\n3.270\\n4.definir num grados\")\r\n z=input(\"Elija una opcion:\")\r\n if z == \"1\":\r\n rotpic= img.transpose(Image.ROTATE_90)\r\n elif z == \"2\":\r\n rotpic= img.transpose(Image.ROTATE_180)\r\n elif z == \"3\":\r\n rotpic= img.transpose(Image.ROTATE_270)\r\n elif z==\"4\":\r\n grad=input(\"Cuantos grados deseas rotar la imagen:\")\r\n rotpic=img.rotate(int(grad))\r\n else:\r\n print(\"Esa no vale\")\r\n rotpic.show()\r\n yn=input(\"Quieres guardar(s/n):\")\r\n if yn==\"y\":\r\n quality_val=90\r\n name=input(\"Dime el nombre con extension:\")\r\n rotpic.save(name,quality=quality_val)\r\n elif yn==\"n\":\r\n print(\"Imagen no guardada\")\r\n except:\r\n print(\"No se pudo realizar la operacion\")", "title": "" }, { "docid": "ec6bd501e2977b3d997e6af6cb915f0e", "score": "0.6410439", "text": "def _apply_rotate(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "7a6acce2fa236a4be51c91f92c117876", "score": "0.6397396", "text": "def rot_im(img,phi,x0,y0):\r\n xc,yc = img.shape\r\n xc *= 0.5\r\n yc *= 0.5\r\n to_shiftx = xc - x0\r\n to_shifty = yc - y0\r\n #shift to center\r\n shifted = shift(img, (to_shiftx,to_shifty))\r\n #rotate around center\r\n rot_shifted = rotate(shifted,phi*180/np.pi, reshape = False)\r\n #shift back\r\n final = shift(rot_shifted,(-to_shiftx,-to_shifty))\r\n return final", "title": "" }, { "docid": "1adfeae8e2d6a53b695859eb88c75ff8", "score": "0.639595", "text": "def transform(self, sens):\n # /\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\\n if sens == \"auto\":\n if 'Exif.Image.Orientation' in self.__info.exifKeys():\n exifSens = int(self.__info['Exif.Image.Orientation'])\n if exifSens not in autoTrans.keys():\n exifSens = 1\n sens = autoTrans[exifSens][0]\n else:\n sens = autoTrans[1][0]\n if sens == \"rotate90\":\n jpegtranOpt = [\"-rotate\", \"90\"]\n exiftranOpt = \"-9\"\n elif sens == \"rotate180\":\n jpegtranOpt = [\"-rotate\", \"180\"]\n exiftranOpt = \"-1\"\n elif sens == \"rotate270\":\n jpegtranOpt = [\"-rotate\", \"270\"]\n exiftranOpt = \"-2\"\n elif sens == \"flipHorizontal\":\n jpegtranOpt = [\"-flip\", \"horizontal\"]\n exiftranOpt = \"-F\"\n elif sens == \"flipVertical\":\n jpegtranOpt = [\"-flip\", \"vertical\"]\n exiftranOpt = \"-f\"\n elif sens == \"transpose\":\n jpegtranOpt = [\"-transpose\"]\n exiftranOpt = \"-t\"\n elif sens == \"transverse\":\n jpegtranOpt = [\"-transverse\"]\n exiftranOpt = \"-T\"\n\n if not(sens == \"none\"):\n if _Command.isWin:\n _Command._run([_Command._jpegtran] + jpegtranOpt +\n ['-copy', 'all', self.__file, self.__file])\n # rebuild the exif thumb and reset the orientation tag,\n # because 
jpegtran doesn't do it on windows\n self.rebuildExifTB()\n self.__info['Exif.Image.Orientation'] = 1\n self.__maj()\n else:\n # exiftran rotate internal exif thumb\n _Command._run([_Command._exiftran, exiftranOpt,\n '-i', self.__file])\n\n self.__refresh()", "title": "" }, { "docid": "381af0bab80677efb16c34c722799cbf", "score": "0.6394181", "text": "def set_angle(self, degree):\n self.angle = degree\n oldcenter = self.rect.center\n self.image = pygame.transform.rotate(self.image0, self.angle)\n self.image.convert_alpha()\n self.rect = self.image.get_rect()\n self.rect.center = oldcenter", "title": "" }, { "docid": "47829e829b796832c28bea580ee307ec", "score": "0.63731086", "text": "def rotate_image(img, angle):\n\n # Use SciPy to rotate the image\n img_rot = rotate(img, angle)\n\n # Return the image\n return img_rot", "title": "" }, { "docid": "177203b7c461f6985982a1d3fa149b72", "score": "0.6370935", "text": "def update_rotation(scModel):\n rotateOps = ['OutputPng', 'OutputTif']\n projectDir = scModel.getGraph().dir\n for edge in scModel.getGraph().get_edges():\n currentLink = scModel.getGraph().get_edge(edge[0], edge[1])\n if currentLink['op'] in rotateOps:\n if 'arguments' not in currentLink:\n currentLink['arguments'] = {}\n if 'Image Rotated' in currentLink['arguments']:\n continue\n change = edge['shape change'] if 'shape change' in edge else None\n if change and change != '(0,0)':\n currentLink['arguments']['Image Rotated'] = 'yes'\n elif change and change == '(0,0)':\n currentLink['arguments']['Image Rotated'] = 'no'\n else:\n startFile = scModel.getGraph().get_node(edge[0])['file']\n endFile = scModel.getGraph().get_node(edge[1])['file']\n im1 = Image.open(os.path.join(projectDir, startFile))\n im2 = Image.open(os.path.join(projectDir, endFile))\n if im1.size != im2.size:\n currentLink['arguments']['Image Rotated'] = 'yes'\n else:\n currentLink['arguments']['Image Rotated'] = 'no'", "title": "" }, { "docid": "689f07d3b5548974880412259da994b1", "score": "0.6351308", "text": "def rot_center(image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "title": "" }, { "docid": "689f07d3b5548974880412259da994b1", "score": "0.6351308", "text": "def rot_center(image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "title": "" }, { "docid": "3f210e22c70f436b27027760d63f9eda", "score": "0.6350024", "text": "def rotateImage(img, center, angle):\n if angle == 0:\n return img, center, None\n\n # M = cv2.getRotationMatrix2D(tuple(center), angle, 1)\n # size = max(img.shape[0], img.shape[1])\n # used for expanding the rotated image\n # im_max_shape = max(img.shape[1], img.shape[0])\n # print(\"max image shape: {}\".format(im_max_shape))\n # im_center = (im_max_shape/2, im_max_shape/2)\n # translation = np.array(im_center) - np.array([img.shape[1]/2, img.shape[0]/2])\n # print(translation)\n # T = np.identity(3)\n # # T[0:1,2] = translation\n # T[0,2] = translation[0]\n # T[1,2] = translation[1]\n # M2 = np.identity(3)\n # print(\"M: {}\".format(M))\n # M2[0:2,:] = M\n # print(\"M2: {}\".format(M2))\n # M3 = np.dot(T, M2)\n # print(\"M3: {}\".format(M3))\n # M1 = M3[0:2,:]\n # print(\"M1: {}\".format(M1))\n\n # if 
img_type == \"PILATUS\":\n # img = img.astype('float32')\n # if mask_thres == -999:\n # mask_thres = getMaskThreshold(img, img_type)\n # mask = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)\n # mask[img <= mask_thres] = 255\n # rotated_img, center, rotMat = rotateNonSquareImage(img, angle, center)\n # rotated_mask, _, _ = rotateNonSquareImage(mask, angle, center)\n # rotated_mask[rotated_mask > 0.] = 255\n # rotated_img[rotated_mask > 0] = mask_thres\n # return rotated_img, center, rotMat\n # else:\n return rotateNonSquareImage(img, angle, center)", "title": "" }, { "docid": "68b60bedd55cde6dfd402268b90f094a", "score": "0.6346945", "text": "def _augment(self, img):", "title": "" }, { "docid": "54f7809174346b86b021754919b8ef9f", "score": "0.6338935", "text": "def augment_image(self, image, transformation, **kwargs):\n raise NotImplementedError()", "title": "" }, { "docid": "d18f422b184cc0400b75d0d6c82f0f54", "score": "0.6316875", "text": "def rot_center(image, angle):\r\n orig_rect = image.get_rect()\r\n rot_image = pygame.transform.rotate(image, angle)\r\n rot_rect = orig_rect.copy()\r\n rot_rect.center = rot_image.get_rect().center\r\n rot_image = rot_image.subsurface(rot_rect).copy()\r\n return rot_image", "title": "" }, { "docid": "d18f422b184cc0400b75d0d6c82f0f54", "score": "0.6316875", "text": "def rot_center(image, angle):\r\n orig_rect = image.get_rect()\r\n rot_image = pygame.transform.rotate(image, angle)\r\n rot_rect = orig_rect.copy()\r\n rot_rect.center = rot_image.get_rect().center\r\n rot_image = rot_image.subsurface(rot_rect).copy()\r\n return rot_image", "title": "" }, { "docid": "a5bb87c02fb72a80d9ef9e38574e8c29", "score": "0.6295508", "text": "def image_rotate(image, label, angle=5):\n image = array_to_img(image.astype(np.uint8), scale=False, dtype=np.uint8)\n label = array_to_img(label.astype(np.uint8), scale=False, dtype=np.uint8)\n return np.array(image.rotate(angle), dtype=np.uint8), np.array(label.rotate(angle), dtype=np.uint8)", "title": "" }, { "docid": "245854309d430354728062f0385868bd", "score": "0.6286408", "text": "def transform_image(self, image, angle):\n if np.random.randint(2):\n image, angle = self.image_flipping(image, angle)\n if np.random.randint(2):\n image, angle = self.image_shifting(image, angle)\n if np.random.randint(2):\n image, angle = self.image_brightness(image, angle)\n if np.random.randint(2):\n image, angle = self.image_shadow(image, angle)\n return (image, angle)", "title": "" }, { "docid": "aef93eadb74eb4c152ccaf39504de2d0", "score": "0.6251541", "text": "def rot_center(image, angle):\n\torig_rect = image.get_rect()\n\trot_image = pygame.transform.rotate(image, angle)\n\trot_rect = orig_rect.copy()\n\trot_rect.center = rot_image.get_rect().center\n\trot_image = rot_image.subsurface(rot_rect).copy()\n\treturn rot_image", "title": "" }, { "docid": "cdf2844ad70a77608b47c866b0a6a363", "score": "0.6243679", "text": "def rot_center(image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "title": "" }, { "docid": "cdf2844ad70a77608b47c866b0a6a363", "score": "0.6243679", "text": "def rot_center(image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "title": 
"" }, { "docid": "cdf2844ad70a77608b47c866b0a6a363", "score": "0.6243679", "text": "def rot_center(image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "title": "" }, { "docid": "cdf2844ad70a77608b47c866b0a6a363", "score": "0.6243679", "text": "def rot_center(image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "title": "" }, { "docid": "65cb5a0cf2d2b0bea08cf9b1085a5aa6", "score": "0.6224902", "text": "def affine_transform_rotation(image, theta):\n digit_location = np.argwhere(image > 0)\n \"\"\"\n move digit to [0, 0]\n \"\"\"\n digit_mass_center = digit_location.mean(axis=0)\n transform_location = np.int_(np.ones((digit_location.shape[0], 3)))\n transform_location[:, :2] = digit_location\n translation_matrix = np.array([[1, 0, -digit_mass_center[0]],\n [0, 1, -digit_mass_center[1]],\n [0, 0, 1]])\n transform_location = np.int_(np.round(np.dot(translation_matrix, transform_location.T))).T\n \"\"\"\n rotate image\n \"\"\"\n theta = (theta / 360) * (2 * math.pi)\n rotation_matrix = np.array([[math.cos(theta), math.sin(theta), 0],\n [-math.sin(theta), math.cos(theta), 0],\n [0, 0, 1]])\n transform_location = np.int_(np.round(np.dot(rotation_matrix, transform_location.T))).T\n \"\"\"\n move back digit\n \"\"\"\n back_translation_matrix = np.array([[1, 0, digit_mass_center[0]],\n [0, 1, digit_mass_center[1]],\n [0, 0, 1]])\n transform_location = np.int_(np.round(np.dot(back_translation_matrix, transform_location.T))).T\n \"\"\"\n Check if the digit is out of image\n \"\"\"\n max_x = np.amax(transform_location[:, 0])\n min_x = np.amin(transform_location[:, 0])\n max_y = np.amax(transform_location[:, 1])\n min_y = np.amin(transform_location[:, 1])\n if max_x > 27 or min_x < 0 or max_y > 27 or min_y < 0:\n back_x = 0\n back_y = 0\n if max_x > 27:\n back_x = 27 - max_x\n elif min_x < 0:\n back_x = -min_x\n if max_y > 27:\n back_y = 27 - max_y\n elif min_y < 0:\n back_y = -min_y\n back_translation_matrix = np.array([[1, 0, back_x],\n [0, 1, back_y],\n [0, 0, 1]])\n transform_location = np.int_(np.dot(back_translation_matrix, transform_location.T)).T\n \"\"\"\n Generate new image\n \"\"\"\n new_image = np.zeros((28, 28))\n new_image[transform_location[:, 0], transform_location[:, 1]] = image[digit_location[:, 0], digit_location[:, 1]]\n \"\"\"\n Fill the empty pixels\n \"\"\"\n max_x = np.amax(transform_location[:, 0])\n min_x = np.amin(transform_location[:, 0])\n max_y = np.amax(transform_location[:, 1])\n min_y = np.amin(transform_location[:, 1])\n for tmp_x in range(min_x, max_x):\n for tmp_y in range(min_y, max_y):\n pixel = new_image[tmp_x, tmp_y]\n if pixel == 0:\n up_pixel = new_image[tmp_x-1, tmp_y]\n down_pixel = new_image[tmp_x+1, tmp_y]\n left_pixel = new_image[tmp_x, tmp_y-1]\n right_pixel = new_image[tmp_x, tmp_y+1]\n if up_pixel > 0 and down_pixel > 0 and right_pixel > 0 and left_pixel > 0:\n new_image[tmp_x, tmp_y] = (up_pixel + down_pixel + right_pixel + left_pixel) / 4\n elif up_pixel > 0 and down_pixel > 0:\n new_image[tmp_x, tmp_y] = (up_pixel + down_pixel) / 2\n elif left_pixel > 0 and right_pixel > 0:\n new_image[tmp_x, tmp_y] = (left_pixel + right_pixel) / 2\n return new_image", "title": "" }, 
{ "docid": "aba4c4288471118e573b3b6db5ad77a5", "score": "0.62127775", "text": "def rotate(self, angle:int) -> None:\n\n #If the image exists\n if self.image:\n\n #Rotate the image\n self.image = pygame.transform.rotate(self.image, angle)\n\n #Load the rectangle\n self.load_rect()\n \n #Otherwise do nothing\n else:\n if self.debug:\n print(\"There is no image to rotate\")", "title": "" }, { "docid": "d74da75dc11906bd75685a99e3747685", "score": "0.62125003", "text": "def rotateDemo(img):\n cv2.imshow(\"Original\", img)\n (hgt, wid, dep) = img.shape\n midPt = (wid / 2, hgt / 2)\n for angle in range(10, 180, 20):\n rotMat = cv2.getRotationMatrix2D(midPt, angle, 1)\n newIm = cv2.warpAffine(img, rotMat, (wid, hgt))\n cv2.imshow(\"Rotated\", newIm)\n cv2.waitKey()\n\n for angle in range(10, 180, 20):\n rotMat = cv2.getRotationMatrix2D((0, 0), angle, 1)\n newIm = cv2.warpAffine(img, rotMat, (2 * wid, 2 * hgt))\n cv2.imshow(\"Rotated\", newIm)\n cv2.waitKey()", "title": "" }, { "docid": "faf322dc08c65baf51701ee348e8217f", "score": "0.621162", "text": "def rotate(image_path, deg = 90, url = False):\n if url == False:\n image = Image.open(image_path)\n mat_image = np.array(image)\n m, n, d = mat_image.shape\n \n elif url == True:\n response = requests.get(image_path)\n with Image.open(BytesIO(response.content)) as image:\n mat_image = np.array(image)\n m, n, d = mat_image.shape \n\n if deg == 90:\n\n mat_image = list(zip(*mat_image))\n for i in range(n):\n reversed_row = []\n for j in range(m-1, -1, -1):\n reversed_row.append(mat_image[i][j])\n mat_image[i] = reversed_row\n\n plt.axis('off')\n plt.imsave(\"rotated_image.png\", mat_image)\n return imshow(mat_image)\n \n\n elif deg == 180:\n mat_image_column_reverse = copy.deepcopy(mat_image)\n\n for i in range(n):\n c = 0\n for j in range(m-1,-1,-1):\n mat_image_column_reverse[c][i] = mat_image[j][i]\n c += 1\n\n plt.axis('off')\n plt.imsave(\"rotated_image.png\", mat_image)\n return imshow(mat_image_column_reverse)\n\n elif deg == 270:\n\n mat_image = list(zip(*mat_image))\n \n plt.axis('off')\n return imshow(mat_image)", "title": "" }, { "docid": "aae73fb484075a761b96f938be2d7e0e", "score": "0.6204244", "text": "def rotateFr(im,angle=0,expand=True,bgColor=150):\r\n# from PIL import Image\r\n# angle = 73\r\n a=360-angle\r\n img = Image. 
fromarray(im)\r\n fr=img.rotate(a,expand=expand,fillcolor=bgColor)\r\n imgRt = np.array(fr) \r\n return imgRt", "title": "" }, { "docid": "5453e42d315a3e09461a5768254a3c11", "score": "0.61978346", "text": "def translate_and_rotate_and_scale(self):\n self._registration_mode = \"TranslateRotateScale\"", "title": "" }, { "docid": "f53dc262402f32857cb225ce3011d335", "score": "0.6196463", "text": "def img_rotate(inp_image, rotation_angle):\r\n random_rotation_angle = np.random.randint(-rotation_angle, rotation_angle)\r\n output = rotate(inp_image, random_rotation_angle, axes=(1, 2), reshape=False)\r\n return output", "title": "" }, { "docid": "ee3dc3a2f8948c2ac0b738641e7826a4", "score": "0.6193574", "text": "def update_rotation_img_lock(self, context):\n \n cobj = get_object(context, self.lightname)\n world = context.scene.world\n mapping = world.node_tree.nodes['Mapping']\n mapping2 = world.node_tree.nodes['Mapping.001']\n \n if cobj.Lumiere.rotation_lock_img == False:\n if round(-math.degrees(mapping.rotation[2]), 2) != round(cobj.Lumiere.hdri_rotation, 2) :\n cobj.Lumiere.hdri_rotation = -math.degrees(mapping.rotation[2])", "title": "" }, { "docid": "6880e399cbaca2012b69c9c98d9862d5", "score": "0.61934865", "text": "def rotate(self, direction):\r\n # Calculate the angle\r\n dir_x, dir_y = direction\r\n length = math.sqrt(math.fabs(dir_x) + math.fabs(dir_y))\r\n angle = math.degrees(math.acos(dir_x / length))\r\n if dir_y != 0:\r\n angle *= -dir_y\r\n\r\n # Don't rotate the image if it already looks in the desired direction\r\n if self.angle == angle:\r\n return\r\n\r\n self.img = pygame.transform.rotate(self.orig_img, angle)\r\n self.angle = angle", "title": "" }, { "docid": "6738ff7f09795efbb879c1fbd6723bc8", "score": "0.61885715", "text": "def rotate_image(self, img):\n cx, cy = 9, 9\n index = 0\n\n index += 1\n mx, my = self.find_eyes(img)\n angle = atan2(mx - cx, my - cy)\n\n angle *= 180 / pi\n\n a = angle - -90\n a = ((a + 180) % (360)) - 180\n\n graphic = Im.fromarray(img)\n graphic = graphic.rotate(a)\n\n array = numpy.asarray(graphic, dtype = numpy.uint8)\n\n return array", "title": "" }, { "docid": "3e345b0737a699d6d7ee6d62668bc50f", "score": "0.6175875", "text": "def geometric_transformation(image, mode='zoom'):\n if mode == 'zoom':\n ratio = random.sample([0.8, 0.85, 0.9, 0.95], 1)[0]\n # print('zoom:{}'.format(ratio))\n if random.randint(0, 1) == 0:\n image = cv2.resize(image, None, fx=ratio, fy=1, interpolation=cv2.INTER_LINEAR)\n else:\n image = cv2.resize(image, None, fx=1, fy=ratio, interpolation=cv2.INTER_LINEAR)\n elif mode == 'rotate':\n imgH, imgW = image.shape[:2]\n angle = random.randint(1, 10) * 0.1\n # print('rotate:{}'.format(angle))\n M = cv2.getRotationMatrix2D((imgW / 2, imgH / 2), angle, 1)\n image = cv2.warpAffine(image, M, (imgW, imgH), borderMode=cv2.BORDER_REPLICATE)\n else:\n imgH, imgW = image.shape[:2]\n offset = random.randint(5, 15) * random.sample([-1, 1], 1)[0]\n # print('affine:{}'.format(offset))\n pts1 = np.float32([[0, 0], [0, imgH-1], [50, 0]])\n pts2 = np.float32([[0+offset, 0], [0, imgH-1], [50+offset, 0]])\n M = cv2.getAffineTransform(pts1, pts2)\n image = cv2.warpAffine(image, M, (imgW, imgH), borderMode=cv2.BORDER_REPLICATE)\n\n return image", "title": "" }, { "docid": "de6e9c0ca9f45d1b1ccf9441cd7ac558", "score": "0.6172783", "text": "def make_rotate(img,boxes):\n #cv2.imshow('pre_rotate', img)\n #height width\n rows,cols,_ = img.shape\n M = cv2.getRotationMatrix2D((cols/2,rows/2),90,1)\n rows_new = cols\n cols_new = rows\n M[0,2] += 
(cols_new-cols)/2\n M[1,2] += (rows_new-rows)/2\n img = cv2.warpAffine(img,M,(cols_new,rows_new))\n boxes_total = []\n rows_1,cols_1,_ = img.shape\n for ii in boxes:\n box = []\n width = ii[2]-ii[0]\n height = ii[3]-ii[1]\n new_x1 = ii[1]\n new_y1 = cols-ii[2]\n new_x2 = ii[3]\n new_y2 = cols-ii[0]\n box.append(new_x1)\n box.append(new_y1)\n box.append(new_x2)\n box.append(new_y2)\n boxes_total.append(box)\n #cv2.rectangle(img, (new_x1, new_y1), (new_x2, new_y2), (255, 0, 255), 2) \n return img,boxes_total", "title": "" }, { "docid": "d50733e695787aae2ec2e998234097db", "score": "0.61710775", "text": "def _compensate_rotation_shift(self, img, scale):\n ctr = np.asarray([self.center[1]*scale, self.center[0]*scale])\n tform1 = transform.SimilarityTransform(translation=ctr)\n tform2 = transform.SimilarityTransform(rotation=np.pi/2 - self.angle)\n tform3 = transform.SimilarityTransform(translation=-ctr)\n tform = tform3 + tform2 + tform1\n\n rows, cols = img.shape[0], img.shape[1]\n corners = np.array([\n [0, 0],\n [0, rows - 1],\n [cols - 1, rows - 1],\n [cols - 1, 0]\n ])\n corners = tform.inverse(corners)\n minc = corners[:, 0].min()\n minr = corners[:, 1].min()\n maxc = corners[:, 0].max()\n maxr = corners[:, 1].max()\n\n # fit output image in new shape\n translation = (minc, minr)\n tform4 = transform.SimilarityTransform(translation=translation)\n tform = tform4 + tform\n tform.params[2] = (0, 0, 1)\n\n # Compute the shift of the transformed center wrt original\n return (ctr - tform.inverse(ctr)).ravel().tolist()", "title": "" }, { "docid": "c6e4fcd27d71e1258b7aa5626c9fb2ac", "score": "0.61536133", "text": "def rotate_preview(self, angle):\r\n self.angle = angle\r\n slice = self.outer.get_current_slice()\r\n slice = Image.fromarray(np.array(slice,dtype=np.uint32)).rotate(angle,resample=PIL.Image.BICUBIC)\r\n self.ui.slices.update(np.array(slice))", "title": "" }, { "docid": "0a75bdd86b44b5b06f2dd7c0bad7e42f", "score": "0.6149548", "text": "def rot_center(image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n\n return rot_image", "title": "" }, { "docid": "81b3fa44c789085ab01003d447085816", "score": "0.6148666", "text": "def transform(image):\n transformed_image = image\n return transformed_image", "title": "" }, { "docid": "4582ae47a68adac3e12af51339603a9a", "score": "0.6133787", "text": "def rotate(self, by_degree):\n self.angle += by_degree\n oldcenter = self.rect.center\n self.image = pygame.transform.rotate(self.image0, self.angle)\n self.image.convert_alpha()\n self.rect = self.image.get_rect()\n self.rect.center = oldcenter", "title": "" }, { "docid": "6b19739398c1adf0239b582ba3206402", "score": "0.611402", "text": "def rotate(self, fname, angle, overwrite=False, nname=None):\n \n try:\n image = self.current[fname]\n \n except KeyError:\n self.load(fname)\n image = self.current[fname]\n\n if overwrite:\n self.current[fname] = pg.transform.rotate(image, angle)\n \n else:\n self.current[nname] = pg.transform.rotate(image, angle)", "title": "" }, { "docid": "565556c77ab5873f65866fd000774651", "score": "0.6100466", "text": "def drawImageRotated(self, rect, filename, rotDegrees=0, textureEffect=0):\r\n\r\n if textureEffect == TEXTURE_ROTATE_90:\r\n textureCoords = [[0.0,0.0],[0.0,1.0],[1.0,1.0],[1.0,0.0]]\r\n elif textureEffect == TEXTURE_ROTATE_180:\r\n textureCoords = [[1.0,0.0],[0.0,0.0],[0.0,1.0],[1.0,1.0]]\r\n 
elif textureEffect == TEXTURE_ROTATE_270: \r\n textureCoords = [[1.0,1.0],[1.0,0.0],[0.0,0.0],[0.0,1.0]]\r\n elif textureEffect == TEXTURE_MIRROR_H:\r\n textureCoords = [[1.0,1.0],[0.0,1.0],[0.0,0.0],[1.0,0.0]]\r\n elif textureEffect == TEXTURE_MIRROR_V:\r\n textureCoords = [[0.0,0.0],[1.0,0.0],[1.0,1.0],[0.0,1.0]]\r\n else:\r\n textureCoords = [[0.0,1.0],[1.0,1.0],[1.0,0.0],[0.0,0.0]]\r\n\r\n if filename not in self.textures:\r\n self.loadTexture(filename)\r\n\r\n texture = self.textures[filename]\r\n\r\n glColor4ub( 255, 255, 255, 255 )\r\n glEnable(GL_TEXTURE_2D)\r\n glBindTexture( GL_TEXTURE_2D, texture)\r\n\r\n halfwidth = rect[2] / 2\r\n halfheight = rect[3] / 2\r\n\r\n glPushMatrix()\r\n glTranslate(rect[0] + (halfwidth), rect[1] + (halfheight), 0.0)\r\n glRotate(rotationDegrees, 0.0, 0.0, 1.0) # Rotate\r\n\r\n glBegin(GL_QUADS)\r\n glTexCoord2f(textureCoords[0][0], textureCoords[0][1])\r\n glVertex2i( -halfwidth, -halfheight) \r\n glTexCoord2f(textureCoords[1][0], textureCoords[1][1])\r\n glVertex2i( halfwidth, -halfheight)\r\n glTexCoord2f(textureCoords[2][0], textureCoords[2][1])\r\n glVertex2i( halfwidth, halfheight)\r\n glTexCoord2f(textureCoords[3][0], textureCoords[3][1])\r\n glVertex2i( -halfwidth, halfheight)\r\n\r\n glEnd()\r\n glPopMatrix()\r\n\r\n glDisable(GL_TEXTURE_2D)", "title": "" }, { "docid": "4001cecabd295e8b2242aaf36f7e33d0", "score": "0.60866207", "text": "def apply(aug,img):\n\tif aug == \"NO\":\n\t\treturn img\n\telif aug == \"ROT90\":\n\t\treturn rot90(img,1)\n\telif aug == \"ROT180\":\n\t\treturn rot90(img,2)\n\telif aug == \"ROT270\":\n\t\treturn rot90(img,3)\n\telif aug == \"FLIP_UD\":\n\t\treturn flip_up(img)\n\telif aug == \"FLIP_LR\":\n\t\treturn flip_lr(img)", "title": "" }, { "docid": "5fdc164dd8cb8f77b744606ad6996e7f", "score": "0.60532695", "text": "def rotate (image_path, num_images, max_rotation):", "title": "" }, { "docid": "3393a2ab4df4762e33833882d239889a", "score": "0.60460377", "text": "def rotate(self, degrees):\n self._pil_image = self._pil_image.rotate(degrees)", "title": "" }, { "docid": "61d4d6044ab8e0f00ab0d84892d4d657", "score": "0.6045928", "text": "def update_image(self):\n # if mario is moving to the right, display the moving to the right image\n if self.vx > 0 & self.moving_right == False:\n self.image = self.image_right\n self.moving_right = True\n # if mario is moving to the left, display the moving to the left \n elif self.vx < 0 & self.moving_right:\n self.image = self.image_left\n self.moving_right = False", "title": "" }, { "docid": "ad135b9b34218249f43f72c2745be8a5", "score": "0.6040475", "text": "def rotate(self, sens):\n # /\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\\n if sens == \"R\":\n deg = \"90\"\n opt = \"-9\"\n else:\n deg = \"270\"\n opt = \"-2\"\n\n if _Command.isWin:\n _Command._run([_Command._jpegtran,\n '-rotate', deg, '-copy', 'all',\n self.__file, self.__file])\n # rebuild the exif thumb, because jpegtran doesn't do it on windows\n self.rebuildExifTB()\n else:\n # exiftran rotate internal exif thumb\n _Command._run([_Command._exiftran, opt, '-i', self.__file])\n\n self.__refresh()", "title": "" }, { "docid": "273a7a82769c7686eef6ee84e3f1b78d", "score": "0.60324323", "text": "def gTransform(self):\n # Mittepunkt zw. 
die Augen\n eye_center = ((self.lefteye_center[0]+self.righteye_center[0])*0.5,(self.lefteye_center[1]+self.righteye_center[1])*0.5)\n # Winkel der Augen\n dx = (self.righteye_center[0] - self.lefteye_center[0])\n dy = (self.righteye_center[1] - self.lefteye_center[1])\n length = math.sqrt(dx*dx+dy*dy)\n angle = math.atan2(dy,dx)*180.0/math.pi\n # Konstante von der Auge, die man braucht\n LEFT_EYE_X = 0.16\n LEFT_EYE_Y = 0.14\n RIGHT_EYE_X = (1.0-0.16)\n desired_len = (RIGHT_EYE_X - LEFT_EYE_X)\n scale = desired_len * self.FACE_WIDTH/length\n # Rotations Matrix\n rot_mat = cv2.getRotationMatrix2D(eye_center, angle, scale)\n rot_mat[0][2] += (self.FACE_WIDTH * 0.5) - eye_center[0]\n rot_mat[1][2] += (self.FACE_HEIGHT * LEFT_EYE_Y) - eye_center[1]\n # Erst mit grauwerten definiert\n self.fpp_result = np.ndarray(shape=(self.FACE_HEIGHT,self.FACE_WIDTH), dtype=np.uint8)\n self.fpp_result[:,:] = 128\n self.fpp_result = cv2.warpAffine(self.face,rot_mat,self.fpp_result.shape)", "title": "" }, { "docid": "e437a72291fb29fef7e7c80f77562fa4", "score": "0.6015557", "text": "def change_transform(self, scale):\n self.image = pg.transform.scale(self.image, (scale, scale))\n self.rect = self.image.get_rect()", "title": "" }, { "docid": "4fca81ab1dc6f659818ed6c4e8196332", "score": "0.60126394", "text": "def transpose(self, img):\n if self.screen['rotate_left_chk'].state == 'down':\n img = img.transpose(method=Image.ROTATE_90)\n elif self.screen['rotate_180_chk'].state == 'down':\n img = img.transpose(method=Image.ROTATE_180)\n elif self.screen['rotate_right_chk'].state == 'down':\n img = img.transpose(method=Image.ROTATE_270)\n if self.screen['flip_left2right_chk'].state == 'down':\n img = img.transpose(method=Image.FLIP_LEFT_RIGHT)\n elif self.screen['flip_top2bottom_chk'].state == 'down':\n img = img.transpose(method=Image.FLIP_TOP_BOTTOM)\n return img", "title": "" }, { "docid": "0f086544708923240dd020097bad44f3", "score": "0.6012348", "text": "def img_random_rotate(self, img, steering_angle, angle=30):\n tilt = np.random.randint(-1*angle, angle+1)\n height, width = img.shape[:2]\n M = cv2.getRotationMatrix2D((height/2, width/2), tilt, 1)\n img = cv2.warpAffine(img, M, (width, height))\n steering_angle += steering_angle * float(tilt/360)*0.001\n return img, steering_angle", "title": "" }, { "docid": "043820c9b88bfd6243a7aa516085cba8", "score": "0.59923303", "text": "def rotate90(image):\n\n return np.rot90(image, 3)", "title": "" }, { "docid": "fe0d46790d0c7f70f6f49b7c94f527fd", "score": "0.5984541", "text": "def rotateShip(self):\n \n self.getRotation()\n if self.rotateShipFlag == True:\n if self.finalAngle < self.angle:\n if self.angle > 1.5 and self.angle <= 3.1 and self.finalAngle < -1.5 and self.finalAngle >= -3.0:\n if self.angle == 3.1:\n self.angle = -3.1\n self.angle += 0.1\n self.angle = int((self.angle * 10) + 0.5) / 10.0\n self.rotatedShipImg = pygame.transform.rotate(self.shipImg, 360-self.angle*57.29)\n self.image = cfg.Image(self.rotatedShipImg)\n else:\n self.angle += 0.1\n self.angle = int((self.angle * 10) + 0.5) / 10.0\n self.rotatedShipImg = pygame.transform.rotate(self.shipImg, 360-self.angle*57.29)\n self.image = cfg.Image(self.rotatedShipImg)\n else:\n self.angle -= 0.1\n self.angle = int((self.angle * 10) - 0.5) / 10.0\n self.rotatedShipImg = pygame.transform.rotate(self.shipImg, 360-self.angle*57.29)\n self.image = cfg.Image(self.rotatedShipImg)\n elif self.finalAngle > self.angle:\n if self.angle < -1.5 and self.angle >= -3.0 and self.finalAngle <= 3.0 and self.finalAngle > 
1.5:\n if self.angle == -3.0:\n self.angle = 3.1\n self.angle -= 0.1\n self.angle = int((self.angle * 10) - 0.5) / 10.0\n self.rotatedShipImg = pygame.transform.rotate(self.shipImg, 360-self.angle*57.29)\n self.image = cfg.Image(self.rotatedShipImg)\n else:\n self.angle -= 0.1\n self.angle = int((self.angle * 10) - 0.5) / 10.0\n self.rotatedShipImg = pygame.transform.rotate(self.shipImg, 360-self.angle*57.29)\n self.image = cfg.Image(self.rotatedShipImg)\n else:\n self.angle += 0.1\n self.angle = int((self.angle * 10) + 0.5) / 10.0\n self.rotatedShipImg = pygame.transform.rotate(self.shipImg, 360-self.angle*57.29)\n self.image = cfg.Image(self.rotatedShipImg)\n elif self.finalAngle == 0.0:\n if self.angle < self.finalAngle:\n self.angle += 0.1\n self.angle = int((self.angle * 10) + 0.5) / 10.0\n self.rotatedShipImg = pygame.transform.rotate(self.shipImg, 360-self.angle*57.29)\n self.image = cfg.Image(self.rotatedShipImg)\n elif self.angle > self.finalAngle:\n self.angle -= 0.1\n self.angle = int((self.angle * 10) + 0.5) / 10.0\n self.rotatedShipImg = pygame.transform.rotate(self.shipImg, 360-self.angle*57.29)\n self.image = cfg.Image(self.rotatedShipImg)\n else:\n pass\n else:\n self.rotateShipFlag = False # flag True set on mouseclick\n\n if self.angle == self.finalAngle:\n self.angleReversed = False # flag True set for reversing angle", "title": "" }, { "docid": "20e24045e1039dfecda856fd108cb666", "score": "0.59746313", "text": "def transform_and_save(scan_filename, output_dir, rot=True, h_flip=True, v_flip=True, scale=True):\n scan = Image.open(scan_filename) \n endo = Image.open(scan_filename.replace(\"_ORIG\", \"_ENDO\"))\n epi = Image.open(scan_filename.replace(\"_ORIG\", \"_EPI\"))\n \n random.seed(230)\n if rot:\n \tfor i in range(0, 3):\n degree = random.randint(70 + (i * 90), 110 + (i * 90))\n scan_rotated = scan.rotate(degree, resample=Image.BILINEAR, expand=0)\n endo_rotated = endo.rotate(degree, resample=Image.BILINEAR, expand=0)\n epi_rotated = epi.rotate(degree, resample=Image.BILINEAR, expand=0)\n scan_rotated.save(Path(output_dir) / Path(scan_filename).parts[-1].replace(\"_ORIG\", \"_ORIG_AUG{}\".format(i)))\n endo_rotated.save(Path(output_dir) / Path(scan_filename).parts[-1].replace(\"_ORIG\", \"_ENDO_AUG{}\".format(i)))\n epi_rotated.save(Path(output_dir) / Path(scan_filename).parts[-1].replace(\"_ORIG\", \"_EPI_AUG{}\".format(i)))\n \n #Horizontal Flip\n if h_flip:\n scan_flippedH =scan.transpose(Image.FLIP_TOP_BOTTOM)\n endo_flippedH = endo.transpose(Image.FLIP_TOP_BOTTOM)\n epi_flippedH = epi.transpose(Image.FLIP_TOP_BOTTOM)\n scan_flippedH.save(Path(output_dir) / Path(scan_filename).parts[-1].replace(\"_ORIG\", \"_ORIG_AUG{}\".format(3)))\n endo_flippedH.save(Path(output_dir) / Path(scan_filename).parts[-1].replace(\"_ORIG\", \"_ENDO_AUG{}\".format(3)))\n epi_flippedH.save(Path(output_dir) / Path(scan_filename).parts[-1].replace(\"_ORIG\", \"_EPI_AUG{}\".format(3)))\n \n #Vertical Flip\n if v_flip:\n scan_flippedV =scan.transpose(Image.FLIP_LEFT_RIGHT)\n endo_flippedV = endo.transpose(Image.FLIP_LEFT_RIGHT)\n epi_flippedV = epi.transpose(Image.FLIP_LEFT_RIGHT)\n scan_flippedV.save(Path(output_dir) / Path(scan_filename).parts[-1].replace(\"_ORIG\", \"_ORIG_AUG{}\".format(4)))\n endo_flippedV.save(Path(output_dir) / Path(scan_filename).parts[-1].replace(\"_ORIG\", \"_ENDO_AUG{}\".format(4)))\n epi_flippedV.save(Path(output_dir) / Path(scan_filename).parts[-1].replace(\"_ORIG\", \"_EPI_AUG{}\".format(4)))\n \n #Scaling\n if scale:\n crop_percentage = 
random.uniform(0.15, 0.25)\n crop_pixel = int(crop_percentage * IMG_SIZE)\n scan_cropped = scan.crop((crop_pixel, crop_pixel, IMG_SIZE - crop_pixel, IMG_SIZE - crop_pixel))\n endo_cropped = endo.crop((crop_pixel, crop_pixel, IMG_SIZE - crop_pixel, IMG_SIZE - crop_pixel))\n epi_cropped = epi.crop((crop_pixel, crop_pixel, IMG_SIZE - crop_pixel, IMG_SIZE - crop_pixel))\n scan_scaled = scan_cropped.resize([IMG_SIZE,IMG_SIZE],Image.ANTIALIAS)\n endo_scaled = endo_cropped.resize([IMG_SIZE,IMG_SIZE],Image.ANTIALIAS)\n epi_scaled = epi_cropped.resize([IMG_SIZE,IMG_SIZE],Image.ANTIALIAS)\n scan_scaled.save(Path(output_dir) / Path(scan_filename).parts[-1].replace(\"_ORIG\", \"_ORIG_AUG{}\".format(5)))\n endo_scaled.save(Path(output_dir) / Path(scan_filename).parts[-1].replace(\"_ORIG\", \"_ENDO_AUG{}\".format(5)))\n epi_scaled.save(Path(output_dir) / Path(scan_filename).parts[-1].replace(\"_ORIG\", \"_EPI_AUG{}\".format(5)))", "title": "" }, { "docid": "8b04fdb3189ae7c8331b2f465d346baf", "score": "0.5968397", "text": "def rotate_image(image, angle, center):\n rot_mat = cv2.getRotationMatrix2D(tuple(center), angle, 1.0)\n result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)\n return result", "title": "" }, { "docid": "cef12c25978b620ef9b04b5326c7e74d", "score": "0.5965887", "text": "def rotate_image(img_path, angle):\n img = cv.imread(img_path, 1)\n # straiting the image:\n if angle != 0:\n # rotate the image to become straight\n w = img.shape[0]\n h = img.shape[1]\n cent = (w // 2, h // 2)\n # cent = (x36, y36)\n\n m = cv.getRotationMatrix2D(cent, angle, 1.0)\n rotated_img = cv.warpAffine(img, m, cent)\n return rotated_img", "title": "" }, { "docid": "c7af04ad083e37a721f19d9137ce8a1d", "score": "0.5960433", "text": "def translate_and_rotate(self):\n self._registration_mode = \"TranslateAndRotate\"", "title": "" }, { "docid": "dfb222b88d37817ab3b7b8bab521bffe", "score": "0.5942643", "text": "def rot_im_jax_exp(img,phi,x0,y0):\r\n x = np.arange(img.shape[0])\r\n y = np.arange(img.shape[1])\r\n X,Y = np.meshgrid(x,y)\r\n\r\n X_rot = (X - x0)*np.cos(phi) + (Y - y0)*np.sin(phi) + x0\r\n Y_rot = -1*(X - x0)*np.sin(phi) + (Y - y0)*np.cos(phi) + y0\r\n locs = np.stack([Y_rot,X_rot])\r\n return map_coordinates(img,locs, order = 1 )", "title": "" }, { "docid": "4619eabf4d5eb18e572d269679bc8054", "score": "0.59241337", "text": "def image_rotating(img):\n rot_time = np.random.randint(low=0, high=4)\n img = np.rot90(img, rot_time, (0, 1))\n return img", "title": "" }, { "docid": "fa84bf85edec29b226568438e64fa77b", "score": "0.5916215", "text": "def rot(img, deg, axes=(1,0), reshape=False, prefilter=False, output=np.float64):\n img_rot = rotate(img, angle=deg, axes=axes, reshape=reshape, \\\n prefilter=prefilter, output=output)\n \n return img_rot", "title": "" }, { "docid": "40536033cc0f97e334d4a54c7f3cc9e8", "score": "0.5915281", "text": "def transform_image(img, ang_range, shear_range, trans_range):\n\n # Rotation\n ang_rot = np.random.uniform(ang_range) - ang_range / 2\n rows, cols, ch = img.shape\n Rot_M = cv2.getRotationMatrix2D((cols / 2, rows / 2), ang_rot, 1)\n\n # Translation\n tr_x = trans_range * np.random.uniform() - trans_range / 2\n tr_y = trans_range * np.random.uniform() - trans_range / 2\n Trans_M = np.float32([[1, 0, tr_x], [0, 1, tr_y]])\n\n # Shear\n pts1 = np.float32([[5, 5], [20, 5], [5, 20]])\n\n pt1 = 5 + shear_range * np.random.uniform() - shear_range / 2\n pt2 = 20 + shear_range * np.random.uniform() - shear_range / 2\n\n pts2 = np.float32([[pt1, 5], [pt2, 
pt1], [5, pt2]])\n\n shear_M = cv2.getAffineTransform(pts1, pts2)\n\n img = cv2.warpAffine(img, Rot_M, (cols, rows))\n img = cv2.warpAffine(img, Trans_M, (cols, rows))\n img = cv2.warpAffine(img, shear_M, (cols, rows))\n\n return img", "title": "" }, { "docid": "35feb72c1de42c359025ac0e14a0b88e", "score": "0.59116226", "text": "def rotate(image, angle, scale, show=False):\n\n center = map(lambda x: x // 2, image.shape[1::-1]) # (h, w, channels)\n M = cv2.getRotationMatrix2D(tuple(center), angle, float(scale))\n\n rotated = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))\n\n if show:\n cv2.imshow('{} Degree Rotated Image'.format(angle), rotated)\n cv2.waitKey(0)\n\n return rotated", "title": "" }, { "docid": "9942abc4d55ff7d570487e82a8b06e36", "score": "0.59103", "text": "def rotateImage(img, angle=90):\n plog(\"rotating by \" + str(angle))\n if (angle == 90) :\n return(cv2.flip(cv2.transpose(img),flipCode=0))\n elif (angle == -90) :\n return(cv2.flip(cv2.transpose(img),flipCode=1))\n else :\n center = (img.shape[1]/2.0,img.shape[0]/2.0)\n rotate = cv2.getRotationMatrix2D(center, angle, 1.0)\n return cv2.warpAffine(img, rotate, (img.shape[1], img.shape[0]))", "title": "" }, { "docid": "16a81a81a069d886c9f1536a42cbde7c", "score": "0.58997834", "text": "def __init__(self, rotation, translation):\n self.rotation = rotation\n self.translation = translation", "title": "" }, { "docid": "c87fdfac77cb80789a3773efba437887", "score": "0.5895447", "text": "def rotate(img, angle):\n if (angle != 0):\n r = cv2.getRotationMatrix2D((img.shape[0] / 2, img.shape[1] / 2), angle, 1.0)\n result = cv2.warpAffine(img, r, img.shape)\n return result\n else:\n return img", "title": "" }, { "docid": "3299450bdc3721bc689c0f1a555ee05a", "score": "0.5894112", "text": "def img_resize_rotate(file):\n new_img = Image.open(file).resize((640,480)).rotate(90)\n new_img.save( file + \"90deg_640x480\", format=img_ext)", "title": "" }, { "docid": "446af8fe06e2abc50df0d9d07610c968", "score": "0.5887914", "text": "def rotate_image(image, angle):\r\n\r\n # Get the image size\r\n # No that's not an error - NumPy stores image matricies backwards\r\n image_size = (image.shape[1], image.shape[0])\r\n image_center = tuple(np.array(image_size) / 2)\r\n\r\n # Convert the OpenCV 3x2 rotation matrix to 3x3\r\n rot_mat = np.vstack(\r\n [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]\r\n )\r\n\r\n rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])\r\n\r\n # Shorthand for below calcs\r\n image_w2 = image_size[0] * 0.5\r\n image_h2 = image_size[1] * 0.5\r\n\r\n # Obtain the rotated coordinates of the image corners\r\n rotated_coords = [\r\n (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],\r\n (np.array([ image_w2, image_h2]) * rot_mat_notranslate).A[0],\r\n (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],\r\n (np.array([ image_w2, -image_h2]) * rot_mat_notranslate).A[0]\r\n ]\r\n\r\n # Find the size of the new image\r\n x_coords = [pt[0] for pt in rotated_coords]\r\n x_pos = [x for x in x_coords if x > 0]\r\n x_neg = [x for x in x_coords if x < 0]\r\n\r\n y_coords = [pt[1] for pt in rotated_coords]\r\n y_pos = [y for y in y_coords if y > 0]\r\n y_neg = [y for y in y_coords if y < 0]\r\n\r\n right_bound = max(x_pos)\r\n left_bound = min(x_neg)\r\n top_bound = max(y_pos)\r\n bot_bound = min(y_neg)\r\n\r\n new_w = int(abs(right_bound - left_bound))\r\n new_h = int(abs(top_bound - bot_bound))\r\n\r\n # We require a translation matrix to keep the image centred\r\n trans_mat = np.matrix([\r\n [1, 
0, int(new_w * 0.5 - image_w2)],\r\n [0, 1, int(new_h * 0.5 - image_h2)],\r\n [0, 0, 1]\r\n ])\r\n\r\n # Compute the tranform for the combined rotation and translation\r\n affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]\r\n\r\n # Rotation\r\n result = imutils.rotate_bound(image, angle)\r\n return result", "title": "" }, { "docid": "e4d00b6cc3996f73d51a1adc133f884a", "score": "0.5881648", "text": "def angle_correction(self,img, val):\n row, cols, _ = img.shape\n M = np.float32([[1, 0, val], [0, 1, 0]])\n dst = cv2.warpAffine(img, M, (cols, row))\n dst = dst.reshape((row, cols, 1))\n return dst", "title": "" }, { "docid": "acff2138559b0e3e82456e2d7031b289", "score": "0.5879122", "text": "def rotate(image_path, degrees_to_rotate, saved_location):\r\n image_obj = Image.open(image_path)\r\n rotated_image = image_obj.rotate(degrees_to_rotate)\r\n rotated_image.save(saved_location)\r\n rotated_image.show()", "title": "" } ]
689d8416058a63d1dcb4dbdd8f2aebc1
Testing method / function web_api_server_url.
[ { "docid": "fefd3ea03d8f2ac053ca194123e0231a", "score": "0.82735455", "text": "def test_web_api_server_url(self):\n # Test\n result = self.config.web_api_server_url()\n self.assertEqual(\n result, 'http://127.0.0.12:50002/pattoo/api/v1/web/graphql')", "title": "" } ]
[ { "docid": "5d74342a9b7d8ad0bdf95cb6885454b8", "score": "0.7382157", "text": "def test_agent_api_server_url(self):\n # Initialize key values\n expected = 'http://127.0.0.11:50001/pattoo/api/v1/agent/receive/123'\n agent_id = 123\n\n # Test\n result = self.config.agent_api_server_url(agent_id)\n self.assertEqual(result, expected)", "title": "" }, { "docid": "5d74342a9b7d8ad0bdf95cb6885454b8", "score": "0.7382157", "text": "def test_agent_api_server_url(self):\n # Initialize key values\n expected = 'http://127.0.0.11:50001/pattoo/api/v1/agent/receive/123'\n agent_id = 123\n\n # Test\n result = self.config.agent_api_server_url(agent_id)\n self.assertEqual(result, expected)", "title": "" }, { "docid": "5d74342a9b7d8ad0bdf95cb6885454b8", "score": "0.7382157", "text": "def test_agent_api_server_url(self):\n # Initialize key values\n expected = 'http://127.0.0.11:50001/pattoo/api/v1/agent/receive/123'\n agent_id = 123\n\n # Test\n result = self.config.agent_api_server_url(agent_id)\n self.assertEqual(result, expected)", "title": "" }, { "docid": "5d74342a9b7d8ad0bdf95cb6885454b8", "score": "0.7382157", "text": "def test_agent_api_server_url(self):\n # Initialize key values\n expected = 'http://127.0.0.11:50001/pattoo/api/v1/agent/receive/123'\n agent_id = 123\n\n # Test\n result = self.config.agent_api_server_url(agent_id)\n self.assertEqual(result, expected)", "title": "" }, { "docid": "5d74342a9b7d8ad0bdf95cb6885454b8", "score": "0.7382157", "text": "def test_agent_api_server_url(self):\n # Initialize key values\n expected = 'http://127.0.0.11:50001/pattoo/api/v1/agent/receive/123'\n agent_id = 123\n\n # Test\n result = self.config.agent_api_server_url(agent_id)\n self.assertEqual(result, expected)", "title": "" }, { "docid": "d746436c33e72b5bf3a39a21c03c3788", "score": "0.7311262", "text": "def server_url(self):", "title": "" }, { "docid": "5fb33386d64cf0a339b7762d8c336376", "score": "0.7206997", "text": "def test_specified_url(self):\n self.assertEqual(self.api._baseURL, self.base_url)", "title": "" }, { "docid": "e1ea10cef7fc8dc3561604cf9f9b5f8a", "score": "0.71497786", "text": "def test_make_url(self):\n url = self.func.make_url()\n self.assert_('http://localhost:18801/method_name')", "title": "" }, { "docid": "03e13d91075cee6cb8163067487aa3cd", "score": "0.71188563", "text": "def test_handler(self):\n\n test_url = 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html'\n request = webapp2.Request.blank(\"/api/?url=%s\" % test_url)\n\n response = request.get_response(app)\n\n self.assertEqual(response.status_int, 200)\n dic = json.loads(response.body)\n self.assertEqual(dic[\"url\"], test_url)", "title": "" }, { "docid": "9e43447813c66708cab25c3b67040034", "score": "0.71149963", "text": "def test_url_(self):\n return 'PING'", "title": "" }, { "docid": "6627259c6aa4c8b61c348e5fab495bc3", "score": "0.7101113", "text": "def test_get_api_uri(self):\n path = '<eg>'\n expected = self.client.base_uri + url_quote(path)\n self.assertEqual(self.client.get_api_uri(path), expected)", "title": "" }, { "docid": "b8b01a0889ab61879c2f60ca49b3f7b6", "score": "0.7018669", "text": "def test_url(self):\n api.APICommand.when.called_with('somename', {'url': 'a'}).should_not.throw(minion.core.components.exceptions.ImproperlyConfigured)", "title": "" }, { "docid": "061e5b0b99db3b0f63e24b80a6627e27", "score": "0.6950891", "text": "def test_url(self):\n _url = \"/fablab/api/machines/1/\"\n print(\"(\"+self.test_url.__name__+\")\", self.test_url.__doc__)\n with resources.app.test_request_context(_url):\n 
rule = flask.request.url_rule\n view_point = resources.app.view_functions[rule.endpoint].view_class\n self.assertEqual(view_point, resources.Machine)", "title": "" }, { "docid": "9337c40bd0ca7de29309f0d077fe2ec3", "score": "0.6914563", "text": "def test_homepage(self):\n app = self.testapp\n response = app.get('/', status=200)\n\n self.assertEqual(response.json['Valid API Call List']['servers'], '/servers')", "title": "" }, { "docid": "d58b26caaf8ac6e1b2f00c5c3404819e", "score": "0.68823916", "text": "def test_base_url(self):\n web = WebObject()\n self.assertEqual(web.base_url, \"https://www.yikyak.com/api/v2/\")", "title": "" }, { "docid": "96ae065a168fdb4e11bdafbb204c4d16", "score": "0.6871626", "text": "def test_endpoint(request):\n pass # pragma: no cover", "title": "" }, { "docid": "725394e8e12509550631ad8738788ae6", "score": "0.6843816", "text": "def test_base_test_endpoint_url(self):\n base_instance = BaseRaveAPI()\n self.assertEqual(base_instance._get_base_endpoint(), 'https://ravesandboxapi.flutterwave.com/flwv3-pug/getpaidx/api/')", "title": "" }, { "docid": "b9d16ac1806e41917beb44ec95b97863", "score": "0.6800904", "text": "def test_generate_url(self):\n url = self.client.url('user', 'whoami')\n \n if url == 'http://localhost:8080/api/draft15/user/whoami':\n return\n \n self.fail('APIClient generated url incorrectly: {0}'.format(url))", "title": "" }, { "docid": "a93cbbf832699d48849683d4dd0f8ce1", "score": "0.68008935", "text": "def test_url(self):\n #NOTE: self.shortDescription() shuould work.\n print(\"(\"+self.test_url.__name__+\")\", self.test_url.__doc__, end=' ')\n with resources.app.test_request_context(self.url):\n rule = flask.request.url_rule\n view_point = resources.app.view_functions[rule.endpoint].view_class\n self.assertEqual(view_point, resources.Machines)", "title": "" }, { "docid": "8b0726ea1c3503e2398ddd9359151ad3", "score": "0.6800575", "text": "def url_for(endpoint):\n return TEST_HOST + endpoint", "title": "" }, { "docid": "0011adafc23c1aff47fe4c04d1c80b9e", "score": "0.6790447", "text": "def test_url(self):\n #NOTE: self.shortDescription() shuould work.\n _url = \"/exercisetracker/api/users/\"\n print \"(\"+self.test_url.__name__+\")\", self.test_url.__doc__,\n with resources.app.test_request_context(_url):\n rule = flask.request.url_rule\n view_point = resources.app.view_functions[rule.endpoint].view_class\n self.assertEquals(view_point, resources.Users)", "title": "" }, { "docid": "ce54d55f8684faf48a49e335c97abb69", "score": "0.67503685", "text": "def test_request_helper_base_url(self):\n os.environ[self.sandbox_mode] = \"true\"\n req = RequestHelper()\n self.assertEqual(req.base_endpoint, \"https://api-sandbox.shutterstock.com\")", "title": "" }, { "docid": "fdb27bb3a2f38bd6689669e9cd8cf956", "score": "0.6695713", "text": "def test_url(self):\n _url = \"/fablab/api/users/1/\"\n \n print(\"(\"+self.test_url.__name__+\")\", self.test_url.__doc__)\n with resources.app.test_request_context(_url):\n rule = flask.request.url_rule\n view_point = resources.app.view_functions[rule.endpoint].view_class\n self.assertEqual(view_point, resources.User)", "title": "" }, { "docid": "72100867f7ac08382e86595fb68bc476", "score": "0.66828775", "text": "def api_url(self):\n pass", "title": "" }, { "docid": "1e55fae910d41085b6f4e853555a0412", "score": "0.6633231", "text": "def test_url(self):\n site = wikipedia(session=Tests.session)\n self.assertEqual(site.url, 'https://en.wikipedia.org/w/api.php')", "title": "" }, { "docid": "37bb316ebac3852e642c6545544bcb24", "score": 
"0.6618601", "text": "def get_url(self, is_test_env):\n if is_test_env:\n return self.test_server\n else:\n return self.live_Server", "title": "" }, { "docid": "b23a2843c667b9e547121a0efae6e7e7", "score": "0.6600306", "text": "def test_index_url(self):\n with self.app as c:\n resp = c.get('http://127.0.0.1:5000/api/v1/')\n self.assertEqual(resp.status_code, 200)", "title": "" }, { "docid": "548b0d4a82e38dbe0ea0bef7abc65042", "score": "0.6592744", "text": "def test_agent_api_uri(self):\n # Initialize key values\n expected = '/pattoo/api/v1/agent/receive'\n\n # Test\n result = self.config.agent_api_uri()\n self.assertEqual(result, expected)", "title": "" }, { "docid": "548b0d4a82e38dbe0ea0bef7abc65042", "score": "0.6592744", "text": "def test_agent_api_uri(self):\n # Initialize key values\n expected = '/pattoo/api/v1/agent/receive'\n\n # Test\n result = self.config.agent_api_uri()\n self.assertEqual(result, expected)", "title": "" }, { "docid": "548b0d4a82e38dbe0ea0bef7abc65042", "score": "0.6592744", "text": "def test_agent_api_uri(self):\n # Initialize key values\n expected = '/pattoo/api/v1/agent/receive'\n\n # Test\n result = self.config.agent_api_uri()\n self.assertEqual(result, expected)", "title": "" }, { "docid": "548b0d4a82e38dbe0ea0bef7abc65042", "score": "0.6592744", "text": "def test_agent_api_uri(self):\n # Initialize key values\n expected = '/pattoo/api/v1/agent/receive'\n\n # Test\n result = self.config.agent_api_uri()\n self.assertEqual(result, expected)", "title": "" }, { "docid": "548b0d4a82e38dbe0ea0bef7abc65042", "score": "0.6592744", "text": "def test_agent_api_uri(self):\n # Initialize key values\n expected = '/pattoo/api/v1/agent/receive'\n\n # Test\n result = self.config.agent_api_uri()\n self.assertEqual(result, expected)", "title": "" }, { "docid": "33ecbd6aaab34ed8ecd2d32a13441346", "score": "0.65642715", "text": "def test_serve_api_version(self):\n pass", "title": "" }, { "docid": "bcf946fadd09a253642334940bf1d76b", "score": "0.65374196", "text": "def test_http(self):\n\t\tpass", "title": "" }, { "docid": "c83c56c17a49063c357a259e22f32cf3", "score": "0.6503448", "text": "def test_url(self):\n #NOTE: self.shortDescription() shuould work.\n print(\"(\"+self.test_url.__name__+\")\", self.test_url.__doc__, end=' ')\n with resources.app.test_request_context(self.url):\n rule = flask.request.url_rule\n view_point = resources.app.view_functions[rule.endpoint].view_class\n self.assertEqual(view_point, resources.Users)", "title": "" }, { "docid": "3d5e1e0790840f698fd2a5baabbf06a0", "score": "0.6499353", "text": "def test__get_url_generates_correct_endpoints(method, _id, expected_subpath, http_method):\n\n expected_url = f\"{settings.WAYSCRIPT_ORIGIN}/{expected_subpath}\"\n responses_method = getattr(responses, http_method)\n responses.add(responses_method, expected_url,\n json={}, status=200)\n\n client = utils.WayScriptClient()\n callable = getattr(client, method)\n callable(_id)\n\n assert len(responses.calls) == 1\n assert responses.calls[0].request.url == expected_url", "title": "" }, { "docid": "54242eea2a96ba014366bc34c9efe73a", "score": "0.64610046", "text": "def _s_url(self):\n return \"http://safrs-example.com/api/Test\"", "title": "" }, { "docid": "a836e5a86a7887bdcb2d59ed11f14f26", "score": "0.64570403", "text": "def test_api_route(self) -> None:\n response: Response = self.client.get(\"/\")\n response_json: dict = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_json.get(\"self_link\"), \"/\")\n 
self.assertEqual(response_json.get(\"api_name\"), \"saints-xctf-api\")\n self.assertEqual(response_json.get(\"versions_link\"), \"/versions\")", "title": "" }, { "docid": "c4a5de8fa5e38bd969aa60fc468c33ea", "score": "0.64293736", "text": "def test_binds_url(self):\n self.service_configs[ServiceType.CLOUD_SERVERS]['url'] = 'myurl'\n eff = self._concrete(self.svcreq)\n next_eff = resolve_authenticate(eff)\n # URL in HTTP request is configured URL\n self.assertEqual(\n next_eff.intent,\n Request(method='GET', url='myurl/servers',\n headers=headers('token'), log=self.log))", "title": "" }, { "docid": "22b73272bd37c9eb795b0609486515d0", "score": "0.6404565", "text": "def test_web_api_ip_address(self):\n # Test\n result = self.config.web_api_ip_address()\n self.assertEqual(result, '127.0.0.12')", "title": "" }, { "docid": "22b73272bd37c9eb795b0609486515d0", "score": "0.6404565", "text": "def test_web_api_ip_address(self):\n # Test\n result = self.config.web_api_ip_address()\n self.assertEqual(result, '127.0.0.12')", "title": "" }, { "docid": "22b73272bd37c9eb795b0609486515d0", "score": "0.6404565", "text": "def test_web_api_ip_address(self):\n # Test\n result = self.config.web_api_ip_address()\n self.assertEqual(result, '127.0.0.12')", "title": "" }, { "docid": "22b73272bd37c9eb795b0609486515d0", "score": "0.6404565", "text": "def test_web_api_ip_address(self):\n # Test\n result = self.config.web_api_ip_address()\n self.assertEqual(result, '127.0.0.12')", "title": "" }, { "docid": "22b73272bd37c9eb795b0609486515d0", "score": "0.6404565", "text": "def test_web_api_ip_address(self):\n # Test\n result = self.config.web_api_ip_address()\n self.assertEqual(result, '127.0.0.12')", "title": "" }, { "docid": "7bbd972be1f535e116ee5ac24e7ffba8", "score": "0.6401712", "text": "def api_url(self):\n logger.debug('')\n return self.api.client_get_api_url()", "title": "" }, { "docid": "d224f5b5f5fbcc5be94022f0ff10a499", "score": "0.63324755", "text": "def test_module(client: Client) -> str:\n\n _save_urls_to_instance(client)\n return \"ok\"", "title": "" }, { "docid": "267dee275068ff80889f4c7337e18a2a", "score": "0.63259053", "text": "def get_url() -> str:\n return Discovery.get_setting('iscore_url') + '/api/v1/'", "title": "" }, { "docid": "baedd7ff0460c93c73ccfde6d81fc901", "score": "0.6323943", "text": "def test_base_create_url():\n assert SoElement._base_create_url() == \"{}/onap/so/infra/serviceInstantiation/{}/serviceInstances\".\\\n format(SoElement.base_url, SoElement.api_version)", "title": "" }, { "docid": "af2a3848b4406451eb6257b60c3e582a", "score": "0.62957036", "text": "def test_empty_url(self):\n api.APICommand.when.called_with('somename', {'url': ''}).should.throw(minion.core.components.exceptions.ImproperlyConfigured)", "title": "" }, { "docid": "86adb986872bb71ddabfb5c991ad4ecf", "score": "0.62944776", "text": "def test_public_endpoint_url(self):\n self.assertEqual(\n public_endpoint_url(fake_service_catalog, 'cloudServersOpenStack',\n 'DFW'),\n 'http://dfw.openstack/')", "title": "" }, { "docid": "2c36a9eacbc5d0648c06676b86eb58c0", "score": "0.62605697", "text": "def serverURI(): # @NoSelf", "title": "" }, { "docid": "3c9be9b9769cf85c847bc537ce309362", "score": "0.6252599", "text": "def get_api_url(self):\n return self.api_url", "title": "" }, { "docid": "8b34971a226c9a277629be7359964c38", "score": "0.62491536", "text": "def test_check_server(self):\n pass", "title": "" }, { "docid": "4601203386ec1f114e558fabdeab3bfa", "score": "0.6248772", "text": "def test_get_request_url(self):\n url = 
self.docker_cls.get_request_url()\n assert url == self.request_url", "title": "" }, { "docid": "9707fd11e4901d7a7a2579ff428af8cd", "score": "0.62360847", "text": "def test_request_absolute_url(self):\n client = cosm.Client(\"API_KEY\")\n client.request('GET', \"http://example.com\")\n self.session.assert_called_with('GET', \"http://example.com\")", "title": "" }, { "docid": "1353343ecf9d75ddda49ef0859871c7c", "score": "0.62292665", "text": "def test_url_case(self):\n url = GetRootUrl(u'http://ZetaCenTaUri.coM')\n self.assertEqual(url, 'zetacentauri.com')", "title": "" }, { "docid": "8b1b59cbfc0dab504e5f8f25b1c97d6f", "score": "0.6226686", "text": "def test_url(self):\n print(\"(\" + self.test_url.__name__ + \")\",\n self.test_url.__doc__, end=' ')\n with resources.APP.test_request_context(self.url):\n rule = flask.request.url_rule\n view_point = resources.APP.view_functions[rule.endpoint].view_class\n self.assertEqual(view_point, resources.Diagnoses)", "title": "" }, { "docid": "dc68a99cc8ac338c9988e14768dc36d8", "score": "0.6221457", "text": "def test_empty_url(self):\n api.JsonAPICommand.when.called_with('somename', {'url': ''}).should.throw(minion.core.components.exceptions.ImproperlyConfigured)", "title": "" }, { "docid": "a49e62473868ecd20286ce2eb78b33fd", "score": "0.61943865", "text": "def test_api_hello(self):\n\n response = self.client.get(self.prefix)\n\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "a2431710c153f7c3917b7dd954b8ac4f", "score": "0.6191882", "text": "def get_api_url(cls) -> str:\r\n\r\n return cls.__api_url", "title": "" }, { "docid": "78a2b0b1f37ad273b4dc64b608eeb166", "score": "0.61747676", "text": "def setUp(self):\n\n self.BASE_URL = 'https://fakerestapi.azurewebsites.net'", "title": "" }, { "docid": "518d2767652bb6de653b9962bde2210b", "score": "0.6168243", "text": "def mock_url(port):\n return 'http://{}:{}'.format(pytest.msgapi.ip, port)", "title": "" }, { "docid": "fb0ff56ce2e11ed18f48ff1f0461ca73", "score": "0.6142921", "text": "def _url(self):\n pass", "title": "" }, { "docid": "dba42b2f67404431c8125e47605bb40a", "score": "0.61368114", "text": "def _get_url(env, endpoint):\n return '{}/{}'.format(env['SERVER_URL'], endpoint)", "title": "" }, { "docid": "7aafb36402cce3b1ff4c096b6d9d91d8", "score": "0.6136373", "text": "def serverurl(self):\n return self.__serverurl", "title": "" }, { "docid": "7270dddacd516e6ba787acca5eda56b0", "score": "0.61353934", "text": "def api_endpoint(url):\n\n time.sleep(2)\n if urllib.parse.urlparse(url).scheme in ['http', 'https']:\n return url # url is already complete\n\n return urllib.parse.urljoin(f'{config.RESOURCE}/{config.API_VERSION}/',\n url.lstrip('/'))", "title": "" }, { "docid": "ce7997a2aa4cb6ae2e657a9d6b5a304f", "score": "0.6130997", "text": "def test_get_real_http():\n _valid_request()", "title": "" }, { "docid": "603217e8457a68e1d2a70cacbf73c13d", "score": "0.61218685", "text": "def _get_api_url(self, action):\n path = f'{self.API_NAME}{self.API_VERSION}{self.API_ENDPOINTS[action]}'\n if self.authorized_base_url is None:\n return urllib.parse.urljoin(self.API_DOMAIN, path)\n else:\n return urllib.parse.urljoin(self.authorized_base_url, path)", "title": "" }, { "docid": "8844acb7fe720b9aa392f188203ad648", "score": "0.6120527", "text": "def testRequestMainPage(self):\n response = self._get('/')\n self.assertEqual(response.status, 200)", "title": "" }, { "docid": "febb2566b897e3a23aed93101ddc822e", "score": "0.6094574", "text": "def test_video_url(self):\n with 
self.subTest('HOMEPAGE_VIDEO_URL starts with http'):\n config.HOMEPAGE_VIDEO_URL = 'http://www.somesite.com/lkfasondfo.notafile?'\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context['video_url'], config.HOMEPAGE_VIDEO_URL)\n\n with self.subTest('HOMEPAGE_VIDEO_URL does not start with http'):\n config.HOMEPAGE_VIDEO_URL = 'www.somesite.com/somepage/file.mp4'\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context['video_url'], None)", "title": "" }, { "docid": "9232ab2d5a39e75e7c889cfedf258558", "score": "0.60871625", "text": "def _url(self, *args):\n url = self._api_url.rstrip(\"/\")\n for suffix in args:\n url += \"/\" + suffix.strip(\"/\")\n LOGGER.debug(\"URL created: %s\", url)\n\n return url", "title": "" }, { "docid": "76fa7452ea7a0ee75b3afaa32521fc3c", "score": "0.6086087", "text": "def test_get_api_endpoints_utils(self):\n res = get_api_endpoints()\n for url, fn in res.items():\n self.assertTrue(fn.is_api_endpoint)\n self.assertTrue(url.startswith('/'))", "title": "" }, { "docid": "100a1d3518943b4268431d10b4af6f1f", "score": "0.6083629", "text": "async def get_web_endpoint(self) -> Optional[str]:", "title": "" }, { "docid": "9b36f95ba1eab1d60c368a370c4c4d53", "score": "0.6081619", "text": "def get_url(name):\n return 'http://testserver' + reverse(name)", "title": "" }, { "docid": "1c84ce2558ff81856bd27d7c4e5dc779", "score": "0.6078357", "text": "def test_url_getter(self):\n\n\t\tself.assertEqual(self.obj_url.url, \"www.amazon.com/sampleurl\")", "title": "" }, { "docid": "51e387ce8c7a9e770bcfd76ef6f62b66", "score": "0.60723186", "text": "def get_server_url():\n srv = testcase.require_service(\"cdb.uberserver.services.apache.Apache\")\n return srv.get_url(withuserandpass=False)", "title": "" }, { "docid": "530e8a3f80a8c98873213f1cfdbbda7b", "score": "0.60683846", "text": "def get_web_endpoint(self) -> Optional[str]:", "title": "" }, { "docid": "b9093ebbb1d8d7cc9beb4fae3cc4f17f", "score": "0.60665756", "text": "def getURL():", "title": "" }, { "docid": "35a3a8a9f7f05c6e54db371fdd232c74", "score": "0.6061042", "text": "def web_url(self):\n raise NotImplementedError", "title": "" }, { "docid": "17b4483ec9d8dd31fe4c6b2fadd749bd", "score": "0.6056207", "text": "def test_wrong_url(self):\n print(\"(\"+self.test_wrong_url.__name__+\")\", self.test_wrong_url.__doc__)\n resp = self.client.get(self.url_wrong)\n self.assertEqual(resp.status_code, 404)", "title": "" }, { "docid": "17b4483ec9d8dd31fe4c6b2fadd749bd", "score": "0.6056207", "text": "def test_wrong_url(self):\n print(\"(\"+self.test_wrong_url.__name__+\")\", self.test_wrong_url.__doc__)\n resp = self.client.get(self.url_wrong)\n self.assertEqual(resp.status_code, 404)", "title": "" }, { "docid": "6c7ae251f9d8d085eae5e2e185e17796", "score": "0.6050519", "text": "def test_url(self):\n _url = \"/medical_forum/api/diagnoses/dgs-1/\"\n print(\"(\" + self.test_url.__name__ + \")\", self.test_url.__doc__)\n with resources.APP.test_request_context(_url):\n rule = flask.request.url_rule\n view_point = resources.APP.view_functions[rule.endpoint].view_class\n self.assertEqual(view_point, resources.Diagnosis)", "title": "" }, { "docid": "429879118aa829358098a32d04ff7b0e", "score": "0.605012", "text": "def test_app_main(client):\n assert client.get(url_for('main')).status_code == 200", "title": "" }, { "docid": "481feabeea5caa75af6c2622af13177e", "score": "0.6048413", "text": "def test_base_url(self):\n\n # The 
base_url should return an URL without trailing slash.\n self.assertEqual(self.client.base_url, self.CANONICAL_URL)", "title": "" }, { "docid": "68d3412a3fa1b83fa20acfd86deb2484", "score": "0.60474896", "text": "def unittest():\n with app.test_request_context('/', method='GET'):\n assert request.path == '/'\n assert request.method == 'GET'", "title": "" }, { "docid": "5ee092490300269766f400d7c863a0a3", "score": "0.60433626", "text": "def test_no_url(self):\n api.APICommand.when.called_with('somename', {}).should.throw(minion.core.components.exceptions.ImproperlyConfigured)", "title": "" }, { "docid": "39afbf6905277f5be8bed27e45f2205a", "score": "0.60417527", "text": "def makeUrl(self):\n try:\n if self.options.use_ssl is True:\n self.url = \"https://%s:%s/api/aliveness-test/%s\" % (\n self.options.hostname,\n self.options.port,\n self.options.vhost,\n )\n else:\n self.url = \"http://%s:%s/api/aliveness-test/%s\" % (\n self.options.hostname,\n self.options.port,\n self.options.vhost,\n )\n return True\n except Exception as e:\n self.rabbit_error = 3\n self.rabbit_note = \"problem forming api url:\", e\n return False", "title": "" }, { "docid": "ca91d1ccf669f26fe9e9e2d7a532fe77", "score": "0.6034816", "text": "def url(self):\n raise NotImplementedError('Please define a url fixture')", "title": "" }, { "docid": "58fcb3f1021b4a9bc0b6a0361b494fa4", "score": "0.6031877", "text": "def test_get_main_page(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "916388fa4383a09b8dd46306ad1333d6", "score": "0.6029722", "text": "def test_afirst(self):\n rv = self.app.get('/test')\n self.assertEqual(rv.status_code, 200)\n print('Routing Test : OK')", "title": "" }, { "docid": "bfaa9bd988607e4b55959cda6be5bd51", "score": "0.6028834", "text": "def test(request, api_client, setup, prefix):\n\n request.getfixturevalue(setup)\n\n response = api_client().get(f\"{prefix}anything/v2/test\")\n assert response.status_code == 200\n assert response.json()[\"path\"] == \"/anything/v2/test\"\n\n response = api_client().get(f\"{prefix}anything/v1/test\")\n assert response.status_code == 200\n assert response.json()[\"path\"] == \"/anything/v5/test\"", "title": "" }, { "docid": "f0a834667a3841ef46ac589f7ce0d7e6", "score": "0.6021136", "text": "def get_endpoint():\n settings = helpers.get_settings()\n return settings['endpoint_under_test']", "title": "" }, { "docid": "4219314b729b8df1cdc81bf435c53a00", "score": "0.602084", "text": "def test_url_with_subdomain(self):\n url = GetRootUrl(u'https://www.zetacentauri.com:80')\n self.assertEqual(url, 'www.zetacentauri.com')", "title": "" }, { "docid": "aec17f5f13858e235f80f2be6ed4893c", "score": "0.6019525", "text": "def test_set_url_to_get_url(self):\n page = interfaces.Page()\n page.set_url('/test')\n\n self.assertEqual('/test', page.get_url())", "title": "" }, { "docid": "883ba9506dc72d7154b0be4d5d8db69f", "score": "0.6017378", "text": "def api_endpoint(url):\n if urllib.parse.urlparse(url).scheme in ['http', 'https']:\n return url # url is already complete\n return urllib.parse.urljoin(f'{config.RESOURCE}/{config.API_VERSION}/',\n url.lstrip('/'))", "title": "" }, { "docid": "6e7e344acff0de996d17ccf590a63f93", "score": "0.60166925", "text": "def get_api_url (self, *args):\n return self.api_url.format(*args)", "title": "" } ]
6bf40afc0ad695debed41ec389d22c30
Creates matrix from carla transform.
[ { "docid": "379c79bcf651de3bc7d9d15fab44eb67", "score": "0.57375145", "text": "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n return matrix", "title": "" } ]
[ { "docid": "1c170c6319c966afb4509a550815648f", "score": "0.69329536", "text": "def makeMatrix(self, transformation):\n m = mathutils.Matrix()\n (m[0][0], m[0][1], m[0][2], m[0][3], m[1][0], m[1][1], m[1][2], m[1][3], \n m[2][0], m[2][1], m[2][2], m[2][3], m[3][0], m[3][1], m[3][2], m[3][3]) = transformation.toList(rowmajor=True)\n return m", "title": "" }, { "docid": "5ee7c73443d08e98bbb86c94a4418794", "score": "0.6797527", "text": "def transform(matrix):", "title": "" }, { "docid": "1dbaac1d8543207fd6f15ce73c3d2a9f", "score": "0.656694", "text": "def build_transform_matrix(self):\n self.transform_matrix = cp.asarray(\n np.multiply(self.local_basis.weights[None, None, :],\n np.exp(-1j * self.grid_phases.get())) / (self.J * self.length)\n )", "title": "" }, { "docid": "ecfcf70fac0fe4cde45fca677f138672", "score": "0.6323929", "text": "def build_transform_matrix(self):\n self.transform_matrix = cp.multiply(\n self.local_basis.device_weights[None, None, :],\n self.upper_grid_modes\n ) / self.J / cp.array(self.alpha)", "title": "" }, { "docid": "a007a26c17c587359a7b87df40f98da5", "score": "0.6215551", "text": "def translation_matrix(xyz: Sequence[float]) -> np.ndarray:\n matrix = np.eye(4)\n matrix[:-1, -1] = xyz\n\n return matrix", "title": "" }, { "docid": "c6743541da9e6e7c650188f906a36a9e", "score": "0.61711544", "text": "def get_matrix(data, vocab_processor):\n transform = vocab_processor.transform(data)\n return np.array(list(transform))", "title": "" }, { "docid": "ea78b8a6730e256a15cee2944afe8cfc", "score": "0.6151058", "text": "def get_transform_matrix(transform: carla.Transform):\n # original trans matrix in list\n _T = transform.get_matrix()\n\n # transform matrix from Actor system to world system\n trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]],\n [_T[1][0], _T[1][1], _T[1][2]],\n [_T[2][0], _T[2][1], _T[2][2]]])\n\n return trans_matrix", "title": "" }, { "docid": "928a88ededa567e6c3d2da36f242a0c2", "score": "0.60776156", "text": "def tomatrix(self):\r\n A = self.mpos[0].tomatrix()\r\n for mpo in self.mpos[1:]:\r\n A = A @ mpo.tomatrix()\r\n return A", "title": "" }, { "docid": "ab5cc9b76d7d5446fd6c41308cc9912d", "score": "0.6064097", "text": "def make_correlation_matrix(l_param: np.ndarray) -> np.ndarray:\n chol = make_cholesky_factor(l_param)\n return chol @ chol.T", "title": "" }, { "docid": "ff63ec59096bddd149f3a5639ab56e71", "score": "0.6039227", "text": "def vtk_to_mat(transform):\n tf_matrix = transform.GetMatrix()\n out = np.array(np.eye(4))\n for r in range(4):\n for c in range(4):\n out[r, c] = tf_matrix.GetElement(r, c)\n return out", "title": "" }, { "docid": "0cefa898d7370bc5691d2c223d5ba89d", "score": "0.6014213", "text": "def to_matrix_vector(transform):\r\n\r\n ndimin = transform.shape[0] - 1\r\n ndimout = transform.shape[1] - 1\r\n matrix = transform[0:ndimin, 0:ndimout]\r\n vector = transform[0:ndimin, ndimout]\r\n return matrix, vector", "title": "" }, { "docid": "098ad914f2b9a2c6afc4f5ce960c2dc8", "score": "0.6003483", "text": "def FromMatrix3x3(self, , , ):\n ...", "title": "" }, { "docid": "098ad914f2b9a2c6afc4f5ce960c2dc8", "score": "0.6003483", "text": "def FromMatrix3x3(self, , , ):\n ...", "title": "" }, { "docid": "b516386b604000d4f1e641c1417c0384", "score": "0.60024107", "text": "def matrixform(self):\n\n return (np.array([[self.v[0], -self.v[1]], [self.v[1], self.v[0]]]), self.v[2:4])", "title": "" }, { "docid": "b516386b604000d4f1e641c1417c0384", "score": "0.60024107", "text": "def matrixform(self):\n\n return (np.array([[self.v[0], 
-self.v[1]], [self.v[1], self.v[0]]]), self.v[2:4])", "title": "" }, { "docid": "799d34f0b705827abb5be6d534919a21", "score": "0.6001543", "text": "def traspuesta_matrices(x):\n\tresultado = x.T\n\treturn resultado", "title": "" }, { "docid": "a1eeffb8d1d970f9dca5b3d649202b4f", "score": "0.599124", "text": "def transform_to_matrices(transform):\n # split the transform string in to components of:\n # (operation, args) i.e. (translate, '-1.0, 2.0')\n components = [\n [j.strip() for j in i.strip().split('(') if len(j) > 0]\n for i in transform.lower().split(')') if len(i) > 0]\n # store each matrix without dotting\n matrices = []\n for line in components:\n if len(line) == 0:\n continue\n elif len(line) != 2:\n raise ValueError('should always have two components!')\n key, args = line\n # convert string args to array of floats\n # support either comma or space delimiter\n values = np.array([float(i) for i in\n args.replace(',', ' ').split()])\n if key == 'translate':\n # convert translation to a (3, 3) homogeneous matrix\n matrices.append(np.eye(3))\n matrices[-1][:2, 2] = values\n elif key == 'matrix':\n # [a b c d e f] ->\n # [[a c e],\n # [b d f],\n # [0 0 1]]\n matrices.append(np.vstack((\n values.reshape((3, 2)).T, [0, 0, 1])))\n elif key == 'rotate':\n # SVG rotations are in degrees\n angle = np.degrees(values[0])\n # if there are three values rotate around point\n if len(values) == 3:\n point = values[1:]\n else:\n point = None\n matrices.append(planar_matrix(theta=angle,\n point=point))\n elif key == 'scale':\n # supports (x_scale, y_scale) or (scale)\n mat = np.eye(3)\n mat[:2, :2] *= values\n matrices.append(mat)\n else:\n log.warning('unknown SVG transform: {}'.format(key))\n\n return matrices", "title": "" }, { "docid": "4624da8690529391bc23c7c8e9c7f8de", "score": "0.5979868", "text": "def transform(self):\n return core.Matrix2D()", "title": "" }, { "docid": "34405f73e40b0385ee9f6e400d8c835e", "score": "0.5963307", "text": "def to_matrix(self):\n return self.rotation.to_matrix(*self.position.x_y_z)", "title": "" }, { "docid": "54bf1683eaec6cb92ef2a6da0228187d", "score": "0.59465396", "text": "def to_matrix(self):\n return self.to_X()", "title": "" }, { "docid": "4097702299dbc7385abd5fdb3e25747e", "score": "0.5919039", "text": "def to_matrix_vector(transform):\n \n ndimin = transform.shape[0] - 1\n ndimout = transform.shape[1] - 1\n matrix = transform[0:ndimin, 0:ndimout]\n vector = transform[0:ndimin, ndimout]\n return matrix, vector", "title": "" }, { "docid": "e67cb7f40711289d3435a51159b2ae8f", "score": "0.58358765", "text": "def createTransformationMatrix(Ma,Mb,Vab,rel='a'):\n if rel!='a' and rel!='b': return None\n a1,a2,a3 = Ma\n b1,b2,b3 = Mb\n # Rotation matrix\n R = np.identity(4,np.float)\n R[0,0:3] = [np.dot(b1,a1), np.dot(b1,a2), np.dot(b1,a3)]\n R[1,0:3] = [np.dot(b2,a1), np.dot(b2,a2), np.dot(b2,a3)]\n R[2,0:3] = [np.dot(b3,a1), np.dot(b3,a2), np.dot(b3,a3)] \n # Transformation matrix\n if rel=='b':\n Vab = np.append(Vab,1.0)\n Vab = np.dot(R.T,Vab)[0:3]\n T = np.identity(4,np.float) \n T[0:3,3] = -Vab \n # Transformation matrix\n return np.dot(R,T)", "title": "" }, { "docid": "64d6c1ea2dd158edbad5818b9112330d", "score": "0.583149", "text": "def _matrix(a=1.0, b=0.0, c=0.0, d=1.0, m=0.0, n=0.0, p=0.0, q=0.0, s=1.0):\n return [\n [a, b, p],\n [c, d, q],\n [m, n, s]]", "title": "" }, { "docid": "7d4492cba777503140e4111211825834", "score": "0.58223677", "text": "def to_matrix(self):\n return numpy.array([[0, 1],\n [1, 0]], dtype=complex)", "title": "" }, { "docid": 
"c75d4074c9ea45d2be20c17c8bab324a", "score": "0.581047", "text": "def matrix(self):\n row_1 = [self.a, -self.b, -self.c, -self.d]\n row_2 = [self.b, self.a, -self.d, self.c]\n row_3 = [self.c, self.a, self.d, -self.b]\n row_4 = [self.d, -self.c, self.b, self.a]\n matrix = [row_1, row_2, row_3, row_4]\n return matrix", "title": "" }, { "docid": "67a3ce8a0635b98440a4d2a46e6eab19", "score": "0.58054227", "text": "def make_projection_matrix(X):\n\n return np.eye(X.shape[0]) - np.einsum('ij,jk', X, olsmatrix_ulen(X))", "title": "" }, { "docid": "bca4ceed2104f2a6d7a418e66631e0ea", "score": "0.5805115", "text": "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n return matrix", "title": "" }, { "docid": "1e3b316276017d2ea0e09d4bebf77e15", "score": "0.5783155", "text": "def to_matrix(self):\n cos = math.cos(self.params[0] / 2)\n sin = math.sin(self.params[0] / 2)\n return numpy.array([[cos, -1j * sin],\n [-1j * sin, cos]], dtype=complex)", "title": "" }, { "docid": "0aabbf3c377b763fee1d12e056b9079f", "score": "0.5769485", "text": "def get_mueller_matrix(self, *args, **kwargs):\n\n m = [[self.tx_2 ** 2 + self.tx_1 ** 2, self.tx_1 ** 2 - self.tx_2 ** 2, 0, 0],\n [self.tx_1 ** 2 - self.tx_2 ** 2, self.tx_2 ** 2 + self.tx_1 ** 2, 0, 0],\n [0, 0, 2 * self.tx_2 * self.tx_1, 0],\n [0, 0, 0, 2 * self.tx_2 * self.tx_1]]\n\n return self.orient(1 / 2 * xr.DataArray(m, dims=('mueller_v', 'mueller_h'), ))", "title": "" }, { "docid": "01e44c63695c65a322c630523e9b03c9", "score": "0.576441", "text": "def carma_matrix_car1(t,alpha,sig):\n\t\n\tn=t.size\n\tcarma_mat=np.zeros((n,n))\n\tfor k in range(n):\n\t\tcarma_mat[k,k]=1.\n\t\tfor l in range(k):\n\t\t\tcarma_mat[k,l]=np.exp(-alpha*(t[k]-t[l]))\n\t\t\tcarma_mat[l,k]=carma_mat[k,l]\n\treturn sig**2/2./alpha*carma_mat", "title": "" }, { "docid": "73a66e1e599216c2fdc65ae452d405bf", "score": "0.5754417", "text": "def transform(self):\n return core.Matrix3D()", "title": "" }, { "docid": "73a66e1e599216c2fdc65ae452d405bf", "score": "0.575362", "text": "def transform(self):\n return core.Matrix3D()", "title": "" }, { "docid": "73a66e1e599216c2fdc65ae452d405bf", "score": "0.5753137", "text": "def transform(self):\n return core.Matrix3D()", "title": "" }, { "docid": "73a66e1e599216c2fdc65ae452d405bf", "score": "0.5753137", "text": "def transform(self):\n return core.Matrix3D()", "title": "" }, { "docid": "73a66e1e599216c2fdc65ae452d405bf", "score": "0.57530826", "text": "def transform(self):\n return core.Matrix3D()", "title": "" }, { "docid": "73a66e1e599216c2fdc65ae452d405bf", "score": "0.57514036", "text": "def transform(self):\n return core.Matrix3D()", "title": "" }, { "docid": "73a66e1e599216c2fdc65ae452d405bf", "score": "0.57514036", "text": "def transform(self):\n return core.Matrix3D()", "title": "" }, { "docid": 
"8aa5094374095fa216152567fe29939e", "score": "0.5744702", "text": "def translation_vector_to_matrix(t): # t = 3d point\n return np.array([\n [1,0,0,t[0]],\n [0,1,0,t[1]],\n [0,0,1,t[2]],\n [0,0,0,1]\n ])", "title": "" }, { "docid": "019a99335a6e77d4627439825f5c65ae", "score": "0.57443327", "text": "def tomatrix(self):\r\n D = 1 # Total physical dimension so far\r\n out = np.array([[[1.0]]])\r\n for A in self._data:\r\n a, i, j, b = A.shape\r\n out = np.einsum(\"lma,aijb->limjb\", out, A)\r\n D *= i\r\n out = out.reshape(D, D, b)\r\n return out[:, :, 0]", "title": "" }, { "docid": "7ce0d2c0b5cfbe10da7414fc4e630d7a", "score": "0.57145333", "text": "def screw_matrix_from_transform_log(transform_log):\n transform_log = check_transform_log(transform_log)\n\n omega = np.array([\n transform_log[2, 1], transform_log[0, 2], transform_log[1, 0]])\n theta = np.linalg.norm(omega)\n if abs(theta) < np.finfo(float).eps:\n theta = np.linalg.norm(transform_log[:3, 3])\n if abs(theta) < np.finfo(float).eps:\n return np.zeros((4, 4)), 0.0\n return transform_log / theta, theta", "title": "" }, { "docid": "3c1989f58ea02f00a43c953c56e87d36", "score": "0.570659", "text": "def _build_matrix(self):\n self._matrix = Matrix()\n self._matrix.scale(self._scale.x,self._scale.y)\n self._matrix.rotate(self._rotate.angle)\n self._matrix.translate(self._trans.x,self._trans.y)\n self._invrse = Matrix()\n self._invrse.translate(-self._trans.x,-self._trans.y)\n self._invrse.rotate(-self._rotate.angle)\n self._invrse.scale(1.0/self._scale.x,1.0/self._scale.y)\n self._mtrue = True", "title": "" }, { "docid": "48c0c702abc73fdf26ac0e200925e0d2", "score": "0.5696928", "text": "def transform(self, matrix):\n # the transformation matrix does not need to be a rotation matrix,\n # so the unit-distance is not guaranteed. For speed, we check if the\n # matrix is in O(3) and preserves lengths.\n if np.all(is_O3(matrix)): # remain in unit-rep\n xyz = erfa_ufunc.s2c(self.lon, self.lat)\n p = erfa_ufunc.rxp(matrix, xyz)\n lon, lat = erfa_ufunc.c2s(p)\n rep = self.__class__(lon=lon, lat=lat)\n # handle differentials\n new_diffs = {\n k: d.transform(matrix, self, rep) for k, d in self.differentials.items()\n }\n rep = rep.with_differentials(new_diffs)\n\n else: # switch to dimensional representation\n rep = self._dimensional_representation(\n lon=self.lon, lat=self.lat, distance=1, differentials=self.differentials\n ).transform(matrix)\n\n return rep", "title": "" }, { "docid": "d9cf401b31c7679480fd47e3c683c867", "score": "0.5693905", "text": "def transform(self):\n if self._scene is None:\n util.log.warning(\n 'camera has no scene! 
returning identity')\n return np.eye(4)\n matrix = self._scene.graph[self.name][0]\n return matrix", "title": "" }, { "docid": "36e3169bc98f8a654d13d4997ec29656", "score": "0.568902", "text": "def _transformation(self, X: np.ndarray) -> np.ndarray:", "title": "" }, { "docid": "1837ef1447de86e3ae54d4f125bdd7e4", "score": "0.56873786", "text": "def make_mat(self):\n info_target = get_targets_info(self.pcid, self.cid, self.datamonth, src=\"LOCAL\")\n targets = select_targets(info_target, store=self.save)\n\n urls = get_urls(self.pcid, self.cid, self.datamonth, src=\"LOCAL\")\n _ = map_model_urls(urls, self.save)\n\n info_submarket = get_submarket_info(self.pcid, self.cid, self.datamonth, src=\"LOCAL\")\n _, sm_hot, = select_submarkets(info_submarket, store=self.save)\n\n info = reconstruct_info(info_target, info_submarket, targets, sm_hot)\n mat, map_line = embedding(info, targets, self.save)\n pca_mat, _ = pca(mat, self.save)\n\n # mat, map_line = embedding(info, targets, self.save, normal=\"NORMAL\")\n # pca_mat = mat\n\n return pca_mat, map_line, info, sm_hot", "title": "" }, { "docid": "dde7e6d246e6594c590088b1e045ac61", "score": "0.56671387", "text": "def _to_matrix(self, grid):\n\t\tself.output_matrix.first_matrix = []\n\t\tfor letter in self._rows:\n\t\t\tself.output_matrix.first_matrix.append(\n\t\t\t\t[\n\t\t\t\t\tint(grid[field]) for field in self._squares\n\t\t\t\t\t\t\t\t\t\tif field[0] == letter\n\t\t\t\t]\n\t\t\t)\n\t\treturn self.output_matrix", "title": "" }, { "docid": "d837b2ab5bc041bf0437b0029fd7e408", "score": "0.56671035", "text": "def trafo_matrix_seq_to_car(odometry: np.ndarray) -> np.ndarray:\n x_car = odometry[\"x_seq\"]\n y_car = odometry[\"y_seq\"]\n yaw_car = odometry[\"yaw_seq\"]\n c = np.cos(yaw_car)\n s = np.sin(yaw_car)\n return np.array([[c, s, -x_car * c - y_car * s],\n [-s, c, x_car * s - y_car * c],\n [0, 0, 1]])", "title": "" }, { "docid": "0094de6e6a55a7d6579deeb3f8a72c53", "score": "0.56446785", "text": "def X_mat(a):\n return np.array([\n [1,0,0],\n [0,cos(a),-sin(a)],\n [0,sin(a),cos(a)],\n ])", "title": "" }, { "docid": "9a2ed4dd96ec80271deed917baeeaf5f", "score": "0.5622995", "text": "def rotation_matrix(angle):\n\n angle2 = 2 * radians(angle)\n rot_mat = np.array([[1, 0, 0, 0],\n [0, np.cos(angle2), np.sin(angle2), 0],\n [0, -np.sin(angle2), np.cos(angle2), 0],\n [0, 0, 0, 1]])\n return xr.DataArray(rot_mat, dims=('mueller_v', 'mueller_h'), )", "title": "" }, { "docid": "cb8fc46fc2a4421024144cb6c5e4e47b", "score": "0.56200826", "text": "def ToMatrix3x3(self, *args, **kwargs):\n ...", "title": "" }, { "docid": "cb8fc46fc2a4421024144cb6c5e4e47b", "score": "0.56200826", "text": "def ToMatrix3x3(self, *args, **kwargs):\n ...", "title": "" }, { "docid": "656dd532d127543bdfcdf5e5f5acbefb", "score": "0.5605673", "text": "def generate_matrices(sparse_tokens):\n return construct_matrices(sparse_tokens)", "title": "" }, { "docid": "dc8d49d13649b6d549a2f5dc2433649e", "score": "0.5596424", "text": "def get_mueller_matrix(self, *args, **kwargs):\n\n delay = self.get_delay(*args, **kwargs)\n\n m1 = xr.ones_like(delay)\n m0 = xr.zeros_like(delay)\n cc = self.contrast_inst * np.cos(delay)\n cs = self.contrast_inst * np.sin(delay)\n\n m = [[m1, m0, m0, m0],\n [m0, m1, m0, m0],\n [m0, m0, cc, cs],\n [m0, m0, -cs, cc]]\n\n return self.orient(xr.combine_nested(m, concat_dim=('mueller_v', 'mueller_h', ), ))", "title": "" }, { "docid": "cc4df71b2a1e4b9e2ff6967847520df2", "score": "0.5593946", "text": "def matrix_2_abc(Lattice):\n\n a = 
np.sqrt(Lattice[0,0]**2+Lattice[0,1]**2+Lattice[0,2]**2)\n b = np.sqrt(Lattice[1,0]**2+Lattice[1,1]**2+Lattice[1,2]**2)\n c = np.sqrt(Lattice[2,0]**2+Lattice[2,1]**2+Lattice[2,2]**2)\n\n a_vec = Lattice[0,:]\n b_vec = Lattice[1,:]\n c_vec = Lattice[2,:]\n\n return a,b,c,a_vec,b_vec,c_vec", "title": "" }, { "docid": "8586fcf8d73194334cf86647e6926800", "score": "0.55936354", "text": "def __init__(self, function = None):\n if function is None:\n self.mat = np.identity(3)\n else: \n self.mat = parse_transform(function)", "title": "" }, { "docid": "cf818a7abbc657f39bb13e526879ef2c", "score": "0.5588401", "text": "def transformation(self, x):\n return x.transpose(0, 1, 2).reshape(x.shape[0], -1)", "title": "" }, { "docid": "34d999bafd6582a3fe2affc7b5f4a60f", "score": "0.55840534", "text": "def asmatrix(self):\n h_matrix = np.array([1])\n for i in range(self.ndim):\n if self.mode == 'circ':\n h_i = Convmtx([self.k[i]], self.h_list[i], mode=self.mode)\n else:\n h_i = Convmtx([self.n[i]], self.h_list[i], mode=self.mode)\n h_matrix = sp.sparse.kron(h_matrix, h_i)\n return h_matrix", "title": "" }, { "docid": "732f2aee50d93b3297c7a793f7489030", "score": "0.5576046", "text": "def cofactor_matrix(self):\r\n\t\tC = Matrix(self.dimensions)\r\n\t\tfor i in range(self.rows):\r\n\t\t\tfor j in range(self.columns):\r\n\t\t\t\tC[i,j] = self.cofactor(i, j)\r\n\t\treturn C", "title": "" }, { "docid": "0545a948a05eea439066a4f839b58025", "score": "0.5573038", "text": "def asMatrixTransform(self, *args):\n return _osg.AutoTransformRef_asMatrixTransform(self, *args)", "title": "" }, { "docid": "b97dd42b1f6afbd664074f758f56e82c", "score": "0.55666524", "text": "def translation_from_transformation_matrix(matrix):\n x = matrix[0][3]\n y = matrix[1][3]\n z = matrix[2][3]\n return (x, y, z)", "title": "" }, { "docid": "f387d8e1c986980e9a26228c0d6dcd43", "score": "0.55613256", "text": "def tomatrix(x):\n if np.size(np.shape(x)) == 1:\n x.shape = (x.shape[0], 1)", "title": "" }, { "docid": "3d50d3a26df12a08338ff5ec33c96cbe", "score": "0.55552304", "text": "def asMatrixTransform(self, *args):\n return _osg.CameraRef_asMatrixTransform(self, *args)", "title": "" }, { "docid": "59355e887fe4ed5d16ce0cbac67e1094", "score": "0.55445886", "text": "def asMatrixTransform(self, *args):\n return _osg.Transform_asMatrixTransform(self, *args)", "title": "" }, { "docid": "fc6170be26d76bae402f51ab6bcac080", "score": "0.55411786", "text": "def rotation_matrix(phi, theta, psi):\n \"\"\"a11 = np.cos(psi) * np.cos(phi) - np.cos(theta) * np.sin(phi) * np.sin(psi)\n a12 = np.cos(psi) * np.sin(phi) + np.cos(theta) * np.cos(phi) * np.sin(psi)\n a13 = np.sin(psi) * np.sin(theta)\n a21 = -np.sin(psi) * np.cos(phi) - np.cos(theta) * np.sin(phi) * np.cos(psi)\n a22 = -np.sin(psi) * np.sin(phi) + np.cos(theta) * np.cos(phi) * np.cos(psi) \n a23 = np.cos(psi) * np.sin(theta)\n a31 = np.sin(theta) * np.sin(phi)\n a32 = -np.sin(theta) * np.cos(phi)\n a33 = np.cos(theta) \n \"\"\"\n \n cp = np.cos(phi)\n sp = np.sin(phi)\n ct = np.cos(theta)\n st = np.sin(theta)\n cps = np.cos(psi)\n sps = np.sin(psi)\n \n a11 = cp * ct * cps - sp * sps\n a12 = sp * ct * cps + cp * sps\n a13 = -st * cps\n a21 = -cp * ct * sps - sp * cps\n a22 = -sp * ct * sps + cp * cps \n a23 = st * sps\n a31 = cp * st\n a32 = sp * st\n a33 = ct\n # print np.array([a11, a12, a13, a21, a22, a23, a31, a32, a33]).reshape((3, 3)).T.dot([1,0,0]) \n # print np.array([a11, a12, a13, a21, a22, a23, a31, a32, a33]).reshape((3, 3)).T.dot([0,1,0])\n # print np.array([a11, a12, a13, a21, a22, a23, 
a31, a32, a33]).reshape((3, 3)).T.dot([0,0,1])\n return np.array([a11, a12, a13, a21, a22, a23, a31, a32, a33]).reshape((3, 3)).T", "title": "" }, { "docid": "54896a2f67ddbebc3ca87b9ace88f157", "score": "0.5535023", "text": "def create_matrix() -> coo_matrix:\n # construct as a list of list matrix because it's way easier\n M = lil_matrix((len(user_ids), len(track_ids)))\n\n for _, row in df.iterrows():\n user_ind = user_indices[row[\"user_id\"]]\n track_ind = track_indices[row[\"song_id\"]]\n\n M[user_ind, track_ind] = int(row[\"play_count\"])\n return M.tocoo() # save as coordinate matrix", "title": "" }, { "docid": "ac5f6d3f289d9729152b6e2144a4c3f2", "score": "0.5533976", "text": "def Ctranspose(node):\n\n # unknown input\n if not node.num:\n return \"arma::trans(\", \"\", \")\"\n\n \"\"\"\n # colvec -> rowvec\n if node[0].dim == 1:\n node.dim = 2\n\n # rowvec -> colvec\n elif node[0].dim == 2:\n node.dim = 1\n \"\"\"\n\n return \"arma::trans(\", \"\", \")\"", "title": "" }, { "docid": "005b0755b71d0f6c98fdf51e3d307d43", "score": "0.55302596", "text": "def cramerv(dataset: DataFrame, cols: List[str]) -> np.matrix:\n matr = np.matrix([[0] * len(cols)] * len(cols), dtype='float')\n for i in range(len(cols)):\n for j in range(i, len(cols)):\n if i == j:\n matr[i, j] = 1\n else:\n corr, pvalue = _cramerv(dataset[cols[i]], dataset[cols[j]])\n matr[i, j] = corr\n matr[j, i] = corr\n return matr", "title": "" }, { "docid": "26d72e442541141837c1a8074be7fa05", "score": "0.55250275", "text": "def create_transformation_matrix(theta, z, sx, sy, dx, dy):\n\n \"\"\"\n Create our transformation matrices\n\n Rotation (no rotation: theta = 0):\n *Note: theta given in degrees, converted to radians\n [cos(theta), -sin(theta), 0]\n [sin(theta), cos(theta), 0]\n [0 , 0 , 1]\n\n Shearing (no shearing: z = 0):\n [1 , z , 0]\n [0 , 1 , 0]\n [0 , 0 , 1]\n\n Scaling (no scaling: sx = sx = 1):\n [sx , 0 , 0]\n [0 , sy , 0]\n [0 , 0 , 1]\n\n Translation (no translating: dx = dy = 0):\n [1 , 0 , dx]\n [0 , 1 , dy]\n [0 , 0 , 1]\n\n \"\"\"\n theta = theta * np.pi / 180.#Degrees -> Radians\n rotation = np.float32([[np.cos(theta), np.sin(theta), 0], [-np.sin(theta), np.cos(theta), 0], [0,0,1]])\n shearing = np.float32([[1,z,0],[0,1,0],[0,0,1]])\n scaling = np.float32([[sx,0,0],[0,sy,0],[0,0,1]])\n translation = np.float32([[1,0,dx],[0,1,dy],[0,0,1]])\n\n \"\"\"\n Create our final transformation matrix by chaining these together in a dot product of the form:\n res = top_left_to_center * translation * scaling * shearing * rotation * center_to_top_left.\n This is because if we want to apply them in our order, we would normally get the inverse of the dot product\n (translation * scaling * shearing * rotation)^-1 ,\n But since inverses are computationally expensive we instead just flip the order, as that is equivalent in this scenario.\n We do not flip the order of the normalization transformations, however.\n \"\"\"\n transformation = top_left_to_center.dot(translation).dot(scaling).dot(shearing).dot(rotation).dot(center_to_top_left)\n return transformation", "title": "" }, { "docid": "bf8e6c8ac0c11bd13f923faab5dceca0", "score": "0.5524306", "text": "def _from_matrix(cls, data, coxeter_type, index_set, coxeter_type_check):\n # Check that the data is valid\n check_coxeter_matrix(data)\n\n M = matrix(data)\n n = M.ncols()\n\n base_ring = M.base_ring()\n\n if not coxeter_type:\n if n == 1:\n coxeter_type = CoxeterType(['A', 1])\n elif coxeter_type_check:\n coxeter_type = recognize_coxeter_type_from_matrix(M, index_set)\n 
else:\n coxeter_type = None\n\n raw_data = M.list()\n\n mat = typecall(cls, MatrixSpace(base_ring, n, sparse=False), raw_data,\n coxeter_type, index_set)\n mat._subdivisions = M._subdivisions\n\n return mat", "title": "" }, { "docid": "ed8d1dee8d033cdb3ab55d07211bd8e5", "score": "0.55038756", "text": "def transl(x, y, z):\n transform = np.eye(4)\n transform[0][3] = x\n transform[1][3] = y\n transform[2][3] = z\n\n return transform", "title": "" }, { "docid": "5387c513db59f4cb59418cccaa1fa9ca", "score": "0.5493943", "text": "def matrix_transform(coords, matrix):\n return ProjectiveTransform(matrix)(coords)", "title": "" }, { "docid": "cae35cf1a461ecd48e105c86b8df15a8", "score": "0.54878247", "text": "def loadTransformationMatrix(grid):\n matArray = grid.GetFieldData().GetArray(\"TransformationMatrix\")\n if matArray:\n tf = vtk.vtkTransform()\n mat = vtk.vtkMatrix4x4()\n for i in range(0, 16):\n val = matArray.GetTuple1(i)\n mat.SetElement(int(i / 4), i % 4, val)\n tf.SetMatrix(mat)\n return tf\n\n raise IOError(\"No 'TransformationMatrix' array found in field data.\")", "title": "" }, { "docid": "6dbf1b910cfae201a654a5a45b1ca754", "score": "0.54829705", "text": "def xyz_to_mat(foci, xyz_dims=None, mat_dims=None):\n\tfoci = np.hstack((foci, np.ones((foci.shape[0], 1))))\n\tmat = np.array([[-0.5, 0, 0, 45], [0, 0.5, 0, 63], [0, 0, 0.5, 36]]).T\n\tresult = np.dot(foci, mat)[:,::-1] # multiply and reverse column order\n\treturn np.round_(result).astype(int) # need to round indices to ints", "title": "" }, { "docid": "20aa106f53e1ab323500dbba308db223", "score": "0.5482827", "text": "def transform(self, X):\n check_is_fitted(self, \"n_grids_\")\n X = check_array(X, accept_sparse=True)\n n_samples, n_features = X.shape\n\n if np.max(X) > 1:\n raise ValueError(\"The maximum value of X is bigger than 1.\")\n\n if np.min(X) < 0:\n raise ValueError(\"The minimum value of X is lower than 1.\")\n\n if self.n_components != self.n_grids_*n_features:\n raise ValueError(\"X.shape[1] is different from X_train.shape[1].\")\n dataset = get_dataset(X, order='c')\n data = np.zeros(X.size*2)\n row = np.zeros(X.size*2, dtype=np.int32)\n col = np.zeros(X.size*2, dtype=np.int32)\n\n make_sparse_mb(data, row, col, dataset, self.n_grids_)\n return csc_matrix((data, (row, col)),\n shape=(n_samples, self.n_components))", "title": "" }, { "docid": "0931a40a24b123ea08d2d1dae2e8b7fb", "score": "0.54826295", "text": "def matrix_formation(self):\n mat = array([self.unknowns])\n return mat", "title": "" }, { "docid": "ac9cfe77d81a0edc8f6f8ff11785be19", "score": "0.5475008", "text": "def transform(self, matrix):\n xyz = erfa_ufunc.s2c(self.lon, self.lat)\n p = erfa_ufunc.rxp(matrix, xyz)\n lon, lat, ur = erfa_ufunc.p2s(p)\n rep = self.__class__(lon=lon, lat=lat, distance=self.distance * ur)\n\n # handle differentials\n new_diffs = {\n k: d.transform(matrix, self, rep) for k, d in self.differentials.items()\n }\n return rep.with_differentials(new_diffs)", "title": "" }, { "docid": "79708ae12a1e410dbab5254800ada7b9", "score": "0.54740006", "text": "def Matrice_rotazione_asse_x(angolo):\r\n R_1 = [1.0, 0.0, 0.0] # Valori della prima riga\r\n R_2 = [0.0, np.cos(angolo), -np.sin(angolo)] # Valori della seconda riga\r\n R_3 = [0.0, np.sin(angolo), np.cos(angolo)] # Valori della terza riga.\r\n Matrix = np.array([R_1, R_2, R_3])\r\n return Matrix", "title": "" }, { "docid": "97e439f89994c7e44e0718c9fabcdc94", "score": "0.5473735", "text": "def asMatrixTransform(self, *args):\n return _osg.MatrixTransform_asMatrixTransform(self, 
*args)", "title": "" }, { "docid": "a12bbcb4db41637d1fa1bdc922523bcf", "score": "0.54709387", "text": "def test_to_matrix():\n results = table.to_numpy()\n assert(type(results) == ndarray)", "title": "" }, { "docid": "f2fdc585cee25183b77919bac23e5ca1", "score": "0.54542154", "text": "def coxeter_matrix(self):\n return self", "title": "" }, { "docid": "7f4b0cc95f1ac258a09b91bd9ae4cf45", "score": "0.5444027", "text": "def create_correlation_matrix(feature_values, corr_type):\n \n # Extracting values from Pandas DataFrame\n stripped_data = feature_values.values\n number_of_features = len(feature_values.columns)\n\n # Initializing the correlation matrix\n corr_mx = np.zeros((number_of_features, number_of_features))\n \n # Filling it depending on what metric we want to use\n if(corr_type == \"la\"):\n for i in range(number_of_features):\n norm_i = la.norm(stripped_data[:,i])\n for j in range(number_of_features):\n norm_j = la.norm(stripped_data[:,j])\n corr_mx[i][j] = np.dot(stripped_data[:,i], stripped_data[:,j])\n corr_mx[i][j] = corr_mx[i][j] / (norm_i * norm_j)\n elif(corr_type == \"pearson\"):\n corr_mx = np.corrcoef(stripped_data.T)\n\n return corr_mx", "title": "" }, { "docid": "8e2e3211ce98e42c445067b007928c98", "score": "0.5441604", "text": "def asMatrixTransform(self, *args):\n return _osg.TransformRef_asMatrixTransform(self, *args)", "title": "" }, { "docid": "213398e74567012c49e849a17c86c283", "score": "0.5434309", "text": "def Matrice_rotazione_asse_z(angolo):\r\n R_1 = [np.cos(angolo), np.sin(angolo), 0.0] # Valori della prima riga\r\n R_2 = [-np.sin(angolo), np.cos(angolo), 0.0] # Valori della seconda riga\r\n R_3 = [0.0, 0.0, 1.0] # Valori della terza riga.\r\n Matrix = np.array([R_1, R_2, R_3])\r\n return Matrix", "title": "" }, { "docid": "c38c9deb7c5ec5b4bf661f2866a8b440", "score": "0.543419", "text": "def transform(self, matrix):\n self.matrix = np.dot(matrix, self.matrix)\n return self", "title": "" }, { "docid": "404cd38b36fdcd4cd9856691eec25f35", "score": "0.5424677", "text": "def creation_plateau():\n plateau = np.zeros((15, 15), dtype=int) # On crée une matrice 15x15 de 0\n return plateau", "title": "" }, { "docid": "36ec8e5e5d41e7458405bde68a933431", "score": "0.54218674", "text": "def tomatrix(self):\r\n A = self.weights[0] * self.mpos[0].tomatrix()\r\n for i, mpo in enumerate(self.mpos[1:]):\r\n A = A + self.weights[i + 1] * mpo.tomatrix()\r\n return A", "title": "" }, { "docid": "0fae8177fd172f7159bd85f23864fdb5", "score": "0.5418126", "text": "def transform(self, X):\n X, dtype, itype, dense, \\\n nsamples, nfeatures, movable = self.check_input(X, \"transform\")\n if dtype != self.__mdtype:\n raise TypeError( \\\n \"transform: datatype of X is different than model dtype!\")\n (host, port) = FrovedisServer.getServerInstance()\n trans_mat = rpclib.kmeans_transform(host, port, \\\n self.__mid, self.__mdtype, \\\n X.get(), itype, dense)\n excpt = rpclib.check_server_exception()\n if excpt[\"status\"]:\n raise RuntimeError(excpt[\"info\"])\n ret = FrovedisRowmajorMatrix(mat=trans_mat, \\\n dtype=TypeUtil.to_numpy_dtype(dtype))\n if movable:\n return ret.to_numpy_array()\n else:\n return ret", "title": "" }, { "docid": "00c56a0378bea465ed22303feb774b33", "score": "0.5412475", "text": "def forward_transform(self, matrix):\n r=np.shape(matrix)[0]\n c=np.shape(matrix)[1]\n #print(r,c)\n N=r\n final = np.zeros((r, c), dtype=np.complex_)\n u=np.shape(final)[0]\n v=np.shape(final)[1]\n for u in range(0,N):\n for v in range(0,N):\n for i in range(0,N):\n for j in range(0,N):\n 
final[u][v]=matrix[i][j]*(math.cos(2*math.pi*(u*i+v*j)/N)-1j*math.sin(2*math.pi*(u*i+v*j)/N))+final[u][v]\n\n\n\n #print(np.fft.fft2(matrix))\n\n\n\n return final", "title": "" }, { "docid": "933dbdc698d2a9ab1ba7dae337435d68", "score": "0.5411228", "text": "def Matrixf_transform3x3(*args):\n return _osg.Matrixf_transform3x3(*args)", "title": "" }, { "docid": "572b408880ec34ae20d6c85448a8aa54", "score": "0.54033417", "text": "def _create_target_matrix(self):\r\n self._A_tilde = (self._A_tilde @ self._A_hat).tocoo()\r\n scores = np.log(self._A_tilde.data) - math.log(self._A_tilde.shape[0])\r\n mask = scores < 0\r\n rows = self._A_tilde.row[mask]\r\n cols = self._A_tilde.col[mask]\r\n scores = scores[mask]\r\n target_matrix = sp.csr_matrix((scores, (rows, cols)),\r\n shape=self._A_tilde.shape,\r\n dtype=np.float32)\r\n\r\n return target_matrix", "title": "" }, { "docid": "142fe59dcc14f657a55a0404ac06fa05", "score": "0.53975326", "text": "def to_kraus_matrices(self) -> List[np.ndarray]:\n return to_kraus_matrices_from_hs(\n self.composite_system, self.hs, self.eps_proj_physical\n )", "title": "" }, { "docid": "362b82e5ab2045d2e018a1f7bf9edfd3", "score": "0.539294", "text": "def to_process_matrix(self) -> np.ndarray:\n return to_process_matrix_from_hs(self.composite_system, self.hs)", "title": "" }, { "docid": "0b2681dc6b9c640ea516c4634d466869", "score": "0.5390335", "text": "def matReflectXAxis():\n return np.matrix([[1, 0], [0, -1]])", "title": "" }, { "docid": "ac417cee78a2e6444cd9f7f4ff131379", "score": "0.5389138", "text": "def make_transform(rotX=0, rotY=0, rotZ=0, scale=1, offset=vray.Vector(0.0,0.0,0.0)):\n\tmS = vray.Matrix(scale)\n\tmX = vray.Matrix.makeRotationMatrixX(rotX)\n\tmY = vray.Matrix.makeRotationMatrixY(rotY)\n\tmZ = vray.Matrix.makeRotationMatrixZ(rotZ)\n\ttransform = vray.Transform(mS * mZ * mY * mX, offset)\n\treturn transform", "title": "" }, { "docid": "88aff2cac6ce9b12688b943e8a33ff26", "score": "0.5386642", "text": "def __calculate_matrix_a(self):\n s = (self.N, self.N)\n result = np.zeros(s)\n\n for row in xrange(0, self.N):\n for col in xrange(0, self.N):\n if row == col:\n result[row][col] = self.R - 2.0 * self.r * self.__summation(self.N, row + 1, self.__ci_power)\n else:\n result[row][col] = -self.r * self.__summation(self.N, col + 1, self.__ci_power)\n\n self.A = result", "title": "" }, { "docid": "c68afc5a6f38cdda85f9a1df528607c1", "score": "0.5374525", "text": "def transformation_matrix_to_cartesian(structure):\n a = structure.lattice.a\n b = structure.lattice.b\n c = structure.lattice.c\n alpha = np.deg2rad(structure.lattice.alpha) # angle a to c\n beta = np.deg2rad(structure.lattice.beta) # angle b to c\n gamma = np.deg2rad(structure.lattice.gamma) # angle a to b\n\n cos_alpha = math.cos(alpha)\n cos_beta = math.cos(beta)\n cos_gamma = math.cos(gamma)\n sin_gamma = math.sin(gamma)\n\n factor_e3_0 = cos_beta\n factor_e3_1 = (cos_alpha - cos_beta*cos_gamma)/sin_gamma\n factor_e3_2 = math.sqrt(1 - np.dot(factor_e3_0, factor_e3_0) - np.dot(factor_e3_1, factor_e3_1))\n\n # Columns are the transformations of the corresponding cartesian basis vector.\n return np.array([\n [a, b*cos_gamma, c*factor_e3_0],\n [0, b*sin_gamma, c*factor_e3_1],\n [0, 0, c*factor_e3_2]\n ])", "title": "" }, { "docid": "ea93b2796fcccbad482fa130a02dea50", "score": "0.5366928", "text": "def asMatrixTransform(self, *args):\n return _osg.CameraViewRef_asMatrixTransform(self, *args)", "title": "" }, { "docid": "2c47f6c92fae6decb8795829c69bd4cb", "score": "0.5366291", "text": "def 
_convert_val_to_matrix(self):\n from nested.lsa import pop_to_matrix\n self.param_matrix, self.feat_matrix = pop_to_matrix(self, 'p', 'f', ['p'], ['o'])\n _, self.obj_matrix = pop_to_matrix(self, 'p', 'o', ['p'], ['o'])", "title": "" }, { "docid": "2ce25cf9de165e41cf94494b0e849883", "score": "0.53603107", "text": "def make_cost_matrix(profit_matrix, inversion_function):\n import munkres\n return munkres.make_cost_matrix(profit_matrix, inversion_function)", "title": "" }, { "docid": "92578b06fd560f9ec922fe0e09541897", "score": "0.53588265", "text": "def trans_list():\r\n sag = 5\r\n ax = 3\r\n cor = 5\r\n N = 0\r\n value = np.zeros((int(sag*ax*cor+12), 4))\r\n for i in range(sag):\r\n for j in range(ax):\r\n for k in range(cor):\r\n value[N, 0] = int(2*i - 3)\r\n value[N, 1] = int(j - 1)\r\n value[N, 2] = int(2*k - 3)\r\n value[N, 3] = 0\r\n if i == 4:\r\n value[N, 0] = 0\r\n if k == 4:\r\n value[N, 2] = 0\r\n N += 1\r\n while N < sag*ax*cor+12:\r\n for i in range(3):\r\n value[N, 0] = -2\r\n value[N, 1] = i\r\n value[N, 3] = 1\r\n N += 1\r\n value[N, 0] = -1\r\n value[N, 1] = i\r\n value[N, 3] = 1\r\n N += 1\r\n value[N, 0] = 1\r\n value[N, 1] = i\r\n value[N, 3] = 1\r\n N += 1\r\n value[N, 0] = 2\r\n value[N, 1] = i\r\n value[N, 3] = 1\r\n N += 1\r\n return np.array(value, dtype=int)", "title": "" } ]
cdd4fef4ac17a7d633fd81329cd1d45b
load spectra from raw file
[ { "docid": "5e40777ede40d2fcc25c75608bbbccf4", "score": "0.61535674", "text": "def load_spec_file(self, filename, load_to_ram=False):\r\n self.type = 'spec'\r\n\r\n self.f = netCDF4.Dataset(filename, 'r')\r\n print('keys ', self.f.variables.keys())\r\n\r\n self.timestamps = self.f.variables['time'][:]\r\n print('time ', self.timestamps[:10])\r\n self.delta_ts = np.mean(np.diff(self.timestamps)) if self.timestamps.shape[0] > 1 else 2.0\r\n self.range = self.f.variables['range'][:]\r\n print('range ', self.range[:10])\r\n self.velocity = self.f.variables['velocity'][:]\r\n print('velocity ', self.velocity[:10])\r\n print('Z chunking ', self.f.variables['Z'].chunking())\r\n\r\n self.begin_dt = h.ts_to_dt(self.timestamps[0])\r\n\r\n if load_to_ram == True:\r\n self.spectra_in_ram = True\r\n self.Z = self.f.variables['Z'][:].filled()\r\n self.LDR = self.f.variables['LDR'][:].filled()\r\n self.SNRco = self.f.variables['SNRco'][:].filled()", "title": "" } ]
[ { "docid": "71fb37fa0e0483864f730fc4934bbd94", "score": "0.73631144", "text": "def loadSpectra(self, path):\n if type(path) is not str:\n raise TypeError(\"path argument is not a string.\")\n foundBackground = False\n doGetWaveLength = False\n foundFiles = []\n for file in os.listdir(path):\n if fnmatch.fnmatch(file, f'*.csv'):\n foundFiles.append(file)\n\n sortedPaths = foundFiles\n for name in sortedPaths:\n # Find the positions\n matchCoords = re.match(r\"([A-Za-z_]*)_x(\\d+)_y(\\d+).*\", name)\n if matchCoords:\n posX = int(matchCoords.group(2))\n posY = int(matchCoords.group(3))\n\n # Open file and put in the data\n fich = open(path + '/' + name, \"r\")\n test_str = list(fich)\n fich.close()\n xAxis = []\n spectrum = []\n for j in test_str:\n elem_str = j.replace(\"\\n\", \"\")\n elem = elem_str.split(\",\")\n spectrum.append(float(elem[1]))\n\n if doGetWaveLength == False:\n elem_str = j.replace(\"\\n\", \"\")\n elem = elem_str.split(\",\")\n xAxis.append(float(elem[0]))\n self.setWavelength(xAxis)\n doGetWaveLength = True\n self.addSpectrum(posX, posY, spectrum, autoSave=False)\n\n matchBackground = re.match(\".*?(_background).*\", name)\n if matchBackground:\n foundBackground = True\n fich = open(path + '/' + name, \"r\")\n test_str = list(fich)\n fich.close()\n spectrum = []\n for j in test_str:\n elem_str = j.replace(\"\\n\", \"\")\n elem = elem_str.split(\",\")\n spectrum.append(float(elem[1]))\n self.background = spectrum\n\n return foundBackground", "title": "" }, { "docid": "4a8e93f983e232e59a31dae5e94a6606", "score": "0.71960706", "text": "def read_spectra(self, line, path='./'):\n eqpos = line.find('=')\n self.file = line[eqpos+2:].rstrip()\n\n # check if file exists\n full_filename = path + self.file\n\n # open and read the spectrum\n # datafile = fits.open(full_filename)\n # tdata = datafile[1].data # data are in the 1st extension\n tdata = Table.read(full_filename)\n\n self.waves = tdata['WAVELENGTH'].quantity\n self.fluxes = tdata['FLUX'].quantity\n self.uncs = tdata['SIGMA'].quantity\n self.npts = tdata['NPTS'].quantity\n self.n_waves = len(self.waves)\n\n # include the model if it exists\n # currently only used for FUSE H2 model\n if 'MODEL' in tdata.colnames:\n self.model = tdata['MODEL'].quantity\n\n # fix odd unit designations\n if self.waves.unit == 'ANGSTROM':\n self.waves = self.waves.value*u.angstrom\n if self.waves.unit == 'MICRON':\n self.waves = self.waves.value*u.micron\n if self.fluxes.unit == 'ERG/CM2/S/A':\n self.fluxes = self.fluxes.value*(u.erg/((u.cm**2)*u.s*u.angstrom))\n self.uncs = self.uncs.value*(u.erg/((u.cm**2)*u.s*u.angstrom))\n\n # compute the min/max wavelengths\n self.wave_range = np.array([min(self.waves.value),\n max(self.waves.value)])*self.waves.unit\n\n # trim any data that is not finite\n indxs, = np.where(~np.isfinite(self.fluxes))\n if len(indxs) > 0:\n self.fluxes[indxs] = 0.0\n self.npts[indxs] = 0\n\n # convert wavelengths to microns (standardization)\n self.waves = self.waves.to(u.micron)\n self.wave_range = self.wave_range.to(u.micron)", "title": "" }, { "docid": "3ebe5906751361e0ae03e99ad3587213", "score": "0.7046051", "text": "def read_spectra(infile, single=False):\n\n ftype = np.float64\n if single:\n ftype = np.float32\n\n infile = os.path.abspath(infile)\n if not os.path.isfile(infile):\n raise IOError(\"{} is not a file\".format(infile))\n\n hdus = fits.open(infile, mode=\"readonly\")\n nhdu = len(hdus)\n\n # load the metadata.\n\n meta = dict(hdus[0].header)\n\n # initialize data objects\n\n bands = []\n fmap = None\n 
wave = None\n flux = None\n ivar = None\n mask = None\n res = None\n extra = None\n\n # For efficiency, go through the HDUs in disk-order. Use the\n # extension name to determine where to put the data. We don't\n # explicitly copy the data, since that will be done when constructing\n # the Spectra object.\n\n for h in range(1, nhdu):\n name = hdus[h].header[\"EXTNAME\"]\n if name == \"FIBERMAP\":\n fmap = encode_table(Table(hdus[h].data, copy=True).as_array())\n else:\n # Find the band based on the name\n mat = re.match(r\"(.*)_(.*)\", name)\n if mat is None:\n raise RuntimeError(\"FITS extension name {} does not contain the band\".format(name))\n band = mat.group(1).lower()\n type = mat.group(2)\n if band not in bands:\n bands.append(band)\n if type == \"WAVELENGTH\":\n if wave is None:\n wave = {}\n wave[band] = native_endian(hdus[h].data.astype(ftype))\n elif type == \"FLUX\":\n if flux is None:\n flux = {}\n flux[band] = native_endian(hdus[h].data.astype(ftype))\n elif type == \"IVAR\":\n if ivar is None:\n ivar = {}\n ivar[band] = native_endian(hdus[h].data.astype(ftype))\n elif type == \"MASK\":\n if mask is None:\n mask = {}\n mask[band] = native_endian(hdus[h].data.astype(np.uint32))\n elif type == \"RESOLUTION\":\n if res is None:\n res = {}\n res[band] = native_endian(hdus[h].data.astype(ftype))\n else:\n # this must be an \"extra\" HDU\n if extra is None:\n extra = {}\n if band not in extra:\n extra[band] = {}\n extra[band][type] = native_endian(hdus[h].data.astype(ftype))\n\n # Construct the Spectra object from the data. If there are any\n # inconsistencies in the sizes of the arrays read from the file,\n # they will be caught by the constructor.\n\n spec = Spectra(bands, wave, flux, ivar, mask=mask, resolution_data=res,\n fibermap=fmap, meta=meta, extra=extra, single=single)\n\n hdus.close()\n\n return spec", "title": "" }, { "docid": "6baf124e580ee6b4be1c79c83c3cc53b", "score": "0.70320565", "text": "def load_synthetic_binary_spectra(binary_spectra_dir, file_format = '.npy'):\n return np.load(binary_spectra_dir + file_format)", "title": "" }, { "docid": "6e86f03063bf849227502140230e09ce", "score": "0.6869331", "text": "def load_synthetic_single_spectra(spectra_file_dir, file_format = '*.npy'):\n\n synthetic_single_fluxes = []\n # Extract spectra from files\n for file in glob.glob(spectra_file_dir + file_format):\n temp = np.load(file)\n synthetic_single_fluxes.append(temp)\n\n return np.vstack(synthetic_single_fluxes)", "title": "" }, { "docid": "6a35120426f151916b44263016720d12", "score": "0.67560273", "text": "def load_spectra(path):\n data = fits.getdata(path)\n ar_wl = data['wavelength grid']\n # assume that group IDs are integers in the range of [0, num_data_columns-1]\n ar_spectra = np.zeros(shape=(len(data.columns) - 1, ar_wl.size))\n\n # extract the correct median spectrum\n for c in data.columns:\n if c.name != 'wavelength grid':\n # we expect the column name to contain 'group_id=' followed by a number\n parsed_string = dict([i.split('=') for i in c.name.split(',')])\n # index of group_id in parsed string\n group_id = int(parsed_string['group_id'])\n ar_spectra[group_id] = data[c.name]\n\n return ar_wl, ar_spectra", "title": "" }, { "docid": "ff89b8b984148c012202b1ca1c44219a", "score": "0.66830295", "text": "def loadSpectra(spectraFile, unitTransform):\n \n wavelength, signal = loadtxt(spectraFile, unpack = True) \n \n# with open(spectraFile, 'rb') as f:\n# reader = csv.reader(f)\n# try:\n# for row in reader:\n# #Takes the string from the first element and splits 
it\n# row = row[0].split() \n# wavelength.append(float(row[0]))\n# signal.append(float(row[1]))\n# print \"Successful read!\"\n# \n# except csv.Error, e:\n# sys.exit(\"file %s, line %d: %s\" % (input, reader.line_num, e))\n\n wavelength = array(wavelength)\n signal = array(signal)\n \n unitTransform = 'electron volt-inverse meter relationship'\n transform = physical_constants[unitTransform][0]/100\n \n if unitTransform:\n wavelength /= transform\n\n return wavelength, signal", "title": "" }, { "docid": "0e06ff0dba620106a0c1aeca63fe0f56", "score": "0.652874", "text": "def read(self):\n\n # open and read all the lines in the file\n f = open(self.path + self.file, 'r')\n self.datfile_lines = list(f)\n\n # get the photometric band data\n self.data['BAND'] = BandData('BAND')\n self.data['BAND'].read_bands(self.datfile_lines)\n\n # covert the photoemtric band data to fluxes in all possible bands\n self.data['BAND'].get_band_fluxes()\n\n # go through and get info before reading the spectra\n poss_mod_params = ['model_type', 'Z', 'vturb',\n 'logg', 'Teff', 'origin']\n for line in self.datfile_lines:\n cpair = self._parse_dfile_line(line)\n if cpair is not None:\n if cpair[0] == 'sptype':\n self.sptype = cpair[1]\n elif cpair[0] in poss_mod_params:\n self.model_params[cpair[0]] = cpair[1]\n elif cpair[0] == 'corfac_irs_zerowave':\n self.corfac['IRS_zerowave'] = float(cpair[1])\n elif cpair[0] == 'corfac_irs_slope':\n self.corfac['IRS_slope'] = float(cpair[1])\n elif cpair[0] == 'corfac_irs_maxwave':\n self.corfac['IRS_maxwave'] = float(cpair[1])\n elif cpair[0] == 'corfac_irs':\n self.corfac['IRS'] = float(cpair[1])\n\n # read the spectra\n if not self.photonly:\n for line in self.datfile_lines:\n if line.find('IUE') == 0:\n self.data['IUE'] = SpecData('IUE')\n self.data['IUE'].read_iue(line, path=self.path)\n elif line.find('FUSE') == 0:\n self.data['FUSE'] = SpecData('FUSE')\n self.data['FUSE'].read_fuse(line, path=self.path)\n elif line.find('STIS_Opt') == 0:\n self.data['STIS_Opt'] = SpecData('STIS_Opt')\n self.data['STIS_Opt'].read_stis(line, path=self.path)\n elif line.find('STIS') == 0:\n self.data['STIS'] = SpecData('STIS')\n self.data['STIS'].read_stis(line, path=self.path)\n elif line.find('SpeX') == 0:\n self.data['SpeX'] = SpecData('SpeX')\n self.data['SpeX'].read_spex(line, path=self.path)\n elif line.find('IRS') == 0 and line.find('IRS15') < 0:\n self.data['IRS'] = SpecData('IRS')\n irs_corfacs = self.corfac\n if not self.use_corfac:\n irs_corfacs = {}\n self.data['IRS'].read_irs(line, path=self.path,\n corfac=irs_corfacs)", "title": "" }, { "docid": "eff68d191146490d7c581f8721477446", "score": "0.6476548", "text": "def read(self):\n self._read_spectra(self._filename)\n\n self._sort_spectra_by_rt()\n\n self._read_retention_times()\n self._read_retention_indices()", "title": "" }, { "docid": "4ce7387d68e6073cb12d8ebc626ec710", "score": "0.64035296", "text": "def import_spectra(path):\n mtrx_data = access2thematrix.MtrxData()\n data_file = f'{path}'\n traces, message = mtrx_data.open(data_file)\n print(\"path = \", data_file)\n print(message)\n curve_trace, message = mtrx_data.select_curve(traces[0])\n curve_retrace, message = mtrx_data.select_curve(traces[1])\n return Spec_curve(curve_trace), Spec_curve(curve_retrace)", "title": "" }, { "docid": "5175c5acbb6782bddd7f4fff9d9a1248", "score": "0.63829255", "text": "def read_spectra_list(filename: str) -> List[sus.MsmsSpectrum]:\n return list(read_spectra(filename))", "title": "" }, { "docid": "5b32d5ad8475721fa475528d8a8f69f7", "score": 
"0.63627803", "text": "def load_all_spectra(spectra_folder=\"spectra/\", ext_snr=1, ext_fluxed=2,\n ext_telluric_corr=3, include_subfolders=False, \n use_counts_ext_and_flux=False, \n correct_negative_fluxes=False,):\n # Initialise\n ids = []\n spectra_b = [] # blue\n spectra_r = [] # red\n snrs_b = []\n snrs_r = []\n exp_time = []\n obs_mjd = []\n obs_date = []\n ra = []\n dec = []\n airmass = []\n bcor = []\n readmode = []\n grating_b = []\n grating_r = []\n beam_splitter = []\n xbin = []\n ybin = []\n fullframe = []\n filename_b = []\n filename_r = []\n fluxed = []\n telluric_corr = []\n corrected_neg_fluxes_b_all = []\n corrected_neg_fluxes_r_all = []\n n_neg_px_b_all = []\n n_neg_px_r_all = []\n neg_flux_weight_b_all = []\n neg_flux_weight_r_all = []\n\n if not include_subfolders:\n spectra_b_path = os.path.join(spectra_folder, \"*_b.fits\")\n spectra_r_path = os.path.join(spectra_folder, \"*_r.fits\")\n else:\n spectra_b_path = os.path.join(spectra_folder, \"*\", \"*_b.fits\")\n spectra_r_path = os.path.join(spectra_folder, \"*\", \"*_r.fits\")\n \n spectra_b_files = glob.glob(spectra_b_path)\n spectra_r_files = glob.glob(spectra_r_path)\n\n spectra_b_files.sort()\n spectra_r_files.sort()\n\n bad_files = []\n\n # Ensure we have a matching number of blue and red spectra\n if len(spectra_b_files) != len(spectra_r_files):\n spec_files_b = [bb.split(\"_b\")[0] for bb in spectra_b_files]\n spec_files_r = [rr.split(\"_r\")[0] for rr in spectra_r_files]\n n_files = Counter(spec_files_b + spec_files_r)\n unmatched_spec = [si for si in n_files if n_files[si]==1]\n\n raise ValueError(\"Unequal number of blue and red spectra for following\"\n \" observations: %s\" % unmatched_spec)\n\n for fi, (file_b, file_r) in enumerate(\n zip(tqdm(spectra_b_files), spectra_r_files)):\n # Load in and extract required information from fits files\n with fits.open(file_b) as fits_b, fits.open(file_r) as fits_r:\n # If we're relying on our own flux calibration, no need to bother\n # with other extensions\n if use_counts_ext_and_flux:\n is_fluxed = True\n is_telluric_corr = False\n ext_sci = 1\n\n # Otherwise work as usual with what PyWiFeS outputs\n else:\n # Determine how many extensions the fits file has.\n # 2 extensions --> non-fluxed, no telluric corr [PyWiFeS p08]\n # 3 extensions --> fluxed, no telluric corr [PyWiFeS p09]\n # 4 extensions --> fluxed and telluric corr [PyWiFeS p10]\n if len(fits_b) != len(fits_r):\n raise ValueError(\"Blue and red have different # fits ext\")\n \n if len(fits_b) == ext_snr+1:\n is_fluxed = False\n is_telluric_corr = False\n ext_sci = 1\n\n elif len(fits_b) == ext_fluxed+1:\n is_fluxed = True\n is_telluric_corr = False\n ext_sci = 2\n\n elif len(fits_b) == ext_telluric_corr+1:\n is_fluxed = True\n is_telluric_corr = True\n ext_sci = 3\n\n else: \n raise ValueError(\"Unexpected # of HDUs. Should have 2-4.\")\n\n # Take the \"most-complete\" (in terms of data reduction) extension\n # for use as the \"science\" spectrum\n spec_b = np.stack(fits_b[ext_sci].data)\n spec_r = np.stack(fits_r[ext_sci].data)\n\n # PyWiFeS/process_stellar extraction causes non-physical negative\n # fluxes for some stars, likely those that have a substantial sky\n # background from e.g. a nearby star. Short of re-reducing these \n # stars, we can correct this here in a ~mostly~ rigorous way. 
\n # Uncertainties from this approach will likely be overestimated.\n # TODO: implement this for when we're not doing our own fluxing.\n corrected_neg_fluxes_b = False\n corrected_neg_fluxes_r = False\n\n n_neg_px_b = np.nansum(spec_b[:,1] < 0)\n n_neg_px_r = np.nansum(spec_r[:,1] < 0)\n\n neg_flux_weight_b = np.nan\n neg_flux_weight_r = np.nan\n \n if correct_negative_fluxes and (ext_sci == ext_snr):\n # Only run if we have negative fluxes\n if n_neg_px_b > 0:\n spec_b, neg_flux_weight_b, _ = correct_neg_counts(spec_b)\n corrected_neg_fluxes_b = True\n\n if n_neg_px_r > 0:\n spec_r, neg_flux_weight_r, _ = correct_neg_counts(spec_r)\n corrected_neg_fluxes_r = True\n\n # Regardless, store info on negative flux processing\n corrected_neg_fluxes_b_all.append(corrected_neg_fluxes_b)\n corrected_neg_fluxes_r_all.append(corrected_neg_fluxes_r)\n n_neg_px_b_all.append(n_neg_px_b)\n n_neg_px_r_all.append(n_neg_px_r)\n neg_flux_weight_b_all.append(neg_flux_weight_b)\n neg_flux_weight_r_all.append(neg_flux_weight_r)\n\n # Get headers\n header_b = fits_b[0].header\n header_r = fits_r[0].header\n\n # Ensure that there is actually signal here. If not, flag the files\n # as bad and skip processing them\n if (len(spec_b[:,1][np.isfinite(spec_b[:,1])]) == 0\n or len(spec_r[:,1][np.isfinite(spec_r[:,1])]) == 0):\n bad_files.append(file_b)\n bad_files.append(file_r)\n continue\n\n # Now that we know we're not working with bad files, add ext info\n fluxed.append(is_fluxed)\n telluric_corr.append(is_telluric_corr)\n\n # Get SNR measurements for each arm\n sig_b = np.median(fits_b[ext_snr].data[\"spectrum\"])\n snrs_b.append(sig_b / sig_b**0.5)\n\n sig_r = np.median(fits_r[ext_snr].data[\"spectrum\"])\n snrs_r.append(sig_r / sig_r**0.5)\n \n # Doing our own flux correction\n if use_counts_ext_and_flux:\n # Import blue\n spec_b_fluxed, e_spec_b_fluxed = flux_calibrate_spectra(\n wave=spec_b[:,0],\n spectra=spec_b[:,1],\n e_spectra=spec_b[:,2],\n airmass=float(header_b[\"AIRMASS\"]),\n arm=\"b\",\n exptime=float(header_b['EXPTIME']),\n )\n\n # Import red\n spec_r_fluxed, e_spec_r_fluxed = flux_calibrate_spectra(\n wave=spec_r[:,0],\n spectra=spec_r[:,1],\n e_spectra=spec_r[:,2],\n airmass=float(header_r[\"AIRMASS\"]),\n arm=\"r\",\n exptime=float(header_r['EXPTIME']),\n )\n\n # Save\n spec_b[:,1] = spec_b_fluxed\n spec_b[:,2] = e_spec_b_fluxed\n\n spec_r[:,1] = spec_r_fluxed\n spec_r[:,2] = e_spec_r_fluxed\n\n else:\n # HACK. 
FIX THIS.\n # Uncertainties on flux calibratated spectra don't currently \n # make sense, get the uncertainties from the unfluxxed spectra\n # in terms of fractions, then apply to the fluxed spectra \n sigma_b_pc = (fits_b[ext_snr].data[\"sigma\"] \n / fits_b[ext_snr].data[\"spectrum\"])\n sigma_r_pc = (fits_r[ext_snr].data[\"sigma\"]\n / fits_r[ext_snr].data[\"spectrum\"])\n \n # Sort out the uncertainties\n spec_b[:,2] = spec_b[:,1] * sigma_b_pc\n\n spec_r[:,2] = spec_r[:,1] * sigma_r_pc\n\n # Append\n spectra_b.append(spec_b.T)\n spectra_r.append(spec_r.T)\n\n # Get object name and details of observation\n header = fits_b[0].header\n ids.append(header[\"OBJNAME\"])\n exp_time.append(float(header[\"EXPTIME\"]))\n obs_mjd.append(float(header[\"MJD-OBS\"]))\n obs_date.append(header[\"DATE-OBS\"])\n ra.append(header[\"RA\"])\n dec.append(header[\"DEC\"])\n airmass.append(float(header[\"AIRMASS\"]))\n bcor.append(float(header[\"RADVEL\"]))\n readmode.append(header[\"READMODE\"])\n grating_b.append(header[\"GRATINGB\"])\n grating_r.append(header[\"GRATINGR\"])\n beam_splitter.append(header[\"BEAMSPLT\"])\n xbin.append(int(header[\"CCDSUM\"].split(\" \")[0]))\n ybin.append(int(header[\"CCDSUM\"].split(\" \")[1]))\n\n # Determine if full or half-frame\n y_min = int(header[\"CCDSEC\"].split(\",\")[-1].split(\":\")[0])\n if y_min == 1:\n fullframe.append(True)\n else:\n fullframe.append(False)\n\n filename_b.append(os.path.split(file_b)[-1])\n filename_r.append(os.path.split(file_r)[-1])\n \n # Now combine the arrays into our output structures\n spectra_b = np.stack(spectra_b)\n spectra_r = np.stack(spectra_r)\n\n # Convert arrays where necessary\n snrs_b = np.array(snrs_b).astype(float).astype(int)\n snrs_r = np.array(snrs_r).astype(float).astype(int)\n\n data = [ids, snrs_b, snrs_r, exp_time, obs_mjd, obs_date, ra, dec, airmass,\n bcor, readmode, grating_b, grating_r, beam_splitter, xbin, ybin, \n fullframe, filename_b, filename_r, fluxed, telluric_corr,\n corrected_neg_fluxes_b_all, corrected_neg_fluxes_r_all, \n n_neg_px_b_all, n_neg_px_r_all, neg_flux_weight_b_all, \n neg_flux_weight_r_all]\n cols = [\"id\", \"snr_b\", \"snr_r\", \"exp_time\", \"mjd\", \"date\", \"ra\", \n \"dec\", \"airmass\", \"bcor_pw\", \"readmode\", \"grating_b\", \"grating_r\", \n \"beam_splitter\", \"xbin\", \"ybin\", \"fullframe\", \"filename_b\", \n \"filename_r\", \"fluxed\", \"telluric_corr\", \"corrected_neg_fluxes_b\", \n \"corrected_neg_fluxes_r\", \"n_neg_px_b\", \"n_neg_px_r\",\n \"neg_flux_weight_b\", \"neg_flux_weight_r\",]\n\n # Create our resulting dataframe from a dict comprehension\n data = {col: vals for col, vals in zip(cols, data)} \n observations = pd.DataFrame(data=data)\n\n # Print bad filenames\n print(\"Excluded %i bad (i.e. 
all nan) files: %s\" % \n (len(bad_files), bad_files))\n\n return observations, spectra_b, spectra_r", "title": "" }, { "docid": "d4cfc366131ac5b43331740135ef7ade", "score": "0.6356244", "text": "def _read_spectra(data, n_rec_per_spec, spec_rec_start_indx, verbose=False):\n num_samples = int((len(data) - 256 * (spec_rec_start_indx - 1)) / (256 * n_rec_per_spec))\n spectra = []\n # The range below is due to the definition of the spectrum offset in\n # pcf definition document assuming a first index of 1\n progress_iterable = tqdm(\n range(1, num_samples + 1),\n leave=False,\n desc=\"Spectrum\",\n disable=not(verbose)\n )\n for spectrum_number in progress_iterable:\n spectrum_header_offset = _get_spectrum_header_offset(\n spectrum_number,\n spec_rec_start_indx,\n n_rec_per_spec\n )\n header_def = SPECTRUM_DEFINITION\n spectrum_header = _read_header(\n data[spectrum_header_offset: spectrum_header_offset+256],\n header_def\n )\n\n spctrum_offset = spectrum_header_offset + 256\n n_channels = int(256*(n_rec_per_spec-1) / 4)\n values = struct.unpack(\n \"{}f\".format(n_channels),\n data[spctrum_offset: spctrum_offset+(4*n_channels)]\n )\n\n spectrum = {\"header\": spectrum_header, \"spectrum\": np.array(values)}\n spectra.append(spectrum)\n\n return spectra", "title": "" }, { "docid": "9e169c47e16af7cc4c9ff02f4fe67429", "score": "0.6334418", "text": "def load_Otsu2018_spectra(path, every_nth=1):\n data = np.genfromtxt(path, delimiter=',', skip_header=1)\n\n # The first column is the id and is redundant\n data = data[:, 1:]\n\n spectra = []\n for i in range(data.shape[0]):\n if i % every_nth != 0:\n continue\n\n values = data[i, :]\n spectra.append(values)\n\n return np.array(spectra)", "title": "" }, { "docid": "6efe18ffcf388238d53cbcb7e3b041d5", "score": "0.6296896", "text": "def read_spectrum_data(fname):\n try:\n f = open(fname, 'r')\n except IOError:\n print \"cannot open \", fname\n else:\n data = np.genfromtxt(f, delimiter='')\n f.close()\n return data", "title": "" }, { "docid": "e982eddfdb6a2e2652271210c2c49e25", "score": "0.628855", "text": "def load_spectrum_medresIR(file, velScale):\n f_spec = open(file, 'rb')\n \n lineCount = 0\n waveNumber = []\n flux = []\n \n for line in f_spec:\n lineCount += 1\n if lineCount <= 6:\n continue\n \n fields = line.split()\n waveNumber.append( float(fields[0]) )\n flux.append( float(fields[1]) )\n \n waveNumber = np.array(waveNumber)\n flux = np.array(flux)\n \n print(file)\n logWave, specNew, vel = log_rebin2(waveNumber, flux, \n inMicrons=False, velScale=velScale)\n\n return logWave, specNew", "title": "" }, { "docid": "30a0f12191eb715101d5024376961514", "score": "0.6177745", "text": "def load(path, sr=22050, mono=True, offset=0.0, duration=None,\n dtype=np.float32):\n\n y = []\n with audioread.audio_open(os.path.realpath(path)) as input_file:\n sr_native = input_file.samplerate\n\n s_start = int(np.floor(sr_native * offset) * input_file.channels)\n\n if duration is None:\n s_end = np.inf\n else:\n s_end = s_start + int(np.ceil(sr_native * duration)\n * input_file.channels)\n\n n = 0\n\n for frame in input_file:\n frame = util.buf_to_float(frame, dtype=dtype)\n n_prev = n\n n = n + len(frame)\n\n if n < s_start:\n # offset is after the current frame\n # keep reading\n continue\n\n if s_end < n_prev:\n # we're off the end. stop reading\n break\n\n if s_end < n:\n # the end is in this frame. 
crop.\n frame = frame[:s_end - n_prev]\n\n if n_prev <= s_start < n:\n # beginning is in this frame\n frame = frame[(s_start - n_prev):]\n\n # tack on the current frame\n y.append(frame)\n\n if not len(y):\n # Zero-length read\n y = np.zeros(0, dtype=dtype)\n\n else:\n y = np.concatenate(y)\n\n if input_file.channels > 1:\n y = y.reshape((-1, 2)).T\n if mono:\n y = to_mono(y)\n\n if sr is not None:\n if y.ndim > 1:\n y = np.vstack([resample(yi, sr_native, sr) for yi in y])\n else:\n y = resample(y, sr_native, sr)\n\n else:\n sr = sr_native\n\n # Final cleanup for dtype and contiguity\n y = np.ascontiguousarray(y, dtype=dtype)\n\n return (y, sr)", "title": "" }, { "docid": "6a09adc9a33ebe9cb3c2ceec5f3456f0", "score": "0.6163589", "text": "def read_spectra(filename: str) -> Iterable[sus.MsmsSpectrum]:\n ext = os.path.splitext(filename.lower())[1]\n if ext == '.mgf':\n _read_spectra = _read_spectra_mgf\n elif ext == '.mzml':\n _read_spectra = _read_spectra_mzml\n elif ext == '.mzxml':\n _read_spectra = _read_spectra_mzxml\n else:\n logger.error('Unsupported peak file format (supported formats: MGF, '\n 'mzML, mzXML)')\n raise ValueError('Unsupported peak file format (supported formats: '\n 'MGF, mzML, mzXML)')\n\n for spec in _read_spectra(filename):\n if spec is not None:\n yield spec", "title": "" }, { "docid": "4be81cecee64fa9a1a167f837f3c2ad6", "score": "0.6147781", "text": "def _read_spectra_mgf(filename: str) -> Iterable[sus.MsmsSpectrum]:\n for spectrum_dict in tqdm.tqdm(pyteomics.mgf.read(filename),\n desc='Spectra read', unit='spectra'):\n identifier = spectrum_dict['params']['title']\n\n mz_array = spectrum_dict['m/z array']\n intensity_array = spectrum_dict['intensity array']\n retention_time = float(spectrum_dict['params'].get('rtinseconds', -1))\n\n precursor_mz = float(spectrum_dict['params']['pepmass'][0])\n if 'charge' in spectrum_dict['params']:\n precursor_charge = int(spectrum_dict['params']['charge'][0])\n else:\n return None\n\n spectrum = sus.MsmsSpectrum(\n identifier, precursor_mz, precursor_charge, mz_array,\n intensity_array, None, retention_time)\n\n spectrum.filename = spectrum_dict['params'].get(\n 'filename', os.path.splitext(os.path.basename(filename))[0])\n if 'scan' in spectrum_dict['params']:\n spectrum.scan = spectrum_dict['params']['scan']\n if 'cluster' in spectrum_dict['params']:\n spectrum.cluster = spectrum_dict['params']['cluster']\n\n yield spectrum", "title": "" }, { "docid": "60ed0273ce0c2c76f688067f641bf27d", "score": "0.611942", "text": "def readSpectrum(**kwargs):\n \n filename=kwargs.get('filename', 'apStar-test.fits')\n master = fits.open(HOME+filename)[0]\n spectra = fits.open(HOME+filename)[1]\n noise = fits.open(HOME+filename)[2]\n sky = fits.open(HOME+filename)[4]\n sky_err = fits.open(HOME+filename)[5]\n telluric = fits.open(HOME+filename)[6]\n tell_err = fits.open(HOME+filename)[7]\n rv_info = fits.open(HOME+filename)[9]\n\n \" conversion from pixel to wavelength, info available in the hdu header\"\n crval=spectra.header['CRVAL1']\n cdelt= spectra.header['CDELT1']\n wave=np.array(pow(10, crval+cdelt*np.arange(spectra.header['NAXIS1']))/10000)*u.micron #microns\n # convert fluxes from (10^-17 erg/s/cm^2/Ang) to ( erg/s/cm^2/Mircon)\n spectras=[1e-13*np.array(f)*u.erg/u.s/u.centimeter**2/u.micron for f in spectra.data]\n noises= [1e-13*np.array(f)*u.erg/u.s/u.centimeter**2/u.micron for f in noise.data]\n skys=[1e-13*np.array(f)*u.erg/u.s/u.centimeter**2/u.micron for f in sky.data]\n #create a splat Spectrum object just for the combine\n 
combined= splat.Spectrum(wave=wave, flux= spectra.data[0], noise= noise.data[0])\n #create APOGEE spectrum object\n sp= Spectrum(wave=combined.wave, combined=combined, noise=noises, sky=skys, visits= spectras, nvisits = len(spectra.data))\n return sp", "title": "" }, { "docid": "e8b18ddc2f81afe93a90bb5d52dc4ac7", "score": "0.6056962", "text": "def _read_spectra_mzml(filename: str) -> Iterable[sus.MsmsSpectrum]:\n with pyteomics.mzml.MzML(filename) as f_in:\n try:\n for spectrum_dict in tqdm.tqdm(f_in, desc='Spectra read',\n unit='spectra'):\n if int(spectrum_dict.get('ms level', -1)) == 2:\n precursor = spectrum_dict['precursorList']['precursor'][0]\n precursor_ion = (precursor['selectedIonList']\n ['selectedIon'][0])\n if 'charge state' in precursor_ion:\n precursor_charge = int(precursor_ion['charge state'])\n elif 'possible charge state' in precursor_ion:\n precursor_charge = int(\n precursor_ion['possible charge state'])\n else:\n logger.warning('Unknown precursor charge, skipped '\n 'spectrum...')\n continue\n spectrum = sus.MsmsSpectrum(\n spectrum_dict['id'],\n precursor_ion['selected ion m/z'],\n precursor_charge,\n spectrum_dict['m/z array'],\n spectrum_dict['intensity array'],\n None,\n (spectrum_dict['scanList']['scan'][0]\n ['scan start time']))\n spectrum.filename = spectrum_dict.get(\n 'filename',\n os.path.splitext(os.path.basename(filename))[0])\n if 'scan' in spectrum_dict:\n spectrum.scan = str(int(spectrum_dict['scan']))\n elif 'scan=' in spectrum.identifier:\n spectrum.scan = int(\n spectrum.identifier[\n spectrum.identifier.find('scan=')\n + len('scan='):])\n if 'cluster' in spectrum_dict:\n spectrum.cluster = int(spectrum_dict['cluster'])\n yield spectrum\n except LxmlError as e:\n logger.error('Failed to read file %s: %s', filename, e)", "title": "" }, { "docid": "0b42548cc5c824edc92a7b82371479de", "score": "0.6056593", "text": "def read_sounding_file(filename):\n data = np.genfromtxt(filename, dtype=None, skip_header=15, usecols=(1, 2, 3, 5, 6, 14))\n return data", "title": "" }, { "docid": "7cb2de4117af56dbc7b7082e2e55b241", "score": "0.6042696", "text": "def read_spectrum_alt(file_path):\n try:\n spectrum = pd.read_pickle(file_path)\n except OSError as e:\n raise e\n\n wave = spectrum['wavelength']\n flux = spectrum['flux']\n\n try:\n fluxerr = spectrum['uncertainty']\n except Exception:\n pass\n\n try:\n fluxerr = spectrum['error (RMS+SYS)']\n except Exception:\n pass\n\n return wave, flux, fluxerr, spectrum", "title": "" }, { "docid": "09c6214b041d6306e311cfb90f9f0d0f", "score": "0.6036134", "text": "def load_wav(\n path:str,\n sr:int=22050,\n maps:dict=None,\n)->[np.array,int, int]:\n maps = maps or get_maps()\n label_map = maps[\"label_map\"]\n remark_map = maps[\"remark_map\"]\n \n x, sr = librosa.load(path,sr=sr)\n filename = path.split(\"/\")[-1].split('.')[0]\n label = label_map[filename]\n remark = remark_map[filename]\n return x, sr, label, remark", "title": "" }, { "docid": "8fc2b88ab2a8abd43714903d84221c7f", "score": "0.6011125", "text": "def load_audio(self,audio_file_path):\n x,sr = librosa.load(audio_file_path, sr = 16000)\n return x", "title": "" }, { "docid": "7f004e5b7b183593b4ee1021b6ce2181", "score": "0.6004323", "text": "def extract_spectra_from_file(\n log,\n pathToSpectrum,\n convertLumToFlux=False):\n ################ > IMPORTS ################\n ## STANDARD LIB ##\n import os\n ## THIRD PARTY ##\n import numpy as np\n ## LOCAL APPLICATION ##\n import dryxPython.astrotools as at\n\n ################ > VARIABLE SETTINGS ######\n 
################ >ACTION(S) ################\n # USE numPy TO EXTRACT THE DATA FROM FILE\n pwd = os.getcwd()\n log.debug('pwd %s' % (pwd,))\n log.debug('pathToSpectrum %s' % (pathToSpectrum,))\n data = np.genfromtxt(pathToSpectrum, skip_header=0, usecols=(0, 1))\n wavelengthArray = data[:, 0]\n # minWl = wavelengthArray.min()\n # maxWl = wavelengthArray.max()\n luminosityArray = data[:, 1]\n # CONVERT TO FLUX: F = L / 4*pi*(r**2)\n if convertLumToFlux:\n fluxArray = at.luminosity_to_flux(luminosityArray, 1e-5)\n else:\n fluxArray = luminosityArray\n\n # DEBUG BLOCK\n log.debug('pathToSpectrum: %s' % (pathToSpectrum,))\n # for i in range(len(fluxArray)):\n # print \"\"\"%s\\t%s\\t%s\"\"\" % (wavelengthArray[i], luminosityArray[i], fluxArray[i] )\n # print \"\\n\\n\\n\"\n return wavelengthArray, fluxArray", "title": "" }, { "docid": "64e450100a8b39ea389a4a5a3a4c44ef", "score": "0.5998584", "text": "def load_specCSVs_from_directory(path: str, fixName: str = None, maxSpectra=1e6) -> Tuple[List[str], np.ndarray]:\n spectra: np.ndarray = None\n names: list = []\n numSpectra: int = 0\n for file in os.listdir(path):\n if file.lower().endswith('.csv') and numSpectra < maxSpectra:\n curSpec: list = []\n specName = fixName if fixName is not None else file.lower().split('.csv')[0]\n names.append(specName)\n\n with open(os.path.join(path, file), 'r') as fp:\n if spectra is None:\n wavenumbers = []\n # for index, row in enumerate(reader):\n for line in fp.readlines():\n wavenum, intensity = get_numbers_from_line(line)\n curSpec.append(intensity)\n wavenumbers.append(wavenum)\n\n spectra = np.array(wavenumbers)\n else:\n tmpSpec = []\n tmpWavenumber = []\n for line in fp.readlines():\n wavenum, intensity = get_numbers_from_line(line)\n tmpSpec.append(intensity)\n tmpWavenumber.append(wavenum)\n\n tmpSpec = np.array(tmpSpec)\n tmpWavenumber = np.array(tmpWavenumber)\n for number in spectra[:, 0]:\n index = np.argmin(np.abs(tmpWavenumber - number))\n curSpec.append(tmpSpec[index])\n\n if len(spectra.shape) == 1:\n spectra = np.append(spectra[:, np.newaxis], np.array(curSpec)[:, np.newaxis], axis=1)\n else:\n spectra = np.append(spectra, np.array(curSpec)[:, np.newaxis], axis=1)\n\n numSpectra += 1\n\n return names, spectra", "title": "" }, { "docid": "611f3bae5860c655a3422d142bd83316", "score": "0.5967071", "text": "def import_spear_frames(filename):\n def rhdr(f):\n l = f.readline()\n if l == '':\n raise ValueError(f\"Reached EOF while parsing file header of {filename}\")\n return l[:-1]\n \n file = open(filename, 'r')\n line = rhdr(file)\n if not line == \"par-text-frame-format\":\n raise ValueError(f\"Expected 'par-text-frame-format' but got '{line}'\")\n line = rhdr(file)\n if not line == \"point-type index frequency amplitude\":\n raise ValueError(f\"Expected 'point-type index frequency amplitude' but got '{line}'\")\n\n # flush remaining header lines\n while True:\n if line == \"frame-data\":\n break\n line = rhdr(file)\n\n # file now at frame-data, read spectra till eof\n frames = []\n line = file.readline()\n while (line):\n spec = _read_spear_frame(line[:-1])\n if spec:\n frames.append(spec)\n line = file.readline()\n return frames", "title": "" }, { "docid": "f9026f2657d1061aedec62cbcc887b4d", "score": "0.5966715", "text": "def _load_from_file(self,filename,nfilt=1,norm=False):\n filename_orig = filename\n if not os.path.isfile(filename):\n realdir = os.path.dirname(os.path.realpath(__file__))\n filename = os.path.join(realdir, 'rad_data', filename)\n if not os.path.isfile(filename):\n raise 
OSError('Cannot find filter file %s' % filename_orig)\n\n self.filename = filename\n self.nfilt = nfilt\n\n f,t = np.loadtxt(filename,unpack=True,skiprows=1)\n t = t**nfilt\n l = 1.0e4/f # microns\n l = np.append(np.insert(l,0,1e6),1e-6)\n t = np.append(np.insert(t,0,t[0]),t[-1])\n self.wavelength_raw = l\n if norm: t /= np.max(t)\n self.trans_raw = threshold(t,low=0.0,high=1.0)", "title": "" }, { "docid": "48d8a7df5f8ee3170ff1ddb0ad2f17c3", "score": "0.5957273", "text": "def load_data(self, file_path):\n self.data, self.sr = librosa.load(file_path, sr=None)", "title": "" }, { "docid": "a02a5e35d1e2ef72bb78542412dcff16", "score": "0.59471905", "text": "def read_ps(file_name,spectra=None):\n \n data=np.loadtxt(file_name)\n if spectra is None:\n return data[:,0],data[:,1]\n else:\n ps={}\n l=data[:,0]\n for c,f in enumerate(spectra):\n ps[f]=data[:,c+1]\n return(l,ps)", "title": "" }, { "docid": "16dfb9d31b88cfd9e53a1fc2c9eae61b", "score": "0.59283096", "text": "def load_raw(self, wave_units='AA'):\n funit = q.erg / q.s / q.cm**2 / q.AA\n self.WavelengthUnit = wave_units\n self.ZeroPointUnit = str(funit)\n x, f = self.raw\n\n # Rebin Vega to filter\n vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')\n vega_data = np.genfromtxt(vega_file, unpack=True)[: 2]\n vega_data[0] *= 10000\n vega = rebin_spec(vega_data, x)\n\n # Calculate the filter's properties\n self.ZeroPoint = np.trapz(f * x * vega, x=x) / np.trapz(f * x, x=x)\n self.WavelengthPeak = x[np.argmax(f)]\n self.WavelengthMin = x[np.where(f > f.max() / 100.)][0]\n self.WavelengthMax = x[np.where(f > f.max() / 100.)][-1]\n self.WavelengthEff = np.trapz(f * x**2 * vega, x=x) / np.trapz(f * x * vega, x=x)\n self.WavelengthMean = np.trapz(f * x, x=x) / np.trapz(f, x=x)\n self.WidthEff = np.trapz(f, x=x) / f.max()\n self.WavelengthPivot = np.sqrt(np.trapz(f, x=x) / np.trapz(f / x**2, x=x))\n self.WavelengthPhot = np.trapz(f * vega * x**3, x=x) / np.trapz(f * vega * x**2, x=x)\n\n # Half max stuff\n halfmax = f.max() / 2.\n self.hm_x1 = x[f > halfmax][0]\n self.hm_x2 = x[f > halfmax][-1]\n self.FWHM = self.hm_x2 - self.hm_x1\n self.WavelengthCen = (self.hm_x1 + self.hm_x2) / 2.\n\n # Add missing attributes\n self.path = ''\n self.pixels_per_bin = self.raw.shape[-1]\n self.n_bins = 1", "title": "" }, { "docid": "3c547fc217ec58144d6a53e7d39ac4c7", "score": "0.59149337", "text": "def load_audio_signal(audio_file, verbose=1):\n print(\"\\n--------------------------------\")\n print(\"Loading audio signal ...\")\n y, sr = sf.read(audio_file, dtype='float32')\n print(\"First load complete\")\n\n # From stereo to mono-channel, carefully due to mem limitations\n if len(y.shape) == 2:\n # AWS 1 Gb-ram VirtualMachine has very limited memory\n mem_to_alloc = y.shape[0] * 32/8 # array len * Float 32 bit / 8 bit per byte\n mem_available = 0.9 * float(pu.virtual_memory().available) # bytes\n if y.shape[1] == 2:\n if mem_to_alloc > mem_available:\n print(\"ERROR: max allocation reached in VM: \", mem_to_alloc * 0.000001, \" Mb, from: \",\n mem_available * 0.000001, \"Mb\")\n raise MemoryError\n y = y.mean(axis=1) # from 2 channel wav to 1 channel\n print(\"Audio signal loaded \")\n\n if verbose:\n print(\"Sample rate: \", sr)\n describe_signal(y, \"Loaded wav signal\")\n print(\"--------------------------------\\n\")\n return y, sr", "title": "" }, { "docid": "0943d73c2aaf0eaa9c613688e9bedd79", "score": "0.59018517", "text": "def from_file(cls, file_: typing.Union[str, typing.BinaryIO]) -> 'Sound':\n\n data, samplerate = 
soundfile.read(file_)\n data /= numpy.max([-data.min(), data.max()])\n return cls(data, samplerate)", "title": "" }, { "docid": "70cc964f0cab2c0bad43d628a0100bae", "score": "0.5895637", "text": "def read_NGSL_spectra(infile, path_spectra, l_out, l_norm_i, l_norm_f):\n\n # Reading list of spectra\n\n f = open(infile,'r')\n data = f.readlines()\n f.close()\n\n file_spec_star = []\n for line in data:\n p = line.split()\n if p[0] <> '#':\n file_spec_star.append(str(p[0]))\n\n # Loading list of spectra\n\n star_spectra = np.zeros(len(file_spec_star) * len(l_out)).reshape(len(file_spec_star), len(l_out))\n fobsnorm_stars = np.zeros(len(file_spec_star))\n for i in range(len(file_spec_star)):\n\n #print i, file_spec_star[i],len(file_spec_star)\n f = open(path_spectra + file_spec_star[i], 'r')\n data = f.readlines()\n f.close()\n\n l = []\n f = []\n for line in data:\n p = line.split()\n if p[0] <> '#':\n l.append(float(p[0]))\n f.append(float(p[1]))\n f = np.array(f)\n l = np.array(l)\n\n # Interpolate star spectrum to be in the same lambda array\n\n star_spectra[i] = np.interp(l_out, l, f)\n\n # Normalization factor of the spectrum\n\n fobsnorm_stars[i] = norm_spec(l_out, star_spectra[i], l_norm_i, l_norm_f)\n\n return star_spectra, fobsnorm_stars", "title": "" }, { "docid": "a170ed5aeba97569199921308fcc5693", "score": "0.58949393", "text": "def test_load(self):\n\n # load it via Filter\n print(data_filename('filters'))\n f = Filter('V', path=data_filename('filters'))\n\n # load it as Spectrum\n s = SpectrumAscii(data_filename('filters/V.txt'), separator=None)\n\n # should be identical\n assert np.array_equal(f.wave, s.wave)\n assert np.array_equal(f.throughput, s.flux)", "title": "" }, { "docid": "c86e7d947b8a78ca721f5f3eab8023cd", "score": "0.5892054", "text": "def read_samples_from_file(self):\n self.log(u\"Loading audio data...\")\n\n # check the file can be read\n if not gf.file_can_be_read(self.file_path):\n self.log_exc(u\"File '%s' cannot be read\" % (self.file_path), None, True, OSError)\n\n # determine if we need to convert the audio file\n convert_audio_file = (\n (self.file_format is None) or\n (\n (self.rconf.safety_checks) and\n (self.file_format != (\"pcm_s16le\", 1, self.rconf.sample_rate))\n )\n )\n\n # convert the audio file if needed\n if convert_audio_file:\n # convert file to PCM16 mono WAVE with correct sample rate\n self.log(u\"self.file_format is None or not good => converting self.file_path\")\n tmp_handler, tmp_file_path = gf.tmp_file(suffix=u\".wav\", root=self.rconf[RuntimeConfiguration.TMP_PATH])\n self.log([u\"Temporary PCM16 mono WAVE file: '%s'\", tmp_file_path])\n try:\n self.log(u\"Converting audio file to mono...\")\n converter = FFMPEGWrapper(rconf=self.rconf, logger=self.logger)\n converter.convert(self.file_path, tmp_file_path)\n self.file_format = (\"pcm_s16le\", 1, self.rconf.sample_rate)\n self.log(u\"Converting audio file to mono... 
done\")\n except FFMPEGPathError:\n gf.delete_file(tmp_handler, tmp_file_path)\n self.log_exc(u\"Unable to call ffmpeg executable\", None, True, AudioFileConverterError)\n except OSError:\n gf.delete_file(tmp_handler, tmp_file_path)\n self.log_exc(u\"Audio file format not supported by ffmpeg\", None, True, AudioFileUnsupportedFormatError)\n else:\n # read the file directly\n if self.rconf.safety_checks:\n self.log(u\"self.file_format is good => reading self.file_path directly\")\n else:\n self.log_warn(u\"Safety checks disabled => reading self.file_path directly\")\n tmp_handler = None\n tmp_file_path = self.file_path\n\n # TODO allow calling C extension cwave to read samples faster\n try:\n self.audio_format = \"pcm16\"\n self.audio_channels = 1\n self.audio_sample_rate, self.__samples = scipywavread(tmp_file_path)\n # scipy reads a sample as an int16_t, that is, a number in [-32768, 32767]\n # so we convert it to a float64 in [-1, 1]\n self.__samples = self.__samples.astype(\"float64\") / 32768\n self.__samples_capacity = len(self.__samples)\n self.__samples_length = self.__samples_capacity\n self._update_length()\n except ValueError:\n self.log_exc(u\"Audio format not supported by scipywavread\", None, True, AudioFileUnsupportedFormatError)\n\n # if we converted the audio file, delete the temporary converted audio file\n if convert_audio_file:\n gf.delete_file(tmp_handler, tmp_file_path)\n self.log([u\"Deleted temporary audio file: '%s'\", tmp_file_path])\n\n self._update_length()\n self.log([u\"Sample length: %.3f\", self.audio_length])\n self.log([u\"Sample rate: %d\", self.audio_sample_rate])\n self.log([u\"Audio format: %s\", self.audio_format])\n self.log([u\"Audio channels: %d\", self.audio_channels])\n self.log(u\"Loading audio data... done\")", "title": "" }, { "docid": "6cbc70ff44e57f079757e2c4f37941c0", "score": "0.58837366", "text": "def load_obs_lris(filename=None, objname=None, #dist = 1e-5, vel = 0.0,\n wlo=3550., whi=5500., verbose=False,\n phottable='data/f2_apcanfinal_6phot_v2.fits',\n **kwargs):\n from sedpy import observate\n\n obs ={}\n \n ####### SPECTRUM #######\n if verbose:\n print('Loading data from {0}'.format())\n\n #fluxconv = np.pi * 4. 
* (dist * 1e6 * pc)**2 / lsun #erg/s/AA/cm^2 to L_sun/AA\n fluxconv = 1.0\n scale = 1e0 #testing\n #redshift = vel / 2.998e8\n dat = pyfits.getdata(filename)\n hdr = pyfits.getheader(filename)\n \n obs['wavelength'] = dat[0]['wave_opt']\n obs['spectrum'] = dat[0]['spec']\n obs['unc'] = 1./np.sqrt(dat[0]['ivar'])\n #masking\n obs['mask'] = ((obs['wavelength'] >= wlo ) & (obs['wavelength'] <= whi))\n #obs['wavelength'] /= (1.0 + redshift)\n \n\n ######## PHOTOMETRY ######\n if verbose:\n print('Loading mags from {0} for {1}'.format(phottable, objname))\n mags, mags_unc, flag = query_phatcat(objname, phottable = phottable, **kwargs)\n \n obs['filters'] = observate.load_filters(['wfc3_uvis_'+b.lower() for b in\n [\"F275W\", \"F336W\", \"F475W\", \"F814W\"]] +\n ['wfc3_ir_'+b.lower() for b in\n [\"F110W\", \"F160W\"]])\n obs['maggies'] = 10**(-0.4 * (mags -\n np.array([f.ab_to_vega for f in obs['filters']]) -\n 2.5*np.log10(scale) ))\n obs['maggies_unc'] = mags_unc * obs['maggies'] / 1.086\n\n return obs", "title": "" }, { "docid": "8646406be43a7893ea3ae78cbe1f6b8c", "score": "0.58546954", "text": "def read_frame_as_spectra(filename, night, expid, band, single=False):\n fr = read_frame(filename)\n if fr.fibermap is None:\n raise RuntimeError(\"reading Frame files into Spectra only supported if a fibermap exists\")\n\n nspec = len(fr.fibermap)\n \n fmap = np.zeros(shape=(nspec,), dtype=spectra_columns())\n for s in range(nspec):\n for tp in fr.fibermap.dtype.fields:\n fmap[s][tp] = fr.fibermap[s][tp]\n\n fmap[:][\"NIGHT\"] = night\n fmap[:][\"EXPID\"] = expid\n\n fmap = encode_table(fmap)\n\n bands = [ band ]\n\n mask = None\n if fr.mask is not None:\n mask = {band : fr.mask}\n\n res = None\n if fr.resolution_data is not None:\n res = {band : fr.resolution_data}\n\n extra = None\n if fr.chi2pix is not None:\n extra = {band : {\"CHI2PIX\" : fr.chi2pix}}\n\n spec = Spectra(bands, {band : fr.wave}, {band : fr.flux}, {band : fr.ivar}, \n mask=mask, resolution_data=res, fibermap=fmap, meta=fr.meta, \n extra=extra, single=single)\n\n return spec", "title": "" }, { "docid": "3b163a0ccdad42d5bcb496c0cd167e01", "score": "0.5851949", "text": "def raspi_import(path, channels=5):\n\n with open(path, 'r') as fid:\n sample_period = np.fromfile(fid, count=1, dtype=float)[0]\n data = np.fromfile(fid, dtype=np.uint16)\n data = data.reshape((-1, channels))\n return sample_period, data", "title": "" }, { "docid": "ceee1fedcd48c0a307c0c62c467e80e3", "score": "0.58441967", "text": "def load_data(spectra_path, labels_path):\n\n with open(spectra_path, \"rb\") as fp:\n spectra = np.load(fp)\n\n with open(labels_path, \"rb\") as fp:\n labels = np.load(fp)\n\n if spectra.shape[0] != labels.size:\n raise ValueError(\n f\"spectra and labels have different shapes ({spectra.shape[0]} != {labels.size})\"\n )\n\n return (spectra, labels)", "title": "" }, { "docid": "ec42dae92078692a3141b5d7c7e6f895", "score": "0.5817368", "text": "def __init__(self, pha_file, srm_file=None, srm_custom=None, custom_channel_bins=None, custom_photon_bins=None, **kwargs):\n\n self._construction_string = f\"RhessiLoader(pha_file={pha_file},srm_file={srm_file},srm_custom={srm_custom},custom_channel_bins={custom_channel_bins},custom_photon_bins={custom_photon_bins},**{kwargs})\"\n self._loaded_spec_data = self._load1spec(pha_file, srm_file, srm=srm_custom, channel_bins=custom_channel_bins, photon_bins=custom_photon_bins)\n\n self._time_fmt, self._time_scale = \"isot\", \"utc\"\n self._start_background_time, self._end_background_time = None, None\n 
self._start_event_time, self._end_event_time = self._full_obs_time[0], self._full_obs_time[1]\n\n # used to give the user a warning if incompatible times are set\n self.__warn = True", "title": "" }, { "docid": "506bd8062113baa6793632900767e4d0", "score": "0.5812433", "text": "def read(self, path: str):\n\n data = np.load(path)\n\n start, end, interval = data[\"shape\"]\n self._shape = SpectralShape(start, end, interval)\n self._basis_functions = data[\"basis_functions\"]\n self._means = data[\"means\"]\n self._selector_array = data[\"selector_array\"]", "title": "" }, { "docid": "28b57e62c81d9d0b1a31fe94a7cd9162", "score": "0.57945853", "text": "def __read_file(self):\n self.sample_rate, self.signal = scipy.io.wavfile.read(self.file_path)", "title": "" }, { "docid": "2f56a516510e27ef6be8112bee55ad1e", "score": "0.57781756", "text": "def _load1spec(self, f_pha, f_arf=None, f_rmf=None, srm=None, channel_bins=None, photon_bins=None):\n\n # what files might be needed (for NuSTAR)\n f_arf = f_pha[:-3]+\"arf\" if type(f_arf) == type(None) else f_arf\n f_rmf = f_pha[:-3]+\"rmf\" if type(f_rmf) == type(None) else f_rmf\n\n # need effective exposure and energy binning since likelihood works on counts, not count rates etc.\n _, counts, eff_exp = io._read_pha(f_pha)\n\n # now calculate the SRM or use a custom one if given\n if type(srm) == type(None):\n\n # if there is an ARF file load it in\n if os_path.isfile(f_arf):\n e_lo_arf, e_hi_arf, eff_area = io._read_arf(f_arf)\n\n # if there is an RMF file load it in and convert to a redistribution matrix\n if os_path.isfile(f_rmf):\n e_lo_rmf, e_hi_rmf, ngrp, fchan, nchan, matrix, redist_m = self._load_rmf(f_rmf)\n\n srm = nu_spec.make_srm(rmf_matrix=redist_m, arf_array=eff_area)\n else:\n e_lo_arf, e_hi_arf, eff_area = None, None, None\n e_lo_rmf, e_hi_rmf, ngrp, fchan, nchan, matrix, redist_m = None, None, None, None, None, None, None\n\n channel_bins = self._calc_channel_bins(e_lo_rmf, e_hi_rmf) if type(channel_bins) == type(None) else channel_bins\n channel_binning = np.diff(channel_bins).flatten()\n\n phot_channels = channel_bins if type(photon_bins) == type(None) else photon_bins\n phot_binning = np.diff(phot_channels).flatten()\n\n # get the count rate information\n count_rate, count_rate_error = nu_spec.flux_cts_spec(f_pha, bin_size=channel_binning)\n\n # what spectral info you want to know from this observation\n return {\"photon_channel_bins\": phot_channels,\n \"photon_channel_mids\": np.mean(phot_channels, axis=1),\n \"photon_channel_binning\": phot_binning,\n \"count_channel_bins\": channel_bins,\n \"count_channel_mids\": np.mean(channel_bins, axis=1),\n \"count_channel_binning\": channel_binning,\n \"counts\": counts,\n \"count_error\": np.sqrt(counts),\n \"count_rate\": count_rate,\n \"count_rate_error\": count_rate_error,\n \"effective_exposure\": eff_exp,\n \"srm\": srm,\n \"extras\": {\"pha.file\": f_pha,\n \"arf.file\": f_arf,\n \"arf.e_lo\": e_lo_arf,\n \"arf.e_hi\": e_hi_arf,\n \"arf.effective_area\": eff_area,\n \"rmf.file\": f_rmf,\n \"rmf.e_lo\": e_lo_rmf,\n \"rmf.e_hi\": e_hi_rmf,\n \"rmf.ngrp\": ngrp,\n \"rmf.fchan\": fchan,\n \"rmf.nchan\": nchan,\n \"rmf.matrix\": matrix,\n \"rmf.redistribution_matrix\": redist_m}\n } # this might make it easier to add different observations together", "title": "" }, { "docid": "ffbe4934f164da4198a3b6a65dd7f68a", "score": "0.5758071", "text": "def load_spectrum_observations(name, low_binning=True):\n if low_binning == True:\n prefix = 'spectra/low_binning'\n else:\n prefix = 'spectra'\n if 
name == 'joint':\n spec_obs_list = SpectrumObservationList()\n # extend the list adding all the other SpectrumObservationList\n for n in {'fermi', 'magic', 'hess', 'fact', 'veritas'}:\n spectra_path = f'{prefix}/{n}'\n spec_obs = SpectrumObservationList.read(spectra_path)\n spec_obs_list.extend(spec_obs)\n \n else:\n spectra_path = f'{prefix}/{name}'\n spec_obs_list = SpectrumObservationList.read(spectra_path)\n\n for obs in spec_obs_list:\n obs.hi_threshold = fit_range[name][1]\n obs.lo_threshold = fit_range[name][0]\n\n return spec_obs_list, fit_range[name]", "title": "" }, { "docid": "7d1845e5b39e7801d5aa1403ec3e425e", "score": "0.5755667", "text": "def load_wave(path: str, camera_name: str) -> np.ndarray:\n # Dispatch call to load_wave_rate\n rate, wav = load_wave_rate(path, camera_name)\n # Return just the wave\n return wav", "title": "" }, { "docid": "a9df05291b6478b554965f2f38db5f8d", "score": "0.5745742", "text": "def load_recordings(paths=[\"recordings\"], label_type=\"number\", sr=RATE):\n res = []\n for path in paths:\n print(f\"Loading from {path}\")\n for f in tqdm(sorted(os.listdir(path))):\n if f.endswith('.wav'):\n if \"pitch\" in f and label_type == \"speaker\":\n # do not consider pitch alteration\n next\n else:\n # Load file and extract features\n audio, sample_rate = librosa.load(path + \"/\" + f, sr=sr)\n res.append(audio)\n return np.array(res)", "title": "" }, { "docid": "3837033a344c0fc7224def93abc28239", "score": "0.5742266", "text": "def load_spectrum(self):\n fcent = self.get_param('fcent')\n bw = self.get_param('bw')\n flr = self.get_param('det_floor')\n f = self.frequency\n t = np.ones_like(f)\n t[f<fcent*(1-bw/2)] = flr\n t[f>fcent*(1+bw/2)] = flr\n self.spectrum = t\n return self.spectrum", "title": "" }, { "docid": "fb37fffad626c50193c9eaa3e866e794", "score": "0.57272524", "text": "def _load1spec(self, f_pha, f_srm, srm=None, channel_bins=None, photon_bins=None):\n # need effective exposure and energy binning since likelihood works on counts, not count rates etc.\n obs_channel_bins, self._channel_bins_inds_perspec, self._time_bins_perspec, self._lvt_perspec, self._counts_perspec, self._counts_err_perspec, self._count_rate_perspec, self._count_rate_error_perspec = self._getspec(\n f_pha)\n\n # now calculate the SRM or use a custom one if given\n if type(srm) == type(None):\n # needs an srm file load it in\n srm_photon_bins, srm_channel_bins, srm = self._getsrm(f_srm)\n # make sure the SRM will only produce counts to match the data\n data_inds2match = np.where((obs_channel_bins[0, 0] <= srm_channel_bins[:, 0]) & (srm_channel_bins[:, 1] <= obs_channel_bins[-1, -1]))\n srm = srm[:, data_inds2match[0]]\n else:\n srm_photon_bins = None\n\n photon_bins = srm_photon_bins if type(photon_bins) == type(None) else photon_bins\n photon_binning = np.diff(photon_bins).flatten()\n\n # from the srm file #channel_binning = np.diff(channel_bins).flatten()\n channel_bins = obs_channel_bins if type(channel_bins) == type(None) else channel_bins\n\n # default is no background and all data is the spectrum to be fitted\n self._full_obs_time = [self._time_bins_perspec[0, 0], self._time_bins_perspec[-1, -1]]\n counts = np.sum(self._data_time_select(stime=self._full_obs_time[0], full_data=self._counts_perspec, etime=self._full_obs_time[1]), axis=0)\n counts_err = np.sqrt(np.sum(self._data_time_select(stime=self._full_obs_time[0], full_data=self._counts_err_perspec,\n etime=self._full_obs_time[1])**2, axis=0)) # sum errors in quadrature, Poisson still sqrt(N)\n\n _livetimes = 
np.mean(self._data_time_select(stime=self._full_obs_time[0], full_data=self._lvt_perspec,\n etime=self._full_obs_time[1]), axis=0) # to convert a model count rate to counts, so need mean\n eff_exp = np.diff(self._full_obs_time)[0].to_value(\"s\")*_livetimes\n\n channel_binning = np.diff(obs_channel_bins, axis=1).flatten()\n count_rate = counts/eff_exp/channel_binning # count rates from here are counts/s/keV\n count_rate_error = counts_err/eff_exp/channel_binning # was np.sqrt(counts)/eff_exp/channel_binning\n\n # what spectral info you want to know from this observation\n return {\"photon_channel_bins\": photon_bins,\n \"photon_channel_mids\": np.mean(photon_bins, axis=1),\n \"photon_channel_binning\": photon_binning,\n \"count_channel_bins\": channel_bins,\n \"count_channel_mids\": np.mean(channel_bins, axis=1),\n \"count_channel_binning\": channel_binning,\n \"counts\": counts,\n \"count_error\": counts_err,\n \"count_rate\": count_rate,\n \"count_rate_error\": count_rate_error,\n \"effective_exposure\": eff_exp,\n \"srm\": srm,\n \"extras\": {\"pha.file\": f_pha,\n \"srm.file\": f_srm,\n \"counts=data-bg\": False}\n } # this might make it easier to add different observations together", "title": "" }, { "docid": "e582b698cd601da92bcddfed076867a0", "score": "0.57265985", "text": "def load_audio_file(filepath, fs, mono=True, mode='normal'):\n # TODO: mode is temporary, remove after validating if load function of librosa 0.7 is faster or not\n if mode in ['normal']:\n import subprocess\n ch = 1 if mono else 2\n call = ['ffmpeg', '-v', 'quiet', '-y', '-i', filepath, '-f', 's16le', '-ac', f'{ch}', '-ar', f'{fs}', 'pipe:1']\n proc = subprocess.Popen(call, stdout=subprocess.PIPE, bufsize=-1)\n signal, _ = proc.communicate()\n signal = np.frombuffer(signal, dtype=np.int16).astype(np.float32)\n if not mono:\n # TODO: implement stereo\n signal = signal.reshape((-1, ch)).T\n return signal\n elif mode in ['librosa']:\n signal = librosa.core.load(path=filepath, sr=fs, mono=mono)[0]\n return signal\n else:\n raise NotImplementedError", "title": "" }, { "docid": "513f6c4c183494ea10c402ea29aeb65c", "score": "0.57229495", "text": "def read_spex(self, line, path='./'):\n self.read_spectra(line, path)\n\n # add units\n self.fluxes = self.fluxes.value*(u.erg/((u.cm**2)*u.s*u.angstrom))\n self.uncs = self.uncs.value*(u.erg/((u.cm**2)*u.s*u.angstrom))", "title": "" }, { "docid": "8ac6ae745d468adf7de16fedc70a616b", "score": "0.57212514", "text": "def _read_spectra_mzxml(filename: str) -> Iterable[sus.MsmsSpectrum]:\n with pyteomics.mzxml.MzXML(filename) as f_in:\n try:\n for spectrum_dict in tqdm.tqdm(f_in, desc='Spectra read',\n unit='spectra'):\n if int(spectrum_dict.get('msLevel', -1)) == 2:\n if 'precursorCharge' in spectrum_dict['precursorMz'][0]:\n precursor_charge = (spectrum_dict['precursorMz'][0]\n ['precursorCharge'])\n else:\n logger.warning('Unknown precursor charge, skipped '\n 'spectrum...')\n continue\n spectrum = sus.MsmsSpectrum(\n spectrum_dict['id'],\n spectrum_dict['precursorMz'][0]['precursorMz'],\n precursor_charge,\n spectrum_dict['m/z array'],\n spectrum_dict['intensity array'],\n None,\n spectrum_dict['retentionTime'])\n spectrum.scan = int(spectrum_dict['id'])\n spectrum.filename = os.path.splitext(\n os.path.basename(filename))[0]\n yield spectrum\n except LxmlError as e:\n logger.warning('Failed to read file %s: %s', filename, e)", "title": "" }, { "docid": "34a564e78caeac86331688ba8e32fe33", "score": "0.57198507", "text": "def load_obs_mmt(filename=None, objname=None, #dist = 1e-5, 
vel = 0.0,\n wlo=3750., whi=7200., verbose=False,\n phottable='data/f2_apcanfinal_6phot_v2.fits',\n **kwargs):\n from sedpy import observate\n\n obs ={}\n\n ####### SPECTRUM #######\n if verbose:\n print('Loading data from {0}'.format(filename))\n\n scale = 1e0 #testing\n #fluxconv = np.pi * 4. * (dist * 1e6 * pc)**2 / lsun #erg/s/AA/cm^2 to L_sun/AA\n fluxconv = 1.0#5.0e-20 * scale #approximate counts to erg/s/AA/cm^2\n #redshift = 0.0 #vel / 2.998e8\n dat = np.squeeze(pyfits.getdata(filename))\n hdr = pyfits.getheader(filename)\n \n crpix = (hdr['CRPIX1'] -1) #convert from FITS to numpy indexing\n try:\n cd = hdr['CDELT1']\n except (KeyError):\n cd = hdr['CD1_1']\n\n obs['wavelength'] = (np.arange(dat.shape[1]) - crpix) * cd + hdr['CRVAL1']\n obs['spectrum'] = dat[0,:] * fluxconv\n obs['unc'] = np.sqrt(dat[1,:]) * fluxconv\n \n #Masking. should move to a function that reads a mask definition file\n #one should really never mask in the rest frame - that should be modeled!\n obs['mask'] = ((obs['wavelength'] >= wlo ) & (obs['wavelength'] <= whi))\n obs['mask'] = obs['mask'] & ((obs['wavelength'] <= 5570) |\n (obs['wavelength'] >= 5590)) #mask OI sky line\n obs['mask'] = obs['mask'] & ((obs['wavelength'] <= 6170) |\n (obs['wavelength'] >= 6180)) #mask...something.\n\n #obs['wavelength'] /= (1.0 + redshift)\n\n ######## PHOTOMETRY ########\n if verbose:\n print('Loading mags from {0} for {1}'.format(phottable, objname))\n mags, mags_unc, flag = query_phatcat(objname, phottable = phottable, **kwargs)\n \n obs['filters'] = observate.load_filters(['wfc3_uvis_'+b.lower() for b in\n [\"F275W\", \"F336W\", \"F475W\", \"F814W\"]] +\n ['wfc3_ir_'+b.lower() for b in\n [\"F110W\", \"F160W\"]])\n obs['maggies'] = 10**(-0.4 * (mags -\n np.array([f.ab_to_vega for f in obs['filters']]) -\n 2.5*np.log10(scale) ))\n obs['maggies_unc'] = mags_unc * obs['maggies'] / 1.086\n\n return obs", "title": "" }, { "docid": "5e5255254ae533dfb54a82325159b2de", "score": "0.57169896", "text": "def openspectrumfile(self):\n print(\"Open file dialog...\")\n filedialog = QtWidgets.QFileDialog(self)\n filedialog.setWindowTitle('Load spectrum file')\n filedialog.setNameFilters(self.filterslist)\n if self.selectedfiltername is not None:\n filedialog.selectNameFilter(self.selectedfiltername)\n if self.selectedfilepath is None:\n filedialog.setDirectory(\"~\")\n else:\n filedialog.setDirectory(self.selectedfilepath)\n\n if filedialog.exec_() == filedialog.Accepted:\n fname = filedialog.selectedFiles()[0]\n print(\"Loading file: \" + fname)\n\n self.selectedfilepath = os.path.dirname(os.path.abspath(fname))\n\n for parsermodule in self.availableparsers:\n if parsermodule.filtername == filedialog.selectedNameFilter(): # Only if there is a coincident parser\n self.selectedfiltername = parsermodule.filtername # Remember the previous filtername\n\n currentparse = parsermodule.parser(fname) # Remember parser for this programme run\n\n traces = currentparse.parse() #\n\n # Data parse\n rowsnumber = self.spectraTableWidget.rowCount()\n\n for trace in traces:\n self.spectraTableWidget.insertRow(rowsnumber)\n\n # 1st column - Filename and data\n dataitem = QtWidgets.QTableWidgetItem(os.path.basename(fname))\n dataitem.setData(core.Qt.UserRole, trace)\n dataitem.setFlags(self.filenameItemFlags)\n self.spectraTableWidget.setItem(rowsnumber, 0, dataitem)\n\n # 2nd column - Trace number in file\n tracenumberinfileitem = QtWidgets.QTableWidgetItem(str(trace.number))\n tracenumberinfileitem.setFlags(self.traceItemFlags)\n 
self.spectraTableWidget.setItem(rowsnumber, 1, tracenumberinfileitem)\n\n # 2nd column - RBW\n if currentparse.rbw != 0:\n rbwitem = QtWidgets.QTableWidgetItem(str(trace.rbw))\n rbwitem.setData(core.Qt.UserRole, trace.rbw)\n else:\n rbwitem = QtWidgets.QTableWidgetItem(\"0.0\")\n self.spectraTableWidget.setItem(rowsnumber, 2, rbwitem)\n\n # 3rd column - Measurement unit\n combobox = QtWidgets.QComboBox()\n for unit in SC.spectraunits:\n combobox.addItem(unit.symbol + \" (\" + unit.physical_quantity + \")\", unit)\n\n if True: # If unit was recognized by parser\n combobox.setCurrentIndex(0)\n self.spectraTableWidget.setCellWidget(rowsnumber, 3, combobox)\n\n # 4th column - Chart label\n labelitem = QtWidgets.QTableWidgetItem(os.path.basename(fname) + \" TR:\" + str(trace.number))\n self.spectraTableWidget.setItem(rowsnumber, 4, labelitem)\n\n del currentparse\n else:\n print(\"QFileDialog canceled\")", "title": "" }, { "docid": "56274375c68dfb078f40a2cafb8e2ecd", "score": "0.5677389", "text": "def load_data( self ):\n self.sample_freq, data = wavfile.read(self.name)\n norm_factor = iinfo(data.dtype).max\n self.data = data/norm_factor\n self.numsamples_total = self.data.shape[0]\n self.numchannels_total = self.data.shape[1]", "title": "" }, { "docid": "477b79e09b5a358ee385719b83a901f8", "score": "0.56739175", "text": "def _get_spec_file_info(spec_file):\n rdict = io._read_rhessi_spec_file(spec_file)\n\n if rdict[\"1\"][0][\"SUMFLAG\"] != 1:\n print(\"Apparently spectrum file\\'s `SUMFLAG` should be one and I don\\'t know what to do otherwise at the moment.\")\n return\n\n # Note that for rate, the units are per detector, i.e. counts sec-1 detector-1. https://hesperia.gsfc.nasa.gov/rhessi3/software/spectroscopy/spectrum-software/index.html\n # -> I tnhink this is the default but false for the first simple case I tried. I think sum_flag=1 sums the detectors up for spectra and srm\n\n channel_bins_inds = rdict[\"1\"][1][\"CHANNEL\"] # channel numbers, (rows,columns) -> (times, channels)\n lvt = rdict[\"1\"][1][\"LIVETIME\"] # livetimes, (rows,columns) -> (times, channels)\n times_s = rdict[\"1\"][1][\"TIME\"] # times of spectra, entries -> times. 
Times from start of the day of \"DATE_OBS\"; e.g.,\"DATE_OBS\"='2002-10-05T10:38:00.000' then times measured from '2002-10-05T00:00:00.000'\n time_deltas = rdict[\"1\"][1][\"TIMEDEL\"] # times deltas of spectra, entries -> times\n # spectrum number in the file, entries -> times # spec_num = rdict[\"1\"][1][\"SPEC_NUM\"]\n\n td = times_s-times_s[0]\n spec_stimes = [Time(rdict[\"0\"][0][\"DATE_OBS\"], format='isot', scale='utc')+TimeDelta(dt * u.s) for dt in td]\n spec_etimes = [st+TimeDelta(dt * u.s) for st, dt in zip(spec_stimes, time_deltas)]\n time_bins = np.concatenate((np.array(spec_stimes)[:, None], np.array(spec_etimes)[:, None]), axis=1)\n\n channels = rdict[\"2\"][1] # [(chan, lowE, hiE), ...], rdict[\"2\"][0] has units etc.\n channel_bins = np.concatenate((np.array(channels['E_MIN'])[:, None], np.array(channels['E_MAX'])[:, None]), axis=1)\n\n # get counts [counts], count rate [counts/s], and error on count rate\n counts, counts_err, cts_rates, cts_rate_err = _spec_file_units_check(rhessi_dict=rdict, livetimes=lvt, time_dels=time_deltas, kev_binning=channel_bins)\n\n return channel_bins, channel_bins_inds, time_bins, lvt, counts, counts_err, cts_rates, cts_rate_err", "title": "" }, { "docid": "419912ece0b26cbfac01a3bc2ce3b43b", "score": "0.5668708", "text": "def load_raw_training_data(concussed_path, control_path):\n\n\t# variable to hold raw data from matlab structs\n\tconcussed_data = []\n\tcontrol_data = []\n\n\t# load concussed data\n\tfor filename in os.listdir(concussed_path):\n\t\t# load matlab struct\n\t\tmstruct = sio.loadmat(concussed_path + '/' + filename)\n\t\t# transpose matrix, change datatype\n\t\tconcussed_data.append(np.transpose(mstruct['besa_channels_artRej']['amplitudes'][0][0].astype(float)))\n\n\t# load control data\n\tfor filename in os.listdir(control_path):\n\t\t# load matlab struct\n\t\tmstruct = sio.loadmat(control_path + '/' + filename)\n\t\t# transpose matrix, change datatype\n\t\tcontrol_data.append(np.transpose(mstruct['besa_channels_artRej']['amplitudes'][0][0].astype(float)))\n\n\n\tsample_rate = mstruct['besa_channels_artRej']['samplingrate'][0][0][0][0]\n\n\treturn concussed_data, control_data, sample_rate", "title": "" }, { "docid": "7fb4e49acb11da9b4f0c87ac661357a3", "score": "0.5661103", "text": "def _initFromData(self, file):\n\n magic, bom, version, filesize, headersize, numblocks = \\\n _common.NDS_STD_FILE_HEADER.unpack_from(file, 0)\n if version != 0x100:\n raise ValueError(f'Unsupported STRM version: {version}')\n assert magic == b'STRM'\n\n headMagic, headSize = struct.unpack_from('<4sI', file, 0x10)\n assert headMagic == b'HEAD'\n\n (waveType, self.isLooped, numChannels, self.unk03,\n self.sampleRate, self.time,\n self.loopOffset,\n numSamples,\n dataOffset,\n numBlocks,\n bytesPerBlock, # per channel\n self.samplesPerBlock, # per channel\n bytesInLastBlock, # per channel\n self.samplesInLastBlock, # per channel\n self.unk28,\n self.unk2C,\n self.unk30,\n self.unk34,\n self.unk38,\n self.unk3C,\n self.unk40,\n self.unk44,\n ) = struct.unpack_from('<B?BB2H16I', file, 0x18)\n assert dataOffset == 0x68\n self.waveType = WaveType(waveType)\n\n dataOffs = 0x10 + headSize\n dataMagic, dataSize = struct.unpack_from('<4sI', file, dataOffs)\n assert dataMagic == b'DATA'\n data = file[dataOffs + 8 : dataOffs + dataSize]\n\n isOneBigLongBlock = (numBlocks == 1 and waveType == WaveType.ADPCM)\n\n self.channels = []\n for _ in range(numChannels):\n self.channels.append([])\n\n offs = 0\n for bn in range(numBlocks):\n blockSize = bytesPerBlock\n if 
bn == numBlocks - 1:\n blockSize = bytesInLastBlock\n\n if isOneBigLongBlock:\n blockSize += 4\n\n for cn in range(numChannels):\n self.channels[cn].append(data[offs : offs + blockSize])\n offs += blockSize\n\n while offs % 4: offs += 1", "title": "" }, { "docid": "30afe5272d5fc63b1ac18ebcd85e4b74", "score": "0.56459963", "text": "def read_spectrum(file_in, head_type='cube'):\n\n # Read in spectrum file.\n # Loaded as 'str' initially due to a byte conversion issue.\n try:\n data_in = np.loadtxt(file_in, delimiter=',', dtype='str')\n except Exception as e:\n print(e)\n\n # Cut off the extraneous final column\n data_in = data_in[:, :-1].astype(float).T\n\n # Assign variables.\n if head_type == 'cube':\n wave, flux, fluxerr, spline, _ = data_in\n elif head_type == 'galcen':\n print(\"code this here.\")\n st()\n\n return wave, flux, fluxerr, spline", "title": "" }, { "docid": "ac3c62bd3e46b04bb633f25409d1ee7a", "score": "0.5642734", "text": "def runDECODEandSAVE_RAWfiles(filesRAW_to_read,out_path,channels,survey_date,range_detection,depth_max_toSave):\r\n for channel in channels : # on parcout les canaux \r\n for file in filesRAW_to_read : # on parcourt les fichiers de donnees .raw\r\n line = os.path.basename(file)[:5]\r\n print('-> Lecture du fichier RAW, Ligne : '+line+' Canal : '+str(channel))\r\n # Ouverture du fichier\r\n f = open(file, 'rb')\r\n # lecture et remplissage de dictionnaires :\r\n d_power,d_param,d_traj = read_RAWfile(f,line,channel,survey_date,range_detection,depth_max_toSave)\r\n print('Lecture achevee')\r\n print('-> Sauvegarde du fichier RAW, Ligne : '+line+' Canal : '+str(channel))\r\n # Enregistrement des donnees dans des fichiers h5\r\n save_RAWbdd(out_path,channel,d_param,d_power,d_traj)\r\n print('Sauvegarde achevee')\r\n return None", "title": "" }, { "docid": "abd3a067c76f383d2cb05aa644e18b90", "score": "0.56420183", "text": "def load_files(self, mp3_path: str, srt_path: str):\n self.mp3 = AudioSegment.from_mp3(mp3_path)\n self.srt = pysrt.open(srt_path)", "title": "" }, { "docid": "46b9627c61375aea8d90249b4738f341", "score": "0.56391644", "text": "def sample(self, file_name):\n \n # WAV file path\n file_path = \"WAV/unprocessed/\"+file_name\n \n # load WAV file using soundfile\n data, sr = sf.read(file_path) \n \n # convert to Wave object\n self.waveform = Wave(data)\n \n return self.waveform", "title": "" }, { "docid": "5c4bc1f61cb8316e5485d1b22f49fbd6", "score": "0.56117195", "text": "def load_volume(patnr):\r\n \r\n# tmp = r'C:\\Users\\Michel\\Documents\\BMT\\Bacheloropdracht\\data\\croppedAvg7_pat%02i.ssdf'\r\n tmp = r'C:\\almar\\data\\dicom\\cropped\\croppedReg_pat%02i_gravity.bsdf'\r\n s = ssdf.load(tmp%patnr)\r\n vol = Aarray(s.vol.astype(np.float32), s.sampling, s.origin)\r\n\r\n #Save sampling\r\n sampling = s.sampling\r\n origin = s.origin\r\n\r\n return vol, sampling, origin", "title": "" }, { "docid": "b6dec9c35c1458bf55ae93290f913172", "score": "0.5610218", "text": "def load_joyrad_file(self, filename, load_to_ram=False):\r\n self.type = 'joyrad'\r\n\r\n self.f = netCDF4.Dataset(filename, 'r')\r\n print('keys ', self.f.variables.keys())\r\n\r\n #print(self.f)\r\n self.timestamps = self.f.variables['time'][:].astype(np.float64) + self.f.variables['sampleTms'][:]/1000.\r\n self.timestamps += h.dt_to_ts(datetime.datetime(2001, 1, 1, 0, 0, 0))\r\n print('time ', self.timestamps[:10])\r\n self.delta_ts = np.mean(np.diff(self.timestamps)) if self.timestamps.shape[0] > 1 else 2.0\r\n self.range = self.f.variables['range'][:]\r\n print('range ', 
self.range[:10])\r\n self.minvel = self.f.variables['MinVel'][:]\r\n #self.shiftvel = self.f.variables['MinVel_Correction'][:]\r\n\r\n self.ir_chirps = self.f.variables['range_offsets'][:] - 1\r\n self.chirp_vel = self.f.variables['velocity'][:]\r\n self.velocity = np.zeros(512)\r\n# print('velocity ', self.velocity[:10])\r\n print('Z chunking ', self.f.variables['spec'].chunking())\r\n\r\n self.begin_dt = h.ts_to_dt(self.timestamps[0])\r\n\r\n if load_to_ram == True:\r\n self.spectra_in_ram = True\r\n self.spec = self.f.variables['spec'][:]\r\n # self.LDR = self.f.variables['LDR'][:]\r\n # self.SNRco = self.f.variables['SNRco'][:]\r", "title": "" }, { "docid": "02041faf0514746910ce5373bd3f2d53", "score": "0.5609865", "text": "def load(self):\n time_array = np.zeros(self.n_frame, dtype=np.double)\n x = np.zeros((self.n_frame, self.n_data), dtype=np.double)\n adc_array = np.zeros((self.n_frame, self.nadc), dtype=np.double)\n with open(self.file_name, \"rb\") as fh:\n fh.read(self.offset)\n for i in range(self.n_frame):\n # get a whole frame\n d = fh.read(self.frame_size)\n # extract time ticks\n time_array[i] = unpack(self.p[\"ts_format\"], d[8:16])[0]\n # extract ADC samples (double precision)\n dp = d[960 : self.header_size]\n adc_array[i] = np.array(unpack(\"8d\", dp))\n # extract demodulated I,Q data\n x[i] = np.array(unpack(\"512d\", d[self.header_size :]))\n\n # build complex-valued data from Re, Im measurements\n data = x[:, :256] + 1j * x[:, 256:]\n\n # rescale data to Ohms\n data = -data * self.p[\"scale\"]\n\n return time_array, data, adc_array", "title": "" }, { "docid": "31aebed0af46dd540be2ecc7a56a6544", "score": "0.560405", "text": "def load_mcraw(fname): \n lines = open(fname, \"r\").readlines()\n lines = [float(line) for line in lines]\n print(\"Loaded {} with {} values\".format(fname, len(lines)))\n return lines", "title": "" }, { "docid": "097d20cda0dfa0b4c5c65a574bbf85a1", "score": "0.55983543", "text": "def getsong(file):\n data, data_seg = read(file)\n# for i in range(len(data_seg.analogsignals)):\n# if data_seg.analogsignals[i].name == 'Channel bundle (RAW 009) ':\n# song=data_seg.analogsignals[0][i].as_array()\n s = data_seg.analogsignals[0].name.split('(')[1].split(')')[0].split(',') # What happened? Was working without this earlier. 
No clue what changed.\n analog_signals = np.array([data_seg.analogsignals[0]])\n analog_signals = analog_signals.transpose()\n \n for i in range(len(s)):\n if s[i] == 'CSC5':\n song = analog_signals[i]\n else:\n continue\n print('Saving song to ', file[:-4], \"_songfile.npy\")\n np.save(file[:-4]+\"_songfile\", song)", "title": "" }, { "docid": "cd4e530bde3ddc5b0562d0b0d15a9d1d", "score": "0.55982155", "text": "def load(self, file: str) -> Signal:\n self._raise_file_extension_error_if_file_extension_isnt_allowed(file)\n waveform, _ = librosa.load(file,\n mono=self.mono,\n sr=self.sample_rate,\n dtype=self.data_type)\n signal = Signal(self._signal_type,\n self.sample_rate,\n waveform,\n file)\n logger.info(\"Loaded audio file at %s\", file)\n return signal", "title": "" }, { "docid": "4d64580d17825003f151843fb3a0f8bc", "score": "0.559687", "text": "def display_spectra(self):\n self.titlename = [] # reset molecule\n self.spectra_plot.clear() # reset plot canvas\n self.color = 0 # reset color to default\n \n if self.noentry or self.noinchi or self.nospectra:\n return\n data,origin = data_process(self.file_location,\"\")\n for i in data[1:]: \n #self.save_location = self.file_location.rstrip(\".jdx\")+\"_%s\"%count+\".png\"\n #simple_plot(data[0],i,False,True,self.save_location,\"Wavenumber (1/CM)\",\"Transmittance/Absorbance\",\"Molecule Spectra Feature for %s\"%self.INCHIKEY)\n x = data[0]\n y = i\n break #right now only dealing with single cases\n\n self.xdata = x\n self.ydata = y\n \n # reading what unit x and y is from the data file title\n XUNIT = \"\"\n YUNIT = \"\"\n for i in origin:\n if \"XUNITS\" in i:\n XUNIT = i\n if \"YUNITS\" in i:\n YUNIT = i\n if XUNIT == \"\" or YUNIT == \"\":\n print \"Something is wrong with datafile, xunit or yunit is missing\"\n \n # convert xdata to the right unit as set by self.wave\n if (\"MICROMETERS\" in XUNIT and self.wave == \"Wavenumber\") or (\"CM\" in XUNIT and self.wave == \"Wavelength\"):\n self.xdata = 10000.0/self.xdata \n elif (\"MICROMETERS\" in XUNIT and self.wave == \"Wavelength\") or (\"CM\" in XUNIT and self.wave == \"Wavenumber\"):\n pass\n else:\n print \"Something is wrong with xdata, please check file and settings\"\n\n # converting absorbance to transmittance, not sure if method is correct. \n # need some inputs for this. 
original file with absorbance range from 0-1000 ish.\n if \"ABSORBANCE\" in YUNIT:\n self.ydata = 1-self.ydata/float(max(self.ydata))\n\n # Configure plot title, x and y labels \n self.titlename.append(self.FORMULA)\n self.spectra_plot.set_title(\"Molecule Spectra for %s\"%self.FORMULA)\n if self.wave == \"Wavenumber\":\n self.spectra_plot.set_xlabel(\"Wavenumber (1/CM)\")\n elif self.wave == \"Wavelength\": \n self.spectra_plot.set_xlabel(\"WaveLength (micron)\")\n else:\n print \"Something is wrong with self.wave, please check settings\"\n self.spectra_plot.set_ylabel(\"Transmittance\") \n self.spectra_plot.plot(self.xdata,self.ydata, linestyle=\"-\", color=COLORS[self.color], marker=\"*\",linewidth=1.0, markersize=2) \n self.canvas.draw()\n self.display_2D_structure()", "title": "" }, { "docid": "f1b35c120d13247f2e49008f48d22c17", "score": "0.55883837", "text": "def read_spectrum(spectrum, index):\n polarity = 'MS:1000130' in spectrum\n ms_level = spectrum['ms level']\n rt, units = spectrum['MS:1000016']\n if units != 'minute':\n rt /= 60\n\n collision_energy = spectrum.get('MS:1000045', 0)\n precursor_intensity = spectrum.get('MS:1000042', 0)\n precursor_mz = spectrum.get('MS:1000744', 0)\n min_mz = spectrum.get('lowest m/z value', 0)\n max_mz = spectrum.get('highest m/z value', 0)\n\n if ms_level == 1:\n data = [(mz, i, rt, polarity) for (mz, i) in spectrum.peaks]\n else:\n data = [(mz, i, rt, polarity, precursor_mz, precursor_intensity,\n collision_energy) for (mz, i) in spectrum.peaks]\n\n return data, ms_level, polarity", "title": "" }, { "docid": "6be807a85ec7a4f714f20a9f17c70527", "score": "0.5581273", "text": "def load(filename, subbands):\n A = np.loadtxt(filename)\n R = np.array([[-0.1195950000, -0.7919540000, 0.5987530000],\n [ 0.9928230000, -0.0954190000, 0.0720990000],\n [ 0.0000330000, 0.6030780000, 0.7976820000]])\n L = A.dot(R)\n\n u, v = [], []\n\n for a1 in range(0, NUM_ANT):\n for a2 in range(0, a1+1):\n u.append(L[a1, 0] - L[a2, 0])\n v.append(L[a1, 1] - L[a2, 1])\n\n c = 299792458.0\n return [np.ravel([(np.array(u)/(c/(s*(2e8/1024))/2.0)) for s in subbands]),\n np.ravel([(np.array(v)/(c/(s*(2e8/1024))/2.0)) for s in subbands])]", "title": "" }, { "docid": "f4dbc06f305a088141187ba3674d75d6", "score": "0.55774885", "text": "def read_fuse(self, line, path='./'):\n self.read_spectra(line, path)", "title": "" }, { "docid": "86d41b7bcf71f5e4cf8132b4d75cebb1", "score": "0.5571679", "text": "def show_waveform_and_spectrogram():\n filename = filedialog.askopenfilename()\n spf = wave.open(filename, 'r')\n sound_info = spf.readframes(-1)\n sound_info = fromstring(sound_info, 'Int16')\n f = spf.getframerate()\n\n subplot(211)\n plot(sound_info)\n title('Waveform and Spectrogram of ' + filename + '\\n')\n\n subplot(212)\n specgram(sound_info, Fs=f, scale_by_freq=True, sides='default')\n\n show()\n spf.close()", "title": "" }, { "docid": "6791e3b9e6cd102e1e9d8e742a4862dc", "score": "0.55688214", "text": "def wavfile_to_examples(wav_file):\n\n sr, wav_data = wavfile.read(wav_file) \n print((\"SR, {}\".format(sr)))\n print((\"wav_data, {}, and shape is {}\".format(wav_data, wav_data.shape)))\n print(\"max element in wav_data is {}\".format(np.amax(wav_data)))\n\n\n assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype\n samples = wav_data / 32768.0 # Convert to [-1.0, +1.0]\n\n\n data=wav_data\n sample_rate=sr\n\n if len(data.shape) > 1:\n data = np.mean(data, axis=1)\n # Resample to the rate assumed by VGGish.\n if sample_rate != vggish_params.SAMPLE_RATE:\n 
data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE)\n\n # Compute log mel spectrogram features.\n log_mel = mel_features.log_mel_spectrogram(\n data,\n audio_sample_rate=vggish_params.SAMPLE_RATE,\n log_offset=vggish_params.LOG_OFFSET,\n window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS,\n hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS,\n num_mel_bins=vggish_params.NUM_MEL_BINS,\n lower_edge_hertz=vggish_params.MEL_MIN_HZ,\n upper_edge_hertz=vggish_params.MEL_MAX_HZ)\n\n # Frame features into examples.\n features_sample_rate = 1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS\n example_window_length = int(round(\n vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate))\n example_hop_length = int(round(\n vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate))\n log_mel_examples = mel_features.frame(\n log_mel,\n window_length=example_window_length,\n hop_length=example_hop_length)\n \n return log_mel_examples", "title": "" }, { "docid": "da70164190c793f864738f0cfd9fa66b", "score": "0.55656505", "text": "def load_raw(raw_pickle_file):\n with open(raw_pickle_file, 'rb') as f:\n raw = pickle.load(f)\n logging.info(\"Loaded %i recordings.\", len(raw['handwriting_datasets']))\n return raw", "title": "" }, { "docid": "328e0bdf3306dc0fd2bbe96de65d9cea", "score": "0.55651265", "text": "def load_spectrum_gnirs(file, velScale, resolution, rebinWave=True):\n spec, hdr = pyfits.getdata(file, header=True)\n\n pixScale = hdr['CD1_1'] * 1.0e-4 # microns\n\n wavelength = np.arange(len(spec), dtype=float)\n wavelength -= (hdr['CRPIX1']-1.0) # get into pixels relative to the reference pix\n wavelength *= hdr['CD1_1'] # convert to the proper wavelength scale (Ang)\n wavelength += hdr['CRVAL1'] # shift to the wavelength zeropoint\n\n # Convert from Angstroms to microns\n wavelength *= 1.0e-4\n\n deltaWave = 2.21344 / resolution # microns\n resInPixels = deltaWave / pixScale # pixels\n sigmaInPixels = resInPixels / 2.355\n psfBins = np.arange(-4*math.ceil(resInPixels), 4*math.ceil(resInPixels))\n psf = py.normpdf(psfBins, 0, sigmaInPixels)\n specLowRes = np.convolve(spec, psf, mode='same')\n\n if rebinWave:\n # Rebin into equals logarithmic steps in wavelength\n logWave, specNew, vel = log_rebin2(wavelength, specLowRes,\n inMicrons=True, velScale=velScale)\n outWave = logWave\n else:\n outWave = wavelength\n specNew = specLowRes\n\n return outWave, specNew", "title": "" }, { "docid": "1d4b2c12053bced3c0292f0f16890e39", "score": "0.55642474", "text": "def loadAudio(self, filename):\n\n\n audio = None\n\n if filename:\n # loader = essentia.standard.MonoLoader(filename=filename)\n #\n # # and then we actually perform the loading:\n # audio = loader()\n\n #Essentia's loader (above) has a bug that doesn't close files\n #It causes problems processing large number of files, use madmom instead\n # audio, sample_rate = madmom.audio.signal.load_wave_file(filename, num_channels=1)\n \n y, sr = librosa.load(filename, sr=None)\n\n audio = essentia.array(y)\n\n return audio", "title": "" }, { "docid": "ce1b282a7180278290c1fd869db47c59", "score": "0.5557775", "text": "def _parse_file(cls, filepath):\n hdus = fits.open(filepath)\n return cls._parse_hdus(hdus)", "title": "" }, { "docid": "ce1b282a7180278290c1fd869db47c59", "score": "0.5557775", "text": "def _parse_file(cls, filepath):\n hdus = fits.open(filepath)\n return cls._parse_hdus(hdus)", "title": "" }, { "docid": "ce1b282a7180278290c1fd869db47c59", "score": "0.5557775", "text": "def _parse_file(cls, filepath):\n hdus = 
fits.open(filepath)\n return cls._parse_hdus(hdus)", "title": "" }, { "docid": "3367f64db62098cb32dc65b0ccb97f88", "score": "0.5553841", "text": "def manipulate(file):\n audio, sr = librosa.load(file)\n\n # add noise\n noise_factor = random.uniform(0.0001, 0.02)\n noise_audio = noise(audio, noise_factor)\n\n # shift\n shift_max = round(random.uniform(0.05, 0.15), 2)\n shift_direction = random.choice([\"both\", \"right\"])\n shift_audio = shift(noise_audio, 22050, shift_max, shift_direction)\n\n # change pitch\n n1 = round(random.uniform(-2, -0.2), 2)\n n2 = round(random.uniform(0.2, 2), 2)\n res = np.stack((n1, n2))\n pitch_factor = np.random.choice(res)\n pitch_audio = pitch(shift_audio, 22050, pitch_factor)\n\n spectrogram = librosa.feature.melspectrogram(y=pitch_audio, sr=sr)\n spectrogram_db = librosa.amplitude_to_db(spectrogram, ref=np.max)\n return spectrogram_db", "title": "" }, { "docid": "0c4b642899f9682e5e129d1b0f1a7419", "score": "0.5552952", "text": "def load_telluric_lines(line_list, plot=False):\n #wmin = self.wmin\n #wmax = self.wmax\n\n # import all fluxes\n log.info(\"Loading telluric spectra.\")\n fluxes = []\n species_fn = [os.path.join(tell_dir, \"LBLRTM_%s_+0.0.npy\" % x) for x in line_list]\n for species in species_fn:\n log.info(\"Loading '%s'.\", species)\n data = np.load(species)\n fluxes.append(data[1])\n waves = data[0] # wavelengths are same for all species\n log.info(\"Combining species together.\")\n all_spec = np.prod(fluxes, axis=0) # multiply all species together\n if plot:\n log.info(\"Opening plot...\")\n import matplotlib.pyplot as plt\n plt.plot(waves, all_spec)\n plt.xlabel(\"Angstrom\")\n plt.ylabel(\"Normalized flux\")\n #plt.xlim([wmin, wmax])\n plt.show()\n return waves, all_spec", "title": "" }, { "docid": "be6383f7b0277a1ad7fc9850d327fd39", "score": "0.55479676", "text": "def load_audio(filename, mono=True, fs=44100):\n\n file_base, file_extension = os.path.splitext(filename)\n if file_extension == '.wav':\n print(filename)\n audio_file = wave.open(filename)\n\n # Audio info\n sample_rate = audio_file.getframerate()\n sample_width = audio_file.getsampwidth()\n number_of_channels = audio_file.getnchannels()\n number_of_frames = audio_file.getnframes()\n\n # Read raw bytes\n data = audio_file.readframes(number_of_frames)\n audio_file.close()\n\n # Convert bytes based on sample_width\n num_samples, remainder = divmod(len(data), sample_width * number_of_channels)\n if remainder > 0:\n raise ValueError('The length of data is not a multiple of sample size * number of channels.')\n if sample_width > 4:\n raise ValueError('Sample size cannot be bigger than 4 bytes.')\n\n if sample_width == 3:\n # 24 bit audio\n a = np.empty((num_samples, number_of_channels, 4), dtype=np.uint8)\n raw_bytes = np.fromstring(data, dtype=np.uint8)\n a[:, :, :sample_width] = raw_bytes.reshape(-1, number_of_channels, sample_width)\n a[:, :, sample_width:] = (a[:, :, sample_width - 1:sample_width] >> 7) * 255\n audio_data = a.view('<i4').reshape(a.shape[:-1]).T\n else:\n # 8 bit samples are stored as unsigned ints; others as signed ints.\n dt_char = 'u' if sample_width == 1 else 'i'\n a = np.fromstring(data, dtype='<%s%d' % (dt_char, sample_width))\n audio_data = a.reshape(-1, number_of_channels).T\n\n if mono:\n # Down-mix audio\n audio_data = np.mean(audio_data, axis=0)\n\n # Convert int values into float\n audio_data = audio_data / float(2 ** (sample_width * 8 - 1) + 1)\n\n # Resample\n if fs != sample_rate:\n audio_data = librosa.core.resample(audio_data, sample_rate, fs)\n 
sample_rate = fs\n\n return audio_data, sample_rate\n\n elif file_extension == '.flac':\n audio_data, sample_rate = librosa.load(filename, sr=fs, mono=mono)\n\n return audio_data, sample_rate\n\n return None, None", "title": "" }, { "docid": "2eebf8a431b5af71bc2d46c6546197f7", "score": "0.5537028", "text": "def buildSpectra(self,verbose=False):\n from astropy.io import fits\n \n def wavelength(pixel,w1,dw):\n wave = w1 + (pixel-1)*dw\n return wave\n \n self.spectra = {}\n for i in range(1,self.n_apertures+1):\n open_image = fits.open(self.path+self.image+'.{:04}'.format(i)+'.fits')\n if verbose:\n print open_image.info()\n \n image_data = open_image[0].data\n self.spectra[i] = image_data\n open_image.close()\n\n image_data = self.spectra[i]\n \n if self.direction == 0:\n self.n_pix = len(image_data[1][0])\n else:\n self.n_pix = len(image_data)\n \n x_pix = np.linspace(1,self.n_pix,self.n_pix)\n x_wav = wavelength(x_pix,self.w1,self.dw)\n if self.direction == 0:\n y_val = image_data[1][0]\n else:\n y_val = image_data\n \n if verbose:\n print 'x pixels: ', self.n_pix\n print 'x wavelengths:', x_wav[0],x_wav[-1]\n del fits", "title": "" }, { "docid": "bbf7b70b2cfa40d6d05ce6c084278c14", "score": "0.5530208", "text": "def read_raw(filename, interpolate=True):\n if not os.path.exists(filename):\n raise IOError(\"File {} does not exist!\".format(filename))\n \n if interpolate:\n # Converting the raw to PPM\n p = subprocess.Popen([\"dcraw\",\"-q\",\"1\",\"-f\",\"-v\",\"-a\",filename]).communicate()[0]\n \n ppm_filename = os.path.splitext(filename)[0] + \".ppm\"\n raw_data = np.array(Image.open(ppm_filename))\n \n else:\n # Converting the raw to PGM\n p = subprocess.Popen([\"dcraw\",\"-D\",\"-4\",filename]).communicate()[0]\n \n pgm_filename = os.path.splitext(filename)[0] + \".pgm\"\n raw_data = read_pgm(pgm_filename)\n \n return raw_data", "title": "" }, { "docid": "e0e1c17225ce8ddbb2c0219a4b4c6dd3", "score": "0.55282193", "text": "def load_samples(self, file_name):\n \n with open(\"{}.pkl\".format(file_name), 'rb') as f:\n self.samples = pickle.load(f) \n self.__extract_samples()", "title": "" }, { "docid": "f20d7045ab6d18b586339577d191c635", "score": "0.55246854", "text": "def dr(self, file_, format):\n self.savuka.read(file_, format)", "title": "" }, { "docid": "8da52f6a000c1bac865a6513f78e87cd", "score": "0.55208844", "text": "def load_stream(path, snip=None):\n downsample = Default_Samplerate\n normalize = True\n\n audio, sr = rosa.load(path, sr=downsample, mono=True)\n\n if normalize:\n audio = util.normalize(audio)\n\n duration = rosa.get_duration(audio)\n if snip is not None:\n if snip > duration:\n util.fix_length(audio, snip)\n audio = audio[:snip * sr]\n\n return audio", "title": "" }, { "docid": "c7508b3ed72586ccab87cb7b77f15fcf", "score": "0.55179274", "text": "def _load_raw(self, **kwargs): # pylint: disable=unused-argument\n list_of_arrs = []\n for patient_id in self.indices:\n raw_data = sitk.ReadImage(self.index.get_fullpath(patient_id))\n patient_pos = self.index.get_pos(patient_id)\n list_of_arrs.append(sitk.GetArrayFromImage(raw_data))\n\n # *.mhd files contain information about scans' origin and spacing;\n # however the order of axes there is inversed:\n # so, we just need to reverse arrays with spacing and origin.\n self.origin[patient_pos, :] = np.array(raw_data.GetOrigin())[::-1]\n self.spacing[patient_pos, :] = np.array(raw_data.GetSpacing())[::-1]\n\n new_data = np.concatenate(list_of_arrs, axis=0)\n new_bounds = np.cumsum(np.array([len(a) for a in [[]] + 
list_of_arrs]))\n self.images = new_data\n self._bounds = new_bounds\n return self", "title": "" }, { "docid": "5751cc82b152c32ea13de24e867557c2", "score": "0.55085266", "text": "def load(filename):", "title": "" }, { "docid": "daeab9fcfb29200bee29a490cf5e3cf2", "score": "0.5498867", "text": "def load_txt(self, filepath):\n # Get the transmission data from the file\n self.raw = np.genfromtxt(filepath, unpack=True)\n\n # Convert to Angstroms if microns\n if self.raw[0][-1] < 100:\n self.raw[0] = self.raw[0] * 10000\n\n # Load it into the object\n self.filterID = os.path.splitext(os.path.basename(filepath))[0]\n self.load_raw()", "title": "" }, { "docid": "e322acae6e7ee3c2b8a8961e41901f2f", "score": "0.54893446", "text": "def _extract_bands(self, filepath):\n self.name = filepath.split(os.sep)[-1].split('.')[0]\n self.heights, self.timestamps, speeds, directions = read_sdr(filepath)\n self.data = np.empty((len(SODAR_FIELDS.keys()), MAX_TIMESTAMPS, len(self.heights)))\n self.data.fill(NO_DATA) # 0 is speeds, 1 is directions\n for j in range(len(speeds)):\n time_idx = timestamp_to_index(self.timestamps[j])\n for i in range(len(speeds[j])):\n self.data[0][time_idx][i] = speeds[j][i]\n self.data[1][time_idx][i] = directions[j][i]\n\n # At this point we can discard and regenerate the timestamps since\n # some can be missing, but now they are filled with NO_DATA for every 5 minute interval\n if len(self.timestamps) != MAX_TIMESTAMPS:\n # Reassign them based on file name and year\n base_timestamp = str(self.timestamps[0])[:6]\n date = base_timestamp[:6]\n self.timestamps = list()\n seconds = '00' # seconds are always 0\n for i in range(MAX_TIMESTAMPS):\n hours, minutes = divmod(i*5, 60)\n if hours < 10:\n hours = '0' + str(hours)\n else:\n hours = str(hours)\n if minutes < 10:\n minutes = '0' + str(minutes)\n else:\n minutes = str(minutes)\n self.timestamps.append(int(base_timestamp + hours + minutes + '00')) # seconds ", "title": "" }, { "docid": "d761148c35b022e6d0f9aa73945b36f5", "score": "0.5488984", "text": "def read_wav(file):\n with contextlib.closing(wave.open(file)) as f:\n params = f.getparams()\n frames = f.readframes(params[3])\n return array.array(\"h\", frames), params", "title": "" }, { "docid": "7c52b01fca73e754660b58844aed19c6", "score": "0.54866177", "text": "def load_waves(self, pathname):\n\n # alpha-num sort\n convert = lambda t: int(t) if t.isdigit() else t\n sort = lambda filepath: [convert(t) for t in re.split('([0-9]+)', filepath)]\n\n # populate from audio files in pathname\n filepaths = glob.glob(pathname)\n name_suffix = ''\n\n if len(filepaths) == 0:\n name_suffix = ' (missing)'\n # use features.csv as a backup if no audio files present\n filename = FILE_FEATURE[0]\n filepath = os.path.join(\"features\", filename)\n if not os.path.exists(filepath):\n log('load waves: %s not found.' 
% filepath)\n return\n\n try:\n df = pd.read_csv(filepath, index_col=0)\n filepaths = df.index.values\n debug('loaded file info from %s' % filepath)\n except:\n log('Error loading %s:' % (filepath))\n log(traceback.format_exc())\n return\n\n # sorted filepaths for loading in order\n filepaths = sorted(filepaths, key=sort, reverse=False)\n\n for filepath in filepaths:\n # populate list of AudioFiles\n try:\n filename = os.path.split(filepath)[1]\n label = LABEL_MUSIC if filename.startswith(\"mu\") else LABEL_SPEECH\n id = filename\n name = filename + name_suffix\n\n # add wave entry to db\n self._files[id] = AudioFile(id, name, filepath, label)\n debug('loaded %s' % filepath)\n except:\n log('Error processing file: %s' % filepath)\n log(traceback.format_exc())", "title": "" }, { "docid": "f22147f36a1f5692fb66e1fa827d707a", "score": "0.5479652", "text": "def read_samples(filename):\n return read_image_as_np(filename, as_patches=True)", "title": "" }, { "docid": "f6cac575911bc99f45db1236bc88e3f7", "score": "0.5476408", "text": "def load(self):\n\n #if self.loaded:\n # return\n\n self._gst_init()\n self._construct_pipeline()\n\n self.log.debug('Loaded file: %s Sample duration: %s',\n self.file_name, self.sample.duration)", "title": "" }, { "docid": "5798118327a4352f5d6dc1e1cb762118", "score": "0.54758316", "text": "def audio_read(file, sr=22050) -> np.ndarray:\n y, sample_rate = librosa.load(file, sr=sr)\n mfcc = np.mean(librosa.feature.mfcc(y, sample_rate, n_mfcc=100).T, axis=0).reshape((1, 20, 5))\n return mfcc", "title": "" }, { "docid": "671f241b75b5473f70452323d0fa7fa1", "score": "0.545672", "text": "def load_starfish_spectrum(params, limits=None, hdr=False, normalize=False,\n area_scale=False, flux_rescale=False, wav_scale=True):\n my_hdf5 = HDF5Interface()\n my_hdf5.wl = my_hdf5.wl / 10 # Turn into Nanometer\n\n if hdr:\n flux, myhdr = my_hdf5.load_flux_hdr(np.array(params))\n spec = Spectrum(flux=flux, xaxis=my_hdf5.wl, header=myhdr)\n else:\n flux = my_hdf5.load_flux(np.array(params))\n spec = Spectrum(flux=flux, xaxis=my_hdf5.wl)\n\n if flux_rescale:\n spec = spec * 1e-7 # convert flux unit from /cm to /nm\n\n if area_scale:\n if hdr:\n emitting_area = phoenix_area(spec.header)\n spec = spec * emitting_area\n spec.header[\"emit_area\"] = (emitting_area, \"pi*r^2\")\n else:\n raise ValueError(\"No header provided for stellar area scaling\")\n\n if wav_scale:\n # Convert into photon counts, (constants ignored)\n spec = spec * spec.xaxis\n\n if normalize:\n spec = spec_local_norm(spec, method=\"exponential\")\n\n if limits is not None:\n if limits[0] > spec.xaxis[-1] or limits[-1] < spec.xaxis[0]:\n logging.warning(\"Warning: The wavelength limits do not overlap the spectrum.\"\n \"There is no spectrum left... Check your wavelength, or limits.\")\n spec.wav_select(*limits)\n\n return spec", "title": "" }, { "docid": "518a4fd47e584797c9dbd184717adb76", "score": "0.5455071", "text": "def load_recording(fname, nchannels, dtype=np.dtype('int16')):\n file_info = os.stat(fname)\n file_size = file_info.st_size\n nsamples = int(file_size / (dtype.itemsize * nchannels))\n return np.memmap(fname, dtype=dtype.name, mode='r', shape=(int(nchannels), int(nsamples)), order='F')", "title": "" } ]
d28270e8adc9d043239caf4a1c9fb088
Gets player ready to take a turn
[ { "docid": "d99491b0ec7ec3919a427f9d9d069418", "score": "0.68041337", "text": "def takeTurn(self):\n # PUTTING IN A TEMPORARY FIX\n for player in self.world.getPlayerList():\n if player.showhandbutton['state'] == NORMAL:\n print('Not ' + self.name + \"'s turn yet\")\n return\n if self.verbose:\n print(self.name, \"Taking a turn\")\n if self.log is not None:\n self.log.write(self.name + \" Taking a turn\")\n self.world.setCurrentPlayer(self) # tell the world to set it's current player to self\n self.world.incTurnNum() # update the world's turn number\n self.world.updateMessage(self.name + \"'s turn to play \" + cardNumToStr(self.world.getTurnNum()) + 's')\n self.tkConfigureShowHand(NORMAL) # allow the player to click the 'show hand' button", "title": "" } ]
[ { "docid": "d1e68be4f905ec1aa1a1a1ce3372e8e0", "score": "0.69901085", "text": "def play_turn(self):\n pass", "title": "" }, { "docid": "f7614474157def4ed5a856906b2f3919", "score": "0.683937", "text": "def passes_turn(self) -> Player:\n self.players.current_player.my_turn = False\n\n next_player = self.players.next_player()\n\n next_player.my_turn = True\n\n return next_player", "title": "" }, { "docid": "ad3aa8cbac3e97b9e7256bd79a6f9a6c", "score": "0.6733295", "text": "def next_turn(self):\n temp = self.current_player\n self.current_player = self.opponent\n self.opponent = temp", "title": "" }, { "docid": "55d0afc2022753771b749912efd3dab4", "score": "0.6468337", "text": "def get_turn(self):\r\n return self._turn", "title": "" }, { "docid": "a4db62baf732d26ea162a864a62d31c8", "score": "0.6460994", "text": "def get_turn(self):\n return self.turn", "title": "" }, { "docid": "fb6655e0dd94994cea1a613a367da61b", "score": "0.645719", "text": "def run(self):\n self.start()\n while not self.has_winner:\n self.take_turn()\n print(f'Player {self.winning_player} wins!')", "title": "" }, { "docid": "1bd969f2b959a4a8ad5b70c6f9f8a537", "score": "0.6447281", "text": "def wait_choice(self) :\n\n if self.player_idx == 4:\n # All players said no, second turn\n self.player_idx = 0\n self.notify(\"Second tour !\")\n self.set_state('sec_choice')\n else :\n self.notify(u\"À %s de parler\" % self.players[self.player_idx])", "title": "" }, { "docid": "d9f2e4018a40cb2af356a6877dc92b56", "score": "0.63894624", "text": "def starting_player(self):\n if self.first_player:\n self.p2.turn = True\n else:\n self.p1.turn = True", "title": "" }, { "docid": "0e9d3ee2c985c6135b7c6309c4f948df", "score": "0.63722813", "text": "def evaluate_click(self):\n player = self.players[self.turn % 2]\n if self.status == 'playing':\n player.play_next_move(self)\n \n elif self.status == 'game over':\n self.__init__()", "title": "" }, { "docid": "136ed9aa2d0c50cfd10a32907f22d4f9", "score": "0.635221", "text": "def game_won(self):\r\n if self.check_win_player_one():\r\n return 1\r\n elif self.check_win_player_two():\r\n return 2\r\n else:\r\n return None", "title": "" }, { "docid": "4dd2644070f9b5e80a70b813ca911b56", "score": "0.63021046", "text": "def ready_player(user):\n try:\n user.ready = True\n user.save()\n except User.DoesNotExist:\n raise ClientError(\"Unknown Error readying player\")\n return True", "title": "" }, { "docid": "8c348c0b34d4a27d6b0691ad2629cbfd", "score": "0.6290166", "text": "def getTurn(self):\n return self.turn", "title": "" }, { "docid": "e0422742c92749a64875a18df487d9f2", "score": "0.62690824", "text": "def playerTurn(self):\r\n\t\tif self.turns % 2 == 0:\r\n\t\t\tself.turns += 1\r\n\t\t\treturn \"player2\"\r\n\t\t\t\r\n\t\telse:\r\n\t\t\tself.turns += 1\r\n\t\t\treturn \"player1\"", "title": "" }, { "docid": "04ce51ba478f09a1538ab1bc8f35a568", "score": "0.6253626", "text": "def get_turn(self):\n return self._turn", "title": "" }, { "docid": "8d449eed2ac7e5c5882f965fbdfc3b33", "score": "0.62509507", "text": "def _take_turns(self):\n # Beginning with the first player,\n # alternate turns between players until the game ends\n self.current_player_id = 1 # the id of the current player\n user_command = '' # the command entered by the user\n while(self.board.is_game_over() is False):\n if self.current_player_id == self.computer_player_id: \n self.board.take_best_move(self.computer_player_id)\n # End turn and allow the user to take a turn\n self.current_player_id = self.user_player_id\n else:\n # Display the board\n 
self.board.display()\n # Remind the user whether they are X's or O's\n if self.user_player_id == 1:\n print \"You are X's\"\n else:\n print \"You are O's\"\n # Ask user to input the coordinates of her mark, or to press q to quit\n user_command = raw_input('<enter \"{rowNum}, {columnNum}\" or \"q\" to quit>: ')\n print \"\"\n # Process the user command\n if user_command.lower().strip() == 'q':\n # End the game\n break\n else:\n # Mark the board for the user\n self._mark_board_for_user(user_command)\n # Display final board \n self.board.display()\n # Determine winner\n self.winner_id = self.board.get_winner()", "title": "" }, { "docid": "24e1b09344c5b480919e2b403039e202", "score": "0.6238954", "text": "def wait_play(self) :\n\n if len(self.trick) == 4 :\n self.set_state('end_trick')\n else :\n self.notify(u\"C'est au tour de %s\" % self.players[self.player_idx])", "title": "" }, { "docid": "68ced8201dd65436e7f23f8f5cc4ec9d", "score": "0.6230617", "text": "def get_next_turn(game):\n return game['next_turn']", "title": "" }, { "docid": "1a2a7bf8e66c9206efeced4adb7e6aa8", "score": "0.62200665", "text": "def turn(self):\n\t\treturn self.toplay", "title": "" }, { "docid": "93db167e5f77ad92cd617cfc7388f8a5", "score": "0.6211587", "text": "def handle_take_turn(player):\n global ROUND_STARTED, TURNS_TAKEN\n if not ROUND_STARTED:\n return json.dumps(False)\n TURNS_TAKEN += 1\n if TURNS_TAKEN == MAX_TURNS:\n ROUND_STARTED = False\n card = player.pick_discard()\n json_card = Conversions.convert_card_to_json(card)\n return json.dumps(json_card)", "title": "" }, { "docid": "9ebc8260153c323896768aa6cee09304", "score": "0.62024665", "text": "def play(self, players, click):\n current_player = self.board.get_current_player()\n player_in_turn = players[current_player - 1]\n\n if player_in_turn.id == 'ai':\n move = player_in_turn.get_action(self.board)\n else:\n move = player_in_turn.get_action(click)\n\n flag = self.board.move(move)\n if not flag:\n return -1, current_player\n\n win, winner = self.board.get_game_status()\n\n movements = self.board.get_all_movements()\n self.board.change_player()\n\n return win, winner, movements", "title": "" }, { "docid": "538eb62d7f6b09abe3315e7e736d33d9", "score": "0.6192032", "text": "def askEndOfTurn(self):\n if self.played:\n self.player = self.nextPlayer(self.player)\n self.played = False\n self.captured = False\n return True\n return False", "title": "" }, { "docid": "9ca2bbab4e08e3b6fb17dbed2cb4a1b9", "score": "0.61678445", "text": "def wait_for_next(self, turn):\n\t\tself.turn_condition.acquire()\n\t\twhile self.turn < int(turn):\n\t\t\tself.turn_condition.wait()\n\t\tself.turn_condition.notify()\n\t\tself.turn_condition.release()", "title": "" }, { "docid": "d63750d2ced1e6c510ac3ae32caf9275", "score": "0.6166293", "text": "def _check_status(self):\n\t\tif self._is_full():\n\t\t\tself.finished = True\n\t\telif self._is_connect_four():\n\t\t\tself.finished = True\n\t\t\tself.winner = self.currentPlayer", "title": "" }, { "docid": "123114bd6cb425b0cbed064f49e2b407", "score": "0.61556214", "text": "def current_player(self):\n return self.__turn", "title": "" }, { "docid": "97af8b0dc66e388901e5cd7b756f63a1", "score": "0.61211133", "text": "def is_not_turn_of(self):\r\n if self.is_turn_of == self.player1:\r\n return self.player2\r\n else:\r\n return self.player1", "title": "" }, { "docid": "3ff731b40fa816d4c271eb9f8410fdb4", "score": "0.61130023", "text": "def play(self):\n done = False\n while not done:\n move = get_next_move()\n move_player(move)\n if 
self.board.game_is_over():\n done = True\n return 'Victory!'", "title": "" }, { "docid": "4bb9c9b92fecb21b2105b9851941490d", "score": "0.6105986", "text": "async def ready(self, ctx):\n if not await self.check_pm(ctx.message):\n return\n if self.game_status == 0:\n await ctx.send(embed=self.make_embed(\"Avalon\", 0xff4055, \"Error\", \"There is no active game. Start a game by running `[p]avalon start`.\"))\n return\n if ctx.author.id in self.ready_players:\n await ctx.send(embed=self.make_embed(\"Avalon\", 0xff4055, \"Error\", \"You are already ready.\"))\n return\n self.ready_players.append(ctx.author)\n for player in self.players:\n await player.send(\"{0} has readied up! ({1}/{2})\".format(self.displayname(ctx.author), len(self.ready_players), len(self.players)))\n\n if set(self.ready_players) == set(self.players):\n if len(self.players) < self.min_players:\n for player in self.players:\n await player.send(embed=self.make_embed(\"Avalon\", 0xff4055, \"Error\", \"You must have 5 players to start the game.\"))\n else:\n self.game_status = 2\n await self.assign_roles()\n #self.bot.loop.create_task(self.game_loop())", "title": "" }, { "docid": "b825985f5630aa8e6efec69e57733dd1", "score": "0.60842305", "text": "def _check_status(self):\r\n if self._is_full():\r\n self._finished = True\r\n elif self._are_someone_won():\r\n self._finished = True\r\n self._winner = self._current_player", "title": "" }, { "docid": "2e24dad6001b1942dc233007d37b151f", "score": "0.6078226", "text": "def __do_step_take_turn(self):\r\n self._state.game.current_actor.take_turn()\r\n\r\n if not self._state.activity == States.TAKE_TURN:\r\n return\r\n\r\n if (self._state.game.engine.is_terminal()):\r\n self._state.activity = States.WIN\r\n else:\r\n # Switch turn\r\n temp = self._state.game.current_actor\r\n self._state.game.current_actor = self._state.game.waiting_actor\r\n self._state.game.waiting_actor = temp\r\n # Tell engine to prepare next turn\r\n self._state.game.engine.to_next_turn()", "title": "" }, { "docid": "f8e287991f87de7ba02493b03e3fcfa4", "score": "0.60237706", "text": "def run_turn(self):\n # <<-- Creer-Merge: runTurn -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.\n # Put your game logic here for runTurn\n\n if self.game.moves:\n self.update_board(self.state, self.game.moves[-1])\n self.nicePrintMove(self.game.moves[-1], len(self.game.moves))\n\n\n action = self.TLHTQSID_ABDLMM(self.state, self.depth_limit)\n\n #Gets piece from x1, y1, x2, y2, promotion\n move_from = bToP(action.x1, action.y1)\n move_to = bToP(action.x2, action.y2)\n piece = None\n for p in self.player.pieces:\n if p.file == move_from[0] and p.rank == move_from[1]:\n piece = p\n #Check Promotion\n if action.promotion:\n piece.move(move_to[0], move_to[1], action.promotion)\n else:\n piece.move(move_to[0], move_to[1])\n self.state.enPass = ''\n \n self.update_board(self.state, self.game.moves[-1])\n self.nicePrintMove(self.game.moves[-1], len(self.game.moves))\n \n return True\n # <<-- /Creer-Merge: runTurn -->>", "title": "" }, { "docid": "0a3cbac75432e2b72395fbc9da107164", "score": "0.60225016", "text": "def wait_player(self) :\n if len(self.players) == 4 :\n # Shuffle to determine teams\n random.shuffle(self.players)\n # Reset new id with new order\n for i, j in enumerate(self.players):\n j.id = i\n # Creation of teams and go forward in the game (choice turn)\n self.teams = Team(self.players[::2]), Team(self.players[1::2])\n self.notify(u\"Les équipes : %s contre %s\"% 
self.teams)\n self.set_state('init_choice')", "title": "" }, { "docid": "661158a9af91c19d8b7331e1921156f9", "score": "0.60077447", "text": "def next_turn(self):\n # if player has another turn\n is_finished = self.turn.finish_turn()\n if is_finished is False:\n self.update_users()\n return\n\n # increment player index\n for i in range(0, self.iterate_turn_by):\n self.player_turn_index += self.direction\n if self.player_turn_index == -1:\n self.player_turn_index = len(self.players) - 1\n elif self.player_turn_index == len(self.players):\n self.player_turn_index = 0\n\n self.iterate_turn_by = 1\n\n self.turn = self.players[self.player_turn_index]\n self.turn.start_turn()", "title": "" }, { "docid": "eabe4555ee6a5c2c3c6e46a61a795b2a", "score": "0.6005897", "text": "def elready(data):\n tiro_t = minimax(data['board'], data['player_turn_id'], True, 0, 7)\n s.emit('play', {'tournament_id': TID, 'player_turn_id': data['player_turn_id'], 'game_id': data['game_id'], 'movement': tiro_t})", "title": "" }, { "docid": "2ad929c79db327b2be1d01e9f190eac4", "score": "0.5988043", "text": "def play(self, player, game):\r\n return", "title": "" }, { "docid": "4b7e7f731f75a90cd2a96297f8865188", "score": "0.5981913", "text": "def player_turn(self):\n return self._turn_index % len(self._players)", "title": "" }, { "docid": "5b07bd116d5569e8c02ab6a5c616623a", "score": "0.59712094", "text": "def player_turn(self) -> Tuple[int, bool]:\n player_blocked = False\n input_message = \"1) Attack 2) Defend 3) Inventory 4) Flee\\n\"\n\n while True:\n while (selection := input(input_message).lower()) not in \\\n (\"1\", \"attack\", \"2\", \"defend\", \"3\", \"inventory\", \"4\" \"flee\"):\n print(\"Invalid Input\")\n\n if selection == \"1\":\n player_damage = self.player_dmg - self.en.defence if self.player_dmg - self.en.defence >= 1 else 1\n\n if random.randint(1, 100) in range(1, self.player_crit_chance + 1):\n player_damage += self.player_crit\n\n if self.en.block != 0:\n if random.randint(1, 100) in range(1, self.en.block + 1):\n player_damage = 0\n\n return player_damage, player_blocked\n\n elif selection == \"2\":\n player_blocked = True if random.randint(0, self.player_block) == 0 else False\n player_damage = 0\n return player_damage, player_blocked\n\n elif selection == \"3\":\n pass\n\n elif selection == \"4\":\n pass\n\n else:\n print(\"Invalid input\")", "title": "" }, { "docid": "381864f727d04ac160b03ce7269add55", "score": "0.59523976", "text": "def play(self, player, turn):\n while True:\n self.take_turn(player, turn)\n self.print_board()\n win, tie = self.check_for_win(player, turn)\n if win or tie:\n if win:\n print('Game over. %s wins!\\n' % player)\n else:\n print('Game over. It\\'s a tie.\\n')\n break\n turn += 1\n player = self.switch_player(turn)", "title": "" }, { "docid": "2104730bea185a6486edbbbf3689e0f3", "score": "0.594999", "text": "def play(self):\n while self._board.check() == 2: # moves available\n print(self._board)\n # print('current player: {}'.format(str(self._players[self.current_player])))\n self._board = self._players[self.current_player].play(self._board)\n self._change_player()\n\n if self._board.check() == 'o':\n self._change_player()\n winner = self._players[self.current_player]\n print('\\n\\nWinning o! Congrats, {}'.format(winner.name))\n\n elif self._board.check() == 'x':\n self._change_player()\n winner = self._players[self.current_player]\n print('\\n\\nWinning x! Congrats, {}'.format(winner.name))\n\n else:\n winners = (self._players[1], self._players[2])\n print('\\n\\nTIE! 
Congrats, {}. Anf for you {}, too.'.format(winners[0].name, winners[1].name))\n print(self._board)\n return self._finish()", "title": "" }, { "docid": "e1440d854d0ea50e74b9067733b4041d", "score": "0.59318304", "text": "def play(user_name, user_self_board, user_shot_board, cpu_self_board, cpu_shot_board):\n\n # Welcoming the user\n print(f\"Welcome to BATTLESHIP, {user_name}. The CPU is ready to play vs you.\")\n time.sleep(1)\n\n return user_first_turn(user_name, user_self_board, user_shot_board, \n cpu_self_board, cpu_shot_board)", "title": "" }, { "docid": "5643406f57f861a82f910756ea34329e", "score": "0.59298694", "text": "def player_vs_player(self):\n has_game_ended = False\n while not (has_game_ended):\n\n self.show_game()\n print(\"PLAYER {} TURN!\".format(self.current_player.no_player))\n self.current_player.print_pawns()\n\n chosen_pawn_id = int(input(\"Choose which Pawn ID to play with: \"))\n possible_moves = self.current_player.listAllPossibleMove(\n chosen_pawn_id, self.board\n )\n\n for i in range(len(possible_moves)):\n print(\"{}. {}\".format(i + 1, possible_moves[i]))\n\n chosen_move = int(input(\"Select the desired move by entering the number: \"))\n self.current_player.move_pawn(\n chosen_pawn_id, possible_moves[chosen_move - 1], self.board\n )\n if self.board.check_winner(self.current_player.no_player):\n has_game_ended = True\n else:\n self.next_turn()\n\n self.show_game()\n print(\"Player {} win the game!\".format(self.current_player.no_player))", "title": "" }, { "docid": "d818925cd0f36c61a80c5bf279ad5fdb", "score": "0.5925173", "text": "def passTurn(self):\n self.passCounter+=1\n if(self.passCounter<(len(self.playerTiles)*2)):\n \n self.recall()\n self.coverTiles=True\n self.messageBox=True\n self.redrawAll()\n tkMessageBox.showinfo(title=\"Turn Change\",\n message=\"Player %d are you ready?\"\n %(((self.currentPlayer+1)%len(\n self.playerTiles))+1))\n self.currentPlayer=((self.currentPlayer+1)%len(self.playerTiles))\n self.coverTiles=False\n self.messageBox=False\n self.timeMinutes=1\n self.timeSeconds=0\n self.dictionaryX1,self.dictionaryX2=0,0\n self.dictionaryY1,self.dictionaryY2=0,0\n self.dictionarySet=[]\n else:\n self.gameOver()", "title": "" }, { "docid": "584084d80e58656050d99de80e772f6d", "score": "0.59094185", "text": "def readyok(sid, data=None):\n game = sid_game[sid]\n color = game.position.get_side_to_move()\n\n for client in game.clients:\n if client.sid == sid:\n client.readyok = True\n\n if game.clients[0].readyok and game.clients[1].readyok:\n # Send `usinewgame` message to the clients.\n sio.emit('usinewgame', namespace='/match', room=game.clients[0].sid)\n sio.emit('usinewgame', namespace='/match', room=game.clients[1].sid)\n\n # Ask a first move.\n game.ongoing = True\n ask_nextmove(game, color)\n\n display(game)", "title": "" }, { "docid": "010781d943fd31e610720ac9c7ca1cf1", "score": "0.59060717", "text": "def _play_game(self, opponent=RandomPlayer, my_turn=True):\n state = copy.deepcopy(self.game)\n player1 = RandomPlayer(self.mark, self.opponent, state)\n player2 = opponent(self.opponent, self.mark, state)\n if my_turn:\n winner = ConnectFour.play(p1=player1, p2=player2, game=state, verbose=False)\n\n else:\n winner = ConnectFour.play(p1=player2, p2=player1, game=state, verbose=False)\n\n if my_turn:\n return winner == 0\n\n else:\n return winner == 1", "title": "" }, { "docid": "7a9067f294fed9a858e7e6e528ccad70", "score": "0.5901015", "text": "def switch_player(self):\r\n player_names = list(self._players.keys())\r\n\r\n if self._turn == 
player_names[0]:\r\n self._turn = player_names[1]\r\n else:\r\n self._turn = player_names[0]", "title": "" }, { "docid": "5d7408b0e585326bcf27ab1c347d9045", "score": "0.5900848", "text": "def switch_player(self, turn):\n current_player = '0' if turn % 2 == 0 else 'X'\n return current_player", "title": "" }, { "docid": "31404a4b5fa2e31a0c3aa4a43ba67ab9", "score": "0.5898616", "text": "def is_players_turn(game, player):\n if game:\n players_turn = game.players_turn\n if player:\n number = player.number\n is_thier_turn = False\n if number == players_turn:\n is_thier_turn = True\n return is_thier_turn", "title": "" }, { "docid": "da03aa17cf17924122aa6876e645aa83", "score": "0.58936787", "text": "def _take_turn(self, user_input, computer_input=None):\n\n # TODO: refactor common method parts here\n pass", "title": "" }, { "docid": "ac91777da6885a37ca9a5acdaee769de", "score": "0.58842885", "text": "def play_game(self):\n p = 1\n while self.win == 0:\n self.player_play(p)\n print(self.board.board)\n if p == 1:\n p = 2\n else:\n p = 1\n self.win = self.board.has_winner('x' if p == 1 else 'o')\n if self.win == 1:\n return \"Player 1\"\n elif self.win == 2:\n return \"Player 2\"", "title": "" }, { "docid": "45333ef6d905f1260b0e7b4e5c77f601", "score": "0.58804977", "text": "def play(self) -> None:\n while True:\n self.start_new_move()\n self._do_turn()\n if self.current_move.knocking is True:\n print(\"{} has knocked to end the game\".\n format(self._current_player))\n # FIXME - add validity checking & scoring\n break\n self.next_turn()", "title": "" }, { "docid": "d06d269d9fdf05cd9baa24c76a2c8cf2", "score": "0.5877676", "text": "def get_result(self, player):\n # If the play has nothing in their hand then they've won.\n return 1 if not self.player_hands[player] else 0", "title": "" }, { "docid": "0b28fe7fff2dfbd34af79d4d8f2c0871", "score": "0.5875138", "text": "def main(self):\n while(isinstance(self.finished, bool) and (not self.finished)): # not self.finished is not useful\n printBoard(self.board, self.player)\n normalInput, row, col = self.prompt()\n if(normalInput):\n self.playAgain = True\n self.captured = False\n self.played = False\n while(self.playAgain):\n direction, moves = self.promptDirection(isKing(self.board, row, col))\n # ---- cut -----\n if(direction == 's'):\n self.playAgain = False\n if(not self.played):\n print('Player want to select another piece.')\n self.player = self.nextPlayer(self.player)\n else:\n error = self.makeMove(row, col, direction, moves)\n if error == NO_ERROR:\n printBoard(self.board, self.player)\n else:\n print('Error: ' + strerr(error) + '.')\n if(error in (NO_PIECE, OPPONENT_PIECE)):\n self.playAgain = False\n self.player = self.nextPlayer(self.player)\n self.finished = checkEndOfGame(self.board, self.player)\n if(isinstance(self.finished, int) and self.finished != 0):\n self.playAgain = False\n becomeKing(self.board, row, col)\n self.player = self.nextPlayer(self.player)\n self.finished = checkEndOfGame(self.board, self.player)\n elif(row != 0):\n self.player, self.board = row, col # game loaded\n else:\n self.finished = 0 # Abording (interrupt request)\n self.player = self.nextPlayer(self.player)\n print(self.sayWinner())", "title": "" }, { "docid": "6524076a7267500b1dfb77d6def1b673", "score": "0.58713496", "text": "def current_player(self):\n if self.turn_count % 2 != 0:\n return self.player1\n else:\n return self.player2", "title": "" }, { "docid": "3a10c4c12ee643b5136ca9c32f11f2a9", "score": "0.58511347", "text": "def get_opponent(self):\n if 
self.get_turn() == 'red':\n return self.get_blue_player()\n return self.get_red_player()", "title": "" }, { "docid": "70aa5da5cccdb4ce47a40c7c1aec0d5b", "score": "0.5850025", "text": "def player_won(self):\n if self.game_complete:\n return (self.__secret_word == self.__revealed_word)\n else:\n return None", "title": "" }, { "docid": "62ba25c0e87f7246622d203be2a5b38e", "score": "0.58446074", "text": "def take_turn(self, player_index: int, turn: tuple) -> bool:\r\n\r\n if 0 > player_index >= len(self.players):\r\n # player does not exist\r\n return False\r\n if not turn[0] in ('play', 'pickup'):\r\n # turn not valid\r\n return False\r\n if turn[0] == 'play' and len(turn) != 2:\r\n # no card name provided with 'play' turn\r\n return False\r\n \r\n player_obj = self.players[player_index]\r\n\r\n print(f'[{player_index}] : {turn}')\r\n\r\n if turn[0] == 'play':\r\n card_name = card_logic.real_card_name(turn[1])\r\n if not card_name in player_obj.hand:\r\n # player doesn't have card\r\n return False\r\n if not card_logic.card_playable(card_name, self.upfacing_card):\r\n # card not playable\r\n return False\r\n\r\n card_type = card_logic.card_type(card_name)\r\n\r\n used_plus = False\r\n is_special = card_type == 'special'\r\n if is_special:\r\n if card_name[1] == 'p': # a plus card\r\n plus_count = 2\r\n if card_name[0] == '_':\r\n plus_count = 4\r\n self.stacked_plus += plus_count\r\n used_plus = True\r\n \r\n if used_plus or self.stacked_plus==0:\r\n self.put_card(self.upfacing_card)\r\n self.upfacing_card = turn[1]\r\n player_obj.hand.remove(card_name)\r\n\r\n if is_special:\r\n if card_name[1] == 's': # a skip card\r\n self.players_turn += self.turn_direction\r\n elif card_name[1] == 'r': # a reverse card\r\n self.turn_direction *= -1\r\n\r\n else:\r\n for i in range(self.stacked_plus):\r\n player_obj.hand.append(self.take_card())\r\n self.stacked_plus = 0\r\n \r\n self.players_turn += self.turn_direction\r\n self.players_turn %= len(self.players)\r\n\r\n return True\r\n \r\n elif turn[0] == 'pickup':\r\n if self.stacked_plus == 0:\r\n top_card = self.take_card()\r\n player_obj.hand.append(top_card)\r\n else:\r\n for i in range(self.stacked_plus):\r\n player_obj.hand.append(self.take_card())\r\n self.stacked_plus = 0\r\n \r\n self.players_turn += self.turn_direction\r\n self.players_turn %= len(self.players)\r\n \r\n return True", "title": "" }, { "docid": "ea5cb8e5290b877f46038a653cee7861", "score": "0.58434075", "text": "def mc_trial(board, player):\n player_tmp = player\n winner = None\n # game is in progress\n while winner == None:\n empty = board.get_empty_squares()\n row, col = random.choice(empty)\n board.move(row, col, player_tmp)\n winner = board.check_win()\n player_tmp = provided.switch_player(player_tmp)", "title": "" }, { "docid": "7d05c7e604d549ee4333ecb82630039a", "score": "0.58423847", "text": "def run(self):\n player = random.randint(0, 1)\n print(\"Game started...\\n\")\n if player == 0:\n while True:\n self.__players_turn()\n if self.__game.is_game_won() or self.__game.is_draw(): break\n self.__computers_turn()\n if self.__game.is_game_won() or self.__game.is_draw(): break\n else:\n while True:\n self.__computers_turn()\n if self.__game.is_game_won() or self.__game.is_draw(): break\n self.__players_turn()\n if self.__game.is_game_won() or self.__game.is_draw(): break", "title": "" }, { "docid": "fba3306ac3f21e80f75988f141c2d1c7", "score": "0.58382976", "text": "async def is_my_turn() -> bool:\n my_turn = False\n my_turn_html = 'You to play'\n\n source = Bs(await 
gb.page.content(), 'lxml').prettify()\n\n if my_turn_html in source:\n my_turn = True\n\n return my_turn", "title": "" }, { "docid": "5ae54883f580a175150d6fd9406863e5", "score": "0.58329713", "text": "def player(board):", "title": "" }, { "docid": "12ede042c3039162a97bf684bd75fb89", "score": "0.5831257", "text": "def test():\n loop.counter+= 1\n l = api.settings('turnsLimit')\n return loop.world.isFinished() or l < loop.counter + 1 and 0 < l", "title": "" }, { "docid": "b394f9f3d647baa32c9feb2cb7047110", "score": "0.5824326", "text": "def play_singleplayer(self):\n try:\n player_turn = True\n while not self._backend.game_over():\n same_turn = False\n if player_turn:\n same_turn = self._display.move(True)\n else:\n same_turn = self._computer.move()\n if not same_turn:\n player_turn = not player_turn\n except KeyboardInterrupt:\n pass\n finally:\n curses.endwin()", "title": "" }, { "docid": "0312530703d8f87243f9d0e5d9fd4c36", "score": "0.5806062", "text": "def take_turn(self, try_again=False):\n if try_again:\n move = input(f'Invalid move {self.current_player.name}. Try again :')\n else:\n move = input(f\"Make your move {self.current_player.name} : \")\n\n if self.__is_valid_turn(move):\n self.grid.grid[(move[0], move[1])] = self.current_player.badge\n self.grid.print_grid()\n self.__check_state()\n self.__switch_player()\n else:\n self.take_turn(try_again=True)", "title": "" }, { "docid": "1026e6cf52a4c104973d00ff2575ce1f", "score": "0.5805168", "text": "def play(self):\r\n \r\n # flad if game is over:\r\n game_is_not_over = self.game_still_going()\r\n # on first round, no car is at TARGET_CELL, so returns True\r\n \r\n print(Game.MSG['welcome']) # prints welcome msg\r\n \r\n print(self.board_game) # prints current board\r\n \r\n while game_is_not_over:\r\n self.__single_turn()\r\n game_is_not_over = self.game_still_going()\r\n print(self.board_game)\r\n \r\n print(Game.MSG['victory'])", "title": "" }, { "docid": "3637a127770d139a1a655a7b7020ec99", "score": "0.5801913", "text": "def current_turn(self):\n return self._current_turn", "title": "" }, { "docid": "34724c8a0df74bf0ad0d663a20a7d519", "score": "0.5801577", "text": "def check_vencedor(self):\n player_on = []\n for player in self.jogadores:\n if player.status:\n player_on.append(player)\n if len(player_on) == 1:\n return player_on[0]", "title": "" }, { "docid": "a1742762ee52d318fa2ad23bc82eaab4", "score": "0.5798222", "text": "def turn(self):\n self.score += self.play() ## increment the player's score\n self.add_player_status() ## mark the player's status\n self.add_player_score() ## mark the player's score\n self.add_turn() ## increment the player's turn count", "title": "" }, { "docid": "b0a0c0d6b4c0309753406968dc672e0f", "score": "0.5796997", "text": "def player_loop(self):\n\n while True:\n # send message to game that you are ready\n msg = self.receiver()\n if msg[\"game_over\"]:\n return", "title": "" }, { "docid": "60f3af6598b7c40070e2ff426ae3df45", "score": "0.5796779", "text": "def take_turn(self, state):\n can_pass = state.players[0].coins > 0\n node = self.tree.nodes[state.prehash()]\n node['can_pass'] = can_pass\n\n prob_take = 1/2 if can_pass else 1\n new_state = deepcopy(state)\n new_state.take()\n self.add_edge(state, new_state, prob_take)\n # After taking a card, deal a new one.\n self.deal_card(new_state)\n\n if can_pass:\n # Since the player has an choice to make, we initialize some fields\n # that will be used to track the startegy learning.\n node['visits'] = 0\n node['regret'] = [0, 0] # pass, take\n \n new_state = 
deepcopy(state)\n new_state.pass_turn()\n self.add_edge(state, new_state, 1/2)\n # After passing the turn, next player takes turn.\n self.take_turn(new_state)", "title": "" }, { "docid": "c2faf177f5e371b3adecb0906c36232d", "score": "0.5791997", "text": "def next_play(board, selection, active_player):", "title": "" }, { "docid": "8ce81b262530296dc69dc66dd38bddec", "score": "0.5790241", "text": "def get_player(self):\r\n return self.logged_in and self.player", "title": "" }, { "docid": "19d8ebad24ec79efe6f4f838366b33a7", "score": "0.57900465", "text": "def insurance(player, taken=False):\n pass", "title": "" }, { "docid": "f25a6e7b709a644ddfce9963fd48f9e2", "score": "0.5789911", "text": "def get_next_player(self) -> None:\n if not any([player.total_score >= 10000 for player in self.base.list_o_players]):\n temp = self.base.list_o_players.popleft()\n self.base.current_player = temp\n if self.base.current_player.first_turn and not self.base.current_player.comp_player:\n self.open_first_popup(self.base.current_player)\n self.base.list_o_players.append(temp)\n if self.base.current_player.comp_player:\n Clock.schedule_once(self.base.buttons.roll.on_release, 1.)\n\n else:\n self.base.list_o_winners.append(self.base.current_player)\n self.base.current_player = self.base.list_o_players.popleft()\n if self.base.list_o_players and self.base.current_player.comp_player:\n Clock.schedule_once(self.base.buttons.roll.on_release, 1.)\n if self.base.list_o_players and not self.base.current_player.comp_player:\n self.open_last_popup()\n\n if not self.base.list_o_players:\n self.find_winner()", "title": "" }, { "docid": "674bb1fa325fbd6e6d94e216bdb38d62", "score": "0.5786288", "text": "def after_turn(self):\n pass", "title": "" }, { "docid": "dc809cccc70bf9c0c17cfab90b17b272", "score": "0.5783267", "text": "def play_outside_of_turn(self):\n pass", "title": "" }, { "docid": "c9058768ed3fccbbda2d6c96f0e01855", "score": "0.5781783", "text": "def next_turn(currentTurn):\n if currentTurn == X:\n return O\n else:\n return X", "title": "" }, { "docid": "9846e4118640e49557aa3dd98bd78410", "score": "0.57740325", "text": "def turret_ready(self):\n return self.time_since_shot > self.cooldown", "title": "" }, { "docid": "736e059c4ff6eac0fb9bbe1bc1db2a3c", "score": "0.5772326", "text": "def __players_turn(self):\n try:\n print(self.__table)\n y = self.__read_int(\"Your turn,\\nmove: \")\n self.__game.player_moves(y)\n if self.__game.is_game_won():\n print(self.__table)\n print(\"Well done, human.\")\n elif self.__game.is_draw():\n print(self.__table)\n print(\"Draw.\")\n except ValidationException as ve:\n print(ve)\n self.__players_turn()", "title": "" }, { "docid": "d049b0228a2f9cd46a4e8a1da1b7c127", "score": "0.5768517", "text": "def change_player(self):\r\n if self.is_turn_of == self.player1:\r\n self.is_turn_of = self.player2\r\n else:\r\n self.is_turn_of = self.player1", "title": "" }, { "docid": "7dee9f3bd1f0ce852ca7fdfc81949e54", "score": "0.57684773", "text": "def choice(self, player_s, take, suit=None) :\n\n player = self.find_player(player_s)\n\n if self.state != 'wait_choice' and self.state != 'sec_choice':\n self.notify(u\"Pas un tour de demande\")\n return\n\n if player != self.players[self.player_idx] :\n self.notify(u\"Pas à ton tour de parler\")\n return\n\n if take :\n if self.state == 'wait_choice' :\n self.trump_suit = self.propos.suit\n if self.state == 'sec_choice' :\n if suit is None:\n self.notify(u\"On choisit un couleur au deuxième tour\")\n self.set_state(self.state)\n return\n if suit == 
self.propos.suit :\n self.notify(u\"Nope, fallait prendre au premier tour !\")\n self.set_state(self.state)\n return\n self.trump_suit = suit\n self.bidder = player\n self.set_state('init_game')\n return\n else :\n # Said no, next player\n self.player_idx += 1\n self.set_state(self.state)\n return", "title": "" }, { "docid": "933a9b13a43a66d6a0b9cf7439ccba7f", "score": "0.5759454", "text": "def user_first_turn(user_name, user_self_board, user_shot_board, cpu_self_board, cpu_shot_board):\n\n print(f\"{user_name}, in this board you will see your shots.\")\n print(user_shot_board)\n time.sleep(2)\n print(\"HOW THE HITS ARE DISPLAYED:\\n- Hit: will be marked with X\\n- Water: will be marked with O\\n\")\n time.sleep(2)\n return user_turn(user_name, user_self_board, user_shot_board, cpu_self_board, cpu_shot_board)", "title": "" }, { "docid": "b57e27d193682795d290ea77fbb565d4", "score": "0.5756873", "text": "def check_winner(self, player, opponent):\n if self.sticks == 0:\n print(f\"\\n{player.name} LOSES!\\n\")\n print(\"\\nThanks for playing!\\n\\n\")\n self.update_memory(self.moves[opponent.name], self.moves[player.name])\n time.sleep(2)\n sys.exit()", "title": "" }, { "docid": "38780703013735ebb09b1ee560394cf7", "score": "0.5754888", "text": "def play_turn(player_name, sticks):\n choice = -1\n while choice != 1 and choice != 2:\n print_sticks(sticks)\n choice = int(input(player_name + \": pick 1 or 2 sticks?\\n\\t\"))\n if choice !=1 and choice != 2:\n print(\"Can only pick 1 or 2, not \"+str(choice))\n \n return choice", "title": "" }, { "docid": "6ae876d5ad1a647a51b6b76694231dcc", "score": "0.575369", "text": "def pre_turn_hook(self):", "title": "" }, { "docid": "af34301c6d181dab150f3b3220997945", "score": "0.5748553", "text": "def take_turn(self, game):\r\n self.hand.append(self.deck.draw())\r\n # Reset the targetable state at the beginning of each turn.\r\n self.is_targetable = True\r\n card = self._get_card_to_play()\r\n card.play(self, game)\r\n return card", "title": "" }, { "docid": "6e53317b74fd7c09e448eefcf0f4e9ad", "score": "0.5747229", "text": "def get_current_player(self):\n if self.get_turn() == 'red':\n return self.get_red_player()\n return self.get_blue_player()", "title": "" }, { "docid": "0c582c4942a93210d685619940a4d4f9", "score": "0.5746862", "text": "def on_before_turn(self, pokemon, foe):", "title": "" }, { "docid": "b3ab89a8ad0aaf8493812bd15df37e9e", "score": "0.5745037", "text": "def _set_next_player(self):\n if self.player_turn_queue < self.players:\n self.player_turn_queue += 1\n return True\n elif self.player_turn_queue == self.players:\n self.player_turn_queue = 1\n return True\n else:\n return None", "title": "" }, { "docid": "30b8c415bafbed4f8bec848e02e07921", "score": "0.57437575", "text": "def next_player(self):\n\n self.current_player.turn = False\n self.players.enqueue(self.current_player)\n self.current_player = self.players.dequeue()\n print '\\nNext player is {}'.format(self.current_player.name)\n self.current_player.turn = True", "title": "" }, { "docid": "ad8bd7985bb48ecf425bb303df70a2f3", "score": "0.5742927", "text": "def play(self):\n self.player1 = game_agent.AlphaBetaPlayer(score_fn=sample_players.improved_score)\n self.player2 = game_agent.AlphaBetaPlayer(score_fn=game_agent.custom_score)\n self.game = isolation.Board(self.player1, self.player2)\n\n self.game.apply_move(random.choice(self.game.get_legal_moves()))\n\n if verbose:\n print('Player 1', self.player1)\n print('Player 2', self.player2)\n winner, history, outcome = self.game.play()\n 
print(\"\\nWinner: {}\\nOutcome: {}\".format(winner, outcome))\n print(self.game.to_string())\n print(\"Move history:\\n{!s}\".format(history))\n return winner == self.player2\n else:\n winner, history, outcome = self.game.play()\n return winner == self.player2", "title": "" }, { "docid": "1908a464f2c9e92721860b763822f961", "score": "0.5742297", "text": "async def start_mission(self):\n # convert Players to names to display\n voted_accept_names = []\n for temp_player in self.voter.voted_accept:\n voted_accept_names.append(temp_player.name)\n voted_reject_names = []\n for temp_player in self.voter.voted_reject:\n voted_reject_names.append(temp_player.name)\n await self.general_channel.send(f'The team was accepted.\\nAccepted: {voted_accept_names}\\nRejected: {voted_reject_names}')\n await self.do_pre_mission_actions()\n # reset and open mission window\n self.voter.reset()\n self.set_window(2)\n # determine which mission cards are avaliable to each player out of 7 possibilities\n for temp_player in self.get_current_team():\n if temp_player.possible_mission_cards == [True, True, True]:\n await temp_player.member.dm_channel.send(f'You are on the Mission {self.get_round()} team. Please `>>mission success`, `>>mission fail`, or `>>mission switch`')\n elif temp_player.possible_mission_cards == [True, True, False]:\n await temp_player.member.dm_channel.send(f'You are on the Mission {self.get_round()} team. Please `>>mission success` or `>>mission fail`.')\n elif temp_player.possible_mission_cards == [True, False, True]:\n await temp_player.member.dm_channel.send(f'You are on the Mission {self.get_round()} team. Please `>>mission success` or `>>mission switch`.')\n elif temp_player.possible_mission_cards == [False, True, True]:\n await temp_player.member.dm_channel.send(f'You are on the Mission {self.get_round()} team. Please `>>mission fail` or `>>mission switch`.')\n elif temp_player.possible_mission_cards == [True, False, False]:\n await temp_player.member.dm_channel.send(f'You are on the Mission {self.get_round()} team. Please `>>mission success`.')\n elif temp_player.possible_mission_cards == [False, True, False]:\n await temp_player.member.dm_channel.send(f'You are on the Mission {self.get_round()} team. Please `>>mission fail`.')\n else:\n await temp_player.member.dm_channel.send(f'You are on the Mission {self.get_round()} team. 
Please `>>mission switch`.')\n await self.general_channel.send(f'Team members, prepare to conduct Mission {self.get_round()}.\\nPlease message me privately using the `>>mission` command.')\n await self.client.change_presence(activity=discord.Game(f'Conducting Mission {self.get_round()}!'))", "title": "" }, { "docid": "df818b26b8d9a71de683b1c1a43c207f", "score": "0.5739122", "text": "def turn():\n \n robottype = get_type()\n if robottype == RobotType.PAWN:\n pawn_turn()\n else:\n overlord_turn()\n bytecode = get_bytecode()", "title": "" }, { "docid": "df818b26b8d9a71de683b1c1a43c207f", "score": "0.5739122", "text": "def turn():\n \n robottype = get_type()\n if robottype == RobotType.PAWN:\n pawn_turn()\n else:\n overlord_turn()\n bytecode = get_bytecode()", "title": "" }, { "docid": "7f859b5552ba63f0e29640d235b860b4", "score": "0.57384795", "text": "def startNextTurn(self):\n if self.spinsRemaining <= 0:\n self.roundEnd()\n else:\n self.game_screen.wheel.enableSpin()", "title": "" }, { "docid": "6cb6602c43765eeebd5d78305bcde113", "score": "0.5733218", "text": "def pollPlay(self, state):\n # chooses a card from the players hand\n hand = self.player.getHand()\n cardOfChoice = self.humanPlayerGui.decisionMaking_Playing(hand,\n self.player.canEmergencyStop())\n if cardOfChoice is not None:\n #return choice\n return cardOfChoice\n else:\n # returns None as long as no choice is made\n return None", "title": "" }, { "docid": "48c7670402935bcdfc17bde2949a5e56", "score": "0.5730701", "text": "def restart(self):\n\t\treturn self.winner", "title": "" }, { "docid": "a28fbddb61ea1028ad1d0fec7877007c", "score": "0.57151437", "text": "def act(self):\n self.currentLevel.player.deck.draw()\n self.currentLevel.player.regenerate()\n return config.TURN_TICKS", "title": "" }, { "docid": "35b9e8f04fe703395e89c12481da1425", "score": "0.5709444", "text": "def update_player(self):\n self.player = self.player % 2 + 1\n return self.current_player()", "title": "" }, { "docid": "18deff92de2d2ea2ca1d293ce0ff828d", "score": "0.5707758", "text": "def next_up(self, player):\n if player is self.player1:\n return self.player2\n else: \n return self.player1", "title": "" }, { "docid": "006c94e6369196f24a0706db9ed633dc", "score": "0.5703956", "text": "def getNextPlayer(self):\n\t\treturn self.__getPlayer(self.turn)", "title": "" }, { "docid": "4154dde189c3816ac34a9edfd16f4033", "score": "0.56963056", "text": "def game():\n \n print(\"Game: Pick one pick two!\\n\")\n print(\"\\n-------------------------\")\n \n multiplayer = True\n if len(sys.argv) > 1 and sys.argv[1]==\"ai\":\n multiplayer = False\n \n start(multiplayer)\n \n sticks = random.randint(15, 25)\n turn = 1\n #last_player = \"\"\n while sticks > 0:\n last_player = players[turn % len(players)]\n if(not multiplayer and last_player == \"AI\" ):\n sticks -= ai_turn(sticks)\n else:\n sticks -= play_turn( last_player, sticks )\n turn = turn + 1\n \n print(\"\\n-------------------------\")\n print(last_player + \" won!\")", "title": "" } ]
d90f519778af27fa3f96cd02ff606899
Get address from coordinates
[ { "docid": "d45fdc976395b175215ebb8584f0f811", "score": "0.72789454", "text": "def get_adress(x, y):\n url = str((\"http://api-adresse.data.gouv.fr/reverse/?lon=\" + str(x) + \"&lat=\" + str(y)))\n headers = {\"Content-Type\": \"application/json\"}\n r = requests.get(url, headers=headers, data=\"\")\n js = json.loads(r.text)\n address = js['features'][0]['properties']['label']\n return address", "title": "" } ]
[ { "docid": "46e5221e9cd0da12a88540f87d3dd5e7", "score": "0.7197132", "text": "def get_address(coordinates):\n locator = Nominatim(user_agent=\"myGeocoder\")\n location = locator.reverse(coordinates)\n locate = location.address.split(',')\n len_location = len(locate)\n address = locate[0] + ',' + locate[1] + ',' + locate[len_location - 5] + ',' + locate[len_location - 3] + ', USA - ' + locate[len_location - 2]\n return address", "title": "" }, { "docid": "7aeee10b053efc80249fda131a655e58", "score": "0.67783356", "text": "def get_coord_from_address(code_postal, adresse=None):\n headers = {\"Content-Type\": \"application/json\"}\n if adresse != None:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(adresse) + \"&postcode=\" + str(code_postal)))\n else:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(code_postal)))\n print(url)\n r = requests.get(url, headers=headers, data=\"\")\n js = json.loads(r.text)\n\n if len(js['features']) < 1:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(code_postal)))\n r = requests.get(url, headers=headers, data=\"\")\n js = json.loads(r.text)\n\n if code_postal == 75001:\n x = js['features'][1]['geometry']['coordinates']\n else:\n \tx = js['features'][0]['geometry']['coordinates']\n longitude = x[0]\n latitude = x[1]\n pos = []\n pos.append(longitude)\n pos.append(latitude)\n return pos", "title": "" }, { "docid": "d7b00aaae6fe5ec055685062d70bb670", "score": "0.63429296", "text": "def get_geolocation(self, ipaddr):\n ipad = ipaddress.ip_address(ipaddr)\n #print(ipad)\n #match = geoip.geolite2.lookup(ipaddr)\n #print(match)", "title": "" }, { "docid": "0510c200b34984d13bfb719eb14a7114", "score": "0.62939227", "text": "def get_geocode(self, address):", "title": "" }, { "docid": "27d980fa360118bdb67fe7b061cbacb2", "score": "0.6205955", "text": "def get_coordinates(ip):\n url = \"http://freegeoip.net/xml/{}\".format(ip)\n dom = minidom.parse(urllib.urlopen(url))\n lat = dom.getElementsByTagName('Latitude')[0].childNodes[0].nodeValue\n long = dom.getElementsByTagName('Longitude')[0].childNodes[0].nodeValue\n return (float(lat),float(long))", "title": "" }, { "docid": "6e6a0192548867f16b60d8abffb74552", "score": "0.6190394", "text": "def getAddress(self, coordinate):\n try:\n params = \"latlng={lat},{lon}\".format(lat=coordinate['latitude'], lon=coordinate['longitude'])\n service = ServiceUtil().getDefaultService()\n service_ep = service.getServiceUrl()\n url = '{base}&{params}'.format(base=service_ep, params=params)\n return self.__servicecall(service, url, params, True)\n except Exception:\n raise GeoException(\"Service request fails\")", "title": "" }, { "docid": "e0e99a3558fa0d4f281da8e3fa025083", "score": "0.6149384", "text": "def _GetAddress(self):\n raise NotImplementedError", "title": "" }, { "docid": "63d23b8bb55107ddb0be27fea766b5e2", "score": "0.61307585", "text": "def getNearestLocation(iss):\n \n nearestLocation = reverse_geocode.get(iss[\"coordinates\"])\n return nearestLocation", "title": "" }, { "docid": "e28034a3d31343eaa7a432e277ac72b4", "score": "0.6122911", "text": "def getAddress(self):\n\t\treturn self.address", "title": "" }, { "docid": "ff425a5114b87cbec6974f5817dec143", "score": "0.6085749", "text": "def coords(self):\n temp=_a2cr(self.address)\n if len(temp)==2:\n return temp[0],temp[1],temp[0],temp[1]\n return temp[0],temp[1],temp[2],temp[3]", "title": "" }, { "docid": "294cb8bfd614b26d694f0052d19e9282", "score": "0.60812336", "text": "def _get_address(self, address):\n return 
self.pool.get('res.partner').\\\n get_partner_address(self.cr, self.uid, address)", "title": "" }, { "docid": "294cb8bfd614b26d694f0052d19e9282", "score": "0.60812336", "text": "def _get_address(self, address):\n return self.pool.get('res.partner').\\\n get_partner_address(self.cr, self.uid, address)", "title": "" }, { "docid": "314fa743d0641b5200c8178120b5ca25", "score": "0.60629743", "text": "def point_to_addr(self, pt: QPointF) -> Optional[Tuple[int, bool]]:\n ascii_column = False\n row = self.point_to_row(pt)\n if row is None:\n return None\n col = self.point_to_column(pt, self.byte_column_offsets)\n if col is None:\n col = self.point_to_column(pt, self.ascii_column_offsets)\n if col is None:\n return None\n else:\n ascii_column = True\n addr = self.row_col_to_addr(row, col)\n if addr < self.start_addr or addr >= self.end_addr:\n return None\n return addr, ascii_column", "title": "" }, { "docid": "ab4bd38da90a89424903dc5d9f7e4c9f", "score": "0.603997", "text": "def get_address(self):\n return self.address", "title": "" }, { "docid": "ab4bd38da90a89424903dc5d9f7e4c9f", "score": "0.603997", "text": "def get_address(self):\n return self.address", "title": "" }, { "docid": "ab4bd38da90a89424903dc5d9f7e4c9f", "score": "0.603997", "text": "def get_address(self):\n return self.address", "title": "" }, { "docid": "20fe84cebb68b2b6752f225c879e2499", "score": "0.60398674", "text": "def _get_address(self):\n return self.__address", "title": "" }, { "docid": "20fe84cebb68b2b6752f225c879e2499", "score": "0.60398674", "text": "def _get_address(self):\n return self.__address", "title": "" }, { "docid": "20fe84cebb68b2b6752f225c879e2499", "score": "0.60398674", "text": "def _get_address(self):\n return self.__address", "title": "" }, { "docid": "20fe84cebb68b2b6752f225c879e2499", "score": "0.60398674", "text": "def _get_address(self):\n return self.__address", "title": "" }, { "docid": "24f861ecade1e506e9a86ee66b187618", "score": "0.6010114", "text": "def geocode(s: requests.Session, address: str) -> Point:\n url = \"http://pagis.org/arcgis/rest/services/LOCATORS/AddressPoints/GeocodeServer/findAddressCandidates\"\n query = {\n \"category\": \"\",\n \"distance\": \"\",\n \"location\": \"\",\n \"magicKey\": \"\",\n \"maxLocations\": \"\",\n \"outFIelds\": \"\",\n \"outSR\": \"\",\n \"searchExtent\": \"\",\n \"Single Line Input\": address,\n \"Street\": \"\",\n \"ZIP\": \"\",\n \"f\": \"pjson\",\n }\n try:\n resp = s.get(url, params=query)\n j = resp.json()\n if len(j[\"candidates\"]) == 0:\n print(f\"No candidates for {address}\")\n return Point(0,0)\n loc_dict = j[\"candidates\"][0][\"location\"]\n location = Point(loc_dict[\"x\"], loc_dict[\"y\"]) # could just return dict\n except Exception as e: # a famous antipattern\n print(f\"Failed to fetch {address} with error:\")\n print(e)\n location = Point(0, 0)\n return location", "title": "" }, { "docid": "658c99c05bad35da1b006e09c504362d", "score": "0.60077477", "text": "def find_coords_by_bad_address(address):\n data = pd.read_excel(\"sample.xlsx\")\n rows = data.shape[0]\n for row in range(rows):\n if address in data.at[row, \"Address\"]:\n return [data.at[row, \"Latitude\"], data.at[row, \"Longitude\"]]\n return None", "title": "" }, { "docid": "ec6c3ff4ed6a742b7c1520937d5e5d8c", "score": "0.59989905", "text": "def find_location_by_bad_address(address):\n coords = find_coords_by_bad_address(address)\n if coords:\n return get_address(\",\".join([str(pos) for pos in coords]))\n return None", "title": "" }, { "docid": "a3804667323c7133c689c05304696895", 
"score": "0.5990293", "text": "def getAddress(self):\n \n parts = self.code.split()\n parts = parts[1].split(\",\")\n \n try:\n return parts[1]\n except:\n \" Apparently we're trying to return empty data, skipping. \"", "title": "" }, { "docid": "e087aa06ae852834e44b96456e6c2242", "score": "0.5989511", "text": "def find_map_coordinates(full_address, geolocator):\n \n # Create geocoder object\n location = geolocator.geocode(full_address)\n return location.latitude, location.longitude", "title": "" }, { "docid": "e4d28435e3c1689ae3399cafb5cb550a", "score": "0.59781873", "text": "def addr(self) -> Tuple[str, int]:\n\n return self.__addr", "title": "" }, { "docid": "4faf5fabde83d2639234205a37bf01b8", "score": "0.5958611", "text": "def reverse_address(address, original_lat = None, original_long = None):\n locator = Nominatim(user_agent=\"openmapquest\")\n try:\n location = locator.geocode(address)\n print(location)\n if location:\n return location.latitude, location.longitude\n else:\n if original_lat and original_long:\n print(\"Returning original lat and long\")\n return original_lat,original_long\n else:\n return 0,0\n except geopy.exc.GeocoderUnavailable as e:\n if original_lat and original_long:\n print(\"Returning original lat and long\")\n\n return original_lat,original_long\n else:\n return 0,0", "title": "" }, { "docid": "29f96cf373977117de18d280c1d8c809", "score": "0.5939801", "text": "def getCoordinate(self, address):\n\n params = \"address={address}\".format(address=address)\n geoService = ServiceUtil().getDefaultService()\n service_ep = geoService.getServiceUrl()\n url = '{base}&{params}'.format(base=service_ep, params=params)\n return self.__servicecall(geoService, url, params, False)", "title": "" }, { "docid": "694b9a5ba84e85996d01b13e06190374", "score": "0.59092116", "text": "def _get_latlon_ref(self):\n xodr = self.world.get_map().to_opendrive()\n tree = ET.ElementTree(ET.fromstring(xodr))\n\n # default reference\n lat_ref = 42.0\n lon_ref = 2.0\n\n for opendrive in tree.iter(\"OpenDRIVE\"):\n for header in opendrive.iter(\"header\"):\n for georef in header.iter(\"geoReference\"):\n if georef.text:\n str_list = georef.text.split(' ')\n for item in str_list:\n if '+lat_0' in item:\n lat_ref = float(item.split('=')[1])\n if '+lon_0' in item:\n lon_ref = float(item.split('=')[1])\n return lat_ref, lon_ref", "title": "" }, { "docid": "7da50a1bbbfdd51267784fb634e7f3f5", "score": "0.5899446", "text": "def pixel2coord(x, y,geoform):\n xoff=geoform[0] \n a=geoform[1]\n b=geoform[2]\n yoff=geoform[3]\n d=geoform[4]\n e=geoform[5]\n xp = a * x + b * y + xoff\n yp = d * x + e * y + yoff\n return(xp, yp)", "title": "" }, { "docid": "d762e3144f67aa44b2709b84ac5df735", "score": "0.58968854", "text": "def getWaypointAddress(self, step): \r\n return self.way[step]", "title": "" }, { "docid": "436762437353e0ef16ac28a22a5c1251", "score": "0.5892708", "text": "def get_address(self) -> str:\n return self.address", "title": "" }, { "docid": "2d5f5eb4228f4a013681ecfeee1c5afa", "score": "0.58841515", "text": "def getMinAddress(self) -> ghidra.program.model.address.Address:\n ...", "title": "" }, { "docid": "c569ffbe7983bef43e826a18a8bc5e6d", "score": "0.5879805", "text": "def address(self):\n return self._get_prop_value(self._ADDRESS_KEY)", "title": "" }, { "docid": "1d31b75eb4efa6a6ab292905a318dd65", "score": "0.58699214", "text": "def address(self, use_uncompressed=None):\n if self._use_uncompressed(use_uncompressed):\n return self._address_uncompressed\n return self._address_compressed", "title": 
"" }, { "docid": "5e115544cbf8c44604eff58143361798", "score": "0.58696544", "text": "def get_my_address(self):\n return self.vasp_address.as_str()", "title": "" }, { "docid": "0aa3a705a86d91039444b869f31ea002", "score": "0.5861548", "text": "def get_address(stop):\n\n line_name = find_line_by_stop(stop)\n stop = get_stop_detail(stop, line_name)\n address = get_address_by_coordinates(stop[\"lat\"], stop[\"lon\"])\n\n return address", "title": "" }, { "docid": "8c6698e66cac0f2c265e55259898ded9", "score": "0.58560514", "text": "def georss_coords(self, coords):\n if self.is_input_latitude_first:\n return u' '.join([u'%f %f' % x for x in coords])\n else:\n return u' '.join([u'%f %f' % (x[1], x[0]) for x in coords])", "title": "" }, { "docid": "351e3c09fd053dcab6f40c2776b5a465", "score": "0.5839486", "text": "def external_address(node):\n return node.networks[1].address", "title": "" }, { "docid": "83612e9c784d467ff3ce77c6c7d2f474", "score": "0.5837246", "text": "def get_gps_from_address(adress):\n\n google_api_url = \"http://maps.google.com/maps/api/geocode/json?address=%s&sensor=false\" \\\n % adress.encode('utf8')\n\n data_google = json.loads(requests.get(google_api_url).content)\n if data_google.get('results'):\n lat = float(data_google['results'][0]['geometry']['location']['lat'])\n lng = float(data_google['results'][0]['geometry']['location']['lng'])\n else:\n lat = 48\n lng = 2\n return lat, lng", "title": "" }, { "docid": "28847a63a223ea999540ec09dc3ec2c9", "score": "0.58335376", "text": "def getStartAddress(self) -> ghidra.program.model.address.Address:\n ...", "title": "" }, { "docid": "9c54cbeeb606743586c17ba1f665c8c8", "score": "0.5829284", "text": "def getAdresse ( self, c, style, info ) :\n self.getPreformatted ( c, 20, 235-35, 85, 35, style, info, 0 )", "title": "" }, { "docid": "e231be60224168b7bede151243b5b3a4", "score": "0.58288026", "text": "def getAddressForName(self,name):\n return HopperLowLevel.getAddressForName(self.__internal_document_addr__,name)", "title": "" }, { "docid": "0e7ba2d3530277de18beb9ae8ffb4673", "score": "0.582525", "text": "def get_geo(twitter_msg):\n try:\n x, y = twitter_msg.place[\"bounding_box\"][\"coordinates\"][0][0]\n return \"https://www.google.com/maps/place/{},{}\".format(y, x)\n except Exception as e:\n return \"\"", "title": "" }, { "docid": "7426e15eca5d4566b13ec71ac243d854", "score": "0.58202803", "text": "def ndaoAddress() -> address:\n return self.ndao", "title": "" }, { "docid": "59a46e5302a11737dc50aa40fcd33906", "score": "0.5817369", "text": "def read_address(node):\n return _get_attr(node, ATTR_ADDRESS)", "title": "" }, { "docid": "851978cf751b0569a4cde681fd118413", "score": "0.5791481", "text": "def get_location_coordinates(address, url=URL_LOCATION_REQUEST):\n querystring = {\n 'q': address,\n 'format': 'json',\n }\n # запрос\n response = requests.get(url, params=querystring)\n data = response.json()\n\n # ничего не нашлось\n if (len(data) == 0):\n return 200,None\n\n return data[0]['lat'], data[0]['lon'],data[0]['display_name']", "title": "" }, { "docid": "688ba219f19c53921d7d9f57201c6b40", "score": "0.57911193", "text": "def get_address_from_lat_long(self, location):\n try:\n location = self.geo_locator.reverse(str(location.get('latitude')) + ', ' + str(location.get('longitude')))\n return location.address\n except Exception as error:\n print('Exception in get distance..:' + str(error))\n print(format_exc().splitlines())\n return None", "title": "" }, { "docid": "08f38d683b1e8b21504f99cbfe41d036", "score": "0.5784676", "text": "def 
get_address(self):\n return self._fields['address']", "title": "" }, { "docid": "ea3aed1ba4463b72638cd7bddf6377a0", "score": "0.5780703", "text": "def get_addr(self):\n\t\treturn self._ADDR", "title": "" }, { "docid": "7e884403b71866cc8756c74b5ffaca38", "score": "0.5774532", "text": "def get_address(self, number):\n assert(number < len(self.__addresses))\n return self.__addresses[number]", "title": "" }, { "docid": "5246badfb00e9d6796d611fe580c6058", "score": "0.5767797", "text": "def internal_address(node):\n return node.networks[0].address", "title": "" }, { "docid": "90617241869fadbc05bb606396adddb6", "score": "0.57465184", "text": "def ipAdres(self):\n return self._ipAdres.get_waarde()", "title": "" }, { "docid": "195d6dc141e48f036abacdc4df44221e", "score": "0.5734523", "text": "def get_address(self) -> str:\n return self._address", "title": "" }, { "docid": "91fc0fd01586b6ad41dd9a7389394c17", "score": "0.57311213", "text": "def get_addr(symbol_entry):\n return symbol_entry[1]", "title": "" }, { "docid": "017a7a686fe50b06af597365507cc269", "score": "0.5721834", "text": "def get_location(self, origin):", "title": "" }, { "docid": "83b205b5920a598ae5f0108f37293b9a", "score": "0.5710465", "text": "def get_addr(self):\n return self._addr", "title": "" }, { "docid": "64bb55e816d5046b95954a1c8d3760e7", "score": "0.5709512", "text": "def get_coord(es_poi):\n coord = es_poi.get_coord()\n if coord:\n lon = coord.get(\"lon\")\n lat = coord.get(\"lat\")\n return (lat, lon)\n return None", "title": "" }, { "docid": "d9513bf5629f3d5ff2360b3783d6b2d8", "score": "0.5701218", "text": "def get_ip_location(self):\n self.fhdhr.logger.info(\"Getting location via IP Address.\")\n location_url = 'https://api.locastnet.org/api/watch/dma/ip'\n return self.get_geores_json(location_url, \"IP\")", "title": "" }, { "docid": "65143446ffd742ead86493e6ee359c94", "score": "0.56914073", "text": "def get_address():\n node = uuid.getnode()\n return \":\".join((\"%012X\" % node)[i : i + 2] for i in range(0, 12, 2))", "title": "" }, { "docid": "972caeacc15edb3282a0a2dc6b4d8411", "score": "0.56638354", "text": "def get_coordinates(location_name):\n locator = Nominatim(user_agent=\"myGeocoder\")\n location = locator.geocode(location_name)\n if location == None:\n return((None,None))\n return ((location.latitude,location.longitude))", "title": "" }, { "docid": "89be8c68e105e658d55dfc05be6b3101", "score": "0.56636477", "text": "def Addr(cls, placeholder: str):\n return cls(Op.addr, TealType.bytes, placeholder, TmplKind.Addr)", "title": "" }, { "docid": "05c33b17adeb99c9ec9865fd6ac0022a", "score": "0.5660498", "text": "def get_geo_coords(record):\n longitude = get_ds_field(record, \"034\", \"d\")\n latitude = get_ds_field(record, \"034\", \"f\")\n\n if pd.isnull(latitude) and not pd.isnull(longitude):\n latitude = get_ds_field(record, \"034\", \"d\", take_first=False)[1]\n\n return longitude, latitude", "title": "" }, { "docid": "f9e5dd4d8141f1586b978c19979776c4", "score": "0.5659446", "text": "def get_neighborhood(full_address):\n\n api_xml_parsed = parse_xml(full_address)\n api_xml_data = api_xml_parsed['api_parsed_data']\n neighborhood = api_xml_data.find('region').get('name')\n\n return neighborhood", "title": "" }, { "docid": "bf5e3f5a76bba8bd41ad076b7d891b64", "score": "0.56535065", "text": "def get_gps_crd(address):\n return geolocator.geocode(address)[1]", "title": "" }, { "docid": "0bf6c16b7d3bf20cc56aab863867ff2e", "score": "0.5645044", "text": "def evaluate(p: ghidra.program.model.listing.Program, s: unicode) -> 
ghidra.program.model.address.Address:\n ...", "title": "" }, { "docid": "e56c80b6779bb32058caa6deeb82c585", "score": "0.5644398", "text": "def reverse_geocode(userCoords):\r\n lat, lng = userCoords\r\n latlng = \"{0},{1}\".format(lat, lng)\r\n data = urllib.parse.urlencode({\"latlng\" : latlng,\r\n \"result_type\" : \"locality\",\r\n \"key\" : API_KEY})\r\n \r\n result = make_google_api_request(API_URL + data)\r\n if result[\"status\"] == \"OK\":\r\n return result[\"results\"][0][\"formatted_address\"]\r\n else:\r\n return \"Status: \" + result[\"status\"]", "title": "" }, { "docid": "88e37ea4a68b4835c8e4b8a9c13a4f26", "score": "0.5640328", "text": "def find_coordinates(place):\n geolocator = Nominatim(user_agent=\"specify_your_app_name_here\")\n location = geolocator.geocode(place)\n return location.latitude, location.longitude", "title": "" }, { "docid": "ed012416c05e8a550d9559229106be4b", "score": "0.5632032", "text": "def nearest_street(coordinates, host='http://router.project-osrm.org'):\n # FIXME: API does not seem to do what I expected, check\n try:\n url = '{}/nearest?loc={},{}'.format(host, coordinates[0], coordinates[1])\n response = requests.get(url)\n return response.json()['mapped_coordinate']\n except Exception as e:\n print(\"OSRM error: {}\".format(e))\n return 0", "title": "" }, { "docid": "a49cc28b4e1cfd09fa7a71c71b093e07", "score": "0.5628854", "text": "def find_location(ip_addr):\n try:\n conn = psycopg2.connect(host=GEOLITE_DB_HOST, port = GEOLITE_DB_PORT, database=GEOLITE_DB_NAME, user=GEOLITE_DB_USER, options = GEOLITE_DB_CONNECTION_OPTION)\n location = 'unknown:unknown' \n try:\n cur = conn.cursor()\n cur.execute('select find_location(\\'' + ip_addr + '\\');' )\n row = cur.fetchone()\n location = row[0]\n finally:\n conn.close()\n except Exception:\n location = 'error:error'\n return location", "title": "" }, { "docid": "c1ac368be7112876f163f54d9f2edcdf", "score": "0.56282103", "text": "def get(address_type, address):", "title": "" }, { "docid": "3f6b0b7344c2c54a0e867165310be12b", "score": "0.5623603", "text": "def get_XY(self,lat,lon):\n pass", "title": "" }, { "docid": "3053f66dee5afb43be1ed36f26f3d10e", "score": "0.5620131", "text": "def ipv4_neighbor_address(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NeighborAddressArgs']]]]:\n return pulumi.get(self, \"ipv4_neighbor_address\")", "title": "" }, { "docid": "cccfa2ac98a4ff1c2da7748e6dbc0073", "score": "0.56189775", "text": "def get_lat_lon_client(ip):\n try:\n url = 'http://freegeoip.net/json/%s' % (ip)\n data = simplejson.load(urlopen(url))\n lat = float(data['latitude'])\n lon = float(data['longitude'])\n if lat == 0.0 or lon == 0.0:\n lat, lon = 48.833, 2.333\n return lat, lon\n except:\n return 48.833, 2.333", "title": "" }, { "docid": "96ceef0c14a1e9e289ffa2eb4be2b186", "score": "0.56159556", "text": "def full_address(self):\n addr = \"\"\n # if self.building:\n # addr = addr + \"(\" + self.building + \") \"\n if self.house_number:\n addr = addr + self.house_number\n if self.street_prefix:\n addr = addr + \" \" + self.street_prefix\n if self.street:\n addr = addr + \" \" + self.street\n if self.street_suffix:\n addr = addr + \" \" + self.street_suffix\n if self.apartment:\n addr = addr + \" \" + self.apartment\n if self.city:\n addr = addr + \", \" + self.city\n if self.state:\n addr = addr + \", \" + self.state\n if self.zip:\n addr = addr + \" \" + self.zip\n return addr", "title": "" }, { "docid": "86a40704deab823acd527ca33cd36dd5", "score": "0.5614364", "text": "def 
get_coordinates_by_ip(ip_address):\n data = get_response(ip_address)\n latitude = data.get('latitude')\n longitude = data.get('longitude')\n return Coordinates(latitude=latitude, longitude=longitude)", "title": "" }, { "docid": "2efda172965386c0796f450830d5b2ec", "score": "0.56089973", "text": "def getCoords(geoFile,verbose):\n if verbose:\n print(\" <> Getting GEO coordinates from \", geoFile)\n nc = Dataset(geoFile)\n lon = nc.variables['clon'][:,:]\n lat = nc.variables['clat'][:,:]\n missing = nc.variables['clon'].missing_value\n return (nc,lon,lat,missing)", "title": "" }, { "docid": "01d406c45d32831a652913b6cd59a10c", "score": "0.5598835", "text": "def get_address(self):\n return self.account.address", "title": "" }, { "docid": "c5047f63832844679dd33abfeaf64ff6", "score": "0.55981636", "text": "def geo_reverse_coding(latitude, longitude, UA=UA):\n address = str(latitude) + ',' + str(longitude)\n location = geolocator.reverse(address, language='en')\n print('Lat&Lon:', address)\n print('location:', location.address)\n print('location.raw', location.raw)\n return location", "title": "" }, { "docid": "0b15abc79a9de688d2f54b3570e995fd", "score": "0.5596447", "text": "def address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"address\")", "title": "" }, { "docid": "0b15abc79a9de688d2f54b3570e995fd", "score": "0.5596447", "text": "def address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"address\")", "title": "" }, { "docid": "e13f9b1f1a50fc4778dc4a6c3c94ff2d", "score": "0.55876344", "text": "def address(self):\n return self._addr", "title": "" }, { "docid": "bf637384714a9db0830c4c1ca2a1c07d", "score": "0.55805266", "text": "def addr_to_point(self, addr: HexAddress, ascii_section: bool = False) -> QPointF:\n return self.row_col_to_point(*self.addr_to_row_col(addr), ascii_section)", "title": "" }, { "docid": "cc8a1d1320617558cfa0606ccb724951", "score": "0.5576166", "text": "def get_coords(query):\n response = geocoder.forward(query)\n if response.status_code and len(response.geojson()['features']) >= 1:\n first = response.geojson()['features'][0]\n return first['geometry']['coordinates']\n else:\n return handle_failure(query)", "title": "" }, { "docid": "1530aefe903cf7701771e997891b3c48", "score": "0.5572053", "text": "def ft2latlng(self,x,y):\r\n lat = self.home_lat-self.rad2angle(np.arcsin(x/self.earth_radius))\r\n lng = self.home_lng-self.rad2angle(np.arcsin(y/self.earth_radius))\r\n return (lat,lng)", "title": "" }, { "docid": "9f84a13c1fa96ea4c77e1545641f9c3b", "score": "0.5551644", "text": "def get_pos(link_id, off, lattice_link_length):\n ((x0, y0), (x1, y1)) = link_id\n u = off / lattice_link_length\n lat = lattice_link_length * (u * x1 + (1 - u) * x0)\n lng = lattice_link_length * (u * y1 + (1 - u) * y0)\n return (lat, lng)", "title": "" }, { "docid": "4b45b68da3d49e062a0dc6d5c90fa26d", "score": "0.5545421", "text": "def _hexword2lonlat(self, hex_str):\n b = format(int(hex_str[12:14], 16), \"08b\")\n # newpos = int(b[7])\n lonneg = int(b[1])\n latneg = int(b[0])\n lat = (\n (-1) ** latneg\n * (\n int(hex_str[:2], 16) * 65536\n + int(hex_str[2:4], 16) * 256\n + int(hex_str[4:6], 16)\n )\n / 5e4\n )\n lon = (\n (-1) ** lonneg\n * (\n int(hex_str[6:8], 16) * 65536\n + int(hex_str[8:10], 16) * 256\n + int(hex_str[10:12], 16)\n )\n / 5e4\n )\n return lat, lon", "title": "" }, { "docid": "0786635c54af03f7a24944a262e580d5", "score": "0.5545283", "text": "def find_coordinates(street_name, key, city='warszawa'):\n link = 
\"https://maps.googleapis.com/maps/api/geocode/json?address={},+{}&key={}\".format(street_name,city,key)\n # make a get rquest to download the location data in json format\n req = requests.get(link, timeout=5.0)\n try:\n # extract the coordinates from the downloaded json file\n latitude = json.loads(req.text)[\"results\"][0][\"geometry\"][\"location\"][\"lat\"]\n longitude = json.loads(req.text)[\"results\"][0][\"geometry\"][\"location\"][\"lng\"]\n #print((latitude, longitude))\n return (latitude, longitude)\n except IndexError:\n return None", "title": "" }, { "docid": "e96398764127512fd2df9edf3bc9c25a", "score": "0.5539214", "text": "def address_details(geo_data):\n lat_lng = geo_data['latLng']\n coords = (lat_lng['lat'], lat_lng['lng'])\n try:\n state = geo_data['adminArea3']\n except:\n state = None\n return state, coords", "title": "" }, { "docid": "12d9b42069d7895f0dc34901621c1f39", "score": "0.5536046", "text": "def get_address_info(self, faceid: int) -> AddressInfo:", "title": "" }, { "docid": "596d593529edc7f2d957aebadfcc7962", "score": "0.5531082", "text": "def get_address_from_gps(lat, lng):\n\n google_api_url = \"http://maps.google.com/maps/api/geocode/json?latlng={},{}&sensor=false\".format(lat, lng)\n\n data_google = json.loads(requests.get(google_api_url).content)\n\n if data_google.get('results'):\n formatted_address = data_google['results'][0]['formatted_address']\n else:\n formatted_address = ''\n\n return formatted_address", "title": "" }, { "docid": "eddc99182c4063e87a4c53c87c075d7e", "score": "0.55222434", "text": "def Find_Lat_Lon_Address(Locations):\r\n N=len(Locations)\r\n print(\"Total locations need to process :\",N)\r\n i=0;\r\n LocationLatLon={};\r\n for loc in Locations:\r\n #Get the lat,Lon, and Address of that loc\r\n locationD =geocode(loc)\r\n #If location has found, then \r\n if locationD is not None:\r\n #locationD[1][0] : Lat of that location, locationD[1][1] : Lon of that location, locationD[0]: Correspondance Address of that loc\r\n LocationLatLon[loc]=[locationD[1][0],locationD[1][1],locationD[0]]\r\n i=i+1;\r\n print(\"Remaining :\",(N-i),\"Total\",N)\r\n return LocationLatLon;", "title": "" }, { "docid": "555c12ff8793be861d36d4809fcec447", "score": "0.551774", "text": "def address(self, is_compressed=None):\n return self._network.address.for_p2pkh(self.hash160(is_compressed=is_compressed))", "title": "" }, { "docid": "db399616eb01701ea464afec0b019b10", "score": "0.55151594", "text": "def find_with_ip():\n state_filter = \" nud \" + \" nud \".join(HOME_STATES.values()).lower()\n cmd = f\"ip neigh show {state_filter}\".split()\n neighbours = subprocess.run(cmd, shell=False, capture_output=True, text=True)\n neighbours_ip = [_.split()[0] for _ in neighbours.stdout.splitlines()]\n return neighbours_ip", "title": "" }, { "docid": "f0847d820d49bb9b64a892475a030e91", "score": "0.55100703", "text": "def get_address_for_search(self) -> str:\n raise NotImplementedError()", "title": "" }, { "docid": "d14e8c38398b8b0813934bfdcba1f400", "score": "0.5498637", "text": "def get_address(self):\n addr_str = self.addr_entry.get_text()\n try:\n if \"0x\" in addr_str:\n address = int(addr_str, 16)\n elif \"0b\" in addr_str:\n address = int(addr_str, 2)\n else:\n address = int(addr_str)\n except ValueError:\n raise FormatError(\"Please enter a valid address (format: dec/hex/bin).\")\n else:\n return address", "title": "" }, { "docid": "cb69dd6807fe967f786149eebbf9241e", "score": "0.5496826", "text": "def get_addr(m, a):\n return a if m == 0 else (a + mem['rb'])", 
"title": "" }, { "docid": "147af062fc3c66693da69b9db53e0070", "score": "0.54932845", "text": "def getVillageCoords(self, s=''):\n if s: self.parse(s)\n # we try to get village coordinates\n c = {}\n nx = self.doc.getElementById(\"x\")\n ny = self.doc.getElementById(\"y\")\n #print nx.textContent, ny.textContent\n c['x'] = nx.textContent\n c['y'] = ny.textContent\n return c", "title": "" }, { "docid": "66b0705c6adcf47403ef9099ad2f2ddc", "score": "0.5490657", "text": "def addr(self):\n return self.__addr", "title": "" }, { "docid": "c6ad17bb1000f46f00228d58bad2ac41", "score": "0.54786843", "text": "def getgeo(self, x: float, y: float, z: float) -> tuple[float, float, float]:\n logger.debug(\"input x,y(%s, %s)\", x, y)\n x -= self.refxyz[0]\n y = -(y - self.refxyz[1])\n if z is None:\n z = self.refxyz[2]\n else:\n z -= self.refxyz[2]\n px = self.refproj[0] + self.pixels2meters(x)\n py = self.refproj[1] + self.pixels2meters(y)\n lon, lat = self.to_geo.transform(px, py)\n alt = self.refgeo[2] + self.pixels2meters(z)\n logger.debug(\"result lon,lat,alt(%s, %s, %s)\", lon, lat, alt)\n return lat, lon, alt", "title": "" } ]
0b92e07bb768414209e0068aaf7f76b2
Internal helper. Returns the salted/hashed password using the argon2id13 algorithm. The return value is base64-encoded.
[ { "docid": "b155daf89bcf2e57ce6d536af0ce830c", "score": "0.6997533", "text": "def _hash_argon2id13_secret(password, salt, iterations, memory):\n rawhash = hash_secret(\n secret=password,\n salt=base64.b64decode(salt),\n time_cost=iterations,\n memory_cost=memory,\n parallelism=1, # hard-coded by WAMP-SCRAM spec\n hash_len=32,\n type=Type.ID,\n version=0x13, # note this is decimal \"19\" which appears in places\n )\n # spits out stuff like:\n # '$argon2i$v=19$m=512,t=2,p=2$5VtWOO3cGWYQHEMaYGbsfQ$AcmqasQgW/wI6wAHAMk4aQ'\n\n _, tag, ver, options, salt_data, hash_data = rawhash.split(b'$')\n return hash_data", "title": "" } ]
[ { "docid": "fcbd677c75f3a83a7652200df41e7426", "score": "0.7048091", "text": "def gen_base64_passwd(length):\n return base64.b64encode(gen_passwd(length))", "title": "" }, { "docid": "cbb6a3aea458d4eb72a3574953c347c5", "score": "0.68947244", "text": "def _get_hashed_pw(self, pw: str) -> str:\n\n # Generate a random 32 bit salt:\n salt = os.urandom(4)\n\n # Concatenate that with the UTF-8 representation of the password\n tmp0 = salt + pw.encode('utf-8')\n\n # Take the SHA256 hash and get the bytes back\n tmp1 = hashlib.sha256(tmp0).digest()\n\n # Concatenate the salt again:\n salted_hash = salt + tmp1\n\n # convert to base64 encoding:\n pass_hash = base64.b64encode(salted_hash)\n return pass_hash.decode('utf-8')", "title": "" }, { "docid": "abb74acb0c33ad9d937a1413a782b244", "score": "0.67679363", "text": "def b64():\r\n return 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'", "title": "" }, { "docid": "cd18becd9bed417b0d9d4dde931acac3", "score": "0.67550206", "text": "def _generate_salt_bytes():\n randomness = os.urandom(64)\n b64encoded = base64.b64encode(randomness)\n bytestring = force_bytes(b64encoded)\n return bytestring", "title": "" }, { "docid": "65cace0e3f75c04022cfffa376bec646", "score": "0.6626767", "text": "def raw_password(self):\r\n return self._split_auth()[1]", "title": "" }, { "docid": "892bca6ed248b926315a75a4f8cc8837", "score": "0.6588064", "text": "def _b64e(self, s: str) -> str:\n return base64.b64encode(s.encode(\"ascii\")).decode(\"ascii\")", "title": "" }, { "docid": "c6e82d84cde1ed8ea93e66f35c4f235e", "score": "0.6506385", "text": "def _md5_base_password(self, password):\n return subprocess.check_output(['openssl',\n 'passwd',\n '-1',\n password]).decode('utf-8').strip()", "title": "" }, { "docid": "36430e4386161b3ce7ce6fb370f5850f", "score": "0.6467458", "text": "def __get_encrypted_password__(self, username, password):\n\n user_id = userutils.get_user_id(username)\n\n salt_data = str(user_id) + os.urandom(16)\n salt = base64.b64encode(salt_data)\n\n encoded_password = crypt.crypt(password, \"$6$%s$\" % salt)\n\n return encoded_password", "title": "" }, { "docid": "0f06552518dcaea6348c0ea3f0523c4a", "score": "0.6420897", "text": "def encodestring(username, password): \n ranstring = randomstring(10)\n if sys.version_info[0] > 2:\n thishash = hashlib.sha1((password + ranstring).encode(\"utf8\")).hexdigest()\n else:\n thishash = hashlib.sha1(password + ranstring).hexdigest()\n return pass_enc('||'.join([username, thishash, ranstring]), daynumber=True, timestamp=True)", "title": "" }, { "docid": "52bc6b2aa6dc18f9663c777611ef0135", "score": "0.64040995", "text": "def get_salt():\n salt = b64encode(os.urandom(64))\n return binascii.hexlify(salt).decode('utf-8')", "title": "" }, { "docid": "ea0441bebd9d36bd4060672a25656ba1", "score": "0.6392954", "text": "def get_b64(self):\n\n # return base64 encoded (url safe) hash digest\n return base64.urlsafe_b64encode(self.m.digest()).decode('utf-8')", "title": "" }, { "docid": "aeb75b089a5ca0a50a6871479b5dfe37", "score": "0.6289366", "text": "def __generate_salt(self) -> str:\n method = ALGORITHMS_METHODS_MAP[self._algorithm]\n return method(string=secrets.token_bytes(nbytes=64)).hexdigest() # noqa", "title": "" }, { "docid": "257cba372301df8ddb3ae42421c581ed", "score": "0.6278873", "text": "def make_hs_password(self,password):\r\n return hashlib.sha256(str.encode(password+self.__salt)).hexdigest()", "title": "" }, { "docid": "cf6bba2dda69f105e3726235c9f996da", "score": "0.62756807", "text": "def 
makeSalt(self):\n self.salt = os.urandom(16).encode('base_64')", "title": "" }, { "docid": "4976dcd6952201c2be5ebae9150bc7c5", "score": "0.6167618", "text": "def get_value_for_datastore(self, model_instance):\n raw = super(self.__class__, self).get_value_for_datastore(model_instance)\n \n if raw is None:\n raise ValueError(_(\"Password can't be empty\"))\n try:\n if len(raw) > 12:\n alg, seed, passw = raw.split('$')\n return raw# the password is encrypted\n except Exception:\n pass\n \n if len(raw) < 5:\n raise ValueError(_(\"Invalid password\"))\n if re.search(r'[^a-zA-Z0-9]', raw):\n raise ValueError(_(\"Invalid password\"))\n \n from random import random\n alg = \"sha1\"\n seed = sha_constructor(str(random()) + str(random())).hexdigest()[:5]\n passw = sha_constructor(seed + raw).hexdigest()\n \n return '%s$%s$%s' % (alg, seed, passw)", "title": "" }, { "docid": "b7e3a0d234ae78488c09ed19befd7995", "score": "0.6162677", "text": "def make_salt(length=8):\n\n # this implementation should be fairly efficient because it uses only\n # 1 function call and 1 implied loop\n return ''.join( [ a64converter[i] for i in os.urandom(length) ] );\n\n # old implementation\n #return reduce( lambda x,y:x+y, \n # [ itoa64[ ord(i)&63 ] for i in os.urandom(length) ] );", "title": "" }, { "docid": "d36e6a4cc2aef627eb926d7c425c4e54", "score": "0.61304384", "text": "def encode_password(password, salt, iterations=30000):\n assert password is not None\n assert salt and '$' not in salt\n hash = pbkdf2(password, salt, iterations)\n hash = base64.b64encode(bytes(hash)).decode('ascii').strip()\n return \"%s$%d$%s$%s\" % ('pbkdf2_sha256', iterations, salt, hash)", "title": "" }, { "docid": "43dccae55b60c5306ec00dd56c6b3272", "score": "0.6126545", "text": "def user_data_base64(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_data_base64\")", "title": "" }, { "docid": "67d5c1deb0725adb8e1b6e6b5a0ef3fd", "score": "0.611818", "text": "def __hash(self, *, salt: str, password: str, encoding: str = \"UTF-8\") -> str:\n password_hash = hashlib.pbkdf2_hmac(\n hash_name=self._algorithm,\n password=password.encode(encoding=encoding),\n salt=salt.encode(encoding=encoding),\n iterations=self.iterations,\n )\n password_hash = binascii.hexlify(password_hash).decode(encoding)\n return password_hash", "title": "" }, { "docid": "89588bb1222657730798aa288b25ae83", "score": "0.6114024", "text": "def b64_string(input_string):\n return b64encode(input_string.encode('utf-8')).decode('utf-8')", "title": "" }, { "docid": "003ec2a3caa5acf7b21caae963019c82", "score": "0.6083144", "text": "def _encrypt_password(self, pw):\n\n salt = bcrypt.gensalt()\n pw = bcrypt.hashpw(pw.encode(\"utf-8\"), salt)\n\n # decode to utf-8\n # convert bytes into str\n return pw.decode(\"utf-8\")", "title": "" }, { "docid": "c209a81fabd713765fd9a64760095892", "score": "0.6080087", "text": "def _encode_luci_password(unencoded_string, token):\n md5_hash = hashlib.md5(unencoded_string.encode('ascii')).hexdigest()\n sha_hash = hashlib.sha512(md5_hash.encode('ascii')).hexdigest()\n return hashlib.sha512((token + sha_hash).encode('ascii')).hexdigest()", "title": "" }, { "docid": "27f23b41f4d2380d37ead93bd9497f08", "score": "0.6070137", "text": "def b64text(nbytes):\n return encodebytes(os.urandom(nbytes)).decode('ascii')", "title": "" }, { "docid": "5ea7671ce0822759cee3784f83a57ae0", "score": "0.60497", "text": "def _encrypt_pw(self, password):\n hash_string = self.username + password\n hash_string = hash_string.encode(\"utf8\")\n return 
hashlib.sha256(hash_string).hexdigest()", "title": "" }, { "docid": "4ee9a17e0d6ac233776bfe6037baa93e", "score": "0.6020352", "text": "def generate_salt():\n return randombytes(16)", "title": "" }, { "docid": "868364943148f35018b8334c1b412895", "score": "0.6019123", "text": "def key_from_short_term_cred(password):\n return saslprep(password).encode('utf-8')", "title": "" }, { "docid": "c8b61b115f1c9e57b2b114d3edc89ef1", "score": "0.60179955", "text": "def argon2_hash( # pylint: disable=too-many-arguments,invalid-name\n password, salt, t=16, m=8, p=1, buflen=128, argon_type=Argon2Type.Argon2_i):\n outbuf = create_string_buffer(buflen)\n password = _ensure_bytes(password)\n salt = _ensure_bytes(salt)\n\n result = C_ARGON2_HASH(t, m, p,\n password, len(password),\n salt, len(salt),\n outbuf, buflen,\n None, 0,\n argon_type)\n\n if result:\n raise Argon2Exception(Argon2Exception.errors[result])\n\n return outbuf.raw", "title": "" }, { "docid": "cd9870b1b53033f38cd1b9baf891454f", "score": "0.6012185", "text": "def genSalt():\n return ''.join(secrets.choice(string.printable) for i in range(32, 128))", "title": "" }, { "docid": "c9d8d2b0b0d6d0c0354f4964427742c4", "score": "0.5994164", "text": "def _md5_password(self, password):\n return subprocess.check_output(['openssl',\n 'passwd',\n '-apr1',\n password]).decode('utf-8').strip()", "title": "" }, { "docid": "c5f584fc547be6dc68ad8efc173a6dc1", "score": "0.59758407", "text": "def create_password(algorithm, raw_password):\r\n salt = os.urandom(5).encode('hex')\r\n hsh = get_hexdigest(algorithm, salt, raw_password)\r\n return '$'.join((algorithm, salt, hsh))", "title": "" }, { "docid": "a8663bf2fec2ed9c1dc8d8f87ab13900", "score": "0.59718823", "text": "def Password(self) -> str:", "title": "" }, { "docid": "527b3f3292f132975039073154c1127f", "score": "0.59712774", "text": "def get_inject_string_base64(opts, command_script):\n return \"\"\"INJECT=\\\"mkdir {tmpdir};\n echo \\\\$(echo '{cmd}' |\n base64 -di -) |\n sed 's/{eol}/\\\\n/g' >{tmpdir}/rc\\\"\"\"\".format(\n tmpdir=\"/tmp/sshuttle.{}\".format(SSHUTTLEID),\n cmd=command_script,\n eol=SSHUTTLE_EOL,\n )", "title": "" }, { "docid": "ff5dc02fb9798f218d94707456cff0e3", "score": "0.5969139", "text": "def password(self) -> str:\n return str(self.sas_token)", "title": "" }, { "docid": "2045734ce36e78d6c6d2f57d275945e3", "score": "0.5956951", "text": "def make_password(self, *, password: str) -> str:\n salt: str = self.__generate_salt()\n new_password_hash: str = self.__hash(salt=salt, password=password)\n return salt + new_password_hash", "title": "" }, { "docid": "0b55c6637d636846937d8162a6a37e5e", "score": "0.5946579", "text": "def base64(self) -> 'String':\r\n try:\r\n ascii = str_2_asci_trans(self)\r\n if len(ascii) == 0:\r\n raise AttributeError\r\n bin_8 = ascii_2_bin_trans(ascii, 8)\r\n except AttributeError:\r\n print(f'<{self}> is not possible for base 64 encoding')\r\n return String(self)\r\n bit6 = turn_to_6_or_8_bits(bin_8, 6)\r\n new_asci = ascii_2_bin_trans(bit6)\r\n str_b64 = ascii_2_base64_trans(new_asci)\r\n return String(str_b64)", "title": "" }, { "docid": "3796c436074c5ce5770f8f2d1ed754e1", "score": "0.59434855", "text": "def user_data_base64(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_data_base64\")", "title": "" }, { "docid": "3796c436074c5ce5770f8f2d1ed754e1", "score": "0.59434855", "text": "def user_data_base64(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_data_base64\")", "title": "" }, { "docid": 
"2e4ac271021b6bbd42a45bdebbb139ee", "score": "0.59367967", "text": "def gen_salt():\n presalt = ''.join(random.choice(string.printable) for i in range(20))\n return sha512_hash_hex(presalt)", "title": "" }, { "docid": "31cad96170d727f85f9d150b3794cac6", "score": "0.5934622", "text": "def generate_b64cred(client_id: str, client_secret: str) -> str:\n cred = f\"{client_id}:{client_secret}\"\n b64_byt = base64.b64encode(cred.encode(\"ascii\"))\n encoded = b64_byt.decode(\"ascii\")\n\n return encoded", "title": "" }, { "docid": "ecef3d837496961bc75219b498bc76aa", "score": "0.59288484", "text": "def my_hash_function(password,*args):\n salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')\n pwdhash = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'), \n salt, 100000)\n pwdhash = binascii.hexlify(pwdhash)\n return (salt + pwdhash).decode('ascii')", "title": "" }, { "docid": "f2005f89eb93a4070ab8b570cb01de87", "score": "0.59275615", "text": "def get_password(self):\n pass", "title": "" }, { "docid": "4c1ab079f75e2489663e9014d46efd93", "score": "0.59272677", "text": "def md5crypt(password, salt, magic='$1$'):\n # /* The password first, since that is what is most unknown */\n # /* Then our magic string */\n # /* Then the raw salt */\n m = md5(password + magic + salt)\n\n # /* Then just as many characters of the MD5(pw,salt,pw) */\n mixin = md5(password + salt + password).digest()\n for i in range(0, len(password)):\n m.update(mixin[i % 16])\n\n # /* Then something really weird... */\n # Also really broken, as far as I can tell. -m\n i = len(password)\n while i:\n if i & 1:\n m.update('\\x00')\n else:\n m.update(password[0])\n i >>= 1\n\n final = m.digest()\n\n # /* and now, just to make sure things don't run too fast */\n for i in range(1000):\n m2 = md5()\n if i & 1:\n m2.update(password)\n else:\n m2.update(final)\n\n if i % 3:\n m2.update(salt)\n\n if i % 7:\n m2.update(password)\n\n if i & 1:\n m2.update(final)\n else:\n m2.update(password)\n\n final = m2.digest()\n\n # This is the bit that uses to64() in the original code.\n\n itoa64 = './0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'\n\n rearranged = ''\n for a, b, c in ((0, 6, 12), (1, 7, 13), (2, 8, 14), (3, 9, 15), (4, 10, 5)):\n v = ord(final[a]) << 16 | ord(final[b]) << 8 | ord(final[c])\n for i in range(4):\n rearranged += itoa64[v & 0x3f]\n v >>= 6\n\n v = ord(final[11])\n for i in range(2):\n rearranged += itoa64[v & 0x3f]\n v >>= 6\n\n return magic + salt + '$' + rearranged", "title": "" }, { "docid": "25a6851b45e734acfe0f4696e6749739", "score": "0.5925824", "text": "def _crypt_password(self, raw_value):\n scheme = param_tools.get_global_parameter(\n \"password_scheme\", raise_exception=False)\n if scheme is None:\n from modoboa.core.apps import load_core_settings\n load_core_settings()\n scheme = param_tools.get_global_parameter(\n \"password_scheme\", raise_exception=False)\n raw_value = smart_bytes(raw_value)\n return get_password_hasher(scheme.upper())().encrypt(raw_value)", "title": "" }, { "docid": "a47066eab227e273c48d86c2655922d4", "score": "0.5920682", "text": "def generate_salt(size=16):\n return secrets.token_bytes(size)", "title": "" }, { "docid": "12f4bd05f671575d91d958716d801886", "score": "0.59147847", "text": "def b64encode(self, s):\n return urlsafe_b64encode(s).split('=', 1)[0].decode('ascii')", "title": "" }, { "docid": "90589f02dd6ce4cf60071cdaa29da036", "score": "0.59102917", "text": "def b64encode(*args, **kwargs):\n return base64.encodestring(*args, **kwargs)", "title": "" }, { 
"docid": "47a9802f7506fe8f7e7d38e789c2c438", "score": "0.590884", "text": "def _salt_string(self, plain_string):\n bytes_ = bytearray(plain_string)\n salted_value = bytearray(len(bytes_) * 2)\n\n i = 0\n for b in bytes_:\n salt_byte = self.salt.byte(i)\n masked_values = self._merge_bytes(b, salt_byte)\n salted_value[i * 2] = masked_values[0]\n salted_value[i * 2 + 1] = masked_values[1]\n i += 1\n return bytes(salted_value)", "title": "" }, { "docid": "9d1a0b3db7ac260d42c8b89ec0436821", "score": "0.5899965", "text": "def gen_password(password):\n salt = str(urandom(16)) # Generates a random salt\n hash = str(scrypt.hash(password, salt)) # Generates a salted hash\n return (salt, hash) # Retruns the salt and hash in a tuple", "title": "" }, { "docid": "977e696e418ccda403bfa870996fe6c7", "score": "0.5886307", "text": "def salage(encode):\n\n guerande = bytes(salt.bytes.hex(), 'utf-8')\n return b''.join([encode, guerande])", "title": "" }, { "docid": "6f055effe5a5c227fb4338adac6ed6a2", "score": "0.5880622", "text": "def hash_password(self, password):\n salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')\n passwordhash = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'), salt, 100000)\n passwordhash = binascii.hexlify(passwordhash)\n return (salt + passwordhash).decode('ascii')", "title": "" }, { "docid": "7a6c5a6b3c04778b8f9776e53fe07617", "score": "0.587388", "text": "def encode_password(password, identity_uid, device_uid=None):\n from Acquire.Crypto import Hash as _Hash\n\n encoded_password = _Hash.multi_md5(identity_uid, password)\n\n encoded_password = Credentials.encode_device_uid(\n encoded_password=encoded_password,\n device_uid=device_uid)\n\n return encoded_password", "title": "" }, { "docid": "dad45f9d8b13d344d5515cf8eeab793c", "score": "0.5873507", "text": "def _get_password(self):\n return self.password", "title": "" }, { "docid": "5a3b0a106da5a3e0265338e64743a947", "score": "0.58619153", "text": "def hash_password(self):\n # Note: It is needed to encode in base64 the salt, otherwise it will\n # cause an exception trying to store non utf-8 characteres\n self.salt = base64.urlsafe_b64encode(\n Crypto.Random.get_random_bytes(16))\n hash_helper = SHA256.new()\n hash_helper.update(self.password + self.salt)\n self.password = hash_helper.hexdigest()", "title": "" }, { "docid": "47682c67da2309c844722f8b7039b507", "score": "0.585834", "text": "def generate_salt():\n return uuid.uuid1()", "title": "" }, { "docid": "e1bb94c1b9083ff825aef457159be0a4", "score": "0.58517563", "text": "def hash_password(password, salt, iterations=ITERATIONS):\n if not hasattr(password, \"decode\"):\n password = password.encode(\"utf-8\")\n master_key = make_master_key(password, salt, iterations)\n hashpw = hashlib.pbkdf2_hmac(\"sha256\", master_key, password, 1)\n return base64.b64encode(hashpw), master_key", "title": "" }, { "docid": "f42235c82c465f72029843544df4c053", "score": "0.5848486", "text": "def get_auth(username: str, password: str, version=tcrypt.VERSION) -> bytes:\n seed = sha256((password + username).encode(\"utf8\")).hexdigest()\n user_record = (\n sha256(password.encode(\"utf8\")).hexdigest()\n + \":\"\n + sha256(username.encode(\"utf8\")).hexdigest()\n )\n auth_iv = seed[:16].encode()\n utf8_random = int(user_record[18:20], 16) / 256\n key = get_key(username, password)\n desc = tcrypt.encode_payload_description()\n formatted = tcrypt.serialize(b\"\", version, desc, auth_iv)\n _, cipher, tag = tcrypt.encrypt(\n key, user_record.encode(), utf8_random, auth_iv, formatted\n )\n 
return b64encode(formatted + cipher + tag)", "title": "" }, { "docid": "55faa3e70a451caed66d82764087820b", "score": "0.5847841", "text": "def password(self):\r\n rv = self._split_auth()[1]\r\n if rv is not None:\r\n return _url_unquote_legacy(rv)", "title": "" }, { "docid": "678fd36e4f44361f0b92a36ed9c43b86", "score": "0.58418953", "text": "def password(self) -> str:\n userinfo = self._uri_reference.userinfo or \"\"\n return unquote(userinfo.partition(\":\")[2])", "title": "" }, { "docid": "c203c9e0b6f9dadc2aa85d1bea3cc716", "score": "0.583806", "text": "def get_credential_from_wifi_password(wifi_password: str) -> str:\n hash_ = hashlib.sha512()\n hash_.update(wifi_password.encode(\"utf-8\"))\n return base64.b64encode(hash_.digest()).decode(\"utf-8\")", "title": "" }, { "docid": "22d32e3faa32184d9115e893ed3d5510", "score": "0.5829377", "text": "def base64(input_string):\n import base64\n return base64.b64encode(input_string.encode()).decode()", "title": "" }, { "docid": "b3464d74b6ff9159d215c9e4a87c35e6", "score": "0.58233273", "text": "def derive_password_for_storage(password):\n salt = os.urandom(16)\n key = Scrypt(\n salt=salt,\n length=32,\n n=2 ** 14,\n r=8,\n p=1,\n backend=default_backend()\n ).derive(bytes.fromhex(password))\n return salt, key", "title": "" }, { "docid": "4c0a6f027d175da5d018c9a517bb752c", "score": "0.5811971", "text": "def _base64_plaintext_data_key():\n plaintext_data_key = _plaintext_data_key()\n return base64.urlsafe_b64encode(plaintext_data_key)", "title": "" }, { "docid": "9fc7758bc587f41c6ba24ae1d14051fd", "score": "0.5810989", "text": "def generate_security_credential(initiator_password) -> str:\n bytearray_password = bytearray(initiator_password.encode('utf-8'))\n with open(os.getenv(\"PUBKEY_PATH\"), \"rb\") as f:\n public_key = f.read()\n f.close()\n pub_key = RSA.importKey(public_key)\n cipher = PKCS1_v1_5.new(pub_key)\n\n security_credential = cipher.encrypt(bytearray_password)\n\n b64_encrypted = base64.b64encode(security_credential)\n\n security_credential = b64_encrypted.decode(\"utf-8\")\n return security_credential", "title": "" }, { "docid": "dffbecdfcc9e63c52021abed7f563e0a", "score": "0.5809718", "text": "def b64encode(*args, **kwargs):\n return base64.encodebytes(*args, **kwargs)", "title": "" }, { "docid": "9adea595e11b0cb3db5aa0306655fe3b", "score": "0.58021003", "text": "def stringify_base64(self):\n if self.__memoryBuffer is not None:\n return base64.b64encode(self.__memoryBuffer.getvalue())\n else:\n return None", "title": "" }, { "docid": "bc38d905b0b05ec7b92e09f7526e7926", "score": "0.5785224", "text": "def _get_password(self):\n return self._password", "title": "" }, { "docid": "e985d889899176b98d91cb90265da6c4", "score": "0.5769953", "text": "def gen_password_key(password: str) -> bytes:\n return sha256(password.encode(\"utf-8\")).digest()", "title": "" }, { "docid": "4a6689150488a187902027a53a895a87", "score": "0.576159", "text": "def hash_once(raw_password, salt):\n hashed = pbkdf2(raw_password, salt, 1)\n return base64.b64encode(hashed).decode('ascii').strip()", "title": "" }, { "docid": "d1e73d16006f4e44af74b82e6a5d8a01", "score": "0.5758605", "text": "def generate_password(self):\n pwd = self.make_request(method=\"update\", resource=\"generate_password\")\n if \"value\" in pwd:\n return pwd[\"value\"][0]", "title": "" }, { "docid": "61b106c03a3412979ea4b3bdff057ba0", "score": "0.57529455", "text": "def get_encoded_auth_token(self):\n auth_token = '{}:{}'.format(self.client_id, self.client_secret).encode('ascii')\n return 
base64.b64encode(auth_token).decode('ascii')", "title": "" }, { "docid": "f98ff4268f085ac1d6717a49593e23ac", "score": "0.57478225", "text": "def _encrypted_data_as_base64(self):\n bitstream = self._encrypted_data_as_bitstream()\n bitstream.seek(0)\n length = bitstream.get_length()\n\n assert length % 8 == 0, \\\n \"The ciphertext data must be a multiple of eight bits in size.\"\n\n return bitstream.get_base64(length)", "title": "" }, { "docid": "8c377da00af631ab28e95b7a474e73bd", "score": "0.5731763", "text": "def _generate_password(length=40):\n uuid_str = six.text_type(uuid.uuid4()).encode(\"UTF-8\")\n return hashlib.sha1(uuid_str).hexdigest()[:length]", "title": "" }, { "docid": "ea5cdd9ccd9563f0e0e01718b2f6cb00", "score": "0.5723594", "text": "def password(self):\n return unquote(self._protocol_info.password)", "title": "" }, { "docid": "dbcdaf0b0cf2dd1d87610cd0da5ea554", "score": "0.5719362", "text": "def encode(self, password: str) -> str:\n if len(self._key) < len(password):\n raise ValueError(\"The password is too long for the key\")\n\n encoded: list = [None for _ in range(len(password) * 2)]\n\n for i in range(len(password)):\n # password char and key\n c: int = ord(password[i])\n k: int = ord(self._key[i])\n\n # get the divider and modulo values\n d: int = c // 16\n r: int = c % 16\n\n # encode into base64\n encoded[i * 2] = Base64.chr_mod(d + k)\n encoded[i * 2 + 1] = Base64.chr_mod(r + k)\n\n return \"\".join(encoded)", "title": "" }, { "docid": "f25f0accd6027c63889c1636cb31f0cd", "score": "0.5718315", "text": "def get_password_hash(password: str) -> str:\n pwd_context.hash(password)", "title": "" }, { "docid": "f03cdca91eb32ae02ce2a39d48234384", "score": "0.5717514", "text": "def _prepare_auth(self, usr, pwd, db, flags, seed):\n if usr is not None and len(usr) > 0:\n _username = usr.encode('utf-8') + b'\\x00'\n else:\n _username = b'\\x00'\n \n if pwd is not None and len(pwd) > 0:\n _password = utils.int1store(20) +\\\n self._scramble_password(pwd.encode('utf-8'),seed)\n else:\n _password = b'\\x00'\n \n if db is not None and len(db):\n _database = db.encode('utf-8') + b'\\x00'\n else:\n _database = b'\\x00'\n \n return (_username, _password, _database)", "title": "" }, { "docid": "3e9db95c87273420c5e3a5351d6581a4", "score": "0.5714319", "text": "def _hash_password(self, plain_password: str) -> bytes:\n return bcrypt.hashpw(plain_password, bcrypt.gensalt())", "title": "" }, { "docid": "9939bd58bd52181ba36dae462a9b5887", "score": "0.56995714", "text": "def encode_hash_and_salt(plaintext):\n # use the latest algorithm (algorithm also covers how salt is chosen).\n algorithmstring = DEF_salthash_algorithm_u4s512v1\n # use a random salt\n salt = uuid.uuid4().hex\n # create the hash-salted string\n return encode_hash_and_salt_withparams(plaintext, algorithmstring, salt)", "title": "" }, { "docid": "4c9087132644fcb56be4750220bafcef", "score": "0.56867343", "text": "def _sign(self, text):\n secure_hash = self.hasher()\n secure_hash.update(text)\n return b64encode(secure_hash.digest())", "title": "" }, { "docid": "3dce8f9533900a9798d9740f0d66f187", "score": "0.56855893", "text": "def hex_to_base64(hexstring):\r\n\r\n return bytes_to_base64(hex_string_to_bytes(hexstring))", "title": "" }, { "docid": "3627cf52f205ca619242ae7a0694d951", "score": "0.5676248", "text": "def make_secret(password):\n salt = os.urandom(4)\n h = hashlib.sha1(password)\n h.update(salt)\n return \"{SSHA}\" + encode(h.digest() + salt)[:-1]", "title": "" }, { "docid": "5d9b173a4da282b0e3a2144f5772ad7a", "score": 
"0.5668635", "text": "def encoded_client() -> str:\n return base64.b64encode(f\"{Fitbit.client_id}:{Fitbit.client_secret}\".encode('ascii')).decode('ascii')", "title": "" }, { "docid": "436f54ccd397ef96b882038ff0a5f0bf", "score": "0.56610996", "text": "def encrypt_password(password: str) -> str:\n return hash(password)", "title": "" }, { "docid": "1578179ecd8729cb13bf8fde73cb538c", "score": "0.56603456", "text": "def b64encode(value):\n return base64.b64encode(value)", "title": "" }, { "docid": "fb8c752db31f450d1eab46a7023bb158", "score": "0.5650952", "text": "def hash_pass(password: str, salt: int) -> bytes:\n # RSA PBKDF2 (Password-Based Key Derivation Function 2) using HMAC-SHA256\n assert type(salt) is int, repr(type(salt))\n return hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), salt_to_bytes(salt), 32768)", "title": "" }, { "docid": "128bbfd919ccf95b2e052c0f1981d447", "score": "0.5636497", "text": "def password(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "29efa208eb6fbdaab679bd0017f16900", "score": "0.56278163", "text": "def hash_salt(password, salt=uuid.uuid4().hex):\n algorithm = 'sha512'\n hash_obj = hashlib.new(algorithm)\n password_salted = salt + password\n hash_obj.update(password_salted.encode('utf-8'))\n password_hash = hash_obj.hexdigest()\n password_db_string = \"$\".join([algorithm, salt, password_hash])\n return password_db_string", "title": "" }, { "docid": "deb799426b2db8a433ecf92b6b78528c", "score": "0.56250083", "text": "def hash_password(self, password):", "title": "" }, { "docid": "f3a584a78fa1318930e5787a5e75341e", "score": "0.56141514", "text": "def genSalt(salt_length=20):\n return ''.join([ choice(charset) for i in xrange(salt_length)])", "title": "" }, { "docid": "db18ec54751283ab783aea34dbd328cd", "score": "0.5608588", "text": "def password(self):\n return self.check_output(\"getent shadow %s\", self.name).split(\":\")[1]", "title": "" }, { "docid": "35ca35e929a9bdfb853faa0be9703bb2", "score": "0.5608301", "text": "def hash_password(password, salt):\r\n salt = salt.encode('ascii')\r\n pwdhash = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'),\r\n salt, 100000)\r\n pwdhash = binascii.hexlify(pwdhash)\r\n return (salt + pwdhash).decode('ascii')", "title": "" }, { "docid": "e36bfc0639268f717dddb05030b3cf70", "score": "0.5607272", "text": "def genid(s):\n if isinstance(s, unicode):\n str_8bit = s.encode('UTF-8')\n else:\n str_8bit = s\n return sha1('salt' + str_8bit).hexdigest()", "title": "" }, { "docid": "35c1ecc48771340b3e242bc2cb81ee9e", "score": "0.5602571", "text": "def hash_password(password):\r\n salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')\r\n pwdhash = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'), \r\n salt, 100000)\r\n pwdhash = binascii.hexlify(pwdhash)\r\n return (salt + pwdhash).decode('ascii')", "title": "" }, { "docid": "80db79c75b6a45a3caf29da2319eff60", "score": "0.5599591", "text": "def hash_password(password: str, salt: str = None):\n if salt is None:\n salt = get_random_string()\n enc = hashlib.pbkdf2_hmac(\"sha256\", password.encode(), salt.encode(), 100_000)\n return enc.hex()", "title": "" }, { "docid": "e1ca76ece58a4bf305535aee50a00999", "score": "0.55883753", "text": "def password(self) -> str:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "e1ca76ece58a4bf305535aee50a00999", "score": "0.55883753", "text": "def password(self) -> str:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": 
"b7a739602601f132f18254c846ea768a", "score": "0.55859435", "text": "def get_password(self):\n return self._get_password", "title": "" }, { "docid": "84740d9aef9d9352592701ed15647a61", "score": "0.5584123", "text": "def key_from_long_term_cred(username, realm, password):\n creds = b':'.join([username.encode('utf-8'),\n realm.encode('utf-8'),\n saslprep(password).encode('utf-8')])\n return hashlib.md5(creds).digest()", "title": "" }, { "docid": "370e0bbbb8320ceb696bd71e793101a0", "score": "0.55816084", "text": "def generate_password(self) -> str:\n return self.test_case.generate_password()", "title": "" }, { "docid": "9dacc4985bd0775ceae509dc26d726f1", "score": "0.558059", "text": "def _buildBody(self):\n data = b''\n data += pack('<H', self.__devicePassword)\n return data", "title": "" } ]
5616d56f5b345b2ed8fc300ef20417a9
Accuracy as a function of the height of the classes' lowest common ancestor. This is only applicable to 2-way ImageNet episodes. Given the class set of each episode, we find the corresponding 2 leaves of the ImageNet graph and compute the lowest common ancestor of those leaves. Its height is computed as the maximum over the lengths of the paths from that node to each of the two leaves. This height is the estimate of fine-grainedness. Intuitively, the larger the height, the more coarse-grained the episode's classification task.
[ { "docid": "e611c854613c936284707eee19c54e54", "score": "0.532225", "text": "def get_height_to_accuracy(class_ids, logits, targets):\n height_to_accuracy = collections.defaultdict(list)\n for episode_num, episode_class_ids in enumerate(class_ids):\n if len(episode_class_ids) != 2:\n raise ValueError('There should have been exactly 2 elements in the list '\n \"of each episode's class id's.\")\n # Get the Synsets corresponding to the class id's episode_class_ids.\n episode_synsets = get_synsets_from_class_ids(episode_class_ids)\n assert len(episode_synsets) == 2, ('Fine- vs coarse- grained analysis '\n 'should be restricted to binary tasks.')\n # Compute the height of the lowest common ancestor of the episode's Synsets.\n _, height = imagenet_spec.get_lowest_common_ancestor(\n episode_synsets[0], episode_synsets[1])\n # Compute the accuracy of the episode.\n episode_logits = logits[episode_num]\n episode_targets = targets[episode_num]\n episode_acc = compute_episode_accuracy(episode_logits, episode_targets)\n height_to_accuracy[height].append(episode_acc)\n return height_to_accuracy", "title": "" } ]
[ { "docid": "2b98e5a25740fc9c70b1d02b642706e3", "score": "0.57431614", "text": "def optimum(self):\n return self.accuracy(self.classify)", "title": "" }, { "docid": "9764555874146dc2da06586bad0d1fb3", "score": "0.57204765", "text": "def get_best_accuracy(self) -> float:\n return (\n tf.reduce_max(self.fitness_values) / tf.shape(self.true_labels)[0]\n ).numpy()", "title": "" }, { "docid": "cd94859f42f2f3d1ed89ecd1563ae9f2", "score": "0.56745374", "text": "def height(self):\n if(self.is_leaf()): # base case\n return 0\n\n heights = []\n for next in self.nexts:\n if(next != None):\n heights.append(next.height())\n\n max_height = max(heights)\n\n return 1 + max_height # visit current node", "title": "" }, { "docid": "1d6b24d18084f44b047d3ab217e53c49", "score": "0.5664625", "text": "def _height1(self):\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "title": "" }, { "docid": "92273c8ea40c6c477f4ce53fe85ce026", "score": "0.56564355", "text": "def height(t):\n if branches(t) == []:\n return 0\n deepest = 0\n for child in branches(t):\n deepest = max(deepest, height(child))\n return deepest + 1", "title": "" }, { "docid": "fa69fed367de8ff25855aa78fa390676", "score": "0.56304234", "text": "def get_accuracy(tree, examples):\n correct_guesses = 0\n \n for e in examples:\n if tree.classify(e.attributes) == e.classification:\n correct_guesses += 1\n \n return correct_guesses / len(examples)", "title": "" }, { "docid": "04bb1f5e438e2aca5c933a9c22bfa7f7", "score": "0.5621831", "text": "def resnet_inf(image_path):\n image = load_image(image_path.encode('utf-8'))\n predictions = InferenceNet(image).get()\n clsidx = predictions.ndarray().argmax() + 161\n return predictions.ndarray().max(), clsidx", "title": "" }, { "docid": "5d90e4dca469e5795b74dd3ed570227d", "score": "0.5556104", "text": "def height(self: TreapNode[T]) -> int:\n return 1 + max(\n self.left.height() if self.left else 0,\n self.right.height() if self.right else 0,\n )", "title": "" }, { "docid": "fed851919956145d75a6c3c2151412d4", "score": "0.5542218", "text": "def _get_height(self):\r\n\r\n if self.leaf:\r\n return 1\r\n return 1 + self.children[-1]._get_height()", "title": "" }, { "docid": "66bba3a31df6eea7f3a623d7bf01e8a2", "score": "0.552328", "text": "def predict_winning_percentage_well(instance):\n infile = open(\"best_classifier.p\", \"rb\")\n header, my_rf = pickle.load(infile)\n infile.close()\n # 2. 
use the tree to make a prediction\n try: \n return my_rf.predict([instance])[0]# recursive function\n except:\n return None", "title": "" }, { "docid": "9fb9ac32b31544f98bca0bc03512158e", "score": "0.5518245", "text": "def height(self):\n # Check if left child has a value and if so calculate its height\n left_height = 0\n if self.left is not None:\n left_height += self.count_edges('left')\n # Check if right child has a value and if so calculate its height\n right_height = 0\n if self.right is not None:\n right_height += self.count_edges('right')\n # Return one more than the greater of the left height and right height\n if left_height > right_height:\n return (left_height)\n else:\n return (right_height)", "title": "" }, { "docid": "71ff131c635489ac7d3835c7ea9c8308", "score": "0.54873717", "text": "def height(self):\n # TODO: Check if left child has a value and if so calculate its height\n if not self.is_leaf():\n right_height = 0\n left_height = 0\n if self.right != None:\n # make a new height variable without changing property of node\n right_height = self.right.height()\n if self.left != None:\n # ask for the right node height\n left_height = self.left.height()\n # Return one more than the greater of the left height and right height\n # if left_height doesn't exsit, gonna return error, so we assign 0 to l_h, r_h on top\n # max(0, None) is None, all empty list, tuple is false\n return 1 + max(right_height, left_height)\n # if node doesn't have child, height is 0\n return 0", "title": "" }, { "docid": "1e2668568736968365c957dc9d464277", "score": "0.547626", "text": "def closest_common_ancestor(self, *classes):\n mros = [cls.mro() for cls in classes]\n track = defaultdict(int)\n while mros:\n for mro in mros:\n cur = mro.pop(0)\n track[cur] += 1\n if track[cur] == len(classes):\n return cur\n if len(mro) == 0:\n mros.remove(mro)\n assert(0) # should never be reached...", "title": "" }, { "docid": "ae56073d11bf0f88b90a94e4e6131504", "score": "0.54542947", "text": "def _height2(self, p):\n if self.is_leaf(p):\n return 0\n else:\n return 1 +max(self.height2(c) for c in self.children(p))", "title": "" }, { "docid": "aad29c5c6b89b3355755df13a3618d67", "score": "0.5424262", "text": "def id3(labels, attributes, data):\n data_size = len(data)\n if data_size == 0:\n raise RuntimeError(\"Attempt to build Tree without training data!\")\n\n classes = defaultdict(int)\n for class_value in labels:\n classes[class_value] += 1\n\n entropy = 0.0\n\n best_class = list(classes)[0] # Get first key as starting class\n for curr_class in classes.keys():\n if classes[curr_class] == data_size:\n return TreeNode(curr_class)\n if classes[curr_class] > classes[best_class]:\n best_class = curr_class\n\n # Calculate data set entropy\n p = classes[curr_class] / data_size\n entropy -= p * math.log2(p)\n\n if len(attributes) == 0:\n return TreeNode(best_class)\n\n # Calculate information gain\n best_attribute = None\n best_inf_gain = -math.inf\n best_attribute_values = None\n\n for attribute in attributes:\n #\n # Values table counts all class_values for all attribute_values\n # Entropy for each attribute can be then calculated by iterating over\n # each class value for given attribute value\n #\n # class/attrib | attrib_val0 | attrib_val1 | attrib_val2 | ...\n # class_val0 | val00 | val10 | val20 | ...\n # class_val1 | val01 | val11 | val21 | ...\n # class_val2 | val02 | val12 | val22 | ...\n # ...\n values_table = defaultdict(lambda: defaultdict(int))\n values_total = defaultdict(int)\n\n for class_value, attribute_value 
in zip(labels, data[:, attribute]):\n values_table[attribute_value][class_value] += 1 # Count class values for given attrib value\n values_total[attribute_value] += 1 # Count total number of attribute value\n\n # Calculate current information gain\n inf_gain = 0.0\n for attribute_value in values_table.keys():\n # Calculate attribute value entropy\n value_entropy = 0.0\n for class_value in values_table[attribute_value].values():\n p = class_value / values_total[attribute_value]\n value_entropy -= p * math.log2(p)\n\n inf_gain += (values_total[attribute_value] / data_size) * value_entropy\n\n inf_gain = entropy - inf_gain\n\n if inf_gain > best_inf_gain:\n best_attribute = attribute\n best_attribute_values = values_table.keys()\n best_inf_gain = inf_gain\n\n attributes.remove(best_attribute)\n root = TreeNode(best_attribute)\n\n for attribute_value in best_attribute_values:\n data_subset_indices = np.argwhere(data[:, best_attribute] == attribute_value)\n data_subset_indices = np.reshape(data_subset_indices, len(data_subset_indices))\n\n data_subset = data[data_subset_indices]\n labels_subset = labels[data_subset_indices]\n root.branches[attribute_value] = id3(labels_subset, attributes, data_subset)\n\n return root", "title": "" }, { "docid": "44a76b2cdd2be75834b2d539749c1dc5", "score": "0.54194003", "text": "def test_find_best_architecture_with_class_weights(self):\n tf.random.set_seed(1234) # Needs tensorflow API v2\n\n X_train, y_train = _create_2_class_labeled_dataset(1, 999) # very unbalanced\n X_val, y_val = _create_2_class_labeled_dataset(1, 99)\n X_test, y_test = _create_2_class_labeled_dataset(10, 10)\n class_weight = {0: 2, 1: 0.002}\n\n best_model, best_params, best_model_type, knn_acc = find_architecture.find_best_architecture(\n X_train, y_train, X_val, y_val, verbose=False, subset_size=1000,\n number_of_models=5, nr_epochs=1, model_type='CNN', class_weight=class_weight)\n\n probabilities = best_model.predict_proba(X_test)\n predicted = probabilities.argmax(axis=1)\n np.testing.assert_array_equal(predicted, y_test.argmax(axis=1))", "title": "" }, { "docid": "ac9bbf635d3fc64ece5143654c43c9c9", "score": "0.54184705", "text": "def get_height(node: AVLNode) -> int:\n if node is None:\n return -1\n if node.left is None and node.right is None:\n return 0\n else:\n return 1 + max(get_height(node.left), get_height(node.right))", "title": "" }, { "docid": "0a72e054019afcff4a3eeebf02fa68f4", "score": "0.54179996", "text": "def test_accuracy():\n gender = np.array([0,0,1,1,0,0,1,1,0,0,1,2,2,2,2,2,2,2,2,1])\n income = np.array([0,0,1,1,2,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0])\n\n ndarr = np.transpose(np.vstack([gender]))\n tree = CHAID.Tree.from_numpy(ndarr, income, alpha_merge=0.9,\n min_child_node_size=1, min_parent_node_size=1)\n\n assert tree.accuracy() == 0.85\n assert tree.accuracy() == 1 - tree.risk()", "title": "" }, { "docid": "bb940cdf16a2ab258030b2eab9deb47e", "score": "0.5414274", "text": "def estimate(self):\n return (self.a_best + 4 * self.m_most_likely + self.b_worst) / 6", "title": "" }, { "docid": "e97b495c7833d6ae2b1357ce63b619bf", "score": "0.5403171", "text": "def height(self):\n if self.is_empty():\n return 0\n else:\n return max(self._left.height(), self._right.height()) + 1\n\n ##############################################################################\n # Task 1: More BST practice\n ##############################################################################", "title": "" }, { "docid": "520628d63310e4ed5744fe4f018949af", "score": "0.53939897", "text": "def 
learnModel(self):\n if self.classifications.count(1) == len(self.classifications):\n return Node(True, 0)\n elif self.classifications.count(0) == len(self.classifications):\n return Node(False, 0)\n elif len(self.dataset[0]) == 0:\n if self.classifications.count(1)/len(self.classifications) >= .5:\n return Node(True, 0)\n else:\n return Node(False, 0)\n else:\n available = [x for x in range(len(self.classifications))]\n ent = self.ent(self.classifications)\n root = Node(examples=available, entropy=ent)\n best = self.findBestAttr(root)\n root = Node(examples=available, entropy=ent, value=best, isroot=True)\n node_stack = [root]\n nodes_tested = 0\n while node_stack:\n # print(\"Nodes in tree: {}\".format(nodes_tested))\n nodes_tested += 1\n node = node_stack.pop()\n # print(node_stack)\n best = node.value\n # print(best)\n # print(best)\n # node.value = best\n # examples are the indeces of the examples in the dataset\n lchild_examples = []\n rchild_examples = []\n data = [self.dataset[x] for x in node.examples]\n for i, x in enumerate(data):\n # print(i)\n if x[best] == 0:\n lchild_examples.append(node.examples[i])\n else:\n rchild_examples.append(node.examples[i])\n # print(\"left children: {}\\nright children: {}\".format(len(lchild_examples), len(rchild_examples)))\n # deal with empty example set\n if len(lchild_examples) == 0:\n if [self.classifications[x] for x in node.examples].count(1)/len(node.examples) >= .5:\n lchild = Node(True, 0, isleaf=True, side=\"left\", parent=node)\n node.l = lchild\n else:\n lchild = Node(False, 0, isleaf=True, side=\"left\", parent=node)\n node.l = lchild\n elif [self.classifications[x] for x in lchild_examples].count(1) == len(lchild_examples):\n lchild = Node(True, 0, isleaf=True, side=\"left\", parent=node)\n node.l = lchild\n elif [self.classifications[x] for x in lchild_examples].count(0) == len(lchild_examples):\n lchild = Node(False, 0, isleaf=True, side=\"left\", parent=node)\n node.l = lchild\n if len(rchild_examples) == 0:\n if [self.classifications[x] for x in node.examples].count(1)/len(node.examples) >= .5:\n rchild = Node(True, 0, isleaf=True, side=\"right\", parent=node)\n node.r = lchild\n else:\n rchild = Node(False, 0, isleaf=True, side=\"right\", parent=node)\n node.r = rchild\n elif [self.classifications[x] for x in rchild_examples].count(1) == len(rchild_examples):\n rchild = Node(True, 0, isleaf=True, side=\"right\", parent=node)\n node.r = rchild\n elif [self.classifications[x] for x in rchild_examples].count(0) == len(rchild_examples):\n rchild = Node(False, 0, isleaf=True, side=\"right\", parent=node)\n node.r = rchild\n if node.l is None:\n c = [self.classifications[x] for x in lchild_examples]\n node.l = Node(parent=node, entropy=self.ent(c), examples=lchild_examples, side=\"left\")\n best = self.findBestAttr(node.l)\n if best is None:\n node.l.value = True\n node.l.isleaf = True\n node.l.side = \"left\"\n else:\n node.l.value = best\n node_stack.append(node.l)\n # print(lchild_examples)\n # print(\"left best: {}\".format(best))\n if node.r is None:\n c = [self.classifications[x] for x in rchild_examples]\n node.r = Node(parent=node, entropy=self.ent(c), examples=rchild_examples, side=\"right\")\n best = self.findBestAttr(node.r)\n if best is None:\n node.r.value = True\n node.r.isleaf = True\n else:\n node.r.value = best\n node_stack.append(node.r)\n # print(\"right best: {}\".format(best))\n # print(node.r.value)\n self.model = root\n return root", "title": "" }, { "docid": "916fe414d9c5548c529d2b5e966e73a3", "score": 
"0.5367506", "text": "def approximate_betweenness(graph, max_depth):\n edge2betweenness = defaultdict(int)\n for n in graph.nodes():\n node2distances, node2num_paths, node2parents = bfs(graph, n, max_depth)\n edge2credit_value = bottom_up(n, node2distances, node2num_paths, node2parents)\n for e,c in edge2credit_value.items():\n edge2betweenness[tuple(sorted(e))] += c\n # divide by 2\n for e,c in edge2credit_value.items():\n edge2betweenness[e] = c / 2\n return edge2betweenness", "title": "" }, { "docid": "fcbb33431e1db5718eb40d3bcf78dc15", "score": "0.53547686", "text": "def max_error_leaf(self) -> int:\n return sorted([m for m in self.mlc_meas], key=lambda x: x.error)[0].leaf_num", "title": "" }, { "docid": "aac1083d18ad85b87acf2862c4ffc727", "score": "0.5354543", "text": "def _best_split(self, X, y):\n \n #m = len(y)\n m = X.shape[0]\n\n # Count of each class in the current node.\n num_parent = [np.sum(y == c) for c in range(self.n_classes_)]\n #num_parent = node._observed_class_distribution\n\n # Gini of current node.\n #best_gini = 1.0 - sum((n / m) ** 2 for n in num_parent)\n best_misclassification = 1.0 - sum((n / m) for n in num_parent if n != max(num_parent))\n #best_misclassification = 1.0 - sum(n/m for k,n in num_parent.items() if k != max(num_parent, key=lambda k: num_parent[k]))\n best_idx, best_thr = None, None\n\n # Loop through all features.\n for idx in range(self.n_features):\n # Sort data along selected feature.\n thresholds, classes = zip(*sorted(zip(X[:, idx], y)))\n\n # We could actually split the node according to each feature/threshold pair\n # and count the resulting population for each class in the children, but\n # instead we compute them in an iterative fashion, making this for loop\n # linear rather than quadratic.\n num_left = [0] * self.n_classes_\n #num_left = dict.fromkeys(num_parent.keys(), 0)\n num_right = num_parent.copy()\n for i in range(1, m): # possible split positions\n c = classes[i - 1]\n num_left[c] += 1\n num_right[c] -= 1\n\n #gini_left = 1.0 - sum((num_left[x] / i) ** 2 for x in range(self.n_classes_))\n #gini_right = 1.0 - sum((num_right[x] / (m - i)) ** 2 for x in range(self.n_classes_))\n misclassification_left = 1.0 - sum((n / i) for n in num_left if n != max(num_left))\n misclassification_right = 1.0 - sum((n / (m-i)) for n in num_right if n != max(num_right)) \n\n # The Gini impurity of a split is the weighted average of the Gini\n # impurity of the children.\n #gini = (i * gini_left + (m - i) * gini_right) / m\n misclassification = (i * misclassification_left + (m - i) * misclassification_right) / m\n\n # The following condition is to make sure we don't try to split two\n # points with identical values for that feature, as it is impossible\n # (both have to end up on the same side of a split).\n if thresholds[i] == thresholds[i - 1]:\n continue\n\n #if gini < best_gini:\n # best_gini = gini\n # best_idx = idx\n # best_thr = (thresholds[i] + thresholds[i - 1]) / 2 # midpoint\n\n if misclassification < best_misclassification:\n best_misclassification = misclassification\n best_idx = idx\n best_thr = (thresholds[i] + thresholds[i - 1]) / 2 # midpoint\n\n return best_idx, best_thr", "title": "" }, { "docid": "c4c48cf8cd7cee415e554221e3b179a8", "score": "0.5321145", "text": "def tree_height(self):\n\n if self.left is None:\n left_height = -1\n if self.left is not None:\n left_height = self.left.tree_height()\n\n if self.right is None:\n right_height = -1\n if self.right is not None:\n right_height = self.right.tree_height()\n\n return 1 + 
max(left_height, right_height)", "title": "" }, { "docid": "81ab17db7c448e25e461d466a18078df", "score": "0.53011847", "text": "def tree_height(self):\n if self.lidar_img is None:\n self.height = 0\n else:\n with open(self.lidar_filename) as lidar_file:\n lidar_json = json.load(lidar_file)\n\n # Want coord tuples for the unmoved crown coordinates so using the\n # lidar copied crown file\n lidar_coords = lidar_json[\"features\"][self.number][\"geometry\"][\"coordinates\"][0]\n geo = [{\n \"type\": \"Polygon\",\n \"coordinates\": [self.get_tuple_coords(lidar_coords)],\n }]\n\n with rasterio.open(self.lidar_img) as src:\n out_image, out_transform = mask(src, geo, all_touched=True,\n crop=True)\n out_meta = src.meta.copy() # noqa:F841\n\n # remove all the values that are nodata values and recorded as negatives\n fixed_array = (out_image[out_image > 0])\n\n # the lidar data can have missed out areas or have noise meaning\n # the array is empty hence we will give this feature height 0 so\n # it is still used in calculating F1 scores in general but ignored\n # if any height restriction is used\n if len(fixed_array) != 0:\n sorted_array = np.sort(fixed_array)\n self.height = sorted_array[int(len(sorted_array) * 0.5)]\n else:\n self.height = 0", "title": "" }, { "docid": "d7f6fc2a7d1b21ad7c30842cc86638ba", "score": "0.52928334", "text": "def calculate_accuracy(self):\n \n # Computation of correct predictions\n n_correct = torch.sum(torch.eq(self.similarity_rounded, self.labels))\n \n # Accuracy\n accuracy = n_correct.item()/self.similarity_rounded.size()[0]*100\n \n return accuracy", "title": "" }, { "docid": "c67f636839ada953d8807c74f42ab5a7", "score": "0.52786726", "text": "def tree_accuracy(tree, representatives): # NOTE do first?\r\n counter = 0\r\n cut_data = []\r\n issues = frozenset([0,1,2,3,4,5,6,7,8,9])\r\n for r in representatives:\r\n cut_data = representatives\r\n cut_data.remove(r)\r\n active = tree_maker(cut_data, None, issues)\r\n while active.is_leaf() == False:\r\n issue = active.issue\r\n vote = r.votes[issue]\r\n if vote == '+':\r\n active = active.left\r\n elif vote == '-':\r\n active = active.center\r\n else:\r\n active = active.right\r\n if active.party == r.party:\r\n counter = counter + 1\r\n return counter/len(representatives)", "title": "" }, { "docid": "0829b1e3fbdea1c3955fe5b8d19a2a54", "score": "0.5265106", "text": "def log_stats_finegrainedness(nodes,\n get_leaves_fn,\n get_lowest_common_ancestor_fn,\n graph_name=None,\n num_per_height_to_print=2,\n num_leaf_pairs=10000,\n path='longest'):\n if not nodes:\n # Empty set\n return\n logging.info(\n 'Finegrainedness analysis of %s graph using %s paths in '\n 'finding the lowest common ancestor.', graph_name, path)\n leaves = get_leaves_fn(nodes)\n # Maps the height of the lowest common ancestor of two leaves to the 'example'\n # in which that height occurred. 
The example is a tuple of the string words\n # associated with (first leaf, second leaf, lowest common ancestor).\n heights_to_examples = collections.defaultdict(list)\n # Maps the height of the lowest common ancestor of two leaves to the number of\n # leaf pairs whose LCA has that height and is the root.\n heights_to_num_lca_root = collections.defaultdict(int)\n # A list of all observed LCA heights.\n heights = []\n # Sample a number of random pairs of leaves, and compute the height of their\n # lowest common ancestor.\n for _ in range(num_leaf_pairs):\n first_ind = np.random.randint(len(leaves))\n second_ind = np.random.randint(len(leaves))\n while first_ind == second_ind:\n second_ind = np.random.randint(len(leaves))\n leaf_a = leaves[first_ind]\n leaf_b = leaves[second_ind]\n lca, height = get_lowest_common_ancestor_fn(leaf_a, leaf_b, path=path)\n heights.append(height)\n\n heights_to_examples[height].append((leaf_a.words, leaf_b.words, lca.words))\n if not lca.parents:\n heights_to_num_lca_root[height] += 1\n\n name_message = ' of the {} graph'.format(\n graph_name) if graph_name is not None else ''\n stats_message = 'mean: {}, median: {}, max: {}, min: {}'.format(\n np.mean(heights), np.median(heights), max(heights), min(heights))\n logging.info(\n 'Stats on the height of the Lowest Common Ancestor of random leaf pairs%s'\n ': %s', name_message, stats_message)\n\n # For each given height, how many pairs of leaves are there?\n heights_to_num_examples = {}\n heights_to_proportion_root = {}\n for h, examples in heights_to_examples.items():\n heights_to_num_examples[h] = len(examples) / num_leaf_pairs\n heights_to_proportion_root[h] = heights_to_num_lca_root[h] / float(\n len(examples))\n logging.info(\n 'Proportion of example leaf pairs (out of num_leaf_pairs '\n 'random pairs) for each height of the LCA of the leaves: %s',\n heights_to_num_examples)\n\n # What proportion of those have the root as LCA, for each possible height?\n logging.info(\n 'Proportion of example leaf pairs per height whose LCA is the root: %s',\n heights_to_proportion_root)\n\n logging.info('Examples with different fine-grainedness:\\n')\n for height in heights_to_examples.keys():\n # Get representative examples of this height.\n for i, example in enumerate(heights_to_examples[height]):\n if i == num_per_height_to_print:\n break\n logging.info('Examples with height %s:\\nleafs: %s and %s. 
LCA: %s',\n height, example[0], example[1], example[2])", "title": "" }, { "docid": "3f1d759db7eb5dc29c4304dd37273ebd", "score": "0.5250158", "text": "def best_thickness(g):\n return len(best_thickness_graphs(g))", "title": "" }, { "docid": "ed6e9072f3a87a15ce73c7f8e0e454d2", "score": "0.5239669", "text": "def get_classification(self, image):\n #TODO implement light color prediction\n etection_count = np.zeros(3, dtype=int)\n\n with self.graph.as_default():\n expanded_img = np.expand_dims(image, axis=0)\n feed_dict = {self.image: expanded_img}\n (scores, classes) = self.sess.run([self.detections,\n self.d_classes],\n feed_dict=feed_dict)\n for i, score in enumerate(scores[0]):\n if score > MIN_CONFIDENCE:\n detection_count[int(classes[0][i]) - 1] += 1\n\n if np.sum(detection_count) == 0:\n # No confident votes for any class\n return TrafficLight.UNKNOWN\n else:\n # Pick best class\n detected_class = np.argmax(detection_count) + 1\n if detected_class == 1:\n return TrafficLight.GREEN\n elif detected_class == 2:\n return TrafficLight.YELLOW\n elif detected_class == 3:\n return TrafficLight.RED\n\n rospy.loginfo('Traffic Light: {}'.format(self.traffic_light_to_str(output)))\n return TrafficLight.UNKNOWN", "title": "" }, { "docid": "d9d6935b4fbe292f13d4aa9a5f3be3d5", "score": "0.52181625", "text": "def maxAncestorDiff(self, root: TreeNode) -> int:\n self.dfs(root)\n return self.ret", "title": "" }, { "docid": "42d464eea7f4840cb36a564a2774440d", "score": "0.5215034", "text": "def height(t):\n \"\"\"BEGIN PROBLEM 3.2\"\"\"\n # strat:\n # - recursively walk the branches and keep track of each depth and we go in one layer at a time\n # - for each branch, take the max value after calling height() for each branch and add 1 before returning it\n # - if we hit a leaf, then there's no more branches to go through...just return 0\n return 0 if is_leaf(t) else (1 + max([height(b) for b in branches(t)]))\n \"\"\"END PROBLEM 3.2\"\"\"", "title": "" }, { "docid": "e78d7a14089d0c63a45be5bf04e44b59", "score": "0.5203662", "text": "def test_recalculate_max_depth():\n X = iris.data\n clf = IsolationForest().fit(X)\n for est in clf.estimators_:\n assert est.max_depth == int(np.ceil(np.log2(X.shape[0])))", "title": "" }, { "docid": "4c107ea827047e4748d81f139760f7c5", "score": "0.5201881", "text": "def unlabeled_accuracy(truth, answer):\n right = 0\n total = 0\n answer_lookup = dict((y, x) for x, y in answer)\n\n for parent, child in correct_positions(truth):\n total += 1\n if answer_lookup[child] == parent:\n right += 1\n\n return right, total", "title": "" }, { "docid": "8aa7211af65e820ef87e737a535874ad", "score": "0.51986843", "text": "def one_label_accuracy(self, data):\n output_results = [(np.argmax(self.network_output_before_softmax(x)), y)\n for (x, y) in data]\n return sum(int(x == y) for (x, y) in output_results)/float(len(data))", "title": "" }, { "docid": "be99572c724eea2d9aaa90b8b8c1e3dd", "score": "0.5193206", "text": "def classify(training, p, k):\n closestk = closest(training, p, k)\n topkclasses = closestk.select('Class')\n return majority(topkclasses)", "title": "" }, { "docid": "822cf63b9e65ddb0030215461fe6327b", "score": "0.5187433", "text": "def _height2(self, x):\n if self.is_leaf(x):\n return 0\n else:\n return 1 + max(self._height2(child) for child in self.children(x))", "title": "" }, { "docid": "bc3ce4cabda3a8ad1aa90a0aa1b43e78", "score": "0.51857775", "text": "def map(self):\n def _nodes():\n t = self.tree\n left = 0\n\n while t:\n yield (t.base_log_prob + left, t.base)\n left += 
t.left_log_prob + log_0_5 \n t = t.right_child\n\n return max(_nodes(), key = lambda x: x[0])[1]", "title": "" }, { "docid": "44882fa1d0348eaeb380ea6732914f12", "score": "0.51782686", "text": "def learn_tree(self, examples):\n if self.entropy(examples) == 0:\n max_info = self.major_label(examples)\n return LeafNode(max_info['max_label'], max_info['count'], max_info['total'])\n else:\n split = self.best_split(examples)\n if split['leaf_node']:\n max_info = self.major_label(examples)\n return LeafNode(max_info['max_label'], max_info['count'], max_info['total'])\n print(split['attr_name'], split['threshold'])\n if len(split['miss_list']) < self.min_leaf_count:\n return DecisionNode(split['attr_name'], split['threshold'], self.learn_tree(split['less_than_list']), self.learn_tree(split['ge_list']), self.learn_tree(random.choice([split['ge_list'], split['less_than_list']])))\n\n max_info = self.major_label(split['ge_list']) if len(split['ge_list']) >= len(\n split['less_than_list']) else self.major_label(split['less_than_list'])\n return DecisionNode(split['attr_name'], split['threshold'], self.learn_tree(split['less_than_list']), self.learn_tree(split['ge_list']), self.learn_tree(random.choice([split['ge_list'], split['less_than_list']])))", "title": "" }, { "docid": "0be914735b6287a6d9ec3df792823689", "score": "0.51717687", "text": "def accuracy(db_testing, tree):\n\tcorrect_predictions = 0.0\n\tprint(\"Predictions: \", end=\"\")\n\tfor row in db_testing:\n\t\tprediction = predict(tree, row)\n\t\tprint(\"{:.0f}\".format(prediction), end=\"\")\n\t\tif prediction == row[-1]:\n\t\t\tcorrect_predictions += 1.0\n\tprint()\n\treturn correct_predictions / len(db_testing)", "title": "" }, { "docid": "c1ee0ff83ad80733d5f4f04f725a974d", "score": "0.51709396", "text": "def calculateHeight(self) :\n if self.leftChild == None or self.rightChild == None :\n return 1\n \n return 1 + max(self.leftChild.getHeight(), self.rightChild.getHeight())", "title": "" }, { "docid": "f34e60e5acb84fc0d3dfd19f8bc0981c", "score": "0.516878", "text": "def accuracy(self, params, batch):\n inputs, target_class = batch\n predicted_class = jnp.where(self.predict(params, inputs) < 0.5, 0, 1)\n return jnp.mean(predicted_class == target_class)", "title": "" }, { "docid": "c67266013a692346bdbb61fbb6d43617", "score": "0.5168721", "text": "def classify(self):\n return tf.reduce_max(self.output, axis=0)", "title": "" }, { "docid": "adc07db85563dc94687b58e9c0ec800a", "score": "0.51661766", "text": "def height(self):\n return self.depth_from_node(self.root)", "title": "" }, { "docid": "1ea93a242bd4c2024a55b10e9c09858e", "score": "0.5163774", "text": "def tree_height(self):\n if self.lidar_img is None:\n self.height = 0\n else:\n\n coords = self.geometry['coordinates'][0]\n geo = [{\n 'type': 'Polygon',\n 'coordinates': [self.get_tuple_coords(coords)]\n }]\n\n with rasterio.open(self.lidar_img) as src:\n out_image, out_transform = mask(src, geo, all_touched=True,\n crop=True)\n out_meta = src.meta.copy() # noqa:F841\n\n # remove all the values that are nodata values and recorded as negatives\n fixed_array = out_image[out_image > 0]\n\n # the lidar data can have missed out areas or have noise meaning\n # the array is empty hence we will give this feature height 0 so\n # it is still used in calculating F1 scores in general but ignored\n # if any height restriction is used\n if len(fixed_array) != 0:\n sorted_array = np.sort(fixed_array)\n self.height = sorted_array[int(len(sorted_array) * 0.5)]\n else:\n self.height = 0", "title": "" }, { 
"docid": "7b8d7e8516b68bea6e5443021fc472db", "score": "0.5157587", "text": "def _calculate_entropy_target(self) -> float:\r\n value_classes = self.__get_target_classes(self.DATA_INSTANCES)\r\n num_data_instances = len(self.DATA_INSTANCES)\r\n\r\n entropy = 0\r\n\r\n for c in value_classes:\r\n class_data_instances = self.__get_all_data_instances_for_target_class(self.DATA_INSTANCES, c)\r\n num_data_instances_for_class = len(class_data_instances)\r\n entropy -= (num_data_instances_for_class / num_data_instances) \\\r\n * math.log2(num_data_instances_for_class / num_data_instances)\r\n \r\n return entropy", "title": "" }, { "docid": "97e276dc99b93cc4fed4fbd3592dca33", "score": "0.5152564", "text": "def _converge(self, dev_instances):\n correct = [self.classify(x) == x.label for x in dev_instances]\n return round(float(sum(correct)) / len(correct), 4)", "title": "" }, { "docid": "ba068941a40ad1d61075ac1859616705", "score": "0.51426446", "text": "def __calculate_entropy_class(self, data_instances: List[DataInstance]) -> float:\r\n target_classes = self.__get_target_classes(data_instances)\r\n num_data_instances = len(data_instances)\r\n \r\n class_entropy = 0\r\n \r\n for c in target_classes:\r\n class_data_instances = self.__get_all_data_instances_for_target_class(data_instances, c)\r\n num_data_instances_for_class = len(class_data_instances)\r\n class_entropy -= (num_data_instances_for_class / num_data_instances) * \\\r\n math.log2(num_data_instances_for_class / num_data_instances)\r\n \r\n return class_entropy", "title": "" }, { "docid": "00e596836ca5ef2ff6e9d89d6231d284", "score": "0.5134571", "text": "def get_classification(self,image):\n\n # scale and center\n image = (image/255) -.5\n\n # reshape as array\n height = 90\n width = 40\n image = image.reshape((1,height,width,3))\n\n with self.graph.as_default():\n pred = self.model.predict(image)\n klass = np.argmax(pred)\n\n tl_state = self.encoder[klass]\n # print(\"Got class: {}, {}\".format(tl_state, time.time()))\n\n return tl_state", "title": "" }, { "docid": "335c9cd078d9dbeeecc10c6935eb2660", "score": "0.5133847", "text": "def fit(self, dataset, classes, parent=None):\n col_idx, split_value, left, right = self._find_best_split(dataset)\n new_node = Node(split_value, col_idx, parent=parent)\n\n if not left or not right:\n label = self._get_majority_class(left + right)\n return label\n\n if new_node.depth() >= self.max_depth:\n new_node.left = self._get_majority_class(left)\n new_node.right = self._get_majority_class(right)\n return new_node\n\n if self._h_val(left, classes) != 0 and len(left) > self.min_leaf_size:\n new_node.left = self.fit(left, classes, parent=new_node)\n else:\n label = self._get_majority_class(left)\n new_node.left = label\n\n if self._h_val(right, classes) != 0 and len(right) > self.min_leaf_size:\n new_node.right = self.fit(right, classes, parent=new_node)\n else:\n label = self._get_majority_class(right)\n new_node.right = label\n\n if new_node.parent:\n return new_node\n else:\n self.root = new_node", "title": "" }, { "docid": "6b0d48a62c08b4eaa59ff18b8171d510", "score": "0.51252365", "text": "def classify(self, training_example):\n\n if self.flower == training_example[-1]:\n return 1\n else:\n return -1", "title": "" }, { "docid": "d442767a31cab6468aae4bb78ef056e0", "score": "0.5116065", "text": "def acs(self, partition):\n redistricted = [0] * self.n_classes\n count = [0] * self.n_classes\n\n previous_labels = self.labels[-2][int(partition.name)]\n for index, instance in 
self.labels[-1][int(partition.name)].items():\n if index in previous_labels and previous_labels[index] != instance:\n redistricted[partition[index]] += 1\n count[partition[index]] += 1\n\n rc = [\n redistricted[i] / count[i] if count[i] > 0 else 0\n for i in range(self.n_classes)\n ]\n lbl = rc.index(max(rc))\n return lbl", "title": "" }, { "docid": "98b5eeb8aba439c41040b45c0c28aad7", "score": "0.51147616", "text": "def classify():\n classifier = load_model(\"kerasmodel.h5\")\n im = cv2.imread(\"result.png\", 0)\n im2 = cv2.resize(im, (28, 28))\n im = im2.reshape(28, 28, -1)\n im = im.reshape(1, 784)\n im = cv2.bitwise_not(im)\n plt.imshow(im.reshape(28, 28), cmap='Greys')\n result = classifier.predict(im)\n a = np.argmax(result)\n return a", "title": "" }, { "docid": "924b2576e0cde748689ff1c90b2cf82a", "score": "0.511094", "text": "def get_classification_accuracy(self, gt='resolve', subset=None):\n idxs, gt = self.get_labeled_ground_truth(gt, subset)\n pred = self.get_predicted(idxs)\n return np.mean(gt == pred)", "title": "" }, { "docid": "d429876fd6c94cd9c6c1cdf9f0becc15", "score": "0.511032", "text": "def get_depth(self):\n return max(self.left.get_depth(), self.right.get_depth()) + self.distance", "title": "" }, { "docid": "438656b0b01f7bd5fe110742ba94d1a0", "score": "0.5106438", "text": "def get_total_nodes(max_depth):\n return np.power(2, max_depth + 1) - 1", "title": "" }, { "docid": "01b31b58879a9151588967c0f96e3d2a", "score": "0.50981414", "text": "def get_height_best_block(self) -> int:\n heads = [self.get_transaction(h) for h in self.get_best_block_tips()]\n highest_height = 0\n for head in heads:\n head_height = head.get_metadata().height\n if head_height > highest_height:\n highest_height = head_height\n\n return highest_height", "title": "" }, { "docid": "01b31b58879a9151588967c0f96e3d2a", "score": "0.50981414", "text": "def get_height_best_block(self) -> int:\n heads = [self.get_transaction(h) for h in self.get_best_block_tips()]\n highest_height = 0\n for head in heads:\n head_height = head.get_metadata().height\n if head_height > highest_height:\n highest_height = head_height\n\n return highest_height", "title": "" }, { "docid": "66921630dbf17394b0d9cd86a0c7474c", "score": "0.5097734", "text": "def ComputeLikelyhoodOfClassification(instanceFeatures, featureCount, instanceCount, classification):\n # default is that likelyhood is dependant on how many instances we've already seen\n likelyHood = math.log(float(instanceCount[classification]) + float(.5)) - math.log(float(instanceCount[0]) + float(instanceCount[1]) + float(.5))\n numFeatures = len(instanceFeatures)\n # Adjust the likelyhood based on how many features of the classification it matches\n for featureIndex in range(numFeatures):\n countOfFeature = featureCount[classification][featureIndex]\n if instanceFeatures[featureIndex] == 0:\n countOfFeature = instanceCount[classification] - countOfFeature\n likelyHood += math.log(float(countOfFeature) + float(.5)) - math.log(float(instanceCount[classification]) + float(.5))\n return likelyHood", "title": "" }, { "docid": "4da1bb30b69a17ae742723c928fdc736", "score": "0.50937825", "text": "def computeHeight(self):\n height = -1\n if self.left:\n height = max(height, self.left.height)\n if self.right:\n height = max(height, self.right.height)\n self.height = height + 1", "title": "" }, { "docid": "80521cb4daee4ba568abf978213460f2", "score": "0.50931144", "text": "def accuracy(predictions, targets):\n assert len(predictions) == len(targets)\n count_pos = 0\n for predic, gold in 
zip(predictions, targets):\n if predic == gold:\n count_pos += 1\n\n return float(count_pos) / len(targets)", "title": "" }, { "docid": "b1930de92b1fe265b675e849729adcb8", "score": "0.5087781", "text": "def calculate_classification_performance(paths, params, limit = None):\n\n\t# path to trained CNN model\n\tmodel_file = os.path.join(paths['model_folder'], params['cnn_model'], 'model.h5')\n\tcheckpoint_model_file = os.path.join(paths['model_folder'], params['cnn_model'], 'checkpoint_model.h5')\n\n\t# load cnn models\n\tmodel = load_model(model_file)\n\tcheckpoint_model = load_model(checkpoint_model_file)\n\n\t# read dataset paths\n\tdatasets = get_datasets_paths(paths['dataset_folder'])\n\n\t# empty dictionary to hold results\n\tresults = {}\n\t# empty dictionary to hold confusion matrix\n\tconfusion_results = {}\n\n\t# calculate accuracy for each dataset\n\tfor dataset in ['train', 'val', 'test']:\n\t\t\n\t\tlogging.info(f'Calculating accuracy for {dataset} dataset')\n\t\t\n\t\t# read X\n\t\tX = np.load(datasets[f'X_{dataset}'])\n\t\t# read Y\n\t\tY = np.load(datasets[f'Y_{dataset}'])\n\t\t\n\t\t# rescale X\n\t\tX = X * params['rescale_factor']\n\t\t\n\t\t# performance inference of cnn model\n\t\ty_hat = model.predict_classes(X[:limit])#.reshape(-1,1)\n\t\t# inference of checkpoint model\n\t\tcheckpoint_y_hat = checkpoint_model.predict_classes(X[:limit])#.reshape(-1,1)\n\n\n\t\t\"\"\"\n\t\t\tAccuracy\n\t\t\"\"\"\n\n\t\t# calculate accuracy cnn model\n\t\taccuracy = accuracy_score(y_true = Y[:limit], y_pred = y_hat)\n\t\t# calculate accuracy of checkpoint model\n\t\tcheckpoint_accuracy = accuracy_score(y_true = Y[:limit], y_pred = checkpoint_y_hat)\n\n\t\tlogging.info(f'Accuracy CNN model: {accuracy}')\n\t\tlogging.info(f'Accuracy checkpoint CNN model: {checkpoint_accuracy}')\n\n\t\t# add to dictionary\n\t\tresults[f'{dataset}'] = accuracy\n\t\tresults[f'{dataset}_checkpoint'] = checkpoint_accuracy\n\n\t\t\"\"\"\n\t\t\tConfusion matrix\n\t\t\"\"\"\n\t\tconfusion = confusion_matrix(y_true = Y[:limit], y_pred = y_hat)\n\t\tcheckpoint_confusion = confusion_matrix(y_true = Y[:limit], y_pred = y_hat)\n\n\t\tconfusion_results[f'{dataset}'] = confusion\n\t\tconfusion_results[f'{dataset}_checkpoint'] = checkpoint_confusion\n\t\n\t# save results to file\n\tresults = pd.DataFrame(pd.Series(results))\n\t# save results to disk\n\tresults.to_csv(path_or_buf = os.path.join(paths['model_folder'], params['cnn_model'], 'accuracy.csv'))\n\n\t# save confusion as pickle\n\tsave_pickle(obj = confusion_results, file_name = 'confusion_results', folder = os.path.join(paths['model_folder'], params['cnn_model']))", "title": "" }, { "docid": "795046e82a70b535e627a679680954fa", "score": "0.5085683", "text": "def guide_tree(self):\n while self.n > 2:\n self.calculate_divergence()\n self.build_new_distance_matrix()\n row, column = self.choose_neighbor()\n self.u_distances(row, column)\n self.distances_from_u(row, column)\n\n root = (self.sequences[0], self.sequences[1])\n _, _, common = self.traverse(root)\n\n print(self.sequences)\n results = []\n for seq in self.base_sequences:\n _, res, _, _, _ = self.global_align(common, seq, 1, -1, -2)\n results.append(res)\n print(res)\n\n score = 0\n for ri in range(len(results)):\n for rj in range(ri + 1, len(results)):\n res1 = results[ri]\n res2 = results[rj]\n\n for i in range(len(res1)):\n if res1[i] == \"-\" or res2[i] == \"-\":\n score -= 2\n elif res1[i] == res2[i]:\n score += 1\n else:\n score -= 1\n print(score)", "title": "" }, { "docid": 
"6d8415c1d508bc6f9b228c8c63f34bc0", "score": "0.50732976", "text": "def _predict(self, inputs):\n node = self.tree_\n while node.left:\n if inputs[node.feature_index] < node.threshold:\n node = node.left\n else:\n node = node.right\n return node.predicted_class", "title": "" }, { "docid": "0c39ea297e8f06c707d8ba4908ed00cc", "score": "0.50687456", "text": "def calc_hierarchies(self):\n\n hierarchies = 0\n for _ in self.hierarchies:\n hierarchies += 1\n return hierarchies", "title": "" }, { "docid": "9b2558acaa3a6b324b5e2c3694e13911", "score": "0.506596", "text": "def classify(test_node, nearest_neighbours):\n\n names_dict = {}\n # Count the freqency of class names from nearest neighbours\n for node in nearest_neighbours:\n name = node.iris_name\n if node.iris_name not in names_dict:\n names_dict[name] = 0\n else:\n names_dict[name] += 1\n\n test_node.calibrated_name = max(names_dict)", "title": "" }, { "docid": "4c62ae3fced6f88514981a93076fed6a", "score": "0.50657874", "text": "def best(self):\n return 1.0", "title": "" }, { "docid": "115301342daf1e5a42ae4722e0a696ae", "score": "0.5064696", "text": "def get_accuracy(self):\n NC = 0\n NW = 0\n for k, v in self.test_Documents.iteritems():\n NCSub = 0\n NWSub = 0\n sum = 0\n min = 100000\n max = -100000\n for doc in v:\n textc = self.get_text_count_for_test_doc(doc)\n val = 0\n for word, count in textc.iteritems():\n # print \"current val is \" + str(val)\n if word in self.weights:\n val += count * self.weights[word]\n t = self.class_to_val(k)\n o = self.step_function(val)\n # print val, k\n sum += val\n if val < min:\n min = val\n elif val > max:\n max = val\n if t == o:\n NC += 1\n NCSub += 1\n else:\n NW += 1\n NWSub += 1\n accSub = (NCSub * 100.0)/(NCSub + NWSub)\n print str(accSub) + \" is accuracy of class \" + k\n # print str(min) + \" is min for class \" + k\n # print str(max) + \" is max for class \" + k\n # print str(sum/(NCSub + NWSub)) + \" is avg for class \" + k\n # print NC, NW\n accuracy = (NC * 100.0)/(NC + NW)\n return accuracy", "title": "" }, { "docid": "4db80440b42014a9ab77cc21bfc507fc", "score": "0.50602025", "text": "def cluster_quality(self):\n n_clusters = len(self.neurons_by_cat_id)\n total_sum = 0\n for k in self.neurons_by_cat_id:\n max_sum = 0\n max_cat = 0\n for l in self.neurons_by_cat_id :\n if k == l:\n continue\n sum_value = ((self.centroid_distance(k) +\n self.centroid_distance(l)) /\n self.between_clusters_distance(k, l))\n if sum_value > max_sum:\n max_sum = sum_value\n max_cat = l\n total_sum += max_sum\n return total_sum / n_clusters", "title": "" }, { "docid": "aedb3e89d5f2a3e26225a868e37bec66", "score": "0.50526714", "text": "def getClassification(dt):\n labelCount = [0,0]\n for attr in dt.branches:\n if dt.branches[attr].nodetype is NODE:\n return None\n labelCount[dt.branches[attr].classification] += 1\n return 0 if labelCount[0] > labelCount[1] else 1", "title": "" }, { "docid": "515f69856a7128a15a08fa0c33c5ea4c", "score": "0.5046937", "text": "def Height(self):\n\n if self.IsLeaf():\n return 1\n else:\n return 1 + max([ch.Height() for ch in self.Children])", "title": "" }, { "docid": "7b401aa528114416edc42ebbaff879f9", "score": "0.5041179", "text": "def height_helper(self, node):\n if node is not None and node.left is None and node.right is None: # When there are no children return 0.\n return 0\n if node.left is not None and node.right is None: # When only left.\n return 1 + self.height_helper(node.left)\n if node.right is not None and node.left is None: # When only right.\n return 1 + 
self.height_helper(node.right)\n if self.height_helper(node.left) > self.height_helper(node.right): # When dealing with both children.\n return 1 + self.height_helper(node.left)\n else:\n return 1 + self.height_helper(node.right)", "title": "" }, { "docid": "5006d8344f34c0fe7996d7ae19576e7a", "score": "0.5040769", "text": "def getInterestingLabel(self, treeNode):\n if len(treeNode) < 2:\n return None\n if self.granularity == ma_util.GRANULARITY_COARSE:\n raise NotImplementedError(\n 'getInterestingLabels does not handle coarse labels!')\n catP = ma_util.sen(treeNode.node, self.granularity)\n catL = ma_util.sen(treeNode[0].node, self.granularity)\n catR = ma_util.sen(treeNode[1].node, self.granularity)\n if catP == catL and catP == catR:\n return \"ID\"\n cat_max = max(catL, catR)\n cat_min = min(catL, catR)\n cat_avg = (catL + catR) / 2.0\n # cat_miv is low if max sentiment is very POS\n cat_miv = ma_util.VERY_POS - cat_max\n # cat_mav is low if min sentiment is very POS\n cat_mav = ma_util.VERY_POS - cat_min\n if catP <= cat_max and catP >= cat_min:\n r_label = 'AVG'\n # is parent less positive than most positive child\n # and more positive than least positive?\n elif catP <= cat_mav and catP >= cat_miv:\n r_label = 'INV'\n elif catL == ma_util.FINE_NEU and ((catP - ma_util.FINE_NEU)\n * (catR - ma_util.FINE_NEU) < 0):\n #r_label = 'INV/L'\n r_label = 'INV'\n elif catR == ma_util.FINE_NEU and ((catP - ma_util.FINE_NEU)\n * (catL - ma_util.FINE_NEU) < 0):\n #r_label = 'INV/R'\n r_label = 'INV'\n elif (catP - ma_util.FINE_NEU) * (cat_avg - ma_util.FINE_NEU) > 0:\n # (3 (4 wundervollen) (2 Szenen)) would match\n # here, but other rules fire first\n # Also not sure if there is intensification happening there?\n r_label = 'INT'\n else:\n #r_label = '???'\n r_label = \"MWE\"\n return r_label", "title": "" }, { "docid": "90b9afd824d89f0908358253f2847495", "score": "0.50385034", "text": "def treeHeight(t):\n if isEmpty(t):\n return 0\n else:\n left = getLeftBranch(t)\n right = getRightBranch(t)\n return max(treeHeight(left), treeHeight(right))\n return -1", "title": "" }, { "docid": "38c963ec6eac5514259d8d3b8a7dd5af", "score": "0.50372255", "text": "def root(self):\n return np.average(score_root(self.detections, self.annotations),\n weights=self.durations)", "title": "" }, { "docid": "7bb6589b46f4a04fe7794f83fe9207a7", "score": "0.5035762", "text": "def computeAccuracy(self, X, y):\n softmax_outputs = self.evaluateClassifier(X) # Get probability distribution of outputs\n # Reduce to a vector of the labels with the highest probability\n predictions = np.argmax(softmax_outputs, axis = 0)\n accuracy = (predictions == y).mean()\n return accuracy", "title": "" }, { "docid": "78608beaa785f229158944616f6c2e70", "score": "0.50312066", "text": "def index_of_biggest_parent(self):\n return (self.groesse//2)-1", "title": "" }, { "docid": "563de8105de1920f8c4940b50ef7cb99", "score": "0.50263584", "text": "def height(self, node):\n\n if node is None:\n return -1\n\n # search both sides of tree to find just how big it is\n left_height = self.height(node.left)\n right_height = self.height(node.right)\n return 1 + max(left_height, right_height)", "title": "" }, { "docid": "fba46724fd5fa92b48a6c23d3e01f33f", "score": "0.50261366", "text": "def ProfondeurMoyenne(tree):\n return tree.av_leaf_height()", "title": "" }, { "docid": "288fdd0f6b0dbebfb16b18fba840ddce", "score": "0.50261337", "text": "def height(self):\n stack = [0]\n max_depth = 0\n for elem in self:\n depth = stack.pop()\n max_depth = max(max_depth, 
depth)\n stack.extend([depth + 1] * elem.arity)\n return max_depth", "title": "" }, { "docid": "567fe936fcad249786aeb1b6519fb8be", "score": "0.5018969", "text": "def calc_training_score(classifications):\n\n trueclassifications = fetch_training_truth()\n full, rough = [], []\n\n for usermorph in classifications:\n trueclass = ''\n\n for truth in trueclassifications:\n if usermorph[0] == truth[0]:\n trueclass = truth[1]\n break\n\n if trueclass == usermorph[1]:\n full.append(1.)\n rough.append(1.)\n elif check_broad_training_categories(trueclass, usermorph[1]):\n full.append(0.)\n rough.append(1.)\n else:\n full.append(0.)\n rough.append(0.)\n\n print \"==========================================\"\n print \"================ Summary =================\"\n print \"Classified {} galaxies out of the training set of {}\".format(\n len(classifications), len(trueclassifications))\n print \"Score (Bad/E/S0/Sp/Irr bins): {:.1f}\".format(100.*sum(rough)/len(rough))\n print \"Score (Full bin resolution): {:.1f}\".format(100.*sum(full)/len(full))\n return", "title": "" }, { "docid": "4ff3c3319046a74c836101cd39c4a81c", "score": "0.50148535", "text": "def get_worst(self):\r\n\r\n worst = None\r\n worst_score = np.inf\r\n for el in self.pool:\r\n if el.fitness < worst_score:\r\n worst_score = el.fitness\r\n worst = el\r\n\r\n return worst", "title": "" }, { "docid": "5f5fe20d719a1c858dc79b5c047c91db", "score": "0.50146294", "text": "def classify(classifier, session, img_arr, stride=16):\n height, width = FEATURE_HEIGHT, FEATURE_WIDTH\n\n resized_img = resize_to_desired(img_arr)\n img_height, img_width = resized_img.shape\n assert img_height == height\n\n features = []\n for x in range(0, img_width - FEATURE_WIDTH + 1, stride):\n this_win = resized_img[:, x:x + FEATURE_WIDTH]\n features.append(this_win.reshape(-1))\n\n _, indices = classifier.predict(session, np.asarray(features))\n\n img_cnt = len(features)\n\n cls_list = []\n for i in range(img_cnt):\n cls_list.append(indices[i][0])\n\n class_id, pos_cnt = Counter(cls_list).most_common()[0]\n confidence = (pos_cnt / img_cnt) * 100.0\n\n return class_id, confidence", "title": "" }, { "docid": "303acc024b0198d80227ed3afb1567fc", "score": "0.5014301", "text": "def decision_tree_algorithm(k, train, label):\n tree_classifier = DecisionTreeClassifier(criterion='gini', max_depth=8, max_leaf_nodes=50)\n\n score = k_fold_cross_validation(tree_classifier, k, train, label)\n\n print('Decision Tree accuracy is: {}%'.format(round(score, 1)))\n return round(score, 1)", "title": "" }, { "docid": "beb85ca193c5e2507ced0ece58b4af01", "score": "0.5013385", "text": "def flatness(taxonomy, node):\n non_root_nodes = len(taxonomy.get_descendants(node)) - 1\n\n non_leaf_nodes = sum([1 for d in taxonomy.get_descendants(node) if taxonomy.is_instance(d) is not True])\n\n average_branching_factor = non_root_nodes / non_leaf_nodes\n\n return average_branching_factor", "title": "" }, { "docid": "3bae6f7b0d933e5ef83e91b6ca94045e", "score": "0.5011181", "text": "def classify(instance):\n\tinstance['HD'] = 1\n\tprob1 = getJointProb(instance)\n\tinstance['HD'] = 2\n\tprob2 = getJointProb(instance)\n\tif (prob1 >= prob2):\n\t\treturn 1\n\telse :\n\t return 2", "title": "" }, { "docid": "8d9f22f0c65811b2433ba6b1127eeda7", "score": "0.5007111", "text": "def height(self) -> int:\n if self.root is None: # If BinaryTree is empty return -1.\n return -1\n if self.root is not None and self.root.right is None and self.root.left is None: # If root is only node.\n return 0\n else: # For anything else 
use helper.\n return self.height_helper(self.root)", "title": "" }, { "docid": "46f8113f87a63079f15120281f3ae306", "score": "0.5004398", "text": "def HFscore(Z, classe):\r\n n = Z.shape[0]+1\r\n size = float(len(classe))\r\n # we need to loop on all the nodes of the hierarchy\r\n # compute recalls\r\n Recall_leaves = [int(i in classe) /size for i in range(n)]\r\n Recall_nodes = []\r\n for j in range(n-1):\r\n if Z[j,0] < n :\r\n r1 = Recall_leaves[Z[j,0]]\r\n else :\r\n r1 = Recall_nodes[Z[j,0]-n]\r\n if Z[j,1] < n :\r\n r2 = Recall_leaves[Z[j,1]]\r\n else :\r\n r2 = Recall_nodes[Z[j,1]-n]\r\n Recall_nodes.append(r1+r2)\r\n # compute precisions\r\n Precision_leaves = [float(i in classe) for i in range(n) ]\r\n Precision_nodes = []\r\n for j in range(n-1):\r\n if Z[j,0] < n :\r\n p1 = Precision_leaves[Z[j,0]]\r\n s1 = 1\r\n else :\r\n p1 = Precision_nodes[Z[j,0]-n]\r\n s1 = Z[Z[j,0]-n,3]\r\n if Z[j,1] < n :\r\n p2 = Precision_leaves[Z[j,1]]\r\n s2 = 1\r\n else :\r\n p2 = Precision_nodes[Z[j,1]-n]\r\n s2 = Z[Z[j,1]-n,3]\r\n Precision_nodes.append( (p1*s1 + p2*s2)/float(s1+s2) )\r\n ### compute fscores\r\n Fscores_nodes = []\r\n for i in range(len(Precision_nodes)):\r\n if Recall_nodes[i]+Precision_nodes[i] != 0:\r\n f = (2*Recall_nodes[i]*Precision_nodes[i]) / (Recall_nodes[i]+Precision_nodes[i])\r\n Fscores_nodes.append(f)\r\n else :\r\n Fscores_nodes.append(0)\r\n\r\n return max(Fscores_nodes)", "title": "" }, { "docid": "985d57038918644a172c6c42022b0d9a", "score": "0.500347", "text": "def get_class_accuracy(X,y,W1,b1,W2,b2):\n pred = predict(X, W1, b1, W2, b2)\n labels = np.unique(y_train)\n accuracy = []\n for label in labels:\n print('Checking for class: ',label)\n d = np.count_nonzero(y == label)\n n = 0\n incorrect_found = False\n correct_found = False\n for i in range(len(pred)):\n if y[i][0]==label:\n if pred[i] == y[i][0]:\n n+=1\n if not correct_found:\n correct_found = True\n print('Predicted Label: ', pred[i])\n print('Actual Label: ', y[i, :])\n plt.imshow(X[i, :], cmap=plt.get_cmap('gray'))\n plt.savefig('data/correct' + str(label) + '.jpg')\n plt.show()\n else:\n if not incorrect_found:\n incorrect_found = True\n print('Predicted Label: ',pred[i])\n print('Actual Label: ', y[i,:])\n plt.imshow(X[i,:], cmap=plt.get_cmap('gray'))\n plt.savefig('data/incorrect'+str(label)+'.jpg')\n plt.show()\n accuracy.append(n/d)\n return accuracy", "title": "" }, { "docid": "2a453a0450923e2d00412eacd8d433bb", "score": "0.5000336", "text": "def count_accuracy(B_true, B_est):\n if ((B_est == -1) & (B_est.T == -1)).any():\n raise ValueError('undirected edge should only appear once')\n d = B_true.shape[0]\n # linear index of nonzeros\n pred_und = np.flatnonzero(B_est == -1)\n pred = np.flatnonzero(B_est == 1)\n cond = np.flatnonzero(B_true)\n cond_reversed = np.flatnonzero(B_true.T)\n cond_skeleton = np.concatenate([cond, cond_reversed])\n # true pos\n true_pos = np.intersect1d(pred, cond, assume_unique=True)\n # treat undirected edge favorably\n true_pos_und = np.intersect1d(pred_und, cond_skeleton, assume_unique=True)\n true_pos = np.concatenate([true_pos, true_pos_und])\n # false pos\n false_pos = np.setdiff1d(pred, cond_skeleton, assume_unique=True)\n false_pos_und = np.setdiff1d(pred_und, cond_skeleton, assume_unique=True)\n false_pos = np.concatenate([false_pos, false_pos_und])\n # reverse\n extra = np.setdiff1d(pred, cond, assume_unique=True)\n reverse = np.intersect1d(extra, cond_reversed, assume_unique=True)\n # compute ratio\n pred_size = len(pred) + len(pred_und)\n cond_neg_size = 0.5 
* d * (d - 1) - len(cond)\n fdr = float(len(reverse) + len(false_pos)) / max(pred_size, 1)\n tpr = float(len(true_pos)) / max(len(cond), 1)\n fpr = float(len(reverse) + len(false_pos)) / max(cond_neg_size, 1)\n # structural hamming distance\n pred_lower = np.flatnonzero(np.tril(B_est + B_est.T))\n cond_lower = np.flatnonzero(np.tril(B_true + B_true.T))\n extra_lower = np.setdiff1d(pred_lower, cond_lower, assume_unique=True)\n missing_lower = np.setdiff1d(cond_lower, pred_lower, assume_unique=True)\n shd = len(extra_lower) + len(missing_lower) + len(reverse)\n return {'fdr': fdr, 'tpr': tpr, 'fpr': fpr, 'shd': shd, 'nnz': pred_size, 'cnz': len(cond)}", "title": "" }, { "docid": "1666dd37b897df4ecb057d0dad15e65d", "score": "0.49983037", "text": "def calculate_split_accuracy(cm):\n n = len(cm)\n first = int(n / 2)\n cm_small = np.zeros((2, 2))\n for i in range(n):\n class_i = int(i < first)\n for j in range(n):\n class_j = int(j < first)\n cm_small[class_i][class_j] += cm[i][j]\n return (float(cm_small[0][0] + cm_small[1][1]) / cm_small.sum())", "title": "" }, { "docid": "e8d3497495ef7a0550bcc56a80dc7896", "score": "0.4995909", "text": "def accuracy(self):\n return (self.confusionmatrix.diag()/(self.confusionmatrix.sum(dim=1) + 1e-7)).mean().item()", "title": "" }, { "docid": "e25194357c4c6e73dda1a7f971453b94", "score": "0.49927983", "text": "def calculate_score(self, node):\n # Calculate G: the movement cost from the start node to the current node\n g = node.parent.g + 1\n\n # Calculate H: the estimated movement cost from the current node to the end node\n # Manhattan distance method: # of horizontal + # of vertical squares to reach the end\n h = get_distance_to_end(node, self.end)\n\n return g, h", "title": "" }, { "docid": "c6cbae20a80cfbae9afe09d70bf29d7b", "score": "0.4992562", "text": "def get_max_branching_factor(self):\n if self.children == []:\n return 1\n return 1 + max([child.get_max_branching_factor() for child in self.children])", "title": "" }, { "docid": "e615f4db1d63995af62e0a8611b97f66", "score": "0.49889502", "text": "def calculate_edge_distance(self, state, parent_node):\n traj, traj_distance = construct_dubins_traj(parent_node.state, state)\n if collision_found(traj, self.objects, self.walls):\n return self.LARGE_NUMBER\n\n return traj_distance", "title": "" }, { "docid": "f212d82eccbdd5027b14fa966caf19d8", "score": "0.4988397", "text": "def class_avg(self):\n return self._class_loss.avg", "title": "" }, { "docid": "caa4f4d97012f0ff2ca8e01225e3fe23", "score": "0.49875462", "text": "def _intermediate_classifiers(self):\n ### hard code, use the output of 1st and 2nd resnet unit\n config = self.config\n # stride = config.strides\n h1 = self._saved_hidden[1] # (?, 32, 32, 16)\n print('h1 shape: {}'.format(h1.shape))\n if h1.shape[-1] == 16:\n h1 = self._conv(\"conv_add1\", h1, 3, 16, 32, [1,2,2,1])\n elif h1.shape[-1] == 64:\n h1 = self._conv(\"conv_add1\", h1, 3, 64, 64, [1,2,2,1])\n print('h1 shape: {}'.format(h1.shape))\n \n with tf.variable_scope(\"additional1\"):\n h1 = self._batch_norm(\"additional1_bn\", h1)\n h1 = self._relu(\"additional1_relu\", h1)\n print('h1 shape: {}'.format(h1.shape))\n\n h1 = self._global_avg_pool(h1)\n print('h1 shape: {}'.format(h1.shape))\n\n with tf.variable_scope(\"logit_more1\"):\n h1 = self._fully_connected(h1, config.num_classes)\n print('h1 shape: {}'.format(h1.shape))\n\n h2 = self._saved_hidden[2] # (?, 16, 16, 32)\n print('h2 shape: {}'.format(h2.shape))\n if h2.shape[-1] == 32:\n h2 = self._conv(\"conv_add2\", h2, 3, 32, 32, 
[1,2,2,1])\n elif h2.shape[-1] == 64:\n h2 = self._conv(\"conv_add2\", h2, 3, 64, 64, [1,2,2,1])\n else:\n h2 = self._conv(\"conv_add2\", h2, 3, h2.shape[-1], 64, [1,2,2,1])\n print('h2 shape: {}'.format(h2.shape))\n\n with tf.variable_scope(\"additional2\"):\n h2 = self._batch_norm(\"additional2_bn\", h2)\n h2 = self._relu(\"additional2_relu\", h2)\n print('h2 shape: {}'.format(h2.shape))\n\n h2 = self._global_avg_pool(h2)\n print('h2 shape: {}'.format(h2.shape))\n with tf.variable_scope(\"logit_more2\"):\n h2 = self._fully_connected(h2, config.num_classes)\n print('h2 shape: {}'.format(h2.shape))\n return [h1, h2]", "title": "" }, { "docid": "dafe9883f4ebf998c35f923079a9f9ff", "score": "0.4986622", "text": "def compute_accuracy(network_output_tensor, expert_output_tensor):\n network_chosen_positions = data2tensor.get_chosen_positions(network_output_tensor)\n expert_chosen_positions = data2tensor.get_chosen_positions(expert_output_tensor)\n\n matches = (network_chosen_positions == expert_chosen_positions)\n accuracy = np.mean(matches)\n return accuracy", "title": "" }, { "docid": "8a156e377a69ec9b220c6021aa289a7d", "score": "0.49810866", "text": "def predict(self, data_to_classify):\n answer_occurrences = {}\n max_occurrences = -1\n final_answer = 0\n for tree in self.forest:\n current_answer = tree.predict(data_to_classify)\n if current_answer in answer_occurrences:\n answer_occurrences[current_answer] = answer_occurrences[current_answer] + 1\n else:\n answer_occurrences[current_answer] = 1\n if max_occurrences < answer_occurrences[current_answer]:\n max_occurrences = answer_occurrences[current_answer]\n final_answer = current_answer\n\n return final_answer", "title": "" } ]
5ff149adf70930497f1654028eafdbf5
Load up and normalize the data
[ { "docid": "21359bbcda5ebf7e576f3ce55127b9da", "score": "0.0", "text": "def load_data(infile_counts, infile_seqs, infile_total_reads):\n\n # Load up the counts mapping to each gene\n df_counts = pd.read_csv(infile_counts, sep = \",\", index_col=0)\n df_counts[\"name\"] = df_counts.index\n # Load up the total library counts\n df_total_reads = pd.read_csv(infile_total_reads, sep=\",\", index_col=0)\n # Load up the sequence lengths\n lengths = hash_seq_lengths(infile_seqs)\n\n # Merge on total library size\n df_counts = df_counts.merge(df_total_reads, \"inner\", on=[\"name\"])\n\n assert df_counts.shape[0] > 0, \"No lines after merging your gene counts and library counts\"\n\n # Normalize the data\n df_norm = pd.DataFrame()\n for j in range(df_counts.shape[1]):\n # Not relevant columns\n if (df_counts.columns[j] == \"name\") | (df_counts.columns[j] == \"reads\"):\n pass\n else:\n # Normalized by sequence length and by total library reads\n df_norm[df_counts.columns[j]] = (df_counts.iloc[:, j] / lengths[df_counts.columns[j]]) / df_counts.loc[:, \"reads\"]\n df_norm.index = df_counts[\"name\"]\n return df_norm", "title": "" } ]
[ { "docid": "9b51362af4fa34926dd3979d119cd9ce", "score": "0.69728583", "text": "def load_data(self):", "title": "" }, { "docid": "4c5734a2335ef562a6894d5a7730397c", "score": "0.6786813", "text": "def load_data(self):\n self.dataset, self.info = DataLoader()\n self._preprocess_data()", "title": "" }, { "docid": "a1baffcf78d5245b01860ad904ee45f5", "score": "0.67727035", "text": "def load_data(self):\n pass", "title": "" }, { "docid": "c3c56715dd983ddeaf662024502d1672", "score": "0.6706325", "text": "def import_data():", "title": "" }, { "docid": "a69c305f5dd6e71bab971c3dc213f7c4", "score": "0.66237205", "text": "def prepare_data(self):\n self.dataset.prepare_data()", "title": "" }, { "docid": "06995dcb9b4aa5e7ea5fae4ba59c65e2", "score": "0.6596201", "text": "def pre_load_data(self):\n pass", "title": "" }, { "docid": "b8c5dff172047098a53be4fb777cc7ed", "score": "0.65837413", "text": "def normalize_all_data(self):\n self.normalizer = MinMaxScaler().fit(self.X)\n dump(self.normalizer, open(\n f'Results//Transformers/normalizer_{self.params.DATA_TYPE}_{self.params.PREDICTION_TYPE}.pkl', 'wb'))\n normalized_data = self.normalizer.transform(self.X)\n self.X = normalized_data\n self.X_TEST = self.normalizer.transform(self.X_TEST)\n return normalized_data", "title": "" }, { "docid": "8ea9ea15afda1bd5e2eec6998a215392", "score": "0.65526396", "text": "def load_data(self):\n return", "title": "" }, { "docid": "35c52d67baf2246296fad72a06aa8829", "score": "0.6427925", "text": "def prepare_raw_data(self):\n if not os.path.isdir(self.data_dir):\n self.stdout.write(\"Making directory %s\" % self.data_dir);\n os.mkdir(self.data_dir);\n\t\n self.prepare_tree_csv()\n self.parse_tree_csv()\n self.calc_tree_stats();\n \n self.prepare_chair_csv()\n self.parse_chair_csv()", "title": "" }, { "docid": "9423ae4bb7c47407ea6ac6c23e18c08b", "score": "0.63051087", "text": "def load_raw_data(\n scaler: str = \"normalize\",\n) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, Dict[int, str], Dict[str, int]]:\n X_train, X_test = preprocess_raw_data(scaler=scaler)\n # X_train = np.load(os.path.join(DATA_DIR, \"my_dataset/Raw_X_train.npy\"))\n # X_test = np.load(os.path.join(DATA_DIR, \"my_dataset/Raw_X_test.npy\"))\n y_train = pd.read_table(os.path.join(DATA_DIR, \"my_dataset/y_train.txt\"), sep=\" \", header=None)\n y_test = pd.read_table(os.path.join(DATA_DIR, \"my_dataset/y_test.txt\"), sep=\" \", header=None)\n\n activity_labels = pd.read_table(\n os.path.join(DATA_DIR, \"hapt_data_set/activity_labels.txt\"), header=None\n ).values.flatten()\n activity_labels = np.array([label.rstrip().split() for label in activity_labels])\n label2act, act2label = {}, {}\n for label, activity in activity_labels:\n label2act[int(label)] = activity\n act2label[activity] = int(label)\n\n # Replace 6 to 0\n rep_activity = label2act[6]\n label2act[0] = rep_activity\n label2act.pop(6)\n act2label[rep_activity] = 0\n\n y_train = y_train.replace(6, 0)\n y_test = y_test.replace(6, 0)\n\n return X_train, X_test, y_train.values, y_test.values, label2act, act2label", "title": "" }, { "docid": "2f7915fec23608f097b5a0a45746065e", "score": "0.6276998", "text": "def load_data(self):\n self.transforms = TransformList.from_database(self.curs)", "title": "" }, { "docid": "03141e00247f5333ef44c1df41770ffe", "score": "0.6265393", "text": "def prepare_datatraining(self):\n self.array_features_totrain, self.array_labels_totrain , self.df_totrain = ext.prepare_data(self.df_decks_totrain, self.features)\n\n self.next(self.prepare_mlmagic)", 
"title": "" }, { "docid": "dd41f86ed4ea59f09c7d087a00f1937b", "score": "0.6224487", "text": "def normalizeData(self):\n return self._normalize_data", "title": "" }, { "docid": "c3aa0fbadded4c78224319776dd9a334", "score": "0.622305", "text": "def preprocess(self):\n # standardize the data\n if self.params.STANDARDIZE_DATA:\n self.standardize_all_data()\n\n # normalize the data\n if self.params.NORMALIZE_DATA:\n self.normalize_all_data()\n\n # reduce the dimensions of the data with PCA\n if self.params.PCA_DATA:\n self.pca_all_data()\n\n # increase the dimensionality of the data with polynomial features\n if self.params.POLY_DATA:\n self.poly_all_data()\n\n # resample the data\n if self.sampler is not None:\n self.resample()", "title": "" }, { "docid": "053608fc408e931e2a539fe28af61f6d", "score": "0.6202006", "text": "def load_data(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "ddeff4790564bf5e23cef7008af46631", "score": "0.619477", "text": "def _load_training_data(self):\n if self._all_data is None:\n self._load_all_data()\n test_index = int(self.training_fraction * len(self._all_data[0]))\n self._training_data = self._all_data[0][test_index:], self._all_data[1][test_index:]\n super()._load_training_data()", "title": "" }, { "docid": "bdf1689fbd7dffe4f7db8a39f013b048", "score": "0.6139918", "text": "def prepare_dataset(self, dataset_path = \"\"):\n \n projection_train = open(dataset_path + 'x_projection_train.pkl', 'rb')\n state_train = open(dataset_path + 'state_train.pkl', 'rb')\n mnist = open(dataset_path + 'mnist.pkl','rb')\n\n u_1 = pickle._Unpickler(projection_train)\n u_2 = pickle._Unpickler(state_train)\n u_3 = pickle._Unpickler(mnist)\n u_1.encoding = 'latin1'\n u_2.encoding = 'latin1'\n u_3.encoding = 'latin1'\n \n self.projection_train_data = u_1.load()\n self.state_train_data = u_2.load()", "title": "" }, { "docid": "f0adc88554f7802dbbbe9bd64b0b885f", "score": "0.61389536", "text": "def preprocess(cls, data):", "title": "" }, { "docid": "ee36e545c4edc37a07822bbdce14b067", "score": "0.61366", "text": "def load(self):\r\n\t\tdf = pd.read_csv(self.input_file)\r\n\t\tself.features = [col for col in df.columns if col!='label']\r\n\r\n\t\tdf['cluster'] = np.nan\t\r\n\t\tdf['original'] = 1\r\n\t\tself.data = df", "title": "" }, { "docid": "b97bd8053080a1daaddac641c88306ef", "score": "0.6133768", "text": "def preprocess(self, **kwargs):\n\n self._load_data()\n self._clean_sentence_column()\n self._clean_label_column()\n self._drop_underrepresented_classes()\n train_dataloader, val_dataloader = self._prepare_data_for_training()\n\n logger.debug('Data preprocessing complete')\n\n return train_dataloader, val_dataloader", "title": "" }, { "docid": "13b75bd436261e3d489dcb7e0d407b40", "score": "0.61122125", "text": "def get_data(self):\n path = str(Path(__file__).parent.parent.joinpath(\"data\", \"data-normalization-output.csv\"))\n df = pandas.read_csv(path)\n\n df.drop(df.columns[0], axis=1, inplace=True)\n avg_avg_score = df['average_score'].mean()\n df['average_score'] = df['average_score'].replace(numpy.nan, avg_avg_score)\n df.fillna(0, inplace=True)\n\n self.change_oh_cat(\"code_presentation\", df)\n self.change_oh_cat(\"code_module\", df)\n self.change_oh_cat(\"gender\", df)\n self.change_oh_cat(\"region\", df)\n self.change_oh_cat(\"highest_education\", df)\n self.change_oh_cat(\"imd_band\", df)\n self.change_oh_cat(\"age_band\", df)\n self.change_oh_cat(\"disability\", df)\n result_order = {'final_result__Distinction': 3, 'final_result__Fail': 0,\n 
'final_result__Pass': 2, 'final_result__Withdrawn': 1}\n self.change_oh_cat(\"final_result\", df, result_order)\n\n target = df[\"final_result\"]\n df.drop([\"final_result\"], axis=1, inplace=True)\n\n x_train, x_test, y_train, y_test = train_test_split(df, target, test_size=0.1,\n random_state=32, shuffle=True,\n stratify=target)\n\n return x_train, x_test, y_train, y_test", "title": "" }, { "docid": "f86ad10f7290b5cfd93002d2fa034066", "score": "0.61115366", "text": "def load_data(self):\n raise NotImplementedError()", "title": "" }, { "docid": "4e1e1e8914885fa4d3cfc9b17986ab90", "score": "0.6090532", "text": "def _from_scratch(self):\n self.logger.info('Loading data from scratch')\n self._load_target_data()\n self._load_source_data()\n if self.tokenize_strategy != 'none':\n self._build_vocab()\n self._text2idx()\n if self.config['vocab_size'] is not None or self.source_vocab_size == 1e8:\n self.vocab_size = self.target_vocab_size\n self.idx2token = self.target_idx2token\n self.token2idx = self.target_token2idx\n if self.config['seq_len'] is not None or self.source_max_length == 1e4:\n self.max_length = self.target_max_length\n self._build_data()\n self._dump_data()", "title": "" }, { "docid": "e3c39b726adb561e48f1bef51606bde8", "score": "0.60856247", "text": "def load_data(self, folder):\n self.splits['raw'] = pickle.load(open(os.path.join('processed_data', folder, \"data\"), 'rb'))\n self.settings = json.load(open(os.path.join('processed_data', folder, \"settings\"), 'r'))\n self.sc_y = pickle.load(open(os.path.join('processed_data', folder, \"sc_y\"), 'rb'))", "title": "" }, { "docid": "d2de579c8247a32f3e118166bed1d6c9", "score": "0.6079877", "text": "def _load_restored(self):\n for prefix in ['train', 'valid', 'test']:\n filename = os.path.join(self.dataset_path, f'{prefix}.bin')\n data = torch.load(filename)\n setattr(self, f'{prefix}_data', data)\n\n for key, value in self.test_data.items():\n if not isinstance(value, list):\n setattr(self, key, value)", "title": "" }, { "docid": "76b648449c364b95a0df2e8043794ad2", "score": "0.6069593", "text": "def migrate_data(self):\r\n # migrate keys\r\n self.__migrate_daily_cached_keys()\r\n\r\n # solidify md\r\n self.__make_daily_storage_md_table()\r\n self.__solidify_md_data()\r\n\r\n # solidify k lines\r\n self.__make_daily_storage_kl_table()\r\n self.__solidify_kl_data()\r\n\r\n # flush cached db\r\n self.cache_wrapper.flush_db()", "title": "" }, { "docid": "439bb6249fb5022e935a54b40e3a3929", "score": "0.6050759", "text": "def data_preprocessing(self):\r\n self.complete_data = self.frame.dropna()\r\n self.partial_data = self.frame[self.frame.isnull().any(axis=1)]\r\n # Splitting complete and incomplete rows.\r\n \r\n arr = np.array(self.complete_data, dtype = np.float)\r\n arr2 = np.array(self.partial_data, dtype = np.float)\r\n # Converting values to floats.\r\n arr = MinMaxScaler().fit_transform(arr)\r\n # Scaling complete rows for further processing.\r\n self.X, self.y = arr[:, :-1], arr[:, -1]\r\n self.to_fill_X = arr2[:, :-1]\r\n # Splitting scaled data for further processing.\r\n self.to_fill_X = MinMaxScaler().fit_transform(self.to_fill_X)\r\n # Scaling incomplete rows.\r\n return", "title": "" }, { "docid": "5a20d7960043d661b2c77fe12fda4a27", "score": "0.6021032", "text": "def _preprocess_data(self, data):\n if len(data) < 1:\n self.feature_count = 0\n return data\n self._feature_extractor.init(data)\n data = [self._feature_extractor.get(x) for x in data]\n return data", "title": "" }, { "docid": "5265d69e7b822debe7505256485e9e70", 
"score": "0.60204136", "text": "def ingest_data(self):\n\n self.parse_size()\n self.make_tiles()\n self.set_map_bytes()\n self.make_tile_grid()", "title": "" }, { "docid": "134f63f56094efa52c69eadbdfcf5bef", "score": "0.6009589", "text": "def __cargar_data(self):\n self._feature_name = self.__iris.feature_names\n self._target_name = self.__iris.target_names\n self._x_train, \\\n self._x_test, \\\n self._y_train, \\\n self._y_test = \\\n train_test_split(self.__iris.data, self.__iris.target)\n self._entrenar_arbol()", "title": "" }, { "docid": "bd6085435f766583a55810221197a9f3", "score": "0.6005019", "text": "def Load_Data(path):\n raw_data = pd.read_csv(path)\n\n raw_label = raw_data[[\"price\"]].copy()\n raw_data = raw_data.drop(columns= [\"price\"])\n X_train, X_test, y_train, y_test = train_test_split(raw_data, raw_label, test_size = 0.1, random_state = 42,shuffle= True)\n\n\n\n # process the \"neighbourhood_group\"\n group_encoder = OneHotEncoder()\n group_encoder.fit(X_train, \"neighbourhood_group\")\n\n # process the \"neighbourhood\"\n neighbour_encoder = OneHotEncoder()\n neighbour_encoder.fit(X_train, \"neighbourhood\")\n\n # process the latitude\n latitude_norm = Max_Min_Norm()\n latitude_norm.fit(X_train, \"latitude\")\n\n # process the longitude\n longitude_norm = Max_Min_Norm()\n longitude_norm.fit(X_train, \"longitude\")\n\n # process the room_type\n room_type_encoder = OneHotEncoder()\n room_type_encoder.fit(X_train, \"room_type\")\n\n # process the review_per_month\n review_per_month = FillNa()\n review_per_month.fit(X_train, \"reviews_per_month\", \"mean\")\n\n\n group_train = group_encoder.transform(X_train, \"neighbourhood_group\")\n neighbourhood_train = neighbour_encoder.transform(X_train, \"neighbourhood\")\n latitude_train = latitude_norm.transform(X_train, \"latitude\")\n longitude_train = longitude_norm.transform(X_train,\"longitude\")\n room_type_train = room_type_encoder.transform(X_train, \"room_type\")\n minimum_nights_train = np.log1p(X_train[[\"minimum_nights\"]]).to_numpy()\n number_of_reviews_train = X_train[[\"number_of_reviews\"]].to_numpy()\n last_review_train = X_train[[\"last_review\"]].to_numpy()\n reviews_per_month_train = review_per_month.transform(X_train, \"reviews_per_month\")\n calculated_host_listings_count_train = X_train[[\"calculated_host_listings_count\"]].to_numpy()\n availability_365_train = X_train[[\"availability_365\"]].to_numpy()\n rate_train = X_train[[\"rate\"]].to_numpy()\n\n X_train = np.hstack((group_train, neighbourhood_train, latitude_train,longitude_train, room_type_train, minimum_nights_train, number_of_reviews_train ,\n last_review_train,reviews_per_month_train, calculated_host_listings_count_train, availability_365_train, rate_train ))\n Y_train = np.log1p(y_train).to_numpy()\n\n group_test = group_encoder.transform(X_test, \"neighbourhood_group\")\n neighbourhood_test = neighbour_encoder.transform(X_test, \"neighbourhood\")\n latitude_test = latitude_norm.transform(X_test, \"latitude\")\n longitude_test = longitude_norm.transform(X_test, \"longitude\")\n room_type_test = room_type_encoder.transform(X_test, \"room_type\")\n minimum_nights_test = np.log1p(X_test[[\"minimum_nights\"]]).to_numpy()\n number_of_reviews_test = X_test[[\"number_of_reviews\"]].to_numpy()\n last_review_test = X_test[[\"last_review\"]].to_numpy()\n reviews_per_month_test = review_per_month.transform(X_test, \"reviews_per_month\")\n calculated_host_listings_count_test = X_test[[\"calculated_host_listings_count\"]].to_numpy()\n availability_365_test = 
X_test[[\"availability_365\"]].to_numpy()\n rate_test = X_test[[\"rate\"]].to_numpy()\n\n X_test = np.hstack((group_test, neighbourhood_test, latitude_test, longitude_test, room_type_test,\n minimum_nights_test, number_of_reviews_test,\n last_review_test, reviews_per_month_test, calculated_host_listings_count_test,\n availability_365_test, rate_test))\n Y_test = np.log1p(y_test).to_numpy()\n\n return np.array(X_train), np.array(Y_train), np.array(X_test), np.array(Y_test)", "title": "" }, { "docid": "51d276ac82ef35caa0465bba08fb46e6", "score": "0.60014564", "text": "def preprocess(self):\n print \"PREPROCESSING...\"\n\n # if the data has been preprocessed before, just return\n for file in os.listdir(\".\"):\n if file == self.destination:\n print \"Loading from \" + self.destination\n with open(self.destination, \"r\") as f:\n self.data = json.load(f)\n return\n\n # preprocess business.json\n print \"Preprocessing business.json\"\n with open(self.business, \"r\") as f:\n for line in f:\n line = json.loads(line)\n id = line[\"business_id\"]\n if id in self.data:\n print \"id already exists\"\n else:\n self.data[id] = line\n self.data[id][\"review\"] = []\n self.data[id][\"tip\"] = []\n\n # preprocess review.json\n print \"Preprocessing review.json\"\n with open(self.review, \"r\") as f:\n for line in f:\n line = json.loads(line)\n if line[\"business_id\"] in self.data:\n self.data[line[\"business_id\"]][\"review\"].append(line)\n else:\n print \"review not related!\"\n\n # preprocess tip.json\n print \"Preprocessing tip.json\"\n with open(self.tip, \"r\") as f:\n for line in f:\n line = json.loads(line)\n if line[\"business_id\"] in self.data:\n self.data[line[\"business_id\"]][\"tip\"].append(line)\n else:\n print \"tip not related!\"\n\n # save the preprocessed data to destination file\n if self.destination:\n print \"Writing to \" + self.destination\n with open(self.destination, \"w\") as f:\n json.dump(self.data, f)", "title": "" }, { "docid": "124ad32b54f295fe3a95f65292f5692d", "score": "0.59963626", "text": "def load_data_full():\n return process_all()", "title": "" }, { "docid": "570c7ac11f09f0062af93cd9e3bb552b", "score": "0.5992851", "text": "def prepare_datascoring(self):\n self.array_features_toscore, self.array_labels_toscore, self.df_toscore = ext.prepare_data(self.df_decks_toscore, self.features)\n \n self.next(self.prepare_mlmagic)", "title": "" }, { "docid": "dfb49767b68c7f2c60f80aa547b30023", "score": "0.59888357", "text": "def init_dataset():\n data.clean()\n data.extract_from_api()\n data.desc_sol_data()\n data.entity_data()\n data.intent_data()\n data.code_entity()", "title": "" }, { "docid": "8a9c96c50ae158fa9f38b4d84d669bc1", "score": "0.59846157", "text": "def load_data():\n for i in range(CLASSES):\n tmp = np.loadtxt(\"./Iris_TTT4275/class_\"+str(i+1),delimiter=\",\")\n \n # Add the class, and 1\n class_number = np.ones((tmp.shape[0],2)) \n class_number[:,-1] *= i \n\n tmp = np.hstack((tmp, class_number))\n if i > 0:\n data = np.vstack((data, tmp))\n else:\n data = copy.deepcopy(tmp)\n\n # Normalize\n tmp = data[:,:-1] \n # tmp = tmp - tmp.mean(axis=0)\n tmp = tmp / tmp.max(axis=0)\n data[:,:-1] = tmp\n\n return data", "title": "" }, { "docid": "f2ab8a0970da932652dbb2e1c7103b8f", "score": "0.5976011", "text": "def prepareData(self): \n \n # find train & test ID based on label or not\n #trainInd = [i for i, e in enumerate(self.label) if e != None] # index of training data\n #testInd = [i for i, e in enumerate(self.label) if e == None] # index of test data\n \n trainInd 
= [i for i, e in enumerate(self.label) if e != ''] # index of training data\n testInd = [i for i, e in enumerate(self.label) if e == ''] # index of test data\n\n logging.info('%s number of samples is used to train model, %s number of samples for testing', len(trainInd), len(testInd))\n logging.debug('train data indices are: ' + ','.join(map(str, trainInd)))\n logging.debug('test data indices are: ' + ','.join(map(str, testInd)))\n \n self.X_train = self.features[trainInd]\n self.X_test = self.features[testInd]\n self.y_train = [self.label[idx] for idx in trainInd]\n \n # document ID (database table key) without labeling\n self.id_test = [self.srids[idx] for idx in testInd]", "title": "" }, { "docid": "d8e5fcb6aa3c9ecf9e3e76db9a2e7315", "score": "0.59677124", "text": "def load_data_norm(self):\n Batch_Img = self.data_loader.load_facade()\n return img255_normalization(Batch_Img)", "title": "" }, { "docid": "9c423ce2c52c0473b74d25c21a15c8f9", "score": "0.5965409", "text": "def load_data():\n current_app.shops = pd.read_csv(data_path('shops.csv'), index_col=0)\n current_app.products = pd.read_csv(data_path('products.csv'), index_col=0)\n current_app.tags = pd.read_csv(data_path('tags.csv'), index_col=0)\n current_app.taggings = pd.read_csv(data_path('taggings.csv')).reset_index()\n current_app.taggings.drop(['id'], axis=1, inplace=True)", "title": "" }, { "docid": "127dc10fffc69bdb6502739115fc4789", "score": "0.5962598", "text": "def load_data(file):", "title": "" }, { "docid": "103516cdd9b6777962dce9956020b663", "score": "0.59471387", "text": "def preprocess(self):\n lines = [line.rstrip() for line in open(self.attr_path, 'r')]\n all_attr_names = lines[1].split()\n for i, attr_name in enumerate(all_attr_names):\n self.attr2idx[attr_name] = i\n self.idx2attr[i] = attr_name\n \n lines = lines[2:]\n random.seed(1234)\n random.shuffle(lines)\n # Dataset has 7757 images.\n if self.mode == 'train':\n lines = lines[:-1500] # train set contains x - 1500 images\n if self.mode == 'val':\n lines = lines[-1500:-500] # val set contains 1000 images\n if self.mode == 'test':\n lines = lines[-500:] # test set contains 500 images\n\n imgs, labels = [], []\n for i, line in enumerate(lines):\n split = line.split()\n filename = split[0]\n imgs.append(filename)\n values = split[1:]\n\n for attr_name in self.selected_attrs:\n idx = self.attr2idx[attr_name]\n labels.append(values[idx] == '1')\n \n self.dataset = {'imgs': imgs, 'labels': labels}", "title": "" }, { "docid": "b929a63b66980e3250c67497e0811b54", "score": "0.5927378", "text": "def prepare_data(self):\n if self.vectorizer == 'count':\n # can change to 'vocabulary=pickle.load(open(\"feature.pkl\", \"rb\"))'\n vec = CountVectorizer(vocabulary=self.vocabulary, ngram_range=(1, 2))\n else:\n vec = TfidfVectorizer(vocabulary=self.vocabulary, ngram_range=(1, 2))\n x_test = vec.fit_transform(np.array(self.newdata))\n self.newdata = x_test.toarray()", "title": "" }, { "docid": "0baf45d7c9e5e7fe8fc223ccd0bcb535", "score": "0.5916923", "text": "def _load_data(self):\n # row: users col: movies\n ratings = np.array(pd.read_csv(RATINGS_DATA, index_col=0))\n # use subset of movies\n self.ratings = ratings[:,:TOTAL_MOVIES]\n\n # movies information\n items = pd.read_csv(MOVIES_DATA, index_col=0)\n self.movie_names = items.Name[:TOTAL_MOVIES]\n\n # pre-compute the similarity matrix\n print \"Computing ratings similarity matrix...\"\n self.sim_mat = ur.getsim(self.ratings)", "title": "" }, { "docid": "83bd91de16614801da3ffa7f72874627", "score": "0.5913714", "text": "def 
load_data():\n data = raw_data()\n data['offense_win'] = (data.offense_final > data.defense_final).astype(int)\n data['score_diff'] = data.offense_score - data.defense_score\n data['score_rate'] = data.score_diff / data.time_left\n return data", "title": "" }, { "docid": "151d6571c71a96747d4be2875cf2c52e", "score": "0.5913239", "text": "def load(self,data):\n raise NotImplementedError", "title": "" }, { "docid": "22d734ae04dfc88c7fd28debe14ab8bf", "score": "0.59109485", "text": "def sort_dataset(self) -> None: \r\n self._make_directories()\r\n\r\n data = self._load_data()\r\n\r\n data_sp = self._split_data(data)\r\n data_sp[\"train\"] = self._balance_dataset(data_sp[\"train\"])\r\n\r\n self._save_data(data_sp)", "title": "" }, { "docid": "794651d957ca9b2ded68916563bc201b", "score": "0.5906732", "text": "def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data', '_events')\n # read event file\n labels = pd.read_csv(events_fname)\n clean = data.drop(['id'], axis=1) # remove id\n labels = labels.drop(['id'], axis=1) # remove id\n return clean, labels", "title": "" }, { "docid": "784e84ee350b3399fd2e1ae7ed27fc59", "score": "0.5906414", "text": "def data_preprocess(self, raw_data):\n return raw_data", "title": "" }, { "docid": "9158ce1d9c9355e3442e723160991e2a", "score": "0.5904077", "text": "def preprocess(self):\n preprocessed_directory = os.path.join(self.database_directory,\n 'preprocessed_{}'.format(self.preprocessed_image_size))\n if os.path.exists(preprocessed_directory):\n shutil.rmtree(preprocessed_directory)\n os.makedirs(preprocessed_directory)\n for data_type in ['train', 'validation', 'test']:\n preprocessed_data_type_directory = os.path.join(preprocessed_directory, data_type)\n os.makedirs(preprocessed_data_type_directory)\n for item in os.listdir(os.path.join(self.database_directory, data_type)):\n item_path = os.path.join(self.database_directory, data_type, item)\n if item.startswith('.'):\n continue\n elif item.endswith('.jpg'):\n item_directory = os.path.dirname(item_path)\n self.crop_image_to_face(item_directory, item, preprocessed_data_type_directory)\n elif os.path.isdir(item_path):\n for image_name in os.listdir(item_path):\n if not image_name.endswith('.jpg'):\n raise NotImplementedError()\n self.crop_image_to_face(item_path, image_name, preprocessed_data_type_directory)\n elif item.endswith('_gt.csv'):\n with open(item_path) as csv_file:\n csv_reader = csv.reader(csv_file)\n json_list = []\n next(csv_reader) # Skip header line.\n for csv_line in csv_reader:\n image_name, age, age_standard_deviation = csv_line\n example_meta_dict = {'image_name': image_name, 'age': age,\n 'age_standard_deviation': age_standard_deviation}\n json_list.append(example_meta_dict)\n with open(os.path.join(preprocessed_data_type_directory, 'meta.json'), 'w+') as json_file:\n json.dump(json_list, json_file)", "title": "" }, { "docid": "2ec33f7892b7a4f31d00b51a57c47e91", "score": "0.5898681", "text": "def normalize_data(self):\n\n # Normalize inputs\n if self.normalize_input:\n logger.debug(\"Normalizing X of shape %s.\" % str(self.X.shape))\n self.X, self.X_mean, self.X_std = zero_mean_unit_var_normalization(self.X)\n logger.debug(\"Normalized X, mean and std have shapes %s, %s and %s\" %\n (str(self.X.shape), self.X_mean.shape, self.X_std.shape))\n\n # Normalize ouputs\n if self.normalize_output:\n logger.debug(\"Normalizing y of shape %s.\" % str(self.y.shape))\n self.y, self.y_mean, self.y_std = 
zero_mean_unit_var_normalization(self.y)\n logger.debug(\"Normalized y, mean and std have shapes %s, %s and %s\" %\n (str(self.y.shape), self.y_mean.shape, self.y_std.shape))", "title": "" }, { "docid": "b1ed627babdc7f159eb95e7365ca9586", "score": "0.58948594", "text": "def __initData(self):\n\n # attempt to read serialized data:\n try:\n self.locations = pickle.load(open(os.path.join(\"tsp-data\", self.name + \"-loc.pickle\"), \"rb\"))\n self.distances = pickle.load(open(os.path.join(\"tsp-data\", self.name + \"-dist.pickle\"), \"rb\"))\n except (OSError, IOError):\n pass\n\n # serailized data not found - create the data from scratch:\n if not self.locations or not self.distances:\n self.__createData()\n\n # set the problem 'size':\n self.tspSize = len(self.locations)", "title": "" }, { "docid": "f257af8a8df04f7970dadac4727886a4", "score": "0.58925456", "text": "def preprocess_data(self):\n g = self.graph.G\n look_back = self.graph.look_back_list\n self.features = np.vstack([g.nodes[look_back[i]]['feature']\n for i in range(g.number_of_nodes())])\n self.features = preprocess_features(self.features)\n self.build_label()\n self.build_train_val_test()\n adj = nx.adjacency_matrix(g) # the type of graph\n self.support = [preprocess_adj(adj)]", "title": "" }, { "docid": "41399aa716a8bb46caf22e8af681af5e", "score": "0.58901167", "text": "def prepare_data(self):\n modes = self.dataset.modes()\n for mode in modes:\n cached_features_file = self._feature_file(mode)\n if not os.path.exists(cached_features_file)\\\n or self.hparams['overwrite_cache']:\n self.load_features(mode)", "title": "" }, { "docid": "59245fb0c40ce2cfba674e753560dbc0", "score": "0.58856004", "text": "def __load(self):\r\n full_classifier_name = pjoin(\r\n 'data', 'model', self.type, self.name) + '.pkl'\r\n with open(full_classifier_name, 'rb') as output:\r\n self.data = load(output)", "title": "" }, { "docid": "e72a7416e0577ce1eda45a1d1a16608b", "score": "0.5872454", "text": "def load_data_set(self,filename):\n pass", "title": "" }, { "docid": "071e64313d7d659de6d483d8ae103238", "score": "0.5869272", "text": "def _read_train_data(self):\r\n label_file = pd.read_csv(self.train_data_file)\r\n print('READING LABELS OF TRAIN DATA')\r\n for i in range(label_file.shape[0]):\r\n if label_file.at[i, 'image_category'] == self.dress_type: # Only take the type we want\r\n joints = []\r\n name = str(label_file.at[i, 'image_id'])\r\n weight = []\r\n box = []\r\n for joint_name in self.joints_list:\r\n joint_value = []\r\n value = str(label_file.at[i, joint_name])\r\n value = value.split('_')\r\n # print(value)\r\n joint_value.append(int(value[0]))\r\n joint_value.append(int(value[1]))\r\n joints.append(joint_value)\r\n if value[2] == '-1':\r\n weight.append(0)\r\n else:\r\n weight.append(1)\r\n # box of body,[x_box_min,y_box_min,x_box_max,y_box_max]\r\n box.append(self._min_point(joints, 0))\r\n box.append(self._min_point(joints, 1))\r\n box.append(max([x[0] for x in joints]))\r\n box.append(max([x[1] for x in joints]))\r\n # print(box)\r\n # print(name)\r\n joints = np.reshape(joints, (-1, 2))\r\n self.data_dict[name] = {'box': box, 'joints': joints, 'weights': weight}\r\n self.train_table.append(name)\r\n print('FINISH')\r\n return [self.train_table, self.data_dict]", "title": "" }, { "docid": "433c5fac9487250607e1e4a5ffa2308e", "score": "0.5862384", "text": "def load_validating_data(self):\n if self.data_val is not None:\n return \n self.data_val, self.ngraphs_val = self.load_data('val')", "title": "" }, { "docid": 
"9d41d695fce2ded0bb53039b7b845516", "score": "0.58602554", "text": "def load_data(self):\n def blank_filter(text):\n \"\"\"\n Remove blank line entries.\n\n :param str text: original text\n :return: text with blank entries removed\n :rtype: str\n \"\"\"\n return re.sub(r'^\\d+,+$', '', text, flags=re.MULTILINE)\n\n def year_parser(year):\n \"\"\"\n Format year string to be ISO 8601 string.\n :param year: year\n :return: year in ISO 8601 format\n :rtype: str\n \"\"\"\n return [f'{x}-01-01' for x in year]\n\n self.players = (pd.read_csv(players_file,\n date_parser=year_parser,\n dtype=self.players_types,\n header=None,\n index_col=5,\n names=self.players_types.keys(),\n parse_dates=[5],\n skiprows=1,\n )\n .drop('idx', axis=1)\n .dropna(how='all'))\n self.players.player = self.players.player.str.replace('*', '')\n\n with open(season_file, 'r') as f:\n season_text = f.read()\n filtered_text = blank_filter(season_text)\n logger.info('Season Stats Dataset cleaned')\n\n self.stats = (pd.read_csv(io.StringIO(filtered_text),\n date_parser=year_parser,\n dtype=self.stats_types,\n header=None,\n index_col=1,\n names=self.stats_types.keys(),\n parse_dates=[1],\n skiprows=1,\n )\n .drop(['blank_1', 'blank_2', 'idx'], axis=1))\n self.stats.player = self.stats.player.str.replace('*', '')\n\n # Hall of Fame\n filter_players = self.fame.query('category == \"Player\"').name\n\n self.players_fame = (self.players[(self.players.player\n .isin(filter_players))]\n .drop_duplicates())\n\n stats_mask = self.stats.player.isin(filter_players)\n self.stats_fame = self.stats[stats_mask]\n\n # Features and Response\n self.features = self.stats.drop('player', axis=1)\n for col in self.features.select_dtypes(include=['category']).columns:\n self.features[col] = (self.features[col]\n .cat\n .codes)\n self.features['response'] = 0\n self.features.loc[stats_mask, 'response'] = 1\n logger.info('Datasets Loaded')", "title": "" }, { "docid": "e6f1c32cbcb8c67998695e1144b0f1c6", "score": "0.5854773", "text": "def data_preprocess(self):\n def worker_init_fn(worker_id):\n np.random.seed(np.random.get_state()[1][0] + worker_id)\n\n transform_config = self.config[\"dataloader\"][\"transform\"]\n test_img_df = pd.read_csv(self.__input_path() / \"test.csv\")\n\n dataset_name = self.config[\"dataloader\"][\"dataset\"]\n\n test_transform = self.__load_transforms(transform_config[\"test\"])\n test_dataset_params = {\"img_df\": test_img_df, \"transform\": test_transform}\n test_dataset = self.__load_dataset(dataset_name, test_dataset_params)\n self.predict = np.zeros((len(test_dataset))) # only test process\n self.test_loader = DataLoader(test_dataset,\n shuffle=False,\n num_workers=4,\n batch_size=self.config[\"dataloader\"][\"batch_size\"],\n worker_init_fn=worker_init_fn)", "title": "" }, { "docid": "1a038f19b0d779d5985ab876e845f3c1", "score": "0.5847199", "text": "def dataPrep(train_file_path,instance_label,class_label,test_file_path):\n train = pd.read_csv(train_file_path)\n if instance_label != 'None':\n train = train.drop(instance_label,axis=1)\n trainX = train.drop(class_label,axis=1).values\n trainY = train[class_label].values\n del train #memory cleanup\n test = pd.read_csv(test_file_path)\n if instance_label != 'None':\n test = test.drop(instance_label,axis=1)\n testX = test.drop(class_label,axis=1).values\n testY = test[class_label].values\n del test #memory cleanup\n return trainX,trainY,testX,testY", "title": "" }, { "docid": "636f060ad5060bc6aea7da61bfb256b1", "score": "0.5846362", "text": "def load_data(self):\n start = 
time()\n\n # organize info\n if self.transform in (None, 'none'):\n if self.label_type == 'all':\n file_name = 'ICLabels_expert.pkl'\n elif self.label_type == 'luca':\n file_name = 'ICLabels_onlyluca.pkl'\n processed_file_name = 'processed_dataset'\n if self.unique:\n processed_file_name += '_unique'\n if self.label_type == 'all':\n processed_file_name += '_all'\n self.check_for_download('train_labels')\n elif self.label_type == 'luca':\n processed_file_name += '_luca'\n self.check_for_download('train_labels')\n elif self.label_type == 'database':\n processed_file_name += '_database'\n self.check_for_download('database')\n processed_file_name += '.pkl'\n\n # load processed data file if it exists\n if isfile(join(self.datapath, 'cache', processed_file_name)):\n dataset = joblib.load(join(self.datapath, 'cache', processed_file_name))\n\n # if not, create it\n else:\n # load features\n features = []\n feature_labels = []\n print('Loading full dataset...')\n\n self.check_for_download('train_features')\n # topo maps, old psd, dipole, and handcrafted\n with h5py.File(join(self.datapath, 'features', 'features_0D1D2D.mat'), 'r') as f:\n print('Loading 0D1D2D features...')\n features.append(np.asarray(f['features']).T)\n feature_labels.append(self.__load_matlab_cellstr(f, 'labels'))\n # new psd\n with h5py.File(join(self.datapath, 'features', 'features_PSD_med_var_kurt.mat'), 'r') as f:\n print('Loading PSD features...')\n features.append(list())\n for element in f['features_out'][0]:\n data = np.array(f[element]).T\n # if no data, skip\n if data.ndim == 1 or data.dtype != np.float64:\n continue\n nyquist = (data.shape[1] - 2) / 3\n nfreq = 100\n # if more than nfreqs, remove extra\n if nyquist > nfreq:\n data = data[:, np.concatenate((range(2 + nfreq),\n range(2 + nyquist, 2 + nyquist + nfreq),\n range(2 + 2*nyquist, 2 + 2*nyquist + nfreq)))]\n # if less than nfreqs, repeat last frequency value\n elif nyquist < nfreq:\n data = data[:, np.concatenate((range(2 + nyquist),\n np.repeat(1 + nyquist, nfreq - nyquist),\n range(2 + nyquist, 2 + 2*nyquist),\n np.repeat(1 + 2*nyquist, nfreq - nyquist),\n range(2 + 2*nyquist, 2 + 3*nyquist),\n np.repeat(1 + 3*nyquist, nfreq - nyquist))\n ).astype(int)]\n\n features[-1].append(data)\n features[-1] = np.concatenate(features[-1], axis=0)\n feature_labels.append(['ID_set', 'ID_ic'] + ['psd_median']*nfreq\n + ['psd_var']*nfreq + ['psd_kurt']*nfreq)\n # autocorrelation\n with h5py.File(join(self.datapath, 'features', 'features_AutoCorr.mat'), 'r') as f:\n print('Loading AutoCorr features...')\n features.append(list())\n for element in f['features_out'][0]:\n data = np.array(f[element]).T\n if data.size > 2 and data.shape[1] == 102 and not len(data.dtype):\n features[-1].append(data)\n features[-1] = np.concatenate(features[-1], axis=0)\n feature_labels.append(self.__load_matlab_cellstr(f, 'feature_labels')[:2] + ['Autocorr'] * 100)\n\n # find topomap duplicates\n print('Finding topo duplicates...')\n _, duplicate_order = np.unique(features[0][:, 2:742].astype(np.float32), return_inverse=True, axis=0)\n do_sortind = np.argsort(duplicate_order)\n do_sorted = duplicate_order[do_sortind]\n do_indices = np.where(np.diff(np.concatenate(([-1], do_sorted))))[0]\n group2indices = [do_sortind[do_indices[x]:do_indices[x + 1]] for x in range(0, duplicate_order.max())]\n del _\n\n # load labels\n if self.label_type == 'database':\n # load data from database\n conn = sqlite3.connect(join(self.datapath, 'labels', 'database.sqlite'))\n c = conn.cursor()\n dblabels = 
c.execute('SELECT * FROM labels '\n 'INNER JOIN images ON labels.image_id = images.id '\n 'WHERE user_id IN '\n '(SELECT user_id FROM labels '\n 'GROUP BY user_id '\n 'HAVING COUNT(*) >= 30)'\n ).fetchall()\n conn.close()\n # reformat as list of ndarrays\n dblabels = [(x[1], np.array(x[15:17]), np.array(x[3:11])) for x in dblabels]\n dblabels = [np.stack(x) for x in zip(*dblabels)]\n # organize labels by image\n udb = np.unique(dblabels[1], return_inverse=True, axis=0)\n dblabels = [(dblabels[0][y], dblabels[1][y][0], dblabels[2][y])\n for y in (udb[1] == x for x in range(len(udb[0])))]\n label_index = np.stack((x[1] for x in dblabels))\n\n elif self.label_type == 'luca':\n # load data from database\n conn = sqlite3.connect(join(self.datapath, 'labels', 'database.sqlite'))\n c = conn.cursor()\n dblabelsluca = c.execute('SELECT * FROM labels '\n 'INNER JOIN images ON labels.image_id = images.id '\n 'WHERE user_id = 1').fetchall()\n conn.close()\n # remove low-confidence labels\n dblabelsluca = [x for x in dblabelsluca if x[10] == 0]\n # reformat as ndarray\n labels = np.array([x[3:10] for x in dblabelsluca]).astype(np.float32)\n labels /= labels.sum(1, keepdims=True)\n labels = [labels]\n label_index = np.array([x[15:17] for x in dblabelsluca])\n transforms = ['none']\n\n else:\n # load labels from files\n with open(join(self.datapath, 'labels', file_name), 'rb') as f:\n print('Loading labels...')\n data = pkl.load(f)\n if 'transform' in data.keys():\n transforms = data['transform']\n else:\n transforms = ['none']\n labels = data['labels']\n if isinstance(labels, np.ndarray):\n labels = [labels]\n if 'labels_cov' in data.keys():\n label_cov = data['labels_cov']\n label_index = np.stack((data['instance_set_numbers'], data['instance_ic_numbers'])).T\n del data\n\n # match components and labels\n print('Matching components and labels...')\n temp = self.__match_indices(label_index.astype(np.int), features[0][:, :2].astype(np.int))\n label2component = dict(zip(*temp))\n del temp\n # match feature-sets\n print('Matching features...')\n feature_inds = self.__match_indices(*[x[:, :2].astype(np.int) for x in features])\n\n # check which labels are not kept\n print('Rearanging components and labels...')\n kept_labels = [x for x, y in label2component.iteritems() if y in feature_inds[0]]\n dropped_labels = [x for x, y in label2component.iteritems() if y not in feature_inds[0]]\n\n # for each label, pick a new component that is kept (if any)\n ind_n_data_points = [x for x, y in enumerate(feature_labels[0]) if y == 'number of data points'][0]\n for ind in dropped_labels:\n group = duplicate_order[label2component[ind]]\n candidate_components = np.intersect1d(group2indices[group], feature_inds[0])\n # if more than one choice, pick the one from the dataset with the most samples unless one from this\n # group has already been found\n if len(candidate_components) >= 1:\n if len(candidate_components) == 1:\n new_index = features[0][candidate_components, :2]\n else:\n new_index = features[0][candidate_components[features[0][candidate_components,\n ind_n_data_points].argmax()], :2]\n if not (new_index == label_index[dropped_labels]).all(1).any() \\\n and not any([(x == label_index[kept_labels]).all(1).any()\n for x in features[0][candidate_components, :2]]):\n label_index[ind] = new_index\n del label2component, kept_labels, dropped_labels, duplicate_order\n\n # feature labels (change with features)\n psd_lims = np.where(np.char.startswith(feature_labels[0], 'psd'))[0][[0, -1]]\n feature_labels = 
np.concatenate((feature_labels[0][:psd_lims[0]],\n feature_labels[0][psd_lims[1] + 1:],\n feature_labels[1][2:],\n feature_labels[2][2:]))\n\n # combine features, keeping only components with all features\n print('Combining feature-sets...')\n\n def index_features(data, new_index):\n return np.concatenate((data[0][feature_inds[0][new_index], :psd_lims[0]].astype(np.float32),\n data[0][feature_inds[0][new_index], psd_lims[1] + 1:].astype(np.float32),\n data[1][feature_inds[1][new_index], 2:].astype(np.float32),\n data[2][feature_inds[2][new_index], 2:].astype(np.float32)),\n axis=1)\n\n # rematch with labels\n print('Rematching components and labels...')\n ind_labeled_labels, ind_labeled_features = self.__match_indices(\n label_index.astype(np.int),features[0][feature_inds[0], :2].astype(np.int))\n del label_index\n\n # find topomap duplicates\n _, duplicate_order = np.unique(features[0][feature_inds[0], 2:742].astype(np.float32), return_inverse=True,\n axis=0)\n do_sortind = np.argsort(duplicate_order)\n do_sorted = duplicate_order[do_sortind]\n do_indices = np.where(np.diff(np.concatenate(([-1], do_sorted))))[0]\n group2indices = [do_sortind[do_indices[x]:do_indices[x + 1]] for x in range(0, duplicate_order.max())]\n\n # aggregate data\n dataset = dict()\n try:\n dataset['transform'] = transforms\n except UnboundLocalError:\n pass\n if self.label_type == 'database':\n dataset['labeled_labels'] = [dblabels[x] for x in np.where(ind_labeled_labels)[0]]\n else:\n dataset['labeled_labels'] = [x[ind_labeled_labels, :] for x in labels]\n if 'label_cov' in locals():\n dataset['labeled_label_covariances'] = [x[ind_labeled_labels, :].astype(np.float32)\n for x in label_cov]\n dataset['labeled_features'] = index_features(features, ind_labeled_features)\n\n # find equivalent datasets with most samples\n unlabeled_groups = [x for it, x in enumerate(group2indices)\n if not np.intersect1d(x, ind_labeled_features).size]\n ndata = features[0][feature_inds[0]][:, ind_n_data_points]\n ind_unique_unlabled = [x[ndata[x].argmax()] for x in unlabeled_groups]\n dataset['unlabeled_features'] = index_features(features, ind_unique_unlabled)\n\n # close h5py pscorr file and clean workspace\n del features, group2indices\n try:\n del labels\n except NameError:\n del dblabels\n if 'label_cov' in locals():\n del label_cov\n\n # remove inf columns\n print('Cleaning data of infs...')\n inf_col = [ind for ind, x in enumerate(feature_labels) if x == 'SASICA snr'][0]\n feature_labels = np.delete(feature_labels, inf_col)\n dataset['unlabeled_features'] = np.delete(dataset['unlabeled_features'], inf_col, axis=1)\n dataset['labeled_features'] = np.delete(dataset['labeled_features'], inf_col, axis=1)\n\n # remove nan total_rows\n print('Cleaning data of nans...')\n # unlabeled\n unlabeled_not_nan_inf_index = np.logical_not(\n np.logical_or(np.isnan(dataset['unlabeled_features']).any(axis=1),\n np.isinf(dataset['unlabeled_features']).any(axis=1)))\n dataset['unlabeled_features'] = \\\n dataset['unlabeled_features'][unlabeled_not_nan_inf_index, :]\n # labeled\n labeled_not_nan_inf_index = np.logical_not(np.logical_or(np.isnan(dataset['labeled_features']).any(axis=1),\n np.isinf(dataset['labeled_features']).any(axis=1)))\n dataset['labeled_features'] = dataset['labeled_features'][labeled_not_nan_inf_index, :]\n if self.label_type == 'database':\n dataset['labeled_labels'] = [dataset['labeled_labels'][x]\n for x in np.where(labeled_not_nan_inf_index)[0]]\n else:\n dataset['labeled_labels'] = [x[labeled_not_nan_inf_index, :] for x in 
dataset['labeled_labels']]\n if 'labeled_label_covariances' in dataset.keys():\n dataset['labeled_label_covariances'] = [x[labeled_not_nan_inf_index, :, :]\n for x in dataset['labeled_label_covariances']]\n if not self.unique:\n dataset['unlabeled_duplicates'] = dataset['unlabeled_duplicates'][unlabeled_not_nan_inf_index]\n dataset['labeled_duplicates'] = dataset['labeled_duplicates'][labeled_not_nan_inf_index]\n\n # save feature labels (names, e.g. psd)\n dataset['feature_labels'] = feature_labels\n\n # save the results\n print('Saving aggregated dataset...')\n joblib.dump(dataset, join(self.datapath, 'cache', processed_file_name), 0)\n\n # print time\n total = time() - start\n print('Time to load: ' + strftime(\"%H:%M:%S\", gmtime(total)) +\n ':' + np.mod(total, 1).astype(str)[2:5] + '\\t(HH:MM:SS:sss)')\n\n return dataset", "title": "" }, { "docid": "72979cb4febdf46025f18cc4868a2904", "score": "0.58455724", "text": "def load_data(self):\n self._load_data(self.file_location)", "title": "" }, { "docid": "096c6180eaa87b7914b77447a9f852b2", "score": "0.58425766", "text": "def _transform_data(self) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "7dd0c05d5c6dbd75100734646d93a737", "score": "0.5841228", "text": "def load(self, data):\n self.last_epoch = data[\"current_epoch_index\"]\n self.hyperparam_value = data[\"hyperparam_value\"]\n self.metric_values = data[\"metric_values\"]\n self.current_patient = data[\"current_patient\"]", "title": "" }, { "docid": "48bb60500db64b02a73ced3c5cfd3111", "score": "0.58397883", "text": "def prepare_data(self):\n test_transform = Compose([\n Normalize(self.hparams.dataset_mean, self.hparams.dataset_std),\n ToTensor()\n ])\n # apply data augmentation only on training set\n train_transform = Compose([\n RandomInPlaneRotation(pi / 6),\n RandomFlip(0),\n # RandomFlip(1),\n RandomFlip(2),\n Normalize(self.hparams.dataset_mean, self.hparams.dataset_std),\n RandomNoise(.05),\n ToTensor(),\n #lambda x: torch.randn(1, 50, 50, 50)\n ])\n full_dataset = RadcureDataset(self.hparams.root_directory,\n self.hparams.clinical_data_path,\n self.hparams.patch_size,\n train=True,\n transform=train_transform,\n cache_dir=self.hparams.cache_dir,\n num_workers=self.hparams.num_workers)\n test_dataset = RadcureDataset(self.hparams.root_directory,\n self.hparams.clinical_data_path,\n self.hparams.patch_size,\n train=False,#set back to False at test phase\n transform=test_transform,\n cache_dir=self.hparams.cache_dir,\n num_workers=self.hparams.num_workers)\n\n # make sure the validation set is balanced\n val_size = floor(.1 / .7 * len(full_dataset)) # use 10% of all data for validation\n full_indices = range(len(full_dataset))\n full_targets = full_dataset.clinical_data[\"target_binary\"]\n train_indices, val_indices = train_test_split(full_indices, test_size=val_size, stratify=full_targets)\n train_dataset, val_dataset = Subset(full_dataset, train_indices), Subset(full_dataset, val_indices)\n val_dataset.dataset = copy(full_dataset)\n val_dataset.dataset.transform = test_transform\n self.pos_weight = torch.tensor(len(full_targets) / full_targets.sum())\n\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.test_dataset = test_dataset", "title": "" }, { "docid": "35b72758cfdc7228298bcba8931b92f3", "score": "0.58396935", "text": "def _load_data(self):\n keys, examples = self._input_reader.read_input(\n self._data_path,\n self._config.batch_size,\n randomize_input=self._model.is_training,\n distort_inputs=self._model.is_training)\n\n 
self._observations = examples[\"decoded_observation\"]\n self._labels = examples[\"decoded_label\"]", "title": "" }, { "docid": "d6902512c39aeba69f7e882e388a9189", "score": "0.58345693", "text": "def load_nessie_data(self):\n pass", "title": "" }, { "docid": "cc104dd831426dbe776e190a06041953", "score": "0.5833998", "text": "def process_data(self):\n work_done = {'load':False, 'weather':False, 'ml':False}\n\n if self.status[\"Load\"] == Status.NOT_LOADED:\n return MissingDataError(\"Load data not loaded !\")\n\n # Load Preprocessor \n if self.status[\"Load\"] == Status.LOADED:\n demand_preprocessor, y_var = self.__get_preprocessor(self.settings[\"Data\"][\"location\"])\n error = demand_preprocessor.load(self.loaders['Load'].get_data(), y_var)\n self.__attach_load_preprocessor(demand_preprocessor, error)\n \n self.preprocessors[\"Load\"].fix_data(max_error_width=3, threshold=(1.5,0.99), **self.settings[\"Load\"][\"Preprocessor\"])\n self._load = self.preprocessors[\"Load\"].get_data()\n \n work_done['load'] = True\n self.status[\"Load\"] = Status.READY\n\n # Weather Preprocessor\n if self.status[\"Weather\"] == Status.NOT_LOADED and self.verbose:\n warnings.warn(\"Weather data not loaded.\")\n self.settings[\"Data\"][\"weather_data\"] = False\n\n elif self.status[\"Weather\"] == Status.LOADED:\n self.__set_countries_to_keep(y_var)\n self.settings[\"Weather\"][\"Preprocessor\"][\"resample\"] = str(self.preprocessors[\"Load\"].settlement_period_minutes) + \" min\"\n self.preprocessors[\"Weather\"] = pr.NCDC_Preprocessor(self.loaders['Weather'], year_range=self.settings[\"Data\"][\"year_range\"])\n self.preprocessors[\"Weather\"].fix_data(**self.settings[\"Weather\"][\"Preprocessor\"]) \n work_done['weather'] = True\n self.settings[\"Data\"][\"weather_data\"] = True # Duplicate of status[\"Weather\"] but only self.settings is backed-up.\n self.status[\"Weather\"] = Status.READY\n\n # ML_Preprocessor\n if work_done['load'] or work_done['weather'] or self.status[\"ML\"] == Status.NOT_LOADED:\n self.preprocessors['ML'] = mlp.ML_Preprocessor(load=self._load, weather_preprocessor=self.preprocessors[\"Weather\"], y_vars=['TS'])\n if self.settings[\"ML_Preprocessor\"][\"load_difference\"]:\n self.preprocessors[\"ML\"].transform_load_difference()\n \n self.preprocessors[\"ML\"].add_historical_ts(**self.settings[\"ML_Preprocessor\"][\"historical_ts\"])\n self.preprocessors[\"ML\"].encode_time(time_encoding=self.settings[\"ML_Preprocessor\"][\"time_encoding\"])\n\n # TODO : Should be done from the ML_Preprocessor 'encode_time' method directly.\n if self.settings[\"ML_Preprocessor\"][\"time_encoding\"] == 'categorical':\n self.preprocessors['ML'].encode_vars(cat_vars=['Month', 'Day', 'Hour', 'Minute', 'DoW'])\n \n # Data normalization\n self.sc_X, self.sc_y = self.preprocessors['ML'].std_vars(stdz=self.settings[\"ML_Preprocessor\"][\"stdz\"])\n work_done['ML'] = True\n self.status[\"ML\"] = Status.READY\n\n # If any change to the data was done : Regenerate the cross-validation splits\n if any(v==True for v in work_done.values()):\n self.splits[\"raw\"] = self.preprocessors['ML'].get_cv_splits(self.settings[\"ML_Preprocessor\"][\"train_test\"])\n \n # Identify how many clusters exist for each weather variable.\n data_columns = self.splits['raw'][0][0][0].columns.tolist()\n for var in self.settings[\"Data\"][\"weather_variables\"]:\n self.settings[\"Data\"][\"weather_variables\"][var] = sum([1 for x in data_columns if x.startswith(var)])", "title": "" }, { "docid": "f043b9c2422408496edcb15c61f90aa3", 
"score": "0.5819679", "text": "def load_prepared_data(self) -> Vocabulary:\n with open(os.path.join(self.data_dir, self.config[\"data\"][\"formated_file\"]),\n encoding=\"utf-8\") as f:\n lines: List[str] = f.read().strip().split('\\n')\n\n pairs: Pairs = [\n [preprocessing.basic_preprocessing(s) for s in line.split('\\t')]\n for line in lines\n ]\n vocabulary: Vocabulary = Vocabulary(pairs, self.config)\n return vocabulary", "title": "" }, { "docid": "07871ac53021aeee60a343a16d20c1c0", "score": "0.5815294", "text": "def load(self):\n self.collector.reset()\n self.data.read()", "title": "" }, { "docid": "a91f9eb5dd15befe0829e1ba94022007", "score": "0.58132225", "text": "def prep_data(settings, obsDatasetList, gridBox, modelList):\n \n # TODO: Stop the object Deserialization and work on refactoring the core code here\n cachedir = settings.cacheDir\n workdir = settings.workDir\n\n # Use list comprehensions to deconstruct obsDatasetList\n # ['TRMM_pr_mon', 'CRU3.1_pr'] Basically a list of Dataset NAME +'_' + parameter name - THE 'CRU*' one triggers unit conversion issues later\n # the plan here is to use the obsDatasetList which contains a longName key we can use.\n obsList = [str(x['longname']) for x in obsDatasetList]\n # Also using the obsDatasetList with a key of ['dataset_id']\n obsDatasetId = [str(x['dataset_id']) for x in obsDatasetList]\n # obsDatasetList ['paramter_id'] list\n obsParameterId = [str(x['parameter_id']) for x in obsDatasetList]\n obsTimestep = [str(x['timestep']) for x in obsDatasetList]\n mdlList = [model.filename for model in modelList]\n\n # Since all of the model objects in the modelList have the same Varnames and Precip Flag, I am going to merely \n # pull this from modelList[0] for now\n modelVarName = modelList[0].varName\n precipFlag = modelList[0].precipFlag\n modelTimeVarName = modelList[0].timeVariable\n modelLatVarName = modelList[0].latVariable\n modelLonVarName = modelList[0].lonVariable\n regridOption = settings.spatialGrid\n timeRegridOption = settings.temporalGrid\n \n \"\"\"\n Routine to read-in and re-grid both obs and mdl datasets.\n Processes both single and multiple files of obs and mdl or combinations in a general way.\n i) retrieve observations from the database\n ii) load in model data\n iii) spatial regridding\n iv) temporal regridding\n v) area-averaging\n Input:\n cachedir \t- string describing directory path\n workdir \t- string describing directory path\n obsList - string describing the observation data files\n obsDatasetId \t- int, db dataset id\n obsParameterId\t- int, db parameter id \n latMin, latMax, lonMin, lonMax, dLat, dLon, naLats, naLons: define the evaluation/analysis domain/grid system\n \t latMin\t\t- float\n latMax\t\t- float\n lonMin\t\t- float\n lonMax\t\t- float\n dLat \t\t- float\n dLon \t\t- float\n naLats\t\t- integer\n naLons\t\t- integer\n mdlList\t- string describing model file name + path\n modelVarName\t- string describing name of variable to evaluate (as written in model file)\n \t precipFlag\t- bool (is this precipitation data? True/False)\n modelTimeVarName - string describing name of time variable in model file \t\n modelLatVarName - string describing name of latitude variable in model file \n modelLonVarName - string describing name of longitude variable in model file \n regridOption \t - string: 'obs'|'model'|'user'\n timeRegridOption -string: 'full'|'annual'|'monthly'|'daily'\n maskOption - Boolean\n \n # TODO: This isn't true in the current codebase.\n Instead the SubRegion's are being used. 
You can see that these values are not\n being used in the code, at least they are not being passed in from the function\n \n maskLatMin - float (only used if maskOption=1)\n maskLatMax - float (only used if maskOption=1)\n \t maskLonMin - float (only used if maskOption=1)\n maskLonMax - float (only used if maskOption=1)\n Output: image files of plots + possibly data\n Jinwon Kim, 7/11/2012\n \"\"\"\n\n\n # check the number of obs & model data files\n numOBSs = len(obsList)\n numMDLs = len(mdlList)\n \n # assign parameters that must be preserved throughout the process\n \n print 'start & end time = ', settings.startDate, settings.endDate\n yymm0 = settings.startDate.strftime(\"%Y%m\")\n yymm1 = settings.endDate.strftime(\"%Y%m\")\n print 'start & end eval period = ', yymm0, yymm1\n\n\n\n #TODO: Wrap in try except blocks instead\n if numMDLs < 1: \n print 'No input model data file. EXIT'\n sys.exit()\n if numOBSs < 1: \n print 'No input observation data file. EXIT'\n sys.exit()\n\n ## Part 0: Setup the regrid variables based on the regridOption given\n\n # preparation for spatial re-gridding: define the size of horizontal array of the target interpolation grid system (ngrdX and ngrdY)\n print 'regridOption in prep_data= ', regridOption\n if regridOption == 'model':\n ifile = mdlList[0]\n typeF = 'nc'\n lats, lons, mTimes = files.read_data_from_one_file(ifile, modelVarName, \n modelLatVarName, \n modelLonVarName, \n modelTimeVarName, \n typeF)[:3]\n modelObject = modelList[0]\n latMin = modelObject.latMin\n latMax = modelObject.latMax\n lonMin = modelObject.lonMin\n lonMax = modelObject.lonMax\n elif regridOption == 'user':\n # Use the GridBox Object\n latMin = gridBox.latMin\n latMax = gridBox.latMax\n lonMin = gridBox.lonMin\n lonMax = gridBox.lonMax\n naLats = gridBox.latCount\n naLons = gridBox.lonCount\n dLat = gridBox.latStep\n dLon = gridBox.lonStep\n lat = np.arange(naLats) * dLat + latMin\n lon = np.arange(naLons) * dLon + lonMin\n lons, lats = np.meshgrid(lon, lat)\n lon = 0.\n lat = 0.\n else:\n print \"INVALID REGRID OPTION USED\"\n sys.exit()\n \n ngrdY = lats.shape[0]\n ngrdX = lats.shape[1]\n\n regObsData = []\n \n \n \n ## Part 1: retrieve observation data from the database and regrid them\n ## NB. automatically uses local cache if already retrieved.\n \n for n in np.arange(numOBSs):\n # spatial regridding\n oLats, oLons, _, oTimes, oData = db.extractData(obsDatasetId[n],\n obsParameterId[n],\n latMin, latMax,\n lonMin, lonMax,\n settings.startDate, settings.endDate,\n cachedir, obsTimestep[n])\n \n # TODO: modify this if block with new metadata usage.\n if precipFlag == True and obsList[n][0:3] == 'CRU':\n oData = 86400.0 * oData\n\n nstOBSs = oData.shape[0] # note that the length of obs data can vary for different obs intervals (e.g., daily vs. monthly)\n print 'Regrid OBS dataset onto the ', regridOption, ' grid system: ngrdY, ngrdX, nstOBSs= ', ngrdY, ngrdX, nstOBSs\n print 'For dataset: %s' % obsList[n]\n \n tmpOBS = ma.zeros((nstOBSs, ngrdY, ngrdX))\n \n print 'tmpOBS shape = ', tmpOBS.shape\n \n # OBS SPATIAL REGRIDING \n for t in np.arange(nstOBSs):\n tmpOBS[t, :, :] = process.do_regrid(oData[t, :, :], oLats, oLons, lats, lons)\n \n # TODO: Not sure this is needed with Python Garbage Collector\n # The used memory should be freed when the objects are no longer referenced. 
If this continues to be an issue we may need to look\n # at using generators where possible.\n oLats = 0.0\n oLons = 0.0 # release the memory occupied by the temporary variables oLats and oLons.\n \n # OBS TEMPORAL REGRIDING\n oData, newObsTimes = process.calc_average_on_new_time_unit_K(tmpOBS, oTimes, unit=timeRegridOption)\n\n tmpOBS = 0.0\n \n # check the consistency of temporally regridded obs data\n if n == 0:\n oldObsTimes = newObsTimes\n else:\n if oldObsTimes != newObsTimes:\n print 'temporally regridded obs data time levels do not match at ', n - 1, n\n print '%s Time through Loop' % (n + 1)\n print 'oldObsTimes Count: %s' % len(oldObsTimes)\n print 'newObsTimes Count: %s' % len(newObsTimes)\n # TODO: We need to handle these cases using Try Except Blocks or insert a sys.exit if appropriate\n sys.exit()\n else:\n oldObsTimes = newObsTimes\n # if everything's fine, append the spatially and temporally regridded data in the obs data array (obsData)\n regObsData.append(oData)\n\n\n \"\"\" all obs datasets have been read-in and regridded. convert the regridded obs data from 'list' to 'array'\n also finalize 'obsTimes', the time coordinate values of the regridded obs data.\n NOTE: using 'list_to_array' assigns values to the missing points; this has become a problem in handling the CRU data.\n this problem disappears by using 'ma.array'.\"\"\"\n\n obsData = ma.array(regObsData)\n obsTimes = newObsTimes\n\n # compute the simple multi-obs ensemble if multiple obs are used\n if numOBSs > 1:\n ensemble = np.mean(regObsData, axis=0)\n regObsData.append(ensemble)\n numOBSs = len(regObsData)\n obsList.append('ENS-OBS')\n obsData = ma.array(regObsData) # Cast to a masked array\n\n\n ## Part 2: load in and regrid model data from file(s)\n ## NOTE: the two parameters, numMDLs and numMOmx are defined to represent the number of models (w/ all 240 mos) &\n ## the total number of months, respectively, in later multi-model calculations.\n\n typeF = 'nc'\n regridMdlData = []\n # extract the model names and store them in the list 'mdlList'\n mdlName = []\n mdlListReversed=[]\n if len(mdlList) >1:\n for element in mdlList:\n mdlListReversed.append(element[::-1])\n prefix=os.path.commonprefix(mdlList)\n postfix=os.path.commonprefix(mdlListReversed)[::-1]\n for element in mdlList:\n mdlName.append(element.replace(prefix,'').replace(postfix,''))\n else:\n mdlName.append('model') \n\n \n for n in np.arange(numMDLs):\n # read model grid info, then model data\n ifile = mdlList[n]\n print 'ifile= ', ifile\n modelLats, modelLons, mTimes, mdlDat, mvUnit = files.read_data_from_one_file(ifile, \n modelVarName,\n modelLatVarName,\n modelLonVarName,\n modelTimeVarName, \n typeF)\n mdlT = []\n mStep = len(mTimes)\n\n for i in np.arange(mStep):\n mdlT.append(mTimes[i].strftime(\"%Y%m\"))\n\n wh = (np.array(mdlT) >= yymm0) & (np.array(mdlT) <= yymm1)\n modelTimes = list(np.array(mTimes)[wh])\n mData = mdlDat[wh, :, :]\n \n # determine the dimension size from the model time and latitude data.\n nT = len(modelTimes)\n\n print ' The shape of model data to be processed= ', mData.shape, ' for the period ', min(modelTimes), max(modelTimes)\n # spatial regridding of the modl data\n tmpMDL = ma.zeros((nT, ngrdY, ngrdX))\n\n if regridOption != 'model':\n for t in np.arange(nT):\n tmpMDL[t, :, :] = process.do_regrid(mData[t, :, :], modelLats, modelLons, lats, lons)\n else:\n tmpMDL = mData\n\n # temporally regrid the model data\n mData, newMdlTimes = process.regrid_in_time(tmpMDL, modelTimes, unit=timeRegridOption)\n tmpMDL = 0.0\n 
\n # check data consistency for all models \n if n == 0:\n oldMdlTimes = newMdlTimes\n else:\n if oldMdlTimes != newMdlTimes:\n print 'temporally regridded mdl data time levels do not match at ', n - 1, n\n print len(oldMdlTimes), len(newMdlTimes)\n sys.exit()\n else:\n oldMdlTimes = newMdlTimes\n\n # if everything's fine, append the spatially and temporally regridded data in the obs data array (obsData)\n regridMdlData.append(mData)\n\n modelData = ma.array(regridMdlData)\n modelTimes = newMdlTimes\n\n if (precipFlag == True) & (mvUnit == 'KG M-2 S-1'):\n print 'convert model variable unit from mm/s to mm/day'\n modelData = 86400.*modelData\n \n # check consistency between the time levels of the model and obs data\n # this is the final update of time levels: 'Times' and 'nT'\n if obsTimes != modelTimes:\n print 'time levels of the obs and model data are not consistent. EXIT'\n print 'obsTimes'\n print obsTimes\n print 'modelTimes'\n print modelTimes\n sys.exit()\n # 'Times = modelTimes = obsTimes' has been established and modelTimes and obsTimes will not be used hereafter. (de-allocated)\n Times = modelTimes\n nT = len(modelTimes)\n modelTimes = 0\n obsTimes = 0\n\n print 'Reading and regridding model data are completed'\n print 'numMDLs, modelData.shape= ', numMDLs, modelData.shape\n\n # TODO: Commented out until I can talk with Jinwon about this\n # compute the simple multi-model ensemble if multiple models are evaluated\n if numMDLs > 1:\n model_ensemble = np.mean(regridMdlData, axis=0)\n regridMdlData.append(model_ensemble)\n numMDLs = len(regridMdlData)\n modelData = ma.array(regridMdlData)\n mdlName.append('ENS-MODEL')\n print 'Eval mdl-mean timeseries for the obs periods: modelData.shape= ',modelData.shape\n # GOODALE: This ensemble code should be refactored into process.py module since it's purpose is data processing\n\n # Processing complete\n print 'data_prep is completed: both obs and mdl data are re-gridded to a common analysis grid'\n return numOBSs, numMDLs, nT, ngrdY, ngrdX, Times, lons, lats, obsData, modelData, obsList, mdlName", "title": "" }, { "docid": "9104b3bb4ff4edff496d8cf3637ab592", "score": "0.58099765", "text": "def load_training_data(self):\n if self.data_train is not None:\n return \n self.data_train, self.ngraphs_train = self.load_data('train')", "title": "" }, { "docid": "5c6845302e1d8d1df5d2d3d3ad26c956", "score": "0.5803218", "text": "def preprocess_all_data(self):\n all_files = self._create_filename_list(SIM_DATA_PATH)\n np.random.shuffle(all_files)\n\n train_val_test_files = self._split_datafiles(all_files) # train_set, val_set, test_set\n self.train_files = train_val_test_files[0]\n self.val_files = train_val_test_files[1]\n self.test_files = train_val_test_files[2]\n\n # Report sizes and load all datasets\n self.logger.info('Train set size: %d', len(self.train_files))\n self.logger.info('Validation set size: %d', len(self.val_files))\n self.logger.info('Test set size: %d', len(self.test_files))\n self._load_all_datasets()", "title": "" }, { "docid": "4190e2ac9543ffdf86b659c2380156b9", "score": "0.57983017", "text": "def _form_data(self, source_file):\n df = self._load_data(source_file)\n norm_cols = ['sigma0', 'normalizedVariance']\n norm_cols += ['s' + str(idx) for idx in range(20)]\n for col in norm_cols:\n s_scaler = joblib.load(f'./models/scalerModels/{col}.scl')\n norm_col = np.float64(pd.Series(s_scaler.transform(df[col].values.reshape(-1, 1)).squeeze()))\n df[col] = norm_col\n\n # Lastly, normalize incidence angle column\n df['idx'] = range(1, 
len(df)+1)\n md1 = df[df['incidenceAngleMode'] == 0]\n md2 = df[df['incidenceAngleMode'] != 0]\n subsets = [md1.copy(), md2.copy()]\n\n for idx, s_df in enumerate(subsets):\n s_scaler = joblib.load(f'./models/scalerModels/incidenceAngle{idx}.scl')\n norm_col = np.float64(pd.Series(s_scaler.transform(s_df['incidenceAngle'].values.reshape(-1, 1)).squeeze()))\n s_df['incidenceAngle'] = norm_col\n subsets[idx] = s_df.copy()\n\n out_df = pd.concat(subsets, ignore_index=True)\n out_df.sort_values(by=['idx'], inplace=True)\n out_df['dt'] = 0\n out_df['dx'] = 0\n\n out_df.drop(['idx'], axis=1, inplace=True)\n\n return out_df", "title": "" }, { "docid": "0cdc30b7eb855992b1e7591d3efbc850", "score": "0.57927144", "text": "def load_data(path_dataset):\n data = read_txt(path_dataset)[1:]\n return preprocess_data(data)", "title": "" }, { "docid": "f86350e422a34543a9da3bb06eb0c17c", "score": "0.5764217", "text": "def load_data():\n with open(FLAGS.source_data, 'rb') as f:\n data_x = pickle.load(f)\n data_y = pickle.load(f)\n test_x = pickle.load(f)\n test_y = pickle.load(f)\n dev_x = pickle.load(f)\n dev_y = pickle.load(f)\n texts = pickle.load(f)\n # id2word = pickle.load(f)\n word2id = pickle.load(f)\n tag2id = pickle.load(f)\n id2tag = pickle.load(f)\n test_weight = pickle.load(f)\n train_weight = pickle.load(f)\n dev_weight = pickle.load(f)\n train_sentence_len = pickle.load(f)\n test_sentence_len = pickle.load(f)\n dev_sentence_len = pickle.load(f)\n return data_x, data_y, test_x, test_y, dev_x, dev_y, texts, word2id, tag2id, id2tag, test_weight, train_weight, dev_weight, \\\n train_sentence_len, test_sentence_len, dev_sentence_len", "title": "" }, { "docid": "82e1f84fbeeb4dcc4f49f3bd48e9ef0a", "score": "0.57631624", "text": "def _load_test_data(self):\n if self._all_data is None:\n self._load_all_data()\n test_index = int(self.training_fraction * len(self._all_data[0]))\n self._test_data = self._all_data[0][:test_index], self._all_data[1][:test_index]\n super()._load_test_data()", "title": "" }, { "docid": "7fd99894e7c4e335db9418518413e684", "score": "0.5761422", "text": "def prepare_data(data_dir, low_frequency):\n # date_path = data_dir + '/'\n\n # Create vocabularies.\n input_vocab_path = os.path.join(data_dir + '/input.vocab.%d' % low_frequency)\n label_vocab_path = os.path.join(data_dir + '/tag.vocab')\n create_vocabulary(input_vocab_path, data_dir + '/input', low_frequency)\n create_label_vocabulary(label_vocab_path, data_dir + '/tag')\n\n\n # Convert data into token ids.\n train_input_ids_path = data_dir + '/input.ids'\n train_label_ids_path = data_dir + '/tag.ids'\n # test_input_ids_path = data_dir + '/test_input.ids'\n # test_label_ids_path = data_dir + '/test_label.ids'\n data_to_token_ids(data_dir + '/input', train_input_ids_path, input_vocab_path)\n data_to_token_ids(data_dir + '/tag', train_label_ids_path, label_vocab_path)\n # data_to_token_ids(data_dir + '/test.input', test_input_ids_path, input_vocab_path)\n # data_to_token_ids(data_dir + '/test.label', test_label_ids_path, label_vocab_path)\n \n return input_vocab_path, label_vocab_path, train_input_ids_path, train_label_ids_path", "title": "" }, { "docid": "d1733e5e6c908e55e6c57e4be755ac7a", "score": "0.57513416", "text": "def _from_restored(self):\n self.logger.info('Loading data from restored')\n self._load_restored()", "title": "" }, { "docid": "d5a69bac90fb3a1c14b2fa97526a698c", "score": "0.57391477", "text": "def __load_data_from_file(self):\r\n\r\n with open(self.__data_file, 'rb') as f:\r\n # self.__features, 
self.__targets, self.__indices = pickle.load(f)\r\n self.__features, self.__targets, self.__columns, self.__norm_params = pickle.load(f)", "title": "" }, { "docid": "bf2cc995364ca86dc2fb4e4cb682af3f", "score": "0.57386607", "text": "def read(self):\n if not self.bertam:\n dataset = self._dpeter()\n else:\n dataset = self._bertam()\n\n if not self.dataset:\n self.dataset = dict()\n\n for partition in self.partitions:\n self.dataset[partition] = {'dt': [], 'gt': []}\n\n for partition in self.partitions:\n self.dataset[partition]['dt'] += dataset[partition]['dt']\n self.dataset[partition]['gt'] += dataset[partition]['gt']\n\n self.preprocess_partitions()", "title": "" }, { "docid": "5dfe1b424dcbf51f3b2cbecaa30161c3", "score": "0.57361513", "text": "def _collect_unaug_data():\n\torig_data_dict = {}\n\tnum_samples = {}\n\tvoi_df = drm.get_voi_dfs()[0]\n\t#voi_df = voi_df[voi_df[\"run_num\"] <= C.test_run_num]\n\tpatient_info_df = pd.read_csv(C.patient_info_path)\n\tpatient_info_df[\"AccNum\"] = patient_info_df[\"AccNum\"].astype(str)\n\n\tfor cls in C.classes_to_include:\n\t\tx = np.empty((10000, C.dims[0], C.dims[1], C.dims[2], C.nb_channels))\n\t\tz = []\n\n\t\tif C.dual_img_inputs:\n\t\t\tx2 = np.empty((10000, *C.context_dims, C.nb_channels))\n\t\telif C.non_imaging_inputs:\n\t\t\tx2 = np.empty((10000, C.num_non_image_inputs))\n\n\t\tfor index, lesion_id in enumerate(voi_df[voi_df[\"cls\"] == cls].index):\n\t\t\timg_path = os.path.join(C.orig_dir, cls, lesion_id+\".npy\")\n\t\t\ttry:\n\t\t\t\tx[index] = np.load(img_path)\n\t\t\t\tif C.hard_scale:\n\t\t\t\t\tx[index] = vm.scale_intensity(x[index], 1, max_int=2)#, keep_min=True)\n\t\t\texcept:\n\t\t\t\traise ValueError(img_path + \" not found\")\n\t\t\tz.append(lesion_id)\n\t\t\t\n\t\t\tif C.dual_img_inputs:\n\t\t\t\ttmp = np.load(os.path.join(C.crops_dir, cls, lesion_id+\".npy\"))\n\t\t\t\tx2[index] = tr.rescale_img(tmp, C.context_dims)[0]\n\n\t\t\telif C.non_imaging_inputs:\n\t\t\t\tvoi_row = voi_df.loc[lesion_id]\n\t\t\t\tpatient_row = patient_info_df[patient_info_df[\"AccNum\"] == voi_row[\"acc_num\"]]\n\t\t\t\tx2[index] = get_non_img_inputs(voi_row, patient_row)\n\n\t\tx.resize((index+1, C.dims[0], C.dims[1], C.dims[2], C.nb_channels)) #shrink first dimension to fit\n\t\tif C.dual_img_inputs or C.non_imaging_inputs:\n\t\t\tx2.resize((index+1, *x2.shape[1:]))\n\t\t\torig_data_dict[cls] = [x, x2, np.array(z)]\n\t\telse:\n\t\t\torig_data_dict[cls] = [x, np.array(z)]\n\n\t\tnum_samples[cls] = index + 1\n\t\t\n\treturn orig_data_dict, num_samples", "title": "" }, { "docid": "392ba098a975eb87a197b6899bb0b0c8", "score": "0.57358766", "text": "def load_data(self):\n import shapefile\n data_path = os.path.join(self.data_path, self.dataset_name)\n shp = shapefile.Reader(data_path)\n\n self.fields = fields = [\n f[0] for f in shp.fields if not isinstance(f, tuple)]\n self.shapes = list(shp.shapes())\n self.records = records = list(shp.records())\n\n assert len(records)\n assert len(records[0]) == len(fields)", "title": "" }, { "docid": "b868efca0b1b5786560ce6a2e2a76ba8", "score": "0.57336974", "text": "def load_data(self):\n # Load and preprocess data\n sentences, y_tag, y_sentiment = self.load_data_and_labels()\n sentences_padded, sequence_length = self.pad_sentences(sentences)\n vocabulary, vocabulary_inv = self.build_vocab(sentences_padded)\n embedding_dict= self.load_glove_embeddings(self.filepath_glove)\n embedding_mat = self.build_embedding_matrix(embedding_dict, vocabulary)\n x, y_tag, y_sentiment = self.build_input_data(sentences_padded, 
y_tag, y_sentiment, vocabulary)\n return [x, y_tag, y_sentiment, vocabulary, vocabulary_inv, sequence_length, embedding_mat]", "title": "" }, { "docid": "738e44e67ac22b227f4a7605c3a6e32e", "score": "0.5722735", "text": "def prepare_data(path, one_hot = True, binary = False, normalize = True):\n\n # Load data into pandas dataframe\n data = pd.read_csv(path)\n\n # Drop ID column\n data = data.drop(['id'], axis=1)\n\n # Make a mask for groups with stroke and non-stroke\n stroke = data['stroke'] == 1\n not_stroke = data['stroke'] == 0\n\n # Get the mean BMI for the stroke and non-stroke group\n means_bmi = data.groupby(\"stroke\")[\"bmi\"].mean()\n\n # Use these masks and the bmi per group to fill the NA data with the mean\n # bmi for that group\n data[stroke] = data[stroke].fillna(means_bmi[1], axis=1)\n data[not_stroke] = data[not_stroke].fillna(means_bmi[0], axis=1)\n\n # Drop the 'other' sample for gender because there is only one sample and\n # we could not one-hot encode gender otherwise\n data = data[data.gender != 'Other']\n\n #Create columns with binary values if the binary argument is true\n if binary:\n\n # Replace columns with two categories with binaries\n data = data.replace({'Male': 1, 'Female': 0, 'Urban': 1, 'Rural': 0,\n 'Yes': 1, 'No': 0})\n\n # Change categorical columns to one-hot encoded columns\n if one_hot:\n\n # One hot encode the data with the function\n data = one_hot_encode(data)\n\n # Clean the column names of uppercase letters and spaces\n data.columns = data.columns.str.lower().str.replace(' ','_')\n\n # Normalize the numerical data\n if normalize:\n\n # Create a list with column names of numerical data as we only want\n # to normalize these\n numeric_data = ['age', 'bmi', 'avg_glucose_level']\n\n # Normalize the numeric values using the zscore\n data[numeric_data] = data[numeric_data].apply(zscore)\n\n return data", "title": "" }, { "docid": "bee97bcf402ae695816078c9b50fd94a", "score": "0.57216984", "text": "def load_data():\n # Load and preprocess data\n sentences, labels = load_data_and_labels()\n sentences_padded = pad_sentences(sentences)\n vocabulary, vocabulary_inv = build_vocab(sentences_padded)\n x, y = build_input_data(sentences_padded, labels, vocabulary)\n return [x, y, vocabulary, vocabulary_inv]", "title": "" }, { "docid": "d9de03611f22663b4281ee3d1d811a63", "score": "0.5719672", "text": "def prepare_data(self):\n print(\"starting preapare data..........\")\n print(\"the output file path is: {}\".format(self.output_path))\n\n info=self.gen_pair_dic()\n self.save_pair_to_txt(copy.deepcopy(info))\n print(\"data preprocessing finished\")", "title": "" }, { "docid": "84c7deeab8b8d321c58c6bb6a4795697", "score": "0.5712413", "text": "def _load_file(self):\n self.data = pd.read_csv(self.input_filepath)", "title": "" }, { "docid": "ccbf8f4baf99297860f2e8ebaad1deed", "score": "0.5706504", "text": "def pre_processing(self):\n self.encoding()\n self.normalise()\n self.save_data()\n\n return self.X_df, self.y_df", "title": "" }, { "docid": "9155cd9971dd708fb9085d1178613617", "score": "0.5701846", "text": "def loadData(analyzer):\n\n loadEvents(analyzer)\n loadHashtags(analyzer)\n loadSentiments(analyzer)\n return", "title": "" }, { "docid": "e823503fbdc78b14f15f813e603df519", "score": "0.57018226", "text": "def transform(self, data):", "title": "" }, { "docid": "ce26b6459789150405b94d93188d0cfe", "score": "0.5700221", "text": "def data_preprocessing(messages, categories):\n clean_df = clean_categories(categories)\n df = merge_datasets(messages, clean_df)\n 
df.iloc[:,4:] = df.iloc[:,4:].astype('int64')\n df.drop(['related','child_alone'], axis=1, inplace=True)\n return df", "title": "" }, { "docid": "e2a2dbc2bd418e564f0d81d8797c283e", "score": "0.56936836", "text": "def load_data(name):\n if name in ['comm_violence', 'community_crime', 'facebook']:\n df = pd.read_csv(join(DATA_DIR, name + '.data')).replace('?', 0)\n X, y = df.iloc[:, :-1], df.iloc[:, -1]\n\n elif name in ['coepra1', 'coepra2', 'coepra3']:\n df = pd.read_csv(join(DATA_DIR, name + '.data')).replace('?', 0)\n X, y = df.iloc[:, df.columns != 'Prop_001'], df['Prop_001']\n\n elif name in ['residential']:\n df = pd.read_csv(join(DATA_DIR, name + '.data')).replace('?', 0)\n X, y = df.drop(['V-9', 'V-10'], axis=1), df['V-9']\n\n elif name == 'imports-85':\n df = pd.read_csv(join(DATA_DIR, name + '.data'), header=None).replace('?', 0)\n X, y = df.iloc[:, :-1], df.iloc[:, -1]\n\n else:\n raise ValueError(\"%s not a valid data set\" % name)\n\n # Convert object dtypes to float\n if 'object' in X.dtypes.values:\n for col in X.select_dtypes(['object']).columns:\n try:\n X[col] = X[col].astype(float)\n except:\n X[col] = LabelEncoder().fit_transform(X[col].astype(str))\n\n # Cast to float before returning\n return X.astype(float).values, y.astype(float).values", "title": "" }, { "docid": "3b0019d3d427cf7d869256ec7cfa0a52", "score": "0.56920326", "text": "def _load_all_data(self, data_dir, field_sep):\n inter_feat_path = os.path.join(data_dir, self.config['inter_feat_name'])\n self.inter_feat = self._load_feat(inter_feat_path, self.config['inter_feat_header'], field_sep, self.config['inter_feat_field'])\n self.inter_feat = self.inter_feat.dropna(how='any')\n if self.frating is None:\n self.frating = 'rating'\n self.inter_feat.insert(0, self.frating, 1)\n self.field2type[self.frating] = 'float'\n self.field2maxlen[self.frating] = 1\n self.user_feat = None\n if self.config['user_feat_name'] is not None:\n user_feat = []\n for _, user_feat_col in zip(self.config['user_feat_name'], self.config['user_feat_field']):\n user_feat_path = os.path.join(data_dir, _)\n user_f = self._load_feat(user_feat_path, self.config['user_feat_header'], field_sep, user_feat_col)\n user_f.set_index(self.fuid, inplace=True)\n user_feat.append(user_f)\n self.user_feat = pd.concat(user_feat, axis=1)\n self.user_feat.reset_index(inplace=True)\n self._fill_nan(self.user_feat)\n self.item_feat = None\n if self.config['item_feat_name'] is not None:\n item_feat = []\n for _, item_feat_col in zip(self.config['item_feat_name'], self.config['item_feat_field']):\n item_feat_path = os.path.join(data_dir, _)\n item_f = self._load_feat(item_feat_path, self.config['item_feat_header'], field_sep, item_feat_col)\n item_f.set_index(self.fiid, inplace=True)\n item_feat.append(item_f)\n self.item_feat = pd.concat(item_feat, axis=1)\n self.item_feat.reset_index(inplace=True)\n self._fill_nan(self.item_feat)\n if self.config['network_feat_name'] is not None:\n self.network_feat = [None] * len(self.config['network_feat_name'])\n self.node_link = [None] * len(self.config['network_feat_name'])\n self.node_relink = [None] * len(self.config['network_feat_name'])\n self.mapped_fields = [[(field.split(':')[0] if field != None else field) for field in fields] for fields in self.config['mapped_feat_field']]\n for i, (name, fields) in enumerate(zip(self.config['network_feat_name'], self.config['network_feat_field'])):\n if len(name) == 2:\n net_name, link_name = name\n net_field, link_field = fields\n link = self._load_feat(os.path.join(data_dir, 
link_name), self.config['network_feat_header'][i][1], field_sep, link_field, update_dict=False).to_numpy()\n self.node_link[i] = dict(link)\n self.node_relink[i] = dict(link[:, [1, 0]])\n feat = self._load_feat(os.path.join(data_dir, net_name), self.config['network_feat_header'][i][0], field_sep, net_field)\n for j, col in enumerate(feat.columns):\n if self.mapped_fields[i][j] != None:\n feat[col] = [(self.node_relink[i][id] if id in self.node_relink[i] else id) for id in feat[col]]\n self.network_feat[i] = feat\n else:\n net_name, net_field = name[0], fields[0]\n self.network_feat[i] = self._load_feat(os.path.join(data_dir, net_name), self.config['network_feat_header'][i][0], field_sep, net_field)", "title": "" }, { "docid": "e07cfd3f9dd9527c7dd25754821c5874", "score": "0.5691594", "text": "def load_data(self):\n print(\"Loading data...\")\n with open(self.train_in_file, 'r') as train_in:\n train_vectors = np.loadtxt(train_in)\n with open(self.train_out_file, 'r') as train_out:\n train_output = np.loadtxt(train_out)\n\n if self.test_in_file and self.test_out_file:\n with open(self.test_in_file, 'r') as test_in:\n test_vectors = np.loadtxt(test_in)\n with open(self.test_out_file, 'r') as test_out:\n test_output = np.loadtxt(test_out)\n else:\n test_vectors = None\n test_output = None\n\n if self.s_shaped is True or self.s_shaped is False:\n # I.e. \"s_shaped\" is not \"None\" and therefore the data will be\n # prepared for a CNN.\n train_vectors, test_vectors = self.reshape_data(train_vectors,\n test_vectors)\n\n print(\"- Done.\")\n return train_vectors, train_output, test_vectors, test_output", "title": "" }, { "docid": "76056d0f3606d434b26259e4d585e4ca", "score": "0.56902", "text": "def preprocess(self):\n if self.params.STANDARDIZE_DATA:\n self.X_train, self.X_validation = self.preprocessing.standardize(\n self.X_train, self.X_validation)\n if self.params.NORMALIZE_DATA:\n self.X_train, self.X_validation = self.preprocessing.normalize(\n self.X_train, self.X_validation)\n if self.params.PCA_DATA:\n self.X_train, self.X_validation = self.preprocessing.PCA(\n self.X_train, self.X_validation)\n if self.params.POLY_DATA:\n self.X_train, self.X_validation = self.preprocessing.polynomial(\n self.X_train, self.X_validation)\n if self.params.POWER_DATA:\n self.X_train, self.X_validation = self.preprocessing.power_transform_data(\n self.X_train, self.X_validation)\n if self.sampler is not None:\n try:\n self.resample()\n except AttributeError:\n pass", "title": "" }, { "docid": "2f440fd6b5dacc56253bc0aa104bff97", "score": "0.56889707", "text": "def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/randhie.csv',\"rb\"), delimiter=\",\",\n names=True, dtype=float)\n names = list(data.dtype.names)\n endog = array(data[names[0]]).astype(float)\n endog_name = names[0]\n exog = data[list(names[1:])]\n exog_name = names[1:]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "title": "" }, { "docid": "0f0150174d765f8a85d247af91d49dc4", "score": "0.5683159", "text": "def process(self):\n data_list = list()\n self.seqs = list()\n for file in self.raw_file_names:\n m = re.search(r'(\\d{3}_)?PF\\d{5}_\\d+', file)\n if m is None:\n continue # skip files with no sequences\n aln = osp.join(self.alns_dir, m.group(0) + '.fasta')\n # print(aln)\n # alternative features. 
Must be eliminated soon\n # sequences, new_tree_path = fastml_asr(file, aln, osp.join(self.root, 'fml_output'))\n tree = Phylo.read(file, 'newick')\n with open(aln) as fin:\n seqs = SeqIO.parse(fin, 'fasta')\n seq_dict = {seq.name: seq.seq for seq in seqs}\n features, ei, el, y, lud = to_coo(tree, self.target_tree, seq_dict)\n self.seqs.append((aln, lud)) # stores sequences of the corresponding Data objects\n\n data_list.append(Data(x=features, edge_index=ei, edge_attr=el, y=y, filename=file))\n\n if self.pre_filter is not None:\n data_list = [data for data in data_list if self.pre_filter(data)]\n\n if self.pre_transform is not None:\n data_list = [self.pre_transform(data) for data in data_list]\n\n self.data, self.slices = self.collate(data_list) # to internal format\n torch.save((self.data, self.slices), self.processed_paths[0])\n with open(self.processed_paths[1], 'wb') as fout:\n pickle.dump(self.seqs, fout)", "title": "" }, { "docid": "1a8f92a7d4ef08b8324af51f1f8eef07", "score": "0.5683028", "text": "def process_data(self):", "title": "" }, { "docid": "a618d888dc5c50274c62fbce50796468", "score": "0.56760615", "text": "def load_dict(self):\n print(\"\")\n print(\"Start preprocessing all in one...\")\n with open(self.data_file, 'r') as json_file:\n data = json.load(json_file)\n print(\"...done preprocessing.\")\n print(\"\")\n return data", "title": "" }, { "docid": "724cd2da0087c18e79d7a54b1ddc4084", "score": "0.5675452", "text": "def prepare_for_use(self):\n if self.transform:\n print(\"Preparing non-transformed dataset...\")\n self.X, self.y, self.n_classes = load_wos(version=self.version)\n else:\n print(\"Preparing transformed dataset...\")\n self.X, self.y, self.n_classes = torch.load(\n TRANSFORMED_DATASETS[self.dataset_idx]\n )\n if self.test_split:\n self.X, self.X_test, self.y, self.y_test = train_test_split(\n self.X, self.y, test_size=0.2\n )\n self.n_samples = len(self.y)\n self.sample_idx = 0\n self.current_sample_x = None\n self.current_sample_y = None\n self.current_seq_lengths = None", "title": "" } ]
8c7e27b74fb3c9a67ed9ef8289982f49
Convert a string into a slug, such as what is used for entity ids.
[ { "docid": "6e4d7635c0970f0453063ba140efb493", "score": "0.6548574", "text": "def slugify(value, separator=\"_\"):\n return slugify_util(value, separator=separator)", "title": "" } ]
[ { "docid": "ea58bb1b6b657f83082acb78fd78913c", "score": "0.84102273", "text": "def to_slug(string):\n return string.replace(' ', '-').lower()", "title": "" }, { "docid": "1eb8f3ccb75563fd56cdd52615dfeb18", "score": "0.82447875", "text": "def create_slug(string):\n output = string.strip().lower().replace(' ', '-')\n output = re.sub(r'-+', '-', output)\n output = re.sub(r'[^-A-Za-z0-9_]+', '', output)\n return output", "title": "" }, { "docid": "29a3dac341d1e6e6645ea081f3ced35f", "score": "0.8060671", "text": "def slugify(string):\n\n slug = unicodedata.normalize('NFKD', unicode(string))\n slug = slug.encode('ascii', 'ignore').lower()\n slug = re.sub(r'[^a-z0-9]+', '_', slug).strip('_')\n slug = re.sub(r'-+', '-', slug)\n\n return slug", "title": "" }, { "docid": "8ddd0e0923ce9d83c5e25725fe30f2b8", "score": "0.77522105", "text": "def slugize(slug):\r\n assert name_pattern.match(slug) is not None\r\n slug = slug.lower()\r\n for c in (' ', ',', '.', '_'):\r\n slug = slug.replace(c, '-')\r\n while '--' in slug:\r\n slug = slug.replace('--', '-')\r\n slug = slug.strip('-')\r\n return slug", "title": "" }, { "docid": "468c091864926c000e1a32d10f7cdbbe", "score": "0.7704633", "text": "def gen_slug(s):\n new_slug = django_slugify(''.join(alphabet.get(w, w) for w in s.lower()), allow_unicode=True)\n return new_slug", "title": "" }, { "docid": "99864302e50878869677614cf4dfce70", "score": "0.75536823", "text": "def slugify(s):\n return import_dotted_path(settings.SLUGIFY)(s)", "title": "" }, { "docid": "8189fbbd2601a0a716c23b9e49726c6b", "score": "0.7486175", "text": "def slugify(inString):\n value = unicode(inString)\n\n import unicodedata\n value = unicodedata.normalize('NFKD', value)\n value = unicode(re.sub('[-\\s]+', '-', value))\n\n return value", "title": "" }, { "docid": "c0d0178b9fd64232daf6e8bc49987f47", "score": "0.7363451", "text": "def sluggify(string_to_slugify, item_id, item_coll_name):\n possible_slug = Slug.collection().find_one({\n \"coll_name\": item_coll_name,\n \"item_id\": item_id\n })\n if possible_slug is not None:\n return possible_slug[\"name\"]\n\n base_slugged_name = unidecode.unidecode(string_to_slugify).lower().replace(' ','-')\n\n #ensure that this is unique\n i = 1\n slugged_name = base_slugged_name\n while get_slug_w_attr(slugged_name, item_coll_name):\n slugged_name = \"%s-%d\" % (base_slugged_name, i)\n i += 1\n\n #lets save it so we can retrieve it l8r on\n s = Slug()\n s.coll_name = item_coll_name\n s.item_id = item_id\n s.name = slugged_name\n s.save()\n\n return slugged_name", "title": "" }, { "docid": "df8e4e404a2be6d45588763bae479244", "score": "0.7306249", "text": "def _slugify(value: str) -> str:\n # import unicodedata\n # value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n # value = re.sub('[^\\w\\s-]', '', value).strip().lower()\n # value = re.sub('[-\\s]+', '-', value)\n return value", "title": "" }, { "docid": "40fbaf6157f75f139fe8054c355ee64e", "score": "0.7305739", "text": "def slugify(value):\n value = str(value)\n value = re.sub(r\"[^\\w\\s-]\", \"\", value).strip().lower()\n return re.sub(r\"[-\\s]+\", \"-\", value)", "title": "" }, { "docid": "ca3d2c8d7cc3e8a1bcf1c9c2f7468288", "score": "0.727568", "text": "def slugify(s, max_length=None):\n s = ustr(s)\n if slugify_lib:\n # There are 2 different libraries only python-slugify is supported\n try:\n return slugify_lib.slugify(s, max_length=max_length)\n except TypeError:\n pass\n uni = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore').decode('ascii')\n slug = 
re.sub('[\\W_]', ' ', uni).strip().lower()\n slug = re.sub('[-\\s]+', '-', slug)\n\n return slug[:max_length]", "title": "" }, { "docid": "b0af4c76e1dec7648f9398c5aa1bd7c3", "score": "0.72679514", "text": "def slug(self):\n slug = unicodedata.normalize('NFKD', self['title']).encode('ascii', 'ignore')\n slug = slug.lower().replace(' ', '_')\n slug = re.sub(r'[^a-z0-9_]', '', slug)\n return slug", "title": "" }, { "docid": "f63759a8240bde9c57bcf15435d45f87", "score": "0.7253083", "text": "def slugId():\n return slugid.nice()", "title": "" }, { "docid": "f43fed2b4b439112643a5fd7983c6258", "score": "0.72389674", "text": "def slugify(value):\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub(r'[^\\w\\s-]', '', value).strip().lower()\n return re.sub(r'[-\\s]+', '_', value)", "title": "" }, { "docid": "91ac0abd22b76f5685583157bf2e4d04", "score": "0.7222285", "text": "def get_slug(text: str) -> str:\n return quote_plus(normalize(\"NFKD\", text)).replace(\"+\", \"_\").casefold()", "title": "" }, { "docid": "1ee7053e125b3b788189f7999d1e48e0", "score": "0.7219612", "text": "def slugify(value):\n value = unicode(value)\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n return re.sub('[-\\s]+', '-', value)", "title": "" }, { "docid": "24a06038f82b84c84ea217d68c717920", "score": "0.7198493", "text": "def slug(text):\n return re.sub(r'\\W+', '-', unidecode(text).lower().strip()).strip('-')", "title": "" }, { "docid": "e6c4118815ae898e4cf88c070de1e0a1", "score": "0.7162107", "text": "def slugify(value):\n import unicodedata\n if sys.version_info < (3, 0):\n value = str(value).decode()\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode()\n value = re.sub('[^\\w\\s-]', '', value).strip().lower()\n\n return re.sub('[-\\s]+', '-', value)", "title": "" }, { "docid": "7899bd05da09d686306f5c2dd20696d5", "score": "0.71608794", "text": "def url_to_slug(url):\n \n url = get_path(url.decode('utf-8', 'ignore'))\n url = re_html.sub('', url).strip().lower()\n url = re_slug.sub(r'-', url)\n url = re_slug_end.sub('', url)\n\n if url.startswith('-'):\n url = url[1:]\n elif url.endswith('-'):\n url = url[-1]\n\n # remove starting slugs for\n # sections\n for path in GOOD_PATHS:\n slug = \"%s-\" % path\n if url.startswith(slug):\n url = url.replace(slug, '')\n\n return url.strip()", "title": "" }, { "docid": "73738bd24bd1c77f245a1c890afba80c", "score": "0.7156237", "text": "def slugify(value):\n value = str(value)\n\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub(r'[^\\w\\s-]', '', value).strip().lower()\n return re.sub(r'[-\\s]+', '-', value)", "title": "" }, { "docid": "e34f69f195ef9743a9c5ee72c9517429", "score": "0.7134269", "text": "def slugify(value):\n import re\n import unicodedata\n from six import text_type\n value = text_type(value)\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('utf8')\n value = re.sub(r'[^\\w\\s-]', '', value).strip().lower()\n value = re.sub(r'[-\\s]+', '_', value)\n return value", "title": "" }, { "docid": "b2cb1cb5ffa58cca90a62cb63fd19406", "score": "0.71176183", "text": "def slugify(value):\n import unicodedata\n value = unicodedata.normalize('NFKD', value)\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip())\n return re.sub('[-\\s]+', '-', value)", "title": "" }, { "docid": "e8e7140c100b3135f873295815c7b4f6", "score": 
"0.7101699", "text": "def slugify(value):\n import unicodedata\n import re\n value = str(unicodedata.normalize('NFKD', value))#.encode('ascii', 'ignore'))\n value = re.sub('[^\\w\\s-]', '', value).strip().lower()\n value = re.sub('[-\\s]+', '-', value)\n return value", "title": "" }, { "docid": "9ee5d0ff8aa349ed1acd6ec7f8d489cc", "score": "0.708062", "text": "def slugify(value):\n import unicodedata\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n return (re.sub('[-\\s]+', '-', value))", "title": "" }, { "docid": "f77fbf804b92cdfdf7e1879bf6efa300", "score": "0.7067411", "text": "def slugify(value):\n value = str(value)\n value = (\n unicodedata.normalize(\"NFKD\", value).encode(\"ascii\", \"ignore\").decode(\"ascii\")\n )\n value = re.sub(r\"[^\\w\\s-]\", \"\", value).strip().lower()\n return re.sub(r\"[-\\s]+\", \"-\", value)", "title": "" }, { "docid": "90f92edeb1ee03bf2d8ed6d0a16560d3", "score": "0.7063539", "text": "def slugify(val: str) -> str:\n despace = re.compile(r\"(?<!_)\\s+(?!_)\") # spaces not surrounded by underscores\n underscores = re.compile(r\"__+\") # multiple underscores\n nonslug = re.compile(r\"[^a-zA-Z0-9_\\-]\") # not letters, numbers, underscores, hyphens\n\n val = despace.sub(\"_\", val)\n val = nonslug.sub(\"\", val)\n val = underscores.sub(\"_\", val) # replace multiple underscores by a single one\n val = val.strip(\"_\") # remove leading and trailing underscores\n\n return val", "title": "" }, { "docid": "3812ad59815874353b4926a08c0ac5fa", "score": "0.70606685", "text": "def slugify(value):\n import re\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub(r'\\.+', '-', value)\n value = re.sub(r'[^\\w\\s-]', '', value).strip().lower()\n return re.sub(r'[-\\s]+', '-', value)", "title": "" }, { "docid": "13cbc5cb1fa7622e070424a6bda44db1", "score": "0.70574164", "text": "def slugify(value):\n import re\n import unicodedata\n value = str(value)\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('utf8').strip().lower()\n value = re.sub(r'[^\\w\\s\\-\\.]', '', value)\n value = re.sub(r'[-\\s]+', '-', value)\n return value", "title": "" }, { "docid": "33573bb88f581cf88ce0839f95a173cd", "score": "0.7051772", "text": "def slug(self):\n return slugify(self.name, max_length=25)", "title": "" }, { "docid": "00b94538cb330fc308d7f829b41d0399", "score": "0.7048197", "text": "def slugify(value):\n value = unicodedata.normalize('NFKD', str(value)).encode(\n 'ascii', 'ignore').decode('ascii')\n value = re.sub(r'[/:\\.]+', '-', value)\n value = re.sub(r'[^\\w\\s-]', '', value).strip().lower()\n value = re.sub(r'[-\\s]+', '-', value)\n return value", "title": "" }, { "docid": "8b1a5a425d6c87377e79dfd62b525a08", "score": "0.70476", "text": "def slugify(value):\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = value.decode('ascii')\n value = re.sub('[^\\w\\s-]', '', value).strip().lower()\n return re.sub('[-\\s]+', '-', value)", "title": "" }, { "docid": "9327da1cabb86206e6d7f62583f1ea2b", "score": "0.70466787", "text": "def _slugify(value):\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = re.sub(r'[^\\w\\s-]', '', value.decode('utf-8', 'ignore'))\n value = value.strip().lower()\n value = re.sub(r'[-\\s]+', '-', value)\n return value", "title": "" }, { "docid": "272554cffe00691d094a6e20b57ba4d0", "score": "0.7040396", "text": "def to_slug(value, 
incoming=None, errors=\"strict\"):\r\n value = safe_decode(value, incoming, errors)\r\n # NOTE(aababilov): no need to use safe_(encode|decode) here:\r\n # encodings are always \"ascii\", error handling is always \"ignore\"\r\n # and types are always known (first: unicode; second: str)\r\n value = unicodedata.normalize(\"NFKD\", value).encode(\r\n \"ascii\", \"ignore\").decode(\"ascii\")\r\n value = SLUGIFY_STRIP_RE.sub(\"\", value).strip().lower()\r\n return SLUGIFY_HYPHENATE_RE.sub(\"-\", value)", "title": "" }, { "docid": "591c87b0077d16e2c3be729cfd8a170b", "score": "0.70101255", "text": "def slugify(value):\n _slugify_strip_re = re.compile(r'[^\\w\\s-]')\n _slugify_hyphenate_re = re.compile(r'[-\\s]+')\n\n if not isinstance(value, unicode):\n value = unicode(value, errors='ignore')\n value = unicodedata.normalize('NFKD', value).encode('utf8', 'ignore')\n value = unicode(_slugify_strip_re.sub('', value).strip().lower())\n return _slugify_hyphenate_re.sub('-', value)", "title": "" }, { "docid": "faa603dc553da21c5e69d594a6ed00c2", "score": "0.7000664", "text": "def slugify(value):\n return value.replace(' ', '_')", "title": "" }, { "docid": "2a2d1b748b13af3c3611e4ecf0cae3ea", "score": "0.6994946", "text": "def slugify(value):\n ## import unicodedata\n ## value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = re.sub('[^\\w\\s-]', '', value).strip()\n re.sub('[-\\s]+', '-', value)\n return(value)", "title": "" }, { "docid": "3e1f373d321f19a4f647efc389eaeb28", "score": "0.6970696", "text": "def slugify(value):\n import unicodedata\n value = str(unicodedata.normalize('NFKD', value).encode('ascii', 'ignore'))\n # Chomp the leading 'b\\'\n value = value[2:]\n value = str(re.sub(\"[^\\w\\s-]\", \"\", value).strip().lower())\n value = str(re.sub(\"[-\\s]+\", \"-\", value))\n return value", "title": "" }, { "docid": "5117197069c5e3f67338f643c5c975a9", "score": "0.6963973", "text": "def slugify(value):\n # import unicodedata\n # value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n # value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n # value = unicode(re.sub('[-\\s]+', '-', value))\n value = re.sub('[^\\w\\s-]', '', value).strip().lower()\n value = re.sub('[-\\s]+', '-', value)\n return value", "title": "" }, { "docid": "9757d3e884218cb95ca8d49356fde627", "score": "0.6961866", "text": "def slugify(value: str) -> str:\n value = str(value)\n value = (\n unicodedata.normalize(\"NFKD\", value).encode(\"ascii\", \"ignore\").decode(\"ascii\")\n )\n value = re.sub(r\"[^\\w\\.\\s-]\", \"\", value).strip()\n return mark_safe(re.sub(r\"[-\\.\\s]+\", \"-\", value))", "title": "" }, { "docid": "db26811275578c043bd482eef3311453", "score": "0.6943121", "text": "def slugify(value):\n value = (\n unicodedata.normalize(\"NFKD\", value).encode(\"ascii\", \"ignore\").decode(\"ascii\")\n )\n value = re.sub(r\"[^\\w\\s-]\", \"\", value).strip().lower()\n return re.sub(r\"[-\\s]+\", \"-\", value)", "title": "" }, { "docid": "e52f081d006308baf1c1d9814d1dc464", "score": "0.6934419", "text": "def slugify(value):\n import unicodedata\n value = unicodedata.normalize('NFKD', unicode(value)).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n value = unicode(re.sub('[-\\s]+', '-', value))\n return value", "title": "" }, { "docid": "67a4ec9ece2780576cb0c8ac2364fa74", "score": "0.692242", "text": "def slugify(self, text):\n text = re.sub(r\"[-'_\\s]\", '_', text)\n text = re.sub(r\"_+\", '_', text).strip('_')\n pat = 
r\"([^,\\(]*)\\((.*?)\\)\" # Remove content within parentheses\n text = re.sub(pat, r'\\g<1>', text).strip()\n try:\n text = unicodedata.normalize('NFKD', text).encode('ascii',\n 'ignore')\n text = str(re.sub(r'[-\\s]+', ' ', text.decode('utf-8')))\n except UnicodeDecodeError:\n self._log.exception(\"Failing to normalize '{0}'\", text)\n return text", "title": "" }, { "docid": "0df5e9e2d907289aa32ce37c425c6105", "score": "0.6919032", "text": "def slugify(text):\n return re.sub(r'\\W', '-', text).lower()", "title": "" }, { "docid": "16aeff86eec012d325fa03e23117209c", "score": "0.6915377", "text": "def slugify(name: str) -> str:\n x = '[\\t !\"#$%&\\'()*\\-/<=>?@\\[\\\\\\]^_`{|},.:]+'\n string = \"\"\n for c in name:\n if c in x or ord(c) >= 128:\n string += \"-{ord:d}-\".format(ord=ord(c))\n else:\n string += c\n return string", "title": "" }, { "docid": "e42833ef891dbace7396ea73bf860a40", "score": "0.6876794", "text": "def slugify_unicode(s):\n chars = []\n for char in str(smart_text(s)):\n cat = unicodedata.category(char)[0]\n if cat in \"LN\" or char in \"-_~\":\n chars.append(char)\n elif cat == \"Z\":\n chars.append(\" \")\n return re.sub(r\"[-\\s]+\", \"-\", \"\".join(chars).strip()).lower()", "title": "" }, { "docid": "0afe007cd7bc3a5c07675f02d718056d", "score": "0.68594295", "text": "def make_file_name(s):\n # adapted from\n # https://docs.djangoproject.com/en/2.1/_modules/django/utils/text/#slugify\n import unicodedata, re\n s = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore')\n s = s.decode()\n s = re.sub(r'[^\\w\\s-]', '', s).strip().lower()\n s = re.sub(r'[-\\s]+', '-', s)\n return s", "title": "" }, { "docid": "244377fb2ccf8dcafd748fb816c63fa2", "score": "0.67958814", "text": "def slugify_from_name(context):\n return slugify(context.current_parameters['name'])", "title": "" }, { "docid": "b3ca995aa2ddaf2e5baaac9faa551963", "score": "0.6794249", "text": "def slugify_user(name):\n slug = sluggable_settings.slugify(name)\n if slug.isdigit():\n return \"-\" + slug\n return slug", "title": "" }, { "docid": "811f41c096ef010194f8dbe358ae1b2d", "score": "0.6693201", "text": "def slug(self):\n pass", "title": "" }, { "docid": "c5f2ef93a4cdb8e429cd2597c4d73528", "score": "0.66848135", "text": "def slug(self):\n import unicodedata\n sin_acento = unicodedata.normalize('NFKD', unicode(self.titulo)).encode('ASCII', 'ignore')\n titulo = sin_acento.replace(' ', '-').strip().lower()\n return titulo", "title": "" }, { "docid": "3fc3051fd6e7f0c20c2c776772d8807f", "score": "0.6627607", "text": "def uuslug(s, instance, entities=True, decimal=True, hexadecimal=True,\n slug_field='slug', filter_dict=None, start_no=1, max_length=0,\n word_boundary=False, separator='-', save_order=False, stopwords=(),\n language_code=None):\n\n if isinstance(instance, ModelBase):\n raise Exception(\"Error: you must pass an instance to uuslug, not a model.\")\n\n queryset = instance.__class__.objects.all()\n if filter_dict:\n queryset = queryset.filter(**filter_dict)\n if instance.pk:\n queryset = queryset.exclude(pk=instance.pk)\n\n # The slug max_length cannot be bigger than the max length of the field\n if language_code:\n # To support django parler there's a little more work needed to get the field.\n try:\n meta = instance._parler_meta._get_extension_by_field(slug_field)\n slug_field_max_length = instance._get_translated_model(\n language_code, meta=meta\n )._meta.get_field(slug_field).max_length\n except AttributeError:\n raise Exception(\"Error: This instance is not from a Django Parler 
TranslatableModel. Remove language_code parameter and try again.\")\n\n else:\n slug_field_max_length = instance._meta.get_field(slug_field).max_length\n\n if not max_length or max_length > slug_field_max_length:\n max_length = slug_field_max_length\n\n slug = slugify(s, entities=entities, decimal=decimal, hexadecimal=hexadecimal,\n max_length=max_length, word_boundary=word_boundary, separator=separator,\n save_order=save_order, stopwords=stopwords)\n\n new_slug = slug\n counter = start_no\n\n def _new_slug(slug, separator, counter, max_length):\n if len(slug) + len(separator) + len(str(counter)) > max_length:\n slug = slug[:max_length - len(slug) - len(separator) - len(str(counter))]\n return \"{}{}{}\".format(slug, separator, counter)\n\n if language_code:\n # need to use translated queryset\n while queryset.translated(**{slug_field: new_slug}):\n new_slug = _new_slug(slug=slug, separator=separator, counter=counter, max_length=max_length)\n counter += 1\n else:\n while queryset.filter(**{slug_field: new_slug}):\n new_slug = _new_slug(slug=slug, separator=separator, counter=counter, max_length=max_length)\n counter += 1\n\n return new_slug", "title": "" }, { "docid": "9f8913c78f811f617e87a209a80a85a3", "score": "0.6627238", "text": "def slugify(value, allow_unicode=False):\n value = str(value)\n if allow_unicode:\n value = unicodedata.normalize(\"NFKC\", value)\n else:\n value = (\n unicodedata.normalize(\"NFKD\", value)\n .encode(\"ascii\", \"ignore\")\n .decode(\"ascii\")\n )\n value = re.sub(r\"[^\\w\\s-]\", \"\", value.lower())\n return re.sub(r\"[-\\s]+\", \"-\", value).strip(\"-_\")", "title": "" }, { "docid": "66f48718f3af65b52165157be54e2ec3", "score": "0.6621387", "text": "def unslug(s):\n m = _UNSLUG_RE.match(s)\n if not m:\n return None, None\n return m.group(1), int(m.group(2))", "title": "" }, { "docid": "462bd03785795a639c5f4bb9ead4c225", "score": "0.659121", "text": "def deslugify(name: str) -> str:\n for special_char in re.findall('(-([\\d]+)-)', name):\n name = name.replace(special_char[0], chr(int(special_char[1])))\n return name", "title": "" }, { "docid": "045e4c55fb9d7930f09034621443464e", "score": "0.6571965", "text": "def slugify(text, entities=True, decimal=True, hexadecimal=True, max_length=0,\n word_boundary=False, separator='-', save_order=False, stopwords=()):\n\n return smart_str(pyslugify(text, entities, decimal, hexadecimal, max_length,\n word_boundary, separator, save_order, stopwords))", "title": "" }, { "docid": "264677a3224c42f93b14a8d617c95ba7", "score": "0.6554725", "text": "def slug(self):\n slug = self.props.get('slug', None)\n if not slug:\n slug = safeslug(self.title)\n return slug", "title": "" }, { "docid": "6abe250cf58bac07f39f77c938ff57b3", "score": "0.654463", "text": "def slugify(self,value):\n self.value = unicode(value)\n self.value = unicodedata.normalize('NFKD', self.value).encode('ascii', 'ignore')\n self.value = unicode(re.sub('[^\\w\\s-]', '', self.value).strip().lower())\n self.value = unicode(re.sub('[-\\s]+', '-', self.value))\n return self.value", "title": "" }, { "docid": "aff36cf92e755fe8da6529d937d5b589", "score": "0.65197545", "text": "def sluggify(title):\n slugged = re.sub(r\"(\\s+)\", \"-\", title.lower())\n stripped = re.sub(r\"(\\..*)\", \"\", slugged)\n return stripped", "title": "" }, { "docid": "a70f72da3d83957432febc8d55771f87", "score": "0.6512801", "text": "def generate_slug(self):\n\n # Create the unique string that will be hashed\n # Multiple things are added so people can't reverse hash the id\n hashable_string = 
\"{0}{1}{2}{3}\".format(self.user_profile.user.id, self.created, self.number_of_tenants, self.id)\n\n # Create the md5 object\n m = hashlib.md5()\n\n # Add the string to the hash function\n m.update(hashable_string.encode('utf-8'))\n\n # Now return the has has the url\n return slugify(m.hexdigest())", "title": "" }, { "docid": "0ace07c9da008174961f3525a0360b52", "score": "0.64331985", "text": "def _clean_slug_value(value):\n return value.replace('.','-').replace('+','-')", "title": "" }, { "docid": "81b655b5d1d1422ef8606c115942ae11", "score": "0.64329165", "text": "def slugify(value, max_length=None, default=None):\r\n value = _unicode(value)\r\n s = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').lower()\r\n s = re.sub('-+', '-', re.sub('[^a-zA-Z0-9-]+', '-', s)).strip('-')\r\n if not s:\r\n return default\r\n\r\n if max_length:\r\n # Restrict length without breaking words.\r\n while len(s) > max_length:\r\n if s.find('-') == -1:\r\n s = s[:max_length]\r\n else:\r\n s = s.rsplit('-', 1)[0]\r\n\r\n return s", "title": "" }, { "docid": "272af85bc1e11a97b453b5b130fd1383", "score": "0.6402615", "text": "def test_slugify(self):\n slugify(\"\", \"\")", "title": "" }, { "docid": "e4d066db122399e28b090eae4fb304e8", "score": "0.63949955", "text": "def slug(self):\n return None if self.id is None else str(self.id)", "title": "" }, { "docid": "e247597bf3c3e0f919e4da82c9a322a9", "score": "0.638516", "text": "def get_slug_name(self) -> str:\n return \"_\".join(self.name.lower().split())", "title": "" }, { "docid": "3db54e65130fba0f870860784e917f76", "score": "0.63813305", "text": "def get_slugify(s, entities=False, decimal=False, hexadecimal=False, invalid=None,\r\n instance=None, manager=None, slug_field='slug', extra_lookup=None,\r\n lower_case=False):\r\n s = force_unicode(s)\r\n if entities:\r\n s = re.sub('&(%s);' % '|'.join(name2codepoint),\r\n lambda m: unichr(name2codepoint[m.group(1)]), s)\r\n if decimal:\r\n try:\r\n s = re.sub('&#(\\d+);',\r\n lambda m: unichr(int(m.group(1))), s)\r\n except ValueError:\r\n pass\r\n if hexadecimal:\r\n try:\r\n s = re.sub('&#x([\\da-fA-F]+);',\r\n lambda m: unichr(int(m.group(1), 16)), s)\r\n except ValueError:\r\n pass\r\n \r\n #translate\r\n s = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore')\r\n #replace unwanted characters\r\n if lower_case:\r\n s = re.sub(r'[^-a-z0-9]+', '-', s.lower())\r\n else:\r\n s = re.sub(r'[^-A-Za-z0-9]+', '-', s)\r\n #remove redundant -\r\n s = re.sub('-{2,}', '-', s).strip('-')\r\n\r\n invalid = invalid or []\r\n if instance:\r\n lookup = extra_lookup or {}\r\n if not manager:\r\n manager = instance.__class__._default_manager\r\n \r\n slug, counter = s, 1\r\n while True:\r\n if slug in invalid:\r\n pass\r\n elif not instance:\r\n return slug\r\n else:\r\n lookup[slug_field] = slug\r\n qs = manager.filter(**lookup)\r\n if instance.pk:\r\n qs = qs.exclude(pk=instance.pk)\r\n if not qs.count():\r\n return slug\r\n slug = \"%s-%s\" % (s, counter)\r\n counter += 1", "title": "" }, { "docid": "9e429cee39c1febdcccc680448bcb5ed", "score": "0.63652635", "text": "def slugify(text):\n # https://leancrew.com/all-this/2014/10/asciifying/\n text = re.sub(\"[–—/:;,.]\", \"-\", text) # replace separating punctuation\n ascii_text = unidecode(text).lower() # best ASCII substitutions, lowercased\n ascii_text = re.sub(r\"[^a-z0-9 -]\", \"\", ascii_text) # delete any other characters\n ascii_text = ascii_text.replace(\" \", \"-\") # spaces to hyphens\n ascii_text = re.sub(r\"-+\", \"-\", ascii_text) # condense repeated 
hyphens\n return ascii_text", "title": "" }, { "docid": "9e429cee39c1febdcccc680448bcb5ed", "score": "0.63652635", "text": "def slugify(text):\n # https://leancrew.com/all-this/2014/10/asciifying/\n text = re.sub(\"[–—/:;,.]\", \"-\", text) # replace separating punctuation\n ascii_text = unidecode(text).lower() # best ASCII substitutions, lowercased\n ascii_text = re.sub(r\"[^a-z0-9 -]\", \"\", ascii_text) # delete any other characters\n ascii_text = ascii_text.replace(\" \", \"-\") # spaces to hyphens\n ascii_text = re.sub(r\"-+\", \"-\", ascii_text) # condense repeated hyphens\n return ascii_text", "title": "" }, { "docid": "11278c76e1aafda22543b05c0c1f7765", "score": "0.63634795", "text": "def slugify(input_text, max_length=150):\n if input_text == '':\n return input_text\n\n allow_unicode_slugs = getattr(settings, 'ALLOW_UNICODE_SLUGS', False)\n if isinstance(input_text, unicode) and not allow_unicode_slugs:\n input_text = unidecode(input_text)\n\n if isinstance(input_text, unicode):\n slug = unicode_slugify(input_text)\n else:\n slug = defaultfilters.slugify(input_text)\n while len(slug) > max_length:\n # try to shorten word by word until len(slug) <= max_length\n temp = slug[:slug.rfind('-')]\n if len(temp) > 0:\n slug = temp\n else:\n #we have nothing left, do not apply the last crop,\n #apply the cut-off directly\n slug = slug[:max_length]\n break\n return slug", "title": "" }, { "docid": "97b9592ad23187d77d14de1d300e288f", "score": "0.6326649", "text": "def slugify_filename(self):\n # import unicodedata\n # import re\n # self.filename = str(unicodedata.normalize('NFKD', self.filename))\n # self.filename = re.sub('[^\\w\\s-]', '', self.filename).strip()\n # self.filename = re.sub('[-\\s]+', '-', self.filename)\n self.filename = self.filename.replace(' ', '_')\n\n return self.filename", "title": "" }, { "docid": "78481321600f5e8f5eba92433938d3e8", "score": "0.63129497", "text": "def slugify_from_title(context):\n return slugify(context.current_parameters['title'])", "title": "" }, { "docid": "1047a1e2166c1f4a7516419ee2e914f6", "score": "0.62419236", "text": "def slug(self) -> str:\n return self._slug", "title": "" }, { "docid": "b014339abbca3484680e1c75bf860217", "score": "0.62275547", "text": "def slug(self, value: str):\n if not value:\n data = dict(id=self.id, title=self.title)\n value = generate_contextual_slug(data)\n self._slug = value", "title": "" }, { "docid": "273e3cb47e5f059cf343c395ff4fa70f", "score": "0.6226737", "text": "def hashslug(word):\n if not isinstance(word, unicode):\n word = word.decode('utf-8')\n return \"{}:{}\".format(\n slugify(word),\n hashlib.md5(word.encode('utf-8')).hexdigest()[:6]\n )", "title": "" }, { "docid": "229b95cffdabc59779a4e54c3aece3ab", "score": "0.6225536", "text": "def _generate_unique_slug() -> str:\n slug = secrets.token_urlsafe(15)\n if not Entries.query.filter_by(slug_link=slug).first():\n return slug\n return _generate_unique_slug()", "title": "" }, { "docid": "0551c7d5a2130777ae8ea515ce0ee701", "score": "0.6199829", "text": "def slugify(text):\n non_url_safe = ['\"', '#', '$', '%', '&', '+',\n ',', '/', ':', ';', '=', '?',\n '@', '[', '\\\\', ']', '^', '`',\n '{', '|', '}', '~', \"'\"]\n non_safe = [c for c in text if c in non_url_safe]\n if non_safe:\n for c in non_safe:\n text = text.replace(c, '')\n # Strip leading, trailing and multiple whitespace, convert remaining whitespace to _\n text = u'_'.join(text.split())\n return text.lower()", "title": "" }, { "docid": "c43e569fdde2b99509ba978e8d6f03f9", "score": "0.61951995", 
"text": "def create_title_slug(self):\n slug = slugify(self.title)\n new_slug = slug\n n = 1\n while ArticlesModel.objects.filter(slug=new_slug).exists():\n new_slug = '{}-{}'.format(slug, n)\n n += 1\n\n return new_slug", "title": "" }, { "docid": "cdf6641b8d936be61e48a38ed207acc5", "score": "0.6105683", "text": "def unique_slug(s,model,num_chars=50):\n\n slug = slugify(s)\n slug = slug[0:num_chars].strip('-') #prendo i primi num_chars caratteri togliendo gli eventuali '-' a inizio e fine\n while True:\n dup = model.objects.filter(slug=slug)\n if not dup:\n return slug\n slug = slug[:39] +'-' + random_string(10) #loop finché lo slug non è unico", "title": "" }, { "docid": "836af387c5cdf61eaa385661aff95c9f", "score": "0.60875386", "text": "def format_mapping_identifier(string):\n\n if not isinstance(string, str):\n helper_logger.log_warning(\n \"Error: mapping identifier is not a string {}\".format(string))\n return\n\n\n # create a working copy (and make it lowercase, while we're at it)\n s = string.lower()\n\n # remove leading and trailing whitespace\n s = s.strip()\n\n # Replace whitespace with underscores\n # Make spaces into underscores\n s = re.sub(r'\\s+', '_', s)\n\n return s", "title": "" }, { "docid": "488665d2bdd68b713ceeb1f62b175888", "score": "0.6052934", "text": "def _create_slug(self, title):\n self.slug = slugify(self.title)", "title": "" }, { "docid": "3ac2e3cf1564f1886c7db3343b38738b", "score": "0.6041467", "text": "def get_unique_slug(cls, title: str) -> str:\n unique_slug = slug = slugify(title)\n num = 1\n while cls.objects.filter(slug=unique_slug).exists():\n unique_slug = f\"{slug}-{num}\"\n num += 1\n return unique_slug", "title": "" }, { "docid": "3650d7a615ec9504190c6916ead9e302", "score": "0.6022097", "text": "def test_slug():\n slugify = fields.SlugField().adapt\n\n for input, expect in [\n ('01 HDR test', '01-hdr-test'),\n ('--&*$#(8$jjsdsd77-----test phrase12 123--', '8jjsdsd77-test-phrase12-123'),\n ('1234', '1234'),\n ('abcdEFG', 'abcdefg'),\n ]:\n assert slugify(input) == expect", "title": "" }, { "docid": "1907e3e8d1ffe534a11f00859e882c15", "score": "0.60158134", "text": "def make_id(string): # {{{\n id = string.lower()\n if not isinstance(id, unicode):\n id = id.decode()\n id = id.translate(_non_id_translate_digraphs)\n id = id.translate(_non_id_translate)\n # get rid of non-ascii characters.\n # 'ascii' lowercase to prevent problems with turkish locale.\n id = unicodedata.normalize('NFKD', id).\\\n encode('ascii', 'ignore').decode('ascii')\n # shrink runs of whitespace and replace by hyphen\n id = _non_id_chars.sub('-', ' '.join(id.split()))\n id = _non_id_at_ends.sub('', id)\n return str(id)", "title": "" }, { "docid": "28d4a887527c6d834d71dde8704bc437", "score": "0.601082", "text": "def test_slugify(self):\n from garage.slugify import slugify\n self._msg('test', 'slugify', first=True)\n\n txt = 'The Renaissance of Giselle “G” Töngi'\n expected = 'the-renaissance-of-giselle-g-tongi'\n result = slugify(txt)\n self._msg('text', txt)\n self._msg('result', result)\n self._msg('expected', expected)\n self.assertEqual(result, expected)\n\n txt = 'Apoyan resolución a favor de niños migrantes en LA'\n expected = 'apoyan-resolucion-a-favor-de-ninos-migrantes-en-la'\n result = slugify(txt)\n self._msg('text', txt)\n self._msg('result', result)\n self._msg('expected', expected)\n self.assertEqual(result, expected)\n\n txt = '“foo! 
écriture 寫作 #bar???”'\n expected = 'foo-ecriture-bar'\n result = slugify(txt)\n self._msg('text', txt)\n self._msg('result', result)\n self._msg('expected', expected)\n self.assertEqual(result, expected)\n\n txt = txt = 'Nín hǎo. Wǒ shì zhōng guó rén'\n expected = 'nin-hao-wo-shi-zhong-guo-ren'\n result = slugify(txt)\n self._msg('text', txt)\n self._msg('result', result)\n self._msg('expected', expected)\n self.assertEqual(result, expected)", "title": "" }, { "docid": "56580f460ad8eeb9b8c5b82c63b99081", "score": "0.60001695", "text": "def makeTemplateSlugUnique(template_model, slug):\n existing_slugs = db.session.query(template_model).filter(template_model.slug.contains(slug)).order_by(template_model.slug).all()\n # Split the slug so we can check if the last word is a number (count), if it is, we increment by 1 and add\n # it to the end of the slug\n if len(existing_slugs) != 0:\n slug_array = str(existing_slugs[len(existing_slugs) - 1].slug).split(\"-\")\n if slug_array[len(slug_array)-1].isdigit():\n slug = slug + \"-\" + str(int(slug_array[len(slug_array)-1]) + 1)\n else:\n slug = slug + \"-\" + \"1\"\n\n return slug", "title": "" }, { "docid": "dcde52e53ef24b20b537a964c830b7ec", "score": "0.5999104", "text": "def slugify():\n df = pipein()\n df.columns = [slugify_name(name) for name in df.columns]\n pipeout(df)", "title": "" }, { "docid": "79f7ce6d40a970302b701cd148e9e820", "score": "0.59949183", "text": "def generate_usable_slug(recipe):\n slug = slugify(recipe.name)\n\n # Reuse existing slug if we can\n if recipe.slug and recipe.slug == slug:\n return recipe.slug\n\n append = 0\n while True:\n count = Recipe.all()\\\n .filter('owner =', recipe.owner)\\\n .filter('slug =', slug)\\\n .count()\n\n if not count:\n break\n\n append += 1\n slug = slugify(recipe.name) + str(append)\n\n return slug", "title": "" }, { "docid": "8c1e5a50b89583dd81d3752f1d7d36c6", "score": "0.5994874", "text": "def slug(self, slug):\n if slug is not None and len(slug) > 128:\n raise ValueError(\"Invalid value for `slug`, length must be less than or equal to `128`\")\n if slug is not None and len(slug) < 1:\n raise ValueError(\"Invalid value for `slug`, length must be greater than or equal to `1`\")\n\n self._slug = slug", "title": "" }, { "docid": "baced453f93fcb8724eeb016f2376193", "score": "0.5994069", "text": "def generate_unique_slug(model_instance, slug_field_name):\n address = getattr(model_instance, 'address')\n title = getattr(model_instance, 'title')\n\n street = address.get('Street')\n\n if street is not None:\n slug_text = f'{street} {title}'\n else:\n slug_text = f'{title}'\n\n slug = slugify(slug_text)\n unique_slug = slug\n extension = 1\n ModelClass = model_instance.__class__\n\n while ModelClass._default_manager.filter(\n **{slug_field_name: unique_slug}\n ).exists():\n unique_slug = '{}-{}'.format(slug, extension)\n extension += 1\n\n return unique_slug", "title": "" }, { "docid": "35f6d6f566f8da284dd1d81c585e58a3", "score": "0.5983918", "text": "def slugify_file(filename):\n if \".\" in filename:\n name = \"\".join(filename.split('.')[:-1])\n extension = \".{0}\".format(filename.split('.')[-1].strip())\n else:\n name = filename\n extension = \"\"\n return \"{0}{1}\".format(slugify(name), extension)", "title": "" }, { "docid": "dc4e2c6ecb2993c1576c3e6d1229af77", "score": "0.5983344", "text": "def cloudsearch_to_django_id(s):\n return s.replace('__', '.')", "title": "" }, { "docid": "f927a29a9c2f351c4431e9af473586a1", "score": "0.5968814", "text": "def slug(self):\n return self._slug", "title": 
"" }, { "docid": "f927a29a9c2f351c4431e9af473586a1", "score": "0.5968814", "text": "def slug(self):\n return self._slug", "title": "" }, { "docid": "f927a29a9c2f351c4431e9af473586a1", "score": "0.5968814", "text": "def slug(self):\n return self._slug", "title": "" }, { "docid": "f927a29a9c2f351c4431e9af473586a1", "score": "0.5968814", "text": "def slug(self):\n return self._slug", "title": "" }, { "docid": "5d0ea975769e778612ac6069c692ed3a", "score": "0.59678334", "text": "def _generate_valid_slug(source, parent, language):\n if parent:\n qs = Title.objects.filter(language=language, page__parent=parent)\n else:\n qs = Title.objects.filter(language=language, page__parent__isnull=True)\n used = qs.values_list('slug', flat=True)\n baseslug = slugify(source)\n slug = baseslug\n i = 1\n while slug in used:\n slug = '%s-%s' % (baseslug, i)\n i += 1\n return slug", "title": "" }, { "docid": "2a72a03f3e50d5a0900cdb34088a57f1", "score": "0.59392196", "text": "def get_slug(self):\n if self.slug:\n return self.slug\n else:\n raise NotImplementedError('You must override get_slug(), or send a slug to __init__().')", "title": "" }, { "docid": "3d069c08f490a1ce26ee33d3979bbfd6", "score": "0.5901197", "text": "def path_to_slug(path):\n from mezzanine.urls import PAGES_SLUG\n\n lang_code = translation.get_language_from_path(path)\n for prefix in (lang_code, settings.SITE_PREFIX, PAGES_SLUG):\n if prefix:\n path = path.replace(prefix, \"\", 1)\n return clean_slashes(path) or \"/\"", "title": "" }, { "docid": "d3427ea5f16b6ee5ddd9792b85bf6949", "score": "0.5849399", "text": "def unique_slug(queryset, slug_field, slug):\n i = 0\n while True:\n if i > 0:\n if i > 1:\n slug = slug.rsplit(\"-\", 1)[0]\n slug = \"%s-%s\" % (slug, i)\n try:\n queryset.get(**{slug_field: slug})\n except ObjectDoesNotExist:\n break\n i += 1\n return slug", "title": "" }, { "docid": "75557ed9b815f62e5fa4385e47528b2c", "score": "0.5844323", "text": "def unique_slugify(instance, value, slug_field_name='slug', queryset=None,\n slug_separator='-'):\n slug_field = instance._meta.get_field(slug_field_name)\n\n slug = getattr(instance, slug_field.attname)\n slug_len = slug_field.max_length\n\n # Sort out the initial slug. Chop its length down if we need to.\n slug = slugify(value)\n if slug_len:\n slug = slug[:slug_len]\n slug = _slug_strip(slug, slug_separator)\n original_slug = slug\n\n # Create a queryset, excluding the current instance.\n if not queryset:\n queryset = instance.__class__._default_manager.all()\n if instance.pk:\n queryset = queryset.exclude(pk=instance.pk)\n\n # Find a unique slug. If one matches, at '-2' to the end and try again\n # (then '-3', etc).\n next = 2\n while not slug or queryset.filter(**{slug_field_name: slug}):\n slug = original_slug\n end = '-%s' % next\n if slug_len and len(slug) + len(end) > slug_len:\n slug = slug[:slug_len-len(end)]\n slug = _slug_strip(slug, slug_separator)\n slug = '%s%s' % (slug, end)\n next += 1\n\n setattr(instance, slug_field.attname, slug)", "title": "" }, { "docid": "98215b2da8110020b245357e3f94ec7c", "score": "0.5832261", "text": "def _slug(self):\n return \"{} {}\".format(self.description, self.version)", "title": "" } ]
e89be0eb7eb2b4e6fb6a949ba47ad99e
Check the given kind is of our type.
[ { "docid": "640d7d9074cbae98b60f03e272a6fc59", "score": "0.7642859", "text": "def match(cls, kind: 'dsl.Any') -> bool:\n return isinstance(kind, cls)", "title": "" } ]
[ { "docid": "c11b585ca9647e51a22fc6a9c1ce9840", "score": "0.7114784", "text": "def verify_type(self, obj):\n return isinstance(obj, self.type_)", "title": "" }, { "docid": "fe57552b29789df749ffecff55d4755a", "score": "0.69722956", "text": "def CheckType(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "ee403968a5b6b0bde4e1def6a07bf637", "score": "0.6845358", "text": "def is_type(self, typ):\n return typ == self.__class__.__name__", "title": "" }, { "docid": "6eee24d08a9b98f8576ba11e8e9d17f3", "score": "0.6817635", "text": "def is_valid_type(type):\n return type in type_to_adapter", "title": "" }, { "docid": "8831e290f5fb5727e0adb3fcc6db001c", "score": "0.66755646", "text": "def ContainsHandlesOrInterfaces(kind):\n # We remember the types we already checked to avoid infinite recursion when\n # checking recursive (or mutually recursive) types:\n checked = set()\n\n def Check(kind):\n if kind.spec in checked:\n return False\n checked.add(kind.spec)\n if IsStructKind(kind):\n return any(Check(field.kind) for field in kind.fields)\n if IsUnionKind(kind):\n return any(Check(field.kind) for field in kind.fields)\n if IsAnyHandleKind(kind):\n return True\n if IsAnyInterfaceKind(kind):\n return True\n if IsArrayKind(kind):\n return Check(kind.kind)\n if IsMapKind(kind):\n return Check(kind.key_kind) or Check(kind.value_kind)\n return False\n\n return Check(kind)", "title": "" }, { "docid": "5743313b68bbd93d72f0f85ec0c6dc4a", "score": "0.65869826", "text": "def check_type(self):\n return True", "title": "" }, { "docid": "09797e178b92462f28fab9a2a417d5f9", "score": "0.6571978", "text": "def check_type(instance, type):\n\tif not isinstance(instance, type):\n\t\traise TypeError('Instance expected type {0}, but got: {1}', type(type), type(instance))", "title": "" }, { "docid": "c91629b2e5a678b3e69b2109452a248c", "score": "0.64874387", "text": "def has_exactly_type(obj, tpe):\r\n return type(obj) == tpe", "title": "" }, { "docid": "9f6fab0c92bf50d3034c7c6e786877a5", "score": "0.6454575", "text": "def test_has_exactly_type():\r\n\r\n return has_exactly_type(1, int) and not has_exactly_type(True, int) and has_exactly_type(True, bool)", "title": "" }, { "docid": "67845fbd06503d25bc596f7bbfdfa245", "score": "0.6449043", "text": "def assert_kind(kind: Any) -> None:\r\n if not kind in {\"T\", \"P\"}:\r\n raise ValueError(f\"`kind` should be 'T' or 'P', but got {kind}.\")", "title": "" }, { "docid": "e6e27d284426e560dbccda82fcaebaf2", "score": "0.6442731", "text": "def _isinstancetype(an_obj):\n if an_obj is None: return False\n if not PY3K:\n return isinstance(an_obj, types.InstanceType)\n typstr = str(type(an_obj))\n # the following logic works, as PyRAF users expect, in both v2 and v3\n return typstr==\"<type 'instance'>\" or \\\n (typstr.startswith(\"<class '\") and ('.' 
in typstr))", "title": "" }, { "docid": "a164d502f3537e4f3c397c0ff1a1c03a", "score": "0.6386812", "text": "def is_valid_type(self, question_type):\n\t\treturn question_type in self.valid_types", "title": "" }, { "docid": "46cda4ceae4bc4a080c802a4dc0880e7", "score": "0.63858825", "text": "def ensure(cls, kind: 'dsl.Any') -> 'dsl.Any':\n if not cls.match(kind):\n raise _exception.GrammarError(f'{kind} is not a {cls.__name__}')\n return kind", "title": "" }, { "docid": "3cb33fb5b3816c409c4932dde0e0689b", "score": "0.6375703", "text": "def checkType(self, value):\n pass", "title": "" }, { "docid": "8423ef3f52e2fb7e1bb21ff0474ff80d", "score": "0.63279814", "text": "def assert_type(instance, classtype):\n assert_cond(isinstance(instance, classtype), TypeCheckError(type(instance), classtype))", "title": "" }, { "docid": "199d7031c4bcc2ce11164281f1580f63", "score": "0.63035226", "text": "def is_type(obj: Any) -> bool:\n return type(obj).__name__ == \"type\"", "title": "" }, { "docid": "336de900a6623ecec0d9b386d60b7067", "score": "0.62628084", "text": "def type_check(typ, obj):\n type_s = type_str(typ) # convert to string if necessary\n\n nest_depth = type_s.count(\"List\")\n assert type_s.count(\"[\") == nest_depth, \"type_check only supports List for now, no Sets, Dicts, Tuples, ...\"\n\n assert type_s.startswith(\"List[\" * nest_depth) and type_s.endswith(\"]\" * nest_depth)\n base_type = {\"bool\": bool, \"int\": int, \"float\": float, \"str\": str}[type_s[5 * nest_depth:len(type_s) - nest_depth]]\n\n def helper(depth, o):\n if depth == 0:\n return type(o) is base_type\n else:\n return type(o) is list and all(helper(depth - 1, i) for i in o)\n\n return helper(nest_depth, obj)", "title": "" }, { "docid": "1356c99492cab02944ee82b41601578f", "score": "0.6258652", "text": "def is_type(self, type_name):\n\n return type_name in self._symtab", "title": "" }, { "docid": "803e8aea15b243f05c57a6bee5211b65", "score": "0.6245005", "text": "def check_tree_type(tree):\n return tree.type in ref", "title": "" }, { "docid": "9116e2345348b35d42c7948399eb82a5", "score": "0.6239373", "text": "def _determineItemKind(self, path, kind):\n # pylint: disable=E1101\n # E1101: pylint could not resolve the node_kind attribute.\n \n entry = self._determineInfo(path)\n return entry.kind == kind", "title": "" }, { "docid": "cb29031b9eafd26cb087191552ca6317", "score": "0.62293893", "text": "def ContainsNativeTypes(kind):\n # We remember the types we already checked to avoid infinite recursion when\n # checking recursive (or mutually recursive) types:\n checked = set()\n\n def Check(kind):\n if kind.spec in checked:\n return False\n checked.add(kind.spec)\n if IsEnumKind(kind):\n return kind.native_only\n if IsStructKind(kind):\n if kind.native_only:\n return True\n if any(enum.native_only for enum in kind.enums):\n return True\n return any(Check(field.kind) for field in kind.fields)\n if IsUnionKind(kind):\n return any(Check(field.kind) for field in kind.fields)\n if IsInterfaceKind(kind):\n return any(enum.native_only for enum in kind.enums)\n if IsArrayKind(kind):\n return Check(kind.kind)\n if IsMapKind(kind):\n return Check(kind.key_kind) or Check(kind.value_kind)\n return False\n\n return Check(kind)", "title": "" }, { "docid": "cd03e996d5205e3a4e5b527c75674b49", "score": "0.6197552", "text": "def is_valid_type(self, attr: Optional[str] = None) -> bool:\n try:\n self.validate_type(attr)\n except TypeError:\n return False\n return True", "title": "" }, { "docid": "18812f2923878c505e1d4b6763ddc30b", "score": "0.6193997", 
"text": "def has_type(self, item_type):\n raise NotImplementedError()", "title": "" }, { "docid": "58b5b12276560319d3b752bf6ac19fd3", "score": "0.6192976", "text": "def respondsTo(cls, self, kind):\n return True", "title": "" }, { "docid": "16f4ae15e81cab5269dde1512b81ca46", "score": "0.6151988", "text": "def is_my_case(self, type_):\n return (\n isinstance(self.__apply_sequence(type_), self.declaration_class)\n )", "title": "" }, { "docid": "3699c9e7af22cd77a32e5e9402fcef97", "score": "0.61519825", "text": "def _valid_typable_object(ui_object, platform=Platform.ANDROID):\n if platform == Platform.ANDROID:\n return ui_object.obj_type in _TYPABLE_OBJECT_DESC.keys()\n else:\n assert False, 'Wrong Platform'", "title": "" }, { "docid": "e7d704a6688ff8793b9039ae8d311ff9", "score": "0.6150494", "text": "def is_my_type(type_str):\n raise NotImplementedError()", "title": "" }, { "docid": "78c35847210eeec74f07a8384de0c686", "score": "0.61154616", "text": "def exists_type(self, type):\n for i in range(1, self.grid_size - 1):\n for j in range(1, self.grid_size - 1):\n obj = self.grid.get(i, j)\n if obj and obj.type == type:\n return True\n return False", "title": "" }, { "docid": "5e991aa4fedc9bb4a0d0c48a4790da95", "score": "0.60748667", "text": "def __is_type_instance( self, instance_type ):\n for index, instance in enumerate(INSTANCE_TYPES):\n if instance == instance_type:\n return True\n return False", "title": "" }, { "docid": "c31afa0024de479b1abf826c05f48d30", "score": "0.6065099", "text": "def is_kind_of_class(obj, a_class):\n return(isinstance(obj, a_class))", "title": "" }, { "docid": "2d2213dcb03f5457a113163cc47702b4", "score": "0.60533005", "text": "def validatePredefinedType(self, type: int) -> bool:\n ...", "title": "" }, { "docid": "01fc0f8ed02d3dde9c398425e175a95b", "score": "0.605305", "text": "def _is_valid_type(_type: Type[Any]) -> bool:\n\n if _type in _TYPE_MAP:\n return True\n\n if not inspect.isclass(_type):\n return False\n\n return issubclass(_type, Table)", "title": "" }, { "docid": "b337f37ee111273065fc81c61ae14da8", "score": "0.60432154", "text": "def is_type(self, *seg_type: str) -> bool:\n return self.class_is_type(*seg_type)", "title": "" }, { "docid": "31beb89ab2b637cddd2fc399a93622f2", "score": "0.6037506", "text": "def _valid_typable_object_with_name(ui_object, platform=Platform.ANDROID):\n if platform == Platform.ANDROID:\n return (ui_object.obj_type in _TYPABLE_OBJECT_DESC.keys() and\n _valid_object_with_name(ui_object))\n else:\n assert False, 'Wrong Platform'", "title": "" }, { "docid": "a2efd9fd18aa5a83367ae839abc9dbf3", "score": "0.60301566", "text": "def _assert_type(type):\n if isinstance(type, str):\n o, v, p, t = type.split('.')\n if not ontologies.is_supported(o, v, p, t):\n rt.throw(\"Type {0}.v{1}.{2} is unsupported.\".format(o, v, p, t))\n elif type not in ontologies.get_types():\n rt.throw(\"Type {0} is unsupported.\".format(type))", "title": "" }, { "docid": "ea5ab40fc0b90ddc971be666b0698c82", "score": "0.6029887", "text": "def _isinstance(cls, x):\n return isinstance(x, cls.PYTHON_TYPE_CHECK)", "title": "" }, { "docid": "8cdbba175413452d9938d09b77eef07a", "score": "0.60142946", "text": "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class) is True:\n return True\n else:\n return False", "title": "" }, { "docid": "c06b250f0a799474b0dedba949042f4d", "score": "0.600672", "text": "def is_kind_of_class(obj, a_class):\n return (isinstance(obj, a_class))", "title": "" }, { "docid": "5e7a79adef032b1b26f0eca8aa623600", "score": "0.60043645", 
"text": "def check_class(instance, type):\n\tif not issubclass(instance, type):\n\t\traise TypeError('Subclass expected type {0}, but got: {1}', type(type), type(instance))", "title": "" }, { "docid": "3a53512bf638b1007d2eae86345985c9", "score": "0.59954184", "text": "async def checktype(self, ctx:commands.Context):\r\n\r\n t = await self.GetChannelType(ctx.guild, ctx.channel.id)\r\n if t == 'none':\r\n await ctx.send(\r\n f'<#{ctx.channel.id}> is a normal channel (use `register <channel type>` to make this a specialized channel)')\r\n else:\r\n await ctx.send(f'<#{ctx.channel.id}> is a {t}')", "title": "" }, { "docid": "f9e51ccc0138b8bfb5342451267168df", "score": "0.5984067", "text": "def check_input_type(var, type_name):\n\n type_options = [\"int\", \"float\", \"Date\", \"Region\"]\n if type_name == type_options[0]:\n if int(var):\n return True\n else:\n return False\n elif type_name == type_options[1]:\n if float(var):\n return True\n else:\n return False\n elif type_name == type_options[2]:\n if datetime.date.fromisoformat(var):\n return True\n else:\n return False\n elif type_name == type_options[3]:\n valid_regions = [\"NW\", \"SW\", \"MN\", \"MS\", \"NE\", \"SE\"]\n is_valid = False\n for region in valid_regions:\n if var == region:\n is_valid = True\n return is_valid\n else:\n Exception(\"This type doesn't exist in the checker!\")", "title": "" }, { "docid": "d9cfcac54d012c85938d63162c7f6b85", "score": "0.597329", "text": "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False", "title": "" }, { "docid": "d9cfcac54d012c85938d63162c7f6b85", "score": "0.597329", "text": "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False", "title": "" }, { "docid": "b1491eda0a8586e2646aba96bd8aacc2", "score": "0.5972322", "text": "def is_kind_of_class(obj, a_class):\n\n return (isinstance(obj, a_class))", "title": "" }, { "docid": "17304fed55dd7e30a46e9fcf73981a61", "score": "0.5951034", "text": "def type_check_expr(value, kind):\n if not isinstance(kind, type):\n raise TypeError(\"%s is not a type\" % kind)\n if not isinstance(value, kind):\n msg = \"expression value has invalid type '%s'\"\n raise TypeError(msg % type(value).__name__)", "title": "" }, { "docid": "5529a272779e57ea03a6bc065973f70a", "score": "0.59442425", "text": "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n return False", "title": "" }, { "docid": "de06f731a7a73f7888a6d2ff4af96ccb", "score": "0.5937826", "text": "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "title": "" }, { "docid": "de06f731a7a73f7888a6d2ff4af96ccb", "score": "0.5937826", "text": "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "title": "" }, { "docid": "de06f731a7a73f7888a6d2ff4af96ccb", "score": "0.5937826", "text": "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "title": "" }, { "docid": "de06f731a7a73f7888a6d2ff4af96ccb", "score": "0.5937826", "text": "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "title": "" }, { "docid": "de06f731a7a73f7888a6d2ff4af96ccb", "score": "0.5937826", "text": "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "title": "" }, { "docid": "de06f731a7a73f7888a6d2ff4af96ccb", "score": "0.5937826", "text": "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "title": "" }, { "docid": "71facf48ebc192f6027b2bb0bc6f675a", "score": "0.59377545", "text": "def 
_is_mechanism_spec(spec):\n if inspect.isclass(spec) and issubclass(spec, Mechanism):\n return True\n if isinstance(spec, Mechanism):\n return True\n return False", "title": "" }, { "docid": "dc95b5213965cf5d11e89d1be7fb9cd4", "score": "0.5935159", "text": "def is_instance(self, thing: Any) -> bool:\n return isinstance(thing, self.underlying)", "title": "" }, { "docid": "7f0b199563eb3c82074f6877403f14bc", "score": "0.5930548", "text": "def check_type(filename):\n try:\n im = Image.read(filename)\n except SanperaError:\n return False\n else:\n return im.original_format in [b'JPEG', b'PNG', b'GIF']", "title": "" }, { "docid": "30661f473881ab9e9e8f6f8e5b0be09b", "score": "0.5928545", "text": "def isinstance_safe(value, type_):\n try:\n return isinstance(value, type_)\n except TypeError:\n # Cannot perform isinstance on some types\n return False", "title": "" }, { "docid": "1b5970bf2e068fc4d1ca735341b582e4", "score": "0.59265137", "text": "def is_type(obj):\n return type(obj) is type or type(obj) is types.ClassType", "title": "" }, { "docid": "8312551a084ba5028923f63efd6788d0", "score": "0.5925926", "text": "def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.isKindOfClass(self))\n else:\n return False", "title": "" }, { "docid": "1aa0e53ca30082b32eb3753c04d9f5fc", "score": "0.5924234", "text": "def is_kind_of_class(obj, a_class):\n\n if isinstance(obj, a_class):\n return True\n else:\n return False", "title": "" }, { "docid": "c049ac524eddacafe0721da7c5cefe97", "score": "0.59214234", "text": "def check_type(obj, expected_type):\n\n class Model(pydantic.BaseModel):\n data: expected_type\n\n # convert ValidationError to TypeError if the obj does not match the expected type\n try:\n Model(data=obj)\n except pydantic.ValidationError as ve:\n raise TypeError(str(ve.errors()))\n\n return True # allow constructs like assert check_type(x, List[float])", "title": "" }, { "docid": "66790d3843c5f32e7715f7b1f42921f6", "score": "0.59047765", "text": "def type_valid(self):\n return contain_in_list_equal(self._type_or_ref, PARAM_RES_TYPES)", "title": "" }, { "docid": "9227ee640b08e68eab8b2ef62bbe928d", "score": "0.5886737", "text": "def is_choices(typ) -> bool:\n return type(typ) is type(Choices)", "title": "" }, { "docid": "d0138e03b88abeb7fc45986e3a6c6130", "score": "0.5885978", "text": "def is_type(verifield, required):\n return verifield is None or isinstance(verifield, required)", "title": "" }, { "docid": "e01ad94f2c7a37aaef7567ae31c05657", "score": "0.58816344", "text": "def is_of_type(cmd):\r\n raise NotImplementedError()", "title": "" }, { "docid": "054d882d8ce92f97f009fecd9b5396d8", "score": "0.58784306", "text": "def match(self, cls):\n return isinstance(self, cls)", "title": "" }, { "docid": "f5c0b8631b84ce7924108b1ec2e2e9ee", "score": "0.587695", "text": "def _type_check(data):\n if data.__class__.__name__ != \"Matrix3\":\n return False\n return True", "title": "" }, { "docid": "8d2cdb9934a914f59cb302c52daa993c", "score": "0.5873936", "text": "def check_individual_type(self, individual, classURI):\r\n\r\n\t\t# Query to return classes of individual\r\n\t\ttxtQuery = \"select ?type { <\" + individual + \"> a ?type.}\"\r\n\r\n\t\t# issue sparql query\r\n\t\tresultSet = self.ontology.query(query_object=txtQuery)\r\n\t\tclasses = []\r\n\t\tfor r in resultSet:\r\n\t\t\tclasses.append(str(r.type))\r\n\t\t# if include_self include concept\r\n\t\tif classURI in classes:\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False", "title": "" }, { 
"docid": "6b92e4e353e2d99fa6006e89ab7a972b", "score": "0.5869611", "text": "def is_kind_of_class(obj, a_class):\n\n return isinstance(obj, a_class)", "title": "" }, { "docid": "695a0a4efd19f7535ef26669936fcc8c", "score": "0.5861913", "text": "def _isinstance(self, instance, raise_error=True):\n\n if isinstance(instance, self.__model__):\n return True\n elif raise_error:\n raise ValueError('{} is not of type {}.'.format(\n instance, self.__model__,\n ))\n else:\n return False", "title": "" }, { "docid": "064855884ff1f0cca80b8c6557110d0d", "score": "0.58614653", "text": "def _is_valid_entity_type(self, entity_type):\n return entity_type in [\"artist\", \"song\", \"genre\"]", "title": "" }, { "docid": "bef1d821522e0a1d99869db68a9130dc", "score": "0.585617", "text": "def type(self, kind):\n self.type = kind", "title": "" }, { "docid": "1cdf82cd2e1e28b0fd067eb8127822a0", "score": "0.5848438", "text": "def typematch(variable, expectedtype):\n\n # Return the result\n return isinstance(variable, expectedtype)", "title": "" }, { "docid": "f58f240ca9c3ce9861f18c51839acd59", "score": "0.58297235", "text": "def is_managing_kind(self, kind):\n return False", "title": "" }, { "docid": "657f6e644a664ee6bd43ca5ccbb6d24f", "score": "0.58293086", "text": "def __type_okay(value, argtype, allow_none=False):\n if value is None:\n return allow_none\n if isinstance(argtype, str):\n if argtype in __macros:\n return __type_okay(value, __macros[argtype], allow_none=allow_none)\n elif argtype is 'int':\n return __is_int(value)\n elif argtype is 'float':\n return __is_float(value)\n return argtype in [cls.__name__ for cls in value.__class__.__mro__]\n elif isinstance(argtype, type):\n if argtype == six.text_type:\n return isinstance(value, six.text_type) or isinstance(value, six.string_types)\n elif argtype == str:\n return isinstance(value, six.string_types)\n elif argtype is int:\n return __is_int(value)\n elif argtype is float:\n return __is_float(value)\n return isinstance(value, argtype)\n elif isinstance(argtype, tuple) or isinstance(argtype, list):\n return any(__type_okay(value, i) for i in argtype)\n else: # argtype is None\n return True", "title": "" }, { "docid": "0e23c914889c1ccd76f845175b0e5c48", "score": "0.58269256", "text": "def _is_valid(self, value):\n\n # Entities have an istypeof method that can perform more sophisticated\n # type checking.\n if hasattr(self._type, \"istypeof\"):\n return self._type.istypeof(value)\n else:\n return isinstance(value, self._type)", "title": "" }, { "docid": "885283b7c9f7c3726e2674c0feb2f954", "score": "0.5822533", "text": "def test_expected_type(val, exp_type):\n\n if not isinstance(val, exp_type):\n return False", "title": "" }, { "docid": "8ae00ec44179db2eead263de69b7b1a3", "score": "0.5805722", "text": "def _validate_type(self) -> None:\n # TODO: add transformation logic so that we don't have to transform inputs at every place they are used, including v1 back compat support\n if not spec_type_is_parameter(self.type):\n type_utils.validate_bundled_artifact_type(self.type)", "title": "" }, { "docid": "f9b21fdb525fd11d79f109d0eb89d92a", "score": "0.58027023", "text": "def check_type(self, environment: Environment) -> Type:\n return self.typt_type", "title": "" }, { "docid": "e5f92ea715fc55682c53f18d6690cca5", "score": "0.5801304", "text": "def analyze_type(self, tipo):\n\n if not self.c_types:\n return self._validate_built_in(tipo)\n elif tipo in self.c_types or self._match_array(tipo, self.c_array_types):\n return self._validate_source(tipo)\n else:\n return 
self._validate_built_in(tipo)", "title": "" }, { "docid": "a5946d66e4e3a8f86b2ac1e7e4a7eb67", "score": "0.5790497", "text": "def is_Type(tp):\n if isinstance(tp, type):\n return True\n try:\n typing._type_check(tp, '')\n return True\n except TypeError:\n return False", "title": "" }, { "docid": "5806df4c39bf375765b2bb1d42d66d26", "score": "0.5776113", "text": "def check_ref_type(self, ref, allowed_types):\n obj_type = self.get_object_type(ref).lower()\n for t in allowed_types:\n if t.lower() in obj_type:\n return True\n return False", "title": "" }, { "docid": "f86f4b9fa147e691df0fefe581e4092c", "score": "0.5773768", "text": "def check_filekind(self):\n assert self.filekind in self.obs_package.FILEKINDS, \\\n \"Invalid filekind \" + repr(self.filekind) + \" in \" + repr(self.filename)", "title": "" }, { "docid": "b608cc4d4007abc2f305db052ae0e1f8", "score": "0.57645214", "text": "def is_type(self, ent_type):\n # type: (str) -> bool\n # its always an entity ...\n if ent_type.lower() in ('entity', self.settings['_type'].lower()):\n return True\n else:\n return False", "title": "" }, { "docid": "adb2201e1751dbe319eff9206620ff9a", "score": "0.5762839", "text": "def _check_type(self):\n check_type = DESCRIPTOR_VALUE.get(self.descriptor.type)\n if check_type is None:\n raise ValueError(\"Unknown metric descriptor type\")\n for ts in self.time_series:\n if not ts.check_points_type(check_type):\n raise ValueError(\"Invalid point value type\")", "title": "" }, { "docid": "f02d7728ba75e21bb2d1a4db1a9146c8", "score": "0.57608366", "text": "def is_instance(instance, expected_types):\n for expected_type in expected_types:\n if isinstance(instance, expected_type):\n return True\n\n return False", "title": "" }, { "docid": "a7844526897c325def95160f12c5b804", "score": "0.5757373", "text": "def is_supported_type(self) -> bool:\n t = self.type.strip()\n return t in self.SUPPORTED_LABELS or t.lower() in self.SUPPORTED_LABELS", "title": "" }, { "docid": "92aa1a8f685d4fada7f2e86980de6d8e", "score": "0.57550436", "text": "def isUnitKind(*args):\n return _libsbml.Unit_isUnitKind(*args)", "title": "" }, { "docid": "17bdb2ab0e160984ed4e8ae0ea272706", "score": "0.575196", "text": "def _is_wildcard_match(s, wildcard):\n\n wildcard = wildcard.strip()\n glob_pat = re.compile(r'\\*(:(?P<type>\\w+))?$')\n m = glob_pat.match(wildcard)\n if m:\n if m.group('type'):\n type_to_meth = globals()['__builtins__']\n type_to_meth = {k:v for k,v in type_to_meth.items()\n if k in ['str','int','float','bool']}\n try:\n return isinstance(s, type_to_meth[m.group('type')])\n except KeyError:\n raise InvalidWildcardError(\"{} is an invalid type in {}\".format(\n m.group('type'), wildcard))\n return True\n raise InvalidWildcardError(wildcard)", "title": "" }, { "docid": "6849f2251649f8cb09b41e54f492aa10", "score": "0.5735394", "text": "def _valid_input_type(self, input_type):\n # pylint: disable=W0613, R0201\n return True", "title": "" }, { "docid": "80e80388f1c8bb1b1cf8c306f6f718cc", "score": "0.5732902", "text": "def _validate_type(self):\n # TODO: add transformation logic so that we don't have to transform outputs at every place they are used, including v1 back compat support\n if not spec_type_is_parameter(self.type):\n type_utils.validate_bundled_artifact_type(self.type)", "title": "" }, { "docid": "d83cff460dc04627eb3ec924539b0051", "score": "0.5730199", "text": "def is_type_correct(*args):\n return _ida_hexrays.is_type_correct(*args)", "title": "" }, { "docid": "c4b6ff3ea2cdb2f75ce82a81b343b8b6", "score": "0.57290936", "text": "def 
assert_is_correct_type(obj_type: Type, required_type: ObjectType):\n\n if not isinstance(obj_type, type) or not issubclass(obj_type, required_type):\n raise TypeError(f\"Expected {required_type!r}, but got: {obj_type!r}\")", "title": "" }, { "docid": "607160f3adffb68668a626eefcb1e742", "score": "0.5728469", "text": "def _isinstance(self, value: Any, typ: Any) -> bool:\n typ_args = getattr(typ, '__args__', ())\n if hasattr(typ, '__origin__'):\n # Drop subscripted extra type parameters from generic type.\n # (e.g. Dict[str, str].__origin__ == dict)\n # See https://www.python.org/dev/peps/pep-0585 for more information.\n typ = typ.__origin__\n if typ == Union:\n return any(self._isinstance(value, t) for t in typ_args)\n else:\n return isinstance(value, typ)", "title": "" }, { "docid": "c7ed32f042d276211b4ecbdeb120cc56", "score": "0.5728326", "text": "def validate(self):\n self._check_type()", "title": "" }, { "docid": "1cd96495dfada098336dbdd4fcde140c", "score": "0.5715316", "text": "def _is_name_type(self, type_id):\n return type_id == self.name_type", "title": "" }, { "docid": "2465944b977d6588f41e0cbe40777a23", "score": "0.5714489", "text": "def _check_type(self, new_value):\n raise NotImplementedError", "title": "" }, { "docid": "8183f11dbc44be95d500ced509a7880f", "score": "0.57140833", "text": "def validate_type(type):\n\n types_upper = [i.upper() for i in officeTypes]\n if type.upper() in types_upper:\n return True\n return False", "title": "" }, { "docid": "91030d2a9388f0e2725160a1ea4ae3a3", "score": "0.57039505", "text": "def _check(self, class_):\r\n\r\n if isinstance(class_, (types.FunctionType, types.LambdaType,\r\n types.ClassType, types.InstanceType)):\r\n return False\r\n if not hasattr(class_, '__dict__'):\r\n if not hasattr(class_, '__slots__'):\r\n return False\r\n return True", "title": "" }, { "docid": "8a50fb737b08073aae24cfc3fb4444af", "score": "0.5693937", "text": "def kind_of(obj):\n # why don't I use isinstance - it saves us big time\n\n # dict, list, and tuple are differianted from str, unicode, int, bool, and float\n # because they have special treatment and simple `==` or `is` is not enough to\n # prove them valid.\n obj_type = type(obj)\n if obj_type is dict:\n return TYPE_DICTIONARY\n elif obj_type is list:\n return TYPE_LIST\n elif obj_type is tuple:\n return TYPE_TUPLE\n elif obj in ATOMIC_TYPES:\n return TYPE_TYPE\n elif obj is object:\n return TYPE_OBJECT\n elif getattr(obj, \"__class__\", False) and issubclass(obj.__class__, BaseValidator):\n return TYPE_VALIDATOR\n elif callable(obj):\n return TYPE_FUNCTION\n # this f##king SRE_Pattern, why can't I f##king kill it\n elif getattr(obj, \"match\", False) and getattr(obj, \"search\", False):\n return TYPE_REGEX\n else:\n return TYPE_UNKNOWN", "title": "" }, { "docid": "3fa3145b083d38bbf6103b9c8eeb80dc", "score": "0.5692209", "text": "def _check_value_type(self, value):\n if value is not None and self.value_type is not None:\n valid = isinstance(value, self.value_type)\n if not valid:\n return False\n return True", "title": "" } ]
3890b2352dbe58ac483e3d71788032f3
reset the iterator to the starting position
[ { "docid": "83ce75be55bd351ece23d309076c9b98", "score": "0.6484919", "text": "def rewind(self):\n # virtual bool rewind() = 0;\n if self._index < 0:\n return False\n self._index = 0\n return True", "title": "" } ]
[ { "docid": "ee8d71f0cc8808c5655c458dd3186ec5", "score": "0.7686198", "text": "def reset_curr_iter(self):\n self.curr_iter = 0", "title": "" }, { "docid": "cc422a19bfebbcf07f078fba564b26b2", "score": "0.7562216", "text": "def reset(self):\n self.iterable = self._iterate()", "title": "" }, { "docid": "6b3c514e1caf8e2bac20dd9a977a4da5", "score": "0.7462351", "text": "def reset(self):\n self.cur_index = 0", "title": "" }, { "docid": "e1757ac67c4c29c70477fdf3717efa37", "score": "0.72797877", "text": "def reset(self) -> None:\n self.iidx = 0", "title": "" }, { "docid": "7f970879fefb5e094b23a9d5f9070138", "score": "0.7176328", "text": "def rewind(self):\n self._cursor = 0", "title": "" }, { "docid": "53169dab6a81b00c2d0c4a8b70f50d21", "score": "0.706502", "text": "def hard_reset(self):\n self.cursor = -self.batch_size", "title": "" }, { "docid": "dc9c99833287032ea8714762b4c6a2f5", "score": "0.6916338", "text": "def reset(self):\n self.index = 0", "title": "" }, { "docid": "dc9c99833287032ea8714762b4c6a2f5", "score": "0.6916338", "text": "def reset(self):\n self.index = 0", "title": "" }, { "docid": "cdabc4d2255549eb9624b4a30ac653f6", "score": "0.68905336", "text": "def reset(self):\n if self._counter > self._size:\n self._counter = self._counter % self._size\n else:\n logging.warning(\"DALI iterator does not support resetting while epoch is not finished. Ignoring...\")", "title": "" }, { "docid": "4e38216beaa7141e2f5cea619a5c20d7", "score": "0.68827045", "text": "def __iter__(self):\n self._position = -1\n return self", "title": "" }, { "docid": "cf51155ec00abcd6a3e1dc3021806e5f", "score": "0.68751705", "text": "def rewind(self):\n self.pos = 0", "title": "" }, { "docid": "d8e990fe56a1bf134f6b699564fa4135", "score": "0.6862966", "text": "def reset(self):\n self.currentIndex = 0\n self.ret = True", "title": "" }, { "docid": "483adffba3b0f05e064e1cfbc2293527", "score": "0.67884105", "text": "def reset(self):\n\t\tstart, initial = list(self.history.items())[0]\n\t\tself.positions = initial\n\t\tself.history = {start: initial}", "title": "" }, { "docid": "2ce3d52202e6065fdbc67e0f6f170aec", "score": "0.6784575", "text": "def restart_iteration(self):\n self._iter_cnt = 0", "title": "" }, { "docid": "c6a9868c664efa94c2cabefe82394878", "score": "0.67810756", "text": "def reset(self):\n self.t_start = 0", "title": "" }, { "docid": "af0ad5a4ecc6d6bd4db580a739b161a5", "score": "0.6768034", "text": "def reset(self):\n if self.next:\n self.next.reset()", "title": "" }, { "docid": "a7ffbea3a39b44bbca48d91187e742bd", "score": "0.671249", "text": "def reset(self):\n self.rest = set(range(0, self.data_num, 1))", "title": "" }, { "docid": "f1d50c19ea7ee44d870d8ae86c9e95bf", "score": "0.66844857", "text": "def start_iteration(self):\r\n self.iteration = 0", "title": "" }, { "docid": "ba5eae42d70ece28c4b83a8b09e1b9d0", "score": "0.66772074", "text": "def reset_iterator(self) -> None:\n if self.shuffle:\n np.random.shuffle(self.basket_list)\n self.basket_iterator = self._basket_iterator(self.basket_list)\n self.cached_samples = []", "title": "" }, { "docid": "29c48bd023a9de3750ef0676568e9a20", "score": "0.6657629", "text": "def reset(self):\n if self._length != 0:\n self._selected = self._head.next", "title": "" }, { "docid": "ca3ce84eb089066161e8f42a18c4fcaa", "score": "0.66411686", "text": "def reset(self):\n self.position = 0", "title": "" }, { "docid": "777d961147db4174e6225cca2bcee4a0", "score": "0.6627916", "text": "def reset_start_indices(self):\n\n ungap_ct = self.ungapped_count()\n for i in range(0, 
self.no_seq):\n if ungap_ct[i] == 0:\n self.start_indices[i] -= 1", "title": "" }, { "docid": "b9fa81c6c4cb831e0e1c802b2970c3da", "score": "0.6615494", "text": "def reset(cls) -> None:\n cls.idx = count(0)", "title": "" }, { "docid": "cf0fc6811de67733f13eada9b9c7c864", "score": "0.6556001", "text": "def rewind(self):\n Trace.trace(25, \"rewind\")\n self.loc = 0", "title": "" }, { "docid": "6371f89153d4949979768e60a5ee774c", "score": "0.6549016", "text": "def rewind(self, marker=None):\n pass", "title": "" }, { "docid": "9eca4130c2a1a71ae398dfd681dc14a8", "score": "0.65422374", "text": "def reset_pos(self):\n self.go_to(0, 0)", "title": "" }, { "docid": "17c262f7a86d796d36be5e2ae48eca87", "score": "0.6522708", "text": "def reset(self):\n\n self.cursor = [0, 0]\n self.pos = [0, 0]\n self.value = [\"\"] * self.rows", "title": "" }, { "docid": "bfbe0b58acd2bc81fb107013489ce2ec", "score": "0.6482749", "text": "def __iter__(self):\n self.iterator = 0\n return self", "title": "" }, { "docid": "bfbe0b58acd2bc81fb107013489ce2ec", "score": "0.6482749", "text": "def __iter__(self):\n self.iterator = 0\n return self", "title": "" }, { "docid": "5f6a8dd9bfecc4ae0ee9ce974e39bd09", "score": "0.64627934", "text": "def rewind(self, position: int):\n self.cursor = position", "title": "" }, { "docid": "d29dd9f36e1d0e684d6eaf058a7627d3", "score": "0.6457096", "text": "def reset(self):\r\n\r\n if (self.is_reversed):\r\n self.current = self.last\r\n else:\r\n self.current = self.first", "title": "" }, { "docid": "59916cc0135a6fac8de5141caad27f37", "score": "0.6455758", "text": "def reset(self):\n self._cursor = self._head._next\n return (self._cursor._element)", "title": "" }, { "docid": "c13d77ed4c7a19017916aee6ef0ae015", "score": "0.6449094", "text": "def __iter__(self):\n self.index = 0\n return self", "title": "" }, { "docid": "44781c9b64b2fe3d080842ed4bd48c2d", "score": "0.6449081", "text": "def __iter__(self):\n self._index = 0\n return self", "title": "" }, { "docid": "98645a67751177581513b48803c9c8f8", "score": "0.6444585", "text": "def reset(self):\n self.start = 0\n self.end = 0\n self.interval = 0", "title": "" }, { "docid": "163d5382663e1b1bbab723a8173e3ef2", "score": "0.64411396", "text": "def reset(self):\n\n self.p = 0\n self.line = 1\n self.charPositionInLine = 0\n self._markers = [ ]", "title": "" }, { "docid": "2e87b814a0edaf0a88672596cf2e2e26", "score": "0.64327496", "text": "def reload(self):\n self.index = 0\n self.iter = iter(self.subset)", "title": "" }, { "docid": "4703f6debb1e59af45bf48d37ac4f29a", "score": "0.6431341", "text": "def __iter__(self):\n self.islice = 0\n return self", "title": "" }, { "docid": "e0e055b38c7b0249957800affb89c9bf", "score": "0.6389766", "text": "def reset(self):\n self.start_vec = None\n self.end_vec = None\n self.captured = None", "title": "" }, { "docid": "69ec6f736338a23b3ccf8c1080cff6e9", "score": "0.6388214", "text": "def Reset(self):\n self._tc.SetValue(self.startValue)\n self._tc.SetInsertionPointEnd()", "title": "" }, { "docid": "54187a07139f5f8f50ef296dee4b13ef", "score": "0.6375765", "text": "def reset(self):\n self.close()\n self.cur_item = 0", "title": "" }, { "docid": "fcb8451873fb10fde1d3ba06965e2a3b", "score": "0.6375754", "text": "def Reset(self, start=0):\n self.ReplaceDocument({'count': start})\n return self.GetCount()", "title": "" }, { "docid": "1001e710926d6f5a6bcb0356d0ae0dba", "score": "0.63672763", "text": "def reset_position(self):\n \n for i in range(self.__current_step):\n self.__stepper.onestep(direction = stepper.BACKWARD, style = 
stepper.DOUBLE)\n self.__current_step = 0", "title": "" }, { "docid": "0c34d436b9d61df39ca0e0a89633a0c0", "score": "0.6359039", "text": "def rewind(self, marker=None):\n\n raise NotImplementedError", "title": "" }, { "docid": "a99d7fedc3f4bf1f9588e1562d401f1a", "score": "0.6353796", "text": "def _reset(self):\r\n self._index = 1\r\n self._files.clear()", "title": "" }, { "docid": "3df765d89210f9fa264986a94b01c4bb", "score": "0.6317247", "text": "def _reset_state(self):\n super(Cursor, self)._reset_state()\n self._nextUri = None\n self._columns = None", "title": "" }, { "docid": "49f35e03ebfc5ff058a874605800e489", "score": "0.6300255", "text": "def reset(self):\n self._next = 0\n self._cur_delay = self._initial_delay", "title": "" }, { "docid": "1d7ea6abae66bcfcaff739281b6e90a0", "score": "0.62965417", "text": "def reset(self):\r\n self.__start = None\r\n self.__done = False", "title": "" }, { "docid": "a0584e7ad2cee1ba562e1475156ad555", "score": "0.62238806", "text": "def reset(self):\n self.page_number = 0\n self.next_page_token = None\n self.num_results = 0\n self._page = None", "title": "" }, { "docid": "8ba6335acb476d2015405f502146e867", "score": "0.6217486", "text": "def reset(self):\n self.index= 0\n self.cIndex= 0\n self.cLocation= (0, 0)", "title": "" }, { "docid": "dd9f4b94155aeb50501048d5ef76521d", "score": "0.62153906", "text": "def reset(self):\n self._step = 0", "title": "" }, { "docid": "308e6fe7d1080f9f2ba176f14d3ec873", "score": "0.6172614", "text": "def reset(self):\n\n self.pause()\n self.move_to_step({'step': 0})", "title": "" }, { "docid": "5eba595e96fe3c0f8e1748103576c4d1", "score": "0.6139679", "text": "def reset(self):\n self.cur = self.root", "title": "" }, { "docid": "270c4f774ab1f554fa63ca617aee2eb6", "score": "0.61195815", "text": "def reset(self) -> None:\n self.tick = iter(self.stylus)\n self.loc = 0", "title": "" }, { "docid": "22a7493e1517c40b965051a4fb2d49c9", "score": "0.6089358", "text": "def __iter__(self):\n self._iter_count = -1\n return self", "title": "" }, { "docid": "61a25f0a5123c9e8d55041f931e46c24", "score": "0.60688967", "text": "def advance(self):\n self.__index = self.__index + 1", "title": "" }, { "docid": "08fcaffbb8e8894228222d5c51adb91b", "score": "0.604613", "text": "def reset(self):\r\n self.pop()", "title": "" }, { "docid": "08fcaffbb8e8894228222d5c51adb91b", "score": "0.604613", "text": "def reset(self):\r\n self.pop()", "title": "" }, { "docid": "afde25c9682af393c70a25afc0bf934d", "score": "0.6016786", "text": "def _reset(self):\n\n self._orbit_breaks = []\n self.num = 0\n self._current = 0\n return", "title": "" }, { "docid": "e3d1bfc99ceee9076ac8ab6038c88336", "score": "0.6014592", "text": "def reset(self):\n\n self.currentDataPointIndex = 0\n self.batchDataPointOrder = list(range(self.maxBatchSize))\n self.previousAction = None\n self.b = None", "title": "" }, { "docid": "928d7f4c8e20150639a2501509db9673", "score": "0.6009265", "text": "def set_iterator(self, matched: dict):\n self.iterator = [i for i, node in enumerate(self.nodes)\n if node not in matched.keys()][1:] # we exclude zero index", "title": "" }, { "docid": "9f8a0bdee1433342f9bf76d1e4f45808", "score": "0.6008185", "text": "def reset_start(self, sub_batch=0):\n pass", "title": "" }, { "docid": "07862b2bd0fee7931dfc375119a53247", "score": "0.6006511", "text": "def __iter__(self):\n self._count = 0\n return self", "title": "" }, { "docid": "fe9f2334e83c1213f7d6bf7b7429ac32", "score": "0.59668946", "text": "def reset(self):\n for segment in self.segment_list:\n 
segment.goto(x=600, y=600)\n self.segment_list.clear()\n self.create_snake()\n self.head = self.segment_list[0]", "title": "" }, { "docid": "961fbff2154548c5a8b7264e1fc5b522", "score": "0.5964466", "text": "def reset(self):\r\n self._buffer.fill(0)", "title": "" }, { "docid": "e9be3a803b914a60918e8ae69771a961", "score": "0.5951785", "text": "def reset(self):\n self._buffer.fill(0)", "title": "" }, { "docid": "083e471c19eb46e4f2b6700a6ca0af44", "score": "0.59481937", "text": "def reset(self):\n self.index = 0\n self.acc = 0\n self.index_run = []\n self.temp_code = self.read_input_strings()", "title": "" }, { "docid": "df1e138a3e8a68362fd922eeb531a003", "score": "0.59440506", "text": "def __iter__(self):\n self._counter = 0\n return self", "title": "" }, { "docid": "6a403a55ee29f3439e43abb7bb08fb90", "score": "0.5941028", "text": "def advance(self, offset: int):\n self.rewind(self.cursor + offset)", "title": "" }, { "docid": "a1280c02999fe98531a1ec18b35b63be", "score": "0.59370184", "text": "def rewind(self):\n\n self.rawfile.seek(0)", "title": "" }, { "docid": "2addce62663b584c03c12d9867db8834", "score": "0.593201", "text": "def reset_start(self):\n pass", "title": "" }, { "docid": "6f0a79f156ebca94343b6e554fd50cf7", "score": "0.59237546", "text": "def reset(self):\n #todo", "title": "" }, { "docid": "3f833684370aba8474857b7314624e77", "score": "0.59132004", "text": "def restart(self):\n self[:] = []\n self.begin_time = time.time()", "title": "" }, { "docid": "92c8247af8829565902bff44d2518bd6", "score": "0.5907727", "text": "def __init__(self, start=False):\n self.reset(start=start)", "title": "" }, { "docid": "86c760d740dec852e4ff9d6175919559", "score": "0.59052366", "text": "def reset(self):\n self.super_rows.fill(0)", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.58938193", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.58938193", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.58938193", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.58938193", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.58938193", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.58938193", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.58938193", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.58938193", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.58938193", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.58938193", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.58938193", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.58938193", "text": "def reset(self):", "title": "" }, { "docid": "fbcb1269c93993519b97927ffbb53542", "score": "0.58938193", "text": "def reset(self):", "title": "" }, { "docid": "f2c36c070470f4202c6bcd6bfe95e933", "score": "0.58935314", "text": "def reset(self):\n raise NotImplementedError", "title": "" }, { "docid": "f2c36c070470f4202c6bcd6bfe95e933", "score": "0.58935314", "text": "def reset(self):\n raise NotImplementedError", "title": "" }, { "docid": 
"f2c36c070470f4202c6bcd6bfe95e933", "score": "0.58935314", "text": "def reset(self):\n raise NotImplementedError", "title": "" }, { "docid": "f2c36c070470f4202c6bcd6bfe95e933", "score": "0.58935314", "text": "def reset(self):\n raise NotImplementedError", "title": "" }, { "docid": "f2c36c070470f4202c6bcd6bfe95e933", "score": "0.58935314", "text": "def reset(self):\n raise NotImplementedError", "title": "" }, { "docid": "f2c36c070470f4202c6bcd6bfe95e933", "score": "0.58935314", "text": "def reset(self):\n raise NotImplementedError", "title": "" }, { "docid": "f2c36c070470f4202c6bcd6bfe95e933", "score": "0.58935314", "text": "def reset(self):\n raise NotImplementedError", "title": "" }, { "docid": "f2c36c070470f4202c6bcd6bfe95e933", "score": "0.58935314", "text": "def reset(self):\n raise NotImplementedError", "title": "" }, { "docid": "f2c36c070470f4202c6bcd6bfe95e933", "score": "0.58935314", "text": "def reset(self):\n raise NotImplementedError", "title": "" }, { "docid": "f2c36c070470f4202c6bcd6bfe95e933", "score": "0.58935314", "text": "def reset(self):\n raise NotImplementedError", "title": "" }, { "docid": "f2c36c070470f4202c6bcd6bfe95e933", "score": "0.58935314", "text": "def reset(self):\n raise NotImplementedError", "title": "" }, { "docid": "6ae1c60b824cc09ad0fbcddf0e24547b", "score": "0.5884278", "text": "def reset(self):\n self.num_used = 0", "title": "" } ]
01b117fec804d3ca4610422e1eefe55a
Launch djv_view. If path is specified and is a file, automatically load sequence. If path is a directory, start in that directory. If path is not specified, use shot directory.
[ { "docid": "40c4bac34994153aece947f111717519", "score": "0.7444207", "text": "def viewer(path=None):\n\t# Get starting directory\n\tstartupDir = os.environ['IC_SHOTPATH']\n\tpathIsFile = False\n\tif path is not None:\n\t\tif os.path.isfile(path):\n\t\t\tstartupDir = os.path.dirname(path)\n\t\t\tpathIsFile = True\n\t\telif os.path.isdir(path):\n\t\t\tstartupDir = path\n\n\t# Export path to djv codec libraries according to OS\n\tcmdStr = exportDjvLibs()\n\tif os.environ['IC_RUNNING_OS'] == \"Windows\":\n\t\tcmdStr += 'cd /d \"%s\" & ' %startupDir\n\telse:\n\t\tcmdStr += \"cd %s; \" %startupDir\n\n\t# Process playback speed string\n\tplaybackSpeedArg = processSpeed(\"playback_speed\")\n\n\t# Build the command based on whether path is a file or a directory\n\tif pathIsFile:\n\t\tcmdStr += '\"%s\" %s \"%s\"' %(os.environ['DJV_PLAY'], playbackSpeedArg, path)\n\telse:\n\t\tcmdStr += '\"%s\" %s' %(os.environ['DJV_PLAY'], playbackSpeedArg)\n\n\t# Call command with subprocess in order to not lock the system while djv\n\t# is running\n\tverbose.print_(cmdStr, 4)\n\tsubprocess.Popen(cmdStr, shell=True)", "title": "" } ]
[ { "docid": "1ec470864416f9fe138c2513231647fc", "score": "0.6450935", "text": "def run(self, path, frameRanges, views, options):\n ...", "title": "" }, { "docid": "55571b58c4c6de83a726f97a9e9b8494", "score": "0.58631945", "text": "def run_frame(path):\n\n if not os.path.exists(getabspath(path, False)):\n flash(\"Couldn't find genome in: %s\" % path)\n return redirect(url_for('start'))\n\n return flask.render_template(\n 'processing.html', **{\n 'BASE_URL': app.config['APPLICATION_ROOT'],\n 'PATH': path,\n 'status_base': url_for('.run_status', path=''),\n 'kickstart': url_for('kickstart', path=path), # args=[path]\n 'translate': url_for('translate'),\n 'fetch_base': url_for('raw', path=''),\n 'acgt_gamma_base': url_for('.acgt_gamma_file_list', path=''),\n 'STATIC_BASE': url_for('static', filename=''),\n 'base_href': url_for('run_frame', path=path)\n })", "title": "" }, { "docid": "37e79820b96a5aa84c65ade488a01b2a", "score": "0.585221", "text": "def cmd_view(args):\n config.load(args['root'])\n view_archive_files(args['files'])", "title": "" }, { "docid": "977bb26f31ee6c51940bca6beb6c1d6d", "score": "0.576764", "text": "def _run_path(self, path):\n # Expand home and use absolute path\n path = os.path.abspath(os.path.expanduser(path))\n if os.path.exists(path):\n if os.path.isdir(path):\n # Focus directory in the library\n self._app[\"library\"].move_up(path)\n self._last_widget = \"lib\"\n else:\n # If it is an image open it\n self._app.populate([path])\n self._app[\"image\"].load()\n # Reload library in lib mode, do not open it in image mode\n pathdir = os.path.dirname(path)\n if self._last_widget == \"lib\":\n self._app[\"library\"].move_up(pathdir)\n # Focus it in the treeview so it can be accessed via \"l\"\n index = \\\n self._app[\"library\"].files.index(os.path.basename(path))\n self._app[\"library\"].set_cursor(\n Gtk.TreePath(index), None, False)\n # Show the image\n self._app[\"library\"].set_hexpand(False)\n self._app[\"main_window\"].show()\n else:\n self._app[\"library\"].move_up(pathdir, True)\n else:\n self._app[\"statusbar\"].message(\"Not a valid path\", \"error\")", "title": "" }, { "docid": "16f37a22b3ad10b2c721090dbdf59dc2", "score": "0.56455743", "text": "def launch(path):\n from glue.external.six.moves.socketserver import TCPServer\n from glue.external.six.moves.SimpleHTTPServer import SimpleHTTPRequestHandler\n from random import randrange\n from socket import error\n import webbrowser\n from threading import Thread\n\n os.chdir(path)\n\n while True:\n try:\n PORT = randrange(8000, 9000)\n server = TCPServer((\"\", PORT), SimpleHTTPRequestHandler, False)\n server.allow_reuse_address = True\n server.server_bind()\n break\n except error: # port already taken\n pass\n\n print('Serving D3PO on port 0.0.0.0:%i' % PORT)\n server.server_activate()\n\n thread = Thread(target=server.serve_forever)\n thread.setDaemon(True) # do not prevent shutdown\n thread.start()\n webbrowser.open('http://0.0.0.0:%i' % PORT)", "title": "" }, { "docid": "b67b3fe7bc86cc2443e3b187e29851ea", "score": "0.558446", "text": "def startfile(path):\n os.startfile(path) # nosec: B606", "title": "" }, { "docid": "c865e61b094f6a84aa052ed8edb94ac4", "score": "0.5561676", "text": "def cli(ctx, path, **kwds):\n click.launch(path)", "title": "" }, { "docid": "3ccd8539de397b235122b74de9501c4b", "score": "0.5560015", "text": "def view():\n\n # Test if the module is available.\n if not dep_check.vmd_module:\n raise RelaxError(\"VMD is not available (cannot import Scientific.Visualization.VMD due to missing 
Numeric dependency).\")\n\n # Test if the PDB file has been loaded.\n if not hasattr(cdp, 'structure'):\n raise RelaxNoPdbError\n\n # Create an empty scene.\n cdp.vmd_scene = VMD.Scene()\n\n # Add the molecules to the scene.\n for i in range(len(cdp.structure.structures)):\n cdp.vmd_scene.addObject(VMD.Molecules(cdp.structure.structures[i]))\n\n # View the scene.\n cdp.vmd_scene.view()", "title": "" }, { "docid": "293ac9fd11840026a5ed80a8f6fc5ba6", "score": "0.5520021", "text": "def run(path, debug):\n click.echo(f'Running {path}')\n mapper = Mapper(path, debug=debug)\n mapper.run()", "title": "" }, { "docid": "13f4382a830503184f1e73427fe1e4bd", "score": "0.5445485", "text": "def startfile(path):\n pass", "title": "" }, { "docid": "c78e63636612dc1215c74a547d52ee5b", "score": "0.5380279", "text": "def run(path: str, resolution_width: int, resolutuon_height: int,\n standard_size: int):\n\n theApp = App(path, resolution_width, resolutuon_height, standard_size)\n theApp.on_execute()", "title": "" }, { "docid": "9aeb47a3ff6169756dca426cba04da1f", "score": "0.53429234", "text": "def load_from_view(self):\n\n # Set pillowtop checkpoint for doc_class\n # though this might cause some superfluous reindexes of docs,\n # we're going to set the checkpoint BEFORE we start our operation so that any changes\n # that happen to cases while we're doing our reindexing would not get skipped once we\n # finish.\n\n current_db_seq = self.pillow.couch_db.info()['update_seq']\n self.pillow.set_checkpoint({'seq': current_db_seq})\n\n # Write sequence file to disk\n seq_filename = self.get_seq_filename()\n self.log('Writing sequence file to disk: {}'.format(seq_filename))\n with open(seq_filename, 'w') as fout:\n fout.write(str(current_db_seq))\n\n # Load entire view to disk\n dump_filename = self.get_dump_filename()\n self.log('Writing dump file to disk: {}, starting at {}'.format(\n dump_filename, datetime.utcnow().isoformat()))\n with open(dump_filename, 'w') as fout:\n for row in self.full_couch_view_iter():\n fout.write('{}\\n'.format(simplejson.dumps(row)))\n self.log(\"View and sequence written to disk: %s\" % datetime.utcnow().isoformat())", "title": "" }, { "docid": "8817a5b474dc73602975862d5f284962", "score": "0.51839685", "text": "def run_script(self, pathname):\n\t\tpass", "title": "" }, { "docid": "338eb182fa57818be571dcbdef83823e", "score": "0.51783466", "text": "def pre(self, command, output_dir, vars):\n vars['view_filename'] = vars['view_name'].lower().replace(' ', '')\n vars['view_classname'] = vars['view_name'].replace(' ', '')", "title": "" }, { "docid": "e61e5dc755c94ffe017249a4eb7a962b", "score": "0.51756454", "text": "def run_view(self):\n self.view.mainloop()", "title": "" }, { "docid": "4434fdac616e80a135498dc917aaa50c", "score": "0.5137802", "text": "def run(view, edit):\n errors.remove(\"vet\", view)\n root = buffer.root(view)\n file = buffer.filename(view)\n pkg = buffer.package(view)\n\n if len(view.window().folders()) == 0:\n # When sublime is opened with `$ subl main.go`\n target = file\n cwd = root\n else:\n target = './...'\n cwd = pkg\n\n args = [\"vet\"] + [\"--\" + a for a in conf.vet_analyzers()] + [target]\n cmd = exec.Command(\"go\", args=args, cwd=cwd)\n res = cmd.run()\n\n if res.code != 0:\n errs = lint.parse(res.stderr, (root, cwd, file), \"vet\")\n log.debug('vet: {}', errs)\n errors.update(\"vet\", view, errs)", "title": "" }, { "docid": "57e230544bdc70322c45112fc9541eb2", "score": "0.51179403", "text": "def _LaunchViewerProcess(filename):\n launch_base_command = []\n if 
platform.system() == \"Darwin\": # MacOSX\n launch_base_command = [\"open\", \"-W\", \"-a\", \"Skim\"]\n elif platform.system() == \"Linux\":\n launch_base_command = [\"evince\"]\n\n return subprocess.Popen(launch_base_command + [filename])", "title": "" }, { "docid": "03896304135be85b71dd9ec6919b4b5e", "score": "0.5091855", "text": "def load(self, path, filename, *args, **kwargs):\n if len(filename) > 0:\n self.path = os.path.join(path, filename[0])\n self.tlm.ui_path.text = ' ' + self.path\n self.tlm.viewer.load(self.path)\n\n self.dismiss_popup()", "title": "" }, { "docid": "67d674e5c9461a17d16ea51e7cd3d164", "score": "0.5079934", "text": "def run(self):\n self._view.run()", "title": "" }, { "docid": "c769152c43b2aa4010dcba672204c115", "score": "0.50435644", "text": "def vdq(dbdir):\n _ibsdb = constants.PATH_NAMES._ibsdb\n utool.util_cplat.view_directory(join(dbdir, _ibsdb))\n sys.exit(1)", "title": "" }, { "docid": "a52d62e26a51c68cf56ed9057aad46b6", "score": "0.5023681", "text": "def view():\n parser = argparse.ArgumentParser(description=view_archive_files.__doc__)\n parser.add_argument('files', nargs='+',\n help=\"\"\"files to view.\"\"\")\n args = vars(parser.parse_args())\n view_archive_files(args['files'])", "title": "" }, { "docid": "0af53e84fb722cff12bff1d5438d3405", "score": "0.5009949", "text": "def run(self):\r\n return self.inner_cmd(\"launch\")", "title": "" }, { "docid": "c766528e3db155711ea09be5bde13043", "score": "0.5000481", "text": "def assimilate(self, path):\n logger.info(\"Getting task doc for base dir :{}\".format(path))\n vasprun_files = self.filter_files(path, file_pattern=\"vasprun.xml\")\n outcar_files = self.filter_files(path, file_pattern=\"OUTCAR\")\n if len(vasprun_files) > 0 and len(outcar_files) > 0:\n d = self.generate_doc(path, vasprun_files, outcar_files)\n self.post_process(path, d)\n else:\n raise ValueError(\"No VASP files found!\")\n self.validate_doc(d)\n return d", "title": "" }, { "docid": "77c7e2f9708ee87ea912ca379e1c2d21", "score": "0.4998633", "text": "def load_from_file(self, path):\n\n if self.allow_output:\n print(\"<-Load file '%s'->\\n\" % path)\n\n selma_file_reader.load_selma_file(self, path)\n\n if self.allow_output:\n print(\"\")", "title": "" }, { "docid": "575a71e84983775c5cb4a64ec71b7b3c", "score": "0.49925908", "text": "def run_export(path=None):\n filename = str(uuid1())\n return _export_selected(path=path, filename=filename)", "title": "" }, { "docid": "c7c56b511aed9a5e442d4ee5630e0cc8", "score": "0.49863985", "text": "def view_file(self, path=None, num_hdu=0, chname=None, imname=None,\n tag=None):\n self.ocs.view_file(path, num_hdu=num_hdu, chname=chname,\n imname=imname)", "title": "" }, { "docid": "1d2d2d300042b7912dee18a8ac65d4bc", "score": "0.4984439", "text": "def StartLocalDvid(self, repoName, portName, port, imagePath):\n\t\treturn DvidResource.StartLocalDvid(repoName,portName,port,imagePath)", "title": "" }, { "docid": "25f235d1fe70001aada53b1600616c76", "score": "0.49723914", "text": "def view(*args, **kwargs):\n app = get_qapplication_instance()\n \n window = AstViewer(*args, **kwargs)\n window.show()\n\n if 'darwin' in sys.platform:\n window.raise_()\n \n logger.info(\"Starting {} the event loop.\".format(PROGRAM_NAME))\n exit_code = app.exec_()\n logger.info(\"{} viewer done...\".format(PROGRAM_NAME))\n return exit_code", "title": "" }, { "docid": "1ece1287cd185fd11cb73994e8f286b0", "score": "0.4939685", "text": "def execute(view: AbstractView, configuration: AbstractConfiguration) -> Controller:\n\n controller = 
start(view, configuration)\n controller.waitForProgram()\n\n return controller", "title": "" }, { "docid": "8687999de6d27ef6b07d1f7dae213511", "score": "0.49318635", "text": "def run(self, directory):\n pass", "title": "" }, { "docid": "369cfd0ee4c2728af147cc3a90c5a9c8", "score": "0.4928434", "text": "def run_import(path=None):\n return _import_file(path=path)", "title": "" }, { "docid": "0029c8813d3ce687b63eb69a8cba3e3a", "score": "0.4916667", "text": "def read_traj_vmd():\n # parser = argparse.ArgumentParser()\n\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"-f\", \"--psf\", dest=\"psffile\",\n help=\"Name of the future files, all other files will start with FILE\",\n metavar=\"FILE\")\n parser.add_argument(\"-d\", \"--data\", dest=\"datafile\",\n default=\"./figures/polymer_0.8.data\",\n # type=lambda x: is_valid_file(parser, x),\n help=\"read datafile and if exists then convert it to psf file by invoking a vmd script\",\n metavar=\"FILE\")\n\n parser.add_argument(\"-t\", \"--trajectroy\", dest=\"traj\",\n default=\"quench.dcd\",\n type=lambda x: is_valid_file(parser, x),\n help=\"Input trajectory file)\", metavar=\"FILE\")\n\n parser.add_argument(\"-e\", \"--endframe\", dest=\"endframe\",\n default=-1,\n type=int,\n help=\"End frame of the trajectory file type (default: %(default)s)\")\n parser.add_argument(\"-st\",\n \"--startframe\",\n dest=\"startframe\",\n default=0,\n type=int,\n help=\"Start frame of the trajectory file type (default: %(default)s)\")\n\n\n ######## skipping info\n\n parser.add_argument(\"-autos\",\n \"--auto_traj_skip\",\n action=\"store_true\",\n dest=\"auto_traj_skip\",\n default=False,\n help=\"---Skipping---: do you want skipping automaticallty configured as 120 frames of whole trajectory. 
If provided --trajskip becomes a dummy variable(default: %(default)s)\")\n parser.add_argument(\"-s\", \"--trajskip\", dest=\"trajskip\",\n default=40,\n type=int,\n help=\"How many steps are to be skipped when trajectory \\\n file is being red\\\n (needs to be > 1, < number of frames) \\\n type (default: %(default)s)\")\n parser.add_argument(\"-fd\",\n \"--filedumpskip\",\n action=\"store_true\",\n dest=\"filedumpskip\",\n default=False,\n help=\"---Skipping---: Use dumpskip info from dumpskip.txt(default: %(default)s)\")\n\n\n parser.add_argument(\"-kp\",\n \"--keyparameter\",\n dest=\"keyparameter\",\n default=\"quench\",\n help=\"keyparameter, can be: kremer,coolprep,restprep,quench\")\n\n\n parser.add_argument(\"-ds\",\n \"--dumptrajskip\",\n dest=\"dumpskip\",\n default=200000,\n type=float,\n help=\"---Skipping---: This is a factor for transforming trajectory frames into lj units, numframes*dumpskip = simtime [lj] (default: %(default)s)\")\n\n parser.add_argument(\"-ts\",\n \"--timestep\",\n dest=\"timestep\",\n default=0.001,\n type=float,\n help=\"in case filelogskip=False, This is a timestep for transforming trajectory frames into lj units, numframes*logskip = simtime [lj] (default: %(default)s)\")\n\n\n\n ## ----------Crystallization ----------------\n\n parser.add_argument(\"-ne\",\n \"--neighbor\",\n dest=\"neigh\",\n default=10,\n type=int,\n help=\"ICC Crystalinity: Number of neighbours to consider for AnalyzeChain crystalinity parameter program (default: %(default)s)\")\n parser.add_argument(\"-th\",\n \"--threshold\",\n dest=\"threshold\",\n default=0.95,\n type=float,\n help=\"ICC Crystalinity: threshold for ICC crystalinity parameter, chains with p2 higher than threshold are \\\n considered to be crystaline(default: %(default)s)\")\n\n parser.add_argument(\"-na\",\n \"--nAtomsPerBox\",\n dest=\"nAtomsPerBox\",\n default=3,\n type=int,\n help=\"---YAMAMOTO---: Mesh size for Yamamoto crystalinity parameter program, How many atoms per little mesh box do you want? (default: %(default)s)\")\n\n parser.add_argument(\"-wr\",\n \"--wrap\",\n action=\"store_true\",\n dest=\"wrap\",\n default=False,\n help=\"---YAMAMOTO---: wraping trajectory or no? Yamamoto crystalinity parameter program (default: %(default)s)\")\n\n ## --------- for other programs like AnalyzeRDF, AnalyzeEnd2End\n\n parser.add_argument(\"-noff\",\n \"--noffset\",\n dest=\"Noff\",\n default=1,\n type=int,\n help=\"End2End: Number of last points that we don't want to see (default: %(default)s)\")\n\n\n parser.add_argument(\"-calc\",\n \"--calculateRDF\",\n action=\"store_true\",\n dest=\"calc\",\n default=False,\n help=\"(---RDF---: Do you want to write .vmd script that can analyze RDF (default: %(default)s)\")\n parser.add_argument(\"-p\",\n \"--plot\",\n action=\"store_true\",\n dest=\"plot\",\n default=False,\n help=\"---RDF---: Do you want to plot your results? (default: %(default)s)\")\n parser.add_argument(\"-lp\",\n \"--logplot\",\n action=\"store_true\",\n dest=\"logplot\",\n default=False,\n help=\"Do you want time(or N) to be plotted with log(t) scale? 
(default: %(default)s)\")\n\n\n\n\n args = parser.parse_args()\n args = create_psf(args)\n if args.wrap==True:\n args = create_wrapdcd(args)\n else:\n print \" wrap is set to False, using non-wrapped coordinates\"\n\n print args\n print colored('parameters have been red', 'green')\n\n return args", "title": "" }, { "docid": "226ecf788efb6c813bd101ec48ca19ec", "score": "0.48919916", "text": "def open_file(self, path=None):\n if not path:\n path = tkinter.filedialog.askopenfilename()\n if path:\n self.model.open_file(path)", "title": "" }, { "docid": "ba9ae18db97a31416905f8505a7561fc", "score": "0.48877218", "text": "def main(filepath):\n file_output = view_directory_contents(filepath)\n \n click.echo(file_output)", "title": "" }, { "docid": "94d1d5c3195cdd58a3e7bcb7ca13d641", "score": "0.48768693", "text": "def run_visualization(url, target_path, skeleton_point=None, skeleton_box=None):\n # TODO :: Need to modify url to path\n try:\n # f = urllib.request.urlopen(url)\n # jpeg_str = f.read()\n # original_im = Image.open(BytesIO(jpeg_str))\n\n\n original_im = Image.open(url)\n except IOError:\n print('Cannot retrieve image. Please check url: ' + url)\n return\n\n print('running deeplab on image %s...' % url)\n resized_im, seg_map = MODEL.run(original_im)\n\n\n if skeleton_point is not None:\n normalized_hip_point = get_normalized_hip_point_of_image(original_im, skeleton_point, skeleton_box)\n\n seg_map = get_separated_segmap_with_hip_skeleton(seg_map, normalized_hip_point, is_top=True)\n\n seg_map = get_highlight_segmap(seg_map, normalized_hip_point)\n\n vis_segmentation(resized_im, seg_map, target_path)", "title": "" }, { "docid": "61653665694a6296ecce06078e9694ee", "score": "0.4869505", "text": "def view(self,filename,param=None,importcgi=False):\n if filename.startswith('mvc://'):\n viewerfile = self.config.mvcpath+'/'+filename[6:] \n\n elif filename.startswith('file://'):\n viewerfile = filename[7:]\n\n else:\n viewerfile = self.config.mvcpath+self.path+'/'+filename\n\n viewerfile = os.path.normpath(viewerfile.replace('\\\\','/')) \n \n try:\n fViewer = open(viewerfile,'r')\n except Exception,e:\n self.html= self.main.appException(self.controllerfilename,str(\"Kann VIEWER nicht finden:<br />%(filename)s<br />%(error)s\" % {'filename':filename,'error':e}))\n return False\n\n text = fViewer.read()\n\n try:\n psp = PSP(template=text,filename=filename,cgiparam=self.cgiparam,isReadonly=self.isReadonly,controller=self)\n except Exception,e:\n self.html= self.main.appException(self.controllerfilename,str(\"Fehler im Template:<br />%(filename)s<br />%(error)s\" % {'filename':filename,'error':e}))\n return False\n\n # Importiert alle CGI Parameter in Param\n if importcgi:\n self.form=cgi.FieldStorage()\n for fld in self.form:\n param[fld]=self.form[fld].value\n value = ''\n \n value = psp.render(param)\n\n self.html = self.html + value\n return True", "title": "" }, { "docid": "7736607610710ca14ba031321665eeb1", "score": "0.4863449", "text": "def start(view: AbstractView, configuration: AbstractConfiguration) -> Controller:\n \n controller = setup(view, configuration)\n controller.startProgramLoop()\n\n return controller", "title": "" }, { "docid": "3126c1254aa421d3fbba9a039591c441", "score": "0.4860672", "text": "def quick_view(\n dir_path: Path,\n config: Optional[Configuration] = None,\n decimate: bool = False,\n max_pts: int = 10_000,\n):\n logger.info(f\"Plotting time data in {dir_path}\")\n if config is None:\n config = get_default_configuration()\n\n time_data = quick_read(dir_path, 
config)\n time_data = run_time_processors(config, time_data)\n if not decimate:\n return time_data.plot(max_pts=max_pts)\n dec_params = config.dec_setup.run(time_data.metadata.fs)\n dec_data = run_decimation(config, time_data, dec_params=dec_params)\n return dec_data.plot(max_pts=max_pts)", "title": "" }, { "docid": "4457227be95fad293c76c216c513e55e", "score": "0.48583424", "text": "def view(args = None):\n parser = argparse.ArgumentParser(description='Create a new note')\n parser.add_argument('filename', help='filename to view')\n args = parser.parse_args(args)\n notes.view_file(args.filename)", "title": "" }, { "docid": "edc7b5aff034c5512863839b877ad550", "score": "0.48381686", "text": "def open_window(path):\n try:\n call([\"open\", \"-R\", str(Path(str(path)))])\n except FileNotFoundError:\n Popen(r'explorer /select,' + str(Path(str(path))))", "title": "" }, { "docid": "4529d2ab725d42ca772f8e6fa83a7b64", "score": "0.48322922", "text": "def open_in_ds9():\n # ds9 commands\n ds9 = '/Applications/ds9.app/Contents/MacOS/ds9' + ' '\n \n files = [ 'stamp_0/stamp_%d.fits.gz' %i for i in range(1000)]\n filenames = ' '.join(files)\n \n cmd = ds9 + ' -height 1200 ' + ' -width 2500 ' + filenames\n print(cmd)\n subprocess.call(cmd, shell=True)", "title": "" }, { "docid": "01e7bc44d7eb8819cb21f0be032c64c8", "score": "0.48187009", "text": "def load(self, path=\"./\"):\n return self", "title": "" }, { "docid": "377284db7e639c941f6a02af1cc2531c", "score": "0.4817326", "text": "def view(ctx):\n\n report.info(ctx, \"docs.view\", f\"viewing documentation\")\n build_path = ctx.docs.directory / \"build\" / \"html\" / \"index.html\"\n build_path = pathname2url(build_path.as_posix())\n webbrowser.open(f\"file:{build_path!s}\")", "title": "" }, { "docid": "b126cb46af35b92ba782dd7fee20382e", "score": "0.48167342", "text": "def reactvr(path=None):\n if path:\n return flask.render_template('pano_reactvr.html', context=config.CONTEXT, pano=path)\n else:\n return flask.render_template('index_reactvr.html', context=config.CONTEXT, panos=utils.get_panos())", "title": "" }, { "docid": "6ec0ae6e0656a697488cdfaa464e7691", "score": "0.48157912", "text": "def __init__(self, path='', width=0, height=0, depth=3, database='Unknown', segmented=0):\n environment = Environment(loader=PackageLoader('pascal_voc_tools', 'templates'), keep_trailing_newline=True)\n self.annotation_template = environment.get_template('annotation.xml')\n\n abspath = os.path.abspath(path)\n\n self.template_parameters = {\n 'path': abspath,\n 'filename': os.path.basename(abspath),\n 'folder': os.path.basename(os.path.dirname(abspath)),\n 'size': {'width': width, 'height': height, 'depth': depth, },\n 'source': {'database': database, },\n 'segmented': segmented,\n 'object': []\n }", "title": "" }, { "docid": "166aa4875065d015893c4d96b6479ae7", "score": "0.48018083", "text": "def _call_codev(self, seqfile):\n # Make the CODEV Call\n logging.info(\"Calling codev.exe\")\n subprocess.call(\"C:\\CODEV105_FCS\\codev.exe %s\"%seqfile)", "title": "" }, { "docid": "752b54ccf03f111447d3268f4808aa65", "score": "0.47832507", "text": "def open_as(self, path):\n path = self.conform(path)\n nuke.scriptOpen(path)\n nuke.scriptSaveAs(path.replace(\n \".nk\", \"_{}.nk\".format(time.time())))", "title": "" }, { "docid": "0bee2cc234ffe3e4f4c67514d6e74d15", "score": "0.47822186", "text": "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n parser = E.OptionParser(\n version=\"%prog version: $Id: jalview.py 2782 2009-09-10 11:40:29Z andreas $\", 
usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-m\", \"--method\", dest=\"method\", type=\"choice\",\n choices=(\"list2annotation\", ),\n help=\"methods.\")\n\n parser.add_option(\"--filename-mali\", dest=\"filename_mali\", type=\"string\",\n help=\"filename with multiple alignment used for calculating sites - used for filtering\")\n\n parser.add_option(\"--jalview-title\", dest=\"jalview_title\", type=\"string\",\n help=\"title for jalview annotation.\")\n\n parser.set_defaults(\n method=None,\n jalview_symbol=\"*\",\n jalview_title=\"anno\",\n filename_mali=None,\n )\n\n (options, args) = E.Start(parser, add_pipe_options=True)\n\n if not options.filename_mali:\n raise \"please specify a multiple alignment.\"\n\n mali = Mali.Mali()\n mali.readFromFile(open(options.filename_mali, \"r\"))\n\n if options.method == \"list2annotation\":\n\n options.stdout.write(\"JALVIEW_ANNOTATION\\n\")\n options.stdout.write(\"# Created: %s\\n\\n\" %\n (time.asctime(time.localtime(time.time()))))\n\n codes = [\"\"] * mali.getWidth()\n\n first = True\n for line in sys.stdin:\n if line[0] == \"#\":\n continue\n if first:\n first = False\n continue\n\n position = int(line[:-1].split(\"\\t\")[0])\n codes[position - 1] = options.jalview_symbol\n\n options.stdout.write(\"NO_GRAPH\\t%s\\t%s\\n\" %\n (options.jalview_title, \"|\".join(codes)))\n\n E.Stop()", "title": "" }, { "docid": "6f9dfab10a976a761d1bbf8a3a5d44b9", "score": "0.47609362", "text": "def show_preview(viewer_path, pdf_file_name):\n try:\n cmd = [viewer_path, pdf_file_name]\n run_external_subprocess_in_background(cmd)\n except (subprocess.CalledProcessError, OSError, IOError) as e:\n print(\"\\nWarning from pdfCropMargins: The argument to the '--viewer' option:\"\n \"\\n \", viewer_path, \"\\nwas not found or failed to execute correctly.\\n\",\n file=sys.stderr)\n return", "title": "" }, { "docid": "35e3b09c4394aa6e971ed88c0201b8cb", "score": "0.47553506", "text": "def simulate(self, path=[], caltag='default'): \n \n print ('[INFO] Starting to simulate a test beam run ...') \n \n self.import_caltag(caltag)\n self.run(path)\n self.export_caltag(caltag)", "title": "" }, { "docid": "e15e669721b3fdb03c24347cfb36df48", "score": "0.4750639", "text": "def openimage(self, path):\n self.ui.statusbar.showMessage('Loading image...')\n self.app.processEvents()\n newimagetab = viewer.imageTabTracker([path], self.experiment, self)\n tabwidget = self.ui.findChild(QtGui.QTabWidget, 'tabWidget')\n tabwidget.setCurrentIndex(tabwidget.addTab(newimagetab, path.split('/')[-1]))\n self.ui.findChild(QtGui.QStackedWidget, 'viewmode').setCurrentIndex(1)\n self.ui.statusbar.showMessage('Ready...')", "title": "" }, { "docid": "0e1ea97c411dbeb3db5adb169cbad476", "score": "0.47415316", "text": "def run(self, path):\n if self.linux():\n #cmd = os.path.join(path, \"swat.exe\")\n cmd = os.path.join(self.current_path, \"swat2012_rev637_linux\")\n if self.custom_swat_path is not None:\n cmd = self.custom_swat_path\n logger.debug(\"Using custom SWAT : \" + self.custom_swat_path)\n #return subprocess.call([cmd], cwd=path, shell=True)\n process = subprocess.Popen(cmd, cwd=path, stderr=subprocess.STDOUT,\n stdout=subprocess.PIPE, universal_newlines=True)\n for line in process.stdout:\n sys.stdout.write(line)\n return process.returncode\n if self.windows():\n #cmd = os.path.join(path, \"swat.exe\")\n cmd = os.path.join(self.current_path, \"swat2012_rev637_windows.exe\")\n if self.custom_swat_path is not None:\n cmd = self.custom_swat_path\n logger.debug(\"Using custom SWAT : \" + 
self.custom_swat_path)\n #return subprocess.call([cmd], cwd=path, creationflags=subprocess.CREATE_NEW_CONSOLE)\n #return subprocess.call([cmd], cwd=path)\n process = subprocess.Popen(cmd, cwd=path, stdout=subprocess.PIPE, universal_newlines=True)\n for line in process.stdout:\n sys.stdout.write(line)\n return process.returncode", "title": "" }, { "docid": "9e067da5ee2e61bfdd41280a12ba12ee", "score": "0.4737881", "text": "def render(self, fpath, view=True):\n self._graphviz().render(fpath, view=view)", "title": "" }, { "docid": "1419d0d38d3bf803f964362896cbc032", "score": "0.47378168", "text": "def set_vitrage(self):\r\n self.vitrage.start_script()", "title": "" }, { "docid": "8dcaa7ac4b4aeb1f6bc625775b209b89", "score": "0.47312513", "text": "def run(cls):\n subprocess.Popen(\n [\n cls.Path,\n ],\n )", "title": "" }, { "docid": "733c8d96c21f7a61181fff90633d2f52", "score": "0.4719372", "text": "def run(jarvis, s):\n os.system(s)", "title": "" }, { "docid": "dfd2ea5f9ba6e5d8318dcca01cfe0d6b", "score": "0.4715972", "text": "def __call__(path):", "title": "" }, { "docid": "c353329f7e5f789f45c92ee7568f3c9c", "score": "0.47127804", "text": "def do_dot_load(self, args):\n self.do_dot_run(args)", "title": "" }, { "docid": "03cfacc59eb61ac572271cf1c007dfab", "score": "0.47109213", "text": "def start(self):\n self.args['workdir'] = self.workdir\n revision = commands.GetProp(self, 'use_swarming_client_revision', None)\n self.startVC(None, revision, None)", "title": "" }, { "docid": "fe4fdc7e824e51b2295586009703d52c", "score": "0.47085676", "text": "def call_skyview(field, survey, pos, fov, coord, proj='Car', pix=500):\n\n\t# Assign position coordinates\n\tx,y = pos[0],pos[1]\n\n\t# Construct name of the resulting file\n\tfitsname = \"Skyview_{field}_{survey}.fits\".format(**locals())\n\n\t# Construct and send the command\n\tcmd = 'java -jar skyview.jar coordinates={coord} projection={proj} position={x:.4f},{y:.4f} size={fov},{fov} pixels={pix},{pix} survey=\"{survey}\" output=\"results/{fitsname}\"'.format(**locals())\n\tprint(cmd)\n\tprint('Executing command...')\n\tos.system(cmd)\n\tprint('... done!')\n\n\treturn(fitsname)", "title": "" }, { "docid": "8c6fc28e182f36b530af45e35bd07250", "score": "0.47062925", "text": "def _runPath(self):\n if not hasattr(self, 'pdf'):\n self.pdf = self.process.sourcemanager.get(self.cfg['pdf'])\n if not hasattr(self, 'argset'):\n self.argset = self.cfg['argset']\n if self.cfg['preloadFiles'] and any([os.path.exists(f) for f in self.cfg['preloadFiles']]):\n for f in self.cfg['preloadFiles']:\n try:\n fin = ROOT.TFile(f)\n data = fin.Get(\"{0}Data\".format(self.pdf.GetName()))\n if not data == None:\n if self.data is None:\n self.data = data.emptyClone(\"{0}Data_preload\".format(self.pdf.GetName()))\n self.data.append(data)\n finally:\n fin.Close()\n else:\n # Remark: \n # 1. User is responsible for the positiveness of the PDF.\n # 2. Name of the generated dataset is pdf.GetName()+\"Data\"\n # 3. Generation time is linear to scale and depend on the PDF, so don't use large scale\n # TODO: Parallel production with large scale\n if (self.cfg['scale'] > 100):\n self.logger.logWARNING(\"It may take longer time to generate a large sample. 
(Scale={0})\".format(self.cfg['scale']))\n self.data = self.pdf.generate(self.argset, ROOT.gRandom.Poisson(self.cfg['expectedYields'] * self.cfg['scale']), *self.cfg.get('generateOpt', []))\n self.logger.logINFO(\"ToyGenerator {0} generates based on {1} with {2} events.\".format(self.name, self.pdf.GetName(), self.data.sumEntries()))", "title": "" }, { "docid": "acbf7fae0bed44bc5c2c7e5e13b7fb28", "score": "0.46987456", "text": "def run(self):\n seqfile = self._write_seq() # write\n\n # Check for existence of output directory before writing.\n check_dir(\"%s%s\"%(self.out_dir, self.date))\n\n self._call_codev(seqfile) # run\n self._move_seq() # move", "title": "" }, { "docid": "13ebced1f44166715c348d402a07380d", "score": "0.46952757", "text": "def _main():\n reference_viewer(sys.argv)", "title": "" }, { "docid": "cf5c9530721222ff89508fc620a7c327", "score": "0.46886978", "text": "def runLeoTest(self, path):\n c = self.c\n # Do not set or clear g.app.unitTesting: that is only done in leoTest.runTest.\n assert g.app.unitTesting\n try:\n c2 = None\n old_gui = g.app.gui\n c2 = g.openWithFileName(path, old_c=c)\n assert(c2)\n structure_errors = c2.checkOutline()\n assert not structure_errors, structure_errors\n finally:\n g.app.gui = old_gui\n if c2 and c2 != c:\n c2.setChanged(False)\n g.app.closeLeoWindow(c2.frame)\n c.frame.update() # Restored in Leo 4.4.8.", "title": "" }, { "docid": "dc73a7e4d5e22dbd2031ca87f8867416", "score": "0.4679496", "text": "def visualize(*kargs,**kwargs):\n job=kwargs['job']\n\n if job=='short wrapped dcd trajectory':\n \"\"\"save the trajectory from 1ns to 1.1ns with wrapped coordinates and with the BaSO4 in the center of the simulation box\"\"\"\n cmds='''mol new %s waitfor all\n mol addfile %s type lammpstrj first 10000 last 10999 waitfor all\n pbc set {34.4475 28.1422 57.2552} -all\n pbc box_draw\n set sel [atomselect top all]\n set nf 1000\n for {set i 0} {$i < $nf} {incr i} { \n $sel frame $i \n $sel moveby {0. 0. 
13.5} \n }\n pbc wrap -all\n pbc join residue -all\n animate write dcd %s\n quit'''%(kwargs['pdb'],kwargs['lammpstrj'],kwargs['dcd'])\n vmdexec(cmds)\n\n else:\n print 'job not recognized, nothing to calculate'", "title": "" }, { "docid": "f61f7a7980900ea81d3b426fb0c4ffd6", "score": "0.4673853", "text": "def gateway_start( path, opts ):\n \n gateway_argv = [path, \"-g\", \"%s\" % opts.g, \"-v\", \"%s\" % opts.v, \"-u\", \"%s\" % opts.u]\n\n try:\n p = subprocess.Popen( gateway_argv, executable=path, close_fds=True, shell=False )\n return p\n except Exception, e:\n print >> sys.stderr, \"\\n\".join(traceback.format_exc())\n return None", "title": "" }, { "docid": "e62047bcdb69c44d11298ff110ed12fb", "score": "0.4666706", "text": "def _view_file_select():\n ar = aruco.Dictionary_get(aruco.DICT_6X6_250)\n param = aruco.DetectorParameters_create()\n path = Path(\"../datasets/P2/images/\")\n fnames = list()\n for file in path.glob(\"*.jpg\"):\n fnames.append(str(file))\n cam = Camera(\"mobile\")\n cam.read_param()\n tmen = TerminalMenu(fnames)\n s = tmen.show()\n img = cv2.imread(fnames[s])\n v = View(fnames[s],img,ar,param,171/1000,cam)\n cv2.imwrite(\"../logs/temp.jpg\",v.img)\n scale_percent = 40 # percent of original size\n width = int(v.img.shape[1] * scale_percent / 100)\n height = int(v.img.shape[0] * scale_percent / 100)\n dim = (width, height)\n resized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n cv2.imshow(str(fnames[s]),resized)\n cv2.waitKey(0)\n print(f\"fnames[s]={fnames[s]}\")", "title": "" }, { "docid": "7392dde486af24ebe0e5d236aa6260e6", "score": "0.46647665", "text": "def load(self, path):\n pass", "title": "" }, { "docid": "4276f5380801e87141321639a935591e", "score": "0.4663376", "text": "def run(app, args):\n experiment = app.make('experiment', args.name)\n\n if args.config is not None:\n experiment.config_file = args.config\n\n if args.progress is True:\n experiment.show_progress = True\n\n if args.hide_performance is True:\n experiment.hide_performance = True\n\n experiment.start(args.n)", "title": "" }, { "docid": "d2fd4a8bfb0a0901c8472f176b935368", "score": "0.4660436", "text": "def play(self, path):\n cap = cv2.VideoCapture(path)\n if cap.isOpened() == False: print('ERROR! 
Cannot open video.')\n \n while cap.isOpened():\n ret, frame = cap.read()\n if ret == True:\n cv2.imshow('Frame', frame)\n if cv2.waitKey(25) & 0xFF == ord('q'): break\n else: break\n cap.release()", "title": "" }, { "docid": "540d02eaeba5c0e1f8514c45bfa6abb6", "score": "0.46525946", "text": "def load_pdf(self, path=None):\n\t\tif path:\n\t\t\tself.path = path\n\t\tself.load(QtCore.QUrl.fromLocalFile(self.path))", "title": "" }, { "docid": "c96fcc1bd0ee97373e343b3ae97f3e6f", "score": "0.4651659", "text": "def process_cmd_line(app, opts, args): \n\n from enthought.mayavi.core.common import error, exception\n from enthought.tvtk.common import camel2enthought\n\n sources = _get_non_file_sources()\n script = app.script\n last_obj = None\n\n # Start a new scene by default if there is none currently and none\n # was specified at the start of the command line arguments.\n if script.engine.current_scene is None:\n new_scene = False\n if len(opts) == 0:\n if len(args) == 0:\n new_scene = True\n elif (opts[0][0] not in ('-n', '--new-scene', '-z',\n '--visualization', '--viz',\n '-x', '--exec')):\n new_scene = True\n if new_scene: \n last_obj = script.new_scene()\n \n for o, a in opts:\n if o in ('-d', '--data'):\n base, ext = splitext(a)\n if exists(a):\n last_obj = script.open(a)\n elif a in sources:\n md = sources[a]\n src = md.get_callable()()\n script.add_source(src)\n last_obj = src\n else:\n error(\"File/Source %s does not exist!\"%a)\n return\n\n if o in ('-m', '--module'):\n if '.' in a:\n idx = a.rfind('.')\n modname = a[:idx]\n classname = a[idx+1:]\n else:\n modname = 'enthought.mayavi.modules.%s'%camel2enthought(a)\n classname = a\n try:\n mod = __import__(modname, globals(), locals(), [classname])\n except ImportError, msg:\n exception(str(msg))\n return\n else:\n m = getattr(mod, classname)()\n if classname == 'Labels':\n m.object = script.engine.current_object\n script.add_module(m)\n last_obj = m\n\n if o in ('-f', '--filter'):\n if '.' in a:\n idx = a.rfind('.')\n modname = a[:idx]\n classname = a[idx+1:]\n else:\n if a[:12] == 'UserDefined:':\n modname = 'enthought.mayavi.filters.user_defined'\n classname = 'UserDefined'\n # Create the wrapped filter.\n fname = a[12:]\n from enthought.tvtk.api import tvtk\n try:\n extra = getattr(tvtk, fname)()\n except (AttributeError, TypeError):\n # Don't worry about errors.\n extra = None\n else:\n modname = 'enthought.mayavi.filters.%s'%camel2enthought(a)\n classname = a\n extra = None\n try:\n mod = __import__(modname, globals(), locals(), [classname])\n except ImportError, msg:\n exception(str(msg))\n return\n else:\n klass = getattr(mod, classname)\n if classname != 'UserDefined':\n f = klass()\n else:\n if extra is not None:\n f = klass(filter=extra)\n else:\n f = klass()\n f.setup_filter()\n script.add_filter(f)\n last_obj = f\n\n if o in ('-M', '--module-mgr'):\n from enthought.mayavi.core.module_manager \\\n import ModuleManager\n mm = ModuleManager() \n script.add_filter(mm)\n last_obj = mm\n\n if o in ('-n', '--new-scene'):\n script.new_scene()\n e = script.engine\n s = e.scenes[-1]\n e.set(current_scene=s, current_object=s)\n last_obj = s\n\n if o in ('-x', '--exec' ):\n err = run_script(script, a)\n if err: # stop processing options.\n return\n\n if o in ('-s', '--set'):\n try:\n stmt = 'last_obj.' 
+ a\n exec stmt in locals(), globals()\n except Exception, msg:\n exception(str(msg))\n\n if o in ('-z', '--visualization', '--viz'):\n script.load_visualization(a)\n\n # for remaining arguments simply load saved visualizations.\n for arg in args:\n base, ext = splitext (arg)\n if ext == '.mv2':\n script.load_visualization(arg)\n elif ext == '.py':\n err = run_script(script, arg)\n if err: # stop processing arguments.\n return\n else:\n script.open(arg)", "title": "" }, { "docid": "ce56ae16a66ba9614bc4873612fdd1b3", "score": "0.46385577", "text": "def main(path: str):\n data_file = os.path.join(path, 'raw_data.csv')\n trg_file = os.path.join(path, 'triggers.txt')\n data, device_info = file_data(data_file)\n triggers = read_triggers(trg_file)\n\n plot_triggers(data, device_info, triggers, title=pathlib.Path(path).name)", "title": "" }, { "docid": "f781df8a7ec99b607f63048944f8e0eb", "score": "0.4636935", "text": "def setup_node():\n parser = argparse.ArgumentParser(description=\"This script starts the VIEW component of the MDB 3.0\")\n parser.add_argument(\"-s\", \"--seed\", help=\"Yaml file with some nodes to populate the LTM\")\n parser.add_argument(\"-l\", \"--log_level\", help=\"ROS log level (DEBUG, INFO, WARN, ERROR or FATAL)\")\n parser.add_argument(\"-d\", \"--save_dir\", help=\"Directory to save images and files\")\n args, _ = parser.parse_known_args()\n kwargs = vars(args)\n view = View()\n view.run(**kwargs)", "title": "" }, { "docid": "a13874528b4f368a6655d1ae915ac668", "score": "0.4636766", "text": "def test_view_on_file(self):\n tracefile = create_tracefile(self.shark)\n columns, filters = setup_defaults()\n\n with self.shark.create_view(\n tracefile, columns,\n None,\n name='test_view_on_file') as view:\n data = view.get_data()\n try:\n self.assertTrue(len(data) > 0)\n except:\n # this may fail in low traffic machines\n pass\n self.assertTrue(\n view.config['input_source']['path'].startswith('fs'))", "title": "" }, { "docid": "a83911dc592c3b7b2caff91edea583a8", "score": "0.46334937", "text": "def load_vst(self, path_to_so_file):\n self._vst = VstPlugin(path_to_so_file, self._callback)\n\n # Not sure I need this but I've seen it in other hosts\n self.vst.open()\n\n if not self._vst.is_synth:\n raise RuntimeError('Your VST must be a synth!')\n\n self.vst.set_sample_rate(self.sample_rate)\n self.vst.set_block_size(self.block_size)\n self.vst.resume()\n\n # Warm up the VST by playing a quick note. 
It has fixed some issues for TyrellN6 where\n # otherwise the first note is funny.\n self._path_to_so_file = path_to_so_file\n self.play_note(note=64, min_duration=.1, max_duration=.1, note_duration=.1, velocity=127,\n reload=False)", "title": "" }, { "docid": "843e2ff7a837bdfc1b7f757a41f4d0ce", "score": "0.463222", "text": "def MonteOpen(self):\n __location__ = os.path.dirname(sys.argv[0]) \n os.system(__location__+'/Monte.py') \n self.statusVariable.set(\"Please restart UI after running the Monte Carlo generator\")", "title": "" }, { "docid": "9725d40b690cf211d58320e8becd9b74", "score": "0.46302435", "text": "def open_article(filepath):\n if platform.system() == \"Darwin\": # macOS\n subprocess.call((\"open\", filepath))\n elif platform.system() == \"Windows\": # Windows\n os.startfile(filepath)\n else: # linux variants\n subprocess.call((\"xdg-open\", filepath))", "title": "" }, { "docid": "c2ae9cefc6862b8ed9e8d62ba3aa9ab7", "score": "0.462636", "text": "def run(filename):\n\n # Initialize the GLUT window.\n glutInit([])\n glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)\n glutInitWindowPosition(0, 20)\n glutInitWindowSize(width, height)\n glutCreateWindow( 'object-view.py - Press ESC to quit' )\n\n # Initialize the object viewer's state.\n init(filename)\n\n # Register interaction callbacks.\n glutKeyboardFunc(keypress)\n glutSpecialFunc(arrow)\n glutReshapeFunc(resize)\n glutDisplayFunc(draw)\n glutMouseFunc(mouse)\n glutMotionFunc(motion)\n\n # Issue some instructions.\n print()\n print('Drag the object with the mouse to reorient it.')\n print('Press the arrow keys move the flashlight.')\n print('Press SPACE to show the mesh.')\n print('Press \"/\" to refine the mesh.')\n print('Press ESC to quit.')\n print()\n\n glutMainLoop()\n return 0", "title": "" }, { "docid": "c77c9bde49831bdfee34ac025384ad6e", "score": "0.46077797", "text": "def main(args=sys.argv):\r\n usage = ('\\t%prog [options] <log file>')\r\n parser = OptionParser(usage=usage, version=__version__)\r\n parser.disable_interspersed_args()\r\n parser.add_option(\"-f\", \"--flat\",\r\n dest=\"flat\",\r\n default=False,\r\n action=\"store_true\",\r\n help=\"Display the log line-by-line in a syslog-like format.\"\r\n )\r\n parser.add_option(\"-p\", \"--playback\",\r\n dest=\"playback\",\r\n default=True,\r\n action=\"store_false\",\r\n help=(\"Play back the log in a video-like fashion. 
This is the default \"\r\n \"view.\")\r\n )\r\n parser.add_option(\"--pretty\",\r\n dest=\"pretty\",\r\n default=True,\r\n action=\"store_true\",\r\n help=(\"Preserve font and character renditions when displaying the log \"\r\n \"in flat view (default).\")\r\n )\r\n parser.add_option(\"--raw\",\r\n dest=\"raw\",\r\n default=False,\r\n action=\"store_true\",\r\n help=\"Display control characters and escape sequences when viewing.\"\r\n )\r\n parser.add_option(\"--html\",\r\n dest=\"html\",\r\n default=False,\r\n action=\"store_true\",\r\n help=(\r\n \"Render a given .golog as a self-contained HTML playback file \"\r\n \"(to stdout).\")\r\n )\r\n parser.add_option(\"--metadata\",\r\n dest=\"metadata\",\r\n default=False,\r\n action=\"store_true\",\r\n help=( \"Prints (to stdout) the metadata of the given .golog\")\r\n )\r\n (options, args) = parser.parse_args(args=args)\r\n if len(args) < 1:\r\n print(\"ERROR: You must specify a log file to view.\")\r\n parser.print_help()\r\n sys.exit(1)\r\n log_path = args[0]\r\n if not os.path.exists(log_path):\r\n print(\"ERROR: %s does not exist\" % log_path)\r\n sys.exit(1)\r\n sys_stdout = sys.stdout\r\n if bytes != str: # Python 3\r\n sys_stdout = sys.stdout.buffer\r\n sys.stdout.flush() # Make sure it's empty before writing to the buffer\r\n try:\r\n if options.metadata:\r\n import json\r\n if curses and sys.stderr.isatty():\r\n try:\r\n curses.setupterm()\r\n print(json.dumps(get_log_metadata(log_path), indent=4))\r\n except Exception:\r\n print(json.dumps(get_log_metadata(log_path)))\r\n sys.exit(0)\r\n elif options.flat:\r\n flatten_log(\r\n log_path,\r\n sys_stdout,\r\n preserve_renditions=options.pretty, show_esc=options.raw)\r\n elif options.html:\r\n result = render_html_playback(log_path)\r\n print(result)\r\n else:\r\n playback_log(log_path, sys_stdout, show_esc=options.raw)\r\n except (IOError, KeyboardInterrupt):\r\n # Move the cursor to the bottom of the screen to ensure it isn't in the\r\n # middle of the log playback output\r\n rows, cols = get_terminal_size()\r\n print(\"\\x1b[%s;0H\\n\" % rows)\r\n sys.exit(0)", "title": "" }, { "docid": "2a964705edf8b370ea4258613753786c", "score": "0.4604574", "text": "def load(self, filename):\n self.ui.graphicsView.load_document(filename)", "title": "" }, { "docid": "e3e12801f9a83a8746a5c2860f9b6ed4", "score": "0.46034473", "text": "def onOpenDir(self, e):\n self.presenter.openPath()", "title": "" }, { "docid": "63bbab2d94af57e1810b65daa5692062", "score": "0.4595219", "text": "def load(cls, path: str) -> DevBench:\n\n # Path to the save directory\n savedir = pathlib.Path(path)\n\n # Load all the slices\n slices = []\n for sl_path in tqdm(list((savedir / \"slices\").glob(\"*\"))):\n try:\n slices.append(DataPanel.read(str(sl_path)))\n except FileNotFoundError:\n continue\n\n # Load metrics\n metrics = dill.load(open(str(savedir / \"metrics.dill\"), \"rb\"))\n\n # Load metrics\n aggregators = dill.load(open(str(savedir / \"aggregators.dill\"), \"rb\"))\n\n # Create the devbench\n devbench = cls()\n\n # Set previously stored metrics\n devbench.metrics = metrics\n\n # Set previously stored aggregators\n devbench.aggregators = aggregators\n\n # Set the slices\n devbench.add_slices(slices)\n\n # Load version info\n with open(str(savedir / \"version.dill\"), \"rb\") as f:\n devbench._loads_version(f.read())\n\n return devbench", "title": "" }, { "docid": "68a5e4d6d4ff16a7bc528885a06ea0a2", "score": "0.45926932", "text": "def plot_from_database(run_path: str):\n output_db_path = os.path.join(run_path, 
\"outputs.db\")\n assert os.path.exists(output_db_path), \"Folder does not contain outputs.db\"\n app_dirname = [x for x in run_path.split(\"/\") if x][-2]\n params_path = os.path.join(run_path, \"params.yml\")\n with open(params_path, \"r\") as f:\n params = yaml.safe_load(f)\n\n app_code_path = try_find_app_code_path(app_dirname)\n\n # Load plot config from project dir\n plot_config_path = os.path.join(app_code_path, \"plots.yml\")\n with open(plot_config_path, \"r\") as f:\n plot_config = yaml.safe_load(f)\n\n plots.validate_plot_config(plot_config)\n\n # Load post processing config from the project dir\n post_processing_path = os.path.join(app_code_path, \"post-processing.yml\")\n with open(post_processing_path, \"r\") as f:\n post_processing_config = yaml.safe_load(f)\n\n # Get database from model data dir.\n db_path = os.path.join(run_path, \"outputs.db\")\n scenarios = load_model_scenarios(db_path, params, post_processing_config)\n\n plot_scenarios(scenarios, run_path, plot_config)", "title": "" }, { "docid": "4e29c312eb5b3d905bec4a72880ee02b", "score": "0.45913714", "text": "def run(self, path=\"\"):\n entity, project, run = self._parse_path(path)\n if not self._runs.get(path):\n self._runs[path] = Run(self.client, entity, project, run)\n return self._runs[path]", "title": "" }, { "docid": "8d6d4c7ec57f315904f98346f89b90b2", "score": "0.45867917", "text": "def _view_test():\n ar = aruco.Dictionary_get(aruco.DICT_6X6_250)\n param = aruco.DetectorParameters_create()\n # fname=\"../datasets/P2/images/093440.jpg\" # ids[4,3,8,9,11]\n fname=\"../datasets/P2/images/093428.jpg\" # ids[0,4,5,6,7,9,11]\n img = cv2.imread(fname)\n cam = Camera(\"mobile\")\n cam.read_param()\n v = View(fname,img,ar,param,171/1000,cam)\n print(v)\n print(\"Show image\")\n cv2.imwrite(\"../logs/temp.jpg\",v.img)\n tmen = TerminalMenu(['No','Yes'])\n if tmen.show() == 1:\n print(\"Showing image\")\n scale_percent = 40 # percent of original size\n width = int(v.img.shape[1] * scale_percent / 100)\n height = int(v.img.shape[0] * scale_percent / 100)\n dim = (width, height)\n # resize image\n resized = cv2.resize(v.img, dim, interpolation = cv2.INTER_AREA)\n cv2.imshow(fname,resized)\n cv2.waitKey(0)\n else:\n print(\"Did not show menu\")", "title": "" }, { "docid": "6524b546f87513721571c39125f9fcd7", "score": "0.45848945", "text": "def shoot(path):\n\n pyautogui.screenshot(path)", "title": "" }, { "docid": "a1e48350761686d3a07001beb37bc11f", "score": "0.45777315", "text": "def load(path):\n # TODO this needs to be more robust / x-platform with libpath or something\n # TODO Investigate what this does precisely and check for dual (unload)\n data = {\"path\": path}\n response = GNS3API.post_request(f'/projects/load', data=data)\n if not response.ok:\n raise Exception('Unable to open project')", "title": "" }, { "docid": "3cdaf8c34e6966fcaa35e0eb27651d7d", "score": "0.45744595", "text": "def generate_example_rst(self, path):\r\n example_file = os.path.join(self.source_dir, path)\r\n\r\n stdout_file = os.path.join(self.target_dir, self.stdout_filename(path))\r\n image_file = os.path.join(self.target_dir, self.image_filename(path))\r\n thumb_file = os.path.join(self.target_dir, self.thumb_filename(path))\r\n rst_file = os.path.join(self.target_dir, self.rst_filename(path))\r\n py_file = os.path.join(self.target_dir, self.py_filename(path))\r\n\r\n # check if we should re-run the example\r\n if os.path.exists(stdout_file):\r\n output_modtime = os.stat(stdout_file).st_mtime\r\n source_modtime = 
os.stat(example_file).st_mtime\r\n\r\n if output_modtime >= source_modtime:\r\n run_example = self.force_rerun and self.execute_files\r\n else:\r\n run_example = self.execute_files\r\n else:\r\n run_example = self.execute_files\r\n\r\n EF = ExecFile(example_file, execute=run_example)\r\n \r\n if run_example:\r\n # make sure directories exist\r\n for f in (stdout_file, image_file, thumb_file, rst_file, py_file):\r\n d, f = os.path.split(f)\r\n if not os.path.exists(d):\r\n os.makedirs(d)\r\n \r\n # write output\r\n open(stdout_file, 'w').write(EF.output)\r\n\r\n # save all figures & thumbnails\r\n figure_list = EF.save_figures(image_file, thumb_file)\r\n \r\n # if no figures are created, we need to make a\r\n # blank thumb file\r\n if len(figure_list) == 0:\r\n shutil.copy('images/blank_image.png', thumb_file % 1)\r\n\r\n else:\r\n figure_list = list(glob.glob(image_file % '[1-9]'))\r\n\r\n # copy source code to generated file tree\r\n shutil.copyfile(example_file, py_file)\r\n\r\n figure_list = [os.path.relpath(f, os.path.join(self.target_dir,\r\n os.path.dirname(path)))\r\n for f in figure_list]\r\n\r\n fname = os.path.split(path)[1]\r\n stdout = os.path.split(stdout_file)[1]\r\n docstring = EF.docstring\r\n if docstring == '':\r\n docstring = '\\n'.join([fname, '-' * len(fname), ''])\r\n\r\n rst_file = open(rst_file, 'w')\r\n rst_file.write(self.template_example %\r\n dict(sphinx_tag=self.sphinx_tag(path),\r\n docstring=docstring,\r\n stdout=stdout,\r\n fname=fname,\r\n image_list=self.image_list(figure_list),\r\n end_line=EF.end_line))", "title": "" }, { "docid": "4f6188f0134632160e8eb8d7eb1bca73", "score": "0.45741817", "text": "def start(self):\n self.args['workdir'] = self.workdir\n revision = commands.GetProp(self, 'use_swarm_client_revision', None)\n self.startVC(None, revision, None)", "title": "" }, { "docid": "7485157720548ac66ab4b90c11822b48", "score": "0.45730114", "text": "def start_motiven():\n parser = argparse.ArgumentParser(description=\"This script starts the MOTIVEN component of the MDB 3.0\")\n parser.add_argument(\n \"-b\",\n \"--backup\",\n help=\"MOTIVEN backup / memory dump file. \"\n \"If it exists, it reads its contents. \"\n \"Otherwise, it creates the file. 
\"\n \"When the process ends, the backup file is updated.\",\n )\n parser.add_argument(\"-l\", \"--log_level\", help=\"ROS log level (DEBUG, INFO, WARN, ERROR or FATAL)\")\n parser.add_argument(\"-s\", \"--standalone\", action=\"store_true\", help=\"Run MOTIVEN without other MDB components\")\n parser.add_argument(\"-p\", \"--plot\", action=\"store_true\", help=\"Show MOTIVEN diagrams in real time.\")\n args, _ = parser.parse_known_args()\n kwargs = vars(args)\n backup = kwargs.pop(\"backup\", None)\n motiven = MOTIVEN.restore(backup)\n motiven.run(**kwargs)", "title": "" }, { "docid": "4c9aeebb3f2896228ba672db569da537", "score": "0.45723343", "text": "def init_data_command(slide_dir):\n slide_dir = Path(slide_dir)\n click.echo(f\"Load data from {slide_dir}.\")\n assert slide_dir.exists()\n slide_paths = [p.absolute() for p in slide_dir.glob(\"*.svs\")]\n db = get_db()\n insert_new_slide_records(db, slide_paths)\n click.echo(\"Finish inserting slides info.\")\n\n annotation_paths = [slide_path.with_suffix(\".xml\") for slide_path in slide_paths]\n valid_annotation_paths = []\n slide_ids = []\n for slide_path, annotation_path in zip(slide_paths, annotation_paths):\n if slide_path.exists() and annotation_path.exists():\n slide_id = md5_hash(slide_path).hexdigest()\n slide_ids.append(slide_id)\n\n moved_annotation_path = slide_dir.parent / Path(f\"annotations/{slide_id}/initial_annotation.xml\")\n if not moved_annotation_path.parent.exists():\n moved_annotation_path.parent.mkdir(parents=True)\n\n copyfile(str(annotation_path), str(moved_annotation_path))\n click.echo(f\"Moved from {annotation_path} to {moved_annotation_path}.\")\n valid_annotation_paths.append(moved_annotation_path.absolute())\n\n insert_new_annotation_records(db, slide_ids, valid_annotation_paths)\n click.echo(\"Finish inserting annotations info.\")\n\n for slide_path in slide_paths:\n click.echo(f\"Start cropping slide {slide_path.name}.\")\n patch_size = current_app.config[\"PATCH_SIZE\"]\n crop_patches_from_slide(slide_path, patch_size)\n click.echo(f\"Finish cropping slide {slide_path.name}.\")", "title": "" }, { "docid": "3215d120b9b7142555a31fb9ddb48c7e", "score": "0.4567896", "text": "async def snapshot(command: Command[JaegerActor], fps: FPS, path: str | None = None):\n\n if path is not None:\n path = str(path)\n\n try:\n filename = await fps.save_snapshot(path, write_to_actor=False)\n except Exception as err:\n return command.fail(f\"Snapshot failed with error: {err}\")\n\n return command.finish(snapshot=filename)", "title": "" }, { "docid": "6841d5d614da91e2ccdbd87157869f02", "score": "0.45633242", "text": "def play_run(path, command, user):\n\n cmd = \"%s %s %s --%%console\" % (env.python_bin, env.play_bin, command)\n with cd(path):\n # Make absolutely sure resulting directories are readable by the\n # the Play process which runs as a different user.\n with prefix('umask 0002'):\n return sudo(cmd, user=user)", "title": "" }, { "docid": "4824c1c5664e40863ec1dfc8bf75084d", "score": "0.45402744", "text": "def file(self, path):\r\n return self.inner_cmd(\"file %s\" % path)", "title": "" }, { "docid": "c899bfbc5dafb9df64608f5088c55782", "score": "0.4537384", "text": "def open_browser():\n if system == 'Windows':\n os.startfile(instfolder['open'])\n elif system == 'Darwin':\n subprocess.call([\"open\", instfolder])\n else:\n try:\n subprocess.call(['xdg-open', instfolder])\n except OSError:\n print(\"Operating system doesn't have lazy file browser opener\")", "title": "" }, { "docid": "bc778b8f55078250ce2b0223e7a17d42", 
"score": "0.4534796", "text": "def run(input_file=\"\"):\r\n app = GraphApplication(file_name=input_file)\r\n app.mainloop()", "title": "" }, { "docid": "fe3cba25e28c02782433f2e04aa19ff3", "score": "0.45289814", "text": "def launch_program():\n\n browser = Browse(main_menu_choice=menu.MAIN_MENU_CHOICES,\n correction_menu_choice=menu.CORRECTION_MENU_CHOICES,\n str_control_expression=STR_CONTROL_EXPRESSION,\n str_int_control_expression=STR_INT_CONTROL_EXPRESSION,\n date_control_expression=DATE_FORMAT,\n int_control_expression=INT_CONTROL_EXPRESSION,\n players_dao=model.PlayersDAO,\n tournaments_dao=model.TournamentsDAO)\n\n browser.players_dao.load_dao()\n browser.tournaments_dao.load_dao()\n browser.main_menu_control()", "title": "" }, { "docid": "acd0213aa8fd83e22387e9348a03c32b", "score": "0.45284355", "text": "def run_standalone():\n parser = argparse.ArgumentParser(description='Writes values of DICOM tags inside a directory to a CSV file')\n parser.add_argument('out', metavar='O')\n parser.add_argument('directory')\n parser.add_argument('tags', nargs='+')\n\n args = parser.parse_args()\n\n try:\n with open(args.out, 'w') as f:\n # MVVM pattern\n model = Model(args.directory, args.tags) # contains the model\n view = CSVView(f) # pointer to the visual widget (command line)\n viewmodel = CSVViewModel(model, view) # updates the view with the model's contents\n view.update()\n\n except AttributeError as e:\n sys.stderr.write(str(e))\n\n sys.exit(0)", "title": "" }, { "docid": "416d2951878f1a54d90877c677b0e802", "score": "0.4526969", "text": "def plot_database(model_run_path):\n plot_from_database(model_run_path)", "title": "" } ]
2633d44d1e5945a959b683e89ab3b53b
Transformation from abc representation to alphabeta representation
[ { "docid": "32adbf73832c443e858889e2a3816596", "score": "0.0", "text": "def t_23(quantities):\r\n return np.matmul(SynchronousMotor._t23, quantities)", "title": "" } ]
[ { "docid": "d0a85869f5b35dd44b3379dd290b2997", "score": "0.7189133", "text": "def aa_alphabet():\n abc = ['G', 'A', 'S', 'P', 'V', 'T', 'C', 'I', 'L', 'N', 'D', 'K', 'Q', 'E', 'M', 'H', 'F', 'R', 'Y', 'W']\n return abc", "title": "" }, { "docid": "db0ccd77914199a84a06ac22acc30abf", "score": "0.69957733", "text": "def anc(old):\n \n #Convert alphanumeric string into numeric string\n charlist=list(old)\n i=len(charlist)-1\n num=[]\n for c in charlist:\n num.append(ord(c))\n \n while i>0:\n #If character has value '9', set next value to 'a'\n if num[i]==57:\n num[i]=97\n i=0\n #If character has value 'z', set next value to '0' and transition to the next column\n elif num[i]==122:\n num[i]=48\n i=i-1\n #Else add one to the value \n else:\n num[i]=num[i]+1\n i=0\n\n #Convert back into a alphanumeric string\n new=[] \n for n in num:\n new.append(chr(n))\n out=''.join(new)\n\n return out", "title": "" }, { "docid": "d442a807df76b62c041703a2c9479261", "score": "0.672239", "text": "def Base16Alphabet(alphabet: str) -> Alphabet:\n return Alphabet(alphabet=alphabet, base=16)", "title": "" }, { "docid": "332ae9883f155f5f1a6e283fe310dee5", "score": "0.66170466", "text": "def Base32Alphabet(alphabet: str) -> Alphabet:\n return Alphabet(alphabet=alphabet, base=32)", "title": "" }, { "docid": "d7cf71f8475fec623e6c72eb77848b44", "score": "0.6595336", "text": "def to_alphabet(self, original):\n mapped = []\n offsets = []\n for i, character in enumerate(original):\n if character in self.charset:\n mapped.append(character)\n offsets.append(i)\n elif character in self.mapper:\n mapped.append(self.mapper[character])\n offsets.append(i)\n return mapped, offsets", "title": "" }, { "docid": "008625719fb29d0e030e63f5daef8a15", "score": "0.6406765", "text": "def encode(self, in_str):\n out_str = ''\n for ch in in_str:\n if ch.isalpha():\n ch_idx = string.ascii_lowercase.index(ch.lower())\n en_idx = (self.a * ch_idx + self.b) % self.m\n if ch.isupper():\n out_str += string.ascii_uppercase[en_idx]\n else:\n out_str += string.ascii_lowercase[en_idx]\n else:\n out_str += str(ch)\n return out_str", "title": "" }, { "docid": "bed620d44f36103b8ad441700d102a45", "score": "0.63907623", "text": "def prep_alphabet(self, alphabet=tc.alphabet): \n alphabet_all = list(alphabet[0])\n # In config, entered the alphabet as strings. Decode using utf-8,\n # in order to be able to join to other unicode in edits1\n alphabet_all.extend([a.decode(\"utf-8\") for a in alphabet[1]])\n self.alphabet = alphabet_all", "title": "" }, { "docid": "b60094ffd079499cb48e3077fac9f8f7", "score": "0.6380764", "text": "def alpha(index):\n import string\n alphabet = list(string.ascii_lowercase)\n out = ''\n n = (index+1)/26\n if n != 0:\n out += alphabet[n-1]\n out += alphabet[(index+1)%26-1]\n else:\n out = alphabet[index]\n return out", "title": "" }, { "docid": "f1ee284dbc74863a793f8af65ef599db", "score": "0.6357978", "text": "def augment_a(b):\n bfirst = b[0] # first char\n if bfirst in init.vowel_set:\n # 08-20-03. 
Use vfdDi1 so 'e'->'E'\n b1 = vfdDi1(bfirst) + b[1:]\n elif bfirst == 'C':\n # based upon example of 'Cfd' (lfN)\n b1 = 'a' + 'c' + b\n else:\n b1 = 'a' + b\n return b1", "title": "" }, { "docid": "16afa1cbb7a1e67712bd876154ca13d8", "score": "0.62970257", "text": "def letter_converter(self, letter):\n\n if letter.lower() == \"a\":\n letter = 0\n return letter\n if letter.lower() == \"b\":\n letter = 1\n return letter\n if letter.lower() == \"c\":\n letter = 2\n return letter\n if letter.lower() == \"d\":\n letter = 3\n return letter\n if letter.lower() == \"e\":\n letter = 4\n return letter\n if letter.lower() == \"f\":\n letter = 5\n return letter\n if letter.lower() == \"g\":\n letter = 6\n return letter\n if letter.lower() == \"h\":\n letter = 7\n return letter\n if letter.lower() == \"i\":\n letter = 8\n return letter\n else:\n letter = 10\n return letter", "title": "" }, { "docid": "001472d221af95bd0884eb614e8dfce4", "score": "0.6282243", "text": "def test_should_return_alphabet(self):\n self.assertEqual('aBbcDeFgHiJkLmNoPqRsTuVwXyZ', alphabetize('ZyXwVuTsRqPoNmLkJiHgFeDcBba'))", "title": "" }, { "docid": "50221535d7dd8be37e27ad99b07f43ee", "score": "0.62368274", "text": "def change_alphabet(AA):\n from ..constants import Constants\n\n three_to_one_lookup = Constants.THREE_LETTER_TO_ONE_LETTER\n one_to_three_lookup = Constants.ONE_LETTER_TO_THREE_LETTER\n\n try:\n return three_to_one_lookup[AA]\n except:\n try:\n return one_to_three_lookup[AA]\n except:\n logging.warning('Amino acid code provided did not match any of three or one letter code; returning unknown amino acid code.')\n if(len(AA)==3):\n return 'X'\n if(len(AA)==1):\n return 'UNK'", "title": "" }, { "docid": "727ef9b0a08e313664a2d2159b4be429", "score": "0.62195575", "text": "def string_encode(m):\n global alphabet \n r = []\n for c in list(m):\n r.append(str(alphabet.index(c)).zfill(2))\n return long(\"\".join(r))", "title": "" }, { "docid": "693e8acd8b6e303b245a15cb74b4d422", "score": "0.6163908", "text": "def get_basic_alphabet() -> Set[str]:\r\n a = sf.get_semantic_robust_alphabet()\r\n # remove cations/anions except oxygen anion\r\n to_remove = []\r\n for ai in a:\r\n if \"+1\" in ai:\r\n to_remove.append(ai)\r\n elif \"-1\" in ai:\r\n to_remove.append(ai)\r\n # remove [P],[#P],[=P]\r\n to_remove.extend([\"[P]\", \"[#P]\", \"[=P]\"])\r\n\r\n a -= set(to_remove)\r\n a.add(\"[O-1expl]\")\r\n return a", "title": "" }, { "docid": "dc3914411a9f39e0133233e95202e1f1", "score": "0.61617875", "text": "def alphabet_position(text):\r\n return ' '.join([str(ord(c.upper()) - 64) for c in text if c.isalpha()])", "title": "" }, { "docid": "7368014028a26c2306c92a5c0e25ac80", "score": "0.6136409", "text": "def __init__(self, alpha=5, beta=8):\n self.alpha = alpha\n self.beta = beta\n\n # Create a true alphabet string\n self.true_alphabet = string.ascii_uppercase\n\n # Create transposed alphabet string using affine formula:\n # Where Cipher Value = (alpha * True Value + beta) % 26 for each letter\n # Values, correspond with a characters index in a string.\n self.encrypted_alphabet = (\n ''.join([chr(((alpha*i + beta) % 26) + 65) for i in range(26)])\n )", "title": "" }, { "docid": "ba9554352e0cceb7b89163eba3a5fe5d", "score": "0.61196697", "text": "def numbers_to_letters(data):\n for k in range(len(data)):\n if data[k] == 0:\n data[k] = 'a'\n elif data[k] == 1:\n data[k] = 'b'\n elif data[k] == 2:\n data[k] = 'c'\n elif data[k] == 3:\n data[k] = 'd'", "title": "" }, { "docid": "79381e2a3813ea47dde082637905226c", "score": "0.6115303", 
"text": "def charCaesar(c, k):\n if not c.isalpha():\n return c\n\n if c.isupper():\n r = 'A'\n else:\n r = 'a'\n\n alpha_val = ord(c) % ord(r)\n alpha_val = (alpha_val + k) % 26\n return chr(ord(r) + alpha_val)", "title": "" }, { "docid": "2d300252cc80bd19b01229d7657aee72", "score": "0.61134136", "text": "def oneToOneAlpha(text, generator, standardFormat=True, incNums=True, basicAlpha = 'abcdefghijklmnopqrstuvwxyz'):\r\n\t#Requires: filterString, keyAlphabet, alphaToKey\r\n\tif standardFormat:\r\n\t\ttext = filterString(text, incNums)\r\n\r\n\tkeyAlpha = keyAlphabet(generator, basicAlpha)\r\n\tcypherText = ''\r\n\r\n\tfor char in text:\r\n\t\t#cap to keep track is char was uppercase\r\n\t\tcap = False\r\n\t\t#if uppercase cap set true and char put in lowercase\r\n\t\tif char <= 'Z' and char >= 'A':\r\n\t\t\tcap = True\r\n\t\t\tchar = char.lower()\r\n\t\t#if checks if char was in the original alphabet, but its saved as lowercase\r\n\t\t#\tso any caps are put into lowercase, so a bunch of extra stuff\r\n\t\tif char in basicAlpha:\r\n\t\t\tnewChar = alphaToKey(char, keyAlpha, basicAlpha)\r\n\t\t\t#tertiary operator in python\r\n\t\t\t#if cap is true newChar set to uppercase vertion, lowercase otherwise\r\n\t\t\tnewChar = newChar.upper() if cap else newChar\r\n\t\t\tcypherText = cypherText + newChar\r\n\t\telse:\r\n\t\t\t#tertiary for same thing\r\n\t\t\tchar = char.upper() if cap else char\r\n\t\t\tcypherText = cypherText + char\r\n\r\n\treturn cypherText", "title": "" }, { "docid": "38f9d77dc4664305e80aba6a80d73a93", "score": "0.6107585", "text": "def num_to_alpha(num):\n assert(0 <= num < 4 ** 3)\n d = {0:'A', 1:'T', 2:'G', 3:'C'}\n L = []\n for i in range(2, -1, -1):\n q = num // pow(4, i)\n L.append(d[q])\n num -= pow(4, i) * q\n return ''.join(L)", "title": "" }, { "docid": "13df62688050492031ded5173073e416", "score": "0.6101233", "text": "def alphaToKey(char, keyAlpha, basicAlpha):\r\n\t#find the location of char in normal alphabet and gets char at that index \r\n\t#\tin the key alphabet\r\n\treturn keyAlpha[basicAlpha.index(char)]", "title": "" }, { "docid": "49d110ddd1c18a7a52bbd47290dc1d45", "score": "0.60766864", "text": "def toGoatLatin(self, S: str) -> str:\r\n words = S.split(\" \")\r\n vowels = {\"a\", \"e\", \"i\", \"u\", \"o\", \"A\", \"O\", \"I\", \"U\", \"E\"}\r\n added_chars = \"ma\"\r\n for i in range(len(words)):\r\n first_char = words[i][0]\r\n if first_char in vowels:\r\n words[i] = words[i] + added_chars + (\"a\"*(i+1))\r\n else:\r\n words[i] = words[i][1:] + first_char + added_chars + (\"a\"*(i+1))\r\n return \" \".join(words)", "title": "" }, { "docid": "fde409a22554b8c03ea9502c7e2bf5b7", "score": "0.60583836", "text": "def translate(letter):\n if letter.isalpha() and ord(letter) < 121:\n letter = chr(ord(letter) + 2)\n elif letter.isalpha():\n letter = chr(ord(letter) - 24)\n return letter", "title": "" }, { "docid": "50ddc4e6971204003a64c8cb09c52ac6", "score": "0.60517085", "text": "def Base64Alphabet(alphabet: str) -> Alphabet:\n return Alphabet(alphabet=alphabet, base=64)", "title": "" }, { "docid": "e2d2942f8d8382851775ed128cce41d2", "score": "0.6013476", "text": "def solution(data):\n\t\treturn data.replace('A','Z')", "title": "" }, { "docid": "c266200d1dfad917a6d030132cd1f9bf", "score": "0.5996477", "text": "def alphabet(text: str) -> str:\n pattern = re.compile('[^a-zA-Z]+')\n return pattern.sub('', text)", "title": "" }, { "docid": "a9c647f0511caec81f62ab00ef9278b9", "score": "0.599026", "text": "def __alphabetical_value(word):\n return sum(ord(c) - 
ord('A') + 1 for c in word.upper())", "title": "" }, { "docid": "7e191bd958c21d5b8882f5d5b0707214", "score": "0.5963385", "text": "def alphas(cls):\n return \"\".join(filter(str.isalpha, cls._chars_for_ranges))", "title": "" }, { "docid": "f350e286865b0374ea750c30cc28d248", "score": "0.59548616", "text": "def alphabet_ascii(phred, ascii):\n alphabet = None\n\n if phred == \"33\": #check for phred format\n alphabet = {char:value-33 for value,char in enumerate(ascii,33) if value < 74} #make dictionary w ascii-character + phred score\n\n elif phred == \"64\":\n alphabet = {char:value-64 for value,char in enumerate(ascii,33) if value > 63 and value < 105} #make dictionary w ascii-character + phred score\n\n return alphabet", "title": "" }, { "docid": "90df8013836b591f9d0508714806d1de", "score": "0.59385353", "text": "def encode(acid):\n if acid == \"G\":\n return \"C\"\n elif acid == \"C\":\n return \"G\"\n elif acid == \"T\":\n return \"A\"\n elif acid == \"A\":\n return \"U\"", "title": "" }, { "docid": "5d974774a8212e2dda3632d6bc5eaae3", "score": "0.59278464", "text": "def fix_alphabet(self, s):\n fix_positions = [\n pos for pos, char in enumerate(s) if char not in ALPHABET\n ]\n self.chars_fixed = len(fix_positions)\n [s.__setitem__(pos, \"X\") for pos in fix_positions]\n return s", "title": "" }, { "docid": "68a85440c8e3f96ffa69ea60e8aede4b", "score": "0.5921383", "text": "def _alpha2FromNum(num, upperCase=True):\n if num <= 0:\n return ''\n result = ''\n charPos = (num - 1) % 26\n char = chr(charPos + ord('A'))\n qty = (num - 1) // 26 + 2\n result = char * qty\n if not upperCase:\n result = result.lower()\n return result", "title": "" }, { "docid": "25936169861d2cef2fb63fba587afc37", "score": "0.59173596", "text": "def get_alphabet(self):\r\n\r\n # Sort the dictionary by the keys. 
Keeping the key/value pairs here\r\n # for debugging purposes\r\n sorted_alphabet = [(k,self.dictionary[k]) for k in sorted(\r\n self.dictionary, key=self.dictionary.get, reverse=True)]\r\n\r\n # Generate the final alphabet string\r\n count = 0\r\n final_alphabet = ''\r\n for item in sorted_alphabet:\r\n # Bail out if we have grabbed the N most common items so far\r\n if count >= self.alphabet_size:\r\n return final_alphabet\r\n\r\n # Append item to our final alphabet\r\n final_alphabet += item[0]\r\n count += 1\r\n\r\n return final_alphabet", "title": "" }, { "docid": "af8316d4bf25d92a3d8eb69f1106bfa2", "score": "0.5908195", "text": "def LLAAAM_2_zzaaam(nuc):\n newnuc = \"\"\n nucstr = nuc.upper()\n nucstr = nucstr.strip('-')\n\n anum = nucstr.strip('ABCDEFGHIJKLMNOPQRSTUVWXYZ-')\n anum = int(anum)\n\n if nucstr[-1] == \"M\":\n newnuc = (10*anum) + 1\n elif nucstr[-1] in [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]:\n newnuc = (10*anum)\n else:\n raise NotANuclide(nucstr, newnuc)\n\n LL = nucstr[:-1].strip('0123456789-')\n\n if LL in LLzz.keys():\n newnuc = (LLzz[LL]*10000) + newnuc\n else:\n newnuc = \"zz{0}\".format(newnuc)\n raise NotANuclide(nucstr, newnuc)\n\n return newnuc", "title": "" }, { "docid": "3b78d44bdbe4197c82daa4d574310d56", "score": "0.5903307", "text": "def alphabet() -> List[str]:\n return [chr(i) for i in range(97, 123)]", "title": "" }, { "docid": "1a91018dc38554fd83907ad8cad34e61", "score": "0.589358", "text": "def translate(string):\n new_string = \"\"\n for letter in string:\n if letter in not_letter:\n new_string += letter\n else:\n new_string += chr((ord(letter)+2)%123) if (ord(letter)+2)%123> 96 else chr( ( (ord(letter)+2)%123 )+97) \n return new_string", "title": "" }, { "docid": "08f158281d4c5f39bbd8a906ac885964", "score": "0.5883965", "text": "def characterordering():\n nonletters = [tuple(c for c in r''' !\"#$%&'()*+,-./0123456789:;<=>?@[\\]^_''')]\n letters = [\n (\"A\", \"a\", \"à\", \"á\", \"â\"),\n (\"B\", \"b\"),\n (\"C\", \"c\"),\n (\"D\", \"d\"),\n (\"E\", \"e\", \"é\"),\n (\"F\", \"f\"),\n (\"G\", \"g\"),\n (\"H\", \"h\"),\n (\"I\", \"i\", \"ì\", \"í\", \"î\"),\n (\"J\", \"j\"),\n (\"K\", \"k\"),\n (\"L\", \"l\"),\n (\"M\", \"m\"),\n (\"N\", \"n\"),\n (\"O\", \"o\", \"ò\", \"ó\", \"ô\"),\n (\"P\", \"p\"),\n (\"Q\", \"q\"),\n (\"R\", \"r\"),\n (\"S\", \"s\"),\n (\"T\", \"t\"),\n (\"U\", \"u\", \"ú\"),\n (\"V\", \"v\"),\n (\"W\", \"w\"),\n (\"X\", \"x\"),\n (\"Y\", \"y\"),\n (\"Z\", \"z\"),\n ]\n return nonletters + letters", "title": "" }, { "docid": "fbd62138ffbb9b4f72ca691824ff6616", "score": "0.586726", "text": "def encrypt(self, text):\n text = text.upper()\n self.output = []\n for letter in text:\n if letter in string.ascii_uppercase:\n letters = (26 - (ord(letter) - ord(\"A\"))) + ord(\"A\")-1\n self.output.append(chr(letters))\n elif letter in string.ascii_lowercase:\n letters = (26 - (ord(letter) - ord(\"a\"))) + ord(\"a\")-1\n self.output.append(chr(letters))\n else:\n self.output.append(letter)\n return \"\".join(self.output)", "title": "" }, { "docid": "652b10d074f7ddf3e9ffccdbda99db2e", "score": "0.58666146", "text": "def convert_phred(letter):\r\n return ord(letter)-33", "title": "" }, { "docid": "b6515fe69ec0e4542a1c28cd804cb834", "score": "0.58603925", "text": "def invertedKeyAlpha(generator, original='abcdefghijklmnopqrstuvwxyz'):\r\n\tkey = keyAlphabet(generator, original)\r\n\tprint(key)\r\n\tnewKey = ''\r\n\tfor char in original:\r\n\t\tprint(char)\r\n\t\tindex = key.find(char)\r\n\t\tnewKey = newKey + 
original[index]\r\n\r\n\treturn newKey", "title": "" }, { "docid": "1efcd62500bcb071d3b7f5c082c7c054", "score": "0.5858468", "text": "def __convert_char__(self,key):\n\t\tif (key\t>= K_a and key <= K_z):\t\t\n\t\t\treturn self.hash[self.shifted^self.mayus].get(key,'')\n\t\telse:\n\t\t\treturn self.hash[self.shifted].get(key,'')", "title": "" }, { "docid": "6c34a6c3ef2c610fa0bfa9ff5d4fa384", "score": "0.5857833", "text": "def get_codon_alphabet(codon_table, gap_char=\"-\"):\n letters = list(codon_table.forward_table.keys())\n letters.extend(codon_table.stop_codons)\n letters.extend(codon_table.start_codons)\n if gap_char:\n letters.append(gap_char * 3)\n generic_codon_alphabet = CodonAlphabet()\n generic_codon_alphabet.letters = letters\n generic_codon_alphabet.gap_char = '-'\n generic_codon_alphabet.names = codon_table.names\n return generic_codon_alphabet", "title": "" }, { "docid": "4d51b9e95812845170b1b0cfac295825", "score": "0.5857137", "text": "def _alphaFromNum(num, upperCase=True):\n if num <= 0:\n return ''\n result = ''\n charPos = (num - 1) % 26\n char = chr(charPos + ord('A'))\n qty = (num - 1) // 26 + 1\n result = char * qty\n if not upperCase:\n result = result.lower()\n return result", "title": "" }, { "docid": "a7181e6deff2d8e380e7b7ed77b6f19d", "score": "0.5849222", "text": "def best_letter_for_pets():\n import string\n the_alphabet = string.ascii_lowercase\n pass", "title": "" }, { "docid": "5387ca6a8e7d62c8456b50323c74b634", "score": "0.58470005", "text": "def apply_rules(letter):\n if letter == \"a\":\n return \"a\"\n elif letter == \"b\":\n return \"b\"\n elif letter == \"o\":\n return \"o\"\n else:\n return letter", "title": "" }, { "docid": "b4535d33f6eebdb7982382c6a0d31de4", "score": "0.58462757", "text": "def _encode_function(self, original_char):\r\n start_index = int(len(self._alphabet) / 2)\r\n # put all the ASCII value of [0-9a-zA_Z] in the list\r\n alphabet_ord = list(range(48, 58)) + list(range(97, 123)) + list(range(65, 91))\r\n if ord(original_char) not in alphabet_ord:\r\n return\r\n\r\n if ord(original_char) in alphabet_ord:\r\n for ord_value in alphabet_ord:\r\n if ord_value == ord(original_char):\r\n return self._alphabet[start_index]\r\n\r\n if start_index == len(self._alphabet) - 1:\r\n start_index = 0\r\n else:\r\n start_index += 1", "title": "" }, { "docid": "c975257de83c65375aed9b2aaf2f8d66", "score": "0.5837203", "text": "def test_convert__ab_cd_2(self):\n out = ZigZagConversion().convert(\"abcd\", 2)\n self.assertEqual(\"acbd\", out)", "title": "" }, { "docid": "0eef186184380c3bb7881d3e4ecdbb24", "score": "0.5818062", "text": "def convert_phred(letter):\n return ord(letter) - 33", "title": "" }, { "docid": "8f2d74be145179f409636619a8a3bef5", "score": "0.5818012", "text": "def lower_upper_ascii(st):\n result = ''\n for letters in xrange(len(st)):\n if ord(st[letters]) >= 97 and ord(st[letters]) <= 122:\n result += chr(ord(st[letters])-32) # changing lower ASCII orders into capitals\n else:\n result += st[letters]\n return result", "title": "" }, { "docid": "2595013cc95b5db5b650330c723790bd", "score": "0.5810806", "text": "def compress_hex_to_alphanumeric(hex):\n # Don't be tempted to add other characters in here. 
\n return compress_hex(hex)", "title": "" }, { "docid": "7d66db25a60d47a0ebc2b931faceacf3", "score": "0.58078265", "text": "def convert_ascii(kmer):\n output = ''.join(bin(ascii_code)[2:] for ascii_code in [ord(character) for character in kmer])\n return output", "title": "" }, { "docid": "9a327d55e17dcde9cd6c05d6a57c259b", "score": "0.5803808", "text": "def encode_bech32(nums):\n result = \"\"\n for n in nums:\n result += BECH32_ALPHABET[n]\n return result", "title": "" }, { "docid": "e8836a8b395f74bac935925c70f481e4", "score": "0.57980883", "text": "def num_2_alpha(integer):\n\n return chr(97+integer-1)", "title": "" }, { "docid": "c57aad4fa2acfe9bfd1c6f1ab9168849", "score": "0.5784755", "text": "def balrot(stringy):\n alphabet_in = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n alphabet_ot = \"zyxwvutsrqponmlkjihgfedcbaZYXWVUTSRQPONMLKJIHGFEDCBA\"\n return generate_map(alphabet_in, alphabet_ot)(stringy)", "title": "" }, { "docid": "10f9ecf1ab83a4024c314a39cbcc4839", "score": "0.5780989", "text": "def convert_phred(letter):\n return ord(letter)-33", "title": "" }, { "docid": "daa1a9663e75c005872e121103453b2b", "score": "0.5780459", "text": "def pig_latin():\n return", "title": "" }, { "docid": "d1eb47f1aee2c20982a231b01361bd07", "score": "0.57630765", "text": "def enbase(n, base=36, alphabet=None):\n alphabet = alphabet or ALPHABET\n\n is_negative = (n < 0)\n if is_negative:\n n *= -1\n\n result = []\n if(n == 0):\n result.append('0')\n else:\n while n:\n result.append(ALPHABET[n % base])\n n /= base\n \n if is_negative:\n result.append('-')\n\n result.reverse()\n\n return ''.join(result)", "title": "" }, { "docid": "047e131bdbf35fc27a53ac7c92de9024", "score": "0.575871", "text": "def text_to_char_array(original, alphabet):\n return np.asarray([alphabet.label_from_string(c) for c in original])", "title": "" }, { "docid": "41f51f737dd5cf31783322987c1c6b0c", "score": "0.5750759", "text": "def limpa_bits(b):\n limpo = []\n for a in b:\n if a.isalpha():\n limpo.append(a)\n return ''.join(limpo)", "title": "" }, { "docid": "a0b244d387d653119b36f75daafc6a62", "score": "0.5749298", "text": "def getEncodedStr(toencode, alphabet):\r\n temp = []\r\n for c in toencode:\r\n temp.append(alphabet.index(c))\r\n return temp", "title": "" }, { "docid": "cc2f16b9649ef345b3f96dc6da3e06d6", "score": "0.57470995", "text": "def pig_latin_original(w):\n if starts_with_a_vowel(w):\n return w + 'ay'\n return pig_latin_original(rest(w) + first(w))", "title": "" }, { "docid": "99593384b5bad851cc56493eefc51a5a", "score": "0.57458484", "text": "def convert_pybites_chars(text):\n return ''.join(letter.swapcase() if letter.lower() in PYBITES else letter for letter in text)", "title": "" }, { "docid": "a3c8aab6dce4efb268800e067cbafc5d", "score": "0.57407063", "text": "def smallAlphabet(k, sigma_base=\"a\"):\n Sigma = []\n if k >= 52:\n raise common.CFGterminalError(k)\n lim = min(26, k)\n for i in range(lim):\n Sigma.append(chr(ord(sigma_base) + i))\n if k >= 26:\n sigma_base = 'A'\n for i in range(k - lim):\n Sigma.append(chr(ord(sigma_base) + i))\n return Sigma", "title": "" }, { "docid": "22744850b5532d30cc65772223afedd0", "score": "0.5712574", "text": "def caesar(text, factor, cyphNums=True, standardFormat=True, incNums=True):\r\n\t#Requires: filterString, isAlphaNum\r\n\t#Can be treated as special case of vigenere\r\n\tif standardFormat:\r\n\t\ttext = filterString(text, incNums)\r\n\r\n\tcypherText = ''\r\n\tfor char in text:\r\n\t\tif isAlphaNum(char, cyphNums):\r\n\t\t\tcypherText = cypherText + 
shift(char, factor)\r\n\t\telse:\r\n\t\t\tcypherText = cypherText + char\r\n\r\n\treturn cypherText", "title": "" }, { "docid": "601db3ef8c749666c0b1b10bbd59f536", "score": "0.57022876", "text": "def row2letters(i):\n r = ''\n i = int(i)\n while i >= 0:\n r = _alpha[i % len(_alpha)] + r\n i = i // len(_alpha) - 1\n return r", "title": "" }, { "docid": "62f73f60cf3ce8ef25d25125c4493777", "score": "0.57014704", "text": "def numeric_to_alpha(num_list: List[int]) -> str:\n # make sure only [A-Za-z] in the `num_list`\n for num in num_list:\n if num <= 0 and num > 52:\n raise TypeError(f\"num_list is invalid: {repr(num_list)}\")\n # return ans\n ret = \"\"\n # convert\n for num in num_list:\n if num % 2 == 1:\n # [A-Z]\n offset = int((num - 1) / 2)\n ret += chr(ord(\"A\") + offset)\n else:\n # [a-z]\n offset = int((num - 1) / 2)\n ret += chr(ord(\"a\") + offset)\n return ret", "title": "" }, { "docid": "3ac78b47c550ca3dd97a1d045399855d", "score": "0.56963533", "text": "def alpha_score(upper_letters):\n return sum(map(lambda l: 1 + ord(l) - ord('A'), upper_letters))", "title": "" }, { "docid": "1ec4958ab92675afb6a82acaa3ce32db", "score": "0.5689983", "text": "def convert_phred(letter):\n return(ord(letter)-33)", "title": "" }, { "docid": "6705d23512b1796acf0c4e643cba418c", "score": "0.56737393", "text": "def caesar(input_text, input_shift, input_direction):\n result_text = \"\"\n for character in input_text:\n if character not in alphabet:\n result_text += character\n else:\n index = alphabet.index(character)\n if input_direction == \"encode\":\n result_text += alphabet[index - input_shift]\n elif input_direction == \"decode\":\n new_index = (index + input_shift) % 26\n result_text += alphabet[new_index]\n print(f\"The {input_direction}d text is: {result_text}\")", "title": "" }, { "docid": "b6f978e759901a59eeea4d67d311c865", "score": "0.56730425", "text": "def normalize(self,key):\n return util.latin1_to_ascii(key[:4].upper())", "title": "" }, { "docid": "3636525d797c5fcbb591ad44ba1f902f", "score": "0.56641513", "text": "def testAlphabet(self):\n read = AAReadWithX(\"id\", \"ATFDX\")\n expected = set(\"ACDEFGHIKLMNPQRSTVWXY\")\n self.assertEqual(expected, read.ALPHABET)", "title": "" }, { "docid": "6b6923971232dde2670186e4ec432fca", "score": "0.56637377", "text": "def scramble(base, hexa):\n\tresult = bytearray()\n\tfor c in hexa:\n\t\tresult.append(base.index(c))\n\treturn bytes(result)", "title": "" }, { "docid": "494443422edcde38e738fc45a093d36f", "score": "0.5661298", "text": "def a(self, i):\n\n return self.u(i * 127)[self.long_term_key[\"alpha\"]]", "title": "" }, { "docid": "91e84ab9939e0a07b52f537ca3c930f3", "score": "0.5658519", "text": "def number_to_letters(number: int) -> str:\n assert number >= 0, \"number must be a positive integer\"\n quotient, remainder = divmod(number, 26)\n return quotient * \"Z\" + chr(65 + remainder)", "title": "" }, { "docid": "2161666f7fd2168aa32820f86b5d2e24", "score": "0.56579715", "text": "def string_decode(c):\n global alphabet\n return \"\".join([alphabet[k] for k in digits(c, 100, rev=True)])", "title": "" }, { "docid": "7893fdf943d2ac51da094111eea9da9e", "score": "0.56515676", "text": "def encrypt_caesar(plaintext):\n # PUT YOUR CODE HERE\n ciphertext = ''\n step = 3\n for s in plaintext:\n if ('A' <= s <= 'Z') or ('a' <= s <= 'z'):\n ss = ord(s) + step\n if (chr(ss) > 'Z') and (chr(ss) < 'a'):\n ss -= 26\n elif chr(ss) > 'z':\n ss -= 26\n ciphertext += chr(ss)\n else:\n ciphertext += s\n return ciphertext", "title": "" }, { "docid": 
"1adf711a461e56853aefd3a90ece1720", "score": "0.56415385", "text": "def test_parseAlphabetNotation_simple(self):\n actualValue = interface.parseAlphabetNotation(\"a3b3\")\n self.assertEqual(actualValue, \"1323\")", "title": "" }, { "docid": "66c422ee5ba14e0c77d5cc01e445c967", "score": "0.5638256", "text": "def get_ascii_letters(asciiarr):\n asciiarr = get_ascii_letters_nums(asciiarr)\n asciiarr = get_ascii_nonnum(asciiarr)\n\n return asciiarr", "title": "" }, { "docid": "e5add847132a72da81bd71f16b3d0986", "score": "0.56355613", "text": "def _get_alphabet(self, position):\n \n position = makeNonnegInt(position)\n \n if position in self._alphabets:\n result = self._alphabets[position]\n else:\n result = self._alphabets[self.DEFAULT_KEY]\n #end if\n \n return result", "title": "" }, { "docid": "bf7f5ca786d823be66887432edb598be", "score": "0.5627248", "text": "def __init__(self, text, alphabet=None, is_text=True):\n self.original = text\n if alphabet is None:\n self.alphabet = UPCASE_ALPHABET\n else:\n self.alphabet = alphabet\n self.is_text = is_text\n self.alphatext, self.offsets = self.alphabet.to_alphabet(text)", "title": "" }, { "docid": "f0afb83fd55f605433c4374706e2c2d5", "score": "0.56133485", "text": "def keyAlphabet(generator, original= 'abcdefghijklmnopqrstuvwxyz'):\r\n\tif len(generator) > len(original):\r\n\t\tgenerator = generator[:len(original)]\r\n\t#idk if i actually need this part ^^^\r\n\t#Im pretty sure I dont but I dont feel like testing it\r\n\r\n\t#loop goes through letters in generator and removes from alphabet\r\n\tfor char in generator:\r\n\t\t#str.index(substr) gives error if substring not found. try except handles it\r\n\t\ttry:\r\n\t\t\ti = original.index(char)\r\n\t\t\t#replace with .find(char)?\r\n\t\texcept:\r\n\t\t\t#continue to next iteration of loop\r\n\t\t\tcontinue\r\n\t\t#if index found in original, use it to split into two substrings and\r\n\t\t#\tcombine wihtout the letter\r\n\t\t#\talso pretty sure there a string method for this\r\n\t\t#\tstr.replace(old, new)\r\n\t\toriginal = original[:i] + original[i+1:]\r\n\treturn generator + original", "title": "" }, { "docid": "5017bd0820775ce9dad6f057cb5a853f", "score": "0.5610029", "text": "def atbash(self, text, num):\r\n\r\n # Encrypting list from alphabet and affine_set list, packing together\r\n value_co = self.pack(list(self.alphabet), list(reversed(self.alphabet)))\r\n # Encrypting list from alphabet and affine_set list, packing together inverse order\r\n value_de = self.pack(list(reversed(self.alphabet)), list(self.alphabet))\r\n out = []\r\n words = text.split(' ')\r\n if num:\r\n cipher = dict(value_co) # Sets cipher to the created dict\r\n else:\r\n cipher = dict(value_de) # Sets cipher to the created dict\r\n for word in words:\r\n out.append(self.encrypt_decrypt(word, cipher))\r\n return ' '.join(out)", "title": "" }, { "docid": "e0e06ef9f7185199bfa9e45d0d6eace3", "score": "0.5607488", "text": "def generate_letter(self, history):\n \n if not history in self.lm:\n return \"~\"\n\n letters, probabilities = self.unzip(self.lm[history])\n\n i = np.random.choice(letters, p=probabilities)\n\n return i", "title": "" }, { "docid": "37e0ae281bfe0d80904798fff4d4ea5a", "score": "0.5603537", "text": "def int_to_alpha(num):\n letters = string.ascii_lowercase\n int_map = {i + 1: value for i, value in enumerate(letters)}\n double_letters = [\"{0}{0}\".format(letter) for letter in letters]\n double_range = list(range(27, 53))\n double_map = dict(zip(double_range, double_letters))\n int_map.update(double_map)\n 
return int_map.get(num, None)", "title": "" }, { "docid": "37eb46d0029b9df8bd48b9574b31dae5", "score": "0.55931205", "text": "def alpha_to_za(alpha, sig_digits=-5):\n return round_h_up(st.norm.ppf(1 - alpha), sig_digits)", "title": "" }, { "docid": "94e351375895b18fe0e1acd48067a858", "score": "0.5589661", "text": "def alpha(self):\n\n return ZZ(1)/ZZ(2) * (ZZ(1)/ZZ(2) - ZZ(1)/self._n)", "title": "" }, { "docid": "1d36e49ee837bdaca0087d0e5502621d", "score": "0.5589386", "text": "def _get_alphabet(self):\n \n return self._alphabet[:]", "title": "" }, { "docid": "781d01ba09f1ee53e62a2f6d7b6c7e59", "score": "0.5584785", "text": "def encode(message, output = \"\"):\r\n for letter in message:\r\n if letter.isalpha() and ord(letter) + 3 <= Encode.find_range(letter)[1]:\r\n output += chr(ord(letter) + 3)\r\n elif letter.isalpha():\r\n output += chr(ord(letter) + 2 - Encode.find_range(letter)[1] + Encode.find_range(letter)[0])\r\n else:\r\n output += letter\r\n return output", "title": "" }, { "docid": "07f7884ba532c464ea86817ae8e66644", "score": "0.5577965", "text": "def _letters_set(self, word):\n letters = tuple(word.strip())\n return ''.join(sorted(list(letters)))", "title": "" }, { "docid": "4979533e980528dc19ad41e957ebc9fa", "score": "0.5568013", "text": "def encodeAlphaNumeric(self, string):\n print(\"Encoding \" + str(len(string)) + \" character string for alpha numeric display\")\n for ch in string:\n binstr = numdict[ch]\n binstr = self.mapAlphaBinaryToPinOrder(binstr)\n self.encodeBinaryString(binstr)", "title": "" }, { "docid": "b63b6270a124954a8503964a38e450d4", "score": "0.5565568", "text": "def char_in_alphabet(char):\n return ord(char) - ord('A') + 1", "title": "" }, { "docid": "c9b6dea3b5dfb807fe01a4fc1dd98dcd", "score": "0.5557643", "text": "def base62_encode(num, alphabet=ALPHABET):\n if (num == 0):\n return alphabet[0]\n arr = []\n base = len(alphabet)\n while num:\n rem = num % base\n num = num // base\n arr.append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)", "title": "" }, { "docid": "c12fa2ebc6ab8cea6cd93fe0623a9553", "score": "0.55563873", "text": "def __init__(self, alphabet):\r\n self.alphabet = alphabet\r\n self.base = len(self.alphabet)", "title": "" }, { "docid": "36eb71076f5b01aa93e2c9eaa6fc581e", "score": "0.55526173", "text": "def _base_encode(integer):\n length = len(ShrunkClient.ALPHABET)\n result = []\n while integer != 0:\n result.append(ShrunkClient.ALPHABET[integer % length])\n integer //= length\n\n return \"\".join(reversed(result))", "title": "" }, { "docid": "5cbf18e3eab1669c194c607847723bce", "score": "0.5549501", "text": "def b36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):\n if not isinstance(number, (int, long)):\n raise TypeError('number must be an integer')\n\n base36 = ''\n sign = ''\n\n if number < 0:\n sign = '-'\n number = -number\n\n if 0 <= number < len(alphabet):\n return sign + alphabet[number]\n\n while number != 0:\n number, i = divmod(number, len(alphabet))\n base36 = alphabet[i] + base36\n\n return sign + base36", "title": "" }, { "docid": "826b4e9f409be503cb2d65a4f372049c", "score": "0.5537102", "text": "def get_default():\n result = AlphabetTable()\n result.characters = [\n '\\u0026', '\\u0040', '\\u0041', '\\u0042', '\\u0043', '\\u0044', '\\u0045',\n '\\u0046', '\\u0047', '\\u0048', '\\u0049', '\\u004A', '\\u004B', '\\u004C',\n '\\u004D', '\\u004E', '\\u004F', '\\u0050', '\\u0051', '\\u0052', '\\u0053',\n '\\u0054', '\\u0055', '\\u0056', '\\u0057', '\\u0058', '\\u0059', '\\u005A',\n '\\u0061', '\\u0062', 
'\\u0063', '\\u0064', '\\u0065', '\\u0066', '\\u0067',\n '\\u0068', '\\u0069', '\\u006A', '\\u006B', '\\u006C', '\\u006D', '\\u006E',\n '\\u006F', '\\u0070', '\\u0071', '\\u0072', '\\u0073', '\\u0074', '\\u0075',\n '\\u0076', '\\u0077', '\\u0078', '\\u0079', '\\u007A', '\\u0098', '\\u00A0',\n '\\u00A4', '\\u00A6', '\\u00A7', '\\u00A9', '\\u00AB', '\\u00AC', '\\u00AD',\n '\\u00AE', '\\u00B0', '\\u00B1', '\\u00B5', '\\u00B6', '\\u00B7', '\\u00BB',\n '\\u0401', '\\u0402', '\\u0403', '\\u0404', '\\u0405', '\\u0406', '\\u0407',\n '\\u0408', '\\u0409', '\\u040A', '\\u040B', '\\u040C', '\\u040E', '\\u040F',\n '\\u0410', '\\u0411', '\\u0412', '\\u0413', '\\u0414', '\\u0415', '\\u0416',\n '\\u0417', '\\u0418', '\\u0419', '\\u041A', '\\u041B', '\\u041C', '\\u041D',\n '\\u041E', '\\u041F', '\\u0420', '\\u0421', '\\u0422', '\\u0423', '\\u0424',\n '\\u0425', '\\u0426', '\\u0427', '\\u0428', '\\u0429', '\\u042A', '\\u042B',\n '\\u042C', '\\u042D', '\\u042E', '\\u042F', '\\u0430', '\\u0431', '\\u0432',\n '\\u0433', '\\u0434', '\\u0435', '\\u0436', '\\u0437', '\\u0438', '\\u0439',\n '\\u043A', '\\u043B', '\\u043C', '\\u043D', '\\u043E', '\\u043F', '\\u0440',\n '\\u0441', '\\u0442', '\\u0443', '\\u0444', '\\u0445', '\\u0446', '\\u0447',\n '\\u0448', '\\u0449', '\\u044A', '\\u044B', '\\u044C', '\\u044D', '\\u044E',\n '\\u044F', '\\u0451', '\\u0452', '\\u0453', '\\u0454', '\\u0455', '\\u0456',\n '\\u0457', '\\u0458', '\\u0459', '\\u045A', '\\u045B', '\\u045C', '\\u045E',\n '\\u045F', '\\u0490', '\\u0491', '\\u2013', '\\u2014', '\\u2018', '\\u2019',\n '\\u201A', '\\u201C', '\\u201D', '\\u201E', '\\u2020', '\\u2021', '\\u2022',\n '\\u2026', '\\u2030', '\\u2039', '\\u203A', '\\u20AC', '\\u2116', '\\u2122'\n ]\n return result", "title": "" }, { "docid": "f7f5f535be2553018ad9f0e502510372", "score": "0.55306566", "text": "def _put_alphabet(self):\n for k, v in self.dict_alphabet.iteritems():\n self.bst.put(k, v)", "title": "" }, { "docid": "571ddcd374db06c2a250babfb4c5fcad", "score": "0.5526667", "text": "def sort_into_alphabets(alph_char_idxs, alph_idxs, data, do_plot):\n alphabets = []\n for i in np.unique(alph_idxs):\n alph_gather_idxs = np.nonzero(alph_idxs == i)\n alphabet_i = data[alph_gather_idxs]\n char_idxs_for_alphabet_i = alph_char_idxs[alph_gather_idxs]\n\n characters_for_alphabet_i = []\n for j in np.sort(np.unique(char_idxs_for_alphabet_i)):\n char_j_gather_idxs = np.nonzero(char_idxs_for_alphabet_i == j)\n character_ij = alphabet_i[\n char_j_gather_idxs] # array of images that represent jth character of ith alphabet\n characters_for_alphabet_i.append(character_ij)\n\n alphabets.append(characters_for_alphabet_i)\n\n # sort in decreasing order of alphabet size\n alphabets.sort(reverse=True, key=lambda x: len(x))\n\n # plot the alphabets so I can spot check for correctness (alphabet 19 is english)\n if do_plot:\n plot_all_omniglot_alphabets(alphabets)\n\n return alphabets", "title": "" }, { "docid": "2cf16497b6d68e0c2896569f5e7143fd", "score": "0.55263925", "text": "def generate_map(alphabet_in, alphabet_out):\n mapping = make_mapping(alphabet_in, alphabet_out)\n def mapper(stringy):\n return ''.join((mapping[char] if char in mapping else char for char in stringy))\n return mapper", "title": "" }, { "docid": "facb43aaab613a159e184ec9a74557c8", "score": "0.5524256", "text": "def generate_cipher_alpha():\n pre_random = list('abcdefghijklmnopqrstuvwxyz')\n reference = list('abcdefghijklmnopqrstuvwxyz')\n ciphered = {}\n index = 0\n while len(pre_random) > 0:\n charac = secrets.choice(pre_random)\n 
ciphered[reference[index]] = charac\n pre_random.remove(charac)\n index += 1\n return ciphered", "title": "" }, { "docid": "eae7117fa6699b96834eacd394caa88a", "score": "0.55226433", "text": "def make_alpha(cls, str_val):\n str_val.value = gxapi_cy.WrapSTR._make_alpha(GXContext._get_tls_geo(), str_val.value.encode())", "title": "" } ]
596a49e2f1dddcc2a18c5e3d2fc6d484
r"""Converts `hparams` value into a list.
[ { "docid": "1b0dd68ab6f3b90b382f12a234e76abd", "score": "0.7053691", "text": "def _to_list(value: Union[Dict[str, Any], List, Tuple, int], name=None,\n list_length=None):\n if not isinstance(value, (list, tuple)):\n if list_length is not None:\n value = [value] * list_length\n else:\n value = [value]\n if list_length is not None and len(value) != list_length:\n name = '' if name is None else name\n raise ValueError(\"hparams '%s' must be a list of length %d\"\n % (name, list_length))\n return value", "title": "" } ]
[ { "docid": "b272f8df421d7be9da3b018912810091", "score": "0.6540132", "text": "def params_to_lists(params):\n if type(params) != list: raise TypeError('Incorrect data type: function params_to_list needs a list of dictionaries.')\n listofparams = [params[0]['yoffset']] # yoffset should be the same for all functions, so just pass it from the first one.\n for peak in params:\n listofparams.append(peak['ymax'])\n listofparams.append(peak['halfwidth'])\n listofparams.append(peak['x0'])\n return listofparams", "title": "" }, { "docid": "ba548336556dd148f59301d076015e6f", "score": "0.633654", "text": "def get_param_vals(self):\n\n # In this case, trivial as there is only one parameter set and it is not\n # fixed, but always free. Make sure there is a copy\n return list(self._param.value)", "title": "" }, { "docid": "095a523031d91f381b398f246f3d1a57", "score": "0.61254", "text": "def paramstr2list(param, delim=\";\"):\r\n if param:\r\n return param.split(delim)\r\n else:\r\n return []", "title": "" }, { "docid": "737c511359868368df0139d7034ec329", "score": "0.6073573", "text": "def do_list(value):\r\n return list(value)", "title": "" }, { "docid": "bd1119cfcac2468b9297ef98d5a46658", "score": "0.6066907", "text": "def convert_value(self, params):\n result = []\n for val in params:\n try:\n result.append(int(val))\n except ValueError:\n result.append(float(val))\n return result", "title": "" }, { "docid": "4389599b3f8ed1a90a971faa807042ba", "score": "0.6040302", "text": "def _to_list(self, value):\n if value is None:\n return []\n elif isinstance(value, list):\n return value\n else:\n return [value]", "title": "" }, { "docid": "64f33075945ec6a192b020ea68dd31ad", "score": "0.5956147", "text": "def get_as_list(parameter):\n if not isinstance(parameter, list):\n return [parameter]\n return parameter", "title": "" }, { "docid": "72ccb1263625e16052d1ebfd41e0e3dc", "score": "0.5933554", "text": "def _parse_parameter_list(self, env, params):\n if self._accept(lexer.Tag.VOID):\n return params\n else:\n params = self._parse_parameters(env, params)\n\n return params", "title": "" }, { "docid": "c3f298c08f930215da23b7717c69808c", "score": "0.5914609", "text": "def to_list(self, key: str) -> List[Any]:\n return self._get_value(key)", "title": "" }, { "docid": "623be0e6fa49255b0e5b4fadd87eb69e", "score": "0.5878307", "text": "def test_to_list(self):\n from simtk import unit\n\n p1 = BondHandler.BondType(\n smirks=\"[*:1]-[*:2]\",\n length=1.01 * unit.angstrom,\n k=5 * unit.kilocalorie_per_mole / unit.angstrom ** 2,\n )\n p2 = BondHandler.BondType(\n smirks=\"[*:1]=[*:2]\",\n length=1.02 * unit.angstrom,\n k=6 * unit.kilocalorie_per_mole / unit.angstrom ** 2,\n )\n p3 = BondHandler.BondType(\n smirks=\"[*:1]#[*:3]\",\n length=1.03 * unit.angstrom,\n k=7 * unit.kilocalorie_per_mole / unit.angstrom ** 2,\n )\n parameter_list = ParameterList([p1, p2, p3])\n ser_param_list = parameter_list.to_list()\n assert len(ser_param_list) == 3\n assert ser_param_list[0][\"length\"] == 1.01 * unit.angstrom", "title": "" }, { "docid": "f21282328e54f81211efa166abd9e806", "score": "0.58470404", "text": "def hrlist2list(hrlst):\n groups = re.split(r'[,; _]+', WHITESPACE_RE.sub('', hrlst))\n lst = []\n if not groups:\n return lst\n for group in groups:\n lst.extend(_hrgroup2list(group))\n return lst", "title": "" }, { "docid": "53d8be62dde988cd46f0327536e27581", "score": "0.58198225", "text": "def get_param_values(self, param):\n values = []\n for well in self.mp_dict:\n values.append(self.mp_dict[well][param])\n return values", 
"title": "" }, { "docid": "d77dd82888866121ac531a4bad1dfec3", "score": "0.5804736", "text": "def getParamList(self): \n return [k for k in self._parorder if self.params[k]]", "title": "" }, { "docid": "30d28e1762a5e4c0cc582f0e3480ae71", "score": "0.57985634", "text": "def param_list(self) -> List[str]:\n raise NotImplementedError()", "title": "" }, { "docid": "6472842853ce0b6bd2fde28d299ed257", "score": "0.57527894", "text": "def as_list(self, key):\n value = [key]\n if isinstance(value, list):\n return value * 10\n elif isinstance(value, basestring):\n return value.split(\",\")\n return [value] * 10", "title": "" }, { "docid": "44a45f6641c582b84069e16d7e96d934", "score": "0.5724839", "text": "def as_list(self, key):\r\n result = self[key]\r\n if isinstance(result, (tuple, list)):\r\n return list(result)\r\n return [result]", "title": "" }, { "docid": "5c455d35bba55c6f2c9790796b611dc5", "score": "0.5710305", "text": "def getParamList(self):\n list = _ordered_keys(self.params)\n # WARNING: Extending the list with the dispersion parameters\n list.extend(self.getDispParamList())\n return list", "title": "" }, { "docid": "30ce720b0270047b8c36ba17c2e38912", "score": "0.57049626", "text": "def parse_args_to_list(args: dict):\n arg_list = []\n for k, v in args.items():\n if v is not None:\n arg_list.append(str(k))\n arg_list.append(str(v))\n return arg_list", "title": "" }, { "docid": "ced664c9021cdc3349a88a4cd9e880a8", "score": "0.57020724", "text": "def _to_list(val):\n if isinstance(val, list):\n return val\n else:\n return [val]", "title": "" }, { "docid": "9b73c8d490874bd5c3e245d6bf6aa1af", "score": "0.5700072", "text": "def _get_param_list(params):\n param_list = []\n for param in re.split(r',[\\t ]+', params):\n match = re.match(r'^(\\w+)(?::(\\w+))?$', param)\n if not match:\n if match is None:\n raise FormatFileError(\n 'param or attribute cannot be empty'\n )\n else:\n raise FormatFileError('invalid param ' + match.group(0))\n\n try:\n param = RegexParam(match.group(1), match.group(2))\n except FormatFileError as e:\n raise e\n else:\n param_list.append(param)\n\n return param_list", "title": "" }, { "docid": "d0e76ca92af9244d5fea08551b0d4855", "score": "0.56976897", "text": "def split_params(params):\n return params.split(',')", "title": "" }, { "docid": "4aaca57afc1abb8136730898fbb080f3", "score": "0.5668608", "text": "def to_list(val):\n return val if type(val) is list else [val]", "title": "" }, { "docid": "a9e34ae0afa246f92aa71cfa3ca792ef", "score": "0.5661735", "text": "def get_value_as_list(self, dictionary, key):\n if key not in dictionary:\n return None\n value = dictionary[key]\n if not isinstance(value, list):\n return [value]\n else:\n return value", "title": "" }, { "docid": "501d5856935827faa60fcbcf5b5690ac", "score": "0.56588", "text": "def as_list(self, key):\n result = self[key]\n if isinstance(result, (tuple, list)):\n return list(result)\n return [result]", "title": "" }, { "docid": "cdfec53a967076f0b5a058dfd21171e3", "score": "0.56395", "text": "def to_list(value):\n if isinstance(value, list):\n return value\n else:\n return [value]", "title": "" }, { "docid": "64cc04666d0329ff9e444c97ab677f72", "score": "0.5637971", "text": "def getlist(self, key):\n value = self.get(key, [])\n if value is None or isinstance(value, (list, tuple)):\n return value\n else:\n return [value]", "title": "" }, { "docid": "13f81178e2d39feee0bdaabb0a8a2be2", "score": "0.5636445", "text": "def to_list(value):\n if not isinstance(value, (tuple, list)):\n value = [value]\n return value", 
"title": "" }, { "docid": "59e201b4d4ef43c93896a46b5541cdde", "score": "0.5629681", "text": "def _prepare_params(params):\n for (key, value) in params.items():\n if isinstance(value, list):\n params[key] = ','.join([item for item in value])\n return params", "title": "" }, { "docid": "e1c2639dd56f67fef78ed5798928a2a2", "score": "0.562698", "text": "def toList(value):\n if value is None:\n return []\n elif not isinstance(value, list):\n return [value]\n else:\n return value", "title": "" }, { "docid": "79b7a65abf0d67d60336e5769899447d", "score": "0.5618902", "text": "def convert_to_list(value):\n if type(value) is not list:\n result = []\n result.append(value)\n return result\n else:\n return value", "title": "" }, { "docid": "5bebd3b5fa32b107367c7f33ab8f14db", "score": "0.56010467", "text": "def convert_list_params(params: RequestParams) -> RequestParams:\n return {k: convert_csv_list(v) for k, v in params.items()}", "title": "" }, { "docid": "51fc7a863d65db61713cde7fa682c1c6", "score": "0.5594883", "text": "def _cast_list(definition: dict, value: list) -> list:\n return [cast(definition.get(\"member\"), v) for v in value]", "title": "" }, { "docid": "6a955edd149e6b2540eaf6fff028cd6a", "score": "0.5586332", "text": "def get_list(value):\n if value is None:\n return []\n if isinstance(value, list):\n return value\n return [value]", "title": "" }, { "docid": "54bd3992083cdca6cacb480e40e02e5c", "score": "0.5580285", "text": "def force_list(value):\n if not isinstance(value, list):\n# if ',' in value: return value.split(',')\n# if ';' in value: return value.split(';')\n return [value]\n return value", "title": "" }, { "docid": "53519ce258b7c48475d1f246a7dbe5c5", "score": "0.5525833", "text": "def get_params_list(self):\n \n self.update_params_list()\n \n return super().get_params_list(self.params_list)", "title": "" }, { "docid": "5e46afe2d0cc554d4cb8fdc811dda78e", "score": "0.5517112", "text": "def get_hyperparameters(self) -> list[Hyperparameter]:\n warnings.warn(\n \"Prefer using `list(space.values())` over `get_hyperparameters`\",\n DeprecationWarning,\n stacklevel=2,\n )\n return list(self._hyperparameters.values())", "title": "" }, { "docid": "d9d85e502e3c455aeacedf4495ea6fe3", "score": "0.5511135", "text": "def to_key_val_list(value):\r\n if value is None:\r\n return None\r\n\r\n if isinstance(value, (str, bytes, bool, int)):\r\n raise ValueError('cannot encode objects that are not 2-tuples')\r\n\r\n if isinstance(value, dict):\r\n value = value.items()\r\n\r\n return list(value)", "title": "" }, { "docid": "523cc22f3b64d3b28f6f2545488c91bb", "score": "0.54798394", "text": "def key_to_list(key):\n return [x.strip() for x in key.split(',')]", "title": "" }, { "docid": "840555ee349358d8a203a95729363786", "score": "0.54788387", "text": "def listvalues(self):\n return list(self.iterlistvalues())", "title": "" }, { "docid": "a4a14c6841e264b74b83947e36c01cef", "score": "0.5477761", "text": "def get_parVaryList(self):\n\t\tparList = []\n\t\tfor modelName in self._modelList:\n\t\t\tmodel = self.__modelDict[modelName]\n\t\t\tmodelParDict = model.parFitDict\n\t\t\tfor parName in modelParDict.keys():\n\t\t\t\tif modelParDict[parName][\"vary\"]:\n\t\t\t\t\tparList.append(modelParDict[parName][\"value\"])\n\t\t\t\telse:\n\t\t\t\t\tpass\n\t\treturn parList", "title": "" }, { "docid": "17117f8673b9b138ffa6b90038b4cdfe", "score": "0.54721993", "text": "def hobj_list(hob, p):\n dtinv = {}\n for i, j in iteritems(hob.dt):\n dtinv[j] = i\n\n\n items = p.items()\n items.sort(key=lambda k: 
(count_bits_set(k[0]), _monom(k[0])))\n a = [[dtinv[i] for i in _monom(expv)] for expv, y in items]\n return a", "title": "" }, { "docid": "0b8324f535ddbd8d0699c725f792bd7f", "score": "0.54714453", "text": "def _unpackHyperParameters(self, transitionModel, values=False):\n paramList = []\n # recursion step for sub-models\n if hasattr(transitionModel, 'models'):\n for m in transitionModel.models:\n paramList.append(self._unpackHyperParameters(m, values=values))\n\n # extend hyper-parameter based on current (sub-)model\n if hasattr(transitionModel, 'hyperParameterNames'):\n if values:\n paramList.extend(transitionModel.hyperParameterValues)\n else:\n paramList.extend(transitionModel.hyperParameterNames)\n\n return paramList", "title": "" }, { "docid": "bfb4951e95b3172ca4a37eb9e2ecf6f5", "score": "0.5471185", "text": "def extract_params(params):\n return [params[-2:], params[:-2]]", "title": "" }, { "docid": "d021bb982b81ab3904b12d3cd9bca568", "score": "0.54187214", "text": "def items(self):\n return list(self._params.items())", "title": "" }, { "docid": "d62313dca165cbc84d62f4035b18e9a3", "score": "0.5360987", "text": "def params(self):\n return list(self.parameters.values())", "title": "" }, { "docid": "cc927cc7eede4c55d6964430cc40116f", "score": "0.53528607", "text": "def get_well_params(self, well):\n values = []\n try:\n well_params = self.mp_dict[well]\n except KeyError as e:\n logger.error(\"Cannot get given well: %s\"\n \"parameters.\", str(e))\n return\n for param in well_params:\n values.append(self.mp_dict[well][param])\n return values", "title": "" }, { "docid": "6fea6072144e851c3f71cf8907d129de", "score": "0.5340712", "text": "def as_list(self):\n pass", "title": "" }, { "docid": "edce755332a3ba25251b13c662281d34", "score": "0.5330845", "text": "def _get_list_value(value):\n result = value\n if consts.NEWLINE in value:\n split_values = value.split(consts.NEWLINE)\n result = [val for val in split_values if not val.isspace()]\n result = [\"'\" + val + \"'\" if not val.isdigit() else val for val in result]\n result = ', '.join(result)\n result = ''.join(['[', result, ']'])\n\n return result", "title": "" }, { "docid": "f9de1d8771198584b6d6ad2e6699215b", "score": "0.5329689", "text": "def read_param_list():\n logger = logging.getLogger('parser.param_list')\n try:\n logger.info('Loading capability params list file')\n with open('capab_keys_list.json', 'r') as file:\n params = file.read()\n except FileNotFoundError:\n logger.error('capab_keys_list.json with params shall be located in a current dir')\n logger.error('program interrupted....................')\n exit(1)\n\n param_list: dict\n param_list = json.loads(params)\n\n # THIS IS AN EXAMPLE PRINT SECTION\n # this is for example and print params with value\n # uncomment block below to see printouts:\n\n # for key, value in param_list.items():\n # log.info('key: %s value: %s', key, value)\n # # this is for example and print only params\n # for key in param_list:\n # log.info('Current parameter: {}'.format(key))\n # # this is for example and print only values of params\n # for value in param_list.values():\n # log.info('Current type of parameter: %s', value)\n\n return param_list", "title": "" }, { "docid": "99d4f45ea4a5a748fe169838a3cb544e", "score": "0.53289014", "text": "def to_key_val_list(value):\n if value is None:\n return None\n\n if isinstance(value, (str, bytes, bool, int)):\n raise ValueError('cannot encode objects that are not 2-tuples')\n\n if isinstance(value, collections.Mapping):\n value = value.items()\n\n return 
list(value)", "title": "" }, { "docid": "d3bafab86045f66f6422aeb560ecc49e", "score": "0.5319102", "text": "def get_params(self):\r\n return []", "title": "" }, { "docid": "e5b4c913b92569e9a74e532d8c150376", "score": "0.52993804", "text": "def _unpackAllHyperParameters(self, values=True):\n return list(flatten(self._unpackHyperParameters(self.transitionModel, values=values)))", "title": "" }, { "docid": "afb10ca33800bf0aef1a97025a48b21a", "score": "0.52746826", "text": "def line_to_list(self, line):\n line_list = line.split()\n val1 = None\n val2 = None\n val3 = None\n ret_list = []\n for i in range(0, len(line_list), 3):\n val1 = int(line_list[i])\n val2 = int(line_list[i+1])\n val3 = int(line_list[i+2])\n ret_list.append([val1, val2, val3])\n return ret_list", "title": "" }, { "docid": "2047c2f730b4d0447ff5c4b5b071c2f7", "score": "0.5263749", "text": "def parse_headers_values(headers):\n if headers is None:\n return []\n\n return [\n \"=\".join([value.strip(' ') for value in header.split('=')])\n for header in list(map(str.strip, headers.split(',')))\n ]", "title": "" }, { "docid": "cb2829662c22a07c3f56abe603a257cc", "score": "0.5262578", "text": "def _shape_to_list(shape):\n tmp = []\n for i in shape:\n tmp.append(i.value)\n return tmp", "title": "" }, { "docid": "a0e494e627fa7c6e59529d9ad180572e", "score": "0.5262295", "text": "def _canonicalize_to_list(value):\n if isinstance(value, list):\n return value\n if value == '' or value is None:\n return []\n return [value]", "title": "" }, { "docid": "93d9fb72c5cf43fe73f6b1a096271665", "score": "0.52608436", "text": "def convert_to_list(dict):\n\treturn [[k,dict[k]] for k in dict];", "title": "" }, { "docid": "7233457b05fc52d9ebfa2cb281fab245", "score": "0.526064", "text": "def _convert_value(self, value):\n if isinstance(value, dict):\n return Params(value)\n elif isinstance(value, list):\n value = [self._convert_value(item) for item in value]\n return value", "title": "" }, { "docid": "00f1934916d0987cdcf226206e2c7d1a", "score": "0.5244587", "text": "def get_list(list=None):\n return list if list is not None else []", "title": "" }, { "docid": "ba6a8e1776e81f7c9b232b6ccbb57548", "score": "0.5242041", "text": "def _build_dict_as_list_params(self, params, dictionary, name):\n items = sorted(dictionary.items(), key=lambda x:x[0])\n for kv, index in zip(items, list(range(1, len(items)+1))):\n key, value = kv\n prefix = '%s.entry.%s' % (name, index)\n params['%s.key' % prefix] = key\n params['%s.value' % prefix] = value", "title": "" }, { "docid": "228092d8e153a51063a086865a8ebdad", "score": "0.5234492", "text": "def value_display_list(self):\n value_display = self.value_display\n if value_display is not None and not isinstance(value_display, list):\n return [value_display]\n else:\n return value_display", "title": "" }, { "docid": "7f9a3f66be020e902f718be638964b6a", "score": "0.52160287", "text": "def get_secure_config_params(self, ctx, params):\n # ctx is the context object\n # return variables are: returnVal\n #BEGIN get_secure_config_params\n returnVal = self.cc.get_secure_config_params(ctx.get('user_id'), ctx.get('token'), params)\n #END get_secure_config_params\n\n # At some point might do deeper type checking...\n if not isinstance(returnVal, list):\n raise ValueError('Method get_secure_config_params return value ' +\n 'returnVal is not type list as required.')\n # return the results\n return [returnVal]", "title": "" }, { "docid": "3a0e3913f18c595b412bd609d4d19599", "score": "0.5210953", "text": "def line_to_list(self, _line, 
delim):\n\n\t\tresult = list()\t\t\n\t\t_line_splited = _line.split(delim)\n\n\t\tfor value in _line_splited:\n\t\t\tvalue_stripped = value.strip().rstrip()\t\t\t\n\t\t\tresult.append(value_stripped)\t\t\t\t\n\t\t\n\t\treturn result", "title": "" }, { "docid": "46a872be5dd5779dbd2d638bad68f7fe", "score": "0.5197112", "text": "def get_list(self, sec, opt):\n list_ = self.get(sec, opt).lower().split(',')\n return [x.strip() for x in list_ if x.strip()]", "title": "" }, { "docid": "31b8d82f111594ad113d6f2e646b9702", "score": "0.5192333", "text": "def parameters(self) -> 'list[str]':\n pass", "title": "" }, { "docid": "4a93a1160507ea9a6c6196b9da8fb4c6", "score": "0.51920694", "text": "def get_prep_value(self, value):\n # If no valid value was given, return an empty list.\n if not value:\n return []\n\n # Appropriately coerce each individual value within\n # our array.\n return [self.of.get_prep_value(item) for item in value]", "title": "" }, { "docid": "f87604b47d366d17be60a53e84d23b65", "score": "0.51916873", "text": "def get_cgi_parameter_list(form: cgi.FieldStorage, key: str) -> List[str]:\n return form.getlist(key)", "title": "" }, { "docid": "45e5b0da79e0ceed768380acf9946921", "score": "0.5189815", "text": "def compensation_parameter_to_list(self, parameter_name):\n\n compensation_parameter_list = []\n try:\n for compensation_parameter in self.client_compensation_parameters:\n compensation_parameter_list.append(compensation_parameter[parameter_name])\n except Exception as convert_exp:\n logger.error(f'Project {self.project_id_hash}: Converting compensation parameters to list was failed!')\n logger.error(f'Project {self.project_id_hash}: The exception is: {convert_exp}')\n self.set_operation_status_failed()\n\n return compensation_parameter_list", "title": "" }, { "docid": "a7cc3c7207e7ee24dbdc8b43741c692d", "score": "0.5175294", "text": "def ensure_list(value):\n if value is None:\n return []\n if isinstance(value, list):\n return value\n return [value]", "title": "" }, { "docid": "f9a5578dff1dd7c5b97a7be1f34e6bf1", "score": "0.51751924", "text": "def pose_to_list(self, p):\n assert type(p) is dict\n return [p['x'], p['y'], p['z'], p['rx'], p['ry'], p['rz']]", "title": "" }, { "docid": "2889c4fbbc860994150ec7decdef4117", "score": "0.517049", "text": "def param_to_list(ast):\n params = []\n while ast is not None:\n # Append [var_name, var_type]\n params.append(ast[\"token\"])\n # params.append(ast[\"right\"][\"token\"]) # Matt doesn't want this.\n # Add the variables to the main type checker\n var_types.variable_add(ast[\"token\"], ast[\"right\"][\"token\"])\n ast = ast[\"left\"]\n return params", "title": "" }, { "docid": "51cbf982f00cdd94f8b2f9af7fe87915", "score": "0.51684463", "text": "def get_parameters(data: bytes) -> list:\n param_list = []\n index = 0\n while index < len(data):\n name = _get_param_name(data[index:])\n index += settings.IOTORO_PARAM_MAX_NAME_SIZE\n type = _get_param(data[index])\n index += 1\n value = _get_param_value(data[index:], type)\n\n param_list.append(IotoroParam(\n name,\n type,\n value\n ))\n\n index += type.size\n\n\n return param_list", "title": "" }, { "docid": "15bd4f4c2aed84065c1fe5f490482f21", "score": "0.5155122", "text": "def hobj_list(p):\n items = list(p.items())\n items.sort(key=lambda k: (count_bits_set(k[0]), _monom(k[0])))\n a = [_monom(expv) for expv, y in items]\n return a", "title": "" }, { "docid": "14fcfb7b0a36e75d8df553cee7a24100", "score": "0.5153037", "text": "def as_list(self):\n return [self.__data[x] for x in self.__data.keys()]", 
"title": "" }, { "docid": "6dd1763290a8233e95706a7574a6a946", "score": "0.51458967", "text": "def returnList(value):\n \n if type(value) == type(()):\n value = list(value)\n elif value and type(value) != type([]):\n value = [value]\n \n return value", "title": "" }, { "docid": "c2f8826ec057877b6ab5cde6cfd6d66e", "score": "0.5144354", "text": "def hvals(self, name: str) -> Union[Awaitable[List], List]:\n return self.execute_command(\"HVALS\", name)", "title": "" }, { "docid": "93b5633a25b839a50708a155d7cfcaa8", "score": "0.514406", "text": "def state_to_list(self, state):\n return list(map(float, list(state)))", "title": "" }, { "docid": "fb8f5bff52257979509120d211f28603", "score": "0.5140982", "text": "def get_param_argnames_as_list(argnames):\n if isinstance(argnames, string_types):\n argnames = argnames.replace(' ', '').split(',')\n return list(argnames)", "title": "" }, { "docid": "fb8f5bff52257979509120d211f28603", "score": "0.5140982", "text": "def get_param_argnames_as_list(argnames):\n if isinstance(argnames, string_types):\n argnames = argnames.replace(' ', '').split(',')\n return list(argnames)", "title": "" }, { "docid": "be58a9cf677ff661da838be2337bb12e", "score": "0.5136998", "text": "def param_grid(param_values: dict) -> list:\n # Create a meshgrid of the param values\n mg = np.meshgrid(*param_values.values())\n if not isinstance(mg, (list, np.ndarray)):\n raise pyrado.TypeErr(given=mg, expected_type=[list, np.ndarray])\n\n # Flatten the grid arrays so they can be iterated\n mg_flat = (arr.flatten() for arr in mg)\n\n # Convert the meshgrid arrays to a parameter set list\n return [dict(zip(param_values.keys(), pvals)) for pvals in zip(*mg_flat)]", "title": "" }, { "docid": "c25f780dfe81509d37ef93f2d3f0b6e0", "score": "0.51278657", "text": "def values(self):\n return {n: getattr(self, n) for n in self._hparam_types.keys()}", "title": "" }, { "docid": "6e3b489680c29e37fa81c7120ef4e3b6", "score": "0.5127413", "text": "def _get_field_as_list(self, field_name):\n return getattr(self, field_name).rstrip(',').split(',')", "title": "" }, { "docid": "178a83e89d4f7945804ac60c282d0e1a", "score": "0.512731", "text": "def to_list(data):\n return data if isinstance(data, list) else [data]", "title": "" }, { "docid": "ffc66dc1c376115e939b8d6327200698", "score": "0.5122904", "text": "def _dictionary_to_list_values(dictionary):\n new_list = []\n for x in dictionary:\n new_list.append(dictionary[x])\n return new_list", "title": "" }, { "docid": "602dcb4aff760aeefb30b83581085d96", "score": "0.51190495", "text": "def val_list(data, column):\r\n return [val[column] for val in data.values()]", "title": "" }, { "docid": "694b9beeae259a2c0d55452fc0e86c9e", "score": "0.5116177", "text": "def get_params_iter(self):\n\t\treturn []", "title": "" }, { "docid": "cf3b422d220f2db853d9dd496ffd600e", "score": "0.5112", "text": "def _unpackSelectedHyperParameters(self):\n # if no hyper-parameters are selected, choose all\n if not self.selectedHyperParameters:\n return self._unpackAllHyperParameters()\n\n # if self.selectedHyperParameters is not empty\n nameTree = self._unpackHyperParameters(self.transitionModel)\n valueTree = self._unpackHyperParameters(self.transitionModel, values=True)\n output = []\n\n # loop over selected hyper-parameters\n for name in self.selectedHyperParameters:\n iFound = recursiveIndex(nameTree, name) # choose first hit\n if len(iFound) == 0:\n raise ConfigurationError('Could not find any hyper-parameter named {}.'.format(name))\n\n value = valueTree[:]\n for i in iFound:\n value = 
value[i]\n\n output.append(value)\n\n # remove occurrence from nameTree (if name is listed twice, use second occurrence...)\n assignNestedItem(nameTree, iFound, ' ')\n\n # return selected values of hyper-parameters\n return output", "title": "" }, { "docid": "8bae41783750d08dbe4d3db251f61f91", "score": "0.50996983", "text": "def get_params(params):\n parsed_params = {}\n for k, v in params.iteritems():\n vv = v\n if isinstance(v, np.ndarray):\n vv = v.tolist()\n if len(vv) == 1:\n vv = vv[0]\n if isinstance(v, basestring) and len(v) == 0:\n vv = None\n elif isinstance(v, list) and len(v) == 1:\n vv = vv[0]\n\n if isinstance(vv, basestring) and len(vv) == 0:\n vv = None\n\n parsed_params[k] = vv\n return parsed_params", "title": "" }, { "docid": "5b0169ef6cf09c21e4868626fa0b499c", "score": "0.50919497", "text": "def _cast_parms(self,params):\n lparams = list(params)\n\n for i, param in enumerate(lparams):\n if isinstance(param, dict):\n lparams[i] = self._to_ordered_dict(param)\n\n return OneServer._cast_parms(self, lparams)", "title": "" }, { "docid": "890e57888a78198560d8368d6661d39f", "score": "0.5091424", "text": "def list_parameters(self):\n params = self.parameters.values()\n return sorted(params, key=lambda p: (p.index, p.identifier))", "title": "" }, { "docid": "390a8ba629706dbc852c5c27c8d81cc9", "score": "0.5088345", "text": "def _distill_params(multiparams, params): # noqa\n\n if not multiparams:\n if params:\n return [params]\n else:\n return []\n elif len(multiparams) == 1:\n zero = multiparams[0]\n if isinstance(zero, (list, tuple)):\n if (\n not zero\n or hasattr(zero[0], \"__iter__\")\n and not hasattr(zero[0], \"strip\")\n ):\n # execute(stmt, [{}, {}, {}, ...])\n # execute(stmt, [(), (), (), ...])\n return zero\n else:\n # execute(stmt, (\"value\", \"value\"))\n return [zero]\n elif hasattr(zero, \"keys\"):\n # execute(stmt, {\"key\":\"value\"})\n return [zero]\n else:\n # execute(stmt, \"value\")\n return [[zero]]\n else:\n if hasattr(multiparams[0], \"__iter__\") and not hasattr(\n multiparams[0], \"strip\"\n ):\n return multiparams\n else:\n return [multiparams]", "title": "" }, { "docid": "f0c194cac84f5e13c1989f43e609fbfb", "score": "0.50860745", "text": "def param_values(self, pnames=None):\n l = self.get_params(pnames)\n v = [p.value for p in l]\n return np.array(v)", "title": "" }, { "docid": "be000cffec8ad73a6f36cb75251108f7", "score": "0.5085007", "text": "def args_to_list(items):\n result = []\n if items is not None:\n for item in items:\n if item:\n for val in item.split(','):\n val = val.strip()\n if val:\n result.append(val)\n return result", "title": "" }, { "docid": "8b6b7e2b9fb5dfd35a95ecffbd493025", "score": "0.5076829", "text": "def values(self):\n return self.params.values()", "title": "" }, { "docid": "2fbfa1fbf447f877dd1d5b1dae023313", "score": "0.5076043", "text": "def check_list(option, opt, value): # pylint: disable=W0613\n # we have to make this explicit check since \"\".split(\",\") is [\"\"],\n # not an empty list :(\n if not value:\n return []\n else:\n return utils.UnescapeAndSplit(value)", "title": "" }, { "docid": "11cc55307b181c87bef337815b0d4ed4", "score": "0.50716674", "text": "def get_params_iter(self):\n return []", "title": "" }, { "docid": "b56b7975d717178ecead60d49745347b", "score": "0.50515753", "text": "def getlist(self, key):\n try:\n return dict.__getitem__(self, key)\n except KeyError:\n return []", "title": "" }, { "docid": "94afaabf39efcc7d9edc388191799b3f", "score": "0.50511026", "text": "def _build_list_params(param_name, key, 
values):\n params = {}\n if hasattr(values, \"__iter__\"):\n index = 0\n for value in values:\n params[\n str(param_name) + \"[\" + str(index) + \"].\" + str(key)\n ] = str(value)\n index += 1\n else:\n params[str(param_name) + \"[0].\" + str(key)] = str(values)\n return params", "title": "" }, { "docid": "86226465bbcf315ca94ebdf53a4ca4c2", "score": "0.5046906", "text": "def getParameters(self):\n return list(self._parameters_.items())", "title": "" }, { "docid": "504908544fef76185c887478c9bf641d", "score": "0.5035936", "text": "def _interpolate_list(value, params):\n return [_interpolate(member, params) for member in value]", "title": "" } ]
e6f61daa42d943e628e298f7eedecfb5
Provides operations to call the getSkypeForBusinessParticipantActivityCounts method.
[ { "docid": "ec1fbabf6807349ca4bbe6b52ace4c0d", "score": "0.5942781", "text": "def get_skype_for_business_participant_activity_counts_with_period(self,period: Optional[str] = None) -> GetSkypeForBusinessParticipantActivityCountsWithPeriodRequestBuilder:\n if not period:\n raise TypeError(\"period cannot be null.\")\n from .get_skype_for_business_participant_activity_counts_with_period.get_skype_for_business_participant_activity_counts_with_period_request_builder import GetSkypeForBusinessParticipantActivityCountsWithPeriodRequestBuilder\n\n return GetSkypeForBusinessParticipantActivityCountsWithPeriodRequestBuilder(self.request_adapter, self.path_parameters, period)", "title": "" } ]
[ { "docid": "e3de64b0950a51dd41c9c6e704e9d55e", "score": "0.7065211", "text": "def get_skype_for_business_participant_activity_user_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_skype_for_business_participant_activity_user_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "25d1b06198ad4da5f3f3a4de08d0a2a1", "score": "0.7062087", "text": "def get_skype_for_business_participant_activity_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_skype_for_business_participant_activity_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "6feb264feae2b1aaaf4c6a5682ec8fac", "score": "0.68109614", "text": "def get_skype_for_business_activity_user_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_skype_for_business_activity_user_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "320c65436ffb8ad5ae6856ca3d0274ab", "score": "0.67302626", "text": "def get_skype_for_business_peer_to_peer_activity_user_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_skype_for_business_peer_to_peer_activity_user_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "25c06b442a8360eddaef5ba54e2137a3", "score": "0.669353", "text": "def get_skype_for_business_activity_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_skype_for_business_activity_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "286e2ac27520e3057d61920ce1dab0d1", "score": "0.6688006", "text": "def get_skype_for_business_organizer_activity_user_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_skype_for_business_organizer_activity_user_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "f2322bdab1a4c26ecbc730834eaab088", "score": "0.6646424", "text": "def get_skype_for_business_participant_activity_minute_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_skype_for_business_participant_activity_minute_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "2a3a7552493a050a5658d5b178769103", "score": "0.6638871", "text": "def get_skype_for_business_organizer_activity_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_skype_for_business_organizer_activity_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "c55d43cd0cde62ca6e04dbd34abec921", "score": "0.6619389", "text": "def get_skype_for_business_peer_to_peer_activity_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_skype_for_business_peer_to_peer_activity_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "fca14c236d2939fc747fda0a8dd3b9a6", "score": "0.6447872", "text": "def get_skype_for_business_organizer_activity_minute_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_skype_for_business_organizer_activity_minute_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "fe219bb945fa0a431d5bdf45b5d270bb", "score": "0.64032483", "text": "def get_skype_for_business_peer_to_peer_activity_minute_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_skype_for_business_peer_to_peer_activity_minute_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "cf96717807745e73803c7cac89b1feef", "score": "0.6205501", "text": "def get_skype_for_business_device_usage_user_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_skype_for_business_device_usage_user_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "7dc6d478e2fda8c5199a94fcc04fcbd4", "score": "0.59453106", "text": "def get_skype_for_business_device_usage_distribution_user_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_skype_for_business_device_usage_distribution_user_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "a45ebf9120dfda495dd13891baa8d145", "score": "0.58515793", "text": "def call_skype(self):\n self.start_call('skype')\n return self.partner_id.call_skype()", "title": "" }, { "docid": "04dc2740bedf7be4c82a2a54e8fb6d2c", "score": "0.57981265", "text": "def get_skype_for_business_participant_activity_user_counts_with_period(self,period: Optional[str] = None) -> GetSkypeForBusinessParticipantActivityUserCountsWithPeriodRequestBuilder:\n if not period:\n raise TypeError(\"period cannot be null.\")\n from .get_skype_for_business_participant_activity_user_counts_with_period.get_skype_for_business_participant_activity_user_counts_with_period_request_builder import GetSkypeForBusinessParticipantActivityUserCountsWithPeriodRequestBuilder\n\n return GetSkypeForBusinessParticipantActivityUserCountsWithPeriodRequestBuilder(self.request_adapter, self.path_parameters, period)", "title": "" }, { "docid": "7d4234cdbb9ef2a777be859ea1be5949", "score": "0.57130975", "text": "def get_skype_for_business_activity_counts_with_period(self,period: Optional[str] = None) -> GetSkypeForBusinessActivityCountsWithPeriodRequestBuilder:\n if not period:\n raise TypeError(\"period cannot be null.\")\n from .get_skype_for_business_activity_counts_with_period.get_skype_for_business_activity_counts_with_period_request_builder import GetSkypeForBusinessActivityCountsWithPeriodRequestBuilder\n\n return GetSkypeForBusinessActivityCountsWithPeriodRequestBuilder(self.request_adapter, self.path_parameters, period)", "title": "" }, { "docid": "33e5b3085c5b5875a1a71762853f463e", "score": "0.5686292", "text": "def get_skype_for_business_activity_user_counts_with_period(self,period: Optional[str] = None) -> GetSkypeForBusinessActivityUserCountsWithPeriodRequestBuilder:\n if not period:\n raise TypeError(\"period cannot be null.\")\n from .get_skype_for_business_activity_user_counts_with_period.get_skype_for_business_activity_user_counts_with_period_request_builder import GetSkypeForBusinessActivityUserCountsWithPeriodRequestBuilder\n\n return 
GetSkypeForBusinessActivityUserCountsWithPeriodRequestBuilder(self.request_adapter, self.path_parameters, period)", "title": "" }, { "docid": "25f71db5d399025f5421ba3d3ac14b9b", "score": "0.5636225", "text": "def get_share_point_activity_user_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_share_point_activity_user_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "dae2881d6284af5a8927b7916140f911", "score": "0.5609698", "text": "def get_office365_activations_user_counts(\n self,\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_office365_activations_user_counts.metadata['url'] # type: ignore\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "df1e7389ba15b5b5fed9e0342e27e117", "score": "0.5510291", "text": "def get_skype_for_business_peer_to_peer_activity_user_counts_with_period(self,period: Optional[str] = None) -> GetSkypeForBusinessPeerToPeerActivityUserCountsWithPeriodRequestBuilder:\n if not period:\n raise TypeError(\"period cannot be null.\")\n from .get_skype_for_business_peer_to_peer_activity_user_counts_with_period.get_skype_for_business_peer_to_peer_activity_user_counts_with_period_request_builder import GetSkypeForBusinessPeerToPeerActivityUserCountsWithPeriodRequestBuilder\n\n return GetSkypeForBusinessPeerToPeerActivityUserCountsWithPeriodRequestBuilder(self.request_adapter, self.path_parameters, period)", "title": "" }, { "docid": "8662d6b6ffae3119b2557f06c8d596ba", "score": "0.5509663", "text": "def get_skype_for_business_peer_to_peer_activity_counts_with_period(self,period: Optional[str] = None) -> GetSkypeForBusinessPeerToPeerActivityCountsWithPeriodRequestBuilder:\n if not period:\n raise TypeError(\"period cannot be null.\")\n from .get_skype_for_business_peer_to_peer_activity_counts_with_period.get_skype_for_business_peer_to_peer_activity_counts_with_period_request_builder import GetSkypeForBusinessPeerToPeerActivityCountsWithPeriodRequestBuilder\n\n return GetSkypeForBusinessPeerToPeerActivityCountsWithPeriodRequestBuilder(self.request_adapter, self.path_parameters, period)", "title": "" }, { "docid": "b850c4f1a300850d9dd8c3838189f018", "score": "0.54499984", "text": "def get_skype_for_business_organizer_activity_user_counts_with_period(self,period: Optional[str] = None) -> GetSkypeForBusinessOrganizerActivityUserCountsWithPeriodRequestBuilder:\n if not period:\n raise TypeError(\"period cannot be null.\")\n from .get_skype_for_business_organizer_activity_user_counts_with_period.get_skype_for_business_organizer_activity_user_counts_with_period_request_builder import GetSkypeForBusinessOrganizerActivityUserCountsWithPeriodRequestBuilder\n\n return GetSkypeForBusinessOrganizerActivityUserCountsWithPeriodRequestBuilder(self.request_adapter, self.path_parameters, period)", "title": "" }, { "docid": "4b35fb0a4d576fc0b594e09cf4ddc339", "score": "0.54361016", "text": "def 
get_skype_for_business_organizer_activity_counts_with_period(self,period: Optional[str] = None) -> GetSkypeForBusinessOrganizerActivityCountsWithPeriodRequestBuilder:\n if not period:\n raise TypeError(\"period cannot be null.\")\n from .get_skype_for_business_organizer_activity_counts_with_period.get_skype_for_business_organizer_activity_counts_with_period_request_builder import GetSkypeForBusinessOrganizerActivityCountsWithPeriodRequestBuilder\n\n return GetSkypeForBusinessOrganizerActivityCountsWithPeriodRequestBuilder(self.request_adapter, self.path_parameters, period)", "title": "" }, { "docid": "7ff3826dd9ffe69b028d5cced598ab4b", "score": "0.5433191", "text": "def get_teams_user_activity_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_teams_user_activity_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "5bd9f72f2bf6915804b1d21537563ece", "score": "0.54134274", "text": "def count(cls, client) :\n\t\ttry :\n\t\t\tobj = lsnrtspalgprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "title": "" }, { "docid": "8e548c60229cdf352c35013561a392df", "score": "0.5404569", "text": "async def total_sessions(self):\n res = await self.call(\"wamp.session.count\")\n self.log.info(res)", "title": "" }, { "docid": "4de96a597b5808750643168ed8b77ed5", "score": "0.54014826", "text": "def count(cls, client) :\n\t\ttry :\n\t\t\tobj = nsicapprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "title": "" }, { "docid": "58741de941a6d8a300f814737335a314", "score": "0.5391661", "text": "def get_email_activity_user_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_email_activity_user_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "5ff8fdd98832d00f789928bafad9e783", "score": "0.5363513", "text": "def get_yammer_activity_user_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_yammer_activity_user_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "dc048ff442c13187443aba2929da0701", "score": "0.5362511", "text": "def count(cls, service, sessionid) :\n try :\n obj = lsnrtspalgsession_datachannel_binding()\n obj.sessionid = sessionid\n option_ = options()\n option_.count = True\n response = obj.get_resources(service, option_)\n if response :\n return response[0].__dict__['___count']\n return 0\n except Exception as e:\n raise e", "title": "" }, { "docid": "90c76ee8027fa1678f079073f06d978e", "score": "0.5353397", "text": 
"def get_teams_user_activity_user_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_teams_user_activity_user_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "6aad58ac5bd7fea894467bc5b18923c5", "score": "0.5338669", "text": "def GetProfileCount(self):\n\t\treturn self._oleobj_.InvokeTypes(4, LCID, 1, (3, 0), (),)", "title": "" }, { "docid": "0696128ee3d40421787f308f7c48b21d", "score": "0.5314422", "text": "def GetProfileCount(self):\n\t\treturn self._oleobj_.InvokeTypes(4, LCID, 1, (2, 0), (),)", "title": "" }, { "docid": "d153b1f73041bb6050764a50dcba0b8c", "score": "0.5303124", "text": "def count_calls(results):\n return len(list(r for r in results.keys() if r[1]=='call'))", "title": "" }, { "docid": "88526c7640cbca89b28538e2aaeb928e", "score": "0.52374506", "text": "def NumberRegisteredForActivity(activity_key):\n\n registrations = UserRegistration.ActiveQuery(activity=activity_key)\n return registrations.count()", "title": "" }, { "docid": "f70600b53cbaab975d5789f64fc0d0bd", "score": "0.52110255", "text": "def count(cls, client) :\n\t\ttry :\n\t\t\tobj = forwardingsession()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "title": "" }, { "docid": "687f6cb8f9d6c3ccbd3e168eaf304d56", "score": "0.5180486", "text": "def call_skype(self):\n return self.social_url('skype', 'call')", "title": "" }, { "docid": "f6b0c63fa9e2a9f0cde75d7aabf71283", "score": "0.51622546", "text": "def GetUserPointsCount(self):\n\t\treturn self._oleobj_.InvokeTypes(24, LCID, 1, (3, 0), (),)", "title": "" }, { "docid": "99efe8a3455da70570ab68668ef421ee", "score": "0.51563406", "text": "def get_email_activity_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_email_activity_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "c64f0a96b5ade39aa80f1f992ca63ae4", "score": "0.51215875", "text": "def get_mailbox_usage_mailbox_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_mailbox_usage_mailbox_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "5a037d6fcb9c0dac0ee8b63073f5ae81", "score": "0.511952", "text": "def get_office365_activation_counts(\n self,\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_office365_activation_counts.metadata['url'] # type: ignore\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "147825d93c698f94a61c885b54a59f09", "score": "0.51111597", "text": "def GetProfileItemsCount(self):\n\t\treturn self._oleobj_.InvokeTypes(27, LCID, 1, (3, 0), (),)", "title": "" }, { "docid": "4d9c552f79f9ea438994fb390610eff5", "score": "0.50868326", "text": "def GetUserPointsCount(self):\n\t\treturn self._oleobj_.InvokeTypes(20, LCID, 1, (3, 0), (),)", "title": "" }, { "docid": "f972176acacb645a1b9125cde0b013ec", "score": "0.5084734", "text": "def get_share_point_activity_file_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_share_point_activity_file_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "87704c9009abb47d7cd1e6a43a096527", "score": "0.50844985", "text": "def GetTotalCountOfClubs(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "ce8912ddf62301405b366058f20be196", "score": "0.5081217", "text": "def getApplicationCount(self):\r\n fn = self.function_table.getApplicationCount\r\n result = fn()\r\n return result", "title": "" }, { "docid": "35dd5a853eec7783a38c0c8ab6731776", "score": "0.5075649", "text": "def GetUserPointsCount(self):\n\t\treturn self._oleobj_.InvokeTypes(28, LCID, 1, (3, 0), (),)", "title": "" }, { "docid": "8617c402bf960cc4c75b455cb9aa50c2", "score": "0.5072616", "text": "def get_office365_active_user_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_office365_active_user_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "a5aad0e71a5180f633b1049871bc39e7", "score": "0.5063802", "text": "def get_email_app_usage_apps_user_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_email_app_usage_apps_user_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "3516b6df32c47934e200d4e602486088", "score": "0.50560164", "text": "def get_skype_for_business_device_usage_user_counts_with_period(self,period: Optional[str] = None) -> GetSkypeForBusinessDeviceUsageUserCountsWithPeriodRequestBuilder:\n if not period:\n raise TypeError(\"period cannot be null.\")\n from .get_skype_for_business_device_usage_user_counts_with_period.get_skype_for_business_device_usage_user_counts_with_period_request_builder import 
GetSkypeForBusinessDeviceUsageUserCountsWithPeriodRequestBuilder\n\n return GetSkypeForBusinessDeviceUsageUserCountsWithPeriodRequestBuilder(self.request_adapter, self.path_parameters, period)", "title": "" }, { "docid": "85ed8ff2a8fcf969a8f5e5b5c59a8c67", "score": "0.5050576", "text": "def get_skype_for_business_activity_user_detail744_e(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_skype_for_business_activity_user_detail744_e.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "71fc3ded900086a546f0b0bf8ba0c4c1", "score": "0.5043911", "text": "def get_email_app_usage_user_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_email_app_usage_user_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "bab812edcfb4481bb5bc998edbaf4666", "score": "0.50438106", "text": "def number_messages(self, participant = None):\n def helper(i, x):\n return participant == self.data['messages'][i]['sender_name']\n if participant:\n return len([ x for i, x in enumerate(self.data['messages']) if helper(i, x) ] )\n return len(self.data['messages'])", "title": "" }, { "docid": "26f3558af1a830f2440cb4512632229d", "score": "0.50398695", "text": "def GetFaceCount(self):\n\t\treturn self._oleobj_.InvokeTypes(2, LCID, 1, (3, 0), (),)", "title": "" }, { "docid": "26f3558af1a830f2440cb4512632229d", "score": "0.50398695", "text": "def GetFaceCount(self):\n\t\treturn self._oleobj_.InvokeTypes(2, LCID, 1, (3, 0), (),)", "title": "" }, { "docid": "56750e170529a82192cefdcd64d8c7a2", "score": "0.5038063", "text": "def GetFaceCount(self):\n\t\treturn self._oleobj_.InvokeTypes(37, LCID, 1, (3, 0), (),)", "title": "" }, { "docid": "6cdb58c756788605963c462d9d7af492", "score": "0.50308347", "text": "def get_subscriber_count(self, broadcaster_id=THE_BEST_TWITCH_STREAMER_ID_NO_BIAS):\r\n data = self.get_subscribers(broadcaster_id=THE_BEST_TWITCH_STREAMER_ID_NO_BIAS).json()\r\n logger.info(data)\r\n # Everyone is always subscribed to themself, so subtract one.\r\n return len(data[\"data\"]) - 1", "title": "" }, { "docid": "7e7244eb378e810c1c5d7eb63d036986", "score": "0.5028539", "text": "def GetFaceCount(self):\n\t\treturn self._oleobj_.InvokeTypes(10, LCID, 1, (3, 0), (),)", "title": "" }, { "docid": "4106ac6bc5725b02ca723305c4fdb4d3", "score": "0.5026396", "text": "def get_count(self):\n method = 'GET'\n url = '/rest/v1/info/counter'\n json_data = self.client.request(method, url)\n count = json_loads(json_data)\n return count", "title": "" }, { "docid": "28ab9b8e5e4d20abd0629d98a5b149cf", "score": "0.50251293", "text": "def _get_count(_khoros_object, _user_id, _object_type):\n _api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id)\n return int(_api_response['data']['items'][0][_object_type]['count'])", "title": "" }, { "docid": "9c4beb251a12d3a8be09cd58c581e754", 
"score": "0.50222594", "text": "def GetFaceCount(self):\n\t\treturn self._oleobj_.InvokeTypes(11, LCID, 1, (3, 0), (),)", "title": "" }, { "docid": "9c4beb251a12d3a8be09cd58c581e754", "score": "0.50222594", "text": "def GetFaceCount(self):\n\t\treturn self._oleobj_.InvokeTypes(11, LCID, 1, (3, 0), (),)", "title": "" }, { "docid": "9c4beb251a12d3a8be09cd58c581e754", "score": "0.5022002", "text": "def GetFaceCount(self):\n\t\treturn self._oleobj_.InvokeTypes(11, LCID, 1, (3, 0), (),)", "title": "" }, { "docid": "9c4beb251a12d3a8be09cd58c581e754", "score": "0.5022002", "text": "def GetFaceCount(self):\n\t\treturn self._oleobj_.InvokeTypes(11, LCID, 1, (3, 0), (),)", "title": "" }, { "docid": "0d2daa9d775ec6cb3ec1f900b258920c", "score": "0.5011822", "text": "def get_yammer_activity_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_yammer_activity_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "cedfcc9e65a9e8f5fdc60f5dcfabbc64", "score": "0.50076604", "text": "def get_count(cfg, plannerId, pps_host=None, recurrence=False, bookings=False, failure=False, timeout=2):\n try:\n protocol = cfg['protocol']\n pps_port = cfg['pps']['port']\n\n if pps_host is None:\n pps_host = cfg['pps']['host']\n\n header = get_header(plannerId)\n if header is None:\n print \"Unable to get the headers\"\n return False, None\n\n if recurrence:\n url = protocol + \"://\" + pps_host + \":\" + str(pps_port) + \"/pps/v3/core/recurrences/countRecurrences\"\n\n print \"Get recurrence count via URL : \", url\n\n elif bookings:\n url = protocol + \"://\" + pps_host + \":\" + str(pps_port) + \"/pps/v3/core/bookings/countBookings\"\n print \"Get Booking count via URL : \", url\n\n elif failure:\n url = protocol + \"://\" + pps_host + \":\" + str(pps_port) + \"/pps/v3/core/failures/countFailures\"\n print \"Get Failures count of planner via URL:\", url\n\n else:\n url = protocol + \"://\" + pps_host + \":\" + str(pps_port) + \"/pps/v3/core/recordings/countRecordings\"\n print \"Get Recordings count of planner via URL:\", url\n\n r = sendURL(\"get\", url, server_timeout=timeout, header=header)\n\n if r is not None:\n if r.status_code != 200:\n 
print \"PPS V3 Unable to get the count via url %s\" % url\n print r.status_code\n print r.headers\n return False, r\n else:\n r = json.loads(r.content)\n r = r['count']\n print \"PPS V3. Got the count of the planner %s\" % plannerId\n return True, r\n else:\n return False, None\n\n except Exception as e:\n print \"Error in PPS V3 get count :\\n\", str(e)\n return False, None", "title": "" }, { "docid": "bcd0e481ca78fffac23c3aaf55790505", "score": "0.5007204", "text": "def get_office365_groups_activity_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_office365_groups_activity_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "fca279236b2f4d4c5047ba76491bc679", "score": "0.49927172", "text": "def get_office365_activations_user_counts(self) -> GetOffice365ActivationsUserCountsRequestBuilder:\n from .get_office365_activations_user_counts.get_office365_activations_user_counts_request_builder import GetOffice365ActivationsUserCountsRequestBuilder\n\n return GetOffice365ActivationsUserCountsRequestBuilder(self.request_adapter, self.path_parameters)", "title": "" }, { "docid": "e99d05cf0cedc11335a6d9b87c51b897", "score": "0.4986895", "text": "def count(self, params: dict = None, proxy_search: str = None) -> int:\n # I can shorten the following code with this: return len(list(self.get_multiple(...)))\n # but in term of memory efficiency it is not good, because we can have at a moment a potentially very large\n # list in memory.\n total = 0\n for _ in self.get_multiple(params, proxy_search=proxy_search):\n total += 1\n return total", "title": "" }, { "docid": "da5fca0282bccd57d349acd927ad7db8", "score": "0.49825254", "text": "def get_skype_for_business_participant_activity_minute_counts_with_period(self,period: Optional[str] = None) -> GetSkypeForBusinessParticipantActivityMinuteCountsWithPeriodRequestBuilder:\n if not period:\n raise TypeError(\"period cannot be null.\")\n from .get_skype_for_business_participant_activity_minute_counts_with_period.get_skype_for_business_participant_activity_minute_counts_with_period_request_builder import GetSkypeForBusinessParticipantActivityMinuteCountsWithPeriodRequestBuilder\n\n 
return GetSkypeForBusinessParticipantActivityMinuteCountsWithPeriodRequestBuilder(self.request_adapter, self.path_parameters, period)", "title": "" }, { "docid": "a933c314b94bd5cdeff26661614730f1", "score": "0.4981104", "text": "def GetFacesToDraftCount(self):\n\t\treturn self._oleobj_.InvokeTypes(7, LCID, 1, (2, 0), (),)", "title": "" }, { "docid": "bde732879f1b40d64c6dbf8f83447438", "score": "0.49780124", "text": "def count(self, status='sent'):\n if status.lower() not in ['sent', 'draft', 'outbox']:\n raise ValueError('Incorrect status, check documentation')\n url = client.build_url('campaigns', status.lower(), 'count')\n _, res_json = client.get(url, headers=self.headers)\n return res_json['count']", "title": "" }, { "docid": "1ab1f441b98360ad506a27a51d69260b", "score": "0.49757162", "text": "def count(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "d4300ae39ea0e566be230c173b6f977f", "score": "0.4964569", "text": "def _get_calls_for_lead(lead):\n print(f\"Getting calls for {lead['display_name']}\")\n has_more = True\n offset = 0\n calls = []\n while has_more:\n try:\n resp = api.get('activity/call', params={ 'lead_id': lead['id'], '_skip': offset, '_fields': 'duration,user_id' })\n calls += [i for i in resp['data']]\n offset += len(resp['data'])\n has_more = resp['has_more']\n except Exception as e:\n has_more = False \n calls_per_lead.append({ 'lead': lead, 'calls': calls })", "title": "" }, { "docid": "1cacfebcf1ba85f11a095d3106784237", "score": "0.4952194", "text": "def count_activities(experiment: Experiment, activity_type: str) -> int:\n count = 0\n for activity in experiment[\"method\"]:\n if \"ref\" in activity:\n activity = lookup_activity(activity[\"ref\"])\n if activity[\"type\"] == activity_type:\n count += 1\n return count", "title": "" }, { "docid": "aa79aedbdd2e675f3f2ad274df774684", "score": "0.49477282", "text": "def get_one_drive_activity_user_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_one_drive_activity_user_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "a2045c7b3f52c62d4d1f26d5caad8b9b", "score": "0.49356183", "text": "def count_calls(callers):\n nc = 0\n for calls in callers.values():\n nc += calls\n return nc", "title": "" }, { "docid": "f80e5badf1b328348f06ce1a21e140c5", "score": "0.49321246", "text": "def get_count(self):\n return self.ipcon.send_request(self, BrickletXMC1400Breakout.FUNCTION_GET_COUNT, (), '', 'I')", "title": "" }, { "docid": "8b8f4bf077d0094c688b8a135178811b", "score": "0.49120528", "text": "def get_office365_services_user_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_office365_services_user_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "a04adb521658d399b4d7186cfdf0c34a", "score": "0.49002776", "text": "def page_count_get(self, page, **kwargs):\n\n endpoint_key_0 = self.endpoint_key[0][self.endpoint]\n endpoint_key_1 = self.endpoint_key[1][self.endpoint]\n\n params = {\"page\": page}\n\n if self.limit is not None:\n params[\"limit\"] = self.limit\n\n if (self.api_incremental_key is not None) & (self.last_timestamp is not None):\n params[self.api_incremental_key] = self.last_timestamp\n\n if self.group_id is not None:\n params[\"group_id\"] = self.group_id\n\n if self.campaign_id is not None:\n params[\"campaign_id\"] = self.campaign_id\n\n if self.url_id is not None:\n params[\"url_id\"] = self.url_id\n\n resp = self.session.get(\n self.base + self.endpoint, auth=(self.user, self.pw), params=params\n )\n\n print(f\"{resp.url}\")\n\n formatted_response = json.loads(json.dumps(xmltodict.parse(resp.text)))[\n \"response\"\n ][endpoint_key_1]\n\n ### Sina: 7/20/20 only broadcasts, messages, & sent_messages endpoints have page_count field this at this point in time\n if formatted_response.get(\"@page_count\") is not None:\n num_results = int(formatted_response.get(\"@page_count\"))\n\n elif formatted_response.get(\"page_count\") is not None:\n num_results = int(formatted_response.get(\"page_count\"))\n\n elif formatted_response.get(endpoint_key_0) is not None:\n num_results = 1\n\n else:\n num_results = 0\n\n return num_results", "title": "" }, { "docid": "7fb8bc4ba6488b72ba59a32e8e9bfe7d", "score": "0.48941764", "text": "def get_messages_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'messages')", "title": "" }, { "docid": "d9c9a7351b5e6b87628e5df0f939b8bc", "score": "0.48916543", "text": "def week_count(self, action, user):\n # TODO: DRY this up with week_points and daily_points.\n today = date.today()\n days = [today - timedelta(days=d + 1) for d in range(7)]\n counts = self.redis.hmget(hash_key(user), 
['{t}:{d}'.format(\n t=action.action_type, d=d) for d in days])\n fn = lambda x: int(x) if x else 0\n count = sum([fn(c) for c in counts])\n return count", "title": "" }, { "docid": "b5df5de5c41676218b705a728030ab04", "score": "0.48853853", "text": "def get_skype_for_business_device_usage_distribution_user_counts_with_period(self,period: Optional[str] = None) -> GetSkypeForBusinessDeviceUsageDistributionUserCountsWithPeriodRequestBuilder:\n if not period:\n raise TypeError(\"period cannot be null.\")\n from .get_skype_for_business_device_usage_distribution_user_counts_with_period.get_skype_for_business_device_usage_distribution_user_counts_with_period_request_builder import GetSkypeForBusinessDeviceUsageDistributionUserCountsWithPeriodRequestBuilder\n\n return GetSkypeForBusinessDeviceUsageDistributionUserCountsWithPeriodRequestBuilder(self.request_adapter, self.path_parameters, period)", "title": "" }, { "docid": "3da02365fbbf0bf8be8baaf69bae4b35", "score": "0.48802304", "text": "def get_yammer_groups_activity_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_yammer_groups_activity_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "13bc5d080405b45b397dba25c9c5f7c2", "score": "0.48668444", "text": "def get_office365_groups_activity_file_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_office365_groups_activity_file_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "cf5b2aa85f9eb0daf569be3d62b54b0d", "score": "0.4866709", "text": "def count_filtered(cls, client, filter_) :\n\t\ttry :\n\t\t\tobj = nsicapprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "title": "" }, { "docid": "034db936b87bb651a88cc1b3bb5019d5", "score": "0.48650172", "text": "def positions_count_for_all_ballot_items_view(request): # positionsCountForAllBallotItems\n voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id\n google_civic_election_id = request.GET.get('google_civic_election_id', 0)\n\n results = positions_count_for_all_ballot_items_for_api(\n voter_device_id=voter_device_id,\n google_civic_election_id=google_civic_election_id)\n json_data = {\n 'status': results['status'],\n 'success': results['success'],\n 'position_counts_list': results['position_counts_list'],\n }\n return HttpResponse(json.dumps(json_data), content_type='application/json')", "title": "" }, { "docid": "d4b0a65cedeb0c60aad3057d4d95a8c0", "score": "0.4853327", "text": "def get_teams_device_usage_user_counts(\n self,\n period, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphReport\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphReport\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_teams_device_usage_user_counts.metadata['url'] # type: ignore\n path_format_arguments = {\n 'period': self._serialize.url(\"period\", period, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphReport', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "d66dfc3f5c80872f1494cc24593466f0", "score": "0.48492643", "text": "def get_skype_for_business_peer_to_peer_activity_minute_counts_with_period(self,period: Optional[str] = None) -> GetSkypeForBusinessPeerToPeerActivityMinuteCountsWithPeriodRequestBuilder:\n if not period:\n raise TypeError(\"period cannot be null.\")\n from .get_skype_for_business_peer_to_peer_activity_minute_counts_with_period.get_skype_for_business_peer_to_peer_activity_minute_counts_with_period_request_builder import GetSkypeForBusinessPeerToPeerActivityMinuteCountsWithPeriodRequestBuilder\n\n return GetSkypeForBusinessPeerToPeerActivityMinuteCountsWithPeriodRequestBuilder(self.request_adapter, self.path_parameters, period)", "title": "" }, { "docid": "8b56810ce8095bafcc5bfd73cd5859d9", "score": "0.48387197", "text": "def count(self, params):\n key = MLCache.as_key(params)\n return self.count_.get(key, 0)", "title": "" }, { "docid": "db49acac9cea564a91bd80dc0c42a0d7", "score": "0.48332763", "text": "def get():\n args = request.args\n try:\n count = len(\n Counter.query.filter_by(agency_id=args[\"agency_id\"]).all()\n )\n except KeyError:\n count = len(\n Counter.query.all()\n )\n return {\"count\": count}, 200", "title": "" }, { "docid": "240c02e4d67ea091926cd742b925a0fd", "score": "0.4827131", "text": "def test_get_counts(self):\n self.returns = [\n [{'count': 101}],\n [{'count': 102}],\n [{'count': 100}],\n ]\n\n expectedData = {'tenantId': '123'}\n expectedResults = {\n \"groups\": 100,\n \"policies\": 101,\n \"webhooks\": 102\n }\n config_query = ('SELECT COUNT(*) FROM scaling_group '\n 'WHERE \"tenantId\"=:tenantId AND deleting=false;')\n policy_query = ('SELECT COUNT(*) FROM scaling_policies '\n 'WHERE \"tenantId\"=:tenantId ;')\n webhook_query = ('SELECT COUNT(*) FROM policy_webhooks '\n 'WHERE \"tenantId\"=:tenantId ;')\n\n calls = [\n mock.call(policy_query, expectedData, ConsistencyLevel.ONE),\n mock.call(webhook_query, expectedData, ConsistencyLevel.ONE),\n mock.call(config_query, expectedData, ConsistencyLevel.ONE)]\n\n d = self.collection.get_counts(self.mock_log, '123')\n result = 
self.successResultOf(d)\n self.assertEquals(result, expectedResults)\n self.connection.execute.assert_has_calls(calls)", "title": "" }, { "docid": "d27fc4f1bee3de944f24b8ef2843e5ee", "score": "0.48224473", "text": "def getCount(self):\n return self.base.get(\"count\", [])", "title": "" }, { "docid": "714dc0a6b62757a436e91a7b98197c62", "score": "0.48188707", "text": "def test_get_inbox_count(self):\n pass", "title": "" }, { "docid": "285a2c7f1f1facb7b5ffd2398db46dc4", "score": "0.48164237", "text": "def collect_person_statistics(ps):\n activity_fp = ps.get_activity_fp()\n num_activities = get_line_numbers_file(\n ps.dataset.path_to_folder + settings.ACTIVITY_MAPPING_FILE_NAME) -1\n num_recorded_activities = get_line_numbers_file(activity_fp) -1\n data_size = get_folder_size(activity_fp)\n return num_activities, num_recorded_activities, data_size", "title": "" }, { "docid": "cd75f432c423da5bbc79cf53bfc707dd", "score": "0.48089218", "text": "def count(*args, **kwargs) -> Any:\n pass", "title": "" }, { "docid": "344d3a9068ec8bda2797910c3ec175f0", "score": "0.48069483", "text": "def num_conversations(self):\n return self.session.query(func.count(Conversation.id)).scalar()", "title": "" }, { "docid": "5d1f38108c4d96bb30b5f37275146b5e", "score": "0.48013413", "text": "def get_num_contacts(case_id, outcome, reindexed_activity):\n results = reindexed_activity.ix[[case_id]] #passing a list ensures a dataframe is returned, even if there is only one row\n filtered_results = results[results['Type'] == 'Progress Note']\n value_counts = filtered_results['Sub Type'].value_counts()\n successful_contacts = value_counts.get('Contacted Client', 0)\n unsuccessful_contacts = value_counts.get('Attempted Contact', 0)\n if outcome == 0: # only adjusting if disconnected\n unsuccessful_contacts = unsuccessful_contacts - 3 if unsuccessful_contacts >= 3 else 0\n return successful_contacts, unsuccessful_contacts", "title": "" }, { "docid": "25e5416329b73597eaa055bcac93b540", "score": "0.4795889", "text": "def attending_count(self):\n base_query = ActivityRSVP.query.filter_by(activity_id=self.id)\n return dict((k, base_query.filter_by(rsvp_status=k).count())\n for k in ActivityRSVP.RSVP_STATUSES)", "title": "" }, { "docid": "7b5f033beb83f2de439c236929bb0df0", "score": "0.47947863", "text": "def getSubscribers(channelId, apiKey):\n\n\ttry:\n\t\tr = requests.get('https://www.googleapis.com/youtube/v3/channels?part=statistics&id=' + channelId + '&key=' + apiKey)\n\t\tsubscriberCount = json.loads(r.text)[\"items\"][0][\"statistics\"][\"subscriberCount\"]\n\t\treturn subscriberCount\n\texcept Exception as e:\n\t\tprint str(e)\n\t\treturn None", "title": "" } ]
02e9133c95aad92f77f0ce8497ed635f
fetches information on an order made by the user
[ { "docid": "141cf78b75f4717d3bc7b295de24de6c", "score": "0.558745", "text": "async def fetch_order(self, id: str, symbol: Optional[str] = None, params={}):\n await self.load_markets()\n request = {\n 'id': id,\n }\n response = await self.privateGetOrdersGet(self.extend(request, params))\n #\n # {\n # \"code\": \"A10000\",\n # \"data\": {\n # \"id\": \"608695623247466496\",\n # \"symbol\": \"BTC_BRL\",\n # \"type\": \"MARKET\",\n # \"side\": \"SELL\",\n # \"price\": null,\n # \"averagePrice\": \"0\",\n # \"amount\": \"0.123\",\n # \"filledAmount\": \"0\",\n # \"value\": null,\n # \"filledValue\": \"0\",\n # \"filledFee\": \"0\",\n # \"status\": \"REJECTED\",\n # \"timestamp\": 1565165945588\n # },\n # \"message\": \"Success\"\n # }\n #\n data = self.safe_value(response, 'data', {})\n return self.parse_order(data)", "title": "" } ]
[ { "docid": "18a3fdbfa52c87f6e8bea5267f659b91", "score": "0.7255066", "text": "def get_order(order_id): # pragma: no cover", "title": "" }, { "docid": "86c184f294a199dc763577326206317c", "score": "0.69069827", "text": "def on_retrieve_order(self, order_id, context=None):\n pass", "title": "" }, { "docid": "a037de515ae50da0ec67db3a38258ad2", "score": "0.68423396", "text": "def order_details(order_id):\n\n # Get every product that is included in any order placed\n products = db.execute(\"SELECT * FROM products JOIN order_products ON products.sku=order_products.sku WHERE products.sku IN (SELECT sku FROM order_products WHERE order_id=:order_id)\", order_id=order_id)\n\n for product in products:\n product[\"price_usd\"] = usd(product[\"price\"])\n\n # Get every print job, if any\n prints = db.execute (\"SELECT * FROM print JOIN order_products ON print.sku=order_products.sku WHERE print.sku IN (SELECT sku FROM order_products WHERE order_id=:order_id)\", order_id=order_id)\n\n for print_ in prints:\n print_[\"total_usd\"] = usd(print_[\"total\"])\n\n # Get cart status for the active user\n cart_count=db.execute(\"SELECT SUM (quantity) FROM cart WHERE user_id=:session_id\", session_id=session[\"user_id\"])\n\n if cart_count[0][\"SUM (quantity)\"] == None:\n return render_template(\"order_details.html\", products=products, name=session[\"name\"], prints=prints, cart_count=0)\n\n else:\n return render_template(\"order_details.html\", products=products, name=session[\"name\"], prints=prints, cart_count=cart_count[0][\"SUM (quantity)\"])", "title": "" }, { "docid": "f0fcb01e1c97521ea77698cc1317b209", "score": "0.676911", "text": "def fetch_orders(self,order):\n query = \"SELECT * FROM orders WHERE title=%s\"\n self.cursor.execute(query, (order.title,))\n orders = self.cursor.fetchall()\n return orders", "title": "" }, { "docid": "2b8f07857c0f6e428cc18f848c259867", "score": "0.67287016", "text": "def get_order(order_id):\n # TODO: Define and document order not exists behavior\n data = json.loads(request.data)\n\n user_id = data['user_id']\n user_role = data['type']\n if UserController.check_user_id:\n order = OrderController.get_order(user_id, user_role, order_id)\n return jsonify(order)\n else:\n return jsonify({'status': 'Failed', 'message': 'Not a valid Username'}), 401", "title": "" }, { "docid": "aac70b9136d7ef8730e66bce512f412b", "score": "0.66477305", "text": "def view_order(request, order_id):\n template = loader.get_template('ad_sales/view_order.html')\n order = Order.objects.get(id=order_id)\n username = User.objects.get(id=order.user_id).username\n context = RequestContext(request, {\n 'order_id':order_id,\n 'username':username,\n 'date_recieved':order.date_recieved,\n 'date_completed':order.date_completed,\n 'status':order.status\n })\n return HttpResponse(template.render(context))", "title": "" }, { "docid": "58e7a0f02253573f166a805d9cb6a1e9", "score": "0.66269696", "text": "def get_order(order_id):\n\n return orders.get(Query().order_id == order_id)", "title": "" }, { "docid": "df0ca682318f347412ad04459c56bead", "score": "0.6550691", "text": "def get_order(self, order_id: str) -> outputs.OrderDetailOutput:\n url = self.__create_api_url(\"/orders/{}\", order_id)\n\n request = HTTPRequest(\n method=HTTPMethod.GET,\n url=url,\n )\n response = self.__send_signed_request(request=request)\n\n order_detail = outputs.OrderDetailOutput(**response.data)\n return order_detail", "title": "" }, { "docid": "76f22cdff1a4ce2234195267d770b5c7", "score": "0.6546161", "text": "async def get_order(self, 
account_id: str, order_id: str) -> 'asyncio.Future[MetatraderOrder]':\n response = await self._rpc_request(account_id, {'application': 'RPC', 'type': 'getOrder', 'orderId': order_id})\n return response['order']", "title": "" }, { "docid": "47958c47e50bc88ad551b07200d65ef3", "score": "0.6489197", "text": "def view_shopify_order():\n shop = flask.request.args.get('shop')\n order_id = flask.request.args.get('order_id')\n res = get_shopify_access_token(shop)\n if not res:\n res = {'status': 'access token not found'}\n response = flask.jsonify(res)\n response.status_code = 404\n return response\n shop_access_token = res\n\n url = f'https://{shop}/admin/api/{API_VERSION}/orders/{order_id}.json'\n logging.info(f'Receiving request for url {url}')\n headers = {\"X-Shopify-Access-Token\": shop_access_token}\n res = requests.get(url, headers=headers)\n logging.info(f'get order for {shop}: {res.json()}')\n # Status could be pending, accepted, declined\n response = flask.jsonify(res.json())\n response.status_code = 200\n return response", "title": "" }, { "docid": "69c903e7f66ba8f5a42d05830e5bb1ba", "score": "0.64805806", "text": "def view_orders(request):\n template = loader.get_template('ad_sales/view_orders.html')\n cursor = connection.cursor()\n if request.user.groups.filter(name='User').exists():\n cursor.execute(\n \"SELECT auth_user.username, order.id, order.date_recieved, order.status \"+ \n \"FROM `order` \"+\n \"INNER JOIN auth_user ON order.user_id=auth_user.id AND auth_user.id=%s \"+\n \"GROUP BY order.id \" +\n \"ORDER BY order.date_recieved, order.id\",\n [request.user.id])\n else:\n cursor.execute(\"SELECT auth_user.username, order.id, order.date_recieved, order.status \"+\n \"FROM `order` INNER JOIN auth_user ON order.user_id=auth_user.id GROUP BY order.id \" + \n \"ORDER BY auth_user.username, order.date_recieved, order.id\")\n rows = cursor.fetchall()\n orders = []\n for row in rows:\n orders.append({'id':row[1], 'user':row[0], 'date':row[2].strftime('%Y-%m-%d'), 'status':row[3]})\n context = RequestContext(request, {\n 'orders':orders\n })\n return HttpResponse(template.render(context))", "title": "" }, { "docid": "1a237a180045bcbaa8c62071d48c7d05", "score": "0.64720446", "text": "def get_order(self, order_id: str) -> dict:\n return self.auth_client.get_order(order_id)", "title": "" }, { "docid": "fd550de03c027a711b4710a8d5680704", "score": "0.63867235", "text": "async def get_specific_order(id:int,Authorize:AuthJWT=Depends()):\n\n\n try:\n Authorize.jwt_required()\n except Exception as e:\n raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid Token\"\n )\n\n subject=Authorize.get_jwt_subject()\n\n current_user=session.query(User).filter(User.username==subject).first()\n\n orders=current_user.orders\n\n for o in orders:\n if o.id == id:\n return jsonable_encoder(o)\n \n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"No order with such id\"\n )", "title": "" }, { "docid": "b5727b8557d0485e3223c752dd5011b4", "score": "0.63404185", "text": "def find_user_orders(self, uname):\n connection = connectdb()\n curs = connection.cursor()\n curs.execute(\n \"SELECT * FROM orders WHERE creator = %(creator)s\",\n {'creator': uname})\n\n your_orders = curs.fetchall()\n curs.close()\n connection.close()\n u_orders = []\n for order in your_orders:\n order = {\n 'order_id': order[0],\n 'food_id': order[1],\n 'title': order[2],\n 'price': order[3],\n 'quantity': order[4],\n 'total': order[5],\n 'status': order[6],\n 'creator': order[7]\n }\n 
u_orders.append(order)\n return {success_messages[6]['user_order']: u_orders}", "title": "" }, { "docid": "d70f925d70ea95367805f87085847cf5", "score": "0.6329701", "text": "def get_specific_order(self, order_id):\n for order in self.db:\n if order['order_id'] == int(order_id):\n return {\n \"message\": \"Order retrieved\", \"Order\" : order\n }, 200 \n return {\n \"Error\": \"Order not found!\"\n }, 200", "title": "" }, { "docid": "4d04b13263e1b1d8efa8233b456c35e1", "score": "0.6325912", "text": "def retrieve_order_info(self):\n order = {\n \"customer_id\": self.customer.pk,\n \"delivery_address\": self.delivery_address,\n \"total\": self.total\n }\n return order", "title": "" }, { "docid": "f3d380ceb9d3287d8915b9494afe1175", "score": "0.63165855", "text": "def fetch_order(self,column,did):\n query = \"\"\"SELECT * FROM orders WHERE {0}={1}\"\"\".format(column,did,)\n self.cursor.execute(query,)\n order = self.cursor.fetchall()\n return order", "title": "" }, { "docid": "969eb61b0b10bdb651d3b8f77964246f", "score": "0.63042915", "text": "def show_order(self, order_id, params=None):\n request = self._get('transactions/orders/' + str(order_id), params)\n return self.responder(request)", "title": "" }, { "docid": "a5045897714455b6bc9a3e48babab227", "score": "0.6255103", "text": "async def fetch_order(self, id: str, symbol: Optional[str] = None, params={}):\n await self.load_markets()\n market = None\n if symbol is not None:\n market = self.market(symbol)\n request = {\n 'order_id': id,\n }\n response = await self.v1PrivatePostPrivateGetOrderDetail(self.extend(request, params))\n #\n # {\n # \"id\": 1686872583882,\n # \"method\": \"private/get-order-detail\",\n # \"code\": 0,\n # \"result\": {\n # \"account_id\": \"ae075bef-1234-4321-bd6g-bb9007252a63\",\n # \"order_id\": \"6142909895025252686\",\n # \"client_oid\": \"CCXT_c2d2152cc32d40a3ae7fbf\",\n # \"order_type\": \"LIMIT\",\n # \"time_in_force\": \"GOOD_TILL_CANCEL\",\n # \"side\": \"BUY\",\n # \"exec_inst\": [],\n # \"quantity\": \"0.00020\",\n # \"limit_price\": \"20000.00\",\n # \"order_value\": \"4\",\n # \"avg_price\": \"0\",\n # \"trigger_price\": \"0\",\n # \"ref_price\": \"0\",\n # \"cumulative_quantity\": \"0\",\n # \"cumulative_value\": \"0\",\n # \"cumulative_fee\": \"0\",\n # \"status\": \"ACTIVE\",\n # \"update_user_id\": \"ae075bef-1234-4321-bd6g-bb9007252a63\",\n # \"order_date\": \"2023-06-15\",\n # \"instrument_name\": \"BTC_USD\",\n # \"fee_instrument_name\": \"BTC\",\n # \"create_time\": 1686870220684,\n # \"create_time_ns\": \"1686870220684239675\",\n # \"update_time\": 1686870220684\n # }\n # }\n #\n order = self.safe_value(response, 'result', {})\n return self.parse_order(order, market)", "title": "" }, { "docid": "bed623b877b27fe5388cbd6e75b54987", "score": "0.6242367", "text": "def test_read_order(self):\n # Create an order\n order = Order(customer_name='Test Customer 123')\n order.save()\n\n # Read the order via API\n response = self.client.get('/api/order/%s/' % order.id, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n response.data,\n {'id': order.id, 'customer_name': order.customer_name})", "title": "" }, { "docid": "274e6c3d26492a577adaf2f6f42bd3a7", "score": "0.6238278", "text": "def test_get_order_using_get(self):\n pass", "title": "" }, { "docid": "c57c6f88bf4bdcb259d093fd10e3c2d0", "score": "0.62329054", "text": "def test_get_order(self):\n pass", "title": "" }, { "docid": "bd8038f79ed84e4f9cc8f2279db6c9e3", "score": "0.62262857", "text": "def 
get_order(self, uuid, *args, **kwargs):\n url = self._build_url('/account/getorder')\n\n payload = {'uuid': uuid}\n\n return self._call(url, params=payload)", "title": "" }, { "docid": "f5371c3a74cce3bfe8f9047e22e5f8be", "score": "0.6200291", "text": "def get(self, request):\n form = (\n OrderForm(\n initial={\n \"address_country\": request.user.order_set.latest(\n \"pk\"\n ).address_country,\n \"address_street\": request.user.order_set.latest(\n \"pk\"\n ).address_street,\n \"address_zipcode\": request.user.order_set.latest(\n \"pk\"\n ).address_zipcode,\n \"address_city\": request.user.order_set.latest(\"pk\").address_city,\n }\n )\n if request.user.order_set.all()\n else OrderForm()\n )\n buyer_form = BuyerForm(\n initial={\n \"first_name\": request.user.first_name,\n \"last_name\": request.user.last_name,\n \"email\": request.user.email,\n }\n )\n delivery_form = DeliveryForm()\n payment_form = PaymentForm()\n ctx = {\n \"form\": form,\n \"buyer_form\": buyer_form,\n \"delivery_form\": delivery_form,\n \"payment_form\": payment_form,\n }\n return render(request, \"niunius/order_form.html\", ctx)", "title": "" }, { "docid": "2cd1654bea8427dae168a8da9b5e9c65", "score": "0.6109596", "text": "def get_orders_by_specific_user(self, user_id):\n user_orders = []\n for order in self.db:\n if order['user_id'] == user_id:\n user_orders.append(order)\n if not user_orders:\n return{\n \"message\": \"User does not have orders\"\n }, 200\n return {\n \"message\": \"user orders\", \"User Orders\": user_orders\n }, 200", "title": "" }, { "docid": "e751d6d4512a6fde10e9fb5585c8707d", "score": "0.610943", "text": "async def get_user_orders(Authorize:AuthJWT=Depends()):\n\n\n try:\n Authorize.jwt_required()\n except Exception as e:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid Token\"\n )\n\n user=Authorize.get_jwt_subject()\n\n\n current_user=session.query(User).filter(User.username==user).first()\n\n return jsonable_encoder(current_user.orders)", "title": "" }, { "docid": "7f5573a05f5251aece77c4eed5a00b74", "score": "0.6104635", "text": "def get(self,userId):\n #check if user exists\n check_user = userobj.check_user(userId)\n if not check_user:\n return {'message': \"That user does not exist\"}, 404\n orders = db.get_user_orders(userId)\n #check if user has orders\n if orders:\n return {'All your oders': orders},200\n return {'message': \"User has not placed any orders yet\"},404", "title": "" }, { "docid": "1b8382544bc6204fb2775210045973e1", "score": "0.6068319", "text": "def get(self, request, user_uuid, order_uuid, format=None):\n object = get_object_or_404(Order, user_uuid=user_uuid, uuid=order_uuid)\n serializer = UsersOrderSerializer(object)\n return Response(serializer.data, status=status.HTTP_200_OK)", "title": "" }, { "docid": "bba58789d184578c8d35c134b9aa7b54", "score": "0.6067546", "text": "def show_order(order_id):\n\n if any(order_id in i for i in current_user.get_order_ids()):\n\n order = Order.query.get(order_id)\n\n if not order.paid:\n order.check_paid()\n\n return render_template('order.html', order=order)\n\n else:\n return abort(404)", "title": "" }, { "docid": "0fdd088c0d04c28c6614fbfd413dcafe", "score": "0.6060348", "text": "def get_order(request, order_id):\n print(request, order_id)\n try:\n order = Order.objects.get(id=order_id)\n except Order.DoesNotExist:\n return JsonResponse({'error': 'Order not found.'}, status=404)\n \n # Return order contents (serialize order)\n if request.method == 'GET':\n return JsonResponse()\n\n elif 
request.method == 'PUT':\n data = json.loads(request.body)\n if data.get('order_finished') is not None:\n order.order_finished = data['order_finished']\n\n elif data.get('archived') is not None:\n order.archived = data['archived']\n order.save()\n return HttpResponse(status=204)\n \n # Must be GET or PUT\n else:\n return JsonResponse({\n 'error': \"GET or PUT request required.\"\n }, status=400)", "title": "" }, { "docid": "0493a772eca3d6cdb7cb6eada5edb460", "score": "0.60364985", "text": "def orders(self, args=None):\n if not args:\n args = {}\n args['user'] = self.id\n return self.get_list('order', args=args)", "title": "" }, { "docid": "62edb0a92f191da22894cd00c1a77f66", "score": "0.6021005", "text": "def get_context_data(self, **kwargs):\n\n # Get an order matching a given order_id sent by a user\n order = get_object_or_404(self.request.user.order_set, pk=self.kwargs[\"pk\"])\n return {\"order\": order}", "title": "" }, { "docid": "2fecec51560b21db8ab337af3e8faef4", "score": "0.6013095", "text": "def get_order(self, order_id=None):\n if (self.token_value())['usertype'] == 'admin':\n if order_id is None:\n data = self.order.get_order()\n if data:\n return jsonify({\"status\": \"success\", \"data\": data}), 200\n return jsonify({\"status\": \"failure\", \"error\": {\"message\": \"no order found\"}}), 404\n\n data = self.order.get_order(order_id)\n if data:\n return jsonify({\"status\": \"success\", \"data\": data}), 200\n return jsonify({\"status\": \"failure\", \"error\": {\"message\": \"no order found\"}}), 404\n\n if (self.token_value())['usertype'] == 'user':\n\n if order_id is None:\n data = self.order.get_order(None, (self.token_value())['user_id'])\n if data:\n return jsonify({\"status\": \"success\", \"data\": data}), 200\n return jsonify({\"status\": \"failure\", \"error\": {\"message\": \"no order found\"}}), 404\n\n data = self.order.get_order(order_id, (self.token_value())['user_id'])\n if data:\n return jsonify({\"status\": \"success\", \"data\": data}), 200\n return jsonify({\"status\": \"failure\", \"error\": {\"message\": \"no order found\"}}), 404", "title": "" }, { "docid": "b501b10d407a03d3b3ffda34bc8a5d11", "score": "0.6010721", "text": "def order_callback(self, order):\n rospy.loginfo('Order Received') \n self._order_received = self._order_received + 1\n order_message = order.message\n self._orders = json.loads(order_message)\n\n agr_order_id = self._orders[\"order_id\"]\n self._orders_list[agr_order_id] = self._orders\n\n if self._orders[\"item\"] == \"Medicine\":\n self._orders_dic['HP'].append(agr_order_id)\n\n elif self._orders[\"item\"] == \"Food\" :\n self._orders_dic['MP'].append(agr_order_id)\n\n elif self._orders[\"item\"] == \"Clothes\" :\n self._orders_dic['LP'].append(agr_order_id)", "title": "" }, { "docid": "852358b460003647cedf986580d1cd1a", "score": "0.59794116", "text": "def test_get_orders(self):\n pass", "title": "" }, { "docid": "70e9b1ca74bcf3a0e68797881abfcf93", "score": "0.5956624", "text": "def find_all_orders(self):\n connection = connectdb()\n curs = connection.cursor()\n curs.execute(\"SELECT * FROM orders\")\n all_orders = curs.fetchall()\n curs.close()\n connection.close()\n user_orders = []\n for order in all_orders:\n order = {\n 'order_id': order[0],'food_id': order[1],'title': order[2],'price': order[3],\n 'quantity': order[4],'total': order[5],'status': order[6],'creator': order[7]\n }\n user_orders.append(order)\n return {success_messages[5]['user_orders']: user_orders}", "title": "" }, { "docid": "87ff5aa0c4b12857695fab788532341e", 
"score": "0.5952068", "text": "def cheforders():\n\n # define current user\n current_user = session[\"user_id\"]\n\n # select all chef orders\n orders = db.execute(\"SELECT * FROM chefsorders WHERE id=:user_id\",\n user_id=current_user)\n\n # render template of chef orders\n return render_template(\"cheforders.html\", data=orders)", "title": "" }, { "docid": "d503bceead296e5faa04d6adf407363a", "score": "0.5940372", "text": "def GenerateOrderInfo(self, orders):\n self.__GenerateOrderInfo(orders)", "title": "" }, { "docid": "f29452143cc54dd89de3c5e935c2a439", "score": "0.59198564", "text": "def solicit_order(cookies):\n # write your code for this function below this line", "title": "" }, { "docid": "b6f9acd69d66e3db3ee8300a82a47835", "score": "0.5905797", "text": "async def get_order_by_id(id:int,Authorize:AuthJWT=Depends()):\n\n\n try:\n Authorize.jwt_required()\n except Exception as e:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid Token\"\n )\n\n user=Authorize.get_jwt_subject()\n\n current_user=session.query(User).filter(User.username==user).first()\n\n if current_user.is_staff:\n order=session.query(Order).filter(Order.id==id).first()\n\n return jsonable_encoder(order)\n\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"User not alowed to carry out request\"\n )", "title": "" }, { "docid": "1ebe3d3ba06427e21a0d334c6cb29ff0", "score": "0.5895449", "text": "def test_list_order(self):\n url = \"/api/order/details/\"\n user_token = User.objects.first().token\n headers = {\n 'HTTP_AUTHORIZATION': 'Bearer ' + user_token\n }\n response = self.client.get(url, format='json', **headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['result'][0]['id'], 1)\n self.assertEqual(response.data['result'][0]['order_total'], \"150.00 SAR\", )\n self.assertEqual(response.data['result'][0]['grand_total'], \"150.00 SAR\", )\n self.assertEqual(response.data['result'][0]['shipping_fee'], \"0.00 SAR\", )\n self.assertEqual(response.data['result'][0]['delivery_date'], \"2021-09-04\")\n self.assertEqual(response.data['result'][0]['user'], 1)\n self.assertEqual(response.data['result'][0]['address'][0]['title'], 'home')\n self.assertEqual(response.data['result'][0]['address'][0]['city'], 1)\n self.assertEqual(response.data['result'][0]['address'][0]['address_info'], 'talkha-mansora')", "title": "" }, { "docid": "cfc0cffe35e119587e014e8d2a1fb512", "score": "0.58904344", "text": "def on_order(self, order: OrderData):\n pass", "title": "" }, { "docid": "cfc0cffe35e119587e014e8d2a1fb512", "score": "0.58904344", "text": "def on_order(self, order: OrderData):\n pass", "title": "" }, { "docid": "cfc0cffe35e119587e014e8d2a1fb512", "score": "0.58904344", "text": "def on_order(self, order: OrderData):\n pass", "title": "" }, { "docid": "cfc0cffe35e119587e014e8d2a1fb512", "score": "0.58904344", "text": "def on_order(self, order: OrderData):\n pass", "title": "" }, { "docid": "cfc0cffe35e119587e014e8d2a1fb512", "score": "0.58904344", "text": "def on_order(self, order: OrderData):\n pass", "title": "" }, { "docid": "cfc0cffe35e119587e014e8d2a1fb512", "score": "0.58904344", "text": "def on_order(self, order: OrderData):\n pass", "title": "" }, { "docid": "a748260859d3823576717415c7b9d216", "score": "0.58831674", "text": "def get_orders():\n data = json.loads(request.data)\n user_id = data['user_id']\n if UserController.check_user_id:\n orders = OrderController.get_orders(user_id)\n return jsonify(orders), 200\n 
else:\n return jsonify({'status': 'Failed', 'message': 'Not a valid Username'}), 401", "title": "" }, { "docid": "c3646c17aabffb00b7f1a18fa085cda0", "score": "0.5860806", "text": "def orders(request):\n if not request.user.is_authenticated:\n raise PermissionDenied(NOT_LOGGED_IN)\n\n q = 'SELECT id, made_on, total FROM purchase WHERE user_id = %s'\n pg = pagination(request)\n\n for row in sql(q + page(**pg), request.user.id):\n yield order.__wrapped__(request, details=row)", "title": "" }, { "docid": "f2f090e26a054a4deb8acaf65559df8b", "score": "0.5846827", "text": "def show_order(order_id: int) -> Any:\n try:\n new_order = Orders()\n resp = str(new_order.show_order(order_id))\n except Exception:\n return Response('Неправильный запрос', status=400)\n if request.method == 'GET':\n if resp == '[]':\n return Response('Объект не найден в базе', status=404)\n return str(resp)\n if request.method == 'PUT':\n json_from_request = json.loads(request.data.decode('utf-8'))\n if resp == '[]':\n return Response('Объект не найден в базе', status=404)\n print(striper(resp)['status'])\n print(json_from_request['status'])\n if striper(resp)['status'] == 'not_accepted' and json_from_request['status'] in ['in progress', 'cancelled']:\n new_order.update_orders_not_accepted(order_id,\n json_from_request['status'],\n json_from_request['date_created'],\n json_from_request['driver_id'],\n json_from_request['client_id'])\n return Response('Изменено', status=200)\n elif striper(resp)['status'] == 'in progress' and json_from_request['status'] in ['done', 'cancelled']:\n new_order.update_orders(order_id,\n json_from_request['status'])\n return Response('Изменено', status=200)\n return Response('Неверная последовательность статусов', status=400)", "title": "" }, { "docid": "210c1300c5dd94ce6497ed7eeab4ab28", "score": "0.5832482", "text": "def resolve_order(info, id):\n user = info.context.user\n order = graphene.Node.get_node_from_global_id(info, id, Order)\n if user.has_perm('order.manage_orders') or order.user == user:\n return order\n return None", "title": "" }, { "docid": "d56d232430efdd8f39b398d93b2f46ed", "score": "0.58283156", "text": "def go_to_cart():\n query = \"SELECT flower_name, price, quantity FROM orders WHERE username=%s\"\n values = (session['username'],)\n results_1 = query_handler_fetch(query, values)\n query = \"SELECT flower_name, price, quantity, order_date FROM your_order WHERE username=%s\"\n values = (session['username'],)\n results_2 = query_handler_fetch(query, values)\n return render_template(\"go_to_cart.html\", articles=results_1, order_history=results_2)", "title": "" }, { "docid": "a36cc482583a41a4110f1d9e3ff09889", "score": "0.58264756", "text": "def get_order(self, reference):\n content, status, message, reason = self._request('GET', 'order/%s' % reference)\n if content:\n return xmltodict.parse(content)\n else:\n raise FastSpringException('Could not get order information', status, message, reason)", "title": "" }, { "docid": "fe4c445568be8f141097a017b96f15d0", "score": "0.58205056", "text": "def notify_order(self, order):\n\n # Suppress notification if it is just a submitted order.\n if order.status == order.Submitted:\n return\n\n # Print out the date, security name, order number and status.\n dt, dn = self.datetime.date(), order.data._name\n if self.p.print_orders_trades:\n type = \"Buy\" if order.isbuy() else \"Sell\"\n self.log(\n f\"Order {order.ref:3d},\\tType {type},\\tStatus {order.getstatusname()} \\t\"\n f\"Size: {order.created.size:9.4f}, Price: 
{order.created.price:9.4f}, \"\n )\n if order.status == order.Margin:\n return\n\n # Check if an order has been completed\n if order.status in [order.Completed]:\n if self.p.print_orders_trades:\n self.log(\n f\"{'BUY' if order.isbuy() else 'SELL'} EXECUTED for {dn}, \"\n f\"Price: {order.executed.price:6.2f}, \"\n f\"Cost: {order.executed.value:6.2f}, \"\n f\"Comm: {order.executed.comm:4.2f}, \"\n f\"Size: {order.created.size:9.4f}, \"\n )\n\n if len([o for o in self.ord if o.status < 4]) == 0:\n self.ord = list()", "title": "" }, { "docid": "55137041aa64d983523c2b77cdaeaba2", "score": "0.5783528", "text": "def view_order_details(self, parcel_id):\n exists = self.check_if_parcel_id_exists(parcel_id)\n current_user = get_jwt_identity()\n query = \"\"\" SELECT \"\"\"\n if exists[0]:\n with db as conn:\n curr = conn.cursor()\n query = \"\"\" SELECT user_id, title, rec_name, rec_email, address, postal_code, pick_up, current_location, \n weight, bill, status, created_on FROM parcels WHERE parcel_id = %s \"\"\"\n columns = ('user_id', 'title', 'rec_name', 'rec_email', 'address', 'postal_code', 'pick_up', 'current_location',\n 'weight', 'bill', 'status', 'created_on')\n curr.execute(query, (parcel_id,),)\n record = curr.fetchone()\n values = []\n for value in record:\n values.append(str(value))\n parcel = dict(zip(columns, values))\n\n if current_user['admin']:\n return parcel\n elif current_user['user_id'] == record[0]:\n return parcel\n else:\n return 'Not authorized to view this page'\n else:\n return 'No parcel with id #{}'.format(parcel_id)", "title": "" }, { "docid": "28d7f8bb3cf0f01115b5fe01d47be5fe", "score": "0.5750072", "text": "def test_order(self):\n\n uid = str(self.__order['externalId'])\n\n (pook.get(os.getenv('RETAILCRM_URL') + '/api/v3/orders/' + uid)\n .headers({'X-API-KEY': os.getenv('RETAILCRM_KEY')})\n .reply(200)\n .headers(self.__header)\n .json({'success': 'true', 'orders': self.__order})\n )\n\n response = self.client.order(uid)\n pook.off()\n\n self.assertTrue(response.is_successful(), True)\n self.assertTrue(response.get_status_code() < 400, True)", "title": "" }, { "docid": "7315667c9e228302c768311cf0f95082", "score": "0.5740777", "text": "def order(request):\n if request.user.is_staff or request.user.is_superuser:\n raise PermissionDenied\n # fetch items from db\n cursor = connection.cursor()\n query = \"SELECT item_id, quantity FROM item\"\n cursor.execute(query + \";\")\n items = dictfetchall(cursor)\n\n if request.method == 'POST':\n form = forms.OrderForm(request.POST)\n\n if form.is_valid():\n # validate data:\n item_id = form.cleaned_data[\"item_id\"]\n quantity = form.cleaned_data[\"quantity\"]\n # make changes to database\n for item in items:\n if item[\"item_id\"] == item_id:\n if item[\"quantity\"] - quantity >= 0:\n # obtain user id\n cursor.execute(\"SELECT customer_id FROM customer WHERE email = '{}';\".format(\n request.user.email))\n customer_id = dictfetchall(cursor)[0][\"customer_id\"]\n\n # create new purchase row\n today = date.today().strftime(\"%Y-%m-%d\")\n cursor.execute(\"INSERT INTO purchase (purchase_date, item_id, quantity, customer_id) VALUES ('{}', {}, {}, {});\".format(\n today, item_id, quantity, customer_id))\n\n return HttpResponseRedirect(\"/customer/order\")\n else:\n # blank form if GET request\n form = forms.OrderForm()\n \"\"\"Table that shows all purchases made by customer\"\"\"\n cursor = connection.cursor()\n # obtain user id\n cursor.execute(\"SELECT customer_id FROM customer WHERE email = '{}';\".format(\n 
request.user.email))\n customer_id = dictfetchall(cursor)[0][\"customer_id\"]\n\n # get all purchases from customer\n query = \"SELECT P.purchase_date, P.quantity, P.item_id, I.price, I.name FROM purchase P, item I WHERE P.customer_id={} AND P.item_id=I.item_id ORDER BY P.purchase_date DESC\".format(\n customer_id)\n cursor.execute(query + \";\")\n\n context = {\"items\": dictfetchall(cursor), 'form': form, }\n return render(request, 'customerOrder.html', context)", "title": "" }, { "docid": "1e7738d1fb6aa695b6b34d9cbbc64798", "score": "0.572878", "text": "async def place_an_order(order:OrderModel,Authorize:AuthJWT=Depends()):\n\n\n try:\n Authorize.jwt_required()\n\n except Exception as e:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid Token\"\n )\n\n current_user=Authorize.get_jwt_subject()\n\n user=session.query(User).filter(User.username==current_user).first()\n\n\n new_order=Order(\n pizza_size=order.pizza_size,\n quantity=order.quantity\n )\n\n new_order.user=user\n\n session.add(new_order)\n\n session.commit()\n\n\n response={\n \"pizza_size\":new_order.pizza_size,\n \"quantity\":new_order.quantity,\n \"id\":new_order.id,\n \"order_status\":new_order.order_status\n }\n\n return jsonable_encoder(response)", "title": "" }, { "docid": "74e81f2351d18c2e3a42173a84781d51", "score": "0.5722145", "text": "def fetch_order(self, id: str, symbol: Optional[str] = None, params={}):\n self.load_markets()\n # a special case for btcbox – default symbol is BTC/JPY\n if symbol is None:\n symbol = 'BTC/JPY'\n market = self.market(symbol)\n request = self.extend({\n 'id': id,\n 'coin': market['baseId'],\n }, params)\n response = self.privatePostTradeView(self.extend(request, params))\n #\n # {\n # \"id\":11,\n # \"datetime\":\"2014-10-21 10:47:20\",\n # \"type\":\"sell\",\n # \"price\":42000,\n # \"amount_original\":1.2,\n # \"amount_outstanding\":1.2,\n # \"status\":\"closed\",\n # \"trades\":[]\n # }\n #\n return self.parse_order(response, market)", "title": "" }, { "docid": "4a57eda57f639e359d1f8f419e6c2086", "score": "0.57148844", "text": "def customer_order(request):\n\n if request.method != \"POST\":\n return JsonResponse({\"error\": \"POST request required.\"}, status=400)\n print(\"PRINTING REQUEST\", request, request.body)\n order = json.loads(request.body)\n print(\"CUSTOMER NAME:\", order['name'])\n print(\"CUSTOMER PHONE: \", order['phone'])\n for item in order['order']['items']:\n print(item['name'], item['quantity'], item['price'])\n \n # Save customer information\n customer = Customer(\n name = order['name'],\n phone_num = order['phone']\n )\n customer.save()\n # Create the order\n customer_order = Order(\n customer = customer\n )\n customer_order.save()\n\n for item in order['order']['items']:\n order_item = OrderItem(\n order = customer_order,\n item = item['name'],\n quantity = item['quantity'],\n price = round((float(item['price']) * float(item['quantity'])), 2)\n )\n order_item.save()\n\n return JsonResponse({\"message\": \"Order placed successfully.\"}, status=201)", "title": "" }, { "docid": "ca926a5f015ee0454ea4ff7c7775cd89", "score": "0.570459", "text": "def get_user_order_object(params, user_id):\n\n if params:\n if 'all_orders' in params:\n menu_obj = Order.objects.filter(user_id=user_id)\n return menu_obj\n elif 'pending_orders' in params:\n menu_obj = Order.objects.filter(\n user_id=user_id).filter(status=False)\n return menu_obj\n else:\n raise ValidationError({\n \"message\": \"Kindly pass 'all_orders' or 'pending_orders' in 
params\"\n })", "title": "" }, { "docid": "29c6103a12e14326f76d7e4f3921b67e", "score": "0.5697854", "text": "def get_all_orders(self):\n pass", "title": "" }, { "docid": "4ca3c55e4373f83084ac025008a99b67", "score": "0.56855327", "text": "def order_id(self, request, order_id):\n queryset = Orders.objects.filter(order_id=order_id)\n serializer = Orders_serializer(queryset, many=True)\n\n return Response(serializer.data)", "title": "" }, { "docid": "9131dbb20a9522c8164d45ef837aa1e8", "score": "0.56838375", "text": "def allorders(request):\n\n # This data is driven by using cursors\n cursor = connection.cursor()\n sql = \"SELECT username, orderid, name, quantity, totalvalue, ordertime \\\n FROM orders od, products pr, customers cu WHERE cu.uid = od.uid \\\n AND pr.pid = od.pid\"\n\n result = cursor.execute(sql)\n\n data, orders = cursor.fetchall(), []\n\n for row in data:\n (username, orderid, name, quantity, totalvalue, ordertime,) = row\n orders.append({\"username\": username, \"orderid\": orderid, \"name\": name,\n \"quantity\": quantity, \"total\": totalvalue,\n \"ordertime\": ordertime})\n\n return render(request, 'allorders.html', {\"orders\": orders})", "title": "" }, { "docid": "451426a06ef3e0641dd78d787f8567a3", "score": "0.5678847", "text": "def get_order(pk):\n\n Order.objects.get(pk=pk)\n\n try:\n return Order.objects.get(pk=pk)\n except:\n raise Http404", "title": "" }, { "docid": "bf6d06841e936ac88b0f1922563b8ae6", "score": "0.5677795", "text": "def order_by_user(self, request, user_id):\n queryset = Orders.objects.filter(user=user_id)\n serializer = Orders_serializer(queryset, many=True)\n\n return Response(serializer.data)", "title": "" }, { "docid": "c249978519103efceb2786a6c712c14e", "score": "0.5677709", "text": "def add_order_details(self, order):\n for product in self.products_in_request:\n order_detail = OrderDetail(\n product_id=Product.objects.get(pk=product.get('product_id')),\n order_id=order,\n quantity=product.get('quantity')\n )\n order_detail.save()", "title": "" }, { "docid": "6d5cc1c7cce5bdd2c300868eba88ad5c", "score": "0.5651656", "text": "def orders_detail(request, pk):\n if pk == \"serializers\":\n return\n\n if pk[0] == \"[\" and pk[-1] == \"]\":\n pk = pk[1:-1].split(\",\")\n\n try:\n if type(pk) is str:\n order = Order.objects.get(pk=pk)\n serializer = OrderSerializer(order,context={'request': request})\n\n elif isinstance(pk, list):\n orders = []\n for i in pk:\n orders.append(Order.objects.get(pk=i))\n serializer = OrderSerializer(orders, context={'request': request}, many=True)\n\n except:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n\n\n if request.method == 'GET':\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = OrderSerializer(order, data=request.data, context={'request': request})\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n order.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "title": "" }, { "docid": "078ff29ac0ab41c7c6460f2d6b1f5611", "score": "0.5651538", "text": "def fetch_order(self, id: str, symbol: Optional[str] = None, params={}):\n self.load_markets()\n market = None\n if symbol is not None:\n market = self.market(symbol)\n request = {\n 'order_id': id,\n }\n response = self.v3PrivateGetBrokerageOrdersHistoricalOrderId(self.extend(request, params))\n #\n # {\n # \"order\": {\n # \"order_id\": 
\"9bc1eb3b-5b46-4b71-9628-ae2ed0cca75b\",\n # \"product_id\": \"LTC-BTC\",\n # \"user_id\": \"1111111-1111-1111-1111-111111111111\",\n # \"order_configuration\": {\n # \"limit_limit_gtc\": {\n # \"base_size\": \"0.2\",\n # \"limit_price\": \"0.006\",\n # \"post_only\": False\n # }\n # },\n # \"side\": \"SELL\",\n # \"client_order_id\": \"e5fe8482-05bb-428f-ad4d-dbc8ce39239c\",\n # \"status\": \"OPEN\",\n # \"time_in_force\": \"GOOD_UNTIL_CANCELLED\",\n # \"created_time\": \"2023-01-16T23:37:23.947030Z\",\n # \"completion_percentage\": \"0\",\n # \"filled_size\": \"0\",\n # \"average_filled_price\": \"0\",\n # \"fee\": \"\",\n # \"number_of_fills\": \"0\",\n # \"filled_value\": \"0\",\n # \"pending_cancel\": False,\n # \"size_in_quote\": False,\n # \"total_fees\": \"0\",\n # \"size_inclusive_of_fees\": False,\n # \"total_value_after_fees\": \"0\",\n # \"trigger_status\": \"INVALID_ORDER_TYPE\",\n # \"order_type\": \"LIMIT\",\n # \"reject_reason\": \"REJECT_REASON_UNSPECIFIED\",\n # \"settled\": False,\n # \"product_type\": \"SPOT\",\n # \"reject_message\": \"\",\n # \"cancel_message\": \"\"\n # }\n # }\n #\n order = self.safe_value(response, 'order', {})\n return self.parse_order(order, market)", "title": "" }, { "docid": "05756bc172d069c0dbc21760602a1ff1", "score": "0.5634026", "text": "def get_active_orders(request):\n\n # User role of current logged in user.\n user_role = request.user.role-1\n # List of order objects which are active.\n active_orders = Order.objects.filter(order_restaurant_id=user_role, order_active=True)\n food_items_in_order = []\n\n for order in active_orders:\n # List of orderfooditem objects which are part of the specific order.\n food_items = OrderFoodItem.objects.filter(orderfooditem_order_id=order)\n food_items_dict = {}\n for food_item in food_items:\n # Name of the fooditem in the fooditem list\n fooditem_name = food_item.orderfooditem_fooditem_id.fooditem_name\n # A dictionary of fooditems where the fooditem name is key & the fooditem quantitiy is value.\n food_items_dict[fooditem_name]=food_item.orderfooditem_quantity\n\n # Appending the dictionary to the list which contains such fooditem dictionaries for all orders.\n food_items_in_order.append(food_items_dict)\n # Zipping the order data & the fooditem data of the corresponding order together. 
\n order_data = zip(active_orders,food_items_in_order)\n data = {'order_data':order_data,'user_role':user_role}\n return render(request, 'order_management/active_orders.html',data)", "title": "" }, { "docid": "aa230816289df98b88980f44fafacc65", "score": "0.5626257", "text": "def orders_my(self) -> ApiResponse:\n if self._token is None:\n return ApiResponse(success = False, error = 'NOT_AUTHORIZED')\n\n response = self._send_request(self._server_url, 'GET', self._token, '/api/v1/orders/my')\n\n if response.get('success') is True:\n return ApiResponse(success = True, response = {\n 'orders': response.get('orders')\n })\n\n return ApiResponse(success = False, error = response.get('error'))", "title": "" }, { "docid": "725059c05faf3e79f140821f77a8fd1b", "score": "0.5618792", "text": "def get_order(self, order_id, order_type=None):\n if not order_id:\n return None\n\n try:\n return self._get_order(order_id, order_type)\n except Exception as e:\n logging.error('%s %s except: %s' % (self.name, get_current_function_name(), e))\n return None", "title": "" }, { "docid": "c66f589bdd2b1edbf442592bddf12c51", "score": "0.56033134", "text": "def order_report():", "title": "" }, { "docid": "4d91138d7b7212aec22f0785496d0648", "score": "0.5591921", "text": "def get_order_user(self, user_id):\n if (self.token_value())['usertype'] == 'admin' or ((self.token_value())['usertype'] == 'user' and (self.token_value())['user_id'] == user_id):\n data = self.order.get_order(None, user_id)\n if data:\n return jsonify({\"status\": \"success\", \"data\": data}), 200\n return jsonify({\"status\": \"failure\", \"error\": {\"message\": \"no order found\"}}), 404\n return jsonify({\"status\": \"failure\", \"error\": {\"message\": \"you can't access this resource\"}}), 401", "title": "" }, { "docid": "0c1822f8a719a96e2a80e22c2477d3c3", "score": "0.5581007", "text": "async def fetch_order(self, id: str, symbol: Optional[str] = None, params={}):\n await self.load_markets()\n request = {\n 'order_id': id,\n }\n order = await self.privateGetOrdersOrderId(self.extend(request, params))\n marketId = self.safe_string(order, 'symbol')\n market = self.safe_market(marketId)\n return self.parse_order(order, market)", "title": "" }, { "docid": "6daf506d03e4b9294dd7a08c66d65143", "score": "0.5571171", "text": "def __init__(self):\n self.id = \"\"\n self.name = \"\"\n self.orders = []", "title": "" }, { "docid": "3b188e76d29fe0f9e1782063aa3ef592", "score": "0.55650705", "text": "async def list_all_orders(Authorize:AuthJWT=Depends()):\n\n\n try:\n Authorize.jwt_required()\n except Exception as e:\n raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid Token\"\n )\n\n current_user=Authorize.get_jwt_subject()\n\n user=session.query(User).filter(User.username==current_user).first()\n\n if user.is_staff:\n orders=session.query(Order).all()\n\n return jsonable_encoder(orders)\n\n raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"You are not a superuser\"\n )", "title": "" }, { "docid": "db1b6690cd2fcd7d73dc5403217d9cdc", "score": "0.5562727", "text": "def order(order_id):\n\n omise.api_secret = current_app.config.get(\"OMISE_SECRET_KEY\")\n omise.api_version = current_app.config.get(\"OMISE_API_VERSION\")\n omise.api_main = current_app.config.get(\"OMISE_API_BASE\")\n\n try:\n search = omise.Search.execute(\"charge\", **{\"query\": order_id})\n chrg = search[0]\n return process(chrg, already_redirected=True)\n except IndexError as error:\n flash(f\"Order {order_id} not found.\")\n return 
redirect(url_for(\"checkout.check_out\"))\n except omise.errors.BaseError as error:\n flash(f\"An error occurred. Please contact support. Order ID: {order_id}\")\n current_app.logger.error(f\"OmiseError: {repr(error)}.\")\n return redirect(url_for(\"checkout.check_out\"))\n except Exception as e:\n flash(\"\"\"An error occurred. Please contact support.\"\"\")\n current_app.logger.error(repr(e))\n return redirect(url_for(\"checkout.check_out\"))", "title": "" }, { "docid": "e315b23080b0ed0b38dcb136cfc275a5", "score": "0.5549427", "text": "def userfetch():", "title": "" }, { "docid": "42ed35282c2df38e41bdd4abe6c501c9", "score": "0.5549384", "text": "def get_orders():\n uri = URI + ENDPOINT\n response = requests.get(uri, headers=HEADERS)\n return response.json().get('Data').get('Page')", "title": "" }, { "docid": "b5a1590c1c75e1ce2e10b83a1314656d", "score": "0.5548746", "text": "def microbial_submitted_order():\n order = {'customer': 'cust000',\n 'name': 'test order',\n 'internal_id': 'lims_reference',\n 'comment': 'test comment',\n 'ticket_number': '123456',\n 'items': [\n dict(name='Jag', internal_id='ms1', reads='1000', container='96 well plate',\n container_name='hej',\n rml_plate_name=None,\n well_position='D:5', well_position_rml=None, sex=None, panels=None,\n require_qcok=True,\n application='MWRNXTR003', source=None, status=None, customer='cust015',\n family=None,\n priority='standard', capture_kit=None, comment=None, index=None,\n reagent_label=None,\n tumour=False, custom_index=None, elution_buffer='Nuclease-free water',\n organism='C. Jejuni', reference_genome='NC_111',\n extraction_method='MagNaPure 96 (contact Clinical Genomics before '\n 'submission)',\n analysis='fastq', concentration_weight='1', mother=None, father=None),\n dict(name='testar', internal_id='ms2', reads='1000', container='96 well plate',\n container_name='hej',\n rml_plate_name=None,\n well_position='H:5', well_position_rml=None, sex=None, panels=None,\n require_qcok=True,\n application='MWRNXTR003', source=None, status=None, customer='cust015',\n family=None,\n priority='standard', capture_kit=None, comment=None, index=None,\n reagent_label=None,\n tumour=False, custom_index=None, elution_buffer='Nuclease-free water',\n organism='M.upium', reference_genome='NC_222',\n extraction_method='MagNaPure 96 (contact Clinical Genomics before '\n 'submission)',\n analysis='fastq', quantity='2', mother=None, father=None),\n dict(name='lite', internal_id='ms3', reads='1000', container='96 well plate',\n container_name='hej',\n rml_plate_name=None,\n well_position='A:6', well_position_rml=None, sex=None, panels=None,\n require_qcok=True,\n application='MWRNXTR003', source=None, status=None, customer='cust015',\n family=None,\n priority='standard', capture_kit=None, comment='3', index=None,\n reagent_label=None,\n tumour=False, custom_index=None, elution_buffer='Nuclease-free water',\n organism='C. 
difficile', reference_genome='NC_333',\n extraction_method='MagNaPure 96 (contact Clinical Genomics before '\n 'submission)',\n analysis='fastq', mother=None, father=None)], 'project_type': 'microbial'}\n return order", "title": "" }, { "docid": "4fb8073c8f96574104a96c3f44acdc63", "score": "0.5534544", "text": "def placeOrder(self):\n # Order Type ID: 1 = Buy ; 2 = Sell\n # Price Type ID: 3 = Limit; 6 = Stop-Limit; 8 = Limit(Margin); 9 = Stop-Limit(Margin)\n self.order_info = self.trader.add_order(self.auth_id, self.exch_id, self.mkt_id, 1, 3, self.ask, self.amount_to_buy)\n self.order_id = self.order_info[\"data\"][\"internal_order_id\"]\n print(self.order_info)\n time.sleep(120)", "title": "" }, { "docid": "5a4dead1146224e774679e77b00213e0", "score": "0.55295163", "text": "def get_order(self):\n if 'order' in self.__data:\n return self.__data['order']", "title": "" }, { "docid": "3897f2ae6ed63f12de774d573a3a0c94", "score": "0.551041", "text": "def getOrder(self):\n return self.order", "title": "" }, { "docid": "22afdf1d94be6020bce4e6b32ce3672a", "score": "0.5508652", "text": "def get_order_by_id(self, order_id):\n LOG.debug(_(\"Getting order - Order ID: {0}\").format(order_id))\n href = \"{0}/{1}/{2}\".format(self._tenant, self.ORDERS_PATH, order_id)\n return self.get_order(href)", "title": "" }, { "docid": "9a11c79b5dd511dbbc8e8b501192bc9c", "score": "0.5502555", "text": "def getOrder(self,storeOrderId):\n orderArr = AccessLayer.getOrder(storeOrderId)\n if len(orderArr) == 0:\n return None\n customer = Customer.Customers().getCustomerById(orderArr[3])\n return self.orderArrToStoreOrder(orderArr,customer)", "title": "" }, { "docid": "73b8adf137691581fbe1bdc77ee836ca", "score": "0.5483849", "text": "def test_get_order(self):\n\t\tOrder.objects.create(customer_name='danny', customer_address=\"5 Lynarstr.\", pizza_size='50')\n\t\tresponse = self.client.get(self.url)\n\t\tself.assertTrue(len(json.loads(response.content)) == Order.objects.count())", "title": "" }, { "docid": "81cd66cb80e72850fb7553fe068b32a4", "score": "0.5481721", "text": "def track_order(request, order_id, template=\"shop/track_order.html\", extra_context=None):\n try:\n order = Order.objects.get_for_user(order_id, request)\n except Order.DoesNotExist:\n raise Http404\n # if None...\n context = {\"order\": order}\n context.update(order.details_as_dict())\n context.update(extra_context or {})\n return render(request, template, context)", "title": "" }, { "docid": "1f3c3af1a9515e36ddef668f4014abe2", "score": "0.5479833", "text": "def test_detail_order_by_api(self):\n\n\t\torder = TestUtiles.create_order_test_with_detail(3)\n\t\ttoken = TestUtiles.create_token_test()\n\t\tclient = APIClient()\n\t\tclient.credentials(HTTP_AUTHORIZATION=token)\n\n\t\t\"\"\" test first page of data \"\"\"\n\t\tresponse = client.get('/orders/{order_id}/detail_order/'.format(order_id=order.id), {}, format='json')\n\t\tresponse_data = response.json()\n\t\tself.assertIs(response.status_code, status.HTTP_200_OK)\n\t\tself.assertIs(len(response_data.get('results', [])) > 0, True)\n\t\tself.assertContains(response, \"/orders/\")\n\t\tself.assertIsNone(response_data.get('previous', False))\n\n\t\t\"\"\" test next page of data \"\"\"\n\t\tresponse = client.get(response_data.get('next'), {}, format='json')\n\t\tresponse_data = response.json()\n\t\tself.assertIs(response.status_code, status.HTTP_200_OK)\n\t\tself.assertIs(len(response_data.get('results', [])) > 0, True)\n\t\tself.assertContains(response, 
\"/orders/\")\n\t\tself.assertIsNone(response_data.get('next', False))", "title": "" }, { "docid": "fc629b3fffa239012d8cb2f9c7296150", "score": "0.5477508", "text": "def manual_order( order ):\n return process_transfer( order )", "title": "" }, { "docid": "256ed4af84fe477c588e19d630c9a7b5", "score": "0.54760325", "text": "def b_orders(update, context):\n have_order = False\n # 检查用户ID\n user_id = update.message.from_user.id\n select_sql = \"select b_api_key, b_secret_key, api_lable from \" + t_table +\" where tg_id={}\".format(user_id)\n results = select_data(select_sql)\n if not results:\n update.message.reply_text(\"请先绑定API\")\n return\n # 友情提示\n update.message.reply_text(\"订单查询中,请耐心等待。\")\n for result in results:\n all_symbols = send_signed_request('GET', '/fapi/v2/account', result) # 查询账户交易对\n # all_symbols = send_signed_request('GET', '/fapi/v1/openOrders', results[0]) # 所有挂单\n if all_symbols:\n all_symbols = all_symbols[\"positions\"]\n for symbol in all_symbols:\n # =======================================当前持仓========================================================\n # 没有持仓的去掉\n # if float(symbol['entryPrice']) == 0.0:\n # continue\n # symbol_ = symbol['symbol'] # 交易对\n # positionAmt = symbol['positionAmt'] # 持仓数量\n # entryPrice = symbol['entryPrice'] # 持仓成本价\n # unrealizedProfit = symbol['unrealizedProfit'] # 持仓未实现盈亏\n # positionType = \"多单\"\n # if float(positionAmt) < 0:\n # positionType = \"空单\"\n # order_info_str = \"账户:{}\\n\" \\\n # \"交易对:{}\\n\" \\\n # \"持仓方式:{}\\n\" \\\n # \"持仓数量:{}\\n\" \\\n # \"持仓均价:{}\\n\" \\\n # \"持仓未实现盈亏:{}\" .format(result[2], symbol_.replace(\"USDT\", \"_USDT\"), positionType,\n # positionAmt, entryPrice, unrealizedProfit)\n # # 推送到指定用户\n # update.message.reply_text(order_info_str)\n # ========================================历史持仓=======================================================\n # 获取每个交易对的历史记录\n # 可以设置开始结束时间做筛选,13位时间戳\n history_orders = send_signed_request('GET', '/fapi/v1/userTrades', results[0],\n {'symbol': symbol['symbol']}) # 订单历史\n if not history_orders:\n continue\n # 排序\n # history_orders.sort(key=lambda k: (k.get('time', 0)))\n # 获取持有的币种的最后五笔订单\n # history_orders = history_orders[-10:]\n for info in history_orders:\n buyer = info['buyer'] # 是否是买方\n commission = info['commission'] # 手续费\n commissionAsset = info['commissionAsset'] # 手续费计价单位\n maker = info['maker'] # 是否是挂单方\n orderId = info['orderId'] # 订单编号\n price = info['price'] # 成交价\n qty = info['qty'] # 成交量\n quoteQty = info['quoteQty'] # 成交额\n realizedPnl = info['realizedPnl'] # 实现盈亏\n side = info['side'] # 买卖方向\n positionSide = info['positionSide'] # 持仓方向\n symbol = info['symbol'] # 交易对\n time_ = info['time'] # 时间\n # 从时间筛选订单,半个小时内订单\n if time() - float(time_)/1000 > 60*60:\n continue\n # 转换时区\n tz = pytz.timezone('Asia/ShangHai')\n dt = pytz.datetime.datetime.fromtimestamp(time_/1000, tz)\n time_ = str(dt.strftime('%Y-%m-%d %H:%M:%S'))\n if float(realizedPnl) != 0.0:\n if float(realizedPnl) > 0.0:\n order_info_str = \"账户:{}\\n\" \\\n \"交易对:{}\\n\" \\\n \"订单编号:{}\\n\" \\\n \"订单类型:{} {}\\n\" \\\n \"成交价:{} USDT\\n\" \\\n \"成交量:{} {}\\n\" \\\n \"成交额:{} USDT\\n\" \\\n \"手续费:{} {}\\n\" \\\n \"实现盈亏:{} USDT\\ud83d\\udcb0\\ud83d\\udcb0\\ud83d\\udcb0\\n\" \\\n \"成交时间:{}\".format(result[2], symbol.replace(\"USDT\", \"_USDT\"), orderId,\n zh_order_type(maker), zh_order_position(buyer),\n price, qty, symbol.replace(\"USDT\", \"\"), quoteQty,\n commission, commissionAsset, realizedPnl, time_)\n else:\n order_info_str = \"账户:{}\\n\" \\\n \"交易对:{}\\n\" \\\n \"订单编号:{}\\n\" \\\n \"订单类型:{} {}\\n\" 
\\\n \"成交价:{} USDT\\n\" \\\n \"成交量:{} {}\\n\" \\\n \"成交额:{} USDT\\n\" \\\n \"手续费:{} {}\\n\" \\\n \"实现盈亏:{} USDT\\ud83e\\udd7a\\ud83e\\udd7a\\ud83e\\udd7a\\n\" \\\n \"成交时间:{}\".format(result[2], symbol.replace(\"USDT\", \"_USDT\"), orderId,\n zh_order_type(maker), zh_order_position(buyer),\n price, qty, symbol.replace(\"USDT\", \"\"), quoteQty,\n commission, commissionAsset, realizedPnl, time_)\n else:\n # continue\n order_info_str = \"账户:{}\\n\" \\\n \"交易对:{}\\n\" \\\n \"订单编号:{}\\n\" \\\n \"订单类型:{} {}\\n\" \\\n \"成交价:{} USDT\\n\" \\\n \"成交量:{} {}\\n\" \\\n \"成交额:{} USDT\\n\" \\\n \"手续费:{} {}\\n\" \\\n \"成交时间:{}\".format(result[2], symbol.replace(\"USDT\", \"_USDT\"), orderId,\n zh_order_type(maker), zh_order_position(buyer),\n price, qty, symbol.replace(\"USDT\", \"\"), quoteQty,\n commission, commissionAsset, time_)\n # ==================================================================================================\n # 推送到指定用户\n update.message.reply_text(order_info_str)\n have_order = True\n # ======================================================================================================\n if not have_order:\n update.message.reply_text(\"最近暂无订单,请稍后重试。\")\n else:\n update.message.reply_text(\"订单查询完成。\")", "title": "" }, { "docid": "2a30f2e9a09bc6f4dd541c955bbc6e9b", "score": "0.547378", "text": "def trackOrder(self, id_num):\n for i in self.ord:\n if i.id_here == id_num:\n return f\"Your order #{id_num} is sent to {i.city}. Total price: {i.calculateAmount()} UAH.\"\n return 'No such order.'", "title": "" }, { "docid": "5f2c106b621ee1c71f7e3be4c0fd1d3e", "score": "0.5456643", "text": "def get(self, current_user):\n user_role = current_user[0][7]\n specific_user_id = current_user[0][0]\n if user_role == 'user':\n try:\n user_id = int(specific_user_id)\n except:\n return jsonify({'message':'Invalid User Id'}), 400\n return Orders.get_specific_user_orders(self,user_id)\n return jsonify({'message':'Cannot Perform That Function!'}), 404", "title": "" }, { "docid": "fe8c0ab86d6e0b6c70a2b00cab15538f", "score": "0.54531616", "text": "def orders(self, **args):\n uri = 'commerce/orders'\n\n result = self.get(uri, args)\n self._next_page = result['pagination']['nextPageCursor'] if 'nextPageCursor' in result['pagination'] else None\n\n return result['result']", "title": "" }, { "docid": "d89bb16e55197f7b6d84b2486c856acb", "score": "0.54523534", "text": "def get_order_book(self, **kwargs):\n return self.client.get_order_book(data=kwargs)", "title": "" }, { "docid": "f436e6b12b85911daa8eb9b9adc2a675", "score": "0.54410225", "text": "def get(self,order_num):\n for order in orders:\n if(order_num == order[\"order_num\"]):\n return order, 200\n return \"Order Not Found\", 404", "title": "" }, { "docid": "25ff21f64391358a4a817f2086129fe8", "score": "0.54403764", "text": "def user_profile(request, pk=None):\n user = User.objects.get(username=request.user.username, email=request.user.email, pk=pk)\n\n if request.user.is_authenticated:\n orders = BuyProduct.objects.filter(user_account=request.user)\n else:\n user = request.user\n \n context = {\"profile\": user, \"orders\": orders}\n\n return render(request, 'profile.html', context)", "title": "" }, { "docid": "092f77bd688e8f6befa9f38e0e1b7ccf", "score": "0.5439471", "text": "def __GenerateOrderInfo(self, orders):\n orders = [(order.property(), order.direction()) for order in orders]\n if orders and orders[-1] == ('__key__', datastore_pb.Query_Order.ASCENDING):\n orders.pop()\n return orders", "title": "" }, { "docid": 
"cae7eaeed75d343a54c24aa3f24242cb", "score": "0.5433412", "text": "def customer_checkout():\n\n global item_number, item_list, user, user_basket, user_order, product_list, vendorname_list\n\n customer_delivery_address()\n customer_delivery_method()\n customer_promo()\n customer_pay()\n con.commit()\n\n print(\"Order complete.\")\n print(\"Order ID: \" + str(user_order.orderid))\n\n customer_continue()", "title": "" }, { "docid": "359f7bfb2d2eea5213b988d81218ab6d", "score": "0.5429153", "text": "def get(self, request, uuid, format=None):\n objects = Order.objects.filter(user_uuid=uuid)\n serializer = UsersOrderSerializer(objects, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)", "title": "" } ]
f519c5e185687bcd8c9aae2adddd9e02
Check if the current session was created using an appropriate authentication type (e.g. "login" or "apikey"); the argument is a whitelist (string or list object) of acceptable authentication types.
[ { "docid": "cf4ff46866f52ea92df38ab4a2b3fd55", "score": "0.6155261", "text": "def authentication_type(auth_type):\n\n def func(method):\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n if self.session is not None:\n if isinstance(auth_type, basestring):\n if self.session[\"authentication\"] == auth_type:\n return method(self, *args, **kwargs)\n elif isinstance(auth_type, list):\n if self.session[\"authentication\"] in auth_type:\n return method(self, *args, **kwargs)\n self.set_status(NOT_AUTHORIZED)\n self.write({\"errors\": [\n \"This session is authorized to call this method\"\n ]})\n return wrapper\n return func", "title": "" } ]
[ { "docid": "9c43e85be1716e5081dd5b8cada10ced", "score": "0.5811439", "text": "def is_authenticated(self, request):\n authenticated = False\n for auth in self.auth_types:\n authenticated = auth.is_authenticated(request)\n if authenticated:\n selected_auth = auth\n break\n return authenticated", "title": "" }, { "docid": "9c43e85be1716e5081dd5b8cada10ced", "score": "0.5811439", "text": "def is_authenticated(self, request):\n authenticated = False\n for auth in self.auth_types:\n authenticated = auth.is_authenticated(request)\n if authenticated:\n selected_auth = auth\n break\n return authenticated", "title": "" }, { "docid": "30f8e506db108d985dc355952876c8e7", "score": "0.5737887", "text": "def allowed_authentication_type(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"allowed_authentication_type\")", "title": "" }, { "docid": "30f8e506db108d985dc355952876c8e7", "score": "0.5737887", "text": "def allowed_authentication_type(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"allowed_authentication_type\")", "title": "" }, { "docid": "1a3c15b08c7f17b210371b6600276f40", "score": "0.5667453", "text": "def check_auth(*args, **kwargs):\n if settings.get(\"web\")[\"auth\"] == 1:\n conditions = cherrypy.request.config.get('auth.require', None)\n if conditions is not None:\n username = cherrypy.session.get(SESSION_KEY)\n if username:\n cherrypy.request.login = username\n for condition in conditions:\n # A condition is just a callable that returns true or false\n if not condition():\n raise cherrypy.HTTPRedirect(\"/auth/login\")\n else:\n raise cherrypy.HTTPRedirect(\"/auth/login\")\n else:\n return None", "title": "" }, { "docid": "4ee31f89fb2ce8d8f41ef0b910699d8d", "score": "0.56611747", "text": "def test_task_list_authentication_classes(self):\n self.assertEqual(TaskList.authentication_classes, (TokenAuthentication,))", "title": "" }, { "docid": "faa2934bdd42c1f27294d3ca5b8af7f9", "score": "0.562855", "text": "def supports_session_type(cls, sessiontype: Type[ba.Session]) -> bool:\n return (issubclass(sessiontype, DualTeamSession)\n or issubclass(sessiontype, FreeForAllSession))", "title": "" }, { "docid": "0316b0933e871d1cfa6de5f583ca0f28", "score": "0.5551004", "text": "def authenticate(self):\n self.unsupported()\n return False", "title": "" }, { "docid": "f27930e42268ed229283a40a3d439dd7", "score": "0.54980725", "text": "def check_auth(self):\n raise NotImplementedError()", "title": "" }, { "docid": "97b587136b92da97d9b8141d24ffad46", "score": "0.54445773", "text": "def is_logged_in():\n return 'userid' in session and 'screenname' in session", "title": "" }, { "docid": "5ff0885319c20decccc5b2b7f833577e", "score": "0.5422277", "text": "def check_login(ctx: EndpointContext) -> bool:\n return \"login\" in ctx.session", "title": "" }, { "docid": "f617529a6d3b0538ae9432cfd6e909e8", "score": "0.53938043", "text": "def check_auth(self, username, password, allowed_roles, resource, method):\n return username == 'admin' and password == 'secret'", "title": "" }, { "docid": "4564ef9d975c72cc3a096ad1b900e8b3", "score": "0.5386885", "text": "def test_valid_login_allowed(self):\n request = self.factory.post(settings.LOGIN_URL, REMOTE_ADDR=self.ip1, \n HTTP_USER_AGENT=self.useragent1)\n user = self.authenticate(request, **self.goodcredentials)\n self.assertEqual(user, self.user)", "title": "" }, { "docid": "32e08a88918d49d7402da5ff1bacb0bb", "score": "0.53676134", "text": "def is_auth():\n authenticated = False\n try:\n if session['oauth_token']:\n authenticated = True\n 
except KeyError:\n authenticated = False\n return authenticated", "title": "" }, { "docid": "18d40d9a8c157b409dc4eee5fef6eee5", "score": "0.5317518", "text": "def is_authenticated():\n return g.user and 'identity' in g.user", "title": "" }, { "docid": "35511d3972b4c990f84a22ac910597e6", "score": "0.53048456", "text": "def accept_login(users, username, password):\n if username in users:\n return users[username] == password", "title": "" }, { "docid": "23c59c3e026d493733f26a7f36a65806", "score": "0.5301993", "text": "def authenticate(self, params):\n if params.get('identifier', None) and params.get('password', '') == 'test':\n return True\n return False", "title": "" }, { "docid": "8dcd1fbe01b5f9e21b7003a3721337c8", "score": "0.5276714", "text": "def check_auth(username, password):\n return username == app.director.username and password == app.director.password", "title": "" }, { "docid": "d0cddbfbd99f784e97e7c809bc6f6313", "score": "0.52553636", "text": "def is_authenticated():\n return 'author' in session", "title": "" }, { "docid": "a3a0670bb8d70b0b62d78e1a8c047ab5", "score": "0.52392906", "text": "def check_auth(username, password):\n return username == 'username' and password == 'password'", "title": "" }, { "docid": "6b65eb3c03738301ff048d5e6f972130", "score": "0.5229523", "text": "def is_authenticated():\n return True", "title": "" }, { "docid": "053cc7ac6a404c8830beafc7f7c1dcc9", "score": "0.5219979", "text": "def supports_authentication_validation(self):\n return False", "title": "" }, { "docid": "74c0fd2519ba65da4a4828d62e30ca6d", "score": "0.5218497", "text": "def check_auth(username, password):\n return username == app.config['UNAME'] and password == app.config['PASSWORD']", "title": "" }, { "docid": "5d8ee70016a8f67debd4d9443029a84d", "score": "0.5214262", "text": "def check_auth(username, password):\n return username == admin_config.get('DEFAULT', 'BACKEND_USERNAME') and password == admin_config.get('DEFAULT', 'BACKEND_PASSWORD')", "title": "" }, { "docid": "0849f57a292b08084878662f5f2e8f72", "score": "0.5181039", "text": "def login_available(cls, settings):\n return True", "title": "" }, { "docid": "df8c706275a36cdfa951747fbbd13b1e", "score": "0.5173363", "text": "def __init__(self, auth_types):\n self.auth_types = auth_types\n self.selected_auth = auth_types[0]", "title": "" }, { "docid": "df8c706275a36cdfa951747fbbd13b1e", "score": "0.5173363", "text": "def __init__(self, auth_types):\n self.auth_types = auth_types\n self.selected_auth = auth_types[0]", "title": "" }, { "docid": "03bbd8e7d03ef6bf2def49a21e56ea3d", "score": "0.5172624", "text": "def is_authenticated(self):\n\t\treturn True", "title": "" }, { "docid": "03bbd8e7d03ef6bf2def49a21e56ea3d", "score": "0.5172624", "text": "def is_authenticated(self):\n\t\treturn True", "title": "" }, { "docid": "03bbd8e7d03ef6bf2def49a21e56ea3d", "score": "0.5172624", "text": "def is_authenticated(self):\n\t\treturn True", "title": "" }, { "docid": "8f2e0b1e93f9910934348b85eba96745", "score": "0.51724476", "text": "def check_auth(username, password, config):\n return username == config.username and password == config.password", "title": "" }, { "docid": "6a147044bdb34446aa8da3ac649fd697", "score": "0.51465607", "text": "def check_auth(username, password):\n return username == 'admin' and password == 'adminbook'", "title": "" }, { "docid": "9b3a41f5a1213b0c69cd56b5e69a0b16", "score": "0.5141585", "text": "def check_auth(username, password):\n return username == 'admin' and password == 'secret'", "title": "" }, { "docid": 
"9b3a41f5a1213b0c69cd56b5e69a0b16", "score": "0.5141585", "text": "def check_auth(username, password):\n return username == 'admin' and password == 'secret'", "title": "" }, { "docid": "9b3a41f5a1213b0c69cd56b5e69a0b16", "score": "0.5141585", "text": "def check_auth(username, password):\n return username == 'admin' and password == 'secret'", "title": "" }, { "docid": "a8556c7340c24c49c260bfdce61656d2", "score": "0.5133274", "text": "def check_auth(username, password):\n\treturn username == 'admin' and password == 'password'", "title": "" }, { "docid": "ae6142b5af5b87ad6bd1d4c8589042e6", "score": "0.5129292", "text": "def check_authentication():\n from core import config\n if not config.get('reebill', 'authenticate'):\n cherrypy.session['user'] = UserDAO.default_user\n return True\n\n user_dao = UserDAO()\n if 'reebill_session' in cherrypy.request.cookie:\n token = cherrypy.request.cookie['reebill_session'].value\n user = user_dao.load_by_session_token(token)\n\n if user is not None:\n # Reset the session since we may not have the user in this\n # interpreter process's session yet\n cherrypy.session['user'] = user\n return True\n\n raise Unauthenticated(\"No Session\")", "title": "" }, { "docid": "02b02f8424b38ff8393c9f20dd7a5cb6", "score": "0.5115596", "text": "def check_auth(username, password):\n # This is bad: fix if actually going to use on a larger scale:\n return username == 'admin' and password == 'secret'", "title": "" }, { "docid": "7fddf658d7779c84158a78b6b32916d1", "score": "0.5108889", "text": "def check_auth(username, password):\n return username == app.config['HTTP_USER'] and password == app.config['HTTP_PASSWORD']", "title": "" }, { "docid": "c9790dc8f79d4fbd3612719b047b3fec", "score": "0.5102442", "text": "def valid(environ, username, password):\n return username == password", "title": "" }, { "docid": "ff61a832c69b85c9a6e858c8786c73bc", "score": "0.5099994", "text": "def check_auth(username, password):\n return username == 'captainu' and password == 'passw0rd'", "title": "" }, { "docid": "4b61e852480dac0d690229c99367570d", "score": "0.5097881", "text": "def supports_authentication_acquisition(self):\n return False", "title": "" }, { "docid": "63d4ee69d515582e20af6fe9886c370d", "score": "0.50895983", "text": "def check_auth(username, password):\n\n return username == 'admin' and password == 'secret'", "title": "" }, { "docid": "00bb8ff91bf5e9d3e1b3b1f3b6dbf1dc", "score": "0.50866354", "text": "def _check_sponsor_auth(username, password):\n return username.lower() == 'sponsor' and password == settings['sponsor_portal_password']", "title": "" }, { "docid": "31da7d5572124f3e102bcb6b118522cb", "score": "0.5081235", "text": "def check_auth(username, password):\n return username == admin_name and password == admin_password", "title": "" }, { "docid": "51c3784f4e481ce93a7ce5e9b80d5021", "score": "0.50774306", "text": "def is_authenticated(self):\r\n return True", "title": "" }, { "docid": "4245dd767ba61dc7de9e1145c508e74a", "score": "0.5072933", "text": "def check_auth(username, password):\n return username == 'ACCESS' and password == 'DENY'", "title": "" }, { "docid": "e513e6c70acf311508239cd8d9cab6d0", "score": "0.5072681", "text": "def check_auth(username, password):\n return username == 'sample' and password == 'sample'", "title": "" }, { "docid": "ae5fff8b01a94db92e7f665fd607c0b1", "score": "0.50694335", "text": "def get_enabled_auth_types(self) -> Iterable[str]:\n return self.checkers.keys()", "title": "" }, { "docid": "16a3af65d8a665c62c703da1d5e264d1", "score": 
"0.5067878", "text": "def valid_auth(self):\n return self.get_user_data() is not None", "title": "" }, { "docid": "3009fea64abd2f63da4917b112768204", "score": "0.5053872", "text": "def check_auth(username, password):\n return username == ADMIN_USER and password == ADMIN_PASSWORD", "title": "" }, { "docid": "632aad90e0878efc4492130ed1586edb", "score": "0.50403124", "text": "def check_auth(username, password):\n\n userList = col.find_one({\"user\": username})\n passList = col.find_one({\"Pass\": password})\n # return username == \"admin\" and password == \"secret\"\n return userList != None and passList != None", "title": "" }, { "docid": "f1a0fe20894c9cdd21ad691734c37714", "score": "0.50384307", "text": "def allowed_authentication_modes(self) -> Sequence[str]:\n return pulumi.get(self, \"allowed_authentication_modes\")", "title": "" }, { "docid": "dccf92edde2831a55cfac5c2fe7fd253", "score": "0.5035914", "text": "def authenticationRequired(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "4a0970f5e223027c69cd713e52b2722d", "score": "0.50303274", "text": "def test_task_detail_authentication_classes(self):\n self.assertEqual(TaskSolve.authentication_classes,\n (TokenAuthentication,))", "title": "" }, { "docid": "84cba71aa9bc94db32ba4089f9ce3458", "score": "0.5011294", "text": "def requires_auth(self):\n return True", "title": "" }, { "docid": "84cba71aa9bc94db32ba4089f9ce3458", "score": "0.5011294", "text": "def requires_auth(self):\n return True", "title": "" }, { "docid": "24b632dd493f2c84f43877d72cedd444", "score": "0.5010286", "text": "def authenticated(self):\n\n if self.flask_session.get('id_token_jwt'):\n return True\n else:\n return False", "title": "" }, { "docid": "33369f3688cf2dd8926f08f5532e61b1", "score": "0.500877", "text": "def supports_authentication_record_type(self, authentication_record_type=None):\n if authentication_record_type is None:\n raise NullArgument()\n return False", "title": "" }, { "docid": "166d798ca4c4338e84e58f1a833381e9", "score": "0.5003639", "text": "def is_authenticated(self):\n return True", "title": "" }, { "docid": "166d798ca4c4338e84e58f1a833381e9", "score": "0.5003639", "text": "def is_authenticated(self):\n return True", "title": "" }, { "docid": "166d798ca4c4338e84e58f1a833381e9", "score": "0.5003639", "text": "def is_authenticated(self):\n return True", "title": "" }, { "docid": "166d798ca4c4338e84e58f1a833381e9", "score": "0.5003639", "text": "def is_authenticated(self):\n return True", "title": "" }, { "docid": "166d798ca4c4338e84e58f1a833381e9", "score": "0.5003639", "text": "def is_authenticated(self):\n return True", "title": "" }, { "docid": "166d798ca4c4338e84e58f1a833381e9", "score": "0.5003639", "text": "def is_authenticated(self):\n return True", "title": "" }, { "docid": "166d798ca4c4338e84e58f1a833381e9", "score": "0.5003639", "text": "def is_authenticated(self):\n return True", "title": "" }, { "docid": "166d798ca4c4338e84e58f1a833381e9", "score": "0.5003639", "text": "def is_authenticated(self):\n return True", "title": "" }, { "docid": "166d798ca4c4338e84e58f1a833381e9", "score": "0.5003639", "text": "def is_authenticated(self):\n return True", "title": "" }, { "docid": "166d798ca4c4338e84e58f1a833381e9", "score": "0.5003639", "text": "def is_authenticated(self):\n return True", "title": "" }, { "docid": "166d798ca4c4338e84e58f1a833381e9", "score": "0.5003639", "text": "def is_authenticated(self):\n return True", "title": "" }, { "docid": "166d798ca4c4338e84e58f1a833381e9", "score": "0.5003639", 
"text": "def is_authenticated(self):\n return True", "title": "" }, { "docid": "166d798ca4c4338e84e58f1a833381e9", "score": "0.5003639", "text": "def is_authenticated(self):\n return True", "title": "" }, { "docid": "b1db79d996af06bea1303c35b8799eae", "score": "0.49990708", "text": "def check_auth(ausername, apassword):\n return ausername == username and apassword == password", "title": "" }, { "docid": "93e806ffa26df956df3c6501d1c22a72", "score": "0.49861354", "text": "def authentication_type(self):\n if \"authenticationType\" in self._prop_dict:\n return self._prop_dict[\"authenticationType\"]\n else:\n return None", "title": "" }, { "docid": "ffcb64133fa2ca2ab3b6bb683cee066e", "score": "0.4981471", "text": "def authenticate():\n session = bottle.request.environ.get('beaker.session')", "title": "" }, { "docid": "393a2cdeebe86392efee895eb951df4c", "score": "0.49777374", "text": "def valid_authentication(self, auth):\n return self._get(\"/user\", auth=auth).ok", "title": "" }, { "docid": "9aa51761a4346d2885f70c2f8a371787", "score": "0.49753025", "text": "def is_logged_in():\n return session.get(\"user\")", "title": "" }, { "docid": "b8b4b3e1c1c167055dfbf08f2a349402", "score": "0.49733254", "text": "def check_auth2(username, password):\n if username == 'admin' and password == 'admin': # CHANGE THIS\n return True\n return False", "title": "" }, { "docid": "509c26a70ac84196469ed30c60a6f42b", "score": "0.49706167", "text": "def check_auth(username, password):\n #simple logging\n return username == 'admin' and password == 'admin'", "title": "" }, { "docid": "3f4bb088b23fb5525fa4fb0ab75562a3", "score": "0.49703607", "text": "def authorized(checker):\n return current_user.is_authenticated() and checker()", "title": "" }, { "docid": "ad41f5ff00282374e3391c5126e302ba", "score": "0.4968436", "text": "def is_logged_in(self):\n return (self._auth in ['ANON', 'UNKNOWN']) == False", "title": "" }, { "docid": "578ae15fbdf0bf9385c6dc9c75cc0195", "score": "0.4960382", "text": "def is_authenticated(self):\n check = False\n if self.headers:\n check = all(k in self.headers and self.headers[k] for k in ('X-Access-Key', 'X-Access-Token'))\n return check", "title": "" }, { "docid": "3ecb91b0d071057177907df2abdb391a", "score": "0.4958775", "text": "def authenticate_client(self, request, *args, **kwargs):\n app_log.debug(\"authenticate_client %s\", request)\n client_id = request.client_id\n client_secret = request.client_secret\n oauth_client = (\n self.db.query(orm.OAuthClient).filter_by(identifier=client_id).first()\n )\n if oauth_client is None:\n return False\n if not client_secret or not oauth_client.secret:\n # disallow authentication with no secret\n return False\n if not compare_token(oauth_client.secret, client_secret):\n app_log.warning(\"Client secret mismatch for %s\", client_id)\n return False\n\n request.client = oauth_client\n return True", "title": "" }, { "docid": "02b5f968b590652c785eea7ef85d92de", "score": "0.4956055", "text": "def _check_auth(self, msg, data):\n # Check that flavor is supported\n try:\n sec = self.sec_flavors[msg.cred.flavor]\n except KeyError:\n log_t.warn(\"AUTH_ERROR: Unsupported flavor %i\" % msg.cred.flavor)\n if msg.proc == 0 and msg.cred.flavor == AUTH_NONE:\n # RFC 1831 section 11.1 says \"by convention\" should allow this\n log_t.warn(\"Allowing NULL proc through anyway\")\n sec = security.klass(AUTH_NONE)()\n else:\n raise rpclib.RPCDeniedReply(AUTH_ERROR, AUTH_FAILED)\n # Call flavor specific authority checks\n return sec.check_auth(msg, data)\n\n # What incoming 
flavors do I allow?\n # How does server learn/change these defaults\n\n # For AUTH_NONE:\n # return True - note 11.1 says \"by convention\" should\n # allow AUTH_NONE, at least for proc==0\n\n # For AUTH_SYS:\n # check machinename, mode - again how is accept list set on server?\n \n # For GSS:\n # illegal enum values should return AUTH_BADCRED\n # this will be noticed by XDR unpack failing, which means\n # type(cred.body) == str\n # check gss_version, fail with AUTH_BADCRED\n # check allows service - again how does server set?\n # check context handle - what does this mean?\n # see 5.3.3.3, we maintain list of contexts we are in session\n # with, if not in list, return CREDPROBLEM\n # if security credentials expire, return CTXPROBLEM\n # check header checksum in verf, failure returns CREDPROBLEM\n # check seq_num in cred, silently drop repeats,\n # return CTXPROBLEM if exceeds window\n # check seq_num in data, return GARBAGE_ARGS if mismatches cred\n # check gss_proc==DATA, else:\n # if proc==0, handle elsewhere\n # else return AUTH_BADCRED\n return True", "title": "" }, { "docid": "797db339311bc3ae4f3757f6e6a8b251", "score": "0.49554795", "text": "def get_supported_login_types(self) -> Iterable[str]:\n # Load any login types registered by modules\n # This is stored in the password_auth_provider so this doesn't trigger\n # any callbacks\n types = list(self.password_auth_provider.get_supported_login_types().keys())\n\n # This list should include PASSWORD if (either _password_localdb_enabled is\n # true or if one of the modules registered it) AND _password_enabled is true\n # Also:\n # Some clients just pick the first type in the list. In this case, we want\n # them to use PASSWORD (rather than token or whatever), so we want to make sure\n # that comes first, where it's present.\n if LoginType.PASSWORD in types:\n types.remove(LoginType.PASSWORD)\n if self._password_enabled_for_login:\n types.insert(0, LoginType.PASSWORD)\n elif self._password_localdb_enabled and self._password_enabled_for_login:\n types.insert(0, LoginType.PASSWORD)\n\n return types", "title": "" }, { "docid": "0f9905a5432c1f2e8232a165536c3442", "score": "0.49498868", "text": "def test_is_logged_in(self):\n pass", "title": "" }, { "docid": "034d6aebea405e96d9b72072256f0c69", "score": "0.49434224", "text": "def check_auth(request, **kwargs):\n\n my_auth = MyBasicAuthentication()\n return my_auth.is_authenticated(request, **kwargs)", "title": "" }, { "docid": "b43a8149da4e9d425ec1a110bd51f230", "score": "0.49428022", "text": "def authentication_type(self) -> Optional[str]:\n return pulumi.get(self, \"authentication_type\")", "title": "" }, { "docid": "b43a8149da4e9d425ec1a110bd51f230", "score": "0.49428022", "text": "def authentication_type(self) -> Optional[str]:\n return pulumi.get(self, \"authentication_type\")", "title": "" }, { "docid": "b43a8149da4e9d425ec1a110bd51f230", "score": "0.49428022", "text": "def authentication_type(self) -> Optional[str]:\n return pulumi.get(self, \"authentication_type\")", "title": "" }, { "docid": "b43a8149da4e9d425ec1a110bd51f230", "score": "0.49428022", "text": "def authentication_type(self) -> Optional[str]:\n return pulumi.get(self, \"authentication_type\")", "title": "" }, { "docid": "b43a8149da4e9d425ec1a110bd51f230", "score": "0.49428022", "text": "def authentication_type(self) -> Optional[str]:\n return pulumi.get(self, \"authentication_type\")", "title": "" }, { "docid": "b43a8149da4e9d425ec1a110bd51f230", "score": "0.49428022", "text": "def authentication_type(self) -> 
Optional[str]:\n return pulumi.get(self, \"authentication_type\")", "title": "" }, { "docid": "b43a8149da4e9d425ec1a110bd51f230", "score": "0.49428022", "text": "def authentication_type(self) -> Optional[str]:\n return pulumi.get(self, \"authentication_type\")", "title": "" }, { "docid": "b43a8149da4e9d425ec1a110bd51f230", "score": "0.49428022", "text": "def authentication_type(self) -> Optional[str]:\n return pulumi.get(self, \"authentication_type\")", "title": "" }, { "docid": "b43a8149da4e9d425ec1a110bd51f230", "score": "0.49428022", "text": "def authentication_type(self) -> Optional[str]:\n return pulumi.get(self, \"authentication_type\")", "title": "" }, { "docid": "48e963702bb7cd32f48c7b9d307bcddc", "score": "0.49400705", "text": "def handle_login(login_session):\n # To debug, comment out the next if statement\n if 'email' not in login_session:\n return False\n return True", "title": "" }, { "docid": "fd5fec4a303aa6680f4ffbfcdf491230", "score": "0.49386814", "text": "def checksession():\n if 'token' in session:\n result = callapi(\"get\", \"/login/check/\" + session['token'])\n if result['status'] == 0:\n return True\n return False", "title": "" }, { "docid": "869ca6c97728e8f305399b9c5e2c134c", "score": "0.49380708", "text": "def is_authenticated(self):\n return (\n self.auth_token is not None\n and self.sid_token is not None\n and self.lsid_token is not None\n )", "title": "" }, { "docid": "7a64ccf9225438ffafdc12c396c306c6", "score": "0.4936473", "text": "def is_authenticated(self):\n\n if self.sess_username in request.session and request.session[self.sess_username]:\n return True\n return False", "title": "" } ]
be0059908e9c39cd679d8c2756867624
Takes an index into a flattened 3D array and its side length. Returns the coordinate in the cube.
[ { "docid": "73d42ac28eabe7bc138e1e9edd434b73", "score": "0.50769484", "text": "def index_to_coord(index, sl):\n coord = []\n two_d_slice_size = sl * sl\n coord.append(index // two_d_slice_size)\n remaining = index % two_d_slice_size\n coord.append(remaining // sl)\n coord.append(remaining % sl)\n return coord", "title": "" } ]
[ { "docid": "494eda5982266fe514015723e0cd5ba2", "score": "0.58937633", "text": "def calculateCubeIdx3D(cube_size, vol_size, strides):\n x_idx, y_idx, z_idx = calculateIdx3D(vol_size, cube_size, strides)\n pos_idx_flat = np.zeros(x_idx.shape[0] * y_idx.shape[0] * z_idx.shape[0])\n flat_idx = 0\n \n for x in x_idx:\n for y in y_idx:\n for z in z_idx:\n pos_3d = [x, y, z]\n pos_idx_flat[flat_idx] = pos2idx(pos_3d, vol_size)\n flat_idx += 1\n \n return pos_idx_flat", "title": "" }, { "docid": "b22fcd2b380ccbf9dfa311ca34e75be6", "score": "0.5678868", "text": "def flatten_indices_3d(inds):\n n = inds.shape[-2]\n nti = inds.shape[1]\n T = inds[..., 0] # shape (nto, nti, n, n, n)\n U = inds[..., 1] # shape (nto, nti, n, n, n)\n V = inds[..., 2] # shape (nto, nti, n, n, n)\n W = inds[..., 3] # shape (nto, nti, n, n, n)\n inds_flat = U * n * n * nti + V * n * nti + W * nti + T\n return inds_flat", "title": "" }, { "docid": "50eb5a7293adf7abc8ef245480d86ffa", "score": "0.55458266", "text": "def indexshape(v: torch.Tensor, index: torch.Tensor) -> torch.Tensor:\n return v[index.flatten()].reshape(index.shape + v.shape[1:])", "title": "" }, { "docid": "415828a39f783c7af77016dd5c039e29", "score": "0.5416955", "text": "def flat_idx(arr, i):\n if np.isscalar(arr):\n return arr\n else:\n return arr.flat[i]", "title": "" }, { "docid": "ee2496d1a3f34c833b370685b78b439c", "score": "0.5387503", "text": "def _get_tri3(self, index):\n v = memoryview(self.__vertdata)\n I = int(index) * 3 * self.num_floats\n A = v[I:I + self.num_floats]\n B = v[I + self.num_floats:I + 2 * self.num_floats]\n C = v[I + 2 * self.num_floats:I + 3 * self.num_floats]\n return A, B, C", "title": "" }, { "docid": "620ad4ea4f6ea252ea4b897b3159714d", "score": "0.5339169", "text": "def get_box_idx(i, j):\n return (i/3)*3 +j/3", "title": "" }, { "docid": "41d249beb6401d8c256bf4486a7718c5", "score": "0.5303263", "text": "def createCube(centerX, centerY, centerZ, sideLength):\n coordinates = np.zeros((8,3))\n coordinates[0] = [centerX-(sideLength/2), centerY-(sideLength/2), centerZ-(sideLength/2)]\n coordinates[1] = [centerX-(sideLength/2), centerY-(sideLength/2), centerZ+(sideLength/2)]\n coordinates[2] = [centerX-(sideLength/2), centerY+(sideLength/2), centerZ-(sideLength/2)]\n coordinates[3] = [centerX-(sideLength/2), centerY+(sideLength/2), centerZ+(sideLength/2)]\n coordinates[4] = [centerX+(sideLength/2), centerY-(sideLength/2), centerZ-(sideLength/2)]\n coordinates[5] = [centerX+(sideLength/2), centerY-(sideLength/2), centerZ+(sideLength/2)]\n coordinates[6] = [centerX+(sideLength/2), centerY+(sideLength/2), centerZ-(sideLength/2)]\n coordinates[7] = [centerX+(sideLength/2), centerY+(sideLength/2), centerZ+(sideLength/2)]\n return coordinates", "title": "" }, { "docid": "0346a790cbc926bafe70085d1848dc74", "score": "0.52590823", "text": "def getSubCubeByIndex(cube, dim1_index, dim2_index, slicedDimIndex, collapsedIndices): \n coord_indices = []\n collapsed_dim_indices = []\n i = 0\n for dim_num in range(cube.ndim):\n if dim_num != dim1_index and dim_num != dim2_index and dim_num != slicedDimIndex:\n index = collapsedIndices[i]\n coord_indices.append(index)\n collapsed_dim_indices.append(dim_num)\n i += 1\n new_cube= extractCube(cube, collapsed_dim_indices, coord_indices)\n return new_cube", "title": "" }, { "docid": "3e39879409790b8251180ea9a27f2aad", "score": "0.5251454", "text": "def index2trace_flat(shape,index):\n # We need to perform: \n # index[0]*shape[1]*...shape[-1] + index[1]*shape[2]*...shape[-1] + ... 
\n # + index[-1]*shape[-1] + index[-1]\n # To do this we calculate the product of shape[X] elements and multiple\n # by the corresponding index element, index[-1] as added at the beginning\n a = index[-1]\n for i,v in enumerate(index[:-1]):\n s = shape[i+1:]\n mult = reduce(lambda x,y: x*y, shape[i+1:])\n a = a+mult*v\n return a", "title": "" }, { "docid": "4ba6a5253c7b46511a9a45a6b99b64bc", "score": "0.523568", "text": "def GetCubeIndex(self, Point):\n\t\tp_x = Point[0]; p_y = Point[1]; p_z = Point[2]\n\t\tp = int(numpy.floor(p_x*self.N_x / float(self.L_x))) \t\t# index along x\n\t\tq = int(numpy.floor(p_y*self.N_y / float(self.L_y))) \t\t# index along y\n\t\tr = int(numpy.floor(p_z*self.N_z / float(self.L_z))) \t\t# index along z\n\t\tc = p + q*self.N_x + r*self.N_x*self.N_y\t\t\t\t\t# global index of this cube\n\t\tcubeOrigin = [p*self.L_x/float(self.N_x),\\\n\t\t\t\t\t q*self.L_y/float(self.N_y),\\\n\t\t\t\t\t r*self.L_z/float(self.N_z)]\t\t\t\t\t\t\t# coordinates of this cube's origin\n\t\t\n\t\treturn int(c), cubeOrigin", "title": "" }, { "docid": "31b38c11f7a62e1000df3b56d0c45940", "score": "0.52352375", "text": "def _cone3d(shape, ij, pos, ampli, width):\n temp = np.zeros(shape)\n pos = np.reshape(pos, (1, 3))\n dist = np.sqrt(np.sum((ij - pos) ** 2, axis=1))\n codi = (width - dist) * (dist < width) / width\n temp[ij[:, 0], ij[:, 1], ij[:, 2]] = codi * ampli\n return temp", "title": "" }, { "docid": "7db3ed1572fb8b6e21db45248fc0176c", "score": "0.52300996", "text": "def index_to_position(index, array_width):\n return index % array_width, index // array_width", "title": "" }, { "docid": "031e8df7ac1e181ddda39c998e2e7b78", "score": "0.522212", "text": "def unitcube(dimension):\n assert dimension > 0\n return [0.0] * dimension, [1.0] * dimension", "title": "" }, { "docid": "47a25fc3373be6dc33a1422783f185e2", "score": "0.521625", "text": "def flat_idx(arr, i):\n if isinstance(arr, (nb.types.Integer, nb.types.Float)):\n return lambda arr, i: arr\n else:\n return lambda arr, i: arr.flat[i]", "title": "" }, { "docid": "2b3e41195a2d6cde437ebc6a9b73419d", "score": "0.51677084", "text": "def child(idx, dim, axis):\n idxm = multi_index(idx, dim)\n out = numpy.array(idxm) + 1*(numpy.eye(len(idxm))[axis])\n return single_index(out)", "title": "" }, { "docid": "ecbc8a9eec2bda9846d12bb486384411", "score": "0.5139458", "text": "def is_inside_cube(coor, a, **kwargs):\n\n index = is_inside_cube_index(coor, a, **kwargs)\n if coor.shape[1] == 3:\n return coor[index].copy()\n else:\n return coor[:, index].copy()", "title": "" }, { "docid": "f14eff50865ba745afbb095ee300c5cb", "score": "0.5131598", "text": "def GetAreas(index,x,y,z=np.array([])):\n\n\t#get number of elements and number of nodes\n\tnels=np.size(index,axis=0)\n\tnods=np.size(x)\n\n\t#some checks\n\tif np.size(y)!=nods or (z and np.size(z)!=nods):\n\t\traise TypeError(\"GetAreas error message: x,y and z do not have the same length.\")\n\tif np.max(index)>nods:\n\t\traise TypeError(\"GetAreas error message: index should not have values above %d.\" % nods)\n\tif (not z and np.size(index,axis=1)!=3):\n\t\traise TypeError(\"GetAreas error message: index should have 3 columns for 2d meshes.\")\n\tif (z and np.size(index,axis=1)!=6):\n\t\traise TypeError(\"GetAreas error message: index should have 6 columns for 3d meshes.\")\n\n\t#initialization\n\tareas=np.zeros(nels)\n\tx1=x[index[:,0]-1]\n\tx2=x[index[:,1]-1]\n\tx3=x[index[:,2]-1]\n\ty1=y[index[:,0]-1]\n\ty2=y[index[:,1]-1]\n\ty3=y[index[:,2]-1]\n\n\t#compute the volume of each element\n\tif 
not z:\n\t\t#compute the surface of the triangle\n\t\tareas=(0.5*((x2-x1)*(y3-y1)-(y2-y1)*(x3-x1)))\n\telse:\n\t\t#V=area(triangle)*1/3(z1+z2+z3)\n\t\tthickness=np.mean(z[index[:,3:6]-1])-np.mean(z[index[:,0:3]-1])\n\t\tareas=(0.5*((x2-x1)*(y3-y1)-(y2-y1)*(x3-x1)))*thickness\n\n\treturn areas", "title": "" }, { "docid": "d880c975f51bb58bdb7e0ff62c5efe41", "score": "0.51016176", "text": "def vertex(self, index):\n return self._data[8*index:8*index+3]", "title": "" }, { "docid": "faaf2f60d605c6f688e09f32a2062489", "score": "0.50926214", "text": "def my_cube(x):\n\treturn (x ** 3)", "title": "" }, { "docid": "14d36d20c385ed3ee0e1205cbcbfcf58", "score": "0.50888854", "text": "def get_coordinate_2D(self, index: int) -> Tuple[int, int]:\n return index // self.size, index % self.size", "title": "" }, { "docid": "004f9adeab940dcb92f02ac112ad6940", "score": "0.5064866", "text": "def _cube_volume(width, height, depth):\n return width * height * depth", "title": "" }, { "docid": "e342397d8601ef10211150065673e8cd", "score": "0.50635195", "text": "def unravel_index(v, shape, **kwargs):\n return tensor(np.unravel_index(v, shape, **kwargs))", "title": "" }, { "docid": "79855ed9fb86e9bf090382d4d53ad664", "score": "0.5060139", "text": "def __getitem__(self, index_tuple):\n assert len(index_tuple) == 2, \"Invalid number of array subscripts.\"\n row = index_tuple[0]\n col = index_tuple[1]\n if not (0 <= row < self.num_rows() and 0 <= col < self.num_cols()):\n raise IndexError('Invalid index')\n array_1d = self.rows[row]\n return array_1d[col]", "title": "" }, { "docid": "465bbb8510752d19c975c6b5f20d611a", "score": "0.50158626", "text": "def face(self, index):\n return self._indices[self._idxCount*index:self._idxCount*(index+1)]", "title": "" }, { "docid": "5d57b9496abeb3e63fb03fc1c3aa72f9", "score": "0.5005878", "text": "def get_triu_3D(size):\n rows1 = []\n rows2 = []\n rows3 = []\n for i in range(size):\n rows_ele, cols_ele = torch.triu_indices(size-i, size-i)\n rows2 += rows_ele\n rows3 += cols_ele\n rows1 += [torch.tensor(i)] * len(rows_ele)\n rows1, rows2, rows3 = torch.stack(rows1), torch.stack(rows2), torch.stack(rows3)\n return rows1, rows2, rows3", "title": "" }, { "docid": "157c90926d28bb868fa9483bb336a29a", "score": "0.49707347", "text": "def index_1d(image,x,y):\n\tw=image['width']\n\treturn w*y+x", "title": "" }, { "docid": "b13f7b7a54a2301c5adecb2b7d7ed250", "score": "0.49455", "text": "def __getitem__(self, index) -> 'np.array[4,2]':\r\n return self.__iter__()[index]", "title": "" }, { "docid": "9ca48df541868488832b580721f9bd96", "score": "0.49404263", "text": "def cube():\n vtype = [('a_position', np.float32, 3),\n ('a_normal', np.float32, 3),\n ('a_color', np.float32, 4)]\n # Vertices positions\n v = [[1, 1, 1], [-1, 1, 1], [-1, -1, 1], [1, -1, 1],\n [1, -1, -1], [1, 1, -1], [-1, 1, -1], [-1, -1, -1]]\n # Face Normals\n n = [[0, 0, 1], [1, 0, 0], [0, 1, 0],\n [-1, 0, 1], [0, -1, 0], [0, 0, -1]]\n # Vertice colors\n c = [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 1, 0, 1],\n [1, 1, 0, 1], [1, 1, 1, 1], [1, 0, 1, 1], [1, 0, 0, 1]]\n\n V = np.array([(v[0], n[0], c[0]), (v[1], n[0], c[1]),\n (v[2], n[0], c[2]), (v[3], n[0], c[3]),\n (v[0], n[1], c[0]), (v[3], n[1], c[3]),\n (v[4], n[1], c[4]), (v[5], n[1], c[5]),\n (v[0], n[2], c[0]), (v[5], n[2], c[5]),\n (v[6], n[2], c[6]), (v[1], n[2], c[1]),\n (v[1], n[3], c[1]), (v[6], n[3], c[6]),\n (v[7], n[3], c[7]), (v[2], n[3], c[2]),\n (v[7], n[4], c[7]), (v[4], n[4], c[4]),\n (v[3], n[4], c[3]), (v[2], n[4], c[2]),\n (v[4], n[5], c[4]), (v[7], 
n[5], c[7]),\n (v[6], n[5], c[6]), (v[5], n[5], c[5])],\n dtype=vtype)\n I1 = np.resize(np.array([0, 1, 2, 0, 2, 3], dtype=np.uint32), 6 * (2 * 3))\n I1 += np.repeat(4 * np.arange(2 * 3, dtype=np.uint32), 6)\n\n I2 = np.resize(\n np.array([0, 1, 1, 2, 2, 3, 3, 0], dtype=np.uint32), 6 * (2 * 4))\n I2 += np.repeat(4 * np.arange(6, dtype=np.uint32), 8)\n\n return V, I1, I2", "title": "" }, { "docid": "30577c9e266c1d7bcf6e6f006a247707", "score": "0.4939538", "text": "def flatten_index(self, nwp_index):\n nwp_sent_len = nwp_index.shape[-2]\n return nwp_index.contiguous().view(-1, nwp_sent_len, 1)", "title": "" }, { "docid": "b0ce9f5ea7e7967b24e4f7264967237a", "score": "0.4935434", "text": "def trace2index_flat(shape,ntrace):\n # algorithm is to take quotient/remainers of sizes in reverse\n q = ntrace # seed quotient with remained\n index = []\n for s in shape[:0:-1]: # loop from last size to 2nd size\n q,r = divmod(q,s)\n index.insert(0,r)\n index.insert(0,q)\n return tuple(index)", "title": "" }, { "docid": "db2c5a5fc1522c029ec0f057eaa25969", "score": "0.49258748", "text": "def position_to_index(x, y, array_width):\n return x + y*array_width", "title": "" }, { "docid": "2fbba478ce4e91ad2a109551e0ff1f84", "score": "0.49238217", "text": "def z_coord(cube):\n try:\n z = cube.coord(axis='Z')\n except CoordinateNotFoundError:\n z = cube.coords(axis='Z')\n for coord in cube.coords(axis='Z'):\n if coord.ndim == 1:\n z = coord\n return z", "title": "" }, { "docid": "166e080309a9025929f173e12de03f99", "score": "0.49152505", "text": "def get_layer_coord(direction, index):\n return (direction.value * (SIZE - 1 - index)) if is_direction_positive(direction) else (-direction.value * index)", "title": "" }, { "docid": "1a7c65ddb7dc047195ca3de350aa5291", "score": "0.48655802", "text": "def __getitem__(self, index):\n if isinstance(index, tuple):\n if len(index) > 2:\n raise IndexError('TileSpec only has two dimensions (row/col)')\n if not isinstance(index[0], int) and isinstance(index[1], int):\n raise TypeError(\n 'Only support indexing int/int for now')\n return self._index_to_tile(index)\n else:\n raise IndexError('Unknown index type')", "title": "" }, { "docid": "460ce8cac595449fa971d949d39248cf", "score": "0.48616904", "text": "def kp_3d(self, kp_idx):\n return self.kp_landmark.get(kp_idx)", "title": "" }, { "docid": "c129db1644245c027017822f86d6cdd9", "score": "0.48532802", "text": "def pndindex(*args):\n return np.ndindex(*args)", "title": "" }, { "docid": "78aa73dfc810dadee9c6d1773a2783b2", "score": "0.48384112", "text": "def getLayerClearDepthValue(self, index):\n \n pass", "title": "" }, { "docid": "84b5cbb7bf8fb3d4f4308b10a2425229", "score": "0.48373097", "text": "def cubeVertices(size):\n\tvertices = []\n\t\n\tfor vertex in basic_round:\n\t\tv_x = (vertex[0] * size)\n\t\tv_y = (vertex[1] * size)\n\t\tv_z = (vertex[2] * size)\n\t\t\n\t\tvertices.append((v_x, v_y, v_z))\n\t\n\treturn vertices", "title": "" }, { "docid": "ed3ceb2bcb66dfb496ac00be1a4a0ad5", "score": "0.48295268", "text": "def __getitem__(self, index):\n # Generate indexes of the batch\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n indexes = [i for i in indexes if i < len(self.img_flist)]\n\n x = np.zeros((len(indexes), *self.dim), dtype=np.float32)\n m, n, d = self.dim_original\n\n for i, index in enumerate(indexes):\n # TODO: load images also in other formats (color, 16 bits)\n img = self._read_image(index)\n img = self.norm.make(img)\n x[i, :m, :n, :d] = (img - .5).reshape(self.dim_original)\n if 
self.debug:\n print(x.shape, indexes)\n\n return x", "title": "" }, { "docid": "9c53bce449871b0a71dfb6c81b829a69", "score": "0.48250538", "text": "def array_index_to_world_values(self, *index_arrays):\n return self.pixel_to_world_values(*index_arrays[::-1])", "title": "" }, { "docid": "943b06ce4bb1a1a33ad906f4035ed817", "score": "0.48216644", "text": "def dim3_in(self):\n return self._dim3_in", "title": "" }, { "docid": "ee62ef479dd522a680b5b889b1a2fe7c", "score": "0.48130336", "text": "def get_num(index):\n return int(grid[3*index:3*index+2])", "title": "" }, { "docid": "b76bd908112d03a7d9840646a35eb059", "score": "0.48073918", "text": "def __getitem__(self, index):\n # Single index from numpy scalar\n if isinstance(index, np.ndarray) and index.size==1:\n index = int(index)\n \n if isinstance(index, tuple):\n # Multiple indexes: return as array\n return np.asarray(self)[index]\n elif isinstance(index, slice):\n # Slice: return subset\n return np.ndarray.__getitem__(self, index)\n elif isinstance(index, int):\n ndim = self.ndim\n if ndim and self.shape[-1] in (1,3,4):\n ndim -= 1\n if ndim > 1:\n return np.ndarray.__getitem__(self, index)\n else:\n return np.asarray(self)[index] # Back to normal\n else:\n # Probably some other form of subslicing\n return np.asarray(self)[index]", "title": "" }, { "docid": "30b1922cf38ddefff6c9f03a1c2d89ae", "score": "0.47915167", "text": "def ensure3d(arr):\n if len(arr.shape) == 3:\n return arr\n elif len(arr.shape) == 2:\n return arr.reshape((arr.shape[0], arr.shape[1], 1))", "title": "" }, { "docid": "60476918c62ce78849c3a04b94bde456", "score": "0.4787738", "text": "def convert_to_ij(self, index):\n row = int(index / self.layout.shape[1])\n column = int(index % self.layout.shape[1])\n return [row, column]", "title": "" }, { "docid": "c03955aa4ebe02cf46edff0aa8b209c2", "score": "0.47814274", "text": "def get_neighbor(x, index):\n\n if isinstance(x, torch.Tensor):\n num_sample = x.size(0)\n pick_up = []\n for i in range(num_sample):\n tmp = x[i][index[i]]\n pick_up.append(tmp.unsqueeze(dim=0))\n pick_up = torch.cat(pick_up, dim=0)\n return pick_up\n elif isinstance(x, np.ndarray):\n num_sample = x.shape[0]\n pick_up = []\n for i in range(num_sample):\n tmp = x[i][index[i]]\n pick_up.append(tmp)\n pick_up = np.stack(pick_up, axis=0)\n return pick_up", "title": "" }, { "docid": "d0f2789410e9968af2f55c9df053b0ac", "score": "0.4771339", "text": "def flat_idx_to_tuple(idx):\n row = idx // 6\n col = idx % 6\n return (row, col)", "title": "" }, { "docid": "fd61e328c9492d02e825e9a98330f02c", "score": "0.47699788", "text": "def coord_to_index(coord, shape):\n return reduce(lambda x, y: x * y[0] + y[1], zip(shape, coord), 0)", "title": "" }, { "docid": "8077dc92f7106d1e6baba561951cffa1", "score": "0.47573444", "text": "def cube_vertices(self, ix, iy, iz):\n\n # node0 = (n+1)*(n+1)*k+(n+1)*j+i+1\n # node1 = (n+1)*(n+1)*(k+1)+(n+1)*j+i+1\n # node2 = (n+1)*(n+1)*(k+1)+(n+1)*j+i+2\n # node3 = (n+1)*(n+1)*k+(n+1)*j+i+2\n # node4 = (n+1)*(n+1)*k+(n+1)*(j+1)+i+1\n # node5 = (n+1)*(n+1)*(k+1)+(n+1)*(j+1)+i+1\n # node6 = (n+1)*(n+1)*(k+1)+(n+1)*(j+1)+i+2\n # node7 = (n+1)*(n+1)*k+(n+1)*(j+1)+i+2\n\n n0 = self.get_id(ix, iy, iz)\n n1 = self.get_id(ix, iy, iz + 1)\n n2 = self.get_id(ix + 1, iy, iz + 1)\n n3 = self.get_id(ix + 1, iy, iz)\n\n n4 = self.get_id(ix, iy + 1, iz)\n n5 = self.get_id(ix, iy + 1, iz + 1)\n n6 = self.get_id(ix + 1, iy + 1, iz + 1)\n n7 = self.get_id(ix + 1, iy + 1, iz)\n\n return n0, n1, n2, n3, n4, n5, n6, n7", "title": "" }, { "docid": 
"bf53b159102c092b52e60d81e4c984a6", "score": "0.4750623", "text": "def index_to_index_position(self, index):\n return int_index_inverse(\n index, self.levels, self.dimensions)", "title": "" }, { "docid": "5c73f11baf065706ada199024d870312", "score": "0.4744319", "text": "def __getitem__(self, index):\n if isinstance(index, tuple):\n if isinstance(index[0], int) and isinstance(index[0], int):\n n = index[0]\n m = index[1]\n if 0 <= n < self.shape_columns:\n if 0 <= m < self.shape_rows:\n if n == 0:\n return self.elements[m]\n else:\n return self.elements[(n*self.shape_rows) + m]\n else:\n raise ValueError(\"Index is out of bounds\")\n else:\n raise ValueError(\"Index is out of bounds\")\n else:\n raise TypeError(\"Index error, you can only retrive indexes with int\")\n elif isinstance(index, int):\n if 0 <= index < self.shape_rows*self.shape_columns:\n return self.elements[index]\n else:\n raise ValueError(\"Index is out of bounds\")", "title": "" }, { "docid": "d9d7a9441ed0b86f55b6ec155a66f240", "score": "0.47404283", "text": "def cube_vertices(self):\n # FIXME add rotation\n vertices = np.array(CUBE_VERTICES)\n vertices = (vertices - 0.5) * self.size + self.position\n return vertices[CUBE_TOP + CUBE_BOTTOM + CUBE_FRONT + CUBE_BACK + CUBE_LEFT + CUBE_RIGHT].flatten()", "title": "" }, { "docid": "40b4b41b2adeab8d3ace9eafe0b172f5", "score": "0.47391528", "text": "def _get_slice(self, nd_array: np.ndarray, idx: int) -> np.ndarray:\n s = nd_array[..., idx, :, :] if self._axis == 0 else \\\n nd_array[..., :, idx, :] if self._axis == 1 else \\\n nd_array[..., :, :, idx]\n return s", "title": "" }, { "docid": "e7e6941be51dd736edca7ec29c7084be", "score": "0.4738475", "text": "def __getitem__(self, j):\n return self._coords[j]", "title": "" }, { "docid": "a7bd1a62427ec54fffb7ef75b244a7bd", "score": "0.472802", "text": "def __getitem__(self, j):\n return self.coords[j]", "title": "" }, { "docid": "2e5106b979cc94fb1dd1de4352adea3e", "score": "0.4727818", "text": "def inputShapeAtIndex(self, index):\n \n pass", "title": "" }, { "docid": "15753f1cb351206e9f7d5ecb5122b949", "score": "0.4723003", "text": "def _get_data(self, idx):\n position = self._other_attr['position'].get(idx)\n if position.ndim == 2:\n return position[:, 1]\n else:\n return position[1]", "title": "" }, { "docid": "6fe251c397d2ab8d6207f4a74215ea88", "score": "0.4721267", "text": "def list_index_to_board_coordinates(index):\r\n y = int(index / 3)\r\n x = index - (y * 3)\r\n return x, y", "title": "" }, { "docid": "bbb0ebba805ce55a4444cfd62612b098", "score": "0.47192666", "text": "def encode_shapelayer(voxel, id1=None, id2=None, id3=None):\n\n side = voxel.shape[0]\n assert voxel.shape[0] == voxel.shape[1] and voxel.shape[1] == voxel.shape[2], \\\n 'The project_single_view_contour grid needs to be a cube. It is however %dx%dx%d.' 
% \\\n (voxel.shape[0], voxel.shape[1], voxel.shape[2])\n\n if id1 is None or id2 is None or id3 is None:\n id1, id2, id3 = generate_indices(side)\n pass\n\n # add empty border for argmax\n # need to distinguish empty tubes\n v = np.zeros((side + 2, side + 2, side + 2), dtype=np.uint8)\n v[1:-1, 1:-1, 1:-1] = voxel\n\n shape_layer = np.zeros((side, side, 6), dtype=np.uint16)\n\n # project depth to yz-plane towards negative x\n s1 = np.argmax(v, axis=0) # returns first occurence\n # project depth to yz-plane towards positive x\n s2 = np.argmax(v[-1::-1, :, :], axis=0) # same, but from other side\n s2 = side + 1 - s2 # correct for added border\n\n # set all empty tubes to 0\n s1[s1 < 1] = side + 2\n s2[s2 > side] = 0\n shape_layer[:, :, 0] = s1[1:-1, 1:-1]\n shape_layer[:, :, 1] = s2[1:-1, 1:-1]\n\n # project depth to xz-plane towards negative y\n s1 = np.argmax(v, axis=1)\n # project depth to xz-plane towards positive y\n s2 = np.argmax(v[:, -1::-1, :], axis=1)\n s2 = side + 1 - s2\n\n s1[s1 < 1] = side + 2\n s2[s2 > side] = 0\n shape_layer[:, :, 2] = s1[1:-1, 1:-1]\n shape_layer[:, :, 3] = s2[1:-1, 1:-1]\n\n # project depth to xy-plane towards negative z\n s1 = np.argmax(v, axis=2)\n # project depth to xy-plane towards positive z\n s2 = np.argmax(v[:, :, -1::-1], axis=2)\n s2 = side + 1 - s2\n\n s1[s1 < 1] = side + 2\n s2[s2 > side] = 0\n shape_layer[:, :, 4] = s1[1:-1, 1:-1]\n shape_layer[:, :, 5] = s2[1:-1, 1:-1]\n\n return shape_layer", "title": "" }, { "docid": "ff78a7a2c98138c9e517bcf8f5877f21", "score": "0.47192377", "text": "def slice(self, rindex):\n res = DMatrix(None)\n res.handle = ctypes.c_void_p(xglib.XGDMatrixSliceDMatrix(\n self.handle, (ctypes.c_int*len(rindex))(*rindex), len(rindex)))\n return res", "title": "" }, { "docid": "f48722400fda16e7200798762c768444", "score": "0.47125798", "text": "def _get_data(self, idx):\n position = self._other_attr['position'].get(idx)\n if position.ndim == 2:\n return position[:, 0]\n else:\n return position[0]", "title": "" }, { "docid": "8c6b65fcb71fff8bd7a524033872fc27", "score": "0.47093856", "text": "def __getitem__(self, index):\n index = list(index) if isinstance(index, tuple) else [index]\n ndimension = self.ndimension()\n index += [slice(None, None, None)] * (ndimension - len(index))\n components = list(self._args)\n\n squeeze_left = False\n squeeze_right = False\n if isinstance(index[-2], int):\n index[-2] = slice(index[-2], index[-2] + 1, None)\n squeeze_left = True\n if isinstance(index[-1], int):\n index[-1] = slice(index[-1], index[-1] + 1, None)\n squeeze_right = True\n\n # Handle batch dimensions\n isbatch = ndimension >= 3\n if isbatch:\n batch_index = tuple(index[:-2])\n for i, item in enumerate(components):\n components[i] = item[batch_index]\n\n new_lazy_variable = self.__class__(*components, **self._kwargs)\n\n # Handle index\n left_index = index[-2]\n right_index = index[-1]\n\n if (\n not torch.is_tensor(left_index)\n and left_index == slice(None, None, None)\n and not torch.is_tensor(right_index)\n and right_index == slice(None, None, None)\n ):\n return new_lazy_variable\n\n res = new_lazy_variable._getitem_nonbatch(left_index, right_index)\n if squeeze_left or squeeze_right:\n res = res.evaluate()\n if squeeze_left:\n res = res.squeeze(-2)\n if squeeze_right:\n res = res.squeeze(-1)\n\n return res", "title": "" }, { "docid": "4c4e00e9d01731b0dd3e1e7a981c6bef", "score": "0.47052205", "text": "def ix(self, index):\n if isinstance(index, tuple):\n raise IndexError(\"Too many indices for ArrayRDD\")\n elif 
isinstance(index, slice) and index == slice(None, None, None):\n return self\n\n indices = np.arange(self.count())[index]\n indexed = self.zipWithIndex()\n if isinstance(index, slice):\n ascending = index.step is None or index.step > 0\n rdd = indexed.filter(lambda (x, i): i in indices)\n if not ascending:\n rdd = rdd.sortBy(lambda (x, i): i, ascending)\n elif hasattr(index, \"__iter__\"):\n # TODO: check monotoniticity to avoid unnunnecessary sorting\n arg = indices.tolist()\n rdd = indexed.filter(lambda (x, i): i in indices) \\\n .sortBy(lambda (x, i): arg.index(i))\n elif isinstance(index, int):\n rdd = indexed.filter(lambda (x, i): i == indices)\n else:\n raise KeyError(\"Unexpected type of index: {0}\".format(type(index)))\n\n return rdd.map(lambda (x, i): x)", "title": "" }, { "docid": "d09faacaddadb219ccabc59c370293ed", "score": "0.46936166", "text": "def length_3d( locations = [] ):\n\treturn length( locations, True )", "title": "" }, { "docid": "67cf7f5cfd1e4a67e7cd67278c55bf91", "score": "0.46904933", "text": "def as3darray(self, fslice=None, crop=None,\n dtype = _dtype_):\n if fslice is None or isinstance(fslice, int):\n fslice = (fslice, )\n shape = self.shape(crop)\n\tnewshape = (len(self),) + shape\n out = lib.memsafe_arr(newshape, dtype)\n for k,frame in enumerate(itt.islice(self.frames(), *fslice)):\n out[k,:,:] = frame[crop]\n out = out[:k+1]\n if hasattr (out, 'flush'):\n out.flush()\n return out", "title": "" }, { "docid": "78677263cb8ecd984e276cd1aa4107b6", "score": "0.46901664", "text": "def my_cube(c):\n\treturn (c**3)", "title": "" }, { "docid": "f06193898056f83869cf9b53b26bb0d4", "score": "0.46792462", "text": "def test_shapes():\n assert len(z_order_index_list_of_2d(_generate_data((3, 6, 4)))) == 72", "title": "" }, { "docid": "8e37e4a1c1aa4e6028b60e38ff86185e", "score": "0.46706882", "text": "def cube(n):\n \"REPLACE THIS CODE WITH YOUR CUBE METHOD\"", "title": "" }, { "docid": "6f25b3a8f893246b4ee48e80f3ce027e", "score": "0.46706337", "text": "def __call__(self, arr):\n slices = []\n for cnt, orig_i in enumerate(arr.shape):\n if cnt == 3:\n break\n slices.append(slice(0, orig_i, self.scale))\n down_arr = arr[tuple(slices)]\n\n return down_arr", "title": "" }, { "docid": "a2c5d8c6f48f351d9cb9362f5eccf230", "score": "0.46676278", "text": "def combiner_slice_length_at_traj_index(index: int, coords_path: ndarray) -> float:\n assert index < len(coords_path)\n if index == 0:\n dr = np.linalg.norm(coords_path[1] - coords_path[0])\n slice_length = dr / 2.0\n elif index == len(coords_path) - 1:\n dr = np.linalg.norm(coords_path[-1] - coords_path[-2])\n slice_length = dr / 2.0\n else:\n dr2 = np.linalg.norm(coords_path[index + 1] - coords_path[index])\n dr1 = np.linalg.norm(coords_path[index] - coords_path[index - 1])\n slice_length = dr1 / 2 + dr2 / 2\n return slice_length", "title": "" }, { "docid": "6305a98a0fef367d8928b4b256ac62a0", "score": "0.46641997", "text": "def test_voxel_index_list():", "title": "" }, { "docid": "99a27b178a9f156dabe852b77c0875a8", "score": "0.46569002", "text": "def generate_side(self, dimension, index, handedness=\"l\",\n announce=\"generating a side\"):\n \n if dimension == \"x\":\n array = np.array([np.zeros(self.image[:, index].shape), self.image[:, index]]).T\n length = self.x - 1\n elif dimension == \"y\":\n array = np.array([np.zeros(self.image[index, :].shape), self.image[index, :]]).T\n length = self.y - 1\n else:\n raise IOError(\"dimension must be 'x' or 'y'\")\n\n for i in range(length):\n for triangle in 
self.get_template_triangles(handedness=handedness):\n m = self.counter()\n for n, (k, l) in enumerate(triangle):\n if dimension == \"x\":\n vector = [i + k, - index * (self.y - 1), array[i + k, l]]\n else:\n vector = [- index * (self.x - 1), i + k, array[i + k, l]]\n self.elevation.vectors[m][n] = np.array(\n vector\n )", "title": "" }, { "docid": "f83654dbd575ba33b2c571deb2ef81b5", "score": "0.46521455", "text": "def next_slice(axes):\n for i in range(3):\n image3d = axes[i].image3d\n # wrap around using % modulus\n axes[i].index = (axes[i].index + 1) % image3d.shape[0]\n axes[i].images[0].set_array(image3d[axes[i].index, :, :])", "title": "" }, { "docid": "00dd4195cad3e027116da1d59b7dc0a4", "score": "0.46478876", "text": "def project_skel(skel_3d):\n skel_2d = skel_3d[:2, :] # just drop z values\n return skel_2d", "title": "" }, { "docid": "791627dbab3ddeaa27c57292bc3a17fa", "score": "0.46438283", "text": "def crop_and_embed(array, box_coords, cube_shape):\n cube_array = np.zeros(cube_shape, dtype=array.dtype)\n crop = [int(max(0, box_coords[i])) \n if i % 2 == 0 else\n int(min(box_coords[i], array.shape[i // 2]))\n for i in range(6)]\n embd = [abs(box_coords[i]) if box_coords[i] < 0 else 0\n if i % 2 == 0 else\n cube_shape[i // 2] if box_coords[i] <= array.shape[i // 2] else array.shape[i // 2] - box_coords[i]\n for i in range(6)]\n cube_array[embd[0]:embd[1], embd[2]:embd[3], embd[4]:embd[5]] \\\n = array[crop[0]:crop[1], crop[2]:crop[3], crop[4]:crop[5]]\n return cube_array", "title": "" }, { "docid": "60f9414451df9139488fbd5a2e60c12e", "score": "0.46417344", "text": "def _euler_integration_3d(\n locations: numpy.ndarray,\n flows: numpy.ndarray,\n indices: numpy.ndarray,\n num_iterations: numpy.uint32,\n):\n shape_z, shape_y, shape_x = flows.shape[1:]\n for _ in range(num_iterations):\n for i in range(indices.shape[0]):\n z, y, x = indices[i, 0], indices[i, 1], indices[i, 2]\n\n curr_z, curr_y, curr_x = int(locations[0, z, y, x]), int(locations[1, z, y, x]), int(locations[2, z, y, x])\n flow_z, flow_y, flow_x = flows[0, curr_z, curr_y, curr_x], flows[1, curr_z, curr_y, curr_x], flows[2, curr_z, curr_y, curr_x]\n next_z, next_y, next_x = locations[0, z, y, x] - flow_z, locations[1, z, y, x] - flow_y, locations[2, z, y, x] - flow_x\n\n next_z = min(shape_z - 1, max(0, next_z))\n next_y = min(shape_y - 1, max(0, next_y))\n next_x = min(shape_x - 1, max(0, next_x))\n\n locations[0, z, y, x] = next_z\n locations[1, z, y, x] = next_y\n locations[2, z, y, x] = next_x\n return locations", "title": "" }, { "docid": "5290fa347d4161990bd762f3f882a639", "score": "0.46397325", "text": "def get_box_coords_from_index(P, ul_idx, lr_idx): \n # Compute the size of each cell in the grid \n cell_size = 1.0 / P \n \n # Compute the x and y indices of the upper-left and lower-right corners of the bounding box \n ul_x = ul_idx % P \n ul_y = ul_idx // P \n \n lr_x = lr_idx % P \n lr_y = lr_idx // P \n \n # Compute the normalized coordinates of the bounding box \n if ul_idx == lr_idx: \n x1 = ul_x * cell_size \n y1 = ul_y * cell_size \n x2 = lr_x * cell_size + cell_size \n y2 = lr_y * cell_size + cell_size \n elif ul_x == lr_x or ul_y == lr_y: \n x1 = ul_x * cell_size \n y1 = ul_y * cell_size \n x2 = lr_x * cell_size + cell_size \n y2 = lr_y * cell_size + cell_size \n else: \n x1 = ul_x * cell_size + cell_size / 2 \n y1 = ul_y * cell_size + cell_size / 2 \n x2 = lr_x * cell_size + cell_size / 2 \n y2 = lr_y * cell_size + cell_size / 2 \n \n return np.array([x1, y1, x2, y2])", "title": "" }, { "docid": 
"61b4a177b0bf35898d2686e2bcb7cba8", "score": "0.46375418", "text": "def ravel_multi_index(v: Iterable[Union[torch.LongTensor, np.ndarray]],\n shape: Iterable[int], **kwargs) -> torch.LongTensor:\n return longtensor(np.ravel_multi_index(v, shape, **kwargs))", "title": "" }, { "docid": "b1828c59ca58517a4a0ec7fe1e94cc2d", "score": "0.4631725", "text": "def get_origin_lab_by_index( self , index ):\r\n # NOTE: This function assumes that the transform stored by 'apply_FK_all' is current\r\n # Fetch the linear offset from the homogeneous transformation\r\n return [ self.links[ index ].xformHomog[0][3] , self.links[ index ].xformHomog[1][3] , self.links[ index ].xformHomog[2][3] ]", "title": "" }, { "docid": "637a7b222f83ce4c5b288ee8d151e49e", "score": "0.4630662", "text": "def access_data(data, x, y, z = 0):\n # arrays are index \"outward-in\", i.e., the x-coordinate is always the *last* element\n \n return data[z,y,x]", "title": "" }, { "docid": "e860968330a13e7011fcb42c34d78943", "score": "0.4629762", "text": "def _permute_res(*index):\n for i, item in enumerate(flag_three):\n if i == 0:\n res_axis = (index[item],)\n else:\n res_axis = res_axis + (index[item],)\n\n return res_axis", "title": "" }, { "docid": "7a2971843de43d10336d499cbac873ab", "score": "0.46273336", "text": "def cube(n):\n return n ** 3", "title": "" }, { "docid": "7a2971843de43d10336d499cbac873ab", "score": "0.46273336", "text": "def cube(n):\n return n ** 3", "title": "" }, { "docid": "1aea4e3dd5e958058e3bfff571122f05", "score": "0.46252432", "text": "def get(self, v):\n v = asarray(v).astype(Float32)\n intv = v.astype(Int)\n\n # Define a sampling pattern of vertices within 1 of intv in each dimension.\n # The last axis of this array will be the vertex, the next-to-last will be the\n # cube point, all other axes are as used by the caller.\n cubev = repeat(intv[...,NewAxis,:], self.cube.shape[0], -2)\n cubev += self.cube\n\n # Get random gradient vectors for each sample point\n grad = self.table.get(cubev)\n cubev = cubev.astype(Float32)\n\n # Get the distance between our vector and each sample point\n dist = repeat(v[...,NewAxis,:], self.cube.shape[0], -2) - cubev\n\n # Use dot products to calculate the influence of each sample point\n infl = sum(grad * dist, -1)\n\n # Determine the amount of interpolation in each axis.\n # This uses the curve y = 3x^2 - 2x^3 to give a much more visually\n # pleasing result. Linearly interpolated perlin noise generally looks bad.\n s = dist[...,0,:]\n s = s*s*self.three - s*s*s*self.two\n\n # This interpolation is easier to visulaize if you think of each axis\n # as a bit, and each sampling point as a binary number.\n # The process of interpolating over n dimensions and returning a\n # scalar result requires that we 'fold' our sample influences.\n #\n # First we fold across axis 0: This means that all samples with axis\n # zero at 0 will be matched with all samples where axis 0 is 1, and they\n # will be interpolated according to the amount in s. The result will be\n # stored in the influence array, where axis 0 is 0. The locations where\n # axis 0 is 1 will no longer be used.\n #\n # Next it folds across axis 1. Axis 0 is now 'locked', so we only have\n # half as many axes to match up and interpolate across. The process\n # continues across all axes.\n #\n # The connection with binary digits involves how the axes are mapped\n # to locations in our cube and infl arrays. The least significant bit\n # in the cube index is the last axis, the most significant bit is the\n # first axis. 
Thus, reading the cube index in binary from left to right,\n # the axes are in increasing order.\n #\n # The first axis we process will then be the high bit. We fold all infl\n # samples by listing all binary numbers with this bit set to 0 and then\n # 1, and interpolate. To 'lock' this bit then is just a matter of decreasing\n # the number of axes we loop over.\n #\n # To keep track of all this bit-banging fun, we have a bit mask indicating\n # the current fold axis. The number of interpolations necessary for that fold\n # will always be equal to this mask. Every time we lock an axis, we just\n # bit-shift this mask right by one. We're done when the mask is zero.\n #\n axisNumber = 0\n axisMask = 1 << (self.dimensions - 1)\n while axisMask:\n for i in xrange(axisMask):\n infl[...,i] += s[...,axisNumber] * (infl[...,axisMask + i] - infl[...,i])\n axisMask >>= 1\n axisNumber += 1\n return infl[...,0]", "title": "" }, { "docid": "ec08992bd7bd495c9dc3a9c25216ca67", "score": "0.46234888", "text": "def plane_index(idx: int):\n if dim == 0:\n indices = [slice(0, None), idx, slice(0, None)]\n elif dim == 1:\n indices = [slice(0, None), slice(0, None), idx]\n elif dim == 2:\n indices = [idx, slice(0, None), slice(0, None)]\n else:\n raise ValueError(\n 'Integration dimension should be one of 0, 1, and 2. {} is given.'\n .format(dim))\n\n return indices", "title": "" }, { "docid": "3df3b259d1b89314e167de64b66e56b2", "score": "0.46197382", "text": "def is_three_dimensional(self) -> bool:\n return self.dimensions == 3", "title": "" }, { "docid": "cae9fe391e1ae61014a01fe88c3baba0", "score": "0.46191838", "text": "def __getitem__(self, index):\n \n #- convert index to 1d array to maintain dimentionality of sliced arrays\n if not isinstance(index, slice):\n index = np.atleast_1d(index)\n \n if self.fibermap is not None:\n fibermap = self.fibermap[index]\n else:\n fibermap = None\n \n if self.mask is None :\n tmp_mask=None\n else :\n tmp_mask=self.mask[index]\n \n if self.sigma is None :\n tmp_sigma=None\n else :\n tmp_sigma=self.sigma[index]\n \n result = QFrame(self.wave[index], self.flux[index], self.ivar[index],\n tmp_mask, tmp_sigma, fibers=self.fibers[index], spectrograph=self.spectrograph,\n meta=self.meta, fibermap=fibermap)\n \n return result", "title": "" }, { "docid": "a06fa8aca14dc1cc12f4c90674d65fad", "score": "0.46180984", "text": "def __getitem__(self, index):\n\n if index < 0:\n raise NotImplementedError, \"RootNtuple doesn't support fancy python indexing.\"\n elif index >= self.entries:\n raise IndexError, \"This ntuple only contains %d values.\" % (self.entries,)\n\n i, loc = self.global_to_local_index(index)\n return self.internal[i].entry(loc)", "title": "" }, { "docid": "17bbd259e3f66ebe676a823225ec20d1", "score": "0.46176532", "text": "def rotate_cube(cube, angle_index):\n # Rotate a cube with dim=[x,y,z] a multiple of 90 degrees in plane x,y\n nnqq = cube.shape[2]\n ncube = np.copy(cube)\n for ii in range(nnqq):\n ncube[:, :, ii] = np.rot90(cube[:, :, ii], angle_index)\n return ncube", "title": "" }, { "docid": "2e69d60a079cbbf656e6292291e3f29d", "score": "0.45975503", "text": "def idx_to_rowcol(self, idx: np.ndarray) -> np.ndarray:\n return np.column_stack(np.unravel_index(idx, self.size[::-1]))", "title": "" }, { "docid": "ee49ec790d4eb4460dae76f645b17e73", "score": "0.45945832", "text": "def Inter3D(*args):\n return _BRepOffset.BRepOffset_Tool_Inter3D(*args)", "title": "" }, { "docid": "32852612b856d00bc374db9453027b73", "score": "0.45897317", "text": "def 
get_index_where__(coords, coord):\n if coord.size == 3:\n # Special case when only one coordinate requested\n mask = np.all(coords[:, None] == coord[None, :], axis = 2)\n items= np.outer( np.arange(coords.shape[0]), np.ones(1, dtype = int))\n return items[mask][0]\n else:\n mask = np.all(coords[:, None] == coord[None, :], axis = 2)\n items= np.outer( np.arange(coords.shape[0]), np.ones(coord.shape[0], dtype = int))\n return items[mask]", "title": "" }, { "docid": "140773046515446b23c118f8d48cc13e", "score": "0.4588737", "text": "def __getitem__(self, d: 'unsigned long') -> \"double\":\n return _itkContinuousIndexPython.itkContinuousIndexD3___getitem__(self, d)", "title": "" }, { "docid": "a3969c56d09ff0ee4504ec9f521b1e8c", "score": "0.45847988", "text": "def vertexBaseMeshAddWithIndex(self, x, y, z, index):\n \n pass", "title": "" }, { "docid": "caefc40867890a7dfe3f25d60593c31b", "score": "0.45836625", "text": "def _get_box_index(self):\n cl = self.spike_clusters[self.spike_ids]\n # Sanity check.\n # assert np.all(np.in1d(cl, self.cluster_ids))\n return _index_of(cl, self.all_cluster_ids)", "title": "" }, { "docid": "b4a65b5ce70b3e6d0e35a12e74e98971", "score": "0.45758823", "text": "def make2d(idx, num_cols=224):\n v = idx % num_cols\n u = idx // num_cols\n return u, v", "title": "" }, { "docid": "03593d1e8857e5dc04ca002cc86f8e55", "score": "0.4575121", "text": "def pixel_index_to_loc(index, trimrows, convdata):\n foundslice = False\n totalel = 0\n k = 0\n while foundslice is False:\n currslice = convdata[k]\n elinslice = ((currslice.shape[0] - trimrows * 2) *\n (currslice.shape[1] - trimrows * 2))\n totalel = totalel + elinslice\n if totalel > index:\n foundslice = True\n else:\n k = k+1\n shape = convdata[k].shape\n index = int(index - (totalel-elinslice))\n i = int(math.floor(index/(shape[1]-2*trimrows))) + trimrows\n index = index % (shape[1] - 2*trimrows)\n j = int(index) + trimrows\n return[i, j, k]", "title": "" }, { "docid": "1b57a0838d5abe325a605c9060b5399f", "score": "0.4568069", "text": "def test_flatten_3d() -> None:\n lst = [\n [\n [1, 2],\n [3, 4],\n ],\n [\n [5, 6],\n [7, 8],\n ],\n ]\n flat = flatten2list(lst)\n\n assert flat == [1, 2, 3, 4, 5, 6, 7, 8]", "title": "" }, { "docid": "48432c12ea5a92e4ac4d515e4096e3e5", "score": "0.45660672", "text": "def _index(self, i, j, k):\n if i < 0 or j < 0 or k < 0 or k >= self.nz or j >= self.ny or i >= self.nx:\n return -1\n return k * self.nxy + j * self.nx + i", "title": "" } ]
b7fea5fe355cde8ec14ccdd6653e0c46
Get item shortcut through both dicts.
[ { "docid": "06d3cd8a0386c261b7d67402f6c54233", "score": "0.0", "text": "def __getitem__(self, command: TelnetCommand) -> ExpectedResponse:\n return self._queue[command.group][command]", "title": "" } ]
[ { "docid": "472f524275ca6c4f4f4d33910ff44203", "score": "0.597796", "text": "def _get_item(dic: dict, keys: list) -> dict:\n\tfor key in keys:\n\t\tdic = dic[key]\n\n\treturn dic", "title": "" }, { "docid": "880a504ea925a5235047cca96ec8d9f3", "score": "0.5780727", "text": "def __getitem__(self, item):\n return self.get(sighash=item)", "title": "" }, { "docid": "d6befb0ff20fdee1321ce810ca024c3e", "score": "0.57484156", "text": "def get_item(dictionary, key):\n return dictionary.get(key)", "title": "" }, { "docid": "d6befb0ff20fdee1321ce810ca024c3e", "score": "0.57484156", "text": "def get_item(dictionary, key):\n return dictionary.get(key)", "title": "" }, { "docid": "d6befb0ff20fdee1321ce810ca024c3e", "score": "0.57484156", "text": "def get_item(dictionary, key):\n return dictionary.get(key)", "title": "" }, { "docid": "d6befb0ff20fdee1321ce810ca024c3e", "score": "0.57484156", "text": "def get_item(dictionary, key):\n return dictionary.get(key)", "title": "" }, { "docid": "0fa85ed5a37e7fbddcf02c62a2fe1e75", "score": "0.5735857", "text": "def get(self, *args, **kwargs):\n priority, item = super().get(*args, **kwargs)\n return priority, item", "title": "" }, { "docid": "6f54233c85aa12a7d56b7b155563b341", "score": "0.5718025", "text": "def __getitem__(self, item):\n return self.__dict__[item]", "title": "" }, { "docid": "52ad7036e28411e14e73a5204b7f6d58", "score": "0.5684234", "text": "def obj1_lookup(self, obj2):\n return self.obj2_obj1[obj2]", "title": "" }, { "docid": "31085f6f0a5888e3998bcaa5939ee726", "score": "0.565758", "text": "def lookup(self, key):", "title": "" }, { "docid": "80d429b42455fdb3fe2ac963851ca29d", "score": "0.5611967", "text": "def obj2_lookup(self, obj1):\n return self.obj1_obj2[obj1]", "title": "" }, { "docid": "7aab2fee825b9d35f272313ed5ffe77b", "score": "0.55477744", "text": "def __getitem__(self, item):\n if type(item) == str:\n return self.__dict__[item]\n else:\n return self.__dict__", "title": "" }, { "docid": "95b96f1c335b5a8a015f95e48d83c754", "score": "0.55428225", "text": "def itemFromProxy(obj):\n return object.__getattribute__(obj, '_sharedItem')", "title": "" }, { "docid": "f5fcd43e7e2134d7cd8101333131f995", "score": "0.54106086", "text": "def __getitem__(self, key):\n for k,v in list(self.__dict__.items()):\n if k == key:\n return v\n try:\n return v[key]\n except:\n pass\n\n print((\"Item %s could not be found...\" %key))", "title": "" }, { "docid": "016b405359d18dddbd1171f00f69f3fb", "score": "0.53960925", "text": "def __getitem__(self, item):\n result = self.get(item)\n if not result:\n raise KeyError(item)\n else:\n return result", "title": "" }, { "docid": "30da54a4af3b1912fff1e454556a17a1", "score": "0.5376248", "text": "def __getitem__(self, key):\n return self()[key]", "title": "" }, { "docid": "0687580a746b6847ea93e77d48cf0fdb", "score": "0.537204", "text": "def by_key(item):\n return Line['key', item]", "title": "" }, { "docid": "8f6a89699a49d482690ef22a5ebb494a", "score": "0.53654104", "text": "def __getitem__(self, tup:('a', 'action:from|to', 'b')) -> dict:\n a, b = self._read_tup(tup)\n \n if (a,b) in self.dicts:\n return self.dicts[a,b]\n if (b,a) in self.dicts:\n self.dicts[a,b] = self.reverse_dict(self.dicts[b,a])\n return self.dicts[a,b]\n raise KeyError(' '.join(tup))", "title": "" }, { "docid": "e6fe4bbd09d9dd7d42730eb67d80c546", "score": "0.5348452", "text": "def find(items, term, key=None):\n if key is None:\n key = lambda other: term == other\n \n for item in items:\n if key(item):\n return item", "title": "" }, { "docid": 
"eccfce713d8d75a3e2955a8057231b9e", "score": "0.5340252", "text": "def _find_match(needle: dict, haystack: list, keys: list):\n for item in haystack:\n for key in keys:\n if item.get(key) != needle[key]:\n break\n else:\n return item\n return None", "title": "" }, { "docid": "5f7f82881e514387c63548f278fab8f4", "score": "0.5338744", "text": "def _item_lazy_access(self, key):\n d = self._items.get(key)\n if not d:\n d = {}\n self._items[key] = d\n return d", "title": "" }, { "docid": "8fbbf4311b7e3b4cf8a4331c02268d08", "score": "0.5320492", "text": "def get_key(self, item):\r\n return item[0]", "title": "" }, { "docid": "1c44cd9d59a3880cddfc3da667c0eeea", "score": "0.5300942", "text": "def __getitem__(self, item):\n return getattr(self, item)", "title": "" }, { "docid": "9963e27f5c714a49070ee83979fdfba1", "score": "0.52864194", "text": "def _single_getitem(self, key):\n try:\n return self._dict[key]\n except KeyError:\n return self.default", "title": "" }, { "docid": "281ca332e8d02e8ed5f97555c8ba209c", "score": "0.52860224", "text": "def __getitem__(self, key):\n return type(self)(self.origin, typeof(key))", "title": "" }, { "docid": "e76dd62af3fb804886f998b0adb3d5a5", "score": "0.5276896", "text": "def __getattr__(self, item):\n if item.startswith(\"_\"):\n return super(Slicer, self).__getattr__(item)\n\n if item == \"o\":\n return reduced_o(self._anon)\n else:\n tracked = self._objects.get(item, None)\n if tracked is None:\n tracked = self._aliases.get(item, None)\n\n if tracked is None:\n raise AttributeError(f\"Attribute '{item}' does not exist.\")\n\n return tracked.o", "title": "" }, { "docid": "f0b5b3da110a40735470068893a643ec", "score": "0.5268786", "text": "def _map___getitem__(self, key):\n if not isinstance(key, self.keytype):\n raise KeyError('type of key should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))\n if key not in self:\n raise KeyError('key not found')\n return self.second(self.find(key))", "title": "" }, { "docid": "0985bf6ef89bf65a4b0a54b7dbc9b94e", "score": "0.52579814", "text": "def __getitem__(self, item):\n try:\n return self.get_field(item)\n except KeyError:\n if self._src_crds is not None and item in self._src_crds:\n return self._src_crds[item]\n else:\n raise KeyError(item)", "title": "" }, { "docid": "056a69bc3878a3b9c467436b0743d479", "score": "0.5257437", "text": "def secondary_keys_dicts(self):", "title": "" }, { "docid": "62ab836d9bd5ad667a3cec18b413b4f6", "score": "0.5250056", "text": "def get(self, item, default=None):\n return self.as_dict().get(item, default)", "title": "" }, { "docid": "3323949a6957dab4943d257f7db33e35", "score": "0.5247545", "text": "def __getitem__(self, key):\n\n return self.additional[key]", "title": "" }, { "docid": "3323949a6957dab4943d257f7db33e35", "score": "0.5247545", "text": "def __getitem__(self, key):\n\n return self.additional[key]", "title": "" }, { "docid": "273aadbc6e381ae569a35edb58a9b93b", "score": "0.5237851", "text": "def __getitem__(self, key):\n return dict.__getitem__(self, key)", "title": "" }, { "docid": "320c4bec98f472e763ab40387bba2278", "score": "0.5236101", "text": "def get(self, key, alternative=None):\n try:\n return self[key]\n except KeyError:\n return alternative", "title": "" }, { "docid": "7f1abcba4ba31ce077f4cb99fa3bc865", "score": "0.52312005", "text": "def __getitem__(self, item):\n return foreign_get(self.vars, item)", "title": "" }, { "docid": "02c89bb4655340a317cdd0b7db42db10", "score": "0.5221905", "text": "def get_from_first(key: Any, *getters: Sequence, default: Any = None) 
-> Any:\n for item in getters:\n if item and (\n isinstance(item, dict) and key in item\n or isinstance(key, numbers.Integral) and hasattr(item, \"__len__\") and 0 <= int(key) < len(item)\n ):\n return item[key]\n return default", "title": "" }, { "docid": "40c4b057fa88f5ba1908d1bf01aac2ad", "score": "0.5215199", "text": "def __getitem__(self, key):\n return self._mappings[key]", "title": "" }, { "docid": "a19bad0305704b0b1b4544ccc6f61cff", "score": "0.52042335", "text": "def getSpecific(self, keyword, key):", "title": "" }, { "docid": "fa675d2d456a45df7ee3192e3cc4b791", "score": "0.5195001", "text": "def __getitem__(self, item):\n return self._object_names[item]", "title": "" }, { "docid": "1881ee7aef07b86da9497102604f4121", "score": "0.5192655", "text": "def __getattr__(self, item):\n return self.__dict__[item] if item in self.__dict__ else self.data.get(item)", "title": "" }, { "docid": "96aed9f70f4df36e256a82fcfc9a6097", "score": "0.51832706", "text": "def __getitem__(self, alt_loc):\n assert isinstance(alt_loc, str)\n\n if self.altloc is None:\n if self.alt_loc == alt_loc:\n return self\n raise KeyError, alt_loc\n\n else:\n return self.altloc[alt_loc]", "title": "" }, { "docid": "bc3c41179416fe97d9cc5c916bf5b070", "score": "0.51807827", "text": "def get_db_item(self, key, item_key):\n return self.get_db_items(key).get(item_key)", "title": "" }, { "docid": "8db75a2d2140ccc2ab84a4c66b7d01b5", "score": "0.5170798", "text": "def __getitem__(self, key):\n return self.__getattr__(key)", "title": "" }, { "docid": "ed6453476a6cd1eb2057f971358c3716", "score": "0.5168816", "text": "def __getitem__(self, key):\n return self.__getattr__(key)", "title": "" }, { "docid": "335b646837d568239b4738dae7060187", "score": "0.51440024", "text": "def get_other_item(item: str) -> np.ndarray:\n i = choice(range(len(items)))\n while items[i] == item:\n i = choice(range(len(items)))\n return embeddings[i]", "title": "" }, { "docid": "530cbe0aafda9868d2f8e3c4898b001a", "score": "0.51363635", "text": "def get_item_fx(self, key):\n if hasattr(self, key):\n return getattr(self, key)\n else:\n raise KeyError", "title": "" }, { "docid": "6685c4698f4702c97f5e2a57006f54f7", "score": "0.51334906", "text": "def _lookup_wrapper(d):\n def _inner(key):\n return d[key]\n return _inner", "title": "" }, { "docid": "7a292908d147559aa4d84bcf8f0be523", "score": "0.5132768", "text": "def __getitem__(self, keep):\n return self.get([self], keep)[0]", "title": "" }, { "docid": "5bd768dce827f38cc96ec0631c17a735", "score": "0.51282305", "text": "def __getitem__(self, item):\n u, v = item\n return self.__getitem(u, v)", "title": "" }, { "docid": "b4a612db9a4590697aa6722c20d047d9", "score": "0.51204985", "text": "def __getitem__(self, key: Union[Tuple[str, T], str]) -> Union[str, T]:\n if isinstance(key, tuple):\n return self.get(key[0], default=key[1])\n else:\n return self.get(key)", "title": "" }, { "docid": "cce98f91413e5ea41da415179798c68d", "score": "0.5107033", "text": "def __getitem__(self, keys):\n return self.get(keys, self.default)", "title": "" }, { "docid": "ab2cf7ebdebeebf7c35e4f676cc6d2b7", "score": "0.50995785", "text": "def _map_popitem(self):\n if len(self) == 0:\n raise KeyError('key not found')\n key = self.keys()[0]\n return (key, self.pop(key))", "title": "" }, { "docid": "65d2973fe1e188caea1f1963d2d1a9e4", "score": "0.5097012", "text": "def __getitem__(self, item):\n return self.top[item]", "title": "" }, { "docid": "fc7009fe1553491a099e5fbfb9fdceb9", "score": "0.509351", "text": "def getitem(\n self, obj: t.Any, 
argument: t.Union[str, t.Any]\n ) -> t.Union[t.Any, Undefined]:\n try:\n return obj[argument]\n except (AttributeError, TypeError, LookupError):\n if isinstance(argument, str):\n try:\n attr = str(argument)\n except Exception:\n pass\n else:\n try:\n return getattr(obj, attr)\n except AttributeError:\n pass\n return self.undefined(obj=obj, name=argument)", "title": "" }, { "docid": "48f2ab74033e7cac6fb100f30f0ad2a6", "score": "0.5080237", "text": "def get_item_in_dict(d_or_l, key_list, prev=None):\n if prev is None:\n res = []\n else:\n res = prev.copy()\n\n if type(d_or_l) is dict:\n d_list = [d_or_l]\n if type(d_or_l) is list:\n d_list = d_or_l\n\n for d in d_list:\n key_list_len = len(key_list)\n\n if key_list_len >= 1:\n key = key_list[0]\n if key in d:\n if key_list_len == 1:\n res.append(d[key])\n else:\n res = Operation.get_item_in_dict(d[key], key_list[1:], res)\n\n return res", "title": "" }, { "docid": "ebc9e2a97b1bc12d98675a3190c01954", "score": "0.50658095", "text": "def get(self, key, alternative=None):\n try:\n return self[key]\n except (KeyError, TypeError, ValueError):\n return alternative", "title": "" }, { "docid": "6a7f3799bd4dab2b081fbae026fbbe7e", "score": "0.5063169", "text": "def _lookup(self, key):\n\n if key in self.position and key in self.info:\n # If the key exists in both position and info, treat it as a list to intersect.\n return self._skill_list(key)\n if key in self.position:\n return self.position[key]\n if key in self.info:\n return self.info[key]\n\n raise KeyError(f\"Invalid Key: {key}\")", "title": "" }, { "docid": "3a275d335c1b14cf955eb9f4b24e7e04", "score": "0.50616646", "text": "def __getitem__(self, key: T) -> T:\n return self.lookup(key)", "title": "" }, { "docid": "3aab68b6c2f38e51b6ad0c8cf8bc692d", "score": "0.5050881", "text": "def __getitem__(self, item):\n return self._metadata[item]", "title": "" }, { "docid": "1ccff0509f52f248616040c76f5a6f4c", "score": "0.5039343", "text": "def __getitem__(self, key):\n return self.query(key)", "title": "" }, { "docid": "d9ec7ea8dbdad3ae6fb7c617ff545536", "score": "0.5035987", "text": "def __getitem__(self, item):\n return self.data[item]", "title": "" }, { "docid": "d9ec7ea8dbdad3ae6fb7c617ff545536", "score": "0.5035987", "text": "def __getitem__(self, item):\n return self.data[item]", "title": "" }, { "docid": "d9ec7ea8dbdad3ae6fb7c617ff545536", "score": "0.5035987", "text": "def __getitem__(self, item):\n return self.data[item]", "title": "" }, { "docid": "d201fb22c0b56f01c536bffefcea5f75", "score": "0.5017554", "text": "def get_instance(self, instance):\n\n title = list(instance.keys())[0]\n instance = instance.get(title)\n return instance", "title": "" }, { "docid": "87db9824a2cb1cc3bce583a608820b2c", "score": "0.5016983", "text": "def getitem(obj, attr, default=None):\n from functools import partial\n _getattr = partial(getattr, obj)\n getter = getattr(obj, '__getitem__', None) or _getattr\n try:\n return getter(attr)\n except (KeyError, AttributeError):\n return default", "title": "" }, { "docid": "2a0e5361fb86131d7547bbd26c7db102", "score": "0.5015981", "text": "def __getitem__(self, item):\n return self._data[item]", "title": "" }, { "docid": "9126cdfa637936d92a7cf525f98179df", "score": "0.5004039", "text": "def __getitem__(self, key):\n return self", "title": "" }, { "docid": "08ec7950dead05ad10ea6192f6690324", "score": "0.5003514", "text": "def get_item(obj, key):\n val = None\n if obj and type(obj) == dict:\n val = obj.get(key)\n elif obj and hasattr(obj, key):\n val = getattr(obj, key)\n val = 
val or ''\n return val", "title": "" }, { "docid": "16dd39a7fb4e96c6298b53ed2600cbd3", "score": "0.50019485", "text": "def __getitem__(self, key):\n return self.get(key)", "title": "" }, { "docid": "16dd39a7fb4e96c6298b53ed2600cbd3", "score": "0.50019485", "text": "def __getitem__(self, key):\n return self.get(key)", "title": "" }, { "docid": "16dd39a7fb4e96c6298b53ed2600cbd3", "score": "0.50019485", "text": "def __getitem__(self, key):\n return self.get(key)", "title": "" }, { "docid": "96e7ef2b6293314816abe8985b1f4c87", "score": "0.49922767", "text": "def __getitem__(self, item):\n\n return {'text': self.texts[item],\n 'label': self.labels[item]}", "title": "" }, { "docid": "59b5fc5aade4d4f129006034d906356a", "score": "0.49885535", "text": "def _single_getitem(self, key):\n return getattr(self._cpp_obj, self._getter)(key)", "title": "" }, { "docid": "0d0a0a3348c192171e82bf10177ef2d9", "score": "0.49878815", "text": "def __getitem__(self, itm):\n return self.wrappers[itm]", "title": "" }, { "docid": "d867aad4176af9618e95333f0aae01f5", "score": "0.49874637", "text": "def __getitem__(self, item):\n instrumented_attrs = {k: getattr(self, k) for k in self}\n return instrumented_attrs[item]", "title": "" }, { "docid": "86bbd82f9a4d5f01815bdcb4cbfc11fa", "score": "0.49821863", "text": "def get(aMap,key,default=None):\n\ti,k,v=get_slot(aMap,key,default=default)", "title": "" }, { "docid": "af25dc92f961f875c8785b8e54ec86b9", "score": "0.4982008", "text": "def __getitem__(self, key):\n return self.get_function()[key]", "title": "" }, { "docid": "4a1420ea01e96a55a751b28df625935a", "score": "0.49786395", "text": "def __getattr__(self, key):\n return self._items[key]", "title": "" }, { "docid": "bd257d65a1838f05c10c6cd24659232d", "score": "0.49768052", "text": "def items(self, *args, **kwargs):\n return [ (key, self._get(key, *args, **kwargs),) for key in self.keys(*args, **kwargs) ]", "title": "" }, { "docid": "b9f19b1b843efac474a829a8d2a37f38", "score": "0.4974795", "text": "def __getitem__(self,key):\n # Using [key] syntax on an equipment allows to retrieve a tag directly\n # or a point referred to this particular equipment\n for each in self.tags:\n if key == each:\n return self.tags[key]\n # if key not found in tags... we probably are searching a point\n # self will call __iter__ which will look for points in equipment\n for point in self:\n #partial_results = []\n # Given an ID.... should return the point with this ID\n if key.replace('@','') == str(point.id).replace('@',''):\n return point\n # Given a dis or navName... 
should return equip\n if 'dis' in each.tags:\n if key == each.tags['dis']:\n return each\n if 'navName' in each.tags:\n if key == each.tags['navName']:\n return each\n if 'navNameFormat' in each.tags:\n if key == each.tags['navNameFormat']:\n return each\n else: \n try:\n # Maybe key is a filter_expr\n request = self.find_entity(key)\n return request.result\n except HaystackError as e:\n self._session._log.warning('{} not found'.format(key))", "title": "" }, { "docid": "6472306570b8a7bc71cc4762c15a053e", "score": "0.49663872", "text": "def Map(a, b):\n out = {}\n for key, value in a.items():\n if key in b:\n out[value] = b[key]\n return out", "title": "" }, { "docid": "0af6983d0d32508c6fe5436ef455b111", "score": "0.4959104", "text": "def __getitem__(self, key):", "title": "" }, { "docid": "fea7808a8b63ac2545e6e0b8b5688f6b", "score": "0.4947378", "text": "def item_duplicate():\n return {'name':'chair',\n 'value':300}", "title": "" }, { "docid": "ddc9548c162732f02d5fb2ee9c613258", "score": "0.4946922", "text": "def __getitem__(self, item):\r\n return self.select(item)", "title": "" }, { "docid": "553ee6ccacc1efcf8a630a6529b8c366", "score": "0.49465272", "text": "def __getitem__(self, index):\n return index, super().__getitem__(index)", "title": "" }, { "docid": "3b9aac78799f7acc13e09a35f445a080", "score": "0.49435228", "text": "def _get_item(self, item_name, item_type):\n\t\t# create local cache for performance optimizations. TODO: Rewrite functions that call this function\n\t\tif not self.item_list:\n\t\t\tself.item_list = self.pre_object_list\n\t\t\tself.item_cache = {}\n\t\t\tfor item in self.item_list:\n\t\t\t\tif not item.has_key('name'):\n\t\t\t\t\tcontinue\n\t\t\t\tname = item['name']\n\t\t\t\ttmp_item_type = (item['meta']['object_type'])\n\t\t\t\tif not self.item_cache.has_key( tmp_item_type ):\n\t\t\t\t\tself.item_cache[tmp_item_type] = {}\n\t\t\t\tself.item_cache[tmp_item_type][name] = item\n\t\ttry:\n\t\t\treturn self.item_cache[item_type][item_name]\n\t\texcept:\n\t\t\treturn None\n\t\tif self.item_cache[item_type].has_key(item_name):\n\t\t\treturn self.item_cache[item_type][item_name]\n\t\treturn None\n\t\tfor test_item in self.item_list: \n\t\t\t## Skip items without a name\n\t\t\tif not test_item.has_key('name'):\n\t\t\t\tcontinue\n\n\t\t\t## Make sure there isn't an infinite loop going on\n\t\t\ttry:\n\t\t\t\tif (test_item['name'] == item_name) and (test_item['meta']['object_type'] == item_type):\n\t\t\t\t\treturn test_item\n\t\t\texcept:\n\t\t\t\traise ParserError(\"Loop detected, exiting\", item=test_item)\n\t\t\t\n\t\t## If we make it this far, it means there is no matching item\n\t\treturn None", "title": "" }, { "docid": "20b8b9a7c5c21926ed074e9814247881", "score": "0.49394187", "text": "def conditional_copy(self, other, key, altkey=None):\n if hasattr(self, key):\n possible = getattr(self, key)\n if possible:\n usekey = {True: altkey, False: key}[altkey is not None]\n if hasattr(other, usekey):\n exists = getattr(other, usekey)\n if exists:\n return\n if isinstance(possible, list):\n setattr(other, usekey, [deepcopy(i) for i in possible])\n else:\n setattr(other, usekey, deepcopy(possible))", "title": "" }, { "docid": "f30d879caef4209417cbdc0787fd4c24", "score": "0.49352953", "text": "def __getitem__(self, key):\n return getattr(self, key)", "title": "" }, { "docid": "f30d879caef4209417cbdc0787fd4c24", "score": "0.49352953", "text": "def __getitem__(self, key):\n return getattr(self, key)", "title": "" }, { "docid": "d025c8c19ece5c9037b5cf90fee45822", "score": 
"0.4933713", "text": "def __get__(self, instance, owner):\n return self.key, self.value", "title": "" }, { "docid": "8c6044efedb00f6c8918c9cfe7d77e3c", "score": "0.49303666", "text": "def __getitem__(self, item):\n return self.hdus[item]", "title": "" }, { "docid": "c8d7fc8651b1becfb0775f8538989594", "score": "0.49301708", "text": "def __getitem__(self, item):\n return getattr(self, item)", "title": "" }, { "docid": "55c36e79fbc2bba08efa23affde0dbb3", "score": "0.4929861", "text": "def find(found_item, _):\n if found_item:\n return found_item[1]\n else:\n return default", "title": "" }, { "docid": "2be6befe6c710ebbfa14aa26cb59b11a", "score": "0.4923695", "text": "def __getitem__(self, name):\n return self._items[name.lower()][1]", "title": "" }, { "docid": "ebe0c4f040ae0da2e7057d8078616fef", "score": "0.4918688", "text": "def map(self, attr1, attr2):\n return dict(zip(getattr(self, attr1), getattr(self, attr2)))", "title": "" }, { "docid": "ebe0c4f040ae0da2e7057d8078616fef", "score": "0.4918688", "text": "def map(self, attr1, attr2):\n return dict(zip(getattr(self, attr1), getattr(self, attr2)))", "title": "" }, { "docid": "d7867667e3389e2893cb5c1dd8f6ae40", "score": "0.4916353", "text": "def __getitem__(self, item):\n\n for _var in self.inputs + self.outputs:\n if _var.name == item:\n return _var\n\n raise KeyError('No input: {}'.format(item))", "title": "" }, { "docid": "e688fee66661bb254482c59b372925b9", "score": "0.4915927", "text": "def get(self, item, default=NO_DEFAULT):\n try:\n return util.annotate(self.dict_[item])\n except KeyError:\n if default is NO_DEFAULT:\n raise\n return default", "title": "" }, { "docid": "6fbfbd238dacaf78f4ab0b060e4726d9", "score": "0.4909987", "text": "def rget(dict_object, path_list):\n try:\n return reduce(lambda d, k: d[k], path_list, dict_object)\n except KeyError:\n return dict_object", "title": "" }, { "docid": "1a6638cd0997c59c6e37a746a9480d45", "score": "0.49093032", "text": "def __getitem__(self, key):\n return self.get_models()[str(key)]", "title": "" }, { "docid": "6408f3ebde0aac2bfc0e250cddc91c08", "score": "0.4907842", "text": "def __getitem__(self, item):\n return self.default_dataset[item]", "title": "" } ]
3aae4b28f301e8cdc8fba6f89a073908
Calculate the sum of the number of truths tracked with ambiguous ID over all timestamps, e.g. a truth that has one track with correct ID and one with unknown ID assigned to it would be counted.
[ { "docid": "2e51093d3e27675f9349a80364b93a60", "score": "0.0", "text": "def _ja_sum(self, manager, timestamps):\n return sum(self._ja_t(manager, timestamp) for timestamp in timestamps)", "title": "" } ]
[ { "docid": "c3442804961ac918c979fed29342d916", "score": "0.57795316", "text": "def _nu_j(self, manager, truth):\n\n # Starting at the beginning of the truth find the track associated at that timestamp with\n # the longest length, increase the track count by one and move time to the end of that\n # track. Repeat until the end of the truth is reached. If no tracks present at a point then\n # move on to the next timestamp index in the truth.\n\n assocs = sorted(manager.association_set.associations_including_objects([truth]),\n key=attrgetter('time_range.end_timestamp'),\n reverse=True)\n\n if len(assocs) == 0:\n return 0\n\n truth_timestamps = sorted(i.timestamp for i in truth.states)\n n_truth_needed = 0\n i_timestamp = 0\n\n while i_timestamp < len(truth_timestamps):\n current_time = truth_timestamps[i_timestamp]\n assoc_at_time = next((assoc for assoc in assocs if current_time in assoc.time_range),\n None)\n if not assoc_at_time:\n i_timestamp += 1\n else:\n end_time = assoc_at_time.time_range.end_timestamp\n n_truth_needed += 1\n\n # If not yet at the end of the truth timestamps indices, move on to the next\n try:\n # Move to next timestamp index after current association's end timestamp\n i_timestamp = truth_timestamps.index(end_time, i_timestamp + 1) + 1\n except ValueError:\n break\n return n_truth_needed", "title": "" }, { "docid": "18ecf8193a181b7b096833d2a39ab897", "score": "0.5761744", "text": "def _jt_t(self, manager, timestamp):\n\n assocs = manager.association_set.associations_at_timestamp(timestamp)\n n_associated_truths = 0\n for truth in manager.groundtruth_paths:\n for assoc in assocs:\n if truth in assoc.objects:\n n_associated_truths += 1\n break\n return n_associated_truths", "title": "" }, { "docid": "6b917679a9af18ed3fb53570628aa523", "score": "0.56708175", "text": "def count(self):\n _count = 0\n for tick in self.tick_store:\n if self.happy(tick):\n _count += 1\n return _count", "title": "" }, { "docid": "b9131c6f0fb3a13d3dff9b82d2b28b25", "score": "0.5641615", "text": "def _n_t(self, manager, timestamp):\n\n return sum(\n 1\n for track in manager.tracks\n if timestamp in (state.timestamp for state in track.states))", "title": "" }, { "docid": "74c6f4792a84024f7ec273f6814fd61a", "score": "0.55175763", "text": "def utility(self, playerid):\n total = 0\n thisplayer = self.players[playerid]\n for k, other in enumerate(self.players):\n if k != playerid:\n total += self.qualities[\"a\"] * (.5 + thisplayer.consumption[\"a\"]) * (.5 + other.consumption[\n \"a\"]) + self.qualities[\"b\"] * (.5 + thisplayer.consumption[\"b\"]) * (.5 + other.consumption[\"b\"])\n else:\n total += self.isol(thisplayer.consumption[\"a\"])\n return total", "title": "" }, { "docid": "df51e0a958f5ba893fcdc01caa568937", "score": "0.5356508", "text": "def inconclusive_count(self):\n inconc_count = len([i for i, result in enumerate(self.data) if result.inconclusive])\n unknown_count = len([i for i, result in enumerate(self.data) if result.get_verdict() ==\n \"unknown\"])\n return inconc_count + unknown_count", "title": "" }, { "docid": "57bc4a2b74337581316e370b88c60e02", "score": "0.534674", "text": "def _na_t(self, manager, timestamp):\n\n assocs = manager.association_set.associations_at_timestamp(timestamp)\n n_associated_tracks = 0\n for track in manager.tracks:\n for assoc in assocs:\n if track in assoc.objects:\n n_associated_tracks += 1\n break\n return n_associated_tracks", "title": "" }, { "docid": "ed6338b17f6969df554e30b4dedbc526", "score": "0.5326365", "text": "def iHH_counts(self, d):\n a 
= (self.square_gps_dist <= d) & (self.square_gps_dist > 0)\n b = a.sum(axis=0)\n return b", "title": "" }, { "docid": "e67117db2bd38915a5c67263ddd393a1", "score": "0.53229177", "text": "def compute(self):\n\n count = len(self.df['hash'][self.df['merged']].unique())\n return count", "title": "" }, { "docid": "1da7508e22a1d6ff229ee60090220978", "score": "0.5319695", "text": "def candidate_count(self, instructions):\n result = 0\n for inst in instructions:\n if not inst.ignore:\n result += 1\n return result", "title": "" }, { "docid": "9bd9ca57e8ec12cb4bdc3704dd863000", "score": "0.5296362", "text": "def _computeNumIsotxsRecords(self, nuclide):\n numRecords = 2\n metadata = self._getNuclideIO()(nuclide, self, self._lib)._getNuclideMetadata()\n if metadata[\"chiFlag\"] > 1:\n numRecords += 1\n numRecords += sum(1 for _ord in metadata[\"ords\"] if _ord > 0)\n return numRecords", "title": "" }, { "docid": "6a9e1669f83a8ddcf62edd99b8be507d", "score": "0.5267144", "text": "def checksum(input):\r\n checksum_twos = 0\r\n checksum_threes = 0\r\n\r\n for id in input:\r\n c = [v for k,v in Counter(id).items()]\r\n if 2 in c:\r\n checksum_twos += 1\r\n if 3 in c:\r\n checksum_threes += 1\r\n \r\n return checksum_threes * checksum_twos", "title": "" }, { "docid": "a0885832d2a8fef9eeab1b83ffbc6025", "score": "0.5228556", "text": "def count_matches(self, array1, array2):\n # In time samples\n self.admissible_proximity = 60\n m, n = len(array1), len(array2)\n i, j = 0, 0\n count = 0\n while i < m and j < n:\n if abs(array1[i] - array2[j]) < self.admissible_proximity:\n i += 1\n j += 1\n count += 1\n elif array1[i] < array2[j]:\n i += 1\n else:\n j += 1\n return count", "title": "" }, { "docid": "12736a0f6c1a37f940b6d040af70d1f8", "score": "0.5203559", "text": "def total_missed_events(self):\n result = 0\n for val in self.missed.values():\n result += val\n return result", "title": "" }, { "docid": "fdc4fbeeed8e006de28ac7e348ad08d4", "score": "0.51925755", "text": "def count_number_unique_notes(hand: str):\n train_dir = 'train' if hand == 'right' else 'train_left_v1'\n val_dir = 'val' if hand == 'right' else 'val_left_v1'\n test_dir = 'test' if hand == 'right' else 'test_left_v1'\n\n val_notes = handle_preprocess(hand, val_dir)\n train_notes = handle_preprocess(hand, train_dir)\n test_notes = handle_preprocess(hand, test_dir)\n \n notes = set()\n for note_list in (val_notes, train_notes, test_notes):\n for nl in note_list:\n for note in nl:\n notes.add(note)\n\n n_notes = len(notes)\n min_note = min(notes - {0})\n max_note = max(notes)\n\n print(\"PRINTING SET OF NOTES FOR %s HAND\" % (hand.upper()))\n print(notes, min_note, max_note)\n return n_notes", "title": "" }, { "docid": "29112ea7ac88471be645674e414b0c30", "score": "0.5176762", "text": "def missed_events(self):\n count = 0\n for row in range(self.__rowDimension):\n for col in range(self.__colDimension):\n max_id = board_info.get_max_id(col, row)\n count += max_id\n if (col, row) not in self.times_hasEvent.keys():\n self.missed[(col, row)] += max_id\n else:\n events = self.times_hasEvent[(col, row)].keys()\n for i in range(max_id):\n if i + 1 not in events:\n self.missed[(col, row)] += 1\n return count", "title": "" }, { "docid": "d986a0157f0c0e524be2b6e1114b14aa", "score": "0.516423", "text": "def count_true(board):\n result = 0\n for row in board:\n for value in row:\n result += value\n return result", "title": "" }, { "docid": "fc68b1e1e78bfe3fd70c3352d1b5dc85", "score": "0.5151503", "text": "def part1():\n duplicate_count = 0\n triplet_count = 0\n 
for s in read_input(2):\n if has_n_occurance(s, 2):\n duplicate_count += 1\n if has_n_occurance(s, 3):\n triplet_count += 1\n \n return duplicate_count * triplet_count", "title": "" }, { "docid": "65cb5e90089bf4cd8c2662c0df7dfae6", "score": "0.5147053", "text": "def count_num_of_amp_ratio(self,event):\n i = 0\n evid = event[0][0].stats.file.split('.')[0]\n for station in event:\n sta_name = station[0].stats.station\n if sta_name in self.tomoDD_file_dict[evid]:\n phase_amp = phase.get_station_phase_and_amp(station)\n a = phase_amp[1]\n if a:\n i = i + 1\n return i", "title": "" }, { "docid": "4c062abad10f37f9456243cb11ea72e9", "score": "0.51378685", "text": "def observed_otus(counts):\n counts = _validate(counts)\n return (counts != 0).sum()", "title": "" }, { "docid": "a3dcdc1d93ae1d011fc364e9fe13cc88", "score": "0.51093185", "text": "def check_count(self):\n card_points = [self.value_map['numbers'][val[:-1]]\n for val in self.r_hist if val != 'GO']\n total = sum(card_points)\n return total", "title": "" }, { "docid": "804f87b5e6846eb624df230f98928870", "score": "0.51008046", "text": "def compute_n_hit(self, reco_items_id, real_items_id):\n n_TP = 0\n for item_id in real_items_id:\n if item_id in reco_items_id: # O(1) complexity for set\n n_TP += 1\n return n_TP", "title": "" }, { "docid": "6909d9c5892f99a7a7944a14c12821ae", "score": "0.5073896", "text": "def get_empirical_summary(ncodons, alignments, t1, t2):\n subs_counts = numpy.zeros((ncodons, ncodons), dtype=int)\n for alignment in alignments:\n d = dict(alignment)\n for i, j in zip(d[t1], d[t2]):\n subs_counts[i, j] += 1\n return subs_counts", "title": "" }, { "docid": "59240f1312253de33bc5ca72ac8dadb3", "score": "0.5072683", "text": "def calculatehandlen(hand):\n count = 0\n for ke_y in hand:\n if hand[ke_y] > 0:\n count += hand[ke_y]\n return count", "title": "" }, { "docid": "790310adf6efa93f1018540413a9f516", "score": "0.5063865", "text": "def test_count_amino_acids(self):\n for analysis in self.analyses:\n count_dict = analysis.count_amino_acids()\n for i in count_dict:\n self.assertEqual(count_dict[i], self.text.count(i))", "title": "" }, { "docid": "261479e005acbfbf9bc3f0d0d60a43c8", "score": "0.5053106", "text": "def solver_2star(d):\n total = 0\n for group in d:\n seen = {}\n for person in group:\n for answer in person:\n if answer not in seen:\n seen[answer] = 0\n seen[answer] += 1\n\n for answer, count in seen.items():\n if count == len(group):\n total += 1\n\n return total", "title": "" }, { "docid": "ca61348fcf40b57df35a53c713428934", "score": "0.5043481", "text": "def score(hand):\r\n res = 0\r\n for dummy_dice in hand:\r\n temp = hand.count(dummy_dice) * dummy_dice\r\n if temp > res:\r\n res = temp \r\n return res", "title": "" }, { "docid": "0c9a371dc57da5f4b6b371793cf94ec2", "score": "0.5040758", "text": "def sum_readings(self, readings):\n tot = 0\n for i in readings:\n if i > 0:\n tot += 1 # Reduce wall reading (2) to 1.\n return tot", "title": "" }, { "docid": "957ee26c8b9efc75533d25f10961b409", "score": "0.50331044", "text": "def _compute_idfs(self):\r\n \"\"\"compute the idf value for the words in the question alone\"\"\"\r\n self._idfs = dict.fromkeys(set(self._the_question), 0)\r\n for name, text in self._tokened_dict.items():\r\n for q_word in set(self._the_question):\r\n if self._is_in_data(text, q_word):\r\n self._idfs[q_word] += 1\r\n self._make_idfs_log()", "title": "" }, { "docid": "45f68778de7b3f58e0cee64a35361af8", "score": "0.50317407", "text": "def part1(file):\n return sum(len(answers) for answers 
in group_answers_from_file(file))", "title": "" }, { "docid": "c69951cdda952512f552b2fb398660dc", "score": "0.5027997", "text": "def get_cand_counts(self):\n return self.is_cand.sum(axis=1)", "title": "" }, { "docid": "a697cf38297be43915d9f3d2852f0bfd", "score": "0.5023754", "text": "def countHints(self):\n cntArray = [0 for x in range(9)]\n for cell in self.cells:\n for hint in range(9):\n if cell.hints[hint] != None:\n cntArray[hint] += 1\n return cntArray", "title": "" }, { "docid": "8d60327409567219809e2cf7ba96a78d", "score": "0.5023431", "text": "def num_found(self):\n return sum(x==True for x in self.found.values())", "title": "" }, { "docid": "b86574d6ff387d9e68972bdd05c4b8ef", "score": "0.5021213", "text": "def boolCounter(mask, width, height):\n result = 0\n for i in range(0, height):\n for j in range (0,width):\n if(mask[i][j].all() == True):\n result += 1\n return result", "title": "" }, { "docid": "8e57e7701525a20db4912e9df0bd2b34", "score": "0.5020541", "text": "def number_bites_resolved(self) -> int:\n self.unique_bites_resolved = len(set([row[\"bite\"] for row in self.rows if row[\"completed\"] == \"True\"]))\n return self.unique_bites_resolved", "title": "" }, { "docid": "837a689c71c28cedead818379e386a64", "score": "0.5015954", "text": "def no_condition_count(all_conditions):\n no_conditions = 0\n for key in all_conditions:\n if (~all_conditions[key]).all():\n no_conditions += 1\n return no_conditions", "title": "" }, { "docid": "da4378d9b615c50747c6f5dc2283334a", "score": "0.50100935", "text": "def count_correct(output, target):\n with torch.no_grad():\n k = 1\n batch_size = target.size(0)\n \n _, pred = output.topk(k, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n \n return correct_k", "title": "" }, { "docid": "613ab90963bf43af95b852ae8e9869e9", "score": "0.50070107", "text": "def solver_1star(d):\n total = 0\n for group in d:\n seen = set()\n for person in group:\n for answer in person:\n if answer not in seen:\n seen.add(answer)\n total += len(seen)\n\n return total", "title": "" }, { "docid": "549ea47601b40e3dad447289601ec592", "score": "0.49956366", "text": "def test_counts_for_two_studies_and_two_tags(self):\n # There are two studies, two tags, and some decided and some not decided for each tag+study.\n study_versions_of_two_studies = SourceStudyVersionFactory.create_batch(2)\n tags = factories.TagFactory.create_batch(2)\n # Start with making three tagged traits need a decision (one without decision, and one of each decision type).\n to_decide = 3\n counts_to_match = []\n for sv in study_versions_of_two_studies:\n study_dict = {'study_name': sv.study.i_study_name, 'study_pk': sv.study.pk}\n tag_list = []\n for t in tags:\n # Make tagged traits for each tag + study.\n tagged_traits = factories.TaggedTraitFactory.create_batch(\n 5, tag=t, trait__source_dataset__source_study_version=sv)\n # Make some that need decisions.\n study_responses = factories.StudyResponseFactory.create_batch(\n to_decide, status=models.StudyResponse.STATUS_DISAGREE,\n dcc_review__tagged_trait__trait__source_dataset__source_study_version=sv,\n dcc_review__tagged_trait__tag=t)\n # Make one that has each decision type.\n factories.DCCDecisionFactory.create(\n dcc_review=study_responses[0].dcc_review, decision=models.DCCDecision.DECISION_REMOVE)\n factories.DCCDecisionFactory.create(\n dcc_review=study_responses[1].dcc_review, decision=models.DCCDecision.DECISION_CONFIRM)\n tag_dict = 
{'tt_total': to_decide, 'tag_name': t.title, 'study_pk': sv.study.pk, 'tag_pk': t.pk,\n 'study_name': sv.study.i_study_name, 'tt_decision_required_count': to_decide - 2}\n tag_list.append(tag_dict)\n to_decide += 1 # Increment this every time so the counts are distinguishable.\n counts_to_match.append((study_dict, tag_list))\n # counts_to_match = tuple(counts_to_match)\n response = self.client.get(self.get_url())\n context = response.context\n self.assertIn('grouped_study_tag_counts', context)\n counts = context['grouped_study_tag_counts']\n # Sometimes the order of the studies wasn't quite right, and sorting didn't work due to dicts.\n # So that's why this study-by-study matching was needed.\n for study in counts:\n study_to_match = [el for el in counts_to_match if el[0] == study[0]][0]\n self.assertEqual(study, study_to_match)", "title": "" }, { "docid": "36c491e46f536a5f653677bb899fc287", "score": "0.49920633", "text": "def _assoc_distances_sum_t(self, manager, timestamp, mapping, weighting, mapping2=None):\n measure = EuclideanWeighted(mapping=mapping, mapping2=mapping2, weighting=weighting)\n distance_sum = 0\n for assoc in manager.association_set.associations_at_timestamp(timestamp):\n track, truth = assoc.objects\n # Sets aren't ordered, so need to ensure correct path is truth/track\n if isinstance(truth, Track):\n track, truth = truth, track\n distance_sum += measure(track[timestamp], truth[timestamp])\n return distance_sum", "title": "" }, { "docid": "22e7e7c3aaf4464afa0930d19f0f1f5f", "score": "0.49893522", "text": "def cntPhases(df):\n dummy=eventParse(df,1)\n phaselist=dummy.iloc[:,1].value_counts().index.values\n totalphs=len(phaselist)\n\n return totalphs,phaselist", "title": "" }, { "docid": "b4df5e7a984a5ddc7728831465996b85", "score": "0.49861464", "text": "def count_conditions(self):\n total = 0\n if not len(self):\n return 0\n for category in self:\n total += len(self[category].df)\n return total", "title": "" }, { "docid": "66c251799bf3af79b15fede28b9298a3", "score": "0.49816525", "text": "def get_score(self, results, targets, inputs):\n score = 0\n for res, target, used_input in zip(results, targets, inputs):\n if str(res).strip() == target.strip():\n score += 1\n correct = True\n else:\n correct = False\n if self.verbose:\n print(\n f\"Problem: {used_input}, Answer: {res}, Expected Answer: {target}, Correct: {correct}\"\n )\n return score", "title": "" }, { "docid": "f8030b878166f9a3fbe115b32d82ad47", "score": "0.49617463", "text": "def count_same(pairs):\n same_count = 0\n for x, y in pairs:\n if x == y:\n same_count = same_count + 1\n return same_count", "title": "" }, { "docid": "ce215f0e834add582b9188885ed08cea", "score": "0.49546295", "text": "def speaker_accuracy(winners):\n return len(winners[winners[\"Pred\"] == winners[\"Truth\"]]) / len(winners)", "title": "" }, { "docid": "8c8863c442aa596a2dd1c94f9ab3af4c", "score": "0.495121", "text": "def count_occur_once(pair_dict, rule_dict, rule, verbose=False):\n result = count_occur(pair_dict, rule_dict, rule, prime_min=1, target_min=1, prime_max=1, target_max=1)\n # print verbose info\n if verbose:\n print(rule)\n print('# of qualified pairs: {}'.format(len(result)))\n return result", "title": "" }, { "docid": "81098a91b11c45bfe6561e2562cd7c62", "score": "0.4940235", "text": "def part2(file):\n return sum(len(answers) for answers in group_consensus_answers_from_file(file))", "title": "" }, { "docid": "837918a257bc932150df6794b06ccc4e", "score": "0.49379364", "text": "def _repetitions_check(self, data, labels):\r\n 
names = np.unique(labels)\r\n repetitions = 0\r\n for name in names:\r\n count = 0\r\n for lbl in labels:\r\n if name == lbl:\r\n count += 1\r\n if count != repetitions and repetitions != 0:\r\n return 1\r\n repetitions = count\r\n return repetitions", "title": "" }, { "docid": "9194cae61d6b537dc03d881bcdf1dbf2", "score": "0.49113148", "text": "def count_by_state_unsynced(self, arg):\n if self._exp_key is not None:\n exp_trials = [\n tt for tt in self._dynamic_trials if tt[\"exp_key\"] == self._exp_key\n ]\n else:\n exp_trials = self._dynamic_trials\n return self.count_by_state_synced(arg, trials=exp_trials)", "title": "" }, { "docid": "4ee295142c5ed9fd8c4ba8813dec39b9", "score": "0.48974463", "text": "def count_inconsistencies(self):\n result = 0\n for X in self.variables:\n for Y in self.neighbors[X]:\n if not self.constraints(X, self.get(X), Y, self.get(Y)):\n result += 1\n return result // 2", "title": "" }, { "docid": "dd4bf753b7577f90f396cae412bd46d9", "score": "0.4895321", "text": "def ActualCount(self, player):\n count = 0\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n if player == 1 and self.board[i][j].value == \"X\":\n count += 1\n if player == -1 and self.board[i][j].value == \"O\":\n count += 1\n return count", "title": "" }, { "docid": "8d6ae3682ac5828eee085a1abea37393", "score": "0.48837307", "text": "def total_overlaps(overlap_count: Counter) -> int:\n return sum(1 for i in overlap_count.values() if i > 1)", "title": "" }, { "docid": "ee1d0716c47dab4e3015752288ac7cf7", "score": "0.48820785", "text": "def get_live_aids_total_by_department(dep_id: str) -> int:\n related_perimeters = get_all_related_perimeters(dep_id, values=[\"id\"])\n return (\n Aid.objects.live()\n .filter(perimeter_id__in=related_perimeters)\n .distinct()\n .count()\n )", "title": "" }, { "docid": "4489b51c5a16506bb82e332315719fd6", "score": "0.48771366", "text": "def get_correct_lap_count(self):\n correct_qty = 0\n for leg in self.legs:\n assert isinstance(leg, RelayLeg)\n if leg.is_correct():\n correct_qty += 1\n else:\n return correct_qty\n return correct_qty", "title": "" }, { "docid": "0da823c72548c27176f0ae63e03a615c", "score": "0.48749048", "text": "def calculate_result(rolls):\n\n results = {\n 'check_result': 0,\n 'side_effect_result': 0,\n 'triumph': 0,\n 'dispair': 0,\n }\n\n for roll in rolls:\n if roll == 'none':\n continue\n elif roll == 'success':\n results['check_result'] += 1\n elif roll == 'failure':\n results['check_result'] -= 1\n elif roll == 'advantage':\n results['side_effect_result'] += 1\n elif roll == 'threat':\n results['side_effect_result'] -= 1\n elif roll == 'triumph':\n results['triumph'] += 1\n elif roll == 'dispair':\n results['dispair'] += 1\n\n return results", "title": "" }, { "docid": "42992725628a7ee79a2959865a6a7301", "score": "0.48743504", "text": "def correct_cnt_(output, target):\n \n batch_size = target.size(0)\n\n _, pred = output.topk(1, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n correct_ = correct[:1].view(-1).float().sum(0, keepdim=True)\n\n return correct_", "title": "" }, { "docid": "c0dfc8e44ed86a3ad6eb6918348aebfd", "score": "0.48632842", "text": "def get_n_correct(batch, answer):\n return (torch.max(answer, 1)[1].view(\n batch.label.size()) == batch.label).float().sum().item()", "title": "" }, { "docid": "0b5fbdf5022f9a7a8ce46dbd057e0c69", "score": "0.4859147", "text": "def count(self):\n def _count(x, y):\n return x + 1\n\n return self.reduce(_count, 0)", "title": "" }, { 
"docid": "fceae5ceca7bd311b20e8f159475fa34", "score": "0.4853245", "text": "def sumMatchesAndMismatches(segment):\r\n return sum(\r\n [value for (code, value) in segment.cigartuples if code == 0]\r\n )", "title": "" }, { "docid": "d9b939f32c67b11c7f63ff830e392618", "score": "0.4850502", "text": "def _tt_j(self, manager, truth):\n\n assocs = manager.association_set.associations_including_objects(\n [truth])\n timestamps = sorted(s.timestamp for s in truth)\n total_time = datetime.timedelta(0)\n for i_timestamp, timestamp in enumerate(timestamps[:-1]):\n for assoc in assocs:\n # If both timestamps are in one association then add the\n # difference to the total difference and stop looking\n if timestamp in assoc.time_range \\\n and timestamps[i_timestamp + 1] in assoc.time_range:\n total_time += (timestamps[i_timestamp + 1] - timestamp)\n break\n return total_time", "title": "" }, { "docid": "b6fcbd24b30e2bb9a6ef159a8f3b7b73", "score": "0.48498434", "text": "def num_correct(y_true, y_pred):\n gt, pr = get_gt_pr(y_true, y_pred)\n\n intersection = tf.reduce_sum(tf.cast(tf.logical_and(gt, pr), tf.int64))\n return intersection", "title": "" }, { "docid": "4a45522794578a180b553b65a4373081", "score": "0.48484755", "text": "def init_counts(certain, ax, bx, xx):\n n = ax.shape[0]\n aa = certain[:n, :n] + 1e-6 # add pesudocount to avoid divide by 0\n ab = certain[:n, n:] # + 1e-6 # TODO make inter pesudocount to 0\n bb = certain[n:, n:] + 1e-6\n diploid = aa + ab + ab.T + bb\n # TODO change intra inter ratio\n # TODO add pesudocount only at the denominater\n # so if the numerator is 0, then the ratio will also be 0\n\n raa_ax = np.true_divide(aa, (aa + ab))\n rab_ax = np.true_divide(ab, (aa + ab))\n rbb_bx = np.true_divide(bb, (bb + ab.T))\n rba_bx = np.true_divide(ab.T, (bb + ab.T))\n raa_xx = np.true_divide(aa, diploid)\n rbb_xx = np.true_divide(bb, diploid)\n rab_xx = np.true_divide(ab, diploid)\n # assign each uncertain counts to different sources\n aa_ax = np.multiply(raa_ax, ax)\n ab_ax = np.multiply(rab_ax, ax)\n bb_bx = np.multiply(rbb_bx, bx)\n ba_bx = np.multiply(rba_bx, bx) # !!! 
FIX: change from ab to ba\n aa_xx = np.multiply(raa_xx, xx)\n bb_xx = np.multiply(rbb_xx, xx)\n ab_xx = np.multiply(rab_xx, xx)\n # combine reassign counts\n add_aa = aa_ax + aa_ax.T + aa_xx # aa = aa* + a*a + a*a*\n add_bb = bb_bx + bb_bx.T + bb_xx # bb = bb* + b*b + b*b*\n add_ab = ab_ax + ba_bx.T + ab_xx # ab = ab* + a*b + a*b*\n add_mat = np.concatenate((np.concatenate((add_aa, add_ab), axis=1),\n np.concatenate((add_ab.T, add_bb), axis=1)),\n axis=0)\n return add_mat + certain", "title": "" }, { "docid": "7d62c1d32bc7b071ca7f2d133bb07267", "score": "0.48450083", "text": "def get_num_imputed_ctus_per_partyid( imputed_df ):\n num_imputed_ctus_per_partyid = imputed_df.groupby('party_id','imputed_ctu').count()\\\n .where(\"imputed_ctu = 1\")\\\n .select(\"party_id\", col(\"count\").alias(\"num_imputed_ctus\"))\n return num_imputed_ctus_per_partyid", "title": "" }, { "docid": "83c358c7c46498b13b2a2ae8cc289bdb", "score": "0.48371777", "text": "def final_speaker_accuracy(df_res, column):\n\n acc = 0\n count = 0\n for val in df_res.iterrows():\n predic = sorted(val[1][column])\n truth = sorted(val[1][\"Truth\"])\n count += len(truth)\n acc += len(set(predic) & set(truth))\n\n return acc / count", "title": "" }, { "docid": "6fcff4d97cd248c243ed8c2124656163", "score": "0.48289078", "text": "def countBadPairs(self, nums: List[int]) -> int:\n\n m, ans = defaultdict(int), 0\n for i in range(len(nums)):\n ans += i - m[nums[i] - i]\n m[nums[i] - i] += 1\n return ans", "title": "" }, { "docid": "a746f801d9cca083c3d246e59f7832c1", "score": "0.48285982", "text": "def count_beat(self) -> int:\n if self.beat is None:\n return 0\n return np.count_nonzero(self.beat)", "title": "" }, { "docid": "9a9727538eb0e3253fa1f3c408f14269", "score": "0.48163518", "text": "def _get_tc_counts(Nt1o1, Nt0o1, Nt1o0, Nt0o0):\n Nt1 = Nt1o0 + Nt1o1\n Nt0 = Nt0o0 + Nt0o1\n N = Nt0 + Nt1\n return Nt1, Nt0, N", "title": "" }, { "docid": "a2e3aecad6071d9ad6728ab30c472417", "score": "0.4809577", "text": "def match_ts(gold_ts_sequence, pred_ts_sequence):\n # positive, negative and neutral\n tag2tagid = {'POS': 0, 'NEG': 1, 'NEU': 2}\n hit_count, gold_count, pred_count = np.zeros(3), np.zeros(3), np.zeros(3)\n for t in gold_ts_sequence:\n #print(t)\n ts_tag = t[2]\n tid = tag2tagid[ts_tag]\n gold_count[tid] += 1\n for t in pred_ts_sequence:\n ts_tag = t[2]\n tid = tag2tagid[ts_tag]\n if t in gold_ts_sequence:\n hit_count[tid] += 1\n pred_count[tid] += 1\n return hit_count, gold_count, pred_count", "title": "" }, { "docid": "8881357d531207f85c2ffa745437067a", "score": "0.48093936", "text": "def test_inaccurate_count_field(history, sample_chamberdata_record):\n bad_record = sample_chamberdata_record\n bad_record[\"Inputs\"][\"PV_1\"][\"Values\"] = (\n sample_chamberdata_record.get(\"Inputs\").get(\"PV_1\").get(\"Values\")[:40]\n )\n\n bad_record[\"Inputs\"][\"PV_1\"][\"SetPoints\"] = (\n sample_chamberdata_record.get(\"Inputs\").get(\"PV_1\").get(\"SetPoints\")[:40]\n )\n\n partial_data = history._parse_mongo_chamberdata_record(bad_record)\n\n # Test that known source data is preserved:\n for row in partial_data[:40]:\n assert (\n row[3] is not None\n ), \"There should be output data when input data is NOT missing\"\n\n # Test that missing source data -> None in the output data\n for row in partial_data[40:]:\n\n assert row[3] is None, \"Expected empty output data\"\n assert row[2] is None, \"Expected empty output data\"\n\n # This data should not be empty:\n assert row[4] is not None\n assert row[5] is not None", "title": "" }, { 
"docid": "78733dba2bfbfe74f23153299dcc8d11", "score": "0.48078355", "text": "def _get_count_of_data_requests_satisfied(self):\n return StudyDataRequest.objects.filter(study__exact=self, status__exact=1).count()", "title": "" }, { "docid": "f96a58d2c969b0f1af925815685d131c", "score": "0.4807144", "text": "def __count_minus_one(a, b, c, d):\n acc = 0\n if a == -1:\n acc += 1\n if b == -1:\n acc += 1\n if c == -1:\n acc += 1\n if d == -1:\n acc += 1\n return acc", "title": "" }, { "docid": "cca61fb7620bb4b3ceb446baf4da9ed2", "score": "0.48066285", "text": "def conversation_accuracy(df_res, column):\n\n return sum(df_res[column] == df_res[\"Truth\"]) / len(df_res)", "title": "" }, { "docid": "9d6ef41f6c2b83251425758bb31df52c", "score": "0.48055658", "text": "def event_counts_for_rats(self):\n ptable = get_node('/physiology', self.results['table'])\n rats = unique_rats(ptable)\n return { rat: len(ptable.getWhereList('rat==%d'%rat)) for rat in rats }", "title": "" }, { "docid": "ea9e720c437c0697b4f776c696543b2e", "score": "0.47959346", "text": "def _check_counts(self):\n document_count = sum([r[3] for r in self.raw_data])\n instance_count = sum([r[4] for r in self.raw_data])\n average_score = sum([r[2] * r[3] for r in self.raw_data]) / self.document_count\n if not fequal(document_count, self.document_count):\n print 'doc_count', document_count, self.document_count\n if not fequal(instance_count, self.instance_count):\n print 'ins_count', instance_count, self.instance_count\n if not fequal(average_score, self.average_score):\n print 'avg_score', average_score, self.average_score", "title": "" }, { "docid": "2cdc76e04ee9256e97c48dc761a0e896", "score": "0.47937474", "text": "def __len__(self):\n counts = 0\n for key in self._waves.keys():\n counts += len(self._waves[key])\n return counts", "title": "" }, { "docid": "c1d1443997c769106c534fdb530074fc", "score": "0.47924024", "text": "def fourSumCount(self, A: List[int], B: List[int], C: List[int], D: List[int]) -> int:\r\n \r\n # First get the possible combinations of each element in each list (in two parts) using itertools.product\r\n combs_AB = itertools.product(A,B)\r\n combs_CD = itertools.product(C,D)\r\n \r\n # Now get the hashes of the sums of the first two lists using collections.Counter (produces a dictionary of schema sum_value : number_of_occurances\r\n hash_AB_sums = collections.Counter(sum(ab) for ab in combs_AB)\r\n \r\n # Since we have the first hash of sums, to keep the second as small as possible we'll only add to this one if - of the sum is in hash_AB_sums\r\n # ie if the result will be zero\r\n hash_CD_sums = collections.Counter(sum(cd) for cd in combs_CD if -sum(cd) in hash_AB_sums)\r\n\r\n # This step can be refactored and is completely unnecessary since we're already completely looking through both dictionaries\r\n # ... Keeping it for clarity\r\n \r\n # Now iterate over both dicts and take the product of zero-sum possibilities from each... 
\r\n # ie if there's ONE instance of -1 in one list and THREE instances of one in another, the number of possible zero-sums for that entry is ONE*THREE = THREE\r\n zero_sum_count = 0\r\n for i in hash_AB_sums:\r\n for j in hash_CD_sums:\r\n if i + j == 0:\r\n zero_sum_count += hash_AB_sums[i]*hash_CD_sums[j]\r\n\r\n return zero_sum_count", "title": "" }, { "docid": "a7d3e8cad353731970942f71686090a1", "score": "0.47906548", "text": "def sentiment_count(self, speech):\n good_count = [x for x in speech if x in self.good_bad[0]]\n bad_count = [x for x in speech if x in self.good_bad[1]]\n return (good_count, bad_count)", "title": "" }, { "docid": "43f4be88bc7e81e9c1442ca9f647f862", "score": "0.47859046", "text": "def _compute_count(self, edge_types):\n # it's possible for a player not to have any of that edge type, in\n # which case there won't be an entry in the edges dict, so default to\n # the empty dict\n count = 0\n for edge_type in edge_types:\n count += len(self.get_edges().get(edge_type, {}))\n return count", "title": "" }, { "docid": "3cd4beb5b625644f383700a7c37de0ef", "score": "0.47835356", "text": "def count_tokens(data, unk_id=None):\n\n n_tokens = 0\n n_oovs = 0\n for sentence in data:\n n_tokens += len(sentence)\n if unk_id is not None:\n n_oovs += np.count_nonzero(sentence == unk_id)\n return n_tokens, n_oovs", "title": "" }, { "docid": "a3100e32289156ab114cb461801cef88", "score": "0.4782856", "text": "def num_correct_upsets(self):\n\t\tif self.name == \"TRUTH\":\n\t\t\treturn -1\n\t\ttrue = Bracket.objects.filter(name=\"TRUTH\").get(year=self.year)\n\t\tstats = Season_stats.objects.filter(year=self.year).exclude(made_tournament = False).all()\n\t\tstarting64 = Tournament.objects.get(year=self.year)\n\t\tseed = {}\n\t\tfor stat in stats:\n\t\t\tseed[stat.team] = stat.tournament_seed\n\t\tupsets = 0\n\t\tupsets = upsets + int(seed[self.top_left_r1_g1] == 16 and self.top_left_r1_g1_correct)\n\t\tupsets = upsets + int(seed[self.top_left_r1_g2] == 9 and self.top_left_r1_g2_correct)\n\t\tupsets = upsets + int(seed[self.top_left_r1_g3] == 12 and self.top_left_r1_g3_correct)\n\t\tupsets = upsets + int(seed[self.top_left_r1_g4] == 13 and self.top_left_r1_g4_correct)\n\t\tupsets = upsets + int(seed[self.top_left_r1_g5] == 11 and self.top_left_r1_g5_correct)\n\t\tupsets = upsets + int(seed[self.top_left_r1_g6] == 14 and self.top_left_r1_g6_correct)\n\t\tupsets = upsets + int(seed[self.top_left_r1_g7] == 10 and self.top_left_r1_g7_correct)\n\t\tupsets = upsets + int(seed[self.top_left_r1_g8] == 15 and self.top_left_r1_g8_correct)\n\t\tupsets = upsets + int(seed[self.bottom_left_r1_g1] == 16 and self.bottom_left_r1_g1_correct)\n\t\tupsets = upsets + int(seed[self.bottom_left_r1_g2] == 9 and self.bottom_left_r1_g2_correct)\n\t\tupsets = upsets + int(seed[self.bottom_left_r1_g3] == 12 and self.bottom_left_r1_g3_correct)\n\t\tupsets = upsets + int(seed[self.bottom_left_r1_g4] == 13 and self.bottom_left_r1_g4_correct)\n\t\tupsets = upsets + int(seed[self.bottom_left_r1_g5] == 11 and self.bottom_left_r1_g5_correct)\n\t\tupsets = upsets + int(seed[self.bottom_left_r1_g6] == 14 and self.bottom_left_r1_g6_correct)\n\t\tupsets = upsets + int(seed[self.bottom_left_r1_g7] == 10 and self.bottom_left_r1_g7_correct)\n\t\tupsets = upsets + int(seed[self.bottom_left_r1_g8] == 15 and self.bottom_left_r1_g8_correct)\n\t\tupsets = upsets + int(seed[self.top_right_r1_g1] == 16 and self.top_right_r1_g1_correct)\n\t\tupsets = upsets + int(seed[self.top_right_r1_g2] == 9 and 
self.top_right_r1_g2_correct)\n\t\tupsets = upsets + int(seed[self.top_right_r1_g3] == 12 and self.top_right_r1_g3_correct)\n\t\tupsets = upsets + int(seed[self.top_right_r1_g4] == 13 and self.top_right_r1_g4_correct)\n\t\tupsets = upsets + int(seed[self.top_right_r1_g5] == 11 and self.top_right_r1_g5_correct)\n\t\tupsets = upsets + int(seed[self.top_right_r1_g6] == 14 and self.top_right_r1_g6_correct)\n\t\tupsets = upsets + int(seed[self.top_right_r1_g7] == 10 and self.top_right_r1_g7_correct)\n\t\tupsets = upsets + int(seed[self.top_right_r1_g8] == 15 and self.top_right_r1_g8_correct)\n\t\tupsets = upsets + int(seed[self.bottom_right_r1_g1] == 16 and self.bottom_right_r1_g1_correct)\n\t\tupsets = upsets + int(seed[self.bottom_right_r1_g2] == 9 and self.bottom_right_r1_g2_correct)\n\t\tupsets = upsets + int(seed[self.bottom_right_r1_g3] == 12 and self.bottom_right_r1_g3_correct)\n\t\tupsets = upsets + int(seed[self.bottom_right_r1_g4] == 13 and self.bottom_right_r1_g4_correct)\n\t\tupsets = upsets + int(seed[self.bottom_right_r1_g5] == 11 and self.bottom_right_r1_g5_correct)\n\t\tupsets = upsets + int(seed[self.bottom_right_r1_g6] == 14 and self.bottom_right_r1_g6_correct)\n\t\tupsets = upsets + int(seed[self.bottom_right_r1_g7] == 10 and self.bottom_right_r1_g7_correct)\n\t\tupsets = upsets + int(seed[self.bottom_right_r1_g8] == 15 and self.bottom_right_r1_g8_correct)\n\t\tupsets = upsets + int((true.top_left_r2_g1 == self.top_left_r2_g1) and ((seed[true.top_left_r2_g1] > seed[true.top_left_r1_g1]) or (seed[true.top_left_r2_g1] > seed[true.top_left_r1_g2])))\n\t\tupsets = upsets + int((true.top_left_r2_g2 == self.top_left_r2_g2) and ((seed[true.top_left_r2_g2] > seed[true.top_left_r1_g3]) or (seed[true.top_left_r2_g2] > seed[true.top_left_r1_g4])))\n\t\tupsets = upsets + int((true.top_left_r2_g3 == self.top_left_r2_g3) and ((seed[true.top_left_r2_g3] > seed[true.top_left_r1_g5]) or (seed[true.top_left_r2_g3] > seed[true.top_left_r1_g6])))\n\t\tupsets = upsets + int((true.top_left_r2_g4 == self.top_left_r2_g4) and ((seed[true.top_left_r2_g4] > seed[true.top_left_r1_g7]) or (seed[true.top_left_r2_g4] > seed[true.top_left_r1_g8])))\n\t\tupsets = upsets + int((true.bottom_left_r2_g1 == self.bottom_left_r2_g1) and ((seed[true.bottom_left_r2_g1] > seed[true.bottom_left_r1_g1]) or (seed[true.bottom_left_r2_g1] > seed[true.bottom_left_r1_g2])))\n\t\tupsets = upsets + int((true.bottom_left_r2_g2 == self.bottom_left_r2_g2) and ((seed[true.bottom_left_r2_g2] > seed[true.bottom_left_r1_g3]) or (seed[true.bottom_left_r2_g2] > seed[true.bottom_left_r1_g4])))\n\t\tupsets = upsets + int((true.bottom_left_r2_g3 == self.bottom_left_r2_g3) and ((seed[true.bottom_left_r2_g3] > seed[true.bottom_left_r1_g5]) or (seed[true.bottom_left_r2_g3] > seed[true.bottom_left_r1_g6])))\n\t\tupsets = upsets + int((true.bottom_left_r2_g4 == self.bottom_left_r2_g4) and ((seed[true.bottom_left_r2_g4] > seed[true.bottom_left_r1_g7]) or (seed[true.bottom_left_r2_g4] > seed[true.bottom_left_r1_g8])))\n\t\tupsets = upsets + int((true.top_right_r2_g1 == self.top_right_r2_g1) and ((seed[true.top_right_r2_g1] > seed[true.top_right_r1_g1]) or (seed[true.top_right_r2_g1] > seed[true.top_right_r1_g2])))\n\t\tupsets = upsets + int((true.top_right_r2_g2 == self.top_right_r2_g2) and ((seed[true.top_right_r2_g2] > seed[true.top_right_r1_g3]) or (seed[true.top_right_r2_g2] > seed[true.top_right_r1_g4])))\n\t\tupsets = upsets + int((true.top_right_r2_g3 == self.top_right_r2_g3) and ((seed[true.top_right_r2_g3] > seed[true.top_right_r1_g5]) 
or (seed[true.top_right_r2_g3] > seed[true.top_right_r1_g6])))\n\t\tupsets = upsets + int((true.top_right_r2_g4 == self.top_right_r2_g4) and ((seed[true.top_right_r2_g4] > seed[true.top_right_r1_g7]) or (seed[true.top_right_r2_g4] > seed[true.top_right_r1_g8])))\n\t\tupsets = upsets + int((true.bottom_right_r2_g1 == self.bottom_right_r2_g1) and ((seed[true.bottom_right_r2_g1] > seed[true.bottom_right_r1_g1]) or (seed[true.bottom_right_r2_g1] > seed[true.bottom_right_r1_g2])))\n\t\tupsets = upsets + int((true.bottom_right_r2_g2 == self.bottom_right_r2_g2) and ((seed[true.bottom_right_r2_g2] > seed[true.bottom_right_r1_g3]) or (seed[true.bottom_right_r2_g2] > seed[true.bottom_right_r1_g4])))\n\t\tupsets = upsets + int((true.bottom_right_r2_g3 == self.bottom_right_r2_g3) and ((seed[true.bottom_right_r2_g3] > seed[true.bottom_right_r1_g5]) or (seed[true.bottom_right_r2_g3] > seed[true.bottom_right_r1_g6])))\n\t\tupsets = upsets + int((true.bottom_right_r2_g4 == self.bottom_right_r2_g4) and ((seed[true.bottom_right_r2_g4] > seed[true.bottom_right_r1_g7]) or (seed[true.bottom_right_r2_g4] > seed[true.bottom_right_r1_g8])))\n\t\tupsets = upsets + int((true.top_left_ss_g1 == self.top_left_ss_g1) and ((seed[true.top_left_ss_g1] > seed[true.top_left_r2_g1]) or (seed[true.top_left_ss_g1] > seed[true.top_left_r2_g2])))\n\t\tupsets = upsets + int((true.top_left_ss_g2 == self.top_left_ss_g2) and ((seed[true.top_left_ss_g2] > seed[true.top_left_r2_g3]) or (seed[true.top_left_ss_g2] > seed[true.top_left_r2_g4])))\n\t\tupsets = upsets + int((true.bottom_left_ss_g1 == self.bottom_left_ss_g1) and ((seed[true.bottom_left_ss_g1] > seed[true.bottom_left_r2_g1]) or (seed[true.bottom_left_ss_g1] > seed[true.bottom_left_r2_g2])))\n\t\tupsets = upsets + int((true.bottom_left_ss_g2 == self.bottom_left_ss_g2) and ((seed[true.bottom_left_ss_g2] > seed[true.bottom_left_r2_g3]) or (seed[true.bottom_left_ss_g2] > seed[true.bottom_left_r2_g4])))\n\t\tupsets = upsets + int((true.top_right_ss_g1 == self.top_right_ss_g1) and ((seed[true.top_right_ss_g1] > seed[true.top_right_r2_g1]) or (seed[true.top_right_ss_g1] > seed[true.top_right_r2_g2])))\n\t\tupsets = upsets + int((true.top_right_ss_g2 == self.top_right_ss_g2) and ((seed[true.top_right_ss_g2] > seed[true.top_right_r2_g3]) or (seed[true.top_right_ss_g2] > seed[true.top_right_r2_g4])))\n\t\tupsets = upsets + int((true.bottom_right_ss_g1 == self.bottom_right_ss_g1) and ((seed[true.bottom_right_ss_g1] > seed[true.bottom_right_r2_g1]) or (seed[true.bottom_right_ss_g1] > seed[true.bottom_right_r2_g2])))\n\t\tupsets = upsets + int((true.bottom_right_ss_g2 == self.bottom_right_ss_g2) and ((seed[true.bottom_right_ss_g2] > seed[true.bottom_right_r2_g3]) or (seed[true.bottom_right_ss_g2] > seed[true.bottom_right_r2_g4])))\n\t\tupsets = upsets + int((true.top_left_ee == self.top_left_ee) and ((seed[true.top_left_ee] > seed[true.top_left_ss_g1]) or (seed[true.top_left_ee] > seed[true.top_left_ss_g2])))\n\t\tupsets = upsets + int((true.bottom_left_ee == self.bottom_left_ee) and ((seed[true.bottom_left_ee] > seed[true.bottom_left_ss_g1]) or (seed[true.bottom_left_ee] > seed[true.bottom_left_ss_g2])))\n\t\tupsets = upsets + int((true.top_right_ee == self.top_right_ee) and ((seed[true.top_right_ee] > seed[true.top_right_ss_g1]) or (seed[true.top_right_ee] > seed[true.top_right_ss_g2])))\n\t\tupsets = upsets + int((true.bottom_left_ee == self.bottom_left_ee) and ((seed[true.bottom_right_ee] > seed[true.bottom_right_ss_g1]) or (seed[true.bottom_right_ee] > 
seed[true.bottom_right_ss_g2])))\n\t\tupsets = upsets + int((true.ff_left == self.ff_left) and ((seed[true.ff_left] > seed[true.top_left_ee]) or (seed[true.ff_left] > seed[true.bottom_left_ee])))\n\t\tupsets = upsets + int((true.ff_right == self.ff_right) and ((seed[true.ff_right] > seed[true.top_right_ee]) or (seed[true.ff_right] > seed[true.bottom_right_ee])))\n\t\tupsets = upsets + int((true.championship == self.championship) and ((seed[true.championship] > seed[true.ff_left]) or (seed[true.championship] > seed[true.ff_right])))\n\t\treturn upsets", "title": "" }, { "docid": "2ec168eb83ef3750e3e2a15640301b5d", "score": "0.47800586", "text": "def acc(prediction,truth):\n counter1 = 0\n for i in range(len(prediction)):\n counter1+=int((prediction[i] >= truth[i]).all())\n return counter1/len(prediction)", "title": "" }, { "docid": "69d1084f2374a967e1c22032f386c274", "score": "0.4779697", "text": "def count_unique(outcome, values_of_interest):\n return len(set(outcome))", "title": "" }, { "docid": "01abaccbdeaf9666ccdb3c74ef0b1816", "score": "0.47796547", "text": "def calc_recall(self, query_id):\n # intersection of retrieved and relevant / number relevant\n num_relevant = len(self.relevant[query_id])\n return (float(len(list(set(self.retrieved[query_id]) &\n set(self.relevant[query_id])))) / float(num_relevant))", "title": "" }, { "docid": "6dca1602a2f95d4ec23ea7bb12a1a66b", "score": "0.47778574", "text": "def reduce(self):\n self.known = 0 \n for column_index in range(9):\n self.check_has_solution(column_index) \n return 1", "title": "" }, { "docid": "09d2336718f442070b756fe3922a0156", "score": "0.47766745", "text": "def identify_anomalies(data):\n arr = []\n z = calc_z_scores(data)\n for i in range(0,z.shape[1]):\n arr = np.concatenate((np.array(arr),np.where(z[:, i] > 1.65)[0]))\n \n unique, counts = np.unique(arr, return_counts=True)\n ids = dict(zip(unique, counts))\n return ids\n #check for ids which have more than or equalto 3 outlying features", "title": "" }, { "docid": "7a6905846a5cc168f0be6568c5603e04", "score": "0.47738764", "text": "def count_valid_tours(idxs, axis=1):\n valid_tours = 0\n for i in range(idxs.shape[1]):\n \n idx_i = idxs[:, i]\n idx_i_unique = np.unique(idx_i)\n if idx_i.shape[0] == idx_i_unique.shape[0] + 1 and idx_i[0] == idx_i[-1]:\n valid_tours += 1\n return valid_tours", "title": "" }, { "docid": "6d10628ca14a416d999438e6c8105356", "score": "0.4771415", "text": "def score(self):\n trick_value = 0\n for card in self.trick:\n if card != Card(0, -1):\n if card.suit == self.atout_suit:\n trick_value += atout_values[card.rank.rank]\n else:\n trick_value += generic_values[card.rank.rank]\n # 10 de der\n if self.trick_number == 8:\n trick_value += 10\n return trick_value", "title": "" }, { "docid": "49dd52804d1524fded2fb5af961ffaf6", "score": "0.4767587", "text": "def countOnes(data):\n\n\tc = data.count() # a method built into the bitarray class, which counts the number of ones in the bitarray\n\t\n\tmaxiumumC = len(data) # since the length of the bitarray clearly sets an upper bound on c. 
\n\tratio = float(c)/float(maxiumumC)\n\treturn ratio", "title": "" }, { "docid": "b8a9d21c55203cf4f987f47fc49ddd6f", "score": "0.47646955", "text": "def n_good_cadences(self):\n return self.quality_mask.sum()", "title": "" }, { "docid": "7274b0cabcd7d22df7523ba33048ac24", "score": "0.47646105", "text": "def get_ideal_counts(self, shots=8000, reverse=True):\n # check if circuit contains measurement\n gate_dict = self.get_gate_count()\n if 'measure' not in gate_dict:\n # add all possible measurements\n self.add_all_measurements()\n # get statevector\n if reverse:\n job = execute(self.reverse_bits(), Aer.get_backend('aer_simulator'), shots=shots)\n else:\n job = execute(self, Aer.get_backend('aer_simulator'), shots=shots)\n result = job.result()\n return result.get_counts()", "title": "" }, { "docid": "9d0b6320da18f2e40f2a2ae0e643a2a4", "score": "0.4762804", "text": "def test_example_day6_pt1():\n assert count_sum_of_unique_yes_answers(example_data) == 11", "title": "" }, { "docid": "e52122e0d1480e527efbfc262e8e1a5c", "score": "0.47556895", "text": "def count(self):\r\n return len(all())", "title": "" }, { "docid": "c2c829dc497e7f47779cb492d5a3b5de", "score": "0.47550002", "text": "def find_unique_id_cycling(log):\n comparison_count = 0\n for checking in log:\n found_instances = 0\n\n for comparison in log:\n comparison_count += 1\n if checking == comparison:\n found_instances += 1\n\n if found_instances == 1:\n print(comparison_count)\n return checking", "title": "" }, { "docid": "97c83d7c3eb4552b300b1e722e35aa2b", "score": "0.47544953", "text": "def keys_needed(self, counterparty):\n total = 0\n for t in self.txs:\n for to in t.outs:\n if to.spk_type == \"p2sh-p2wpkh\" and to.counterparty != counterparty:\n continue\n #for NN type, exactly one will always be needed\n total += 1\n for t in self.backout_txs:\n for to in t.outs:\n #backout outpoints are never NN\n if to.counterparty == counterparty:\n total += 1\n return total", "title": "" }, { "docid": "c7ceea025c57c746e4bc9bbcd185d5d7", "score": "0.47544846", "text": "def test_counts_for_two_needfollowup_disagree_tagged_traits_from_same_study_and_tag(self):\n study_version = SourceStudyVersionFactory.create()\n tag = factories.TagFactory.create()\n study_responses = factories.StudyResponseFactory.create_batch(\n 2, status=models.StudyResponse.STATUS_DISAGREE,\n dcc_review__tagged_trait__trait__source_dataset__source_study_version=study_version,\n dcc_review__tagged_trait__tag=tag)\n response = self.client.get(self.get_url())\n context = response.context\n self.assertIn('grouped_study_tag_counts', context)\n counts = context['grouped_study_tag_counts']\n self.assertEqual(len(counts), 1) # One study.\n study1 = counts[0]\n self.assertEqual(study1[1][0]['tt_total'], 2)\n self.assertEqual(study1[1][0]['tt_decision_required_count'], 2)", "title": "" }, { "docid": "f85b30f45e3676828da7d07b8a2d4f57", "score": "0.4753043", "text": "def __len__(self):\n if self.ignore_large_samples:\n return len(self.ids_for_small_samples)\n else:\n return len(self.reactant_mols)", "title": "" }, { "docid": "1866e638eac228fa8a077cf3b3c3789d", "score": "0.47514945", "text": "def count_in(x, y):\n x_shape = x.shape\n y_shape = y.shape\n\n # replicate tensors so that we can compare 1-1 each x element vs each y element\n x_replicated = np.tile(x, (1, y_shape[0]))\n y_replicated = np.tile(y, (x_shape[0], 1))\n\n # reshape replicated elements\n x_replicated = np.reshape(x_replicated, [x_shape[0] * y_shape[0], x_shape[1]])\n y_replicated = np.reshape(y_replicated, 
[y_shape[0] * x_shape[0], y_shape[1]])\n\n # element wise equality, followed by equality on all elements\n equal = (x_replicated == y_replicated)\n # x samples which have true on all columns are samples already in y\n equal_all_dims = equal.all(axis=1)\n # summing all the True values\n total_already_in = equal_all_dims.astype(float).sum()\n return total_already_in", "title": "" }, { "docid": "8c939935d540008158b9cf127bf92a4e", "score": "0.47488862", "text": "def __len__(self):\n return int(Z3_apply_result_get_num_subgoals(self.ctx.ref(), self.result))", "title": "" }, { "docid": "ea8d7b565e2efec2d0105bcb8c25b7e7", "score": "0.4747662", "text": "def count_values(outcome, values_of_interest):\n count = 0\n for value in values_of_interest:\n count += outcome.count(value)\n return count", "title": "" }, { "docid": "cb83eeb1bf19c91b040dbaea4af898bd", "score": "0.47433943", "text": "def check_accuracy(self, labels):\r\n a = self.layers[-1].feature\r\n counter = 0\r\n for i in range(a.shape[0]):\r\n pred = a[i, :]\r\n label_i = labels[i, :]\r\n if pred[np.where(label_i == 1)] == pred.max():\r\n counter += 1\r\n counter /= labels.shape[0]\r\n\r\n return counter", "title": "" }, { "docid": "da70cbcb5e6dea308b1e265eb1171087", "score": "0.47321528", "text": "def sum_unknown_ranks(self):\n sum=0\n for n in self.unknown_ranks:\n sum += n\n return sum", "title": "" } ]
b6314c694f141ca98b11b20cdc9c471f
Produce any customization definitions (types, fields, message destinations, etc) that should be installed by `resilientcircuits customize`
[ { "docid": "e8cb9bf1c39847a06d843046e12286ea", "score": "0.5533729", "text": "def customization_data(client=None):\n\n # This import data contains:\n # Function inputs:\n # artifact_value\n # Message Destinations:\n # bluecoat_site_review\n # Functions:\n # bluecoat_site_review_lookup\n # Workflows:\n # bluecoat_site_review_search\n # Rules:\n # Example: Bluecoat Site Review\n\n\n yield ImportDefinition(u\"\"\"\neyJ0YXNrX29yZGVyIjogW10sICJ3b3JrZmxvd3MiOiBbeyJ1dWlkIjogImVlOTE2NGRlLTBmN2Ut\nNGZkOC1iNjVmLWUxMTBlODRjZTFjOSIsICJkZXNjcmlwdGlvbiI6ICJUaGlzIHdvcmtmbG93IGRl\nbW9uc3RyYXRlcyB0aGUgQmx1ZWNvYXQgU2l0ZSBSZXZpZXcgbG9vayB1cCBmdW5jdGlvbi4gVGhl\nIHJ1bGUgd29ya3Mgb24gdXJsLWJhc2VkIGFydGlmYWN0cyIsICJvYmplY3RfdHlwZSI6ICJhcnRp\nZmFjdCIsICJleHBvcnRfa2V5IjogImJsdWVjb2F0X3NpdGVfcmV2aWV3X3NlYXJjaCIsICJ3b3Jr\nZmxvd19pZCI6IDQ5LCAibGFzdF9tb2RpZmllZF9ieSI6ICJhQGV4YW1wbGUuY29tIiwgImNvbnRl\nbnQiOiB7InhtbCI6ICI8P3htbCB2ZXJzaW9uPVwiMS4wXCIgZW5jb2Rpbmc9XCJVVEYtOFwiPz48\nZGVmaW5pdGlvbnMgeG1sbnM9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9CUE1OLzIwMTAwNTI0\nL01PREVMXCIgeG1sbnM6YnBtbmRpPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEw\nMDUyNC9ESVwiIHhtbG5zOm9tZ2RjPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1\nMjQvRENcIiB4bWxuczpvbWdkaT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0\nL0RJXCIgeG1sbnM6cmVzaWxpZW50PVwiaHR0cDovL3Jlc2lsaWVudC5pYm0uY29tL2JwbW5cIiB4\nbWxuczp4c2Q9XCJodHRwOi8vd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYVwiIHhtbG5zOnhzaT1c\nImh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hLWluc3RhbmNlXCIgdGFyZ2V0TmFtZXNw\nYWNlPVwiaHR0cDovL3d3dy5jYW11bmRhLm9yZy90ZXN0XCI+PHByb2Nlc3MgaWQ9XCJibHVlY29h\ndF9zaXRlX3Jldmlld19zZWFyY2hcIiBpc0V4ZWN1dGFibGU9XCJ0cnVlXCIgbmFtZT1cIkV4YW1w\nbGU6IEJsdWVjb2F0IFNpdGUgUmV2aWV3IFNlYXJjaFwiPjxkb2N1bWVudGF0aW9uPlRoaXMgd29y\na2Zsb3cgZGVtb25zdHJhdGVzIHRoZSBCbHVlY29hdCBTaXRlIFJldmlldyBsb29rIHVwIGZ1bmN0\naW9uLiBUaGUgcnVsZSB3b3JrcyBvbiB1cmwtYmFzZWQgYXJ0aWZhY3RzPC9kb2N1bWVudGF0aW9u\nPjxzdGFydEV2ZW50IGlkPVwiU3RhcnRFdmVudF8xNTVhc3htXCI+PG91dGdvaW5nPlNlcXVlbmNl\nRmxvd18wdW54bDJ0PC9vdXRnb2luZz48L3N0YXJ0RXZlbnQ+PHNlcnZpY2VUYXNrIGlkPVwiU2Vy\ndmljZVRhc2tfMWliZWZ2N1wiIG5hbWU9XCJCbHVlY29hdCBTaXRlIFJldmlldyBMb29rdXBcIiBy\nZXNpbGllbnQ6dHlwZT1cImZ1bmN0aW9uXCI+PGV4dGVuc2lvbkVsZW1lbnRzPjxyZXNpbGllbnQ6\nZnVuY3Rpb24gdXVpZD1cIjJiMjY5MzJhLTYwZGYtNDA0NS1hZThkLTBlYjMxOWRlNjAxZFwiPntc\nImlucHV0c1wiOnt9LFwicG9zdF9wcm9jZXNzaW5nX3NjcmlwdFwiOlwiIyBSZXN1bHQ6IHsnaW5w\ndXRzJzoge3UnaW5jaWRlbnRfaWQnOiAyMDk5LCB1J2FydGlmYWN0X3ZhbHVlJzogdSdodHRwOi8v\nbmpqaWFqaWUuY29tL3Byb2plY3QvZ2NhbC9nYW9jZW5nL2luZGV4XzMuaHRtJ30sICdtZXRyaWNz\nJzogeydwYWNrYWdlJzogJ2ZuLWJsdWVjb2F0LXNpdGUtcmV2aWV3JywgJ3RpbWVzdGFtcCc6ICcy\nMDE5LTAzLTI1IDE2OjU4OjA4JywgJ3BhY2thZ2VfdmVyc2lvbic6ICcxLjAuMCcsICdob3N0Jzog\nJ21hcmtzLW1icC5jYW1icmlkZ2UuaWJtLmNvbScsICd2ZXJzaW9uJzogJzEuMCcsICdleGVjdXRp\nb25fdGltZV9tcyc6IDgwMzQ1fSwgJ3N1Y2Nlc3MnOiBUcnVlLCAnY29udGVudCc6IHt1J0NhdGVn\nb3JpemF0aW9uUmVzdWx0Jzoge3UnY2F0ZWdvcml6YXRpb24nOiB7dSdjYXRlZ29yaXphdGlvbic6\nIHt1J251bSc6IHUnNDMnLCB1J25hbWUnOiB1J01hbGljaW91cyBTb3VyY2VzL01hbG5ldHMnfX0s\nIHUnbG9ja2VkJzogdSdmYWxzZScsIHUndHJhbnNsYXRlZENhdGVnb3JpZXMnOiB7dSdmcic6IHt1\nJ251bSc6IHUnNDMnLCB1J25hbWUnOiB1J1NvdXJjZXMgbWFsdmVpbGxhbnRlcy9tYWxuZXRzIChN\nYWxpY2lvdXMgU291cmNlcy9NYWxuZXRzKSd9LCB1J2RlJzoge3UnbnVtJzogdSc0MycsIHUnbmFt\nZSc6IHUnQlxcXFx4ZjZzYXJ0aWdlIFF1ZWxsZW4vTWFsbmV0cyAoTWFsaWNpb3VzIFNvdXJjZXMv\nTWFsbmV0cyknfSwgdSd6aCc6IHt1J251bSc6IHUnNDMnLCB1J25hbWUnOiB1J1xcXFx1NjA3Nlxc\nXFx1NjEwZlxcXFx1Njc2NVxcXFx1NmU5MC9cXFxcdTYwNzZcXFxcdTYxMGZcXFxcdTdmNTFcXFxc\ndTdlZGMgKE1hbGljaW91cyBTb3VyY2VzL01hbG5ldHMpJ30sIHUnemhfVFcnOiB7dSdudW0nOiB1\nJzQzJywgdSduYW1lJ
zogdSdcXFxcdTYwZTFcXFxcdTYxMGZcXFxcdTRmODZcXFxcdTZlOTAvXFxc\nXHU2MGUxXFxcXHU2MTBmXFxcXHU3ZGIyXFxcXHU4ZGVmIChNYWxpY2lvdXMgU291cmNlcy9NYWxu\nZXRzKSd9LCB1J2VuJzoge3UnbnVtJzogdSc0MycsIHUnbmFtZSc6IHUnTWFsaWNpb3VzIFNvdXJj\nZXMvTWFsbmV0cyd9LCB1J2phJzoge3UnbnVtJzogdSc0MycsIHUnbmFtZSc6IHUnXFxcXHU2MGFh\nXFxcXHU2MTBmXFxcXHUzMDZlXFxcXHUzMDQyXFxcXHUzMDhiXFxcXHU3NjdhXFxcXHU0ZmUxXFxc\nXHU1MTQzL1xcXFx1MzBkZVxcXFx1MzBlYlxcXFx1MzBjZFxcXFx1MzBjM1xcXFx1MzBjOCAoTWFs\naWNpb3VzIFNvdXJjZXMvTWFsbmV0cyknfSwgdSdlcyc6IHt1J251bSc6IHUnNDMnLCB1J25hbWUn\nOiB1J0Z1ZW50ZXMgZGUgc29mdHdhcmUgbWFsaWNpb3NvL21hbG5ldHMgKE1hbGljaW91cyBTb3Vy\nY2VzL01hbG5ldHMpJ319LCB1J3VybCc6IHUnaHR0cDovL25qamlhamllLmNvbS9wcm9qZWN0L2dj\nYWwvZ2FvY2VuZy9pbmRleF8zLmh0bScsIHUncmF0ZURhdGUnOiB1XFxcIkxhc3QgVGltZSBSYXRl\nZC9SZXZpZXdlZDogJmd0OyB7e2RheXN9fSBkYXlzIHt7bGVnYWN5fX1UaGUgVVJMIHN1Ym1pdHRl\nZCBmb3IgcmV2aWV3IHdhcyByYXRlZCBtb3JlIHRoYW4ge3tkYXlzfX0gZGF5cyBhZ28uICBUaGUg\nZGVmYXVsdCBzZXR0aW5nIGZvciBTeW1hbnRlYyBTRyBjbGllbnRzIHRvIGRvd25sb2FkIHJhdGlu\nZyBjaGFuZ2VzIGlzIG9uY2UgYSBkYXkuICBUaGVyZSBpcyBubyBuZWVkIHRvIHNob3cgcmF0aW5n\ncyBvbGRlciB0aGFuIHRoaXMuIFNpbmNlIFN5bWFudGVjJ3MgZGVza3RvcCBjbGllbnQgSzkgYW5k\nIGNlcnRhaW4gT0VNIHBhcnRuZXJzIHVwZGF0ZSBkaWZmZXJlbnRseSwgcmF0aW5ncyBtYXkgZGlm\nZmVyIGZyb20gdGhvc2Ugb2YgYSBTeW1hbnRlYyBTRyBhcyB3ZWxsIGFzIHRob3NlIHByZXNlbnQg\nb24gdGhlIFNpdGUgUmV2aWV3IFRvb2wuXFxcIiwgdSdmb2xsb3dlZFVybCc6IE5vbmUsIHUnbG9j\na2VkU3BlY2lhbE5vdGUnOiBOb25lLCB1J3RocmVhdHJpc2tMZXZlbEVuJzogTm9uZSwgdSdsaW5r\nYWJsZSc6IHUnZmFsc2UnLCB1J3Jlc29sdmVkRGV0YWlsJzoge3UncmVzb2x2ZUVuYWJsZWQnOiB1\nJ3RydWUnLCB1J2lwQWRkcmVzcyc6IHUnMTU0LjIxMC4yMzUuNzInfSwgdSdzZWN1cml0eUNhdGVn\nb3J5SWRzJzoge3Unc2VjdXJpdHlDYXRlZ29yeUlkcyc6IFt1JzQzJywgdScxMDInLCB1JzQ0Jywg\ndSc5MicsIHUnMTgnXX0sIHUnbXVsdGlwbGVNZXNzYWdlJzogTm9uZSwgdSdzdWdnZXN0aW9uJzog\nTm9uZSwgdSdzZWN1cml0eUNhdGVnb3J5JzogdSd0cnVlJywgdSdyYXRpbmdEdHNDdXRvZmYnOiB1\nJzcnLCB1J211bHRpcGxlJzogdSdmYWxzZScsIHUndW5yYXRlZCc6IHUnZmFsc2UnLCB1J2N1clRy\nYWNraW5nSWQnOiB1JzM5OTkyNycsIHUncmF0aW5nRHRzJzogdSdPTERFUicsIHUnbG9ja2VkTWVz\nc2FnZSc6IE5vbmUsIHUndGhyZWF0cmlza0xldmVsJzogTm9uZX19LCAncmF3JzogJ3tcXFwiQ2F0\nZWdvcml6YXRpb25SZXN1bHRcXFwiOiB7XFxcImNhdGVnb3JpemF0aW9uXFxcIjoge1xcXCJjYXRl\nZ29yaXphdGlvblxcXCI6IHtcXFwibnVtXFxcIjogXFxcIjQzXFxcIiwgXFxcIm5hbWVcXFwiOiBc\nXFwiTWFsaWNpb3VzIFNvdXJjZXMvTWFsbmV0c1xcXCJ9fSwgXFxcImxvY2tlZFxcXCI6IFxcXCJm\nYWxzZVxcXCIsIFxcXCJ0cmFuc2xhdGVkQ2F0ZWdvcmllc1xcXCI6IHtcXFwiZnJcXFwiOiB7XFxc\nIm51bVxcXCI6IFxcXCI0M1xcXCIsIFxcXCJuYW1lXFxcIjogXFxcIlNvdXJjZXMgbWFsdmVpbGxh\nbnRlcy9tYWxuZXRzIChNYWxpY2lvdXMgU291cmNlcy9NYWxuZXRzKVxcXCJ9LCBcXFwiZGVcXFwi\nOiB7XFxcIm51bVxcXCI6IFxcXCI0M1xcXCIsIFxcXCJuYW1lXFxcIjogXFxcIkJcXFxcXFxcXHUw\nMGY2c2FydGlnZSBRdWVsbGVuL01hbG5ldHMgKE1hbGljaW91cyBTb3VyY2VzL01hbG5ldHMpXFxc\nIn0sIFxcXCJ6aFxcXCI6IHtcXFwibnVtXFxcIjogXFxcIjQzXFxcIiwgXFxcIm5hbWVcXFwiOiBc\nXFwiXFxcXFxcXFx1NjA3NlxcXFxcXFxcdTYxMGZcXFxcXFxcXHU2NzY1XFxcXFxcXFx1NmU5MC9c\nXFxcXFxcXHU2MDc2XFxcXFxcXFx1NjEwZlxcXFxcXFxcdTdmNTFcXFxcXFxcXHU3ZWRjIChNYWxp\nY2lvdXMgU291cmNlcy9NYWxuZXRzKVxcXCJ9LCBcXFwiemhfVFdcXFwiOiB7XFxcIm51bVxcXCI6\nIFxcXCI0M1xcXCIsIFxcXCJuYW1lXFxcIjogXFxcIlxcXFxcXFxcdTYwZTFcXFxcXFxcXHU2MTBm\nXFxcXFxcXFx1NGY4NlxcXFxcXFxcdTZlOTAvXFxcXFxcXFx1NjBlMVxcXFxcXFxcdTYxMGZcXFxc\nXFxcXHU3ZGIyXFxcXFxcXFx1OGRlZiAoTWFsaWNpb3VzIFNvdXJjZXMvTWFsbmV0cylcXFwifSwg\nXFxcImVuXFxcIjoge1xcXCJudW1cXFwiOiBcXFwiNDNcXFwiLCBcXFwibmFtZVxcXCI6IFxcXCJN\nYWxpY2lvdXMgU291cmNlcy9NYWxuZXRzXFxcIn0sIFxcXCJqYVxcXCI6IHtcXFwibnVtXFxcIjog\nXFxcIjQzXFxcIiwgXFxcIm5hbWVcXFwiOiBcXFwiXFxcXFxcXFx1NjBhYVxcXFxcXFxcdTYxMGZc\nXFxcXFxcXHUzMDZlXFxcXFxcXFx1MzA0MlxcXFxcXFxcdTMwOGJcXFxcXFxcXH
U3NjdhXFxcXFxc\nXFx1NGZlMVxcXFxcXFxcdTUxNDMvXFxcXFxcXFx1MzBkZVxcXFxcXFxcdTMwZWJcXFxcXFxcXHUz\nMGNkXFxcXFxcXFx1MzBjM1xcXFxcXFxcdTMwYzggKE1hbGljaW91cyBTb3VyY2VzL01hbG5ldHMp\nXFxcIn0sIFxcXCJlc1xcXCI6IHtcXFwibnVtXFxcIjogXFxcIjQzXFxcIiwgXFxcIm5hbWVcXFwi\nOiBcXFwiRnVlbnRlcyBkZSBzb2Z0d2FyZSBtYWxpY2lvc28vbWFsbmV0cyAoTWFsaWNpb3VzIFNv\ndXJjZXMvTWFsbmV0cylcXFwifX0sIFxcXCJ1cmxcXFwiOiBcXFwiaHR0cDovL25qamlhamllLmNv\nbS9wcm9qZWN0L2djYWwvZ2FvY2VuZy9pbmRleF8zLmh0bVxcXCIsIFxcXCJyYXRlRGF0ZVxcXCI6\nIFxcXCJMYXN0IFRpbWUgUmF0ZWQvUmV2aWV3ZWQ6ICZndDsge3tkYXlzfX0gZGF5cyB7e2xlZ2Fj\neX19VGhlIFVSTCBzdWJtaXR0ZWQgZm9yIHJldmlldyB3YXMgcmF0ZWQgbW9yZSB0aGFuIHt7ZGF5\nc319IGRheXMgYWdvLiAgVGhlIGRlZmF1bHQgc2V0dGluZyBmb3IgU3ltYW50ZWMgU0cgY2xpZW50\ncyB0byBkb3dubG9hZCByYXRpbmcgY2hhbmdlcyBpcyBvbmNlIGEgZGF5LiAgVGhlcmUgaXMgbm8g\nbmVlZCB0byBzaG93IHJhdGluZ3Mgb2xkZXIgdGhhbiB0aGlzLiBTaW5jZSBTeW1hbnRlY1xcXFwn\ncyBkZXNrdG9wIGNsaWVudCBLOSBhbmQgY2VydGFpbiBPRU0gcGFydG5lcnMgdXBkYXRlIGRpZmZl\ncmVudGx5LCByYXRpbmdzIG1heSBkaWZmZXIgZnJvbSB0aG9zZSBvZiBhIFN5bWFudGVjIFNHIGFz\nIHdlbGwgYXMgdGhvc2UgcHJlc2VudCBvbiB0aGUgU2l0ZSBSZXZpZXcgVG9vbC5cXFwiLCBcXFwi\nZm9sbG93ZWRVcmxcXFwiOiBudWxsLCBcXFwibG9ja2VkU3BlY2lhbE5vdGVcXFwiOiBudWxsLCBc\nXFwidGhyZWF0cmlza0xldmVsRW5cXFwiOiBudWxsLCBcXFwibGlua2FibGVcXFwiOiBcXFwiZmFs\nc2VcXFwiLCBcXFwicmVzb2x2ZWREZXRhaWxcXFwiOiB7XFxcInJlc29sdmVFbmFibGVkXFxcIjog\nXFxcInRydWVcXFwiLCBcXFwiaXBBZGRyZXNzXFxcIjogXFxcIjE1NC4yMTAuMjM1LjcyXFxcIn0s\nIFxcXCJzZWN1cml0eUNhdGVnb3J5SWRzXFxcIjoge1xcXCJzZWN1cml0eUNhdGVnb3J5SWRzXFxc\nIjogW1xcXCI0M1xcXCIsIFxcXCIxMDJcXFwiLCBcXFwiNDRcXFwiLCBcXFwiOTJcXFwiLCBcXFwi\nMThcXFwiXX0sIFxcXCJtdWx0aXBsZU1lc3NhZ2VcXFwiOiBudWxsLCBcXFwic3VnZ2VzdGlvblxc\nXCI6IG51bGwsIFxcXCJzZWN1cml0eUNhdGVnb3J5XFxcIjogXFxcInRydWVcXFwiLCBcXFwicmF0\naW5nRHRzQ3V0b2ZmXFxcIjogXFxcIjdcXFwiLCBcXFwibXVsdGlwbGVcXFwiOiBcXFwiZmFsc2Vc\nXFwiLCBcXFwidW5yYXRlZFxcXCI6IFxcXCJmYWxzZVxcXCIsIFxcXCJjdXJUcmFja2luZ0lkXFxc\nIjogXFxcIjM5OTkyN1xcXCIsIFxcXCJyYXRpbmdEdHNcXFwiOiBcXFwiT0xERVJcXFwiLCBcXFwi\nbG9ja2VkTWVzc2FnZVxcXCI6IG51bGwsIFxcXCJ0aHJlYXRyaXNrTGV2ZWxcXFwiOiBudWxsfX0n\nLCAncmVhc29uJzogTm9uZSwgJ3ZlcnNpb24nOiAnMS4wJ31cXG5pZiBpc2luc3RhbmNlKHJlc3Vs\ndHMuY29udGVudFsnQ2F0ZWdvcml6YXRpb25SZXN1bHQnXVsnY2F0ZWdvcml6YXRpb24nXVsnY2F0\nZWdvcml6YXRpb24nXSxsaXN0KTpcXG4gIGNhdGVnb3JpemF0aW9uX2xpc3QgPSBbY2F0ZWdvcml6\nYXRpb25bJ25hbWUnXSBmb3IgY2F0ZWdvcml6YXRpb24gaW4gcmVzdWx0cy5jb250ZW50WydDYXRl\nZ29yaXphdGlvblJlc3VsdCddWydjYXRlZ29yaXphdGlvbiddWydjYXRlZ29yaXphdGlvbiddXVxc\nbiAgY2F0ZWdvcml6YXRpb25fbmFtZSA9IHVcXFwiLCBcXFwiLmpvaW4oY2F0ZWdvcml6YXRpb25f\nbGlzdClcXG5lbHNlOlxcbiAgY2F0ZWdvcml6YXRpb25fbmFtZSA9IHJlc3VsdHMuY29udGVudFsn\nQ2F0ZWdvcml6YXRpb25SZXN1bHQnXVsnY2F0ZWdvcml6YXRpb24nXVsnY2F0ZWdvcml6YXRpb24n\nXVsnbmFtZSddXFxuICBcXG5leGlzdGluZ19kZXNjcmlwdGlvbiA9IGFydGlmYWN0LmRlc2NyaXB0\naW9uLmNvbnRlbnQrJ1xcXFxuJyBpZiBhcnRpZmFjdC5kZXNjcmlwdGlvbiBlbHNlIFxcXCJcXFwi\nXFxuXFxuYXJ0aWZhY3QuZGVzY3JpcHRpb24gPSB1XFxcInt9Qmx1ZWNvYXQgQ2F0ZWdvcml6YXRp\nb246IHt9XFxcIi5mb3JtYXQoZXhpc3RpbmdfZGVzY3JpcHRpb24sIGNhdGVnb3JpemF0aW9uX25h\nbWUpXFxuICBcXG5cXG5cIixcInByZV9wcm9jZXNzaW5nX3NjcmlwdFwiOlwiaW5wdXRzLmFydGlm\nYWN0X3ZhbHVlID0gYXJ0aWZhY3QudmFsdWVcXG5cIn08L3Jlc2lsaWVudDpmdW5jdGlvbj48L2V4\ndGVuc2lvbkVsZW1lbnRzPjxpbmNvbWluZz5TZXF1ZW5jZUZsb3dfMHVueGwydDwvaW5jb21pbmc+\nPG91dGdvaW5nPlNlcXVlbmNlRmxvd18xZ3JzY290PC9vdXRnb2luZz48L3NlcnZpY2VUYXNrPjxz\nZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3dfMHVueGwydFwiIHNvdXJjZVJlZj1cIlN0YXJ0\nRXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlNlcnZpY2VUYXNrXzFpYmVmdjdcIi8+PGVuZEV2\nZW50IGlkPVwiRW5kRXZlbnRfMDdxajl4M1wiPjxpbmNvbWluZz5TZXF1ZW5jZUZsb3dfMWdyc2Nv\ndDwvaW5jb21pbmc+PC9lbmRFdmVud
D48c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzFn\ncnNjb3RcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFza18xaWJlZnY3XCIgdGFyZ2V0UmVmPVwiRW5k\nRXZlbnRfMDdxajl4M1wiLz48dGV4dEFubm90YXRpb24gaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4\naXl0XCI+PHRleHQ+U3RhcnQgeW91ciB3b3JrZmxvdyBoZXJlPC90ZXh0PjwvdGV4dEFubm90YXRp\nb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIHNvdXJjZVJlZj1cIlN0\nYXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIi8+\nPHRleHRBbm5vdGF0aW9uIGlkPVwiVGV4dEFubm90YXRpb25fMWpod2JjaFwiPjx0ZXh0PlJlc3Vs\ndHMgYXJlIGFwcGVuZGVkIHRvIHRoZSBhcnRpZmFjdCBkZXNjcmlwdGlvbjwvdGV4dD48L3RleHRB\nbm5vdGF0aW9uPjxhc3NvY2lhdGlvbiBpZD1cIkFzc29jaWF0aW9uXzB6MzgzZWpcIiBzb3VyY2VS\nZWY9XCJTZXJ2aWNlVGFza18xaWJlZnY3XCIgdGFyZ2V0UmVmPVwiVGV4dEFubm90YXRpb25fMWpo\nd2JjaFwiLz48L3Byb2Nlc3M+PGJwbW5kaTpCUE1ORGlhZ3JhbSBpZD1cIkJQTU5EaWFncmFtXzFc\nIj48YnBtbmRpOkJQTU5QbGFuZSBicG1uRWxlbWVudD1cInVuZGVmaW5lZFwiIGlkPVwiQlBNTlBs\nYW5lXzFcIj48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlN0YXJ0RXZlbnRfMTU1YXN4\nbVwiIGlkPVwiU3RhcnRFdmVudF8xNTVhc3htX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIz\nNlwiIHdpZHRoPVwiMzZcIiB4PVwiMTYyXCIgeT1cIjE4OFwiLz48YnBtbmRpOkJQTU5MYWJlbD48\nb21nZGM6Qm91bmRzIGhlaWdodD1cIjBcIiB3aWR0aD1cIjkwXCIgeD1cIjE1N1wiIHk9XCIyMjNc\nIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5TaGFw\nZSBicG1uRWxlbWVudD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIiBpZD1cIlRleHRBbm5vdGF0\naW9uXzFreHhpeXRfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjMwXCIgd2lkdGg9XCIxMDBc\nIiB4PVwiOTlcIiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdl\nIGJwbW5FbGVtZW50PVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIGlkPVwiQXNzb2NpYXRpb25fMXNl\ndWo0OF9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMTY5XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2lu\ndFwiIHk9XCIyMjBcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIxNTNcIiB4c2k6dHlwZT1cIm9tZ2Rj\nOlBvaW50XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBi\ncG1uRWxlbWVudD1cIlNlcnZpY2VUYXNrXzFpYmVmdjdcIiBpZD1cIlNlcnZpY2VUYXNrXzFpYmVm\ndjdfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgwXCIgd2lkdGg9XCIxMDBcIiB4PVwiMjc4\nXCIgeT1cIjE2NlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxl\nbWVudD1cIlNlcXVlbmNlRmxvd18wdW54bDJ0XCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMHVueGwydF9k\naVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMTk4XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9\nXCIyMDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIyNzhcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50\nXCIgeT1cIjIwNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEz\nXCIgd2lkdGg9XCI5MFwiIHg9XCIxOTNcIiB5PVwiMTg0LjVcIi8+PC9icG1uZGk6QlBNTkxhYmVs\nPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiRW5kRXZl\nbnRfMDdxajl4M1wiIGlkPVwiRW5kRXZlbnRfMDdxajl4M19kaVwiPjxvbWdkYzpCb3VuZHMgaGVp\nZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjQ1NVwiIHk9XCIxODhcIi8+PGJwbW5kaTpCUE1O\nTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiOTBcIiB4PVwiNDI4XCIg\neT1cIjIyN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6\nQlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMWdyc2NvdFwiIGlkPVwiU2VxdWVu\nY2VGbG93XzFncnNjb3RfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjM3OFwiIHhzaTp0eXBlPVwi\nb21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNDU1XCIgeHNpOnR5\ncGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJv\ndW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiOTBcIiB4PVwiMzcxLjVcIiB5PVwiMTg0LjVcIi8+\nPC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJw\nbW5FbGVtZW50PVwiVGV4dEFubm90YXRpb25fMWpod2JjaFwiIGlkPVwiVGV4dEFubm90YXRpb25f\nMWpod2JjaF9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiNDNcIiB3aWR0aD1cIjE0OFwiIHg9\nXCIzNzFcIiB5PVwiODBcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYn
Bt\nbkVsZW1lbnQ9XCJBc3NvY2lhdGlvbl8wejM4M2VqXCIgaWQ9XCJBc3NvY2lhdGlvbl8wejM4M2Vq\nX2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIzNzBcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIg\neT1cIjE2OFwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjQyMVwiIHhzaTp0eXBlPVwib21nZGM6UG9p\nbnRcIiB5PVwiMTIzXCIvPjwvYnBtbmRpOkJQTU5FZGdlPjwvYnBtbmRpOkJQTU5QbGFuZT48L2Jw\nbW5kaTpCUE1ORGlhZ3JhbT48L2RlZmluaXRpb25zPiIsICJ3b3JrZmxvd19pZCI6ICJibHVlY29h\ndF9zaXRlX3Jldmlld19zZWFyY2giLCAidmVyc2lvbiI6IDEwfSwgImxhc3RfbW9kaWZpZWRfdGlt\nZSI6IDE1NTM1NTY5OTM2NzEsICJjcmVhdG9yX2lkIjogImFAZXhhbXBsZS5jb20iLCAiYWN0aW9u\ncyI6IFtdLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiYmx1ZWNvYXRfc2l0ZV9yZXZpZXdfc2VhcmNo\nIiwgIm5hbWUiOiAiRXhhbXBsZTogQmx1ZWNvYXQgU2l0ZSBSZXZpZXcgU2VhcmNoIn1dLCAiYWN0\naW9ucyI6IFt7ImxvZ2ljX3R5cGUiOiAiYWxsIiwgIm5hbWUiOiAiRXhhbXBsZTogQmx1ZWNvYXQg\nU2l0ZSBSZXZpZXciLCAidmlld19pdGVtcyI6IFtdLCAidHlwZSI6IDEsICJ3b3JrZmxvd3MiOiBb\nImJsdWVjb2F0X3NpdGVfcmV2aWV3X3NlYXJjaCJdLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3Qi\nLCAidGltZW91dF9zZWNvbmRzIjogODY0MDAsICJ1dWlkIjogIjNiYTA4Y2VjLWY1NzMtNDg5Ny05\nYzEyLTMwOTNiMTVhNGUzMSIsICJhdXRvbWF0aW9ucyI6IFtdLCAiZXhwb3J0X2tleSI6ICJFeGFt\ncGxlOiBCbHVlY29hdCBTaXRlIFJldmlldyIsICJjb25kaXRpb25zIjogW3sidHlwZSI6IG51bGws\nICJldmFsdWF0aW9uX2lkIjogbnVsbCwgImZpZWxkX25hbWUiOiAiYXJ0aWZhY3QudHlwZSIsICJt\nZXRob2QiOiAiaW4iLCAidmFsdWUiOiBbIkROUyBOYW1lIiwgIlVSTCJdfV0sICJpZCI6IDY5LCAi\nbWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbXX1dLCAibGF5b3V0cyI6IFtdLCAiZXhwb3J0X2Zvcm1h\ndF92ZXJzaW9uIjogMiwgImlkIjogMjEsICJpbmR1c3RyaWVzIjogbnVsbCwgInBoYXNlcyI6IFtd\nLCAiYWN0aW9uX29yZGVyIjogW10sICJnZW9zIjogbnVsbCwgImxvY2FsZSI6IG51bGwsICJzZXJ2\nZXJfdmVyc2lvbiI6IHsibWFqb3IiOiAzMSwgInZlcnNpb24iOiAiMzEuMC40MjU0IiwgImJ1aWxk\nX251bWJlciI6IDQyNTQsICJtaW5vciI6IDB9LCAidGltZWZyYW1lcyI6IG51bGwsICJ3b3Jrc3Bh\nY2VzIjogW10sICJhdXRvbWF0aWNfdGFza3MiOiBbXSwgImZ1bmN0aW9ucyI6IFt7ImRpc3BsYXlf\nbmFtZSI6ICJCbHVlY29hdCBTaXRlIFJldmlldyBMb29rdXAiLCAiZGVzY3JpcHRpb24iOiB7ImNv\nbnRlbnQiOiAiVGhpcyBmdW5jdGlvbiB0YWtlcyBhbiBhcnRpZmFjdCBvZiB0eXBlIFVSTCBvciBE\nTlMgbmFtZSBhbmQgcmV0dXJucyB0aG9zZSByZXN1bHRzIGFzIGEganNvbiBvYmplY3QuIiwgImZv\ncm1hdCI6ICJ0ZXh0In0sICJjcmVhdG9yIjogeyJkaXNwbGF5X25hbWUiOiAiYWJsZSBiYWNrZXIi\nLCAidHlwZSI6ICJ1c2VyIiwgImlkIjogMywgIm5hbWUiOiAiYUBleGFtcGxlLmNvbSJ9LCAidmll\nd19pdGVtcyI6IFt7InNob3dfaWYiOiBudWxsLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwg\nInNob3dfbGlua19oZWFkZXIiOiBmYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250\nZW50IjogIjliYTQ5ODg3LTBkY2YtNDBjZS1hNWVhLTljMGM0M2Y4MzFiZiIsICJzdGVwX2xhYmVs\nIjogbnVsbH1dLCAiZXhwb3J0X2tleSI6ICJibHVlY29hdF9zaXRlX3Jldmlld19sb29rdXAiLCAi\ndXVpZCI6ICIyYjI2OTMyYS02MGRmLTQwNDUtYWU4ZC0wZWIzMTlkZTYwMWQiLCAibGFzdF9tb2Rp\nZmllZF9ieSI6IHsiZGlzcGxheV9uYW1lIjogImFibGUgYmFja2VyIiwgInR5cGUiOiAidXNlciIs\nICJpZCI6IDMsICJuYW1lIjogImFAZXhhbXBsZS5jb20ifSwgInZlcnNpb24iOiAyLCAid29ya2Zs\nb3dzIjogW3siZGVzY3JpcHRpb24iOiBudWxsLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAi\nYWN0aW9ucyI6IFtdLCAibmFtZSI6ICJFeGFtcGxlOiBCbHVlY29hdCBTaXRlIFJldmlldyBTZWFy\nY2giLCAid29ya2Zsb3dfaWQiOiA0OSwgInByb2dyYW1tYXRpY19uYW1lIjogImJsdWVjb2F0X3Np\ndGVfcmV2aWV3X3NlYXJjaCIsICJ1dWlkIjogbnVsbH1dLCAibGFzdF9tb2RpZmllZF90aW1lIjog\nMTU1MzU1Njk3NjM2NCwgImRlc3RpbmF0aW9uX2hhbmRsZSI6ICJibHVlY29hdF9zaXRlX3Jldmll\ndyIsICJpZCI6IDM3LCAibmFtZSI6ICJibHVlY29hdF9zaXRlX3Jldmlld19sb29rdXAifV0sICJu\nb3RpZmljYXRpb25zIjogbnVsbCwgInJlZ3VsYXRvcnMiOiBudWxsLCAiaW5jaWRlbnRfdHlwZXMi\nOiBbeyJjcmVhdGVfZGF0ZSI6IDE1NTM1NTk0NDcxNzEsICJkZXNjcmlwdGlvbiI6ICJDdXN0b21p\nemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAiZXhwb3J0X2tleSI6ICJDdXN0b21pemF0aW9u\nIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAiaWQiOiAwLCAibmFtZSI6ICJDdXN0b21pemF0aW9uIFBh\nY2thZ2VzIChpbnRlcm5hbCkiLCAidXBkYXRlX2Rhd
GUiOiAxNTUzNTU5NDQ3MTcxLCAidXVpZCI6\nICJiZmVlYzJkNC0zNzcwLTExZTgtYWQzOS00YTAwMDQwNDRhYTAiLCAiZW5hYmxlZCI6IGZhbHNl\nLCAic3lzdGVtIjogZmFsc2UsICJwYXJlbnRfaWQiOiBudWxsLCAiaGlkZGVuIjogZmFsc2V9XSwg\nInNjcmlwdHMiOiBbXSwgInR5cGVzIjogW10sICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFt7InV1\naWQiOiAiYzg5YmNlNmQtMDFmOC00N2U3LThkNGMtNWQ3OTNiMWVhNmI2IiwgImV4cG9ydF9rZXki\nOiAiYmx1ZWNvYXRfc2l0ZV9yZXZpZXciLCAibmFtZSI6ICJCbHVlY29hdCBTaXRlIFJldmlldyIs\nICJkZXN0aW5hdGlvbl90eXBlIjogMCwgInByb2dyYW1tYXRpY19uYW1lIjogImJsdWVjb2F0X3Np\ndGVfcmV2aWV3IiwgImV4cGVjdF9hY2siOiB0cnVlLCAidXNlcnMiOiBbImFAZXhhbXBsZS5jb20i\nXX1dLCAiaW5jaWRlbnRfYXJ0aWZhY3RfdHlwZXMiOiBbXSwgInJvbGVzIjogW10sICJmaWVsZHMi\nOiBbeyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjogMCwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9\nLCAidGV4dCI6ICJTaW11bGF0aW9uIiwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAicHJlZml4Ijog\nbnVsbCwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiA1MSwgInJlYWRfb25seSI6IHRydWUsICJ1\ndWlkIjogImMzZjBlM2VkLTIxZTEtNGQ1My1hZmZiLWZlNWNhMzMwOGNjYSIsICJjaG9zZW4iOiBm\nYWxzZSwgImlucHV0X3R5cGUiOiAiYm9vbGVhbiIsICJ0b29sdGlwIjogIldoZXRoZXIgdGhlIGlu\nY2lkZW50IGlzIGEgc2ltdWxhdGlvbiBvciBhIHJlZ3VsYXIgaW5jaWRlbnQuICBUaGlzIGZpZWxk\nIGlzIHJlYWQtb25seS4iLCAiaW50ZXJuYWwiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAi\ndGVtcGxhdGVzIjogW10sICJleHBvcnRfa2V5IjogImluY2lkZW50L2luY190cmFpbmluZyIsICJo\naWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAibmFtZSI6ICJpbmNfdHJhaW5pbmciLCAiZGVwcmVj\nYXRlZCI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJ2YWx1ZXMi\nOiBbXX0sIHsib3BlcmF0aW9ucyI6IFtdLCAidHlwZV9pZCI6IDExLCAib3BlcmF0aW9uX3Blcm1z\nIjoge30sICJ0ZXh0IjogImFydGlmYWN0X3ZhbHVlIiwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAi\ncHJlZml4IjogbnVsbCwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAyODUsICJyZWFkX29ubHki\nOiBmYWxzZSwgInV1aWQiOiAiOWJhNDk4ODctMGRjZi00MGNlLWE1ZWEtOWMwYzQzZjgzMWJmIiwg\nImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgInRvb2x0aXAiOiAiIiwgImlu\ndGVybmFsIjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRlbXBsYXRlcyI6IFtdLCAiZXhw\nb3J0X2tleSI6ICJfX2Z1bmN0aW9uL2FydGlmYWN0X3ZhbHVlIiwgImhpZGVfbm90aWZpY2F0aW9u\nIjogZmFsc2UsICJwbGFjZWhvbGRlciI6ICIiLCAibmFtZSI6ICJhcnRpZmFjdF92YWx1ZSIsICJk\nZXByZWNhdGVkIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgInZh\nbHVlcyI6IFtdfV0sICJvdmVycmlkZXMiOiBbXSwgImV4cG9ydF9kYXRlIjogMTU1MzU1OTQzMzAy\nNn0=\n\"\"\"\n )", "title": "" } ]
[ { "docid": "0c3174bb75572433d27a14a7034f28bf", "score": "0.64324254", "text": "def apply_customization(self, args):\n pass", "title": "" }, { "docid": "29cce1b4c52c4beb66fdf4d95ee4eda4", "score": "0.56733376", "text": "def customizing(self, p_customizing: str):", "title": "" }, { "docid": "dc8a29a64ddfaa262279b1631e7b438c", "score": "0.5274856", "text": "def _augment_pipeline_cfg(self):", "title": "" }, { "docid": "61e8feb0cb79f796de9d66b4be9a0d1e", "score": "0.52658665", "text": "def customization(request):\n\n # Check that if a logo has been specified, the file exists.\n if cust_vals['c_logo'] and not os.path.isfile(cust_vals['c_logo_path']):\n sys.stderr('Error: File for logo does not exist at ' + cust_vals['c_logo_path'] + '\\nUsing text logo instead.')\n cust_vals['c_logo'] = False\n return cust_vals", "title": "" }, { "docid": "24ee012d4df61077073d3cbb2837ce53", "score": "0.523526", "text": "def customization_data(client=None):\n\n res_file = os.path.join(os.path.dirname(__file__), RES_FILE)\n if not os.path.isfile(res_file):\n raise FileNotFoundError(\"{} not found\".format(RES_FILE))\n\n with io.open(res_file, mode='rt') as f:\n b64_data = base64.b64encode(f.read().encode('utf-8'))\n yield ImportDefinition(b64_data)", "title": "" }, { "docid": "24ee012d4df61077073d3cbb2837ce53", "score": "0.523526", "text": "def customization_data(client=None):\n\n res_file = os.path.join(os.path.dirname(__file__), RES_FILE)\n if not os.path.isfile(res_file):\n raise FileNotFoundError(\"{} not found\".format(RES_FILE))\n\n with io.open(res_file, mode='rt') as f:\n b64_data = base64.b64encode(f.read().encode('utf-8'))\n yield ImportDefinition(b64_data)", "title": "" }, { "docid": "24ee012d4df61077073d3cbb2837ce53", "score": "0.523526", "text": "def customization_data(client=None):\n\n res_file = os.path.join(os.path.dirname(__file__), RES_FILE)\n if not os.path.isfile(res_file):\n raise FileNotFoundError(\"{} not found\".format(RES_FILE))\n\n with io.open(res_file, mode='rt') as f:\n b64_data = base64.b64encode(f.read().encode('utf-8'))\n yield ImportDefinition(b64_data)", "title": "" }, { "docid": "b2aef56925ca401968120c4b24070dcc", "score": "0.5199545", "text": "def test_cutomize():\n cs = customize('test', 'm_name')\n owner = FalseOwner()\n f = lambda x: 1\n cs(f)\n cs.customize(owner, 'dd')\n assert owner.test.args == ('m_name', f, (), 'custom')", "title": "" }, { "docid": "03181b345ea0b779f81c43ab555e6189", "score": "0.514219", "text": "def customization_data(client=None):\n\n # This import data contains:\n # Function inputs:\n # urlscanio_public\n # urlscanio_referer\n # urlscanio_url\n # urlscanio_useragent\n # Message Destinations:\n # urlscanio\n # Functions:\n # urlscanio\n # Workflows:\n # example_urlscanio\n # Rules:\n # Example: urlscan.io\n\n yield 
ImportDefinition(u\"\"\"\neyJ0YXNrX29yZGVyIjogW10sICJ3b3JrZmxvd3MiOiBbeyJ1dWlkIjogIjIxYjg0MWJiLWYzZjMt\nNDFiNy05MmExLWM0NGYwMjdkMTRhMSIsICJkZXNjcmlwdGlvbiI6ICIiLCAib2JqZWN0X3R5cGUi\nOiAiYXJ0aWZhY3QiLCAiZXhwb3J0X2tleSI6ICJleGFtcGxlX3VybHNjYW5pbyIsICJ3b3JrZmxv\nd19pZCI6IDE2OCwgImxhc3RfbW9kaWZpZWRfYnkiOiAiaHB5bGVAcmVzaWxpZW50c3lzdGVtcy5j\nb20iLCAiY29udGVudCI6IHsieG1sIjogIjw/eG1sIHZlcnNpb249XCIxLjBcIiBlbmNvZGluZz1c\nIlVURi04XCI/PjxkZWZpbml0aW9ucyB4bWxucz1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQ\nTU4vMjAxMDA1MjQvTU9ERUxcIiB4bWxuczpicG1uZGk9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3Bl\nYy9CUE1OLzIwMTAwNTI0L0RJXCIgeG1sbnM6b21nZGM9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3Bl\nYy9ERC8yMDEwMDUyNC9EQ1wiIHhtbG5zOm9tZ2RpPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMv\nREQvMjAxMDA1MjQvRElcIiB4bWxuczpyZXNpbGllbnQ9XCJodHRwOi8vcmVzaWxpZW50LmlibS5j\nb20vYnBtblwiIHhtbG5zOnhzZD1cImh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hXCIg\neG1sbnM6eHNpPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2VcIiB0\nYXJnZXROYW1lc3BhY2U9XCJodHRwOi8vd3d3LmNhbXVuZGEub3JnL3Rlc3RcIj48cHJvY2VzcyBp\nZD1cImV4YW1wbGVfdXJsc2NhbmlvXCIgaXNFeGVjdXRhYmxlPVwidHJ1ZVwiIG5hbWU9XCJFeGFt\ncGxlOiB1cmxzY2FuLmlvXCI+PGRvY3VtZW50YXRpb24vPjxzdGFydEV2ZW50IGlkPVwiU3RhcnRF\ndmVudF8xNTVhc3htXCI+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18xN2RvNDZ0PC9vdXRnb2luZz48\nL3N0YXJ0RXZlbnQ+PHNlcnZpY2VUYXNrIGlkPVwiU2VydmljZVRhc2tfMGMzZXN1blwiIG5hbWU9\nXCJTY2FuIHdpdGggdXJsc2Nhbi5pb1wiIHJlc2lsaWVudDp0eXBlPVwiZnVuY3Rpb25cIj48ZXh0\nZW5zaW9uRWxlbWVudHM+PHJlc2lsaWVudDpmdW5jdGlvbiB1dWlkPVwiZDE5YzFmMDAtYjRmMS00\nNDgwLWI4YTMtN2JkZDE5MTQzMDQxXCI+e1wiaW5wdXRzXCI6e1wiZWU5MjYzYzEtNDMyYS00MWE4\nLThiYmQtNjkxN2QzNWZiM2FjXCI6e1wiaW5wdXRfdHlwZVwiOlwic3RhdGljXCIsXCJzdGF0aWNf\naW5wdXRcIjp7XCJib29sZWFuX3ZhbHVlXCI6ZmFsc2UsXCJtdWx0aXNlbGVjdF92YWx1ZVwiOltd\nfX19LFwicHJlX3Byb2Nlc3Npbmdfc2NyaXB0XCI6XCIjIFRoaXMgaXMgYW4gYXJ0aWZhY3Qgd29y\na2Zsb3c7IFxcbiMgVGhlIFVSTCB0byBzY2FuIGlzIHRoZSBhcnRpZmFjdCB2YWx1ZVxcbmlucHV0\ncy51cmxzY2FuaW9fdXJsID0gYXJ0aWZhY3QudmFsdWVcIixcInJlc3VsdF9uYW1lXCI6XCJ1cmxz\nY2FuaW9cIixcInBvc3RfcHJvY2Vzc2luZ19zY3JpcHRcIjpcIiMgVGhlIHJlc3VsdCBjb250YWlu\ncyxcXG4jIHtcXG4jICAgXFxcInBuZ191cmxcXFwiOiB0aGUgVVJMIG9mIHRoZSBzY3JlZW5zaG90\nIGltYWdlXFxuIyAgIFxcXCJwbmdfYmFzZTY0Y29udGVudFxcXCI6IHRoZSBiYXNlNjQtZW5jb2Rl\nZCBzY3JlZW5zaG90IChQTkcpXFxuIyAgIFxcXCJyZXBvcnRfdXJsXFxcIjogdGhlIFVSTCBvZiB0\naGUgSlNPTiByZXBvcnRfdXJsXFxuIyAgIFxcXCJyZXBvcnRcXFwiOiB0aGUgSlNPTiByZXBvcnQs\nIHdoaWNoIHdpbGwgY29udGFpbiBsb3RzIG9mIGRldGFpbCBvZiB0aGUgcGFnZSBhbmFseXNpcyAo\nc2VlIHVybHNjYW4uaW8gZm9yIGRldGFpbHMpLlxcbiMgfVxcbiNcXG4jIEluIHRoaXMgY2FzZSwg\ndGhlIGZpbGUgYXR0YWNobWVudCBjb250ZW50IGlzIHVzZWQgbGF0ZXIgaW4gdGhlIHdvcmtmbG93\nLiAgTm90aGluZyB0byBkbyBoZXJlLlwifTwvcmVzaWxpZW50OmZ1bmN0aW9uPjwvZXh0ZW5zaW9u\nRWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNlRmxvd18xN2RvNDZ0PC9pbmNvbWluZz48b3V0Z29p\nbmc+U2VxdWVuY2VGbG93XzBqZHI3aDc8L291dGdvaW5nPjwvc2VydmljZVRhc2s+PHNlcXVlbmNl\nRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18xN2RvNDZ0XCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8x\nNTVhc3htXCIgdGFyZ2V0UmVmPVwiU2VydmljZVRhc2tfMGMzZXN1blwiLz48c2VydmljZVRhc2sg\naWQ9XCJTZXJ2aWNlVGFza18wcjV2N3k4XCIgbmFtZT1cIlV0aWxpdGllczogQmFzZTY0IHRvIEF0\ndGFjaG1lbnRcIiByZXNpbGllbnQ6dHlwZT1cImZ1bmN0aW9uXCI+PGV4dGVuc2lvbkVsZW1lbnRz\nPjxyZXNpbGllbnQ6ZnVuY3Rpb24gdXVpZD1cIjExMzQ5MTU5LTE1M2UtNDliNy05YTliLWUyMjY3\nNmMwMzY4N1wiPntcImlucHV0c1wiOnt9LFwicHJlX3Byb2Nlc3Npbmdfc2NyaXB0XCI6XCIjIFRo\nZSBmaWxlIHdpbGwgYmUgYXR0YWNoZWQgdG8gdGhpcyBpbmNpZGVudFxcbmlucHV0cy5pbmNpZGVu\ndF9pZCA9IGluY2lkZW50LmlkXFxuXFxuIyBUaGUgZmlsZSBjb250ZW50IGlzIGJhc2U2NC1lbmNv\nZGVkIGRhdGFcXG5pbnB1dHMuYmFzZTY0Y29udGVudCA9IHdvcmtmbG93LnByb3BlcnRpZXMudXJs\nc2NhbmlvLnBuZ19iYXN
lNjRjb250ZW50XFxuXFxuIyBOYW1lIHRoZSBmaWxlIGF0dGFjaG1lbnQg\nZnJvbSB0aGUgYXJ0aWZhY3RcXG5pbnB1dHMuZmlsZV9uYW1lID0gXFxcInVybHNjYW5pb19zY3Jl\nZW5zaG90X3t9LnBuZ1xcXCIuZm9ybWF0KGFydGlmYWN0LnZhbHVlLnJlcGxhY2UoXFxcIjpcXFwi\nLCBcXFwiX1xcXCIpLnJlcGxhY2UoXFxcIi9cXFwiLCBcXFwiX1xcXCIpKVxcblwifTwvcmVzaWxp\nZW50OmZ1bmN0aW9uPjwvZXh0ZW5zaW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNlRmxvd18w\namRyN2g3PC9pbmNvbWluZz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzBnM24zZHA8L291dGdvaW5n\nPjwvc2VydmljZVRhc2s+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18wamRyN2g3XCIg\nc291cmNlUmVmPVwiU2VydmljZVRhc2tfMGMzZXN1blwiIHRhcmdldFJlZj1cIlNlcnZpY2VUYXNr\nXzByNXY3eThcIi8+PGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRfMDh0OG00NFwiPjxpbmNvbWluZz5T\nZXF1ZW5jZUZsb3dfMGczbjNkcDwvaW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVuY2VGbG93IGlk\nPVwiU2VxdWVuY2VGbG93XzBnM24zZHBcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFza18wcjV2N3k4\nXCIgdGFyZ2V0UmVmPVwiRW5kRXZlbnRfMDh0OG00NFwiLz48dGV4dEFubm90YXRpb24gaWQ9XCJU\nZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCI+PHRleHQ+UnVuIGZvciBhIFVSTCBhcnRpZmFjdDwvdGV4\ndD48L3RleHRBbm5vdGF0aW9uPjxhc3NvY2lhdGlvbiBpZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhc\nIiBzb3VyY2VSZWY9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiB0YXJnZXRSZWY9XCJUZXh0QW5ub3Rh\ndGlvbl8xa3h4aXl0XCIvPjx0ZXh0QW5ub3RhdGlvbiBpZD1cIlRleHRBbm5vdGF0aW9uXzFpOGg5\nc2hcIj48dGV4dD5TY2FuIHRoZSBVUkw8L3RleHQ+PC90ZXh0QW5ub3RhdGlvbj48YXNzb2NpYXRp\nb24gaWQ9XCJBc3NvY2lhdGlvbl8wZGthdG9yXCIgc291cmNlUmVmPVwiU2VydmljZVRhc2tfMGMz\nZXN1blwiIHRhcmdldFJlZj1cIlRleHRBbm5vdGF0aW9uXzFpOGg5c2hcIi8+PHRleHRBbm5vdGF0\naW9uIGlkPVwiVGV4dEFubm90YXRpb25fMHlycWtqM1wiPjx0ZXh0PkF0dGFjaCB0aGUgc2NyZWVu\nc2hvdCB0byB0aGUgaW5jaWRlbnQ8L3RleHQ+PC90ZXh0QW5ub3RhdGlvbj48YXNzb2NpYXRpb24g\naWQ9XCJBc3NvY2lhdGlvbl8wOHc0Z3YzXCIgc291cmNlUmVmPVwiU2VydmljZVRhc2tfMHI1djd5\nOFwiIHRhcmdldFJlZj1cIlRleHRBbm5vdGF0aW9uXzB5cnFrajNcIi8+PC9wcm9jZXNzPjxicG1u\nZGk6QlBNTkRpYWdyYW0gaWQ9XCJCUE1ORGlhZ3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBt\nbkVsZW1lbnQ9XCJ1bmRlZmluZWRcIiBpZD1cIkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hh\ncGUgYnBtbkVsZW1lbnQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1\nYXN4bV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2\nMlwiIHk9XCIxODhcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIw\nXCIgd2lkdGg9XCI5MFwiIHg9XCIxNTdcIiB5PVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48\nL2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5u\nb3RhdGlvbl8xa3h4aXl0XCIgaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0X2RpXCI+PG9tZ2Rj\nOkJvdW5kcyBoZWlnaHQ9XCIzNVwiIHdpZHRoPVwiMTAwXCIgeD1cIjE3NFwiIHk9XCI2OVwiLz48\nL2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0\naW9uXzFzZXVqNDhcIiBpZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhfZGlcIj48b21nZGk6d2F5cG9p\nbnQgeD1cIjE4M1wiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMTg5XCIvPjxvbWdkaTp3\nYXlwb2ludCB4PVwiMjE1XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIxMDRcIi8+PC9i\ncG1uZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFz\na18wYzNlc3VuXCIgaWQ9XCJTZXJ2aWNlVGFza18wYzNlc3VuX2RpXCI+PG9tZ2RjOkJvdW5kcyBo\nZWlnaHQ9XCI4MFwiIHdpZHRoPVwiMTAwXCIgeD1cIjI5N1wiIHk9XCIxNjZcIi8+PC9icG1uZGk6\nQlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMTdk\nbzQ2dFwiIGlkPVwiU2VxdWVuY2VGbG93XzE3ZG80NnRfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1c\nIjE5OFwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2lu\ndCB4PVwiMjk3XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpC\nUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIyNDcu\nNVwiIHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1u\nZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiU2VydmljZVRhc2tfMHI1djd5OFwi
IGlkPVwiU2Vy\ndmljZVRhc2tfMHI1djd5OF9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiODBcIiB3aWR0aD1c\nIjEwMFwiIHg9XCI1MDZcIiB5PVwiMTY2XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQ\nTU5FZGdlIGJwbW5FbGVtZW50PVwiU2VxdWVuY2VGbG93XzBqZHI3aDdcIiBpZD1cIlNlcXVlbmNl\nRmxvd18wamRyN2g3X2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIzOTdcIiB4c2k6dHlwZT1cIm9t\nZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjUwNlwiIHhzaTp0eXBl\nPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3Vu\nZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiNDUxLjVcIiB5PVwiMTg0XCIvPjwvYnBt\nbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxl\nbWVudD1cIkVuZEV2ZW50XzA4dDhtNDRcIiBpZD1cIkVuZEV2ZW50XzA4dDhtNDRfZGlcIj48b21n\nZGM6Qm91bmRzIGhlaWdodD1cIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCI3MTRcIiB5PVwiMTg4XCIv\nPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBc\nIiB4PVwiNzMyXCIgeT1cIjIyN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNo\nYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMGczbjNkcFwi\nIGlkPVwiU2VxdWVuY2VGbG93XzBnM24zZHBfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjYwNlwi\nIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwi\nNzE0XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFi\nZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI2NjBcIiB5PVwi\nMTg0XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5T\naGFwZSBicG1uRWxlbWVudD1cIlRleHRBbm5vdGF0aW9uXzFpOGg5c2hcIiBpZD1cIlRleHRBbm5v\ndGF0aW9uXzFpOGg5c2hfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjMwXCIgd2lkdGg9XCIx\nMDBcIiB4PVwiMzQ4XCIgeT1cIjY5XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5F\nZGdlIGJwbW5FbGVtZW50PVwiQXNzb2NpYXRpb25fMGRrYXRvclwiIGlkPVwiQXNzb2NpYXRpb25f\nMGRrYXRvcl9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMzY0XCIgeHNpOnR5cGU9XCJvbWdkYzpQ\nb2ludFwiIHk9XCIxNjZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIzOTJcIiB4c2k6dHlwZT1cIm9t\nZ2RjOlBvaW50XCIgeT1cIjk5XCIvPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBl\nIGJwbW5FbGVtZW50PVwiVGV4dEFubm90YXRpb25fMHlycWtqM1wiIGlkPVwiVGV4dEFubm90YXRp\nb25fMHlycWtqM19kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiNDZcIiB3aWR0aD1cIjEwMFwi\nIHg9XCI1NTRcIiB5PVwiNjFcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2Ug\nYnBtbkVsZW1lbnQ9XCJBc3NvY2lhdGlvbl8wOHc0Z3YzXCIgaWQ9XCJBc3NvY2lhdGlvbl8wOHc0\nZ3YzX2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCI1NzJcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50\nXCIgeT1cIjE2NlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjU5NVwiIHhzaTp0eXBlPVwib21nZGM6\nUG9pbnRcIiB5PVwiMTA3XCIvPjwvYnBtbmRpOkJQTU5FZGdlPjwvYnBtbmRpOkJQTU5QbGFuZT48\nL2JwbW5kaTpCUE1ORGlhZ3JhbT48L2RlZmluaXRpb25zPiIsICJ3b3JrZmxvd19pZCI6ICJleGFt\ncGxlX3VybHNjYW5pbyIsICJ2ZXJzaW9uIjogMn0sICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTMx\nMzYyNDAyODg4LCAiY3JlYXRvcl9pZCI6ICJocHlsZUByZXNpbGllbnRzeXN0ZW1zLmNvbSIsICJh\nY3Rpb25zIjogW10sICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJleGFtcGxlX3VybHNjYW5pbyIsICJu\nYW1lIjogIkV4YW1wbGU6IHVybHNjYW4uaW8ifV0sICJhY3Rpb25zIjogW3sibG9naWNfdHlwZSI6\nICJhbGwiLCAibmFtZSI6ICJFeGFtcGxlOiB1cmxzY2FuLmlvIiwgInZpZXdfaXRlbXMiOiBbXSwg\nInR5cGUiOiAxLCAid29ya2Zsb3dzIjogWyJleGFtcGxlX3VybHNjYW5pbyJdLCAib2JqZWN0X3R5\ncGUiOiAiYXJ0aWZhY3QiLCAidGltZW91dF9zZWNvbmRzIjogODY0MDAsICJ1dWlkIjogImJiYjkw\nNzJhLTI2M2MtNDMxNi04ZGEzLWRhODg1YzA0ZjgxNCIsICJhdXRvbWF0aW9ucyI6IFtdLCAiZXhw\nb3J0X2tleSI6ICJFeGFtcGxlOiB1cmxzY2FuLmlvIiwgImNvbmRpdGlvbnMiOiBbeyJ0eXBlIjog\nbnVsbCwgImV2YWx1YXRpb25faWQiOiBudWxsLCAiZmllbGRfbmFtZSI6ICJhcnRpZmFjdC50eXBl\nIiwgIm1ldGhvZCI6ICJpbiIsICJ2YWx1ZSI6IFsiVVJMIiwgIlVSTCBSZWZlcmVyIl19XSwgImlk\nIjogNTY1LCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbXX1dLCAibGF5b3V0cyI6IFtdLCAiZXhw\nb3J0X2Zvcm1hdF92ZXJzaW9uIjogMiw
gImlkIjogMzYsICJpbmR1c3RyaWVzIjogbnVsbCwgInBo\nYXNlcyI6IFtdLCAiYWN0aW9uX29yZGVyIjogW10sICJnZW9zIjogbnVsbCwgInNlcnZlcl92ZXJz\naW9uIjogeyJtYWpvciI6IDMwLCAidmVyc2lvbiI6ICIzMC4xLjI1IiwgImJ1aWxkX251bWJlciI6\nIDI1LCAibWlub3IiOiAxfSwgInRpbWVmcmFtZXMiOiBudWxsLCAid29ya3NwYWNlcyI6IFtdLCAi\nYXV0b21hdGljX3Rhc2tzIjogW10sICJmdW5jdGlvbnMiOiBbeyJkaXNwbGF5X25hbWUiOiAiU2Nh\nbiB3aXRoIHVybHNjYW4uaW8iLCAiZGVzY3JpcHRpb24iOiB7ImNvbnRlbnQiOiAiQW5hbHl6ZSBh\nIFVSTCB3aXRoIHVybHNjYW4uaW8iLCAiZm9ybWF0IjogInRleHQifSwgImNyZWF0b3IiOiB7ImRp\nc3BsYXlfbmFtZSI6ICJSZXNpbGllbnQgU3lzYWRtaW4iLCAidHlwZSI6ICJ1c2VyIiwgImlkIjog\nNywgIm5hbWUiOiAiYXBpQGV4YW1wbGUuY29tIn0sICJ2aWV3X2l0ZW1zIjogW3sic2hvd19pZiI6\nIG51bGwsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRlciI6IGZh\nbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiNjJmOTVlZTktYTExMi00\nZDFhLWFhNjUtYTZkMGUxZWY3MzQ4IiwgInN0ZXBfbGFiZWwiOiBudWxsfSwgeyJzaG93X2lmIjog\nbnVsbCwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFs\nc2UsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6ICJlZTkyNjNjMS00MzJhLTQx\nYTgtOGJiZC02OTE3ZDM1ZmIzYWMiLCAic3RlcF9sYWJlbCI6IG51bGx9LCB7InNob3dfaWYiOiBu\ndWxsLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNob3dfbGlua19oZWFkZXIiOiBmYWxz\nZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50IjogIjczODE4NzVjLTdhMzItNDM5\nZC04ZjU1LTc2MjM5YTRjNzJiNyIsICJzdGVwX2xhYmVsIjogbnVsbH0sIHsic2hvd19pZiI6IG51\nbGwsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNl\nLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiYTJlYmRiNWItM2Q1YS00MjVj\nLWEwYzctZmEzNTkwNGZhNWM0IiwgInN0ZXBfbGFiZWwiOiBudWxsfV0sICJleHBvcnRfa2V5Ijog\nInVybHNjYW5pbyIsICJ1dWlkIjogImQxOWMxZjAwLWI0ZjEtNDQ4MC1iOGEzLTdiZGQxOTE0MzA0\nMSIsICJsYXN0X21vZGlmaWVkX2J5IjogeyJkaXNwbGF5X25hbWUiOiAiXHVmZWZmXHUyMDYzIEh1\nZ2giLCAidHlwZSI6ICJ1c2VyIiwgImlkIjogNCwgIm5hbWUiOiAiaHB5bGVAcmVzaWxpZW50c3lz\ndGVtcy5jb20ifSwgInZlcnNpb24iOiAzLCAid29ya2Zsb3dzIjogW3siZGVzY3JpcHRpb24iOiBu\ndWxsLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAiYWN0aW9ucyI6IFtdLCAibmFtZSI6ICJF\neGFtcGxlOiB1cmxzY2FuLmlvIiwgIndvcmtmbG93X2lkIjogMTY4LCAicHJvZ3JhbW1hdGljX25h\nbWUiOiAiZXhhbXBsZV91cmxzY2FuaW8iLCAidXVpZCI6IG51bGx9XSwgImxhc3RfbW9kaWZpZWRf\ndGltZSI6IDE1MzEzNTQ1ODYyOTUsICJkZXN0aW5hdGlvbl9oYW5kbGUiOiAidXJsc2NhbmlvIiwg\nImlkIjogMjU3LCAibmFtZSI6ICJ1cmxzY2FuaW8ifV0sICJub3RpZmljYXRpb25zIjogbnVsbCwg\nInJlZ3VsYXRvcnMiOiBudWxsLCAiaW5jaWRlbnRfdHlwZXMiOiBbeyJjcmVhdGVfZGF0ZSI6IDE1\nMzEzNjI2NDc2MzEsICJkZXNjcmlwdGlvbiI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRl\ncm5hbCkiLCAiZXhwb3J0X2tleSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCki\nLCAiaWQiOiAwLCAibmFtZSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAi\ndXBkYXRlX2RhdGUiOiAxNTMxMzYyNjQ3NjMxLCAidXVpZCI6ICJiZmVlYzJkNC0zNzcwLTExZTgt\nYWQzOS00YTAwMDQwNDRhYTAiLCAiZW5hYmxlZCI6IGZhbHNlLCAic3lzdGVtIjogZmFsc2UsICJw\nYXJlbnRfaWQiOiBudWxsLCAiaGlkZGVuIjogZmFsc2V9XSwgInNjcmlwdHMiOiBbXSwgInR5cGVz\nIjogW10sICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFt7InV1aWQiOiAiOWM0ZTAxNDMtZDg0Yi00\nZmQyLWJlNDMtZTUyZWViMWUxZTA4IiwgImV4cG9ydF9rZXkiOiAidXJsc2NhbmlvIiwgIm5hbWUi\nOiAidXJsc2Nhbi5pbyIsICJkZXN0aW5hdGlvbl90eXBlIjogMCwgInByb2dyYW1tYXRpY19uYW1l\nIjogInVybHNjYW5pbyIsICJleHBlY3RfYWNrIjogdHJ1ZSwgInVzZXJzIjogWyJhcGlAZXhhbXBs\nZS5jb20iXX1dLCAiaW5jaWRlbnRfYXJ0aWZhY3RfdHlwZXMiOiBbXSwgInJvbGVzIjogW10sICJm\naWVsZHMiOiBbeyJvcGVyYXRpb25zIjogW10sICJyZWFkX29ubHkiOiB0cnVlLCAibmFtZSI6ICJp\nbmNfdHJhaW5pbmciLCAidGVtcGxhdGVzIjogW10sICJ0eXBlX2lkIjogMCwgImNob3NlbiI6IGZh\nbHNlLCAidGV4dCI6ICJTaW11bGF0aW9uIiwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZh\nbHNlLCAiZXhwb3J0X2tleSI6ICJpbmNpZGVudC9pbmNfdHJhaW5pbmciLCAidG9vbHRpcCI6ICJX
\naGV0aGVyIHRoZSBpbmNpZGVudCBpcyBhIHNpbXVsYXRpb24gb3IgYSByZWd1bGFyIGluY2lkZW50\nLiAgVGhpcyBmaWVsZCBpcyByZWFkLW9ubHkuIiwgInJpY2hfdGV4dCI6IGZhbHNlLCAib3BlcmF0\naW9uX3Blcm1zIjoge30sICJwcmVmaXgiOiBudWxsLCAiaW50ZXJuYWwiOiBmYWxzZSwgInZhbHVl\ncyI6IFtdLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJpbnB1dF90eXBlIjogImJvb2xlYW4iLCAi\nY2hhbmdlYWJsZSI6IHRydWUsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiaWQiOiAxMTcy\nLCAidXVpZCI6ICJjM2YwZTNlZC0yMWUxLTRkNTMtYWZmYi1mZTVjYTMzMDhjY2EifSwgeyJvcGVy\nYXRpb25zIjogW10sICJ0eXBlX2lkIjogMTEsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQi\nOiAidXJsc2NhbmlvX3B1YmxpYyIsICJibGFua19vcHRpb24iOiBmYWxzZSwgInByZWZpeCI6IG51\nbGwsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogMzIwNywgInJlYWRfb25seSI6IGZhbHNlLCAi\ndXVpZCI6ICJlZTkyNjNjMS00MzJhLTQxYTgtOGJiZC02OTE3ZDM1ZmIzYWMiLCAiY2hvc2VuIjog\nZmFsc2UsICJpbnB1dF90eXBlIjogImJvb2xlYW4iLCAidG9vbHRpcCI6ICJTaG91bGQgdGhlIHNj\nYW4gYmUgcG9zdGVkIGFzIHB1YmxpYz8iLCAiaW50ZXJuYWwiOiBmYWxzZSwgInJpY2hfdGV4dCI6\nIGZhbHNlLCAidGVtcGxhdGVzIjogW10sICJleHBvcnRfa2V5IjogIl9fZnVuY3Rpb24vdXJsc2Nh\nbmlvX3B1YmxpYyIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAicGxhY2Vob2xkZXIiOiAi\nIiwgIm5hbWUiOiAidXJsc2NhbmlvX3B1YmxpYyIsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIi\nOiBmYWxzZSwgInZhbHVlcyI6IFtdfSwgeyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjogMTEs\nICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQiOiAidXJsc2NhbmlvX3JlZmVyZXIiLCAiYmxh\nbmtfb3B0aW9uIjogZmFsc2UsICJwcmVmaXgiOiBudWxsLCAiY2hhbmdlYWJsZSI6IHRydWUsICJp\nZCI6IDMyMDksICJyZWFkX29ubHkiOiBmYWxzZSwgInV1aWQiOiAiYTJlYmRiNWItM2Q1YS00MjVj\nLWEwYzctZmEzNTkwNGZhNWM0IiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0\nIiwgInRvb2x0aXAiOiAiQ3VzdG9tIHJlZmVyZXIgVVJMIGZvciB0aGlzIHNjYW4iLCAiaW50ZXJu\nYWwiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGVtcGxhdGVzIjogW10sICJleHBvcnRf\na2V5IjogIl9fZnVuY3Rpb24vdXJsc2NhbmlvX3JlZmVyZXIiLCAiaGlkZV9ub3RpZmljYXRpb24i\nOiBmYWxzZSwgInBsYWNlaG9sZGVyIjogIiIsICJuYW1lIjogInVybHNjYW5pb19yZWZlcmVyIiwg\nImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAidmFsdWVzIjogW119LCB7Im9wZXJh\ndGlvbnMiOiBbXSwgInR5cGVfaWQiOiAxMSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6\nICJ1cmxzY2FuaW9fdXJsIiwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAicHJlZml4IjogbnVsbCwg\nImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAzMjA2LCAicmVhZF9vbmx5IjogZmFsc2UsICJ1dWlk\nIjogIjYyZjk1ZWU5LWExMTItNGQxYS1hYTY1LWE2ZDBlMWVmNzM0OCIsICJjaG9zZW4iOiBmYWxz\nZSwgImlucHV0X3R5cGUiOiAidGV4dCIsICJ0b29sdGlwIjogIiIsICJpbnRlcm5hbCI6IGZhbHNl\nLCAicmljaF90ZXh0IjogZmFsc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAiX19m\ndW5jdGlvbi91cmxzY2FuaW9fdXJsIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJwbGFj\nZWhvbGRlciI6ICIiLCAibmFtZSI6ICJ1cmxzY2FuaW9fdXJsIiwgImRlZmF1bHRfY2hvc2VuX2J5\nX3NlcnZlciI6IGZhbHNlLCAidmFsdWVzIjogW119LCB7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVf\naWQiOiAxMSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJ1cmxzY2FuaW9fdXNlcmFn\nZW50IiwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAicHJlZml4IjogbnVsbCwgImNoYW5nZWFibGUi\nOiB0cnVlLCAiaWQiOiAzMjA4LCAicmVhZF9vbmx5IjogZmFsc2UsICJ1dWlkIjogIjczODE4NzVj\nLTdhMzItNDM5ZC04ZjU1LTc2MjM5YTRjNzJiNyIsICJjaG9zZW4iOiBmYWxzZSwgImlucHV0X3R5\ncGUiOiAidGV4dCIsICJ0b29sdGlwIjogIk92ZXJyaWRlIFVzZXItQWdlbnQgZm9yIHRoaXMgc2Nh\nbiIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0ZW1wbGF0ZXMiOiBb\nXSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi91cmxzY2FuaW9fdXNlcmFnZW50IiwgImhpZGVf\nbm90aWZpY2F0aW9uIjogZmFsc2UsICJwbGFjZWhvbGRlciI6ICIiLCAibmFtZSI6ICJ1cmxzY2Fu\naW9fdXNlcmFnZW50IiwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAidmFsdWVz\nIjogW119XSwgIm92ZXJyaWRlcyI6IFtdLCAiZXhwb3J0X2RhdGUiOiAxNTMxMzYyNjIyNzk3fQ==\n\"\"\"\n )", "title": "" }, { "docid": "e0c5f4533f32bda42463d55b907ba4ba", "score": "0.51125747", "text": "def 
module_config_info():\n\n print(\"consumes\", CONSUMES)\n print(\"produces\", PRODUCES)\n module_config_template()", "title": "" }, { "docid": "96e8a1448f6c52ae2cf2373a0ae88198", "score": "0.50800186", "text": "def customizing(self) -> str:", "title": "" }, { "docid": "232039aaef047f1073550e33a8ae0573", "score": "0.50351864", "text": "def gen_config(name):\n if name == 'patterns_to_ignore' or name == 'entropy_patterns_to_discount':\n return []", "title": "" }, { "docid": "94bc2713a84134094ad932ed9648f05d", "score": "0.5018977", "text": "def setup_ilo_customizations():\n\n # Load the ZCML configuration for the ilo.customizations package\n fiveconfigure.debug_mode = True\n import ilo.customizations\n zcml.load_config('configure.zcml',ilo.customizations)\n fiveconfigure.debug_mode = False\n\n # We need to tell the testing framework that these products should\n # be available. This can't happen untuk after we have loaded the ZCML\n\n ztc.installPackage('ilo.customizations')", "title": "" }, { "docid": "961d4a8f3307a5d5e1e308a5e9afc89b", "score": "0.5014473", "text": "def GenerateConfig(context):\n\n FRONTEND = context.env[\"deployment\"] + \"-frontend\"\n FIREWALL = context.env[\"deployment\"] + \"-application-fw\"\n APPLICATION_PORT = 80\n CONTAINER_PORT = 5000\n config = [\n {\n 'name': FRONTEND,\n 'type': 'frontend.py',\n 'properties': {\n 'zone': context.properties[\"zone\"],\n 'dockerImage': 'asia.gcr.io/hobby-174213/line-art-generator:latest',\n 'containerImage': 'cos-stable-59-9460-73-0',\n 'port': APPLICATION_PORT,\n 'containerPort': CONTAINER_PORT,\n\n # If left out will default to 1\n 'size': 1,\n # If left out will default to 1\n 'maxSize': 1\n }\n },\n {\n 'name': FIREWALL,\n 'type': 'compute.v1.firewall',\n 'properties': {\n 'allowed': [{\n 'IPProtocol': 'TCP',\n 'ports': [APPLICATION_PORT]\n }],\n 'sourceRanges': ['0.0.0.0/0']\n }\n }\n ]\n\n return {'resources': config}", "title": "" }, { "docid": "4a9eb081b93445591f4fcb62f3564dc9", "score": "0.50002056", "text": "def module_config_info():\n print('consumes %s' % CONSUMES)\n print('produces %s' % PRODUCES)\n module_config_template()", "title": "" }, { "docid": "0fb7da6af1832aacc43d8a3c78f3c5da", "score": "0.49601492", "text": "def main(client, customer_id, ad_group_id):\n text_customizer_name = f\"Planet_{uuid4().hex[:8]}\"\n price_customizer_name = f\"Price_{uuid4().hex[:8]}\"\n\n text_customizer_resource_name = create_text_customizer_attribute(\n client, customer_id, text_customizer_name\n )\n price_customizer_resource_name = create_price_customizer_attribute(\n client, customer_id, price_customizer_name\n )\n link_customizer_attributes(\n client,\n customer_id,\n ad_group_id,\n text_customizer_resource_name,\n price_customizer_resource_name,\n )\n create_ad_with_customizations(\n client,\n customer_id,\n ad_group_id,\n text_customizer_name,\n price_customizer_name,\n )", "title": "" }, { "docid": "4c90ea5a11ce2c5b269a4a0ddb10baea", "score": "0.4937653", "text": "def customization_data(client=None):\n\n # This import data contains:\n # Function inputs:\n # artifact_id\n # attachment_id\n # email_header_validation_target_email\n # incident_id\n # Message Destinations:\n # fn_email_header_validation\n # Functions:\n # email_header_validation_using_dkimarc\n # Workflows:\n # example_email_header_validation_using_dkimarc_artifact\n # example_email_header_validation_using_dkimarc_attachment\n # Rules:\n # Example: Email Header Validation Using DKIM/ARC [Artifact]\n # Example: Email Header Validation Using DKIM/ARC 
[Attachment]\n\n\n yield ImportDefinition(u\"\"\"\neyJzZXJ2ZXJfdmVyc2lvbiI6IHsibWFqb3IiOiAzMCwgIm1pbm9yIjogMCwgImJ1aWxkX251\nbWJlciI6IDAsICJ2ZXJzaW9uIjogIjMwLjAuMCJ9LCAiZXhwb3J0X2Zvcm1hdF92ZXJzaW9u\nIjogMiwgImlkIjogNCwgImV4cG9ydF9kYXRlIjogMTUzNzMwMTY1MTczNSwgImZpZWxkcyI6\nIFt7ImlkIjogNTEsICJuYW1lIjogImluY190cmFpbmluZyIsICJ0ZXh0IjogIlNpbXVsYXRp\nb24iLCAicHJlZml4IjogbnVsbCwgInR5cGVfaWQiOiAwLCAidG9vbHRpcCI6ICJXaGV0aGVy\nIHRoZSBpbmNpZGVudCBpcyBhIHNpbXVsYXRpb24gb3IgYSByZWd1bGFyIGluY2lkZW50LiAg\nVGhpcyBmaWVsZCBpcyByZWFkLW9ubHkuIiwgImlucHV0X3R5cGUiOiAiYm9vbGVhbiIsICJo\naWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogZmFsc2UsICJkZWZhdWx0X2No\nb3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiaW50ZXJu\nYWwiOiBmYWxzZSwgInV1aWQiOiAiYzNmMGUzZWQtMjFlMS00ZDUzLWFmZmItZmU1Y2EzMzA4\nY2NhIiwgIm9wZXJhdGlvbnMiOiBbXSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidmFsdWVz\nIjogW10sICJyZWFkX29ubHkiOiB0cnVlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJyaWNoX3Rl\neHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiaW5jaWRlbnQvaW5jX3RyYWluaW5nIiwgInRl\nbXBsYXRlcyI6IFtdLCAiZGVwcmVjYXRlZCI6IGZhbHNlfSwgeyJpZCI6IDIwNSwgIm5hbWUi\nOiAiZW1haWxfaGVhZGVyX3ZhbGlkYXRpb25fdGFyZ2V0X2VtYWlsIiwgInRleHQiOiAiZW1h\naWxfaGVhZGVyX3ZhbGlkYXRpb25fdGFyZ2V0X2VtYWlsIiwgInByZWZpeCI6IG51bGwsICJ0\neXBlX2lkIjogMTEsICJ0b29sdGlwIjogIlJGQzgyMiBlbWFpbCB0byBwZXJmb3JtIGhlYWRl\nciBhbmFseXNpcyBvbi4gQ2FuIGJlIHVzZWQgaW4gcGxhY2Ugb2YgYXR0YWNobWVudC9hcnRp\nZmFjdCBpZHMgaW4gY2hhaW5lZCB3b3JrZmxvd3MiLCAicGxhY2Vob2xkZXIiOiAiIiwgImlu\ncHV0X3R5cGUiOiAidGV4dCIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hvc2Vu\nIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29w\ndGlvbiI6IGZhbHNlLCAiaW50ZXJuYWwiOiBmYWxzZSwgInV1aWQiOiAiMDU4MmQyZDctZDcx\nNS00ZjY0LTgxZDMtNTBjOTRjZmExZjI5IiwgIm9wZXJhdGlvbnMiOiBbXSwgIm9wZXJhdGlv\nbl9wZXJtcyI6IHt9LCAidmFsdWVzIjogW10sICJyZWFkX29ubHkiOiBmYWxzZSwgImNoYW5n\nZWFibGUiOiB0cnVlLCAicmljaF90ZXh0IjogZmFsc2UsICJleHBvcnRfa2V5IjogIl9fZnVu\nY3Rpb24vZW1haWxfaGVhZGVyX3ZhbGlkYXRpb25fdGFyZ2V0X2VtYWlsIiwgInRlbXBsYXRl\ncyI6IFtdLCAiZGVwcmVjYXRlZCI6IGZhbHNlfSwgeyJpZCI6IDE1NSwgIm5hbWUiOiAiaW5j\naWRlbnRfaWQiLCAidGV4dCI6ICJpbmNpZGVudF9pZCIsICJwcmVmaXgiOiBudWxsLCAidHlw\nZV9pZCI6IDExLCAidG9vbHRpcCI6ICIiLCAicGxhY2Vob2xkZXIiOiAiIiwgImlucHV0X3R5\ncGUiOiAibnVtYmVyIiwgInJlcXVpcmVkIjogImFsd2F5cyIsICJoaWRlX25vdGlmaWNhdGlv\nbiI6IGZhbHNlLCAiY2hvc2VuIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIi\nOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiaW50ZXJuYWwiOiBmYWxzZSwgInV1\naWQiOiAiODExZTk5ZDctZDE5NC00Y2U4LTg2Y2MtYWZmNWUwMWFiODVjIiwgIm9wZXJhdGlv\nbnMiOiBbXSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidmFsdWVzIjogW10sICJyZWFkX29u\nbHkiOiBmYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAicmljaF90ZXh0IjogZmFsc2UsICJl\neHBvcnRfa2V5IjogIl9fZnVuY3Rpb24vaW5jaWRlbnRfaWQiLCAidGVtcGxhdGVzIjogW10s\nICJkZXByZWNhdGVkIjogZmFsc2V9LCB7ImlkIjogMTU3LCAibmFtZSI6ICJhdHRhY2htZW50\nX2lkIiwgInRleHQiOiAiYXR0YWNobWVudF9pZCIsICJwcmVmaXgiOiBudWxsLCAidHlwZV9p\nZCI6IDExLCAidG9vbHRpcCI6ICIiLCAicGxhY2Vob2xkZXIiOiAiIiwgImlucHV0X3R5cGUi\nOiAibnVtYmVyIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJjaG9zZW4iOiBmYWxz\nZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjog\nZmFsc2UsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICIxN2MzZTY1Mi02NTU5LTQ5MzUt\nOWY5NS03NDM3NGNhMzdhN2IiLCAib3BlcmF0aW9ucyI6IFtdLCAib3BlcmF0aW9uX3Blcm1z\nIjoge30sICJ2YWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6\nIHRydWUsICJyaWNoX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9h\ndHRhY2htZW50X2lkIiwgInRlbXBsYXRlcyI6IFtdLCAiZGVwcmVjYXRlZCI6IGZhbHNlfSwg\neyJpZCI6IDE0MiwgIm5hbWUiOiAiYXJ0aWZhY3RfaWQiLCAidGV4dCI6ICJhcnRpZmFjdF9p\nZCIsICJwcmVmaXgiOiBudWxsLC
AidHlwZV9pZCI6IDExLCAidG9vbHRpcCI6ICIiLCAicGxh\nY2Vob2xkZXIiOiAiIiwgImlucHV0X3R5cGUiOiAibnVtYmVyIiwgImhpZGVfbm90aWZpY2F0\naW9uIjogZmFsc2UsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZl\nciI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJpbnRlcm5hbCI6IGZhbHNlLCAi\ndXVpZCI6ICJlZmRiY2E3ZS02YWU4LTQyNjktYTNkMS04MGYxNzE2YTYyMjIiLCAib3BlcmF0\naW9ucyI6IFtdLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ2YWx1ZXMiOiBbXSwgInJlYWRf\nb25seSI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJyaWNoX3RleHQiOiBmYWxzZSwg\nImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9hcnRpZmFjdF9pZCIsICJ0ZW1wbGF0ZXMiOiBb\nXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX1dLCAiaW5jaWRlbnRfdHlwZXMiOiBbeyJ1cGRhdGVf\nZGF0ZSI6IDE1MzczMDE2ODI2NDUsICJjcmVhdGVfZGF0ZSI6IDE1MzczMDE2ODI2NDUsICJ1\ndWlkIjogImJmZWVjMmQ0LTM3NzAtMTFlOC1hZDM5LTRhMDAwNDA0NGFhMCIsICJkZXNjcmlw\ndGlvbiI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAiZXhwb3J0X2tl\neSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAibmFtZSI6ICJDdXN0\nb21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAiZW5hYmxlZCI6IGZhbHNlLCAic3lz\ndGVtIjogZmFsc2UsICJwYXJlbnRfaWQiOiBudWxsLCAiaGlkZGVuIjogZmFsc2UsICJpZCI6\nIDB9XSwgInBoYXNlcyI6IFtdLCAiYXV0b21hdGljX3Rhc2tzIjogW10sICJvdmVycmlkZXMi\nOiBbXSwgIm1lc3NhZ2VfZGVzdGluYXRpb25zIjogW3sibmFtZSI6ICJmbl9lbWFpbF9oZWFk\nZXJfdmFsaWRhdGlvbiIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJmbl9lbWFpbF9oZWFkZXJf\ndmFsaWRhdGlvbiIsICJkZXN0aW5hdGlvbl90eXBlIjogMCwgImV4cGVjdF9hY2siOiB0cnVl\nLCAidXNlcnMiOiBbImJAYS5jb20iXSwgInV1aWQiOiAiYTdmZDY5MzEtMmI4Yi00NjBiLWFh\nMjMtNWIzNWVhZTQ4MzI4IiwgImV4cG9ydF9rZXkiOiAiZm5fZW1haWxfaGVhZGVyX3ZhbGlk\nYXRpb24ifV0sICJhY3Rpb25zIjogW3siaWQiOiA0OCwgIm5hbWUiOiAiRXhhbXBsZTogRW1h\naWwgSGVhZGVyIFZhbGlkYXRpb24gVXNpbmcgREtJTS9BUkMgW0FydGlmYWN0XSIsICJ0eXBl\nIjogMSwgIm9iamVjdF90eXBlIjogImFydGlmYWN0IiwgImNvbmRpdGlvbnMiOiBbeyJtZXRo\nb2QiOiAiZXF1YWxzIiwgImZpZWxkX25hbWUiOiAiYXJ0aWZhY3QudHlwZSIsICJ2YWx1ZSI6\nICJSRkMgODIyIEVtYWlsIE1lc3NhZ2UgRmlsZSIsICJ0eXBlIjogbnVsbCwgImV2YWx1YXRp\nb25faWQiOiBudWxsfV0sICJhdXRvbWF0aW9ucyI6IFtdLCAibWVzc2FnZV9kZXN0aW5hdGlv\nbnMiOiBbXSwgIndvcmtmbG93cyI6IFsiZXhhbXBsZV9lbWFpbF9oZWFkZXJfdmFsaWRhdGlv\nbl91c2luZ19ka2ltYXJjX2FydGlmYWN0Il0sICJ2aWV3X2l0ZW1zIjogW10sICJ0aW1lb3V0\nX3NlY29uZHMiOiA4NjQwMCwgInV1aWQiOiAiNzg5NTRmNmQtZmM4NS00YzAxLThkYjYtYTVl\nNmE2NDlhYThkIiwgImV4cG9ydF9rZXkiOiAiRXhhbXBsZTogRW1haWwgSGVhZGVyIFZhbGlk\nYXRpb24gVXNpbmcgREtJTS9BUkMgW0FydGlmYWN0XSIsICJsb2dpY190eXBlIjogImFsbCJ9\nLCB7ImlkIjogNDksICJuYW1lIjogIkV4YW1wbGU6IEVtYWlsIEhlYWRlciBWYWxpZGF0aW9u\nIFVzaW5nIERLSU0vQVJDIFtBdHRhY2htZW50XSIsICJ0eXBlIjogMSwgIm9iamVjdF90eXBl\nIjogImF0dGFjaG1lbnQiLCAiY29uZGl0aW9ucyI6IFtdLCAiYXV0b21hdGlvbnMiOiBbXSwg\nIm1lc3NhZ2VfZGVzdGluYXRpb25zIjogW10sICJ3b3JrZmxvd3MiOiBbImV4YW1wbGVfZW1h\naWxfaGVhZGVyX3ZhbGlkYXRpb25fdXNpbmdfZGtpbWFyY19hdHRhY2htZW50Il0sICJ2aWV3\nX2l0ZW1zIjogW10sICJ0aW1lb3V0X3NlY29uZHMiOiA4NjQwMCwgInV1aWQiOiAiM2FhNzZh\nZGQtNDk4NS00MTFlLThjNWItYzA4MWQwODMzNjhmIiwgImV4cG9ydF9rZXkiOiAiRXhhbXBs\nZTogRW1haWwgSGVhZGVyIFZhbGlkYXRpb24gVXNpbmcgREtJTS9BUkMgW0F0dGFjaG1lbnRd\nIiwgImxvZ2ljX3R5cGUiOiAiYWxsIn1dLCAibGF5b3V0cyI6IFtdLCAibm90aWZpY2F0aW9u\ncyI6IG51bGwsICJ0aW1lZnJhbWVzIjogbnVsbCwgImluZHVzdHJpZXMiOiBudWxsLCAicmVn\ndWxhdG9ycyI6IG51bGwsICJnZW9zIjogbnVsbCwgInRhc2tfb3JkZXIiOiBbXSwgImFjdGlv\nbl9vcmRlciI6IFtdLCAidHlwZXMiOiBbXSwgInNjcmlwdHMiOiBbXSwgImluY2lkZW50X2Fy\ndGlmYWN0X3R5cGVzIjogW10sICJ3b3JrZmxvd3MiOiBbeyJ3b3JrZmxvd19pZCI6IDM2LCAi\nbmFtZSI6ICJFeGFtcGxlOiBFbWFpbCBIZWFkZXIgVmFsaWRhdGlvbiBVc2luZyBES0lNL0FS\nQyBbQXR0YWNobWVudF0iLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9lbWFpbF9o\nZWFkZXJfdmFsaWRhdGlvbl91c2luZ19ka2ltYXJjX2F0dGFjaG1lbnQiLCAib2JqZWN0X3R5\ncGUiOiAiYXR0YWNobWVudCIsICJkZ
XNjcmlwdGlvbiI6ICIiLCAiY3JlYXRvcl9pZCI6ICJi\nQGEuY29tIiwgImxhc3RfbW9kaWZpZWRfYnkiOiAiYkBhLmNvbSIsICJsYXN0X21vZGlmaWVk\nX3RpbWUiOiAxNTM3Mjk4Mzk5NDYwLCAiZXhwb3J0X2tleSI6ICJleGFtcGxlX2VtYWlsX2hl\nYWRlcl92YWxpZGF0aW9uX3VzaW5nX2RraW1hcmNfYXR0YWNobWVudCIsICJ1dWlkIjogImFj\nZTU4MWE4LTg5MWItNGQ3MS1iNmMzLWQ3NGU2ZmY2OTNmNyIsICJjb250ZW50IjogeyJ3b3Jr\nZmxvd19pZCI6ICJleGFtcGxlX2VtYWlsX2hlYWRlcl92YWxpZGF0aW9uX3VzaW5nX2RraW1h\ncmNfYXR0YWNobWVudCIsICJ4bWwiOiAiPD94bWwgdmVyc2lvbj1cIjEuMFwiIGVuY29kaW5n\nPVwiVVRGLThcIj8+PGRlZmluaXRpb25zIHhtbG5zPVwiaHR0cDovL3d3dy5vbWcub3JnL3Nw\nZWMvQlBNTi8yMDEwMDUyNC9NT0RFTFwiIHhtbG5zOmJwbW5kaT1cImh0dHA6Ly93d3cub21n\nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQvRElcIiB4bWxuczpvbWdkYz1cImh0dHA6Ly93d3cu\nb21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RDXCIgeG1sbnM6b21nZGk9XCJodHRwOi8vd3d3\nLm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9ESVwiIHhtbG5zOnJlc2lsaWVudD1cImh0dHA6\nLy9yZXNpbGllbnQuaWJtLmNvbS9icG1uXCIgeG1sbnM6eHNkPVwiaHR0cDovL3d3dy53My5v\ncmcvMjAwMS9YTUxTY2hlbWFcIiB4bWxuczp4c2k9XCJodHRwOi8vd3d3LnczLm9yZy8yMDAx\nL1hNTFNjaGVtYS1pbnN0YW5jZVwiIHRhcmdldE5hbWVzcGFjZT1cImh0dHA6Ly93d3cuY2Ft\ndW5kYS5vcmcvdGVzdFwiPjxwcm9jZXNzIGlkPVwiZXhhbXBsZV9lbWFpbF9oZWFkZXJfdmFs\naWRhdGlvbl91c2luZ19ka2ltYXJjX2F0dGFjaG1lbnRcIiBpc0V4ZWN1dGFibGU9XCJ0cnVl\nXCIgbmFtZT1cIkV4YW1wbGU6IEVtYWlsIEhlYWRlciBWYWxpZGF0aW9uIFVzaW5nIERLSU0v\nQVJDIFtBdHRhY2htZW50XVwiPjxkb2N1bWVudGF0aW9uLz48c3RhcnRFdmVudCBpZD1cIlN0\nYXJ0RXZlbnRfMTU1YXN4bVwiPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMWpveHpuMjwvb3V0\nZ29pbmc+PC9zdGFydEV2ZW50PjxzZXJ2aWNlVGFzayBpZD1cIlNlcnZpY2VUYXNrXzFiZ2Vq\nd25cIiBuYW1lPVwiRW1haWwgSGVhZGVyIFZhbGlkYXRpb24gVXNpbmcgREtJLi4uXCIgcmVz\naWxpZW50OnR5cGU9XCJmdW5jdGlvblwiPjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50\nOmZ1bmN0aW9uIHV1aWQ9XCI0MjMyNzZmNC1jYWUzLTRmOTktYjdmNi1jNzI2MDRlYjJmOTFc\nIj57XCJpbnB1dHNcIjp7fSxcInBvc3RfcHJvY2Vzc2luZ19zY3JpcHRcIjpcIiMgcmVzdWx0\ncyA9IHtcXG4jICAgICAgICAgICAgICAgICBcXFwiZGtpbV92ZXJpZnlcXFwiOiBUcnVlL0Zh\nbHNlLFxcbiMgICAgICAgICAgICAgICAgIFxcXCJhcmNfdmVyaWZ5XFxcIjogVHJ1ZS9GYWxz\nZSxcXG4jICAgICAgICAgICAgICAgICBcXFwiZGtpbV9tZXNzYWdlXFxcIjogcmVhc29uIGZv\nciBUcnVlL0ZhbHNlXFxuIyAgICAgICAgICAgICAgICAgXFxcImFyY19tZXNzYWdlXFxcIjog\ncmVhc29uIGZvciBUcnVlL0ZhbHNlXFxuIyAgICAgICAgICAgICB9XFxuXFxub3V0cHV0ID0g\nJ0RLSU0gQW5hbHlzaXM6IHt9LiB7fVxcXFxuQVJDIEFuYWx5c2lzOiB7fS4ge30nLmZvcm1h\ndChzdHIocmVzdWx0cy5ka2ltX3ZlcmlmeSksIHJlc3VsdHMuZGtpbV9tZXNzYWdlLCBzdHIo\ncmVzdWx0cy5hcmNfdmVyaWZ5KSwgcmVzdWx0cy5hcmNfbWVzc2FnZSlcXG5pbmNpZGVudC5h\nZGROb3RlKG91dHB1dClcIixcInByZV9wcm9jZXNzaW5nX3NjcmlwdFwiOlwiaW5wdXRzLmlu\nY2lkZW50X2lkID0gaW5jaWRlbnQuaWRcXG5pbnB1dHMuYXR0YWNobWVudF9pZCA9IGF0dGFj\naG1lbnQuaWRcIn08L3Jlc2lsaWVudDpmdW5jdGlvbj48L2V4dGVuc2lvbkVsZW1lbnRzPjxp\nbmNvbWluZz5TZXF1ZW5jZUZsb3dfMWpveHpuMjwvaW5jb21pbmc+PG91dGdvaW5nPlNlcXVl\nbmNlRmxvd18wNDR5ZmhjPC9vdXRnb2luZz48L3NlcnZpY2VUYXNrPjxzZXF1ZW5jZUZsb3cg\naWQ9XCJTZXF1ZW5jZUZsb3dfMWpveHpuMlwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1\nYXN4bVwiIHRhcmdldFJlZj1cIlNlcnZpY2VUYXNrXzFiZ2Vqd25cIi8+PGVuZEV2ZW50IGlk\nPVwiRW5kRXZlbnRfMDJ6dmxzNVwiPjxpbmNvbWluZz5TZXF1ZW5jZUZsb3dfMDQ0eWZoYzwv\naW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzA0\nNHlmaGNcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFza18xYmdlanduXCIgdGFyZ2V0UmVmPVwi\nRW5kRXZlbnRfMDJ6dmxzNVwiLz48dGV4dEFubm90YXRpb24gaWQ9XCJUZXh0QW5ub3RhdGlv\nbl8xa3h4aXl0XCI+PHRleHQ+U3RhcnQgeW91ciB3b3JrZmxvdyBoZXJlPC90ZXh0PjwvdGV4\ndEFubm90YXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIHNv\ndXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlRleHRBbm5vdGF0\naW9uXzFreHhpeXRcIi8+PC9wcm9jZXNzPjxicG1uZGk6QlBNTkRpYWdyYW0gaWQ9XCJCUE1O\nRGlhZ3JhbV8xXCI+PGJwbW5kaTpCUE1O
UGxhbmUgYnBtbkVsZW1lbnQ9XCJ1bmRlZmluZWRc\nIiBpZD1cIkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJT\ndGFydEV2ZW50XzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bV9kaVwiPjxvbWdk\nYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2MlwiIHk9XCIxODhc\nIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIwXCIgd2lkdGg9\nXCI5MFwiIHg9XCIxNTdcIiB5PVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5k\naTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3Rh\ndGlvbl8xa3h4aXl0XCIgaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0X2RpXCI+PG9tZ2Rj\nOkJvdW5kcyBoZWlnaHQ9XCIzMFwiIHdpZHRoPVwiMTAwXCIgeD1cIjk5XCIgeT1cIjI1NFwi\nLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIkFz\nc29jaWF0aW9uXzFzZXVqNDhcIiBpZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhfZGlcIj48b21n\nZGk6d2F5cG9pbnQgeD1cIjE3NlwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjIz\nXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMTc2XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwi\nIHk9XCIyMzNcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIxNThcIiB4c2k6dHlwZT1cIm9tZ2Rj\nOlBvaW50XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFw\nZSBicG1uRWxlbWVudD1cIlNlcnZpY2VUYXNrXzFiZ2Vqd25cIiBpZD1cIlNlcnZpY2VUYXNr\nXzFiZ2Vqd25fZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgwXCIgd2lkdGg9XCIxMDBc\nIiB4PVwiMjkyXCIgeT1cIjE2NlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1O\nRWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18xam94em4yXCIgaWQ9XCJTZXF1ZW5j\nZUZsb3dfMWpveHpuMl9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMTk4XCIgeHNpOnR5cGU9\nXCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIyMzZcIiB4\nc2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48b21nZGk6d2F5cG9pbnQgeD1c\nIjIzNlwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlw\nb2ludCB4PVwiMjkyXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJw\nbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiOTBc\nIiB4PVwiMjA2XCIgeT1cIjE5OS41XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpC\nUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIkVuZEV2ZW50XzAyenZs\nczVcIiBpZD1cIkVuZEV2ZW50XzAyenZsczVfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1c\nIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCI0NTlcIiB5PVwiMTg4XCIvPjxicG1uZGk6QlBNTkxh\nYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiNDc3XCIg\neT1cIjIyN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1u\nZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMDQ0eWZoY1wiIGlkPVwi\nU2VxdWVuY2VGbG93XzA0NHlmaGNfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjM5MlwiIHhz\naTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwi\nNDU5XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1O\nTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI0MjUu\nNVwiIHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjwv\nYnBtbmRpOkJQTU5QbGFuZT48L2JwbW5kaTpCUE1ORGlhZ3JhbT48L2RlZmluaXRpb25zPiIs\nICJ2ZXJzaW9uIjogMn0sICJhY3Rpb25zIjogW119LCB7IndvcmtmbG93X2lkIjogMzUsICJu\nYW1lIjogIkV4YW1wbGU6IEVtYWlsIEhlYWRlciBWYWxpZGF0aW9uIFVzaW5nIERLSU0vQVJD\nIFtBcnRpZmFjdF0iLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9lbWFpbF9oZWFk\nZXJfdmFsaWRhdGlvbl91c2luZ19ka2ltYXJjX2FydGlmYWN0IiwgIm9iamVjdF90eXBlIjog\nImFydGlmYWN0IiwgImRlc2NyaXB0aW9uIjogIkFuIGV4YW1wbGUgb2YgaGF2aW5nIGFuIGFy\ndGlmYWN0IGVtYWlsJ3MgaGVhZGVyIHZhbGlkYXRlZCB1c2luZyBES0lNL0FSQy4iLCAiY3Jl\nYXRvcl9pZCI6ICJiQGEuY29tIiwgImxhc3RfbW9kaWZpZWRfYnkiOiAiYkBhLmNvbSIsICJs\nYXN0X21vZGlmaWVkX3RpbWUiOiAxNTM3Mjk4Mzc5NDA4LCAiZXhwb3J0X2tleSI6ICJleGFt\ncGxlX2VtYWlsX2hlYWRlcl92YWxpZGF0aW9uX3VzaW5nX2RraW1hcmNfYXJ0aWZhY3QiLCAi\ndXVpZCI6ICI3ZDA0MzZkOS0wMjc1LTQ4OGUtYWZjMC1lMGViZmRlZmE2OGIiLCAiY29udGVu\ndCI6IHsid29ya2Zsb3dfaWQiOiAiZXhhbXB
sZV9lbWFpbF9oZWFkZXJfdmFsaWRhdGlvbl91\nc2luZ19ka2ltYXJjX2FydGlmYWN0IiwgInhtbCI6ICI8P3htbCB2ZXJzaW9uPVwiMS4wXCIg\nZW5jb2Rpbmc9XCJVVEYtOFwiPz48ZGVmaW5pdGlvbnMgeG1sbnM9XCJodHRwOi8vd3d3Lm9t\nZy5vcmcvc3BlYy9CUE1OLzIwMTAwNTI0L01PREVMXCIgeG1sbnM6YnBtbmRpPVwiaHR0cDov\nL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9ESVwiIHhtbG5zOm9tZ2RjPVwiaHR0\ncDovL3d3dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1MjQvRENcIiB4bWxuczpvbWdkaT1cImh0\ndHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RJXCIgeG1sbnM6cmVzaWxpZW50\nPVwiaHR0cDovL3Jlc2lsaWVudC5pYm0uY29tL2JwbW5cIiB4bWxuczp4c2Q9XCJodHRwOi8v\nd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYVwiIHhtbG5zOnhzaT1cImh0dHA6Ly93d3cudzMu\nb3JnLzIwMDEvWE1MU2NoZW1hLWluc3RhbmNlXCIgdGFyZ2V0TmFtZXNwYWNlPVwiaHR0cDov\nL3d3dy5jYW11bmRhLm9yZy90ZXN0XCI+PHByb2Nlc3MgaWQ9XCJleGFtcGxlX2VtYWlsX2hl\nYWRlcl92YWxpZGF0aW9uX3VzaW5nX2RraW1hcmNfYXJ0aWZhY3RcIiBpc0V4ZWN1dGFibGU9\nXCJ0cnVlXCIgbmFtZT1cIkV4YW1wbGU6IEVtYWlsIEhlYWRlciBWYWxpZGF0aW9uIFVzaW5n\nIERLSU0vQVJDIFtBcnRpZmFjdF1cIj48ZG9jdW1lbnRhdGlvbj48IVtDREFUQVtBbiBleGFt\ncGxlIG9mIGhhdmluZyBhbiBhcnRpZmFjdCBlbWFpbCdzIGhlYWRlciB2YWxpZGF0ZWQgdXNp\nbmcgREtJTS9BUkMuXV0+PC9kb2N1bWVudGF0aW9uPjxzdGFydEV2ZW50IGlkPVwiU3RhcnRF\ndmVudF8xNTVhc3htXCI+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18wYmdzaTJuPC9vdXRnb2lu\nZz48L3N0YXJ0RXZlbnQ+PHNlcnZpY2VUYXNrIGlkPVwiU2VydmljZVRhc2tfMW51bGUxMlwi\nIG5hbWU9XCJFbWFpbCBIZWFkZXIgVmFsaWRhdGlvbiBVc2luZyBES0kuLi5cIiByZXNpbGll\nbnQ6dHlwZT1cImZ1bmN0aW9uXCI+PGV4dGVuc2lvbkVsZW1lbnRzPjxyZXNpbGllbnQ6ZnVu\nY3Rpb24gdXVpZD1cIjQyMzI3NmY0LWNhZTMtNGY5OS1iN2Y2LWM3MjYwNGViMmY5MVwiPntc\nImlucHV0c1wiOnt9LFwicG9zdF9wcm9jZXNzaW5nX3NjcmlwdFwiOlwiIyByZXN1bHRzID0g\ne1xcbiMgICAgICAgICAgICAgICAgIFxcXCJka2ltX3ZlcmlmeVxcXCI6IFRydWUvRmFsc2Us\nXFxuIyAgICAgICAgICAgICAgICAgXFxcImFyY192ZXJpZnlcXFwiOiBUcnVlL0ZhbHNlLFxc\nbiMgICAgICAgICAgICAgICAgIFxcXCJka2ltX21lc3NhZ2VcXFwiOiByZWFzb24gZm9yIFRy\ndWUvRmFsc2VcXG4jICAgICAgICAgICAgICAgICBcXFwiYXJjX21lc3NhZ2VcXFwiOiByZWFz\nb24gZm9yIFRydWUvRmFsc2VcXG4jICAgICAgICAgICAgIH1cXG5cXG5vdXRwdXQgPSAnREtJ\nTSBBbmFseXNpczoge30uIHt9XFxcXG5BUkMgQW5hbHlzaXM6IHt9LiB7fScuZm9ybWF0KHN0\ncihyZXN1bHRzLmRraW1fdmVyaWZ5KSwgcmVzdWx0cy5ka2ltX21lc3NhZ2UsIHN0cihyZXN1\nbHRzLmFyY192ZXJpZnkpLCByZXN1bHRzLmFyY19tZXNzYWdlKVxcbmluY2lkZW50LmFkZE5v\ndGUob3V0cHV0KVwiLFwicHJlX3Byb2Nlc3Npbmdfc2NyaXB0XCI6XCJpbnB1dHMuaW5jaWRl\nbnRfaWQgPSBpbmNpZGVudC5pZFxcbmlucHV0cy5hcnRpZmFjdF9pZCA9IGFydGlmYWN0Lmlk\nXCJ9PC9yZXNpbGllbnQ6ZnVuY3Rpb24+PC9leHRlbnNpb25FbGVtZW50cz48aW5jb21pbmc+\nU2VxdWVuY2VGbG93XzBiZ3NpMm48L2luY29taW5nPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3df\nMTkzMnh3bzwvb3V0Z29pbmc+PC9zZXJ2aWNlVGFzaz48c2VxdWVuY2VGbG93IGlkPVwiU2Vx\ndWVuY2VGbG93XzBiZ3NpMm5cIiBzb3VyY2VSZWY9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiB0\nYXJnZXRSZWY9XCJTZXJ2aWNlVGFza18xbnVsZTEyXCIvPjxlbmRFdmVudCBpZD1cIkVuZEV2\nZW50XzBhcm5la3BcIj48aW5jb21pbmc+U2VxdWVuY2VGbG93XzE5MzJ4d288L2luY29taW5n\nPjwvZW5kRXZlbnQ+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18xOTMyeHdvXCIg\nc291cmNlUmVmPVwiU2VydmljZVRhc2tfMW51bGUxMlwiIHRhcmdldFJlZj1cIkVuZEV2ZW50\nXzBhcm5la3BcIi8+PHRleHRBbm5vdGF0aW9uIGlkPVwiVGV4dEFubm90YXRpb25fMWt4eGl5\ndFwiPjx0ZXh0PlN0YXJ0IHlvdXIgd29ya2Zsb3cgaGVyZTwvdGV4dD48L3RleHRBbm5vdGF0\naW9uPjxhc3NvY2lhdGlvbiBpZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhcIiBzb3VyY2VSZWY9\nXCJTdGFydEV2ZW50XzE1NWFzeG1cIiB0YXJnZXRSZWY9XCJUZXh0QW5ub3RhdGlvbl8xa3h4\naXl0XCIvPjwvcHJvY2Vzcz48YnBtbmRpOkJQTU5EaWFncmFtIGlkPVwiQlBNTkRpYWdyYW1f\nMVwiPjxicG1uZGk6QlBNTlBsYW5lIGJwbW5FbGVtZW50PVwidW5kZWZpbmVkXCIgaWQ9XCJC\nUE1OUGxhbmVfMVwiPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiU3RhcnRFdmVu\ndF8xNTVhc3htXCIgaWQ9XCJTdGFydEV2ZW50XzE1NWFzeG1fZGlcIj48b21nZGM6Qm91bmRz\nIGhlaWdodD1cIjM2XCIgd2lkdGg9XCIzNlwiIH
g9XCIxNjJcIiB5PVwiMTg4XCIvPjxicG1u\nZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMFwiIHdpZHRoPVwiOTBcIiB4\nPVwiMTU3XCIgeT1cIjIyM1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNo\nYXBlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiVGV4dEFubm90YXRpb25fMWt4\neGl5dFwiIGlkPVwiVGV4dEFubm90YXRpb25fMWt4eGl5dF9kaVwiPjxvbWdkYzpCb3VuZHMg\naGVpZ2h0PVwiMzBcIiB3aWR0aD1cIjEwMFwiIHg9XCI5OVwiIHk9XCIyNTRcIi8+PC9icG1u\nZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJBc3NvY2lhdGlv\nbl8xc2V1ajQ4XCIgaWQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4X2RpXCI+PG9tZ2RpOndheXBv\naW50IHg9XCIxNjlcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIyMFwiLz48b21n\nZGk6d2F5cG9pbnQgeD1cIjE1M1wiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjU0\nXCIvPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwi\nU2VydmljZVRhc2tfMW51bGUxMlwiIGlkPVwiU2VydmljZVRhc2tfMW51bGUxMl9kaVwiPjxv\nbWdkYzpCb3VuZHMgaGVpZ2h0PVwiODBcIiB3aWR0aD1cIjEwMFwiIHg9XCIyOTlcIiB5PVwi\nMTY2XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50\nPVwiU2VxdWVuY2VGbG93XzBiZ3NpMm5cIiBpZD1cIlNlcXVlbmNlRmxvd18wYmdzaTJuX2Rp\nXCI+PG9tZ2RpOndheXBvaW50IHg9XCIxOThcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIg\neT1cIjIwNlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjI0M1wiIHhzaTp0eXBlPVwib21nZGM6\nUG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMjQzXCIgeHNpOnR5cGU9\nXCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIyOTlcIiB4\nc2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48\nb21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCI5MFwiIHg9XCIyMTNcIiB5PVwi\nMTk5LjVcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6\nQlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiRW5kRXZlbnRfMGFybmVrcFwiIGlkPVwiRW5kRXZl\nbnRfMGFybmVrcF9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2\nXCIgeD1cIjQ4NVwiIHk9XCIxODhcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5k\ncyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiOTBcIiB4PVwiNDU4XCIgeT1cIjIyN1wiLz48L2Jw\nbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBt\nbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMTkzMnh3b1wiIGlkPVwiU2VxdWVuY2VGbG93XzE5\nMzJ4d29fZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjM5OVwiIHhzaTp0eXBlPVwib21nZGM6\nUG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNDg1XCIgeHNpOnR5cGU9\nXCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJv\ndW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiOTBcIiB4PVwiMzk3XCIgeT1cIjE4NC41XCIv\nPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48L2JwbW5kaTpCUE1OUGxh\nbmU+PC9icG1uZGk6QlBNTkRpYWdyYW0+PC9kZWZpbml0aW9ucz4iLCAidmVyc2lvbiI6IDJ9\nLCAiYWN0aW9ucyI6IFtdfV0sICJyb2xlcyI6IFtdLCAid29ya3NwYWNlcyI6IFtdLCAiZnVu\nY3Rpb25zIjogW3siaWQiOiAzOSwgIm5hbWUiOiAiZW1haWxfaGVhZGVyX3ZhbGlkYXRpb25f\ndXNpbmdfZGtpbWFyYyIsICJkaXNwbGF5X25hbWUiOiAiRW1haWwgSGVhZGVyIFZhbGlkYXRp\nb24gVXNpbmcgREtJTS9BUkMiLCAiZGVzY3JpcHRpb24iOiB7ImZvcm1hdCI6ICJ0ZXh0Iiwg\nImNvbnRlbnQiOiAiQW5hbHl6ZXMgdGhlIERLSU0gYW5kIEFSQyBoZWFkZXJzIGZvciBhIFJG\nQzgyMiBmb3JtYXR0ZWQgZW1haWwuIn0sICJkZXN0aW5hdGlvbl9oYW5kbGUiOiAiZm5fZW1h\naWxfaGVhZGVyX3ZhbGlkYXRpb24iLCAiZXhwb3J0X2tleSI6ICJlbWFpbF9oZWFkZXJfdmFs\naWRhdGlvbl91c2luZ19ka2ltYXJjIiwgInV1aWQiOiAiNDIzMjc2ZjQtY2FlMy00Zjk5LWI3\nZjYtYzcyNjA0ZWIyZjkxIiwgInZlcnNpb24iOiAyLCAiY3JlYXRvciI6IHsiaWQiOiAzLCAi\ndHlwZSI6ICJ1c2VyIiwgIm5hbWUiOiAiYkBhLmNvbSIsICJkaXNwbGF5X25hbWUiOiAiUmVz\naWxpZW50IFN5c2FkbWluIn0sICJsYXN0X21vZGlmaWVkX2J5IjogeyJpZCI6IDMsICJ0eXBl\nIjogInVzZXIiLCAibmFtZSI6ICJiQGEuY29tIiwgImRpc3BsYXlfbmFtZSI6ICJSZXNpbGll\nbnQgU3lzYWRtaW4ifSwgImxhc3RfbW9kaWZpZWRfdGltZSI6IDE1MzcyOTgzNjExMjEsICJ2\naWV3X2l0ZW1zIjogW3sic3RlcF9sYWJlbCI6IG51bGwsICJzaG93X2lmIjogbnVsbCwgImVs\nZW1lbnQiOiAiZmllbGRfdXVpZCIsICJmaWVsZF90e
XBlIjogIl9fZnVuY3Rpb24iLCAiY29u\ndGVudCI6ICIwNTgyZDJkNy1kNzE1LTRmNjQtODFkMy01MGM5NGNmYTFmMjkiLCAic2hvd19s\naW5rX2hlYWRlciI6IGZhbHNlfSwgeyJzdGVwX2xhYmVsIjogbnVsbCwgInNob3dfaWYiOiBu\ndWxsLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlv\nbiIsICJjb250ZW50IjogIjgxMWU5OWQ3LWQxOTQtNGNlOC04NmNjLWFmZjVlMDFhYjg1YyIs\nICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2V9LCB7InN0ZXBfbGFiZWwiOiBudWxsLCAic2hv\nd19pZiI6IG51bGwsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJf\nX2Z1bmN0aW9uIiwgImNvbnRlbnQiOiAiMTdjM2U2NTItNjU1OS00OTM1LTlmOTUtNzQzNzRj\nYTM3YTdiIiwgInNob3dfbGlua19oZWFkZXIiOiBmYWxzZX0sIHsic3RlcF9sYWJlbCI6IG51\nbGwsICJzaG93X2lmIjogbnVsbCwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJmaWVsZF90\neXBlIjogIl9fZnVuY3Rpb24iLCAiY29udGVudCI6ICJlZmRiY2E3ZS02YWU4LTQyNjktYTNk\nMS04MGYxNzE2YTYyMjIiLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNlfV0sICJ3b3JrZmxv\nd3MiOiBbeyJ3b3JrZmxvd19pZCI6IDM1LCAibmFtZSI6ICJFeGFtcGxlOiBFbWFpbCBIZWFk\nZXIgVmFsaWRhdGlvbiBVc2luZyBES0lNL0FSQyBbQXJ0aWZhY3RdIiwgInByb2dyYW1tYXRp\nY19uYW1lIjogImV4YW1wbGVfZW1haWxfaGVhZGVyX3ZhbGlkYXRpb25fdXNpbmdfZGtpbWFy\nY19hcnRpZmFjdCIsICJvYmplY3RfdHlwZSI6ICJhcnRpZmFjdCIsICJkZXNjcmlwdGlvbiI6\nIG51bGwsICJ1dWlkIjogbnVsbCwgImFjdGlvbnMiOiBbXX0sIHsid29ya2Zsb3dfaWQiOiAz\nNiwgIm5hbWUiOiAiRXhhbXBsZTogRW1haWwgSGVhZGVyIFZhbGlkYXRpb24gVXNpbmcgREtJ\nTS9BUkMgW0F0dGFjaG1lbnRdIiwgInByb2dyYW1tYXRpY19uYW1lIjogImV4YW1wbGVfZW1h\naWxfaGVhZGVyX3ZhbGlkYXRpb25fdXNpbmdfZGtpbWFyY19hdHRhY2htZW50IiwgIm9iamVj\ndF90eXBlIjogImF0dGFjaG1lbnQiLCAiZGVzY3JpcHRpb24iOiBudWxsLCAidXVpZCI6IG51\nbGwsICJhY3Rpb25zIjogW119XX1dfQo=\n\"\"\"\n )", "title": "" }, { "docid": "0de304ebf09e24c51553912fd46f1942", "score": "0.48873147", "text": "def generate(self, properties, args):", "title": "" }, { "docid": "f29c4b3f3c801f39b31eb4c6a49d0fb1", "score": "0.48698008", "text": "def generate(self, source_types=None, exclude=None):", "title": "" }, { "docid": "cf150bda8212e7fbab00d692ff709633", "score": "0.48566413", "text": "def generate_config(context):\n service_account_name = generate_name(context.properties['uniqueString'],\n 'bigip-sa')\n role_name = generate_name(context.properties['uniqueString'],\n 'bigipaccessrole',\n ['-'])\n resources = [create_service_account(context, service_account_name)] + \\\n [create_custom_role(context, role_name)] + \\\n [create_binding(context, service_account_name, role_name)]\n outputs = [\n {\n 'name': 'serviceAccountEmail',\n 'value': '$(ref.' + service_account_name + '.email)'\n },\n {\n 'name': 'customRoleName',\n 'value': '$(ref.' + role_name + '.name)'\n },\n {\n 'name': 'customRolePermissions',\n 'value': '$(ref.' 
+ role_name + '.includedPermissions)'\n }\n ]\n\n return {'resources': resources, 'outputs': outputs}", "title": "" }, { "docid": "34997b10d4958cc691192780c8042f17", "score": "0.48444468", "text": "def composer(support_custom):\n return MethodComposer(support_custom, helping_func, 'feat', 'value')", "title": "" }, { "docid": "e8266a4bc3b1a5ccfdde319cb0077c3b", "score": "0.4839842", "text": "def customization_data(client=None):\n\n # This import data contains:\n # Function inputs:\n # es_doc_type\n # es_index\n # es_query\n # Message Destinations:\n # fn_elasticsearch\n # Functions:\n # fn_elasticsearch_query\n # Workflows:\n # example_elasticsearch_query_from_artifact\n # example_elasticsearch_query_from_incident\n # Rules:\n # Example: ElasticSearch Query from Artifact\n # Example: ElasticSearch Query from Incident\n\n\n yield ImportDefinition(u\"\"\"\neyJpZCI6IDYwLCAiZmllbGRzIjogW3siY2hvc2VuIjogZmFsc2UsICJpbnRlcm5hbCI6IGZhbHNl\nLCAidXVpZCI6ICJjM2YwZTNlZC0yMWUxLTRkNTMtYWZmYi1mZTVjYTMzMDhjY2EiLCAib3BlcmF0\naW9ucyI6IFtdLCAidmFsdWVzIjogW10sICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogMzgsICJu\nYW1lIjogImluY190cmFpbmluZyIsICJ0ZXh0IjogIlNpbXVsYXRpb24iLCAicHJlZml4IjogbnVs\nbCwgInR5cGVfaWQiOiAwLCAidG9vbHRpcCI6ICJXaGV0aGVyIHRoZSBpbmNpZGVudCBpcyBhIHNp\nbXVsYXRpb24gb3IgYSByZWd1bGFyIGluY2lkZW50LiAgVGhpcyBmaWVsZCBpcyByZWFkLW9ubHku\nIiwgImlucHV0X3R5cGUiOiAiYm9vbGVhbiIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAi\nZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJibGFua19vcHRpb24iOiBmYWxzZSwg\nIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAicmVhZF9vbmx5IjogdHJ1ZSwgInJpY2hfdGV4dCI6IGZh\nbHNlLCAiZXhwb3J0X2tleSI6ICJpbmNpZGVudC9pbmNfdHJhaW5pbmciLCAidGVtcGxhdGVzIjog\nW119LCB7ImNob3NlbiI6IGZhbHNlLCAiaW50ZXJuYWwiOiBmYWxzZSwgInV1aWQiOiAiYjkyY2Mz\nZWQtMjg3OC00NjMwLTgxYTctNTgzMDc4MGZhNWQ5IiwgIm9wZXJhdGlvbnMiOiBbXSwgInZhbHVl\ncyI6IFtdLCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDEwOCwgIm5hbWUiOiAiZXNfcXVlcnki\nLCAidGV4dCI6ICJlc19xdWVyeSIsICJwcmVmaXgiOiBudWxsLCAidHlwZV9pZCI6IDExLCAidG9v\nbHRpcCI6ICJUaGUgcXVlcnkgdGhhdCB3aWxsIGJlIHN1Ym1pdHRlZCB0byBFbGFzdGljU2VhcmNo\nIiwgInBsYWNlaG9sZGVyIjogIiIsICJpbnB1dF90eXBlIjogInRleHRhcmVhIiwgInJlcXVpcmVk\nIjogImFsd2F5cyIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5f\nYnlfc2VydmVyIjogZmFsc2UsICJibGFua19vcHRpb24iOiBmYWxzZSwgIm9wZXJhdGlvbl9wZXJt\ncyI6IHt9LCAicmVhZF9vbmx5IjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgImV4cG9ydF9r\nZXkiOiAiX19mdW5jdGlvbi9lc19xdWVyeSIsICJ0ZW1wbGF0ZXMiOiBbeyJ1dWlkIjogIjJjOTlk\nODA0LTM2YjctNGRhNy1hOGNiLWFmYTAyNGVkNmI1ZCIsICJpZCI6IDcsICJuYW1lIjogIm1hdGNo\nX3Rlcm0iLCAidGVtcGxhdGUiOiB7ImZvcm1hdCI6ICJ0ZXh0IiwgImNvbnRlbnQiOiAie1xuICAg\nIFwicXVlcnlcIiA6IHtcbiAgICAgICAgXCJ0ZXJtXCIgOiB7PFRFUk1fVE9fQkVfU0VBUkNIRUQ+\nfVxuICAgIH1cbn0ifX0sIHsidXVpZCI6ICIyNDU3YzhkMC0yYjE5LTQ3NTYtOWExYS00ZTY0MTA2\nOWQyMzkiLCAiaWQiOiA4LCAibmFtZSI6ICJtYXRjaF9hbGwiLCAidGVtcGxhdGUiOiB7ImZvcm1h\ndCI6ICJ0ZXh0IiwgImNvbnRlbnQiOiAie1xuICAgIFwicXVlcnlcIjoge1xuICAgICAgICBcIm1h\ndGNoX2FsbFwiOiB7fVxuICAgIH1cbn0ifX0sIHsidXVpZCI6ICIyNGEzMTkxYi1kMWIzLTQxYjEt\nOTljMC0xZDIyOTBiYTM2ZWQiLCAiaWQiOiA2LCAibmFtZSI6ICJtYXRjaF90ZXJtX3NvcnRlZCIs\nICJ0ZW1wbGF0ZSI6IHsiZm9ybWF0IjogInRleHQiLCAiY29udGVudCI6ICJ7XG4gICAgXCJzb3J0\nXCIgOiBbXG4gICAgICAgIHsgXCI8U09SVF9WQUxVRT5cIiA6IFwiZGVzY1wiIH1cbiAgICBdLFxu\nICAgIFwicXVlcnlcIiA6IHtcbiAgICAgICAgXCJ0ZXJtXCIgOiA8VEVSTV9UT19CRV9TRUFSQ0hF\nRD5cbiAgICB9XG59In19XX0sIHsiY2hvc2VuIjogZmFsc2UsICJpbnRlcm5hbCI6IGZhbHNlLCAi\ndXVpZCI6ICJlZWQ1NTQ0My03ZDgwLTQ0NTEtYjI3NS0zMWYyZTA5YzNhODQiLCAib3BlcmF0aW9u\ncyI6IFtdLCAidmFsdWVzIjogW10sICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogMTA3LCAibmFt\nZSI6ICJlc19pbmRleCIsICJ0ZXh0IjogImVzX
2luZGV4IiwgInByZWZpeCI6IG51bGwsICJ0eXBl\nX2lkIjogMTEsICJ0b29sdGlwIjogIlRoZSBpbmRleCB0aGF0IHdpbGwgYmUgc2VhcmNoZWQgZm9y\nIGRhdGEuIElmIGxlZnQgYmxhbmsgYWxsIGluZGljZXMgd2lsbCBiZSBzZWFyY2hlZC4iLCAicGxh\nY2Vob2xkZXIiOiAiIiwgImlucHV0X3R5cGUiOiAidGV4dCIsICJoaWRlX25vdGlmaWNhdGlvbiI6\nIGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJibGFua19vcHRpb24i\nOiBmYWxzZSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAicmVhZF9vbmx5IjogZmFsc2UsICJyaWNo\nX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9lc19pbmRleCIsICJ0ZW1w\nbGF0ZXMiOiBbXX0sIHsiY2hvc2VuIjogZmFsc2UsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6\nICIxZTA0MTc3NS1jOWJhLTQzYWUtYThjZi0zZjRkZGE5YTA2ODEiLCAib3BlcmF0aW9ucyI6IFtd\nLCAidmFsdWVzIjogW10sICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogMTA5LCAibmFtZSI6ICJl\nc19kb2NfdHlwZSIsICJ0ZXh0IjogImVzX2RvY190eXBlIiwgInByZWZpeCI6IG51bGwsICJ0eXBl\nX2lkIjogMTEsICJ0b29sdGlwIjogIlRoZSBkb2N1bWVudCB0eXBlIHRoYXQgd2lsbCBiZSBzZWFy\nY2guIiwgInBsYWNlaG9sZGVyIjogIiIsICJpbnB1dF90eXBlIjogInRleHQiLCAiaGlkZV9ub3Rp\nZmljYXRpb24iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxh\nbmtfb3B0aW9uIjogZmFsc2UsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInJlYWRfb25seSI6IGZh\nbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJleHBvcnRfa2V5IjogIl9fZnVuY3Rpb24vZXNfZG9j\nX3R5cGUiLCAidGVtcGxhdGVzIjogW119XSwgInBoYXNlcyI6IFtdLCAib3ZlcnJpZGVzIjogW10s\nICJhY3Rpb25zIjogW3siaWQiOiAzMiwgIm5hbWUiOiAiRXhhbXBsZTogRWxhc3RpY1NlYXJjaCBR\ndWVyeSBmcm9tIEFydGlmYWN0IiwgInR5cGUiOiAxLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3Qi\nLCAiY29uZGl0aW9ucyI6IFt7Im1ldGhvZCI6ICJlcXVhbHMiLCAiZmllbGRfbmFtZSI6ICJhcnRp\nZmFjdC50eXBlIiwgInZhbHVlIjogIlN0cmluZyIsICJ0eXBlIjogbnVsbCwgImV2YWx1YXRpb25f\naWQiOiBudWxsfV0sICJhdXRvbWF0aW9ucyI6IFtdLCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBb\nXSwgIndvcmtmbG93cyI6IFsiZXhhbXBsZV9lbGFzdGljc2VhcmNoX3F1ZXJ5X2Zyb21fYXJ0aWZh\nY3QiXSwgInZpZXdfaXRlbXMiOiBbXSwgInRpbWVvdXRfc2Vjb25kcyI6IDg2NDAwLCAidXVpZCI6\nICIxZDBkNTBlMy0yYmZjLTQ1ZGUtOTBlYy1mMmY0NGE3MTk5ZjQiLCAiZXhwb3J0X2tleSI6ICJF\neGFtcGxlOiBFbGFzdGljU2VhcmNoIFF1ZXJ5IGZyb20gQXJ0aWZhY3QiLCAibG9naWNfdHlwZSI6\nICJhbGwifSwgeyJpZCI6IDMzLCAibmFtZSI6ICJFeGFtcGxlOiBFbGFzdGljU2VhcmNoIFF1ZXJ5\nIGZyb20gSW5jaWRlbnQiLCAidHlwZSI6IDEsICJvYmplY3RfdHlwZSI6ICJpbmNpZGVudCIsICJj\nb25kaXRpb25zIjogW10sICJhdXRvbWF0aW9ucyI6IFtdLCAibWVzc2FnZV9kZXN0aW5hdGlvbnMi\nOiBbXSwgIndvcmtmbG93cyI6IFsiZXhhbXBsZV9lbGFzdGljc2VhcmNoX3F1ZXJ5X2Zyb21faW5j\naWRlbnQiXSwgInZpZXdfaXRlbXMiOiBbXSwgInRpbWVvdXRfc2Vjb25kcyI6IDg2NDAwLCAidXVp\nZCI6ICI1NjI1MjI2YS0zOTE5LTRlMTYtOTY2ZS0yZDg5ZmEyMWQyMTciLCAiZXhwb3J0X2tleSI6\nICJFeGFtcGxlOiBFbGFzdGljU2VhcmNoIFF1ZXJ5IGZyb20gSW5jaWRlbnQiLCAibG9naWNfdHlw\nZSI6ICJhbGwifV0sICJsYXlvdXRzIjogW10sICJub3RpZmljYXRpb25zIjogbnVsbCwgInRpbWVm\ncmFtZXMiOiBudWxsLCAiaW5kdXN0cmllcyI6IG51bGwsICJyZWd1bGF0b3JzIjogbnVsbCwgImdl\nb3MiOiBudWxsLCAiZnVuY3Rpb25zIjogW3siaWQiOiAyMSwgIm5hbWUiOiAiZm5fZWxhc3RpY3Nl\nYXJjaF9xdWVyeSIsICJkZXNjcmlwdGlvbiI6IHsiZm9ybWF0IjogInRleHQiLCAiY29udGVudCI6\nICJBIGZ1bmN0aW9uIHRoYXQgYWxsb3dzIGEgdXNlciB0byBxdWVyeSBhIHNwZWNpZmllZCBFbGFz\ndGljU2VhcmNoIGRhdGFzdG9yZSBmb3IgZGF0YS4ifSwgInV1aWQiOiAiNGYxMDM0OTAtNTk1ZC00\nYWI5LWJhMzAtODIwMmM4ZGRmZTlkIiwgInZlcnNpb24iOiA4NCwgImNyZWF0b3IiOiB7ImlkIjog\nMiwgInR5cGUiOiAidXNlciIsICJuYW1lIjogImFAYS5jb20iLCAiZGlzcGxheV9uYW1lIjogIlJl\nc2lsaWVudCBTeXNhZG1pbiJ9LCAid29ya2Zsb3dzIjogW3siZGVzY3JpcHRpb24iOiBudWxsLCAi\ndXVpZCI6IG51bGwsICJ3b3JrZmxvd19pZCI6IDE5LCAibmFtZSI6ICJFeGFtcGxlOiBFbGFzdGlj\nU2VhcmNoIFF1ZXJ5IGZyb20gQXJ0aWZhY3QiLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBs\nZV9lbGFzdGljc2VhcmNoX3F1ZXJ5X2Zyb21fYXJ0aWZhY3QiLCAib2JqZWN0X3R5cGUiOiAiYXJ0\naWZhY3QiLCAiYWN0aW9ucyI6IFtdfSwgeyJkZXNjcmlwdGlvbiI6IG51bGwsICJ1dWlkIjogbnVs\nbCwg
IndvcmtmbG93X2lkIjogMTgsICJuYW1lIjogIkV4YW1wbGU6IEVsYXN0aWNTZWFyY2ggUXVl\ncnkgZnJvbSBJbmNpZGVudCIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJleGFtcGxlX2VsYXN0aWNz\nZWFyY2hfcXVlcnlfZnJvbV9pbmNpZGVudCIsICJvYmplY3RfdHlwZSI6ICJpbmNpZGVudCIsICJh\nY3Rpb25zIjogW119XSwgImRpc3BsYXlfbmFtZSI6ICJFbGFzdGljU2VhcmNoIFV0aWxpdGllczog\nUXVlcnkiLCAiZGVzdGluYXRpb25faGFuZGxlIjogImZuX2VsYXN0aWNzZWFyY2giLCAiZXhwb3J0\nX2tleSI6ICJmbl9lbGFzdGljc2VhcmNoX3F1ZXJ5IiwgImxhc3RfbW9kaWZpZWRfYnkiOiB7Imlk\nIjogMiwgInR5cGUiOiAidXNlciIsICJuYW1lIjogImFAYS5jb20iLCAiZGlzcGxheV9uYW1lIjog\nIlJlc2lsaWVudCBTeXNhZG1pbiJ9LCAibGFzdF9tb2RpZmllZF90aW1lIjogMTUzNTcxNTI2MDk4\nNSwgInZpZXdfaXRlbXMiOiBbeyJzdGVwX2xhYmVsIjogbnVsbCwgInNob3dfaWYiOiBudWxsLCAi\nZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJjb250\nZW50IjogImI5MmNjM2VkLTI4NzgtNDYzMC04MWE3LTU4MzA3ODBmYTVkOSIsICJzaG93X2xpbmtf\naGVhZGVyIjogZmFsc2V9LCB7InN0ZXBfbGFiZWwiOiBudWxsLCAic2hvd19pZiI6IG51bGwsICJl\nbGVtZW50IjogImZpZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgImNvbnRl\nbnQiOiAiMWUwNDE3NzUtYzliYS00M2FlLWE4Y2YtM2Y0ZGRhOWEwNjgxIiwgInNob3dfbGlua19o\nZWFkZXIiOiBmYWxzZX0sIHsic3RlcF9sYWJlbCI6IG51bGwsICJzaG93X2lmIjogbnVsbCwgImVs\nZW1lbnQiOiAiZmllbGRfdXVpZCIsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAiY29udGVu\ndCI6ICJlZWQ1NTQ0My03ZDgwLTQ0NTEtYjI3NS0zMWYyZTA5YzNhODQiLCAic2hvd19saW5rX2hl\nYWRlciI6IGZhbHNlfV19XSwgInNlcnZlcl92ZXJzaW9uIjogeyJtYWpvciI6IDMwLCAibWlub3Ii\nOiAwLCAiYnVpbGRfbnVtYmVyIjogMzQ3NiwgInZlcnNpb24iOiAiMzAuMC4zNDc2In0sICJleHBv\ncnRfZm9ybWF0X3ZlcnNpb24iOiAyLCAiZXhwb3J0X2RhdGUiOiAxNTQzNTgwNDQ2Mjg2LCAiaW5j\naWRlbnRfdHlwZXMiOiBbeyJ1cGRhdGVfZGF0ZSI6IDE1NDM1ODA0NjQ1NjgsICJjcmVhdGVfZGF0\nZSI6IDE1NDM1ODA0NjQ1NjgsICJ1dWlkIjogImJmZWVjMmQ0LTM3NzAtMTFlOC1hZDM5LTRhMDAw\nNDA0NGFhMCIsICJkZXNjcmlwdGlvbiI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5h\nbCkiLCAiZXhwb3J0X2tleSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAi\nbmFtZSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAiZW5hYmxlZCI6IGZh\nbHNlLCAic3lzdGVtIjogZmFsc2UsICJwYXJlbnRfaWQiOiBudWxsLCAiaGlkZGVuIjogZmFsc2Us\nICJpZCI6IDB9XSwgImF1dG9tYXRpY190YXNrcyI6IFtdLCAibWVzc2FnZV9kZXN0aW5hdGlvbnMi\nOiBbeyJuYW1lIjogImZuX2VsYXN0aWNzZWFyY2giLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZm5f\nZWxhc3RpY3NlYXJjaCIsICJkZXN0aW5hdGlvbl90eXBlIjogMCwgImV4cGVjdF9hY2siOiB0cnVl\nLCAidXNlcnMiOiBbImFAYS5jb20iXSwgInV1aWQiOiAiMjhlMmU4YTItZWE2NS00MzBiLWFiYzMt\nNDhkMmJiM2Q2MGRiIiwgImV4cG9ydF9rZXkiOiAiZm5fZWxhc3RpY3NlYXJjaCJ9XSwgInRhc2tf\nb3JkZXIiOiBbXSwgImFjdGlvbl9vcmRlciI6IFtdLCAidHlwZXMiOiBbXSwgInNjcmlwdHMiOiBb\nXSwgImluY2lkZW50X2FydGlmYWN0X3R5cGVzIjogW10sICJ3b3JrZmxvd3MiOiBbeyJkZXNjcmlw\ndGlvbiI6ICJBbiBleGFtcGxlIHdoaWNoIGF0dGVtcHRzIHRvIHF1ZXJ5IEVsYXN0aWNTZWFyY2gg\ndXNpbmcgYSBwcmUtZGVmaW5lZCBxdWVyeS4gUXVlcnkgZXhhbXBsZXMgYXJlIHByb3ZpZGVkIGR1\ncmluZyB3b3JrZmxvdyBjcmVhdGlvbi4iLCAidXVpZCI6ICI5MGM2NTBhNy1kYTU1LTRiZmQtYjQw\nOS1jOTM4ZWM2MDhkMmIiLCAid29ya2Zsb3dfaWQiOiAxOCwgIm5hbWUiOiAiRXhhbXBsZTogRWxh\nc3RpY1NlYXJjaCBRdWVyeSBmcm9tIEluY2lkZW50IiwgInByb2dyYW1tYXRpY19uYW1lIjogImV4\nYW1wbGVfZWxhc3RpY3NlYXJjaF9xdWVyeV9mcm9tX2luY2lkZW50IiwgIm9iamVjdF90eXBlIjog\nImluY2lkZW50IiwgImNyZWF0b3JfaWQiOiAiYUBhLmNvbSIsICJsYXN0X21vZGlmaWVkX2J5Ijog\nImFAYS5jb20iLCAibGFzdF9tb2RpZmllZF90aW1lIjogMTU0MzU4MDQzODgwMywgImV4cG9ydF9r\nZXkiOiAiZXhhbXBsZV9lbGFzdGljc2VhcmNoX3F1ZXJ5X2Zyb21faW5jaWRlbnQiLCAiY29udGVu\ndCI6IHsieG1sIjogIjw/eG1sIHZlcnNpb249XCIxLjBcIiBlbmNvZGluZz1cIlVURi04XCI/Pjxk\nZWZpbml0aW9ucyB4bWxucz1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQv\nTU9ERUxcIiB4bWxuczpicG1uZGk9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9CUE1OLzIwMTAw\nNTI0L0RJXCIgeG1sbnM6b21nZGM9XCJodHRwOi8vd3d3Lm9tZ
y5vcmcvc3BlYy9ERC8yMDEwMDUy\nNC9EQ1wiIHhtbG5zOm9tZ2RpPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1MjQv\nRElcIiB4bWxuczpyZXNpbGllbnQ9XCJodHRwOi8vcmVzaWxpZW50LmlibS5jb20vYnBtblwiIHht\nbG5zOnhzZD1cImh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hXCIgeG1sbnM6eHNpPVwi\naHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2VcIiB0YXJnZXROYW1lc3Bh\nY2U9XCJodHRwOi8vd3d3LmNhbXVuZGEub3JnL3Rlc3RcIj48cHJvY2VzcyBpZD1cImV4YW1wbGVf\nZWxhc3RpY3NlYXJjaF9xdWVyeV9mcm9tX2luY2lkZW50XCIgaXNFeGVjdXRhYmxlPVwidHJ1ZVwi\nIG5hbWU9XCJFeGFtcGxlOiBFbGFzdGljU2VhcmNoIFF1ZXJ5IGZyb20gSW5jaWRlbnRcIj48ZG9j\ndW1lbnRhdGlvbj5BbiBleGFtcGxlIHdoaWNoIGF0dGVtcHRzIHRvIHF1ZXJ5IEVsYXN0aWNTZWFy\nY2ggdXNpbmcgYSBwcmUtZGVmaW5lZCBxdWVyeS4gUXVlcnkgZXhhbXBsZXMgYXJlIHByb3ZpZGVk\nIGR1cmluZyB3b3JrZmxvdyBjcmVhdGlvbi48L2RvY3VtZW50YXRpb24+PHN0YXJ0RXZlbnQgaWQ9\nXCJTdGFydEV2ZW50XzE1NWFzeG1cIj48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzFlNmg0bWQ8L291\ndGdvaW5nPjwvc3RhcnRFdmVudD48c2VydmljZVRhc2sgaWQ9XCJTZXJ2aWNlVGFza18xMjhseHd2\nXCIgbmFtZT1cIkVsYXN0aWNTZWFyY2ggVXRpbGl0aWVzOiBRdWVyeVwiIHJlc2lsaWVudDp0eXBl\nPVwiZnVuY3Rpb25cIj48ZXh0ZW5zaW9uRWxlbWVudHM+PHJlc2lsaWVudDpmdW5jdGlvbiB1dWlk\nPVwiNGYxMDM0OTAtNTk1ZC00YWI5LWJhMzAtODIwMmM4ZGRmZTlkXCI+e1wiaW5wdXRzXCI6e1wi\nYjkyY2MzZWQtMjg3OC00NjMwLTgxYTctNTgzMDc4MGZhNWQ5XCI6e1wiaW5wdXRfdHlwZVwiOlwi\nc3RhdGljXCIsXCJzdGF0aWNfaW5wdXRcIjp7XCJtdWx0aXNlbGVjdF92YWx1ZVwiOltdLFwidGV4\ndF9jb250ZW50X3ZhbHVlXCI6e1wiZm9ybWF0XCI6XCJ0ZXh0XCIsXCJjb250ZW50XCI6XCJ7XFxu\nICAgIFxcXCJxdWVyeVxcXCI6IHtcXG4gICAgICAgIFxcXCJtYXRjaF9hbGxcXFwiOiB7fVxcbiAg\nICB9XFxufVwifX19fSxcInJlc3VsdF9uYW1lXCI6XCJcIixcInBvc3RfcHJvY2Vzc2luZ19zY3Jp\ncHRcIjpcIlxcXCJcXFwiXFxcIlxcbiMgQW4gRXhhbXBsZSBvZiB0aGUgcmVzdWx0IG9iamVjdCBc\nXG4gICAgcmVzdWx0cyA9IHtcXG4gICAgICAgIFxcXCJpbnB1dHNcXFwiOiB7XFxuICAgICAgICAg\nIFxcXCJlc19xdWVyeVxcXCI6IHsgXFxcInF1ZXJ5XFxcIjogeyBcXFwibWF0Y2hfYWxsXFxcIjog\ne30gfSB9LFxcbiAgICAgICAgICBcXFwiZXNfZG9jX3R5cGVcXFwiOiBsb2dzLFxcbiAgICAgICAg\nICBcXFwiZXNfaW5kZXhcXFwiIDogbXlfbG9nc3RvcmVcXG4gICAgICAgIH0sXFxuICAgICAgICBc\nXFwicXVlcnlfcmVzdWx0c1xcXCI6IFtcXG4gICAgICAgICAgJmx0O2VsYXN0aWNzZWFyY2gtcmVj\nb3JkJmd0OyxcXG4gICAgICAgIFxcXCJzdWNjZXNzXFxcIjogVHJ1ZSAvIEZhbHNlLFxcbiAgICAg\nICAgXFxcIm1hdGNoZWRfcmVjb3Jkc1xcXCI6IDEwMDAsXFxuICAgICAgICBcXFwicmV0dXJuZWRf\ncmVjb3Jkc1xcXCI6IDEwMFxcbiAgICB9XFxuICAgIE5vdGU6IFRoZSBzY2hlbWEgb2YgZWxhc3Rp\nY3NlYXJjaC1yZWNvcmQ7IG91dGxpbmVkIGFib3ZlLCB3aWxsIHJlZmxlY3QgdGhlIHN0cnVjdHVy\nZSBvZiB5b3VyIGRhdGEgaW4gRWxhc3RpYyBpdHNlbGZcXG5cXFwiXFxcIlxcXCJcXG5cXG5pZiBy\nZXN1bHRzLm1hdGNoZWRfcmVjb3JkczpcXG4gIG5vdGVUZXh0ID0gXFxcIlxcXCJcXFwiJmx0O2Im\nZ3Q7RWxhc3RpY1NlYXJjaCBRdWVyeSBzdGF0dXMmbHQ7L2ImZ3Q7XFxuICAgICAgICAgICAgICAg\nICZsdDticiZndDsgUXVlcnkgc3VwcGxpZWQ6ICZsdDtiJmd0O3swfSZsdDsvYiZndDtcXG4gICAg\nICAgICAgICAgICAgJmx0O2JyJmd0OyBUb3RhbCBtYXRjaGVkIHJlY29yZHMgOiZsdDtiJmd0O3sx\nfSZsdDsvYiZndDtcXFwiXFxcIlxcXCIuZm9ybWF0KHJlc3VsdHMuaW5wdXRzW1xcXCJlc19xdWVy\neVxcXCJdLCByZXN1bHRzLm1hdGNoZWRfcmVjb3JkcylcXG4gIFxcbiAgaWYgcmVzdWx0cy5yZXR1\ncm5lZF9yZWNvcmRzICE9IDA6XFxuICAgIG5vdGVUZXh0ICs9IFxcXCJcXFwiXFxcIiZsdDticiZn\ndDsgVG90YWwgcmV0dXJuZWQgcmVjb3JkcyA6ICZsdDtiJmd0O3swfSZsdDsvYiZndDtcXFwiXFxc\nIlxcXCIuZm9ybWF0KHJlc3VsdHMucmV0dXJuZWRfcmVjb3JkcylcXG4gIGluY2lkZW50LmFkZE5v\ndGUoaGVscGVyLmNyZWF0ZVJpY2hUZXh0KG5vdGVUZXh0KSlcIn08L3Jlc2lsaWVudDpmdW5jdGlv\nbj48L2V4dGVuc2lvbkVsZW1lbnRzPjxpbmNvbWluZz5TZXF1ZW5jZUZsb3dfMWU2aDRtZDwvaW5j\nb21pbmc+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18wOGJ1bjIwPC9vdXRnb2luZz48L3NlcnZpY2VU\nYXNrPjxlbmRFdmVudCBpZD1cIkVuZEV2ZW50XzBuejVyNzJcIj48aW5jb21pbmc+U2VxdWVuY2VG\nbG93XzA4YnVuMjA8L2luY29taW5nPjwvZW5kRXZlbnQ+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVl\nbmNlRmxvd18xZTZo
NG1kXCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0\nUmVmPVwiU2VydmljZVRhc2tfMTI4bHh3dlwiLz48c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VG\nbG93XzA4YnVuMjBcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFza18xMjhseHd2XCIgdGFyZ2V0UmVm\nPVwiRW5kRXZlbnRfMG56NXI3MlwiLz48dGV4dEFubm90YXRpb24gaWQ9XCJUZXh0QW5ub3RhdGlv\nbl8xa3h4aXl0XCI+PHRleHQ+U3RhcnQgeW91ciB3b3JrZmxvdyBoZXJlPC90ZXh0PjwvdGV4dEFu\nbm90YXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIHNvdXJjZVJl\nZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlRleHRBbm5vdGF0aW9uXzFreHhp\neXRcIi8+PHRleHRBbm5vdGF0aW9uIGlkPVwiVGV4dEFubm90YXRpb25fMTdmbW5vbFwiPjx0ZXh0\nPlRha2VzIGluIGFuIGVsYXN0aWNzZWFyY2ggcXVlcnkgYW5kIG9wdGlvbmFsbHksIGFuIGluZGV4\nIGFuZCBkb2NfdHlwZSB0byBzZWFyY2ggYWdhaW5zdDwvdGV4dD48L3RleHRBbm5vdGF0aW9uPjxh\nc3NvY2lhdGlvbiBpZD1cIkFzc29jaWF0aW9uXzA5MjM4aTNcIiBzb3VyY2VSZWY9XCJTZXJ2aWNl\nVGFza18xMjhseHd2XCIgdGFyZ2V0UmVmPVwiVGV4dEFubm90YXRpb25fMTdmbW5vbFwiLz48dGV4\ndEFubm90YXRpb24gaWQ9XCJUZXh0QW5ub3RhdGlvbl8wMDB4ejZqXCI+PHRleHQ+UmV0dXJucyBy\nZXN1bHQgb2YgcXVlcnkgaW5jbHVkaW5nIGhvdyBtYW55IG1hdGNoZWQgYW5kIHJldHVybmVkIHJl\nY29yZHMuIFNhdmVzIHF1ZXJ5IGluZm9ybWF0aW9uIGluIGEgcmljaCB0ZXh0IG5vdGU8L3RleHQ+\nPC90ZXh0QW5ub3RhdGlvbj48YXNzb2NpYXRpb24gaWQ9XCJBc3NvY2lhdGlvbl8wNDU3eTQzXCIg\nc291cmNlUmVmPVwiU2VydmljZVRhc2tfMTI4bHh3dlwiIHRhcmdldFJlZj1cIlRleHRBbm5vdGF0\naW9uXzAwMHh6NmpcIi8+PC9wcm9jZXNzPjxicG1uZGk6QlBNTkRpYWdyYW0gaWQ9XCJCUE1ORGlh\nZ3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBtbkVsZW1lbnQ9XCJ1bmRlZmluZWRcIiBpZD1c\nIkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTdGFydEV2ZW50\nXzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVp\nZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2MlwiIHk9XCIxODhcIi8+PGJwbW5kaTpCUE1O\nTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIwXCIgd2lkdGg9XCI5MFwiIHg9XCIxNTdcIiB5\nPVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpC\nUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCIgaWQ9XCJUZXh0\nQW5ub3RhdGlvbl8xa3h4aXl0X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzMFwiIHdpZHRo\nPVwiMTAwXCIgeD1cIjk5XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpC\nUE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9uXzFzZXVqNDhcIiBpZD1cIkFzc29jaWF0\naW9uXzFzZXVqNDhfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE2OVwiIHhzaTp0eXBlPVwib21n\nZGM6UG9pbnRcIiB5PVwiMjIwXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMTUzXCIgeHNpOnR5cGU9\nXCJvbWdkYzpQb2ludFwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1O\nU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18xMjhseHd2XCIgaWQ9XCJTZXJ2aWNlVGFz\na18xMjhseHd2X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI4MFwiIHdpZHRoPVwiMTAwXCIg\neD1cIjMxOC4wNzc3NTAwMDAwMDAwNFwiIHk9XCIxNjUuNTQ3MjUwMDAwMDAwMDJcIi8+PC9icG1u\nZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiRW5kRXZlbnRfMG56\nNXI3MlwiIGlkPVwiRW5kRXZlbnRfMG56NXI3Ml9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwi\nMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjU2OC4wNTY3NDQxODYwNDY2XCIgeT1cIjE4OFwiLz48YnBt\nbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1c\nIjU4Ni4wNTY3NDQxODYwNDY2XCIgeT1cIjIyN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1u\nZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3df\nMWU2aDRtZFwiIGlkPVwiU2VxdWVuY2VGbG93XzFlNmg0bWRfZGlcIj48b21nZGk6d2F5cG9pbnQg\neD1cIjE5OFwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlw\nb2ludCB4PVwiMzE4XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5k\naTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIy\nNThcIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBt\nbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiU2VxdWVuY2VGbG93XzA4YnVuM
jBcIiBpZD1cIlNl\ncXVlbmNlRmxvd18wOGJ1bjIwX2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCI0MThcIiB4c2k6dHlw\nZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjU2OFwiIHhz\naTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdk\nYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiNDkzXCIgeT1cIjE4NFwiLz48\nL2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1OU2hhcGUgYnBt\nbkVsZW1lbnQ9XCJUZXh0QW5ub3RhdGlvbl8xN2Ztbm9sXCIgaWQ9XCJUZXh0QW5ub3RhdGlvbl8x\nN2Ztbm9sX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI3MlwiIHdpZHRoPVwiMTQ0XCIgeD1c\nIjE1N1wiIHk9XCI3MlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1u\nRWxlbWVudD1cIkFzc29jaWF0aW9uXzA5MjM4aTNcIiBpZD1cIkFzc29jaWF0aW9uXzA5MjM4aTNf\nZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjMyMVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5\nPVwiMTczXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMjgwXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2lu\ndFwiIHk9XCIxNDRcIi8+PC9icG1uZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVs\nZW1lbnQ9XCJUZXh0QW5ub3RhdGlvbl8wMDB4ejZqXCIgaWQ9XCJUZXh0QW5ub3RhdGlvbl8wMDB4\nejZqX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI5NFwiIHdpZHRoPVwiMTg5XCIgeD1cIjQz\nNVwiIHk9XCI2MVwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxl\nbWVudD1cIkFzc29jaWF0aW9uXzA0NTd5NDNcIiBpZD1cIkFzc29jaWF0aW9uXzA0NTd5NDNfZGlc\nIj48b21nZGk6d2F5cG9pbnQgeD1cIjQxOFwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwi\nMTc2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNDUzXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwi\nIHk9XCIxNTVcIi8+PC9icG1uZGk6QlBNTkVkZ2U+PC9icG1uZGk6QlBNTlBsYW5lPjwvYnBtbmRp\nOkJQTU5EaWFncmFtPjwvZGVmaW5pdGlvbnM+IiwgInZlcnNpb24iOiA5MywgIndvcmtmbG93X2lk\nIjogImV4YW1wbGVfZWxhc3RpY3NlYXJjaF9xdWVyeV9mcm9tX2luY2lkZW50In0sICJhY3Rpb25z\nIjogW119LCB7ImRlc2NyaXB0aW9uIjogIkFuIGV4YW1wbGUgd2hpY2ggYXR0ZW1wdHMgdG8gcXVl\ncnkgRWxhc3RpY1NlYXJjaCB1c2luZyBkYXRhIGdhdGhlcmVkIGZyb20gYW4gYXJ0aWZhY3QuIElu\ndGVuZGVkIHRvIGJlIHVzZWQgb24gYW4gYXJ0aWZhY3Qgb2YgdHlwZSAnU3RyaW5nJyIsICJ1dWlk\nIjogIjI5YWZkMTIyLTJlMjQtNDUxNi1iNzc5LTg4N2M1MDk2MmY1ZiIsICJ3b3JrZmxvd19pZCI6\nIDE5LCAibmFtZSI6ICJFeGFtcGxlOiBFbGFzdGljU2VhcmNoIFF1ZXJ5IGZyb20gQXJ0aWZhY3Qi\nLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9lbGFzdGljc2VhcmNoX3F1ZXJ5X2Zyb21f\nYXJ0aWZhY3QiLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAiY3JlYXRvcl9pZCI6ICJhQGEu\nY29tIiwgImxhc3RfbW9kaWZpZWRfYnkiOiAiYUBhLmNvbSIsICJsYXN0X21vZGlmaWVkX3RpbWUi\nOiAxNTQzNTE2NDM0Njg3LCAiZXhwb3J0X2tleSI6ICJleGFtcGxlX2VsYXN0aWNzZWFyY2hfcXVl\ncnlfZnJvbV9hcnRpZmFjdCIsICJjb250ZW50IjogeyJ4bWwiOiAiPD94bWwgdmVyc2lvbj1cIjEu\nMFwiIGVuY29kaW5nPVwiVVRGLThcIj8+PGRlZmluaXRpb25zIHhtbG5zPVwiaHR0cDovL3d3dy5v\nbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9NT0RFTFwiIHhtbG5zOmJwbW5kaT1cImh0dHA6Ly93\nd3cub21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQvRElcIiB4bWxuczpvbWdkYz1cImh0dHA6Ly93\nd3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RDXCIgeG1sbnM6b21nZGk9XCJodHRwOi8vd3d3\nLm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9ESVwiIHhtbG5zOnJlc2lsaWVudD1cImh0dHA6Ly9y\nZXNpbGllbnQuaWJtLmNvbS9icG1uXCIgeG1sbnM6eHNkPVwiaHR0cDovL3d3dy53My5vcmcvMjAw\nMS9YTUxTY2hlbWFcIiB4bWxuczp4c2k9XCJodHRwOi8vd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVt\nYS1pbnN0YW5jZVwiIHRhcmdldE5hbWVzcGFjZT1cImh0dHA6Ly93d3cuY2FtdW5kYS5vcmcvdGVz\ndFwiPjxwcm9jZXNzIGlkPVwiZXhhbXBsZV9lbGFzdGljc2VhcmNoX3F1ZXJ5X2Zyb21fYXJ0aWZh\nY3RcIiBpc0V4ZWN1dGFibGU9XCJ0cnVlXCIgbmFtZT1cIkV4YW1wbGU6IEVsYXN0aWNTZWFyY2gg\nUXVlcnkgZnJvbSBBcnRpZmFjdFwiPjxkb2N1bWVudGF0aW9uPjwhW0NEQVRBW0FuIGV4YW1wbGUg\nd2hpY2ggYXR0ZW1wdHMgdG8gcXVlcnkgRWxhc3RpY1NlYXJjaCB1c2luZyBkYXRhIGdhdGhlcmVk\nIGZyb20gYW4gYXJ0aWZhY3QuIEludGVuZGVkIHRvIGJlIHVzZWQgb24gYW4gYXJ0aWZhY3Qgb2Yg\ndHlwZSAnU3RyaW5nJ11dPjwvZG9jdW1lbnRhdGlvbj48c3RhcnRFdmVudCBpZD1cIlN0YXJ0RXZl\nbnRfMTU1YXN4bVwiPjxvdXRnb2lu
Zz5TZXF1ZW5jZUZsb3dfMGFldHNpMzwvb3V0Z29pbmc+PC9z\ndGFydEV2ZW50PjxzZXJ2aWNlVGFzayBpZD1cIlNlcnZpY2VUYXNrXzBlNTZyMjlcIiBuYW1lPVwi\nRWxhc3RpY1NlYXJjaCBVdGlsaXRpZXM6IFF1ZXJ5XCIgcmVzaWxpZW50OnR5cGU9XCJmdW5jdGlv\nblwiPjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1aWQ9XCI0ZjEwMzQ5\nMC01OTVkLTRhYjktYmEzMC04MjAyYzhkZGZlOWRcIj57XCJpbnB1dHNcIjp7fSxcInByZV9wcm9j\nZXNzaW5nX3NjcmlwdFwiOlwiaWYgYXJ0aWZhY3QudmFsdWUgaXMgbm90IE5vbmU6XFxuICBpbnB1\ndHMuZXNfcXVlcnkgPSBhcnRpZmFjdC52YWx1ZVwiLFwicG9zdF9wcm9jZXNzaW5nX3NjcmlwdFwi\nOlwiXFxcIlxcXCJcXFwiXFxuIyBBbiBFeGFtcGxlIG9mIHRoZSByZXN1bHQgb2JqZWN0IFxcbiAg\nICByZXN1bHRzID0ge1xcbiAgICAgICAgXFxcImlucHV0c1xcXCI6IHtcXG4gICAgICAgICAgXFxc\nImVzX3F1ZXJ5XFxcIjogeyBcXFwicXVlcnlcXFwiOiB7IFxcXCJtYXRjaF9hbGxcXFwiOiB7fSB9\nIH0sXFxuICAgICAgICAgIFxcXCJlc19kb2NfdHlwZVxcXCI6IGxvZ3MsXFxuICAgICAgICAgIFxc\nXCJlc19pbmRleFxcXCIgOiBteV9sb2dzdG9yZVxcbiAgICAgICAgfSxcXG4gICAgICAgIFxcXCJx\ndWVyeV9yZXN1bHRzXFxcIjogW1xcbiAgICAgICAgICAmbHQ7ZWxhc3RpY3NlYXJjaC1yZWNvcmQm\nZ3Q7LFxcbiAgICAgICAgXFxcInN1Y2Nlc3NcXFwiOiBUcnVlIC8gRmFsc2UsXFxuICAgICAgICBc\nXFwibWF0Y2hlZF9yZWNvcmRzXFxcIjogMTAwMCxcXG4gICAgICAgIFxcXCJyZXR1cm5lZF9yZWNv\ncmRzXFxcIjogMTAwXFxuICAgIH1cXG4gICAgTm90ZTogVGhlIHNjaGVtYSBvZiBlbGFzdGljc2Vh\ncmNoLXJlY29yZDsgb3V0bGluZWQgYWJvdmUsIHdpbGwgcmVmbGVjdCB0aGUgc3RydWN0dXJlIG9m\nIHlvdXIgZGF0YSBpbiBFbGFzdGljIGl0c2VsZlxcblxcXCJcXFwiXFxcIlxcblxcbmlmIHJlc3Vs\ndHMubWF0Y2hlZF9yZWNvcmRzOlxcbiAgbm90ZVRleHQgPSBcXFwiXFxcIlxcXCImbHQ7YiZndDtF\nbGFzdGljU2VhcmNoIFF1ZXJ5IHN0YXR1cyZsdDsvYiZndDtcXG4gICAgICAgICAgICAgICAgJmx0\nO2JyJmd0OyBRdWVyeSBzdXBwbGllZDogJmx0O2ImZ3Q7ezB9Jmx0Oy9iJmd0O1xcbiAgICAgICAg\nICAgICAgICAmbHQ7YnImZ3Q7IFRvdGFsIG1hdGNoZWQgcmVjb3JkcyA6Jmx0O2ImZ3Q7ezF9Jmx0\nOy9iJmd0O1xcXCJcXFwiXFxcIi5mb3JtYXQocmVzdWx0cy5pbnB1dHNbXFxcImVzX3F1ZXJ5XFxc\nIl0sIHJlc3VsdHMubWF0Y2hlZF9yZWNvcmRzKVxcbiAgXFxuICBpZiByZXN1bHRzLnJldHVybmVk\nX3JlY29yZHMgIT0gMDpcXG4gICAgbm90ZVRleHQgKz0gXFxcIlxcXCJcXFwiJmx0O2JyJmd0OyBU\nb3RhbCByZXR1cm5lZCByZWNvcmRzIDogJmx0O2ImZ3Q7ezB9Jmx0Oy9iJmd0O1xcXCJcXFwiXFxc\nIi5mb3JtYXQocmVzdWx0cy5yZXR1cm5lZF9yZWNvcmRzKVxcbiAgaW5jaWRlbnQuYWRkTm90ZSho\nZWxwZXIuY3JlYXRlUmljaFRleHQobm90ZVRleHQpKVwifTwvcmVzaWxpZW50OmZ1bmN0aW9uPjwv\nZXh0ZW5zaW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNlRmxvd18wYWV0c2kzPC9pbmNvbWlu\nZz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzF5ejc3cHc8L291dGdvaW5nPjwvc2VydmljZVRhc2s+\nPGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRfMGdpenFyZFwiPjxpbmNvbWluZz5TZXF1ZW5jZUZsb3df\nMXl6NzdwdzwvaW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VG\nbG93XzBhZXRzaTNcIiBzb3VyY2VSZWY9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiB0YXJnZXRSZWY9\nXCJTZXJ2aWNlVGFza18wZTU2cjI5XCIvPjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3df\nMXl6Nzdwd1wiIHNvdXJjZVJlZj1cIlNlcnZpY2VUYXNrXzBlNTZyMjlcIiB0YXJnZXRSZWY9XCJF\nbmRFdmVudF8wZ2l6cXJkXCIvPjx0ZXh0QW5ub3RhdGlvbiBpZD1cIlRleHRBbm5vdGF0aW9uXzFr\neHhpeXRcIj48dGV4dD5TdGFydCB5b3VyIHdvcmtmbG93IGhlcmU8L3RleHQ+PC90ZXh0QW5ub3Rh\ndGlvbj48YXNzb2NpYXRpb24gaWQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4XCIgc291cmNlUmVmPVwi\nU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiVGV4dEFubm90YXRpb25fMWt4eGl5dFwi\nLz48dGV4dEFubm90YXRpb24gaWQ9XCJUZXh0QW5ub3RhdGlvbl8wNTFhdGllXCI+PHRleHQ+VGFr\nZXMgaW4gYW4gZWxhc3RpY3NlYXJjaCBxdWVyeSBhbmQgb3B0aW9uYWxseSwgYW4gaW5kZXggYW5k\nIGRvY190eXBlIHRvIHNlYXJjaCBhZ2FpbnN0PC90ZXh0PjwvdGV4dEFubm90YXRpb24+PGFzc29j\naWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMXRhOHphd1wiIHNvdXJjZVJlZj1cIlNlcnZpY2VUYXNr\nXzBlNTZyMjlcIiB0YXJnZXRSZWY9XCJUZXh0QW5ub3RhdGlvbl8wNTFhdGllXCIvPjx0ZXh0QW5u\nb3RhdGlvbiBpZD1cIlRleHRBbm5vdGF0aW9uXzAwam42YmNcIj48dGV4dD5SZXR1cm5zIHJlc3Vs\ndCBvZiBxdWVyeSBpbmNsdWRpbmcgaG93IG1hbnkgbWF0Y2hlZCBhbmQgcmV0dXJuZWQgcmVjb
3Jk\ncy4gU2F2ZXMgcXVlcnkgaW5mb3JtYXRpb24gaW4gYSByaWNoIHRleHQgbm90ZTwvdGV4dD48L3Rl\neHRBbm5vdGF0aW9uPjxhc3NvY2lhdGlvbiBpZD1cIkFzc29jaWF0aW9uXzB0Nng5MnNcIiBzb3Vy\nY2VSZWY9XCJTZXJ2aWNlVGFza18wZTU2cjI5XCIgdGFyZ2V0UmVmPVwiVGV4dEFubm90YXRpb25f\nMDBqbjZiY1wiLz48L3Byb2Nlc3M+PGJwbW5kaTpCUE1ORGlhZ3JhbSBpZD1cIkJQTU5EaWFncmFt\nXzFcIj48YnBtbmRpOkJQTU5QbGFuZSBicG1uRWxlbWVudD1cInVuZGVmaW5lZFwiIGlkPVwiQlBN\nTlBsYW5lXzFcIj48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlN0YXJ0RXZlbnRfMTU1\nYXN4bVwiIGlkPVwiU3RhcnRFdmVudF8xNTVhc3htX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9\nXCIzNlwiIHdpZHRoPVwiMzZcIiB4PVwiMTYyXCIgeT1cIjE4OFwiLz48YnBtbmRpOkJQTU5MYWJl\nbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjBcIiB3aWR0aD1cIjkwXCIgeD1cIjE1N1wiIHk9XCIy\nMjNcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5T\naGFwZSBicG1uRWxlbWVudD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIiBpZD1cIlRleHRBbm5v\ndGF0aW9uXzFreHhpeXRfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjMwXCIgd2lkdGg9XCIx\nMDBcIiB4PVwiOTlcIiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5F\nZGdlIGJwbW5FbGVtZW50PVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIGlkPVwiQXNzb2NpYXRpb25f\nMXNldWo0OF9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMTY5XCIgeHNpOnR5cGU9XCJvbWdkYzpQ\nb2ludFwiIHk9XCIyMjBcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIxNTNcIiB4c2k6dHlwZT1cIm9t\nZ2RjOlBvaW50XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFw\nZSBicG1uRWxlbWVudD1cIlNlcnZpY2VUYXNrXzBlNTZyMjlcIiBpZD1cIlNlcnZpY2VUYXNrXzBl\nNTZyMjlfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgwXCIgd2lkdGg9XCIxMDBcIiB4PVwi\nMzY5XCIgeT1cIjE2NlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1OU2hhcGUgYnBt\nbkVsZW1lbnQ9XCJFbmRFdmVudF8wZ2l6cXJkXCIgaWQ9XCJFbmRFdmVudF8wZ2l6cXJkX2RpXCI+\nPG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzNlwiIHdpZHRoPVwiMzZcIiB4PVwiNjQ0XCIgeT1cIjE4\nOFwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9\nXCIwXCIgeD1cIjY2MlwiIHk9XCIyMjdcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQ\nTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiU2VxdWVuY2VGbG93XzBhZXRz\naTNcIiBpZD1cIlNlcXVlbmNlRmxvd18wYWV0c2kzX2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIx\nOThcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48b21nZGk6d2F5cG9pbnQg\neD1cIjM2OVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxicG1uZGk6QlBN\nTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiMjgzLjVc\nIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRp\nOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiU2VxdWVuY2VGbG93XzF5ejc3cHdcIiBpZD1cIlNlcXVl\nbmNlRmxvd18xeXo3N3B3X2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCI0NjlcIiB4c2k6dHlwZT1c\nIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjY0NFwiIHhzaTp0\neXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpC\nb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiNTU2LjVcIiB5PVwiMTg0XCIvPjwv\nYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1u\nRWxlbWVudD1cIlRleHRBbm5vdGF0aW9uXzA1MWF0aWVcIiBpZD1cIlRleHRBbm5vdGF0aW9uXzA1\nMWF0aWVfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjk2XCIgd2lkdGg9XCIxMzdcIiB4PVwi\nMjA5XCIgeT1cIjc3XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5F\nbGVtZW50PVwiQXNzb2NpYXRpb25fMXRhOHphd1wiIGlkPVwiQXNzb2NpYXRpb25fMXRhOHphd19k\naVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMzY5XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9\nXCIxNzdcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIzNDZcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50\nXCIgeT1cIjE2NFwiLz48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxl\nbWVudD1cIlRleHRBbm5vdGF0aW9uXzAwam42YmNcIiBpZD1cIlRleHRBbm5vdGF0aW9uXzAwam42\nYmNfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEwMVwiIHdpZHRoPVwiMTI5XCIgeD1cIjUw\nNFwiIHk9XCI4MFwiLz48L2JwbW5kaTpCUE1OU2hh
cGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxl\nbWVudD1cIkFzc29jaWF0aW9uXzB0Nng5MnNcIiBpZD1cIkFzc29jaWF0aW9uXzB0Nng5MnNfZGlc\nIj48b21nZGk6d2F5cG9pbnQgeD1cIjQ2OVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwi\nMTgxXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNTA0XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwi\nIHk9XCIxNjNcIi8+PC9icG1uZGk6QlBNTkVkZ2U+PC9icG1uZGk6QlBNTlBsYW5lPjwvYnBtbmRp\nOkJQTU5EaWFncmFtPjwvZGVmaW5pdGlvbnM+IiwgInZlcnNpb24iOiAxMDcsICJ3b3JrZmxvd19p\nZCI6ICJleGFtcGxlX2VsYXN0aWNzZWFyY2hfcXVlcnlfZnJvbV9hcnRpZmFjdCJ9LCAiYWN0aW9u\ncyI6IFtdfV0sICJyb2xlcyI6IFtdLCAid29ya3NwYWNlcyI6IFtdfQ==\n\"\"\"\n )", "title": "" }, { "docid": "5a6826193167909dd521e00320051bbd", "score": "0.48198164", "text": "def customize_distribution(paths, dist, config):\n base_config = config.base_config\n\n # First, copy duplicate the bundle, optionally to a new name.\n if dist.channel_customize:\n commands.move_file(\n os.path.join(paths.work, base_config.app_dir),\n os.path.join(paths.work, config.app_dir))\n\n macos_dir = os.path.join(paths.work, config.app_dir, 'Contents',\n 'MacOS')\n commands.move_file(\n os.path.join(macos_dir, base_config.app_product),\n os.path.join(macos_dir, config.app_product))\n\n _modify_plists(paths, dist, config)\n _process_entitlements(paths, dist, config)\n\n if dist.creator_code:\n pkg_info_file = os.path.join(paths.work, config.app_dir, 'Contents',\n 'PkgInfo')\n commands.write_file(pkg_info_file, 'APPL{}'.format(dist.creator_code))\n\n if dist.channel_customize:\n _replace_icons(paths, dist, config)\n _rename_enterprise_manifest(paths, dist, config)", "title": "" }, { "docid": "f019c6be3ca1cf043a429352f8779173", "score": "0.48173362", "text": "def codegen(request, response):\n\n args = {}\n for argument in request.parameter.split(','):\n (key, value) = argument.split('=', 1)\n values = value.split(':')\n if len(values) == 1:\n args[key] = values[0]\n else:\n args[key] = values\n\n legacy_mode_enabled = ('legacy_mode_enabled' in args and\n args['legacy_mode_enabled'].lower() == 'true')\n\n gen_dependencies = ('gen_dependencies' in args and\n args['gen_dependencies'].lower() == 'true')\n\n codegen_reference_mode = ('codegen_reference_mode' in args and\n args['codegen_reference_mode'].lower() == 'true')\n\n if args['templates'] is None or not args['templates']:\n raise Exception('wdl_plugin: \\'templates\\' argument is empty')\n\n if isinstance(args['templates'], list):\n template_files = args['templates']\n else:\n template_files = [\n args['templates'],\n ]\n\n template_languages = {\n 'c': template_set.TemplateLanguage.C,\n 'cpp': template_set.TemplateLanguage.CPLUSPLUS,\n 'java': template_set.TemplateLanguage.JAVA,\n 'js': template_set.TemplateLanguage.JAVASCRIPT,\n 'md': template_set.TemplateLanguage.MARKDOWN,\n 'objc': template_set.TemplateLanguage.OBJECTIVEC,\n }\n if 'language' not in args or args['language'] not in template_languages:\n language = template_set.TemplateLanguage.BASE\n else:\n language = template_languages[args['language']]\n\n schema_obj = schema.Schema()\n schema_parser = nwv_parser.Parser(schema_obj)\n\n file_descs_to_gen = [\n proto_file for proto_file in request.proto_file\n if (('semantic' not in proto_file.name)\n and ('retention' not in proto_file.name))\n ]\n\n dependency_set = []\n\n # This file needs to get added to the dependency list if we're in\n # codegen mode, since this file doesn't show up as a dependency by\n # default, but is still necessary for some code-generated targets.\n if (codegen_reference_mode):\n dependency_set.append('google/protobuf/field_mask.proto')\n\n for 
proto_file in file_descs_to_gen:\n dependency_set.append(proto_file.name)\n\n schema_parser.add_file_set(file_descs_to_gen)\n\n gwvc.check(schema_obj)\n\n # Add two spaces to each log messages to make it line up with our output.\n template_set.log = lambda *a: print(' ', *a)\n\n templates = template_set.TemplateSet(\n template_files,\n legacy_mode_enabled=legacy_mode_enabled, language=language)\n\n if gen_dependencies:\n templates.codegen(schema_obj, None, dependency_set)\n else:\n templates.codegen(schema_obj, None, request.file_to_generate)\n\n for filename, content in templates.output_files:\n out_file = response.file.add()\n out_file.name = filename\n # The newline was added in the legacy template_set file writer,\n # so it's included here to preserve compatibility.\n out_file.content = content.encode('utf-8') + '\\n'.encode('utf-8')", "title": "" }, { "docid": "e538f40a1dd76be96a459a67bb8ce370", "score": "0.48164776", "text": "def module_config_template():\n\n d = {\n \"NerscFigureOfMerit\": {\n \"module\": \"modules.NERSC.transforms.NerscFigureOfMerit\",\n \"name\": \"NerscFigureOfMerit\",\n \"parameters\": {\n }\n }\n }\n\n print(\"Entry in channel cofiguration\")\n pprint.pprint(d)\n print(\"where\")\n print(\"\\t name - name of the class to be instantiated by task manager\")", "title": "" }, { "docid": "ab99ed10b9cfdc68548e672ef14c8b2d", "score": "0.4810402", "text": "def CompileControl(self, tweak_data, repo_settings):\n subfolder = PackageLister.FullPathCname(self, repo_settings)\n\n control_file = \"Architecture: iphoneos-arm\\n\"\n # Mandatory properties include name, bundle id, and version.\n control_file += \"Package: \" + tweak_data['bundle_id'] + \"\\n\"\n control_file += \"Name: \" + tweak_data['name'] + \"\\n\"\n control_file += \"Version: \" + tweak_data['version'] + \"\\n\"\n # Known properties\n control_file += \"Depiction: https://\" + repo_settings['cname'] + subfolder + \"/depiction/web/\" + tweak_data[\n 'bundle_id'] \\\n + \".html\\n\"\n control_file += \"SileoDepiction: https://\" + repo_settings['cname'] + subfolder + \"/depiction/native/\" \\\n + tweak_data['bundle_id'] + \".json\\n\"\n control_file += \"ModernDepiction: https://\" + repo_settings['cname'] + subfolder + \"/depiction/native/\" \\\n + tweak_data['bundle_id'] + \".json\\n\"\n control_file += \"Icon: https://\" + repo_settings['cname'] + subfolder + \"/assets/\" + tweak_data[\n 'bundle_id'] + \"/icon.png\\n\"\n\n # Optional properties\n try:\n if tweak_data['tagline']:\n # APT note: Multi-line descriptions are in the spec, but must be indicated with a leading space.\n control_file += \"Description: \" + tweak_data['tagline'].replace(\"\\n\\n\", \"\\n .\\n \").replace(\"\\n\", \"\\n \") + \"\\n\"\n except Exception:\n control_file += \"Description: An awesome package!\\n\"\n\n try:\n if tweak_data['homepage']:\n control_file += \"Homepage: \" + tweak_data['homepage'] + \"\\n\"\n except Exception:\n pass\n\n try:\n if tweak_data['section']:\n control_file += \"Section: \" + tweak_data['section'] + \"\\n\"\n except Exception:\n control_file += \"Section: Unknown\\n\"\n\n try:\n if tweak_data['pre_dependencies']:\n control_file += \"Pre-Depends: \" + tweak_data['pre_dependencies'] + \"\\n\"\n except Exception:\n pass\n\n try:\n if tweak_data['dependencies']:\n control_file += \"Depends: firmware (>=\" + tweak_data['works_min'] + \"), \" + tweak_data[\n 'dependencies'] + \"\\n\"\n except Exception:\n control_file += \"Depends: firmware (>=\" + tweak_data['works_min'] + \")\\n\"\n\n try:\n if 
tweak_data['conflicts']:\n control_file += \"Conflicts: \" + tweak_data['conflicts'] + \"\\n\"\n except Exception:\n pass\n\n try:\n if tweak_data['replaces']:\n control_file += \"Replaces: \" + tweak_data['replaces'] + \"\\n\"\n except Exception:\n pass\n\n try:\n if tweak_data['provides']:\n control_file += \"Provides: \" + tweak_data['provides'] + \"\\n\"\n except Exception:\n pass\n\n try:\n if tweak_data['build_depends']:\n control_file += \"Build-Depends: \" + tweak_data['build_depends'] + \"\\n\"\n except Exception:\n pass\n\n try:\n if tweak_data['recommends']:\n control_file += \"Recommends: \" + tweak_data['recommends'] + \"\\n\"\n except Exception:\n pass\n\n try:\n if tweak_data['suggests']:\n control_file += \"Suggests: \" + tweak_data['suggests'] + \"\\n\"\n except Exception:\n pass\n\n try:\n if tweak_data['enhances']:\n control_file += \"Enhances: \" + tweak_data['enhances'] + \"\\n\"\n except Exception:\n pass\n\n try:\n if tweak_data['breaks']:\n control_file += \"Breaks: \" + tweak_data['breaks'] + \"\\n\"\n except Exception:\n pass\n try:\n if tweak_data['tags']:\n control_file += \"Tags: compatible_min::ios\" + tweak_data['works_min'] + \", compatible_max::ios\" + tweak_data['works_max'] + \", \" + tweak_data['tags'] + \"\\n\"\n except Exception:\n control_file += \"Tags: compatible_min::ios\" + tweak_data['works_min'] + \", compatible_max::ios\" + tweak_data['works_max'] + \"\\n\"\n\n try:\n if tweak_data['developer']:\n try:\n if tweak_data['developer']['email']:\n control_file += \"Author: \" + tweak_data['developer']['name'] + \" <\" + tweak_data['developer'][\n 'email'] + \">\\n\"\n except Exception:\n control_file += \"Author: \" + tweak_data['developer']['name'] + \"\\n\"\n except Exception:\n control_file += \"Author: Unknown\\n\"\n\n try:\n if tweak_data['maintainer']['email']:\n control_file += \"Maintainer: \" + tweak_data['maintainer']['name'] + \" <\" \\\n + tweak_data['maintainer']['email'] + \">\\n\"\n except Exception:\n try:\n control_file += \"Maintainer: \" + tweak_data['maintainer']['name'] + \"\\n\"\n except Exception:\n try:\n if tweak_data['developer']['email']:\n control_file += \"Maintainer: \" + tweak_data['developer']['name'] + \" <\" \\\n + tweak_data['developer']['email'] + \">\\n\"\n except Exception:\n try:\n control_file += \"Maintainer: \" + tweak_data['developer']['name'] + \"\\n\"\n except Exception:\n control_file += \"Maintainer: Unknown\\n\"\n\n try:\n if tweak_data['sponsor']:\n try:\n if tweak_data['sponsor']['email']:\n control_file += \"Sponsor: \" + tweak_data['sponsor']['name'] + \" <\" + tweak_data['sponsor'][\n 'email'] + \">\\n\"\n except Exception:\n control_file += \"Sponsor: \" + tweak_data['sponsor']['name'] + \"\\n\"\n except Exception:\n pass\n\n # other_control\n try:\n if tweak_data['other_control']:\n for line in tweak_data['other_control']:\n control_file += line + \"\\n\"\n except Exception:\n pass\n\n return control_file", "title": "" }, { "docid": "3e86ff33e3990dbe0385473522c63797", "score": "0.47996297", "text": "def tweaks(self) -> None:\n pass", "title": "" }, { "docid": "3e86ff33e3990dbe0385473522c63797", "score": "0.47996297", "text": "def tweaks(self) -> None:\n pass", "title": "" }, { "docid": "f87b066d738779efbdfedf4fde6bc280", "score": "0.47984815", "text": "def retro_schema(schema):\n output = wrap({\n \"mappings\": {\n typename: {\n \"dynamic_templates\": [\n retro_dynamic_template(*(t.items()[0]))\n for t in details.dynamic_templates\n ],\n \"properties\": retro_properties(details.properties)\n }\n 
for typename, details in schema.mappings.items()\n },\n \"settings\": schema.settings\n })\n return output", "title": "" }, { "docid": "eadd71af0e834f3fd3b3ba05130bd10d", "score": "0.47609848", "text": "def module_config_info():\n\n print(\"consumes\", CONSUMES)\n module_config_template()", "title": "" }, { "docid": "ae6ca12dc4c119b83bcd70aacceecc9e", "score": "0.47553256", "text": "def upgrade():\n connection = op.get_bind()\n create_custom_attribute(connection, 'Control Narrative',\n 'Rich Text', 'control')\n create_custom_attribute(connection, 'Operating Procedure',\n 'Rich Text', 'control')", "title": "" }, { "docid": "42111e4fbf63f11311f1a9e8cc21e9e2", "score": "0.47531417", "text": "def config(id: str = Config(\"metadata.id\"),\n name: dict = Config(\"metadata.name\"),\n description: dict = Config(\"metadata.description\"),\n language: str = Config(\"metadata.language\"),\n modes: list = Config(\"korp.modes\"),\n protected: bool = Config(\"korp.protected\"),\n annotations: ExportAnnotationNames = ExportAnnotationNames(\"korp.annotations\"),\n source_annotations: SourceAnnotationsAllSourceFiles = SourceAnnotationsAllSourceFiles(\n \"korp.source_annotations\"),\n cwb_annotations: ExportAnnotationNames = ExportAnnotationNames(\"cwb.annotations\"),\n cwb_source_annotations: SourceAnnotationsAllSourceFiles = SourceAnnotationsAllSourceFiles(\n \"cwb.source_annotations\"),\n annotation_definitions: Optional[dict] = Config(\"korp.annotation_definitions\"),\n custom_annotations: Optional[list] = Config(\"korp.custom_annotations\"),\n morphology: Optional[list] = Config(\"korp.morphology\"),\n reading_mode: Optional[dict] = Config(\"korp.reading_mode\"),\n hidden_annotations: List[AnnotationName] = Config(\"korp.hidden_annotations\"),\n filters: Optional[list] = Config(\"korp.filters\"),\n sentence: Optional[AnnotationName] = AnnotationName(\"<sentence>\"),\n paragraph: Optional[AnnotationName] = AnnotationName(\"<paragraph>\"),\n installations: Optional[list] = Config(\"install\"),\n exports: Optional[list] = Config(\"export.default\"),\n scramble_on: Optional[AnnotationName] = AnnotationName(\"[cwb.scramble_on]\"),\n context: Optional[list] = Config(\"korp.context\"),\n within: Optional[list] = Config(\"korp.within\"),\n source_files: AllSourceFilenames = AllSourceFilenames(),\n token: AnnotationName = AnnotationName(\"<token>\"),\n remove_namespaces: bool = Config(\"export.remove_module_namespaces\", False),\n sparv_namespace: str = Config(\"export.sparv_namespace\"),\n source_namespace: str = Config(\"export.source_namespace\"),\n remote_host: Optional[str] = Config(\"korp.remote_host\"),\n config_dir: str = Config(\"korp.config_dir\"),\n out: Export = Export(\"korp.config/[metadata.id].yaml\")):\n config_dict = {\n \"id\": id,\n \"title\": name,\n \"description\": description,\n \"lang\": language,\n \"mode\": modes\n }\n optional = {\n \"limited_access\": protected,\n \"custom_attributes\": custom_annotations,\n \"morphology\": morphology,\n \"reading_mode\": reading_mode\n }\n\n config_dict.update({k: v for k, v in optional.items() if v})\n\n # Use CWB annotations if no specific Korp annotations are specified\n # TODO: Doesn't currently work, as annotations/source_annotations already inherits from export.[source_]annotations\n if not annotations:\n annotations = cwb_annotations\n if not source_annotations:\n source_annotations = cwb_source_annotations\n\n if not annotation_definitions:\n annotation_definitions = {}\n\n # Get annotation names\n annotation_list, token_attributes, 
export_names = util.export.get_annotation_names(\n annotations, source_annotations, source_files=source_files, token_name=token.name,\n remove_namespaces=remove_namespaces, sparv_namespace=sparv_namespace, source_namespace=source_namespace,\n keep_struct_names=True)\n\n # Context and within\n if not within and not context:\n # Figure out based on available annotations and scrambling\n within = []\n\n anns = set([split_annotation(a[0])[0] for a in itertools.chain(annotations, source_annotations or [])])\n if sentence and sentence.name in anns:\n within.append(export_names[sentence.name])\n\n if paragraph and paragraph.name in anns:\n # Check installation list or default export to figure out if corpus is scrambled\n scrambled = True\n if installations:\n if \"cwb:install_corpus_scrambled\" in installations:\n scrambled = True\n elif \"cwb:install_corpus\" in installations:\n scrambled = False\n elif exports:\n if \"cwb:encode_scrambled\" in exports:\n scrambled = True\n elif \"cwb:encode\" in exports:\n scrambled = False\n else:\n logger.warning(\"Couldn't determine if corpus is scrambled. Assuming it is scrambled.\")\n if not (scrambled and sentence and scramble_on == sentence):\n within.append(export_names[paragraph.name])\n\n if within and not context:\n context = [v if isinstance(v, str) else v[\"value\"] for v in within]\n elif context and not within:\n within = [v.split(\" \", 1)[1] if isinstance(v, str) else v[\"value\"].split(\" \", 1)[1] for v in context]\n elif not within and not context:\n logger.warning(\"Couldn't figure out 'context' and 'within' automatically. Set at least one of them manually in \"\n \"the config.\")\n\n if within:\n config_dict[\"within\"] = []\n for v in within:\n if isinstance(v, str):\n n = 0\n if \" \" in v:\n n, _, v = v.partition(\" \")\n if v in LABELS:\n i = 1 if int(n) > 1 else 0\n label = {lang: f\"{n} {val[i]}\" if n else val[i] for lang, val in LABELS[v].items()}\n else:\n label = {\"swe\": f\"{n} {v}\" if n else v, \"eng\": f\"{n} {v}\" if n else v}\n w = {\n \"value\": f\"{n} {v}\" if n else v,\n \"label\": label\n }\n else:\n w = v\n config_dict[\"within\"].append(w)\n if context:\n config_dict[\"context\"] = []\n for v in context:\n if isinstance(v, str):\n n = 1\n if \" \" in v:\n n, _, v = v.partition(\" \")\n if v in LABELS:\n i = 1 if int(n) > 1 else 0\n label = {lang: f\"{n} {val[i]}\" for lang, val in LABELS[v].items()}\n else:\n label = {\"swe\": f\"{n} {v}\", \"eng\": f\"{n} {v}\"}\n c = {\n \"value\": f\"{n} {v}\",\n \"label\": label\n }\n else:\n c = v\n config_dict[\"context\"].append(c)\n\n # Annotations\n presets = get_presets(remote_host, config_dir)\n token_annotations = []\n struct_annotations = []\n\n for annotation in annotation_list:\n export_name = export_names.get(annotation.name, annotation.name)\n # Skip certain annotations unless explicitly listed in annotation_definitions\n if (annotation.name in hidden_annotations or annotation.attribute_name is None or export_name.split(\":\", 1)[\n -1].startswith(\"_\")) and annotation.name not in annotation_definitions and not (\n reading_mode and export_name in READING_MODE_ANNOTATIONS\n ):\n logger.debug(f\"Skipping annotation {annotation.name!r}\")\n continue\n export_name_cwb = cwb_escape(export_name.replace(\":\", \"_\"))\n is_token = annotation.annotation_name == token.name\n definition: Union[str, dict] = annotation_definitions.get(annotation.name, export_name_cwb)\n\n if isinstance(definition, str): # Referring to a preset\n # Check that preset exists\n if definition not in 
presets:\n logger.warning(\n f\"{annotation.name!r} is missing a definition, and {definition!r} is not available as a \"\n \"preset. Annotation will not be included.\")\n continue\n if not is_token and presets[definition] == \"positional\":\n # Non-token annotation used as a token-annotation in Korp\n is_token = True\n elif \"preset\" in definition: # Extending a preset\n if definition[\"preset\"] not in presets:\n logger.warning(f\"{annotation.name!r} refers to a non-existent preset. Annotation will not be included.\")\n continue\n if not is_token:\n # Check if non-token annotation should be used as a token-annotation in Korp\n if definition.get(\"use_as_positional\") or presets[definition[\"preset\"]] == \"positional\":\n is_token = True\n definition[\"is_struct_attr\"] = True\n definition.pop(\"use_as_positional\", None)\n elif not is_token:\n # Check if non-token annotation should be used as a token-annotation in Korp\n if definition.get(\"use_as_positional\"):\n is_token = True\n definition[\"is_struct_attr\"] = True\n definition.pop(\"use_as_positional\", None)\n\n if is_token:\n token_annotations.append({export_name_cwb: definition})\n else:\n struct_annotations.append({export_name_cwb: definition})\n\n config_dict[\"struct_attributes\"] = struct_annotations\n config_dict[\"pos_attributes\"] = token_annotations\n\n if filters:\n config_dict[\"attribute_filters\"] = []\n for a in filters:\n config_dict[\"attribute_filters\"].append(cwb_escape(export_names[a].replace(\":\", \"_\")))\n\n with open(out, \"w\", encoding=\"utf-8\") as out_yaml:\n out_yaml.write(\"# This file was automatically generated by Sparv. Do not make changes directly to this file as \"\n \"they will get overwritten.\\n\")\n out_yaml.write(dict_to_yaml(config_dict))", "title": "" }, { "docid": "80867d639cfa0193bc215c6a33a12e08", "score": "0.47512046", "text": "def GenerateProductMixIdeas(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "c73acd9b3fccf37e7ab3928baa9377fd", "score": "0.4744066", "text": "def main():\r\n product = sys.argv[1]\r\n configfile = sys.argv[2]\r\n outputfile = sys.argv[3]\r\n \r\n try:\r\n alreadyDone = {}\r\n builder = configuration.NestedConfigurationBuilder(open(configfile, 'r'))\r\n config_set = builder.getConfiguration()\r\n outfile = open(outputfile, \"w+\")\r\n outfile.write(\"Variant,Default language,Languages\\n\")\r\n for variant in config_set.getConfigurations():\r\n if variant.type and variant.type.startswith(\"langpack_\"):\r\n if variant['PRODUCT_NAME'] not in alreadyDone:\r\n alreadyDone[variant['PRODUCT_NAME']] = {}\r\n if variant.type not in alreadyDone[variant['PRODUCT_NAME']] and variant['PRODUCT_NAME'] == product:\r\n alreadyDone[variant['PRODUCT_NAME']][variant.type] = True\r\n v_info = VariantInfo(variant['ROFS2_DIR'])\r\n if len(str(v_info)) > 0:\r\n outfile.write(str(v_info)+\"\\n\")\r\n outfile.close()\r\n except IOError, exc:\r\n print \"ERROR: %s\" % exc\r\n sys.exit(-1)\r\n \r\n sys.exit(0)", "title": "" }, { "docid": "0d321d8246be0d3862f34b7dd1c298e8", "score": "0.4737784", "text": "def module_config_template():\n\n template = {\n \"GridFigureOfMerit\": {\n \"module\": \"decisionengine_modules.glideinwms.transforms.grid_figure_of_merit\",\n \"name\": \"GridFigureOfMerit\",\n \"parameters\": {},\n }\n }\n\n print('Entry in channel configuration')\n pprint.pprint(template)", "title": "" }, { "docid": 
"88e702d3af8db4fa49f2b8a269722148", "score": "0.4714184", "text": "def custom_openapi() -> dict[str, t.Any]:\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(\n title=\"Pixels API\",\n description=None,\n version=Server.VERSION,\n routes=app.routes,\n )\n app.openapi_schema = openapi_schema\n return app.openapi_schema", "title": "" }, { "docid": "25121b88df82e91f30d49f37ea7ab0c5", "score": "0.47125274", "text": "def install_client_package_impl(self):", "title": "" }, { "docid": "bfc388dca0d9a2b627dc039893dfc716", "score": "0.47045922", "text": "def create_spec(self):\n self.add_definitions()\n if \"rpconly\" in self.resourcedef and self.resourcedef[\"rpconly\"]:\n resp_comp = utils.add_rpcresponses()\n self.openapi[\"components\"][\"responses\"] = resp_comp\n self.add_rpcverbs()\n else:\n self.add_responses()\n self.inresponses = copy.deepcopy(utils.generate_default_response())\n self.inresponses[\"200\"] = {\"$ref\": (\"#/components/responses/Ok\")}\n self.inresponses.update(\n copy.deepcopy(utils.generate_create_response())\n )\n self.delresponses = copy.deepcopy(\n utils.generate_default_response()\n )\n self.delresponses[\"204\"] = {\n \"$ref\": (\"#/components/responses/NoContent\")\n }\n err = self.add_parameters()\n if not err:\n self.add_basepath()\n self.add_pkpath()\n if self.hasbody:\n self.add_extrapaths()\n self.add_rpcverbs()", "title": "" }, { "docid": "0ceef1b9b0c21c08bebf4e5a8d6f2a24", "score": "0.4691281", "text": "def compile_meta(self, configuration):\n return None", "title": "" }, { "docid": "6cef34b3bb3f0a19abd0d5f50647464c", "score": "0.46902937", "text": "def generate_c(generator_arguments_file: str):\n mapping = {\n 'idl__rosidl_typesupport_introspection_c.h.em':\n 'detail/%s__rosidl_typesupport_introspection_c.h',\n 'idl__type_support.c.em': 'detail/%s__type_support.c',\n }\n return generate_files(generator_arguments_file, mapping)", "title": "" }, { "docid": "a70c936a1e51dc3de5a323f19f867e04", "score": "0.46898988", "text": "def customization_data(client=None):\n\n yield 
ImportDefinition(u\"\"\"\neyJhY3Rpb25fb3JkZXIiOiBbXSwgImFjdGlvbnMiOiBbeyJhdXRvbWF0aW9ucyI6IFtdLCAiY29u\nZGl0aW9ucyI6IFt7ImV2YWx1YXRpb25faWQiOiBudWxsLCAiZmllbGRfbmFtZSI6ICJhcnRpZmFj\ndC50eXBlIiwgIm1ldGhvZCI6ICJpbiIsICJ0eXBlIjogbnVsbCwgInZhbHVlIjogWyJFbWFpbCBT\nZW5kZXIiLCAiRW1haWwgUmVjaXBpZW50Il19XSwgImVuYWJsZWQiOiB0cnVlLCAiZXhwb3J0X2tl\neSI6ICJIYXZlIEkgQmVlbiBQd25lZCBTZWFyY2giLCAiaWQiOiA2MiwgImxvZ2ljX3R5cGUiOiAi\nYWxsIiwgIm1lc3NhZ2VfZGVzdGluYXRpb25zIjogW10sICJuYW1lIjogIkhhdmUgSSBCZWVuIFB3\nbmVkIFNlYXJjaCIsICJvYmplY3RfdHlwZSI6ICJhcnRpZmFjdCIsICJ0YWdzIjogW10sICJ0aW1l\nb3V0X3NlY29uZHMiOiA4NjQwMCwgInR5cGUiOiAxLCAidXVpZCI6ICJkZWExNjliNS0wZDc0LTQy\nZjYtYmNiZS1jNWI5OGNkNmRkYzkiLCAidmlld19pdGVtcyI6IFtdLCAid29ya2Zsb3dzIjogWyJo\nYXZlX2lfYmVlbl9wd25lZF9zZWFyY2giXX1dLCAiYXV0b21hdGljX3Rhc2tzIjogW10sICJleHBv\ncnRfZGF0ZSI6IDE1ODkzMDEyMDI1MTIsICJleHBvcnRfZm9ybWF0X3ZlcnNpb24iOiAyLCAiZmll\nbGRzIjogW3siYWxsb3dfZGVmYXVsdF92YWx1ZSI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogZmFs\nc2UsICJjYWxjdWxhdGVkIjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImNob3NlbiI6IGZh\nbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJkZXByZWNhdGVkIjogZmFs\nc2UsICJleHBvcnRfa2V5IjogIl9fZnVuY3Rpb24vZW1haWxfYWRkcmVzcyIsICJoaWRlX25vdGlm\naWNhdGlvbiI6IGZhbHNlLCAiaWQiOiAzMjAsICJpbnB1dF90eXBlIjogInRleHQiLCAiaW50ZXJu\nYWwiOiBmYWxzZSwgIm5hbWUiOiAiZW1haWxfYWRkcmVzcyIsICJvcGVyYXRpb25fcGVybXMiOiB7\nfSwgIm9wZXJhdGlvbnMiOiBbXSwgInBsYWNlaG9sZGVyIjogInRlc3RAZXhhbXBsZS5jb20iLCAi\ncHJlZml4IjogbnVsbCwgInJlYWRfb25seSI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0\nYWdzIjogW10sICJ0ZW1wbGF0ZXMiOiBbXSwgInRleHQiOiAiZW1haWxfYWRkcmVzcyIsICJ0b29s\ndGlwIjogIiIsICJ0eXBlX2lkIjogMTEsICJ1dWlkIjogIjExMzI3Nzk0LWE5NTctNDRjZC1hODVi\nLThiOTQxNDNiYzAyYyIsICJ2YWx1ZXMiOiBbXX0sIHsiZXhwb3J0X2tleSI6ICJpbmNpZGVudC9p\nbnRlcm5hbF9jdXN0b21pemF0aW9uc19maWVsZCIsICJpZCI6IDAsICJpbnB1dF90eXBlIjogInRl\neHQiLCAiaW50ZXJuYWwiOiB0cnVlLCAibmFtZSI6ICJpbnRlcm5hbF9jdXN0b21pemF0aW9uc19m\naWVsZCIsICJyZWFkX29ubHkiOiB0cnVlLCAidGV4dCI6ICJDdXN0b21pemF0aW9ucyBGaWVsZCAo\naW50ZXJuYWwpIiwgInR5cGVfaWQiOiAwLCAidXVpZCI6ICJiZmVlYzJkNC0zNzcwLTExZTgtYWQz\nOS00YTAwMDQwNDRhYTEifV0sICJmdW5jdGlvbnMiOiBbeyJjcmVhdG9yIjogeyJkaXNwbGF5X25h\nbWUiOiAiUmVzaWxpZW50IFN5c2FkbWluIiwgImlkIjogMywgIm5hbWUiOiAiYUBleGFtcGxlLmNv\nbSIsICJ0eXBlIjogInVzZXIifSwgImRlc2NyaXB0aW9uIjogeyJmb3JtYXQiOiAidGV4dCIsICJj\nb250ZW50IjogIkdldCBhbGwgdmVyaWZpZWQgYnJlYWNoZXMgb2YgYW4gZW1haWwgYWRkcmVzcyBm\ncm9tIEhhdmUgSSBCZWVuIFB3bmVkLiJ9LCAiZGVzdGluYXRpb25faGFuZGxlIjogImhpYnAiLCAi\nZGlzcGxheV9uYW1lIjogIkhhdmUgSSBCZWVuIFB3bmVkIEdldCBCcmVhY2hlcyIsICJleHBvcnRf\na2V5IjogImhhdmVfaV9iZWVuX3B3bmVkX2dldF9icmVhY2hlcyIsICJpZCI6IDM3LCAibGFzdF9t\nb2RpZmllZF9ieSI6IHsiZGlzcGxheV9uYW1lIjogIlJlc2lsaWVudCBTeXNhZG1pbiIsICJpZCI6\nIDMsICJuYW1lIjogImFAZXhhbXBsZS5jb20iLCAidHlwZSI6ICJ1c2VyIn0sICJsYXN0X21vZGlm\naWVkX3RpbWUiOiAxNTg5Mjk5NDExODAyLCAibmFtZSI6ICJoYXZlX2lfYmVlbl9wd25lZF9nZXRf\nYnJlYWNoZXMiLCAidGFncyI6IFtdLCAidXVpZCI6ICJmZTc1MTYxNy00ZWU5LTQzZTgtYjM1ZC1h\nNmJmNDE5YjY1ZDUiLCAidmVyc2lvbiI6IDEsICJ2aWV3X2l0ZW1zIjogW3siY29udGVudCI6ICIx\nMTMyNzc5NC1hOTU3LTQ0Y2QtYTg1Yi04Yjk0MTQzYmMwMmMiLCAiZWxlbWVudCI6ICJmaWVsZF91\ndWlkIiwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93X2lmIjogbnVsbCwgInNob3df\nbGlua19oZWFkZXIiOiBmYWxzZSwgInN0ZXBfbGFiZWwiOiBudWxsfV0sICJ3b3JrZmxvd3MiOiBb\neyJhY3Rpb25zIjogW10sICJkZXNjcmlwdGlvbiI6IG51bGwsICJuYW1lIjogIkhhdmUgSSBCZWVu\nIFB3bmVkIFNlYXJjaCIsICJvYmplY3RfdHlwZSI6ICJhcnRpZmFjdCIsICJwcm9ncmFtbWF0aWNf\nbmFtZSI6ICJoYXZlX2lfYmVlbl9wd25lZF9zZWFyY2giLCAidGFncyI6IFtdLCAidXVpZCI6IG51\nbGwsICJ3b3JrZmxvd19pZCI6IDM5fV19LCB7ImNyZWF0b3IiOiB7ImRpc3BsYXlfbmFtZSI6ICJS\nZXNpbGllbnQgU3lzYWR
taW4iLCAiaWQiOiAzLCAibmFtZSI6ICJhQGV4YW1wbGUuY29tIiwgInR5\ncGUiOiAidXNlciJ9LCAiZGVzY3JpcHRpb24iOiB7ImZvcm1hdCI6ICJ0ZXh0IiwgImNvbnRlbnQi\nOiAiR2V0IGFsbCBwYXN0ZXMgb2YgYW4gZW1haWwgYWNjb3VudCBmcm9tIEhhdmUgSSBCZWVuIFB3\nbmVkLiJ9LCAiZGVzdGluYXRpb25faGFuZGxlIjogImhpYnAiLCAiZGlzcGxheV9uYW1lIjogIkhh\ndmUgSSBCZWVuIFB3bmVkIEdldCBQYXN0ZXMiLCAiZXhwb3J0X2tleSI6ICJoYXZlX2lfYmVlbl9w\nd25lZF9nZXRfcGFzdGVzIiwgImlkIjogMzgsICJsYXN0X21vZGlmaWVkX2J5IjogeyJkaXNwbGF5\nX25hbWUiOiAiUmVzaWxpZW50IFN5c2FkbWluIiwgImlkIjogMywgIm5hbWUiOiAiYUBleGFtcGxl\nLmNvbSIsICJ0eXBlIjogInVzZXIifSwgImxhc3RfbW9kaWZpZWRfdGltZSI6IDE1ODkyOTk0MTE4\nNjgsICJuYW1lIjogImhhdmVfaV9iZWVuX3B3bmVkX2dldF9wYXN0ZXMiLCAidGFncyI6IFtdLCAi\ndXVpZCI6ICJkNDQ4YmVlNS1kODVlLTQ4NGUtYTkxYy0yNzRlYmFlNjcxMjMiLCAidmVyc2lvbiI6\nIDEsICJ2aWV3X2l0ZW1zIjogW3siY29udGVudCI6ICIxMTMyNzc5NC1hOTU3LTQ0Y2QtYTg1Yi04\nYjk0MTQzYmMwMmMiLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZpZWxkX3R5cGUiOiAiX19m\ndW5jdGlvbiIsICJzaG93X2lmIjogbnVsbCwgInNob3dfbGlua19oZWFkZXIiOiBmYWxzZSwgInN0\nZXBfbGFiZWwiOiBudWxsfV0sICJ3b3JrZmxvd3MiOiBbeyJhY3Rpb25zIjogW10sICJkZXNjcmlw\ndGlvbiI6IG51bGwsICJuYW1lIjogIkhhdmUgSSBCZWVuIFB3bmVkIFNlYXJjaCIsICJvYmplY3Rf\ndHlwZSI6ICJhcnRpZmFjdCIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJoYXZlX2lfYmVlbl9wd25l\nZF9zZWFyY2giLCAidGFncyI6IFtdLCAidXVpZCI6IG51bGwsICJ3b3JrZmxvd19pZCI6IDM5fV19\nXSwgImdlb3MiOiBudWxsLCAiZ3JvdXBzIjogbnVsbCwgImlkIjogMSwgImluYm91bmRfbWFpbGJv\neGVzIjogbnVsbCwgImluY2lkZW50X2FydGlmYWN0X3R5cGVzIjogW10sICJpbmNpZGVudF90eXBl\ncyI6IFt7InVwZGF0ZV9kYXRlIjogMTU4OTMwMTIwMTE4OCwgImNyZWF0ZV9kYXRlIjogMTU4OTMw\nMTIwMTE4OCwgInV1aWQiOiAiYmZlZWMyZDQtMzc3MC0xMWU4LWFkMzktNGEwMDA0MDQ0YWEwIiwg\nImRlc2NyaXB0aW9uIjogIkN1c3RvbWl6YXRpb24gUGFja2FnZXMgKGludGVybmFsKSIsICJleHBv\ncnRfa2V5IjogIkN1c3RvbWl6YXRpb24gUGFja2FnZXMgKGludGVybmFsKSIsICJuYW1lIjogIkN1\nc3RvbWl6YXRpb24gUGFja2FnZXMgKGludGVybmFsKSIsICJlbmFibGVkIjogZmFsc2UsICJzeXN0\nZW0iOiBmYWxzZSwgInBhcmVudF9pZCI6IG51bGwsICJoaWRkZW4iOiBmYWxzZSwgImlkIjogMH1d\nLCAiaW5kdXN0cmllcyI6IG51bGwsICJsYXlvdXRzIjogW10sICJsb2NhbGUiOiBudWxsLCAibWVz\nc2FnZV9kZXN0aW5hdGlvbnMiOiBbeyJhcGlfa2V5cyI6IFtdLCAiZGVzdGluYXRpb25fdHlwZSI6\nIDAsICJleHBlY3RfYWNrIjogdHJ1ZSwgImV4cG9ydF9rZXkiOiAiaGlicCIsICJuYW1lIjogImhp\nYnAiLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiaGlicCIsICJ0YWdzIjogW10sICJ1c2VycyI6IFsi\nYUBleGFtcGxlLmNvbSJdLCAidXVpZCI6ICJkY2M0NWY2Zi1lMzljLTQ3YzMtYWM5ZS0yMDc5NjEx\nZjRmNDcifV0sICJub3RpZmljYXRpb25zIjogbnVsbCwgIm92ZXJyaWRlcyI6IFtdLCAicGhhc2Vz\nIjogW10sICJyZWd1bGF0b3JzIjogbnVsbCwgInJvbGVzIjogW10sICJzY3JpcHRzIjogW10sICJz\nZXJ2ZXJfdmVyc2lvbiI6IHsiYnVpbGRfbnVtYmVyIjogNTI2MSwgIm1ham9yIjogMzQsICJtaW5v\nciI6IDAsICJ2ZXJzaW9uIjogIjM0LjAuNTI2MSJ9LCAidGFncyI6IFtdLCAidGFza19vcmRlciI6\nIFtdLCAidGltZWZyYW1lcyI6IG51bGwsICJ0eXBlcyI6IFtdLCAid29ya2Zsb3dzIjogW3siYWN0\naW9ucyI6IFtdLCAiY29udGVudCI6IHsidmVyc2lvbiI6IDExLCAid29ya2Zsb3dfaWQiOiAiaGF2\nZV9pX2JlZW5fcHduZWRfc2VhcmNoIiwgInhtbCI6ICI8P3htbCB2ZXJzaW9uPVwiMS4wXCIgZW5j\nb2Rpbmc9XCJVVEYtOFwiPz48ZGVmaW5pdGlvbnMgeG1sbnM9XCJodHRwOi8vd3d3Lm9tZy5vcmcv\nc3BlYy9CUE1OLzIwMTAwNTI0L01PREVMXCIgeG1sbnM6YnBtbmRpPVwiaHR0cDovL3d3dy5vbWcu\nb3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9ESVwiIHhtbG5zOm9tZ2RjPVwiaHR0cDovL3d3dy5vbWcu\nb3JnL3NwZWMvREQvMjAxMDA1MjQvRENcIiB4bWxuczpvbWdkaT1cImh0dHA6Ly93d3cub21nLm9y\nZy9zcGVjL0RELzIwMTAwNTI0L0RJXCIgeG1sbnM6cmVzaWxpZW50PVwiaHR0cDovL3Jlc2lsaWVu\ndC5pYm0uY29tL2JwbW5cIiB4bWxuczp4c2Q9XCJodHRwOi8vd3d3LnczLm9yZy8yMDAxL1hNTFNj\naGVtYVwiIHhtbG5zOnhzaT1cImh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hLWluc3Rh\nbmNlXCIgdGFyZ2V0TmFtZXNwYWNlPVwiaHR0cDovL3d3dy5jYW11bmRhLm9yZy90ZXN0XCI+PHBy\nb2Nlc3MgaWQ9XCJoYXZlX2lfYmVlbl9wd25lZF9zZWFyY2hcIiBpc0V4ZWN1dGFi
bGU9XCJ0cnVl\nXCIgbmFtZT1cIkhhdmUgSSBCZWVuIFB3bmVkIFNlYXJjaFwiPjxkb2N1bWVudGF0aW9uLz48c3Rh\ncnRFdmVudCBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3df\nMWYwYjI5ejwvb3V0Z29pbmc+PC9zdGFydEV2ZW50PjxzZXJ2aWNlVGFzayBpZD1cIlNlcnZpY2VU\nYXNrXzFzNWJ0dm5cIiBuYW1lPVwiSGF2ZSBJIEJlZW4gUHduZWQgR2V0IEJyZWFjaGVzXCIgcmVz\naWxpZW50OnR5cGU9XCJmdW5jdGlvblwiPjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1\nbmN0aW9uIHV1aWQ9XCJmZTc1MTYxNy00ZWU5LTQzZTgtYjM1ZC1hNmJmNDE5YjY1ZDVcIj57XCJp\nbnB1dHNcIjp7fSxcInBvc3RfcHJvY2Vzc2luZ19zY3JpcHRcIjpcIm1zZyA9IFxcXCJUaW1lc3Rh\nbXA6IFxcXCIgKyBzdHIocmVzdWx0cy5tZXRyaWNzW1xcXCJ0aW1lc3RhbXBcXFwiXSlcXG5pZiBy\nZXN1bHRzLmNvbnRlbnRbXFxcIkJyZWFjaGVzXFxcIl06XFxuICBtc2cgPSBtc2cgKyBcXFwiXFxc\nXG5CcmVhY2hlczogXFxcIiArIHN0cihsZW4ocmVzdWx0cy5jb250ZW50W1xcXCJCcmVhY2hlc1xc\nXCJdKSlcXG5lbHNlOlxcbiAgbXNnID0gbXNnICsgXFxcIlxcXFxuTm8gQnJlYWNoZXNcXFwiXFxu\nICBcXG5pZiBhcnRpZmFjdC5kZXNjcmlwdGlvbjpcXG4gIGFydGlmYWN0LmRlc2NyaXB0aW9uID0g\nYXJ0aWZhY3QuZGVzY3JpcHRpb24uY29udGVudCArICdcXFxcblxcXFxuJyArIG1zZ1xcbmVsc2U6\nXFxuICBhcnRpZmFjdC5kZXNjcmlwdGlvbiA9IG1zZ1xcblxcblxcbicnJyBFeGFtcGxlIHJlc3Bv\nbnNlXFxue1xcbiAgXFxcIklucHV0c1xcXCI6IHtcXG4gICAgXFxcImVtYWlsX2FkZHJlc3NcXFwi\nOiBcXFwidGVzdEBlbWFpbC5jb21cXFwiXFxuICB9LCBcXG4gIFxcXCJSdW4gVGltZVxcXCI6IFxc\nXCIyMDAwXFxcIixcXG4gIFxcXCJCcmVhY2hlc1xcXCI6XFxuICAgIFsgIFxcbiAgICAgICB7ICBc\nXG4gICAgICAgICAgdSdQd25Db3VudCc6MTQ5MzY2NzAsXFxuICAgICAgICAgIHUnRG9tYWluJzp1\nJzAwMHdlYmhvc3QuY29tJyxcXG4gICAgICAgICAgdSdJc1NlbnNpdGl2ZSc6RmFsc2UsXFxuICAg\nICAgICAgIHUnTmFtZSc6dScwMDB3ZWJob3N0JyxcXG4gICAgICAgICAgdSdUaXRsZSc6dScwMDB3\nZWJob3N0JyxcXG4gICAgICAgICAgdSdEYXRhQ2xhc3Nlcyc6WyAgXFxuICAgICAgICAgICAgIHUn\nRW1haWwgYWRkcmVzc2VzJyxcXG4gICAgICAgICAgICAgdSdJUCBhZGRyZXNzZXMnLFxcbiAgICAg\nICAgICAgICB1J05hbWVzJyxcXG4gICAgICAgICAgICAgdSdQYXNzd29yZHMnXFxuICAgICAgICAg\nIF0sXFxuICAgICAgICAgIHUnTG9nb1R5cGUnOnUncG5nJyxcXG4gICAgICAgICAgdSdJc1NwYW1M\naXN0JzpGYWxzZSxcXG4gICAgICAgICAgdSdJc1JldGlyZWQnOkZhbHNlLFxcbiAgICAgICAgICB1\nJ0JyZWFjaERhdGUnOnUnMjAxNS0wMy0wMScsXFxuICAgICAgICAgIHUnSXNGYWJyaWNhdGVkJzpG\nYWxzZSxcXG4gICAgICAgICAgdSdNb2RpZmllZERhdGUnOiAgICAgIHUnMjAxNy0xMi0xMFQyMTo0\nNDoyNyAgICAgIFonLFxcbiAgICAgICAgICB1J0FkZGVkRGF0ZSc6ICAgICAgdScyMDE1LTEwLTI2\nVDIzOjM1OjQ1ICAgICAgWicsXFxuICAgICAgICAgIHUnSXNWZXJpZmllZCc6VHJ1ZSxcXG4gICAg\nICAgICAgdSdEZXNjcmlwdGlvbic6dSdJbiBhcHByb3hpbWF0ZWx5IE1hcmNoIDIwMTUsXFxuICAg\nICAgICAgIHRoZSBmcmVlIHdlYiBob3N0aW5nIHByb3ZpZGVyICZsdDthIGhyZWY9XFxcImh0dHA6\nLy93d3cudHJveWh1bnQuY29tLzIwMTUvMTAvYnJlYWNoZXMtdHJhZGVycy1wbGFpbi10ZXh0LXBh\nc3N3b3Jkcy5odG1sXFxcIiB0YXJnZXQ9XFxcIl9ibGFua1xcXCIgcmVsPVxcXCJub29wZW5lclxc\nXCImZ3Q7MDAwd2ViaG9zdCBzdWZmZXJlZCBhIG1ham9yIGRhdGEgYnJlYWNoJmx0Oy9hJmd0OyB0\naGF0IGV4cG9zZWQgYWxtb3N0IDE1IG1pbGxpb24gY3VzdG9tZXIgcmVjb3Jkcy4gVGhlIGRhdGEg\nd2FzIHNvbGQgYW5kIHRyYWRlZCBiZWZvcmUgMDAwd2ViaG9zdCB3YXMgYWxlcnRlZCBpbiBPY3Rv\nYmVyLiBUaGUgYnJlYWNoIGluY2x1ZGVkIG5hbWVzLFxcbiAgICAgICAgICBlbWFpbCBhZGRyZXNz\nZXMgYW5kIHBsYWluIHRleHQgcGFzc3dvcmRzLidcXG4gICAgICAgfSxcXG4gICAgICAgeyAgXFxu\nICAgICAgICAgIHUnUHduQ291bnQnOjc5OTA2MTksXFxuICAgICAgICAgIHUnRG9tYWluJzp1Jzh0\ncmFja3MuY29tJyxcXG4gICAgICAgICAgdSdJc1NlbnNpdGl2ZSc6RmFsc2UsXFxuICAgICAgICAg\nIHUnTmFtZSc6dSc4dHJhY2tzJyxcXG4gICAgICAgICAgdSdUaXRsZSc6dSc4dHJhY2tzJyxcXG4g\nICAgICAgICAgdSdEYXRhQ2xhc3Nlcyc6WyAgXFxuICAgICAgICAgICAgIHUnRW1haWwgYWRkcmVz\nc2VzJyxcXG4gICAgICAgICAgICAgdSdQYXNzd29yZHMnXFxuICAgICAgICAgIF0sXFxuICAgICAg\nICAgIHUnTG9nb1R5cGUnOnUncG5nJyxcXG4gICAgICAgICAgdSdJc1NwYW1MaXN0JzpGYWxzZSxc\nXG4gICAgICAgICAgdSdJc1JldGlyZWQnOkZhbHNlLFxcbiAgICAgICAgICB1J0JyZWFjaERhdGUn\nOnUnMjAxNy0wNi0yNycsXFxuICAgICA
gICAgIHUnSXNGYWJyaWNhdGVkJzpGYWxzZSxcXG4gICAg\nICAgICAgdSdNb2RpZmllZERhdGUnOiAgICAgIHUnMjAxOC0wMi0xNlQwNzowOTozMCAgICAgIFon\nLFxcbiAgICAgICAgICB1J0FkZGVkRGF0ZSc6ICAgICAgdScyMDE4LTAyLTE2VDA3OjA5OjMwICAg\nICAgWicsXFxuICAgICAgICAgIHUnSXNWZXJpZmllZCc6VHJ1ZSxcXG4gICAgICAgICAgdSdEZXNj\ncmlwdGlvbic6dSdJbiBKdW5lIDIwMTcsXFxuICAgICAgICAgIHRoZSBvbmxpbmUgcGxheWxpc3Rz\nIHNlcnZpY2Uga25vd24gYXMgJmx0O2EgaHJlZj1cXFwiaHR0cHM6Ly9ibG9nLjh0cmFja3MuY29t\nLzIwMTcvMDYvMjcvcGFzc3dvcmQtc2VjdXJpdHktYWxlcnQvXFxcIiB0YXJnZXQ9XFxcIl9ibGFu\na1xcXCIgcmVsPVxcXCJub29wZW5lclxcXCImZ3Q7OFRyYWNrcyBzdWZmZXJlZCBhIGRhdGEgYnJl\nYWNoJmx0Oy9hJmd0OyB3aGljaCBpbXBhY3RlZCAxOCBtaWxsaW9uIGFjY291bnRzLiBJbiB0aGVp\nciBkaXNjbG9zdXJlLFxcbiAgICAgICAgICA4ICAgICAgVHJhY2tzIGFkdmlzZWQgdGhhdCAmYW1w\nO3F1b3Q7dGhlIHZlY3RvciBmb3IgdGhlIGF0dGFjayB3YXMgYW4gZW1wbG95ZWVcXFxcdTIwMTlz\nIEdpdEh1YiBhY2NvdW50LFxcbiAgICAgICAgICB3aGljaCB3YXMgbm90IHNlY3VyZWQgdXNpbmcg\ndHdvLWZhY3RvciBhdXRoZW50aWNhdGlvbiZhbXA7cXVvdDsuIFNhbHRlZCBTSEEtMSBwYXNzd29y\nZCBoYXNoZXMgZm9yIHVzZXJzIHdobyAmbHQ7ZW0mZ3Q7ZGlkblxcXFwndCZsdDsvZW0mZ3Q7IHNp\nZ24gdXAgd2l0aCBlaXRoZXIgR29vZ2xlIG9yIEZhY2Vib29rIGF1dGhlbnRpY2F0aW9uIHdlcmUg\nYWxzbyBpbmNsdWRlZC4gVGhlIGRhdGEgd2FzIHByb3ZpZGVkIHRvIEhJQlAgYnkgd2hpdGVoYXQg\nc2VjdXJpdHkgcmVzZWFyY2hlciBhbmQgZGF0YSBhbmFseXN0IEFkYW0gRGF2aWVzIGFuZCBjb250\nYWluZWQgYWxtb3N0IDggbWlsbGlvbiB1bmlxdWUgZW1haWwgYWRkcmVzc2VzLidcXG4gICAgICAg\nfSxcXG4gICAgICAgeyAgXFxuICAgICAgICAgIHUnUHduQ291bnQnOjEzNzI1NTAsXFxuICAgICAg\nICAgIHUnRG9tYWluJzp1J2FidXNld2l0aC51cycsXFxuICAgICAgICAgIHUnSXNTZW5zaXRpdmUn\nOkZhbHNlLFxcbiAgICAgICAgICB1J05hbWUnOnUnQWJ1c2VXaXRoVXMnLFxcbiAgICAgICAgICB1\nJ1RpdGxlJzp1J0FidXNlV2l0aC5VcycsXFxuICAgICAgICAgIHUnRGF0YUNsYXNzZXMnOlsgIFxc\nbiAgICAgICAgICAgICB1J0VtYWlsIGFkZHJlc3NlcycsXFxuICAgICAgICAgICAgIHUnSVAgYWRk\ncmVzc2VzJyxcXG4gICAgICAgICAgICAgdSdQYXNzd29yZHMnLFxcbiAgICAgICAgICAgICB1J1Vz\nZXJuYW1lcydcXG4gICAgICAgICAgXSxcXG4gICAgICAgICAgdSdMb2dvVHlwZSc6dSdwbmcnLFxc\nbiAgICAgICAgICB1J0lzU3BhbUxpc3QnOkZhbHNlLFxcbiAgICAgICAgICB1J0lzUmV0aXJlZCc6\nRmFsc2UsXFxuICAgICAgICAgIHUnQnJlYWNoRGF0ZSc6dScyMDE2LTA3LTAxJyxcXG4gICAgICAg\nICAgdSdJc0ZhYnJpY2F0ZWQnOkZhbHNlLFxcbiAgICAgICAgICB1J01vZGlmaWVkRGF0ZSc6ICAg\nICAgdScyMDE3LTEwLTA5VDExOjA4OjQ1ICAgICAgWicsXFxuICAgICAgICAgIHUnQWRkZWREYXRl\nJzogICAgICB1JzIwMTctMTAtMDlUMTE6MDg6NDUgICAgICBaJyxcXG4gICAgICAgICAgdSdJc1Zl\ncmlmaWVkJzpUcnVlLFxcbiAgICAgICAgICB1J0Rlc2NyaXB0aW9uJzp1J0luIDIwMTYsXFxuICAg\nICAgICAgIHRoZSBzaXRlIGRlZGljYXRlZCB0byBoZWxwaW5nIHBlb3BsZSBoYWNrIGVtYWlsIGFu\nZCBvbmxpbmUgZ2FtaW5nIGFjY291bnRzIGtub3duIGFzIEFidXNld2l0aC51cyBzdWZmZXJlZCBt\ndWx0aXBsZSBkYXRhIGJyZWFjaGVzLiBUaGUgc2l0ZSAmbHQ7YSBocmVmPVxcXCJodHRwczovL2ty\nZWJzb25zZWN1cml0eS5jb20vMjAxNy8wMi93aG8tcmFuLWxlYWtlZHNvdXJjZS1jb20vXFxcIiB0\nYXJnZXQ9XFxcIl9ibGFua1xcXCIgcmVsPVxcXCJub29wZW5lclxcXCImZ3Q7YWxsZWdlZGx5IGhh\nZCBhbiBhZG1pbmlzdHJhdG9yIGluIGNvbW1vbiB3aXRoIHRoZSBuZWZhcmlvdXMgTGVha2VkU291\ncmNlIHNpdGUmbHQ7L2EmZ3Q7LFxcbiAgICAgICAgICBib3RoIG9mIHdoaWNoIGhhdmUgc2luY2Ug\nYmVlbiBzaHV0IGRvd24uIFRoZSBleHBvc2VkIGRhdGEgaW5jbHVkZWQgbW9yZSB0aGFuIDEuMyBt\naWxsaW9uIHVuaXF1ZSBlbWFpbCBhZGRyZXNzZXMsXFxuICAgICAgICAgIG9mdGVuIGFjY29tcGFu\naWVkIGJ5IHVzZXJuYW1lcyxcXG4gICAgICAgICAgSVAgYWRkcmVzc2VzIGFuZCBwbGFpbiB0ZXh0\nIG9yIGhhc2hlZCBwYXNzd29yZHMgcmV0cmlldmVkIGZyb20gdmFyaW91cyBzb3VyY2VzIGFuZCBp\nbnRlbmRlZCB0byBiZSB1c2VkIHRvIGNvbXByb21pc2UgdGhlIHZpY3RpbXNcXFxcJyBhY2NvdW50\ncy4nXFxuICAgICAgIH1cXG4gICAgXVxcbn1cXG5cXG4nJydcIixcInByZV9wcm9jZXNzaW5nX3Nj\ncmlwdFwiOlwiaW5wdXRzLmVtYWlsX2FkZHJlc3MgPSBhcnRpZmFjdC52YWx1ZVwiLFwicmVzdWx0\nX25hbWVcIjpcIlwifTwvcmVzaWxpZW50OmZ1bmN0aW9uPjwvZXh0ZW5zaW9uRWxlbWVudHM+PGlu
\nY29taW5nPlNlcXVlbmNlRmxvd18xZjBiMjl6PC9pbmNvbWluZz48b3V0Z29pbmc+U2VxdWVuY2VG\nbG93XzFpaWVlMXA8L291dGdvaW5nPjwvc2VydmljZVRhc2s+PHNlcXVlbmNlRmxvdyBpZD1cIlNl\ncXVlbmNlRmxvd18xZjBiMjl6XCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFy\nZ2V0UmVmPVwiU2VydmljZVRhc2tfMXM1YnR2blwiLz48c2VydmljZVRhc2sgaWQ9XCJTZXJ2aWNl\nVGFza18wbHZnandqXCIgbmFtZT1cIkhhdmUgSSBCZWVuIFB3bmVkIEdldCBQYXN0ZXNcIiByZXNp\nbGllbnQ6dHlwZT1cImZ1bmN0aW9uXCI+PGV4dGVuc2lvbkVsZW1lbnRzPjxyZXNpbGllbnQ6ZnVu\nY3Rpb24gdXVpZD1cImQ0NDhiZWU1LWQ4NWUtNDg0ZS1hOTFjLTI3NGViYWU2NzEyM1wiPntcImlu\ncHV0c1wiOnt9LFwicG9zdF9wcm9jZXNzaW5nX3NjcmlwdFwiOlwibXNnID0gXFxcIlRpbWVzdGFt\ncDogXFxcIiArIHN0cihyZXN1bHRzLm1ldHJpY3NbXFxcInRpbWVzdGFtcFxcXCJdKVxcbmlmIHJl\nc3VsdHMuY29udGVudFtcXFwiUGFzdGVzXFxcIl06XFxuICBtc2cgPSBtc2cgKyBcXFwiXFxcXG5Q\nYXN0ZXM6IFxcXCIgKyBzdHIobGVuKHJlc3VsdHMuY29udGVudFtcXFwiUGFzdGVzXFxcIl0pKVxc\nbmVsc2U6XFxuICBtc2cgPSBtc2cgKyBcXFwiXFxcXG5ObyBQYXN0ZXNcXFwiXFxuICAgIFxcbmlm\nIGFydGlmYWN0LmRlc2NyaXB0aW9uOlxcbiAgYXJ0aWZhY3QuZGVzY3JpcHRpb24gPSBhcnRpZmFj\ndC5kZXNjcmlwdGlvbi5jb250ZW50ICsgJ1xcXFxuXFxcXG4nICsgbXNnXFxuZWxzZTpcXG4gIGFy\ndGlmYWN0LmRlc2NyaXB0aW9uID0gbXNnXFxuXFxuXFxuJycnIEV4YW1wbGUgUmVzcG9uc2VcXG57\nXFxuICBcXFwiSW5wdXRzXFxcIjoge1xcbiAgICBcXFwiZW1haWxfYWRkcmVzc1xcXCI6IFxcXCJ0\nZXN0QGVtYWlsLmNvbVxcXCJcXG4gIH0sIFxcbiAgXFxcIlJ1biBUaW1lXFxcIjogXFxcIjIwMDBc\nXFwiLFxcbiAgXFxcIlBhc3Rlc1xcXCI6XFxuICAgIFsgIFxcbiAgICAgICB7ICBcXG4gICAgICAg\nICAgdSdEYXRlJzpOb25lLFxcbiAgICAgICAgICB1J1NvdXJjZSc6dSdBZEhvY1VybCcsXFxuICAg\nICAgICAgIHUnRW1haWxDb3VudCc6OTg5MyxcXG4gICAgICAgICAgdSdJZCc6ICAgICAgdSdodHRw\nOi8vc2lwaDBuLmluL2V4cGxvaXRzLnBocD9pZD0zNjcwJyxcXG4gICAgICAgICAgdSdUaXRsZSc6\ndSdzaXBoMG4uaW4nXFxuICAgICAgIH0sXFxuICAgICAgIHsgIFxcbiAgICAgICAgICB1J0RhdGUn\nOk5vbmUsXFxuICAgICAgICAgIHUnU291cmNlJzp1J0FkSG9jVXJsJyxcXG4gICAgICAgICAgdSdF\nbWFpbENvdW50JzoxMjAwMixcXG4gICAgICAgICAgdSdJZCc6ICAgICAgdSdodHRwOi8vc2lwaDBu\nLmluL2V4cGxvaXRzLnBocD9pZD0zODkyJyxcXG4gICAgICAgICAgdSdUaXRsZSc6dSdzaXBoMG4u\naW4nXFxuICAgICAgIH0sXFxuICAgICAgIHsgIFxcbiAgICAgICAgICB1J0RhdGUnOk5vbmUsXFxu\nICAgICAgICAgIHUnU291cmNlJzp1J0FkSG9jVXJsJyxcXG4gICAgICAgICAgdSdFbWFpbENvdW50\nJzo5OTc5MSxcXG4gICAgICAgICAgdSdJZCc6ICAgICAgdSdodHRwOi8vc2lwaDBuLmluL2V4cGxv\naXRzLnBocD9pZD00NjgwJyxcXG4gICAgICAgICAgdSdUaXRsZSc6dSdyZW1vdGVzdGFmZi5jb20u\nYXUnXFxuICAgICAgIH1cXG4gICAgXVxcbn1cXG4nJydcXG5cIixcInByZV9wcm9jZXNzaW5nX3Nj\ncmlwdFwiOlwiaW5wdXRzLmVtYWlsX2FkZHJlc3MgPSBhcnRpZmFjdC52YWx1ZVwifTwvcmVzaWxp\nZW50OmZ1bmN0aW9uPjwvZXh0ZW5zaW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNlRmxvd18x\naWllZTFwPC9pbmNvbWluZz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzB2ZDB2ODA8L291dGdvaW5n\nPjwvc2VydmljZVRhc2s+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18xaWllZTFwXCIg\nc291cmNlUmVmPVwiU2VydmljZVRhc2tfMXM1YnR2blwiIHRhcmdldFJlZj1cIlNlcnZpY2VUYXNr\nXzBsdmdqd2pcIi8+PGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRfMTFjZzZldVwiPjxpbmNvbWluZz5T\nZXF1ZW5jZUZsb3dfMHZkMHY4MDwvaW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVuY2VGbG93IGlk\nPVwiU2VxdWVuY2VGbG93XzB2ZDB2ODBcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFza18wbHZnandq\nXCIgdGFyZ2V0UmVmPVwiRW5kRXZlbnRfMTFjZzZldVwiLz48dGV4dEFubm90YXRpb24gaWQ9XCJU\nZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCI+PHRleHQ+U3RhcnQgeW91ciB3b3JrZmxvdyBoZXJlPC90\nZXh0PjwvdGV4dEFubm90YXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0\nOFwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlRleHRBbm5v\ndGF0aW9uXzFreHhpeXRcIi8+PC9wcm9jZXNzPjxicG1uZGk6QlBNTkRpYWdyYW0gaWQ9XCJCUE1O\nRGlhZ3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBtbkVsZW1lbnQ9XCJ1bmRlZmluZWRcIiBp\nZD1cIkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTdGFydEV2\nZW50XzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU
1YXN4bV9kaVwiPjxvbWdkYzpCb3VuZHMg\naGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2MlwiIHk9XCIxODhcIi8+PGJwbW5kaTpC\nUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIwXCIgd2lkdGg9XCI5MFwiIHg9XCIxNTdc\nIiB5PVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5k\naTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCIgaWQ9XCJU\nZXh0QW5ub3RhdGlvbl8xa3h4aXl0X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzMFwiIHdp\nZHRoPVwiMTAwXCIgeD1cIjk5XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5k\naTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9uXzFzZXVqNDhcIiBpZD1cIkFzc29j\naWF0aW9uXzFzZXVqNDhfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE2OVwiIHhzaTp0eXBlPVwi\nb21nZGM6UG9pbnRcIiB5PVwiMjIwXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMTUzXCIgeHNpOnR5\ncGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTkVkZ2U+PGJwbW5kaTpC\nUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18xczVidHZuXCIgaWQ9XCJTZXJ2aWNl\nVGFza18xczVidHZuX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI4MFwiIHdpZHRoPVwiMTAw\nXCIgeD1cIjI3MVwiIHk9XCIxNjZcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVk\nZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMWYwYjI5elwiIGlkPVwiU2VxdWVuY2VGbG93\nXzFmMGIyOXpfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE5OFwiIHhzaTp0eXBlPVwib21nZGM6\nUG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMjcxXCIgeHNpOnR5cGU9XCJv\nbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBo\nZWlnaHQ9XCIxM1wiIHdpZHRoPVwiOTBcIiB4PVwiMTg5LjVcIiB5PVwiMTg0LjVcIi8+PC9icG1u\nZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVt\nZW50PVwiU2VydmljZVRhc2tfMGx2Z2p3alwiIGlkPVwiU2VydmljZVRhc2tfMGx2Z2p3al9kaVwi\nPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiODBcIiB3aWR0aD1cIjEwMFwiIHg9XCI0ODBcIiB5PVwi\nMTY2XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwi\nU2VxdWVuY2VGbG93XzFpaWVlMXBcIiBpZD1cIlNlcXVlbmNlRmxvd18xaWllZTFwX2RpXCI+PG9t\nZ2RpOndheXBvaW50IHg9XCIzNzFcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwi\nLz48b21nZGk6d2F5cG9pbnQgeD1cIjQ4MFwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwi\nMjA2XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0\naD1cIjBcIiB4PVwiNDI1LjVcIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5k\naTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIkVuZEV2ZW50XzExY2c2\nZXVcIiBpZD1cIkVuZEV2ZW50XzExY2c2ZXVfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjM2\nXCIgd2lkdGg9XCIzNlwiIHg9XCI2OTZcIiB5PVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxv\nbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiNzE0XCIgeT1cIjIyN1wi\nLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2Ug\nYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMHZkMHY4MFwiIGlkPVwiU2VxdWVuY2VGbG93XzB2\nZDB2ODBfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjU4MFwiIHhzaTp0eXBlPVwib21nZGM6UG9p\nbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNjk2XCIgeHNpOnR5cGU9XCJvbWdk\nYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWln\naHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI2MzhcIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5M\nYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48L2JwbW5kaTpCUE1OUGxhbmU+PC9icG1uZGk6QlBNTkRp\nYWdyYW0+PC9kZWZpbml0aW9ucz4ifSwgImNvbnRlbnRfdmVyc2lvbiI6IDExLCAiY3JlYXRvcl9p\nZCI6ICJhQGV4YW1wbGUuY29tIiwgImRlc2NyaXB0aW9uIjogIiIsICJleHBvcnRfa2V5IjogImhh\ndmVfaV9iZWVuX3B3bmVkX3NlYXJjaCIsICJsYXN0X21vZGlmaWVkX2J5IjogImFAZXhhbXBsZS5j\nb20iLCAibGFzdF9tb2RpZmllZF90aW1lIjogMTU4OTMwMTA5NTIwNCwgIm5hbWUiOiAiSGF2ZSBJ\nIEJlZW4gUHduZWQgU2VhcmNoIiwgIm9iamVjdF90eXBlIjogImFydGlmYWN0IiwgInByb2dyYW1t\nYXRpY19uYW1lIjogImhhdmVfaV9iZWVuX3B3bmVkX3NlYXJjaCIsICJ0YWdzIjogW10sICJ1dWlk\nIjogIjI2N2VjYjU2LTNlMGQtNDljMS1hMGFlLWFkYWFiMzE2Yjg4ZSIsICJ3b3JrZmxvd19pZCI6\nIDM5fV0sIC
J3b3Jrc3BhY2VzIjogW119\n\"\"\")", "title": "" }, { "docid": "acad49c9cdf317d31b65c29ffe6cbea1", "score": "0.46896127", "text": "def setupVarious(context):\n l = logging.getLogger('collective.bibliocustomviews / setuphandler')\n # Ordinarily, GenericSetup handlers check for the existence of XML files.\n # Here, we are not parsing an XML file, but we use this text file as a\n # flag to check that we actually meant for this import step to be run.\n # The file is found in profiles/default.\n if context.readDataFile('collective.bibliocustomviews_various.txt') is None:\n return\n\n portal = context.getSite()\n catalog = portal.portal_catalog\n #class extra:\n # index_type=\"Okapi BM25 Rank\"\n # lexicon_id=\"plone_lexicon\" \n #for k, tp, extra in [\n # ('BiblioSource', 'ZCTextIndex', extra),\n # ('AuthorsList', 'KeywordIndex', None),\n #]:\n # if not k in catalog.Indexes:\n # l.error('Creating %s in catalog' % k)\n # catalog.addIndex(k, tp, extra)\n # catalog.addColumn(k)\n # catalog.reindexIndex(k, portal.REQUEST)\n columns = catalog._catalog.schema\n reindex = True\n for k in ('bSource', 'bAuthorsList', ):\n if not k in columns:\n l.warn('Creating %s in catalog' % k)\n catalog.addColumn(k)\n reindex = True\n if reindex:\n l.warn('Reindexing')\n # reindex documents\n brains = catalog.searchResults(\n **{'object_provides':IBibliographicItem.__identifier__}\n )\n lb = len(brains)\n done = 0\n for i, b in enumerate(brains):\n cur = i * 100.0 / lb\n adone = int(cur) / 10 \n if done != adone:\n # print each 10%\n done = adone\n l.warn('Done %s/%s (%s%s)' %(i, lb, cur, '%'))\n transaction.commit()\n b.getObject().reindexObject()\n transaction.commit()", "title": "" }, { "docid": "567fe208e0ffb3e505dfda060604c84d", "score": "0.4683485", "text": "def flavor_create(self, context, values):", "title": "" }, { "docid": "c034c25539daa22b722b5013d4af25f9", "score": "0.46831304", "text": "def akeneo_modify_composer(composer):\n\n composer['name']= \"platformsh/{0}\".format(projectName)\n composer['description']= \"Akeneo PIM Community Standard Edition for deployment on Platform.sh\"\n\n return composer", "title": "" }, { "docid": "129204afe48326e7e7bcf80e0adb8ef7", "score": "0.46711794", "text": "def setup(self, options, definition_path):", "title": "" }, { "docid": "1d8a3c957f334e5227e7fc8322ec4d14", "score": "0.46692893", "text": "def customizations(record):\n record = type(record)\n record = author(record)\n record = editor(record)\n record = journal(record)\n record = keyword(record)\n record = link(record)\n record = page_double_hyphen(record)\n record = doi(record)\n return record", "title": "" }, { "docid": "75377dd40a27dfb617d25dbcab8ed170", "score": "0.46679994", "text": "def auto_enable_custom_integrations(enable_custom_integrations):\n yield", "title": "" }, { "docid": "fa9395c8c8d634012074a75be5bc183a", "score": "0.46675274", "text": "def extra_options():\n extra_vars = [('interfaces', [True, \"Indicates whether interfaces should be built (default: True)\", CUSTOM])]\n return IntelBase.extra_options(extra_vars)", "title": "" }, { "docid": "34103d472f6c63651f9936227467f840", "score": "0.46642977", "text": "def custom_schemas(self) -> List[dict]:\n schemas = []\n if self.get_arg_value(\"report_adapters_missing\"):\n schemas += list(SCHEMAS_CUSTOM[\"report_adapters_missing\"].values())\n if self.get_arg_value(\"report_software_whitelist\"):\n schemas += list(SCHEMAS_CUSTOM[\"report_software_whitelist\"].values())\n return schemas", "title": "" }, { "docid": "957538d9765d27637c9b9929b72e09ff", 
"score": "0.4650546", "text": "def generate_conf_file(self):\n\n self.menupanel.gen_luaconf(self.drawings.objects)", "title": "" }, { "docid": "b7a53cccdb9d2086378ccc5d7b37829f", "score": "0.46245915", "text": "def customization_data(client=None):\n\n yield ImportDefinition(u\"\"\"\neyJhY3Rpb25fb3JkZXIiOiBbXSwgImFjdGlvbnMiOiBbXSwgImF1dG9tYXRpY190YXNrcyI6IFtd\nLCAiZXhwb3J0X2RhdGUiOiAxNTk0MTM0OTE5MTAwLCAiZXhwb3J0X2Zvcm1hdF92ZXJzaW9uIjog\nMiwgImZpZWxkcyI6IFt7ImV4cG9ydF9rZXkiOiAiaW5jaWRlbnQvaW50ZXJuYWxfY3VzdG9taXph\ndGlvbnNfZmllbGQiLCAiaWQiOiAwLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgImludGVybmFsIjog\ndHJ1ZSwgIm5hbWUiOiAiaW50ZXJuYWxfY3VzdG9taXphdGlvbnNfZmllbGQiLCAicmVhZF9vbmx5\nIjogdHJ1ZSwgInRleHQiOiAiQ3VzdG9taXphdGlvbnMgRmllbGQgKGludGVybmFsKSIsICJ0eXBl\nX2lkIjogMCwgInV1aWQiOiAiYmZlZWMyZDQtMzc3MC0xMWU4LWFkMzktNGEwMDA0MDQ0YWExIn1d\nLCAiZnVuY3Rpb25zIjogW10sICJnZW9zIjogbnVsbCwgImdyb3VwcyI6IG51bGwsICJpZCI6IDUs\nICJpbmJvdW5kX21haWxib3hlcyI6IG51bGwsICJpbmNpZGVudF9hcnRpZmFjdF90eXBlcyI6IFtd\nLCAiaW5jaWRlbnRfdHlwZXMiOiBbeyJ1cGRhdGVfZGF0ZSI6IDE1OTQxMzQ5MTcxODcsICJjcmVh\ndGVfZGF0ZSI6IDE1OTQxMzQ5MTcxODcsICJ1dWlkIjogImJmZWVjMmQ0LTM3NzAtMTFlOC1hZDM5\nLTRhMDAwNDA0NGFhMCIsICJkZXNjcmlwdGlvbiI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChp\nbnRlcm5hbCkiLCAiZXhwb3J0X2tleSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5h\nbCkiLCAibmFtZSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAiZW5hYmxl\nZCI6IGZhbHNlLCAic3lzdGVtIjogZmFsc2UsICJwYXJlbnRfaWQiOiBudWxsLCAiaGlkZGVuIjog\nZmFsc2UsICJpZCI6IDB9XSwgImluZHVzdHJpZXMiOiBudWxsLCAibGF5b3V0cyI6IFtdLCAibG9j\nYWxlIjogbnVsbCwgIm1lc3NhZ2VfZGVzdGluYXRpb25zIjogW10sICJub3RpZmljYXRpb25zIjog\nbnVsbCwgIm92ZXJyaWRlcyI6IFtdLCAicGhhc2VzIjogW10sICJyZWd1bGF0b3JzIjogbnVsbCwg\nInJvbGVzIjogW10sICJzY3JpcHRzIjogW10sICJzZXJ2ZXJfdmVyc2lvbiI6IHsiYnVpbGRfbnVt\nYmVyIjogMzIsICJtYWpvciI6IDM1LCAibWlub3IiOiAyLCAidmVyc2lvbiI6ICIzNS4yLjMyIn0s\nICJ0YWdzIjogW10sICJ0YXNrX29yZGVyIjogW10sICJ0aW1lZnJhbWVzIjogbnVsbCwgInR5cGVz\nIjogW10sICJ3b3JrZmxvd3MiOiBbXSwgIndvcmtzcGFjZXMiOiBbXX0=\n\"\"\")", "title": "" }, { "docid": "6dc611c9f651f942d33c39d6acefadba", "score": "0.46243632", "text": "def configure(self, *args, **kwargs):", "title": "" }, { "docid": "5e76596a47cd1238a61550bc07105994", "score": "0.4621968", "text": "def __init__(self):\n super(\n DmgCommandBase.ConfigSubCommand.GenerateSubCommand,\n self).__init__(\n \"/run/dmg/config/generate/*\", \"generate\")\n self.access_points = FormattedParameter(\n \"--access-points={}\", None)\n self.num_engines = FormattedParameter(\"--num-engines={}\", None)\n self.scm_only = FormattedParameter(\"--scm-only\", False)\n self.net_class = FormattedParameter(\"--net-class={}\", None)\n self.net_provider = FormattedParameter(\"--net-provider={}\", None)\n self.use_tmpfs_scm = FormattedParameter(\"--use-tmpfs-scm\", False)\n self.control_metadata_path = FormattedParameter(\n \"--control-metadata-path={}\", None)", "title": "" }, { "docid": "984c3854e1e5655d3f42943a52a5b090", "score": "0.46168107", "text": "def customization_data(client=None):\n\n yield 
ImportDefinition(u\"\"\"\neyJhY3Rpb25fb3JkZXIiOiBbXSwgImFjdGlvbnMiOiBbeyJhdXRvbWF0aW9ucyI6IFtdLCAiY29u\nZGl0aW9ucyI6IFtdLCAiZW5hYmxlZCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJEYXRhIEZlZWRl\ncjogQXJ0aWZhY3QiLCAiaWQiOiAxMjQsICJsb2dpY190eXBlIjogImFsbCIsICJtZXNzYWdlX2Rl\nc3RpbmF0aW9ucyI6IFsiZmVlZF9kYXRhX3Jlc2lsaWVudCJdLCAibmFtZSI6ICJEYXRhIEZlZWRl\ncjogQXJ0aWZhY3QiLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAidGFncyI6IFtdLCAidGlt\nZW91dF9zZWNvbmRzIjogODY0MDAsICJ0eXBlIjogMCwgInV1aWQiOiAiM2M2MjdhYTgtNTgxMC00\nNGE0LWEyNWQtZTVhOGRiMTliNmE2IiwgInZpZXdfaXRlbXMiOiBbXSwgIndvcmtmbG93cyI6IFtd\nfSwgeyJhdXRvbWF0aW9ucyI6IFtdLCAiY29uZGl0aW9ucyI6IFtdLCAiZW5hYmxlZCI6IGZhbHNl\nLCAiZXhwb3J0X2tleSI6ICJEYXRhIEZlZWRlcjogQXR0YWNobWVudCIsICJpZCI6IDEyNSwgImxv\nZ2ljX3R5cGUiOiAiYWxsIiwgIm1lc3NhZ2VfZGVzdGluYXRpb25zIjogWyJmZWVkX2RhdGFfcmVz\naWxpZW50Il0sICJuYW1lIjogIkRhdGEgRmVlZGVyOiBBdHRhY2htZW50IiwgIm9iamVjdF90eXBl\nIjogImF0dGFjaG1lbnQiLCAidGFncyI6IFtdLCAidGltZW91dF9zZWNvbmRzIjogODY0MDAsICJ0\neXBlIjogMCwgInV1aWQiOiAiMmI2MjhiOGMtMWI1Mi00ZTUxLWE1ZjMtYzMyM2Q3ZmYwMzdlIiwg\nInZpZXdfaXRlbXMiOiBbXSwgIndvcmtmbG93cyI6IFtdfSwgeyJhdXRvbWF0aW9ucyI6IFtdLCAi\nY29uZGl0aW9ucyI6IFtdLCAiZW5hYmxlZCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJEYXRhIEZl\nZWRlcjogSW5jaWRlbnQiLCAiaWQiOiAxMjYsICJsb2dpY190eXBlIjogImFsbCIsICJtZXNzYWdl\nX2Rlc3RpbmF0aW9ucyI6IFsiZmVlZF9kYXRhX3Jlc2lsaWVudCJdLCAibmFtZSI6ICJEYXRhIEZl\nZWRlcjogSW5jaWRlbnQiLCAib2JqZWN0X3R5cGUiOiAiaW5jaWRlbnQiLCAidGFncyI6IFtdLCAi\ndGltZW91dF9zZWNvbmRzIjogODY0MDAsICJ0eXBlIjogMCwgInV1aWQiOiAiNWJjMGI5OWItOGY4\nNy00OGRlLTk3ZDktOTMzM2YxMTM5ZDVkIiwgInZpZXdfaXRlbXMiOiBbXSwgIndvcmtmbG93cyI6\nIFtdfSwgeyJhdXRvbWF0aW9ucyI6IFtdLCAiY29uZGl0aW9ucyI6IFtdLCAiZW5hYmxlZCI6IGZh\nbHNlLCAiZXhwb3J0X2tleSI6ICJEYXRhIEZlZWRlcjogTWlsZXN0b25lIiwgImlkIjogMTI3LCAi\nbG9naWNfdHlwZSI6ICJhbGwiLCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbImZlZWRfZGF0YV9y\nZXNpbGllbnQiXSwgIm5hbWUiOiAiRGF0YSBGZWVkZXI6IE1pbGVzdG9uZSIsICJvYmplY3RfdHlw\nZSI6ICJtaWxlc3RvbmUiLCAidGFncyI6IFtdLCAidGltZW91dF9zZWNvbmRzIjogODY0MDAsICJ0\neXBlIjogMCwgInV1aWQiOiAiYzdmY2FmNTAtNDQwMi00YzYyLTk1NTItYzI2ZGY2ZTViZTliIiwg\nInZpZXdfaXRlbXMiOiBbXSwgIndvcmtmbG93cyI6IFtdfSwgeyJhdXRvbWF0aW9ucyI6IFtdLCAi\nY29uZGl0aW9ucyI6IFtdLCAiZW5hYmxlZCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJEYXRhIEZl\nZWRlcjogTm90ZSIsICJpZCI6IDEyOCwgImxvZ2ljX3R5cGUiOiAiYWxsIiwgIm1lc3NhZ2VfZGVz\ndGluYXRpb25zIjogWyJmZWVkX2RhdGFfcmVzaWxpZW50Il0sICJuYW1lIjogIkRhdGEgRmVlZGVy\nOiBOb3RlIiwgIm9iamVjdF90eXBlIjogIm5vdGUiLCAidGFncyI6IFtdLCAidGltZW91dF9zZWNv\nbmRzIjogODY0MDAsICJ0eXBlIjogMCwgInV1aWQiOiAiNzgwZjJlYmUtOWFhYy00MWU5LTk4YWIt\nNzA2ODhhYzlhZjdhIiwgInZpZXdfaXRlbXMiOiBbXSwgIndvcmtmbG93cyI6IFtdfSwgeyJhdXRv\nbWF0aW9ucyI6IFtdLCAiY29uZGl0aW9ucyI6IFtdLCAiZW5hYmxlZCI6IGZhbHNlLCAiZXhwb3J0\nX2tleSI6ICJEYXRhIEZlZWRlcjogU3luYyBJbmNpZGVudHMiLCAiaWQiOiAxMjksICJsb2dpY190\neXBlIjogImFsbCIsICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFtdLCAibmFtZSI6ICJEYXRhIEZl\nZWRlcjogU3luYyBJbmNpZGVudHMiLCAib2JqZWN0X3R5cGUiOiAiaW5jaWRlbnQiLCAidGFncyI6\nIFtdLCAidGltZW91dF9zZWNvbmRzIjogODY0MDAsICJ0eXBlIjogMSwgInV1aWQiOiAiMTc0YWJj\nMTgtZGM3Mi00MTMwLWE1YzgtNjQyMWZiNDQ5ZjEyIiwgInZpZXdfaXRlbXMiOiBbeyJjb250ZW50\nIjogImNjNTMyYTIyLWU5MGYtNDZlNC05MTlhLTBhZDEyOTY3OTZiZiIsICJlbGVtZW50IjogImZp\nZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJhY3Rpb25pbnZvY2F0aW9uIiwgInNob3dfaWYiOiBu\ndWxsLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNlLCAic3RlcF9sYWJlbCI6IG51bGx9LCB7ImNv\nbnRlbnQiOiAiM2UyZTkzZGUtNmJhNS00YWRmLWIwNDQtNjZhNDYwOWRkNzdkIiwgImVsZW1lbnQi\nOiAiZmllbGRfdXVpZCIsICJmaWVsZF90eXBlIjogImFjdGlvbmludm9jYXRpb24iLCAic2hvd19p\nZiI6IG51bGwsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2UsICJzdGVwX2xhYmVsIjogbnVsbH0s\nIHsiY29udGVudCI6ICI
3ZWQzYWYwMy1kM2IzLTQxZWQtYTcyMS05NzA1NDM0NjM1OTgiLCAiZWxl\nbWVudCI6ICJmaWVsZF91dWlkIiwgImZpZWxkX3R5cGUiOiAiYWN0aW9uaW52b2NhdGlvbiIsICJz\naG93X2lmIjogbnVsbCwgInNob3dfbGlua19oZWFkZXIiOiBmYWxzZSwgInN0ZXBfbGFiZWwiOiBu\ndWxsfV0sICJ3b3JrZmxvd3MiOiBbImRhdGFfZmVlZGVyX3N5bmNfaW5jaWRlbnRzIl19LCB7ImF1\ndG9tYXRpb25zIjogW10sICJjb25kaXRpb25zIjogW10sICJlbmFibGVkIjogZmFsc2UsICJleHBv\ncnRfa2V5IjogIkRhdGEgRmVlZGVyOiBUYXNrIiwgImlkIjogMTMwLCAibG9naWNfdHlwZSI6ICJh\nbGwiLCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbImZlZWRfZGF0YV9yZXNpbGllbnQiXSwgIm5h\nbWUiOiAiRGF0YSBGZWVkZXI6IFRhc2siLCAib2JqZWN0X3R5cGUiOiAidGFzayIsICJ0YWdzIjog\nW10sICJ0aW1lb3V0X3NlY29uZHMiOiA4NjQwMCwgInR5cGUiOiAwLCAidXVpZCI6ICJlZTBkOTJl\nZS1lNTNkLTRlYmQtYTI4Zi1kZjk1OWU5NDllZDciLCAidmlld19pdGVtcyI6IFtdLCAid29ya2Zs\nb3dzIjogW119XSwgImF1dG9tYXRpY190YXNrcyI6IFtdLCAiZXhwb3J0X2RhdGUiOiAxNjAyMjQ3\nNTk0NzI2LCAiZXhwb3J0X2Zvcm1hdF92ZXJzaW9uIjogMiwgImZpZWxkcyI6IFt7ImFsbG93X2Rl\nZmF1bHRfdmFsdWUiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiY2FsY3VsYXRlZCI6\nIGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hv\nc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6\nICJfX2Z1bmN0aW9uL2RmX21pbl9pbmNpZGVudF9pZCIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZh\nbHNlLCAiaWQiOiA5NzAsICJpbnB1dF90eXBlIjogIm51bWJlciIsICJpbnRlcm5hbCI6IGZhbHNl\nLCAiaXNfdHJhY2tlZCI6IGZhbHNlLCAibmFtZSI6ICJkZl9taW5faW5jaWRlbnRfaWQiLCAib3Bl\ncmF0aW9uX3Blcm1zIjoge30sICJvcGVyYXRpb25zIjogW10sICJwbGFjZWhvbGRlciI6ICIiLCAi\ncHJlZml4IjogbnVsbCwgInJlYWRfb25seSI6IGZhbHNlLCAicmVxdWlyZWQiOiAiYWx3YXlzIiwg\nInJpY2hfdGV4dCI6IGZhbHNlLCAidGFncyI6IFtdLCAidGVtcGxhdGVzIjogW10sICJ0ZXh0Ijog\nImRmX21pbl9pbmNpZGVudF9pZCIsICJ0b29sdGlwIjogIkVudGVyIGFuIGluY2lkZW50ICMgb3Ig\nMCB0byBpbmRpY2F0ZSB0aGUgc3RhcnQgb2YgYWxsIGluY2lkZW50cyIsICJ0eXBlX2lkIjogMTEs\nICJ1dWlkIjogImI4MGQxMWQ0LTljNmItNGNkNy05NTFhLTRmZThjNTcyYzllZiIsICJ2YWx1ZXMi\nOiBbXX0sIHsiYWxsb3dfZGVmYXVsdF92YWx1ZSI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogdHJ1\nZSwgImNhbGN1bGF0ZWQiOiBmYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAiY2hvc2VuIjogZmFs\nc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImRlcHJlY2F0ZWQiOiBmYWxz\nZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9kZl9xdWVyeV9hcGlfbWV0aG9kIiwgImhpZGVf\nbm90aWZpY2F0aW9uIjogZmFsc2UsICJpZCI6IDk3MSwgImlucHV0X3R5cGUiOiAiYm9vbGVhbiIs\nICJpbnRlcm5hbCI6IGZhbHNlLCAiaXNfdHJhY2tlZCI6IGZhbHNlLCAibmFtZSI6ICJkZl9xdWVy\neV9hcGlfbWV0aG9kIiwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAib3BlcmF0aW9ucyI6IFtdLCAi\ncGxhY2Vob2xkZXIiOiAiIiwgInByZWZpeCI6IG51bGwsICJyZWFkX29ubHkiOiBmYWxzZSwgInJl\ncXVpcmVkIjogImFsd2F5cyIsICJyaWNoX3RleHQiOiBmYWxzZSwgInRhZ3MiOiBbXSwgInRlbXBs\nYXRlcyI6IFtdLCAidGV4dCI6ICJkZl9xdWVyeV9hcGlfbWV0aG9kIiwgInRvb2x0aXAiOiAiIiwg\nInR5cGVfaWQiOiAxMSwgInV1aWQiOiAiNzMxZTk0ZmYtODIyZi00OGYxLTgzYTktYTc4MzgwZmQ2\nMzZiIiwgInZhbHVlcyI6IFtdfSwgeyJhbGxvd19kZWZhdWx0X3ZhbHVlIjogZmFsc2UsICJibGFu\na19vcHRpb24iOiBmYWxzZSwgImNhbGN1bGF0ZWQiOiBmYWxzZSwgImNoYW5nZWFibGUiOiB0cnVl\nLCAiY2hvc2VuIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImRl\ncHJlY2F0ZWQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9kZl9tYXhfaW5jaWRl\nbnRfaWQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImlkIjogOTY5LCAiaW5wdXRfdHlw\nZSI6ICJudW1iZXIiLCAiaW50ZXJuYWwiOiBmYWxzZSwgImlzX3RyYWNrZWQiOiBmYWxzZSwgIm5h\nbWUiOiAiZGZfbWF4X2luY2lkZW50X2lkIiwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAib3BlcmF0\naW9ucyI6IFtdLCAicGxhY2Vob2xkZXIiOiAiIiwgInByZWZpeCI6IG51bGwsICJyZWFkX29ubHki\nOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGFncyI6IFtdLCAidGVtcGxhdGVzIjogW10s\nICJ0ZXh0IjogImRmX21heF9pbmNpZGVudF9pZCIsICJ0b29sdGlwIjogIkVudGVyIGluY2lkZW50\nICMgZm9yIHVwcGVyIHJhbmdlIG9yIDAgdG8gaW5kaWNhdGUgYWxsIGluY2lkZW50
cyIsICJ0eXBl\nX2lkIjogMTEsICJ1dWlkIjogImU3ODIwZTQ0LTQwODctNGNlMi04NGFmLTBmZTkzNjMwYTAzYyIs\nICJ2YWx1ZXMiOiBbXX0sIHsiYWxsb3dfZGVmYXVsdF92YWx1ZSI6IGZhbHNlLCAiYmxhbmtfb3B0\naW9uIjogZmFsc2UsICJjYWxjdWxhdGVkIjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImNo\nb3NlbiI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJkZXByZWNh\ndGVkIjogZmFsc2UsICJleHBvcnRfa2V5IjogImFjdGlvbmludm9jYXRpb24vZGF0YV9mZWVkZXJf\nbWF4aW11bV9pbmNpZGVudF9pZCIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiaWQiOiA5\nNjgsICJpbnB1dF90eXBlIjogIm51bWJlciIsICJpbnRlcm5hbCI6IGZhbHNlLCAiaXNfdHJhY2tl\nZCI6IGZhbHNlLCAibmFtZSI6ICJkYXRhX2ZlZWRlcl9tYXhpbXVtX2luY2lkZW50X2lkIiwgIm9w\nZXJhdGlvbl9wZXJtcyI6IHt9LCAib3BlcmF0aW9ucyI6IFtdLCAicGxhY2Vob2xkZXIiOiAiIiwg\nInByZWZpeCI6ICJwcm9wZXJ0aWVzIiwgInJlYWRfb25seSI6IGZhbHNlLCAicmljaF90ZXh0Ijog\nZmFsc2UsICJ0YWdzIjogW10sICJ0ZW1wbGF0ZXMiOiBbXSwgInRleHQiOiAiTWF4aW11bSBJbmNp\nZGVudCBJRCIsICJ0b29sdGlwIjogIkVudGVyIEluY2lkZW50IElEIHRvIHN5bmMgdXAgdG8gb3Ig\nMCB0byBpbmRpY2F0ZSBhbGwgaW5jaWRlbnRzIiwgInR5cGVfaWQiOiA2LCAidXVpZCI6ICIzZTJl\nOTNkZS02YmE1LTRhZGYtYjA0NC02NmE0NjA5ZGQ3N2QiLCAidmFsdWVzIjogW119LCB7ImFsbG93\nX2RlZmF1bHRfdmFsdWUiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IHRydWUsICJjYWxjdWxhdGVk\nIjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImNob3NlbiI6IGZhbHNlLCAiZGVmYXVsdF9j\naG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJkZXByZWNhdGVkIjogZmFsc2UsICJleHBvcnRfa2V5\nIjogImFjdGlvbmludm9jYXRpb24vcXVlcnlfYXBpX21ldGhvZCIsICJoaWRlX25vdGlmaWNhdGlv\nbiI6IGZhbHNlLCAiaWQiOiA5NjcsICJpbnB1dF90eXBlIjogImJvb2xlYW4iLCAiaW50ZXJuYWwi\nOiBmYWxzZSwgImlzX3RyYWNrZWQiOiBmYWxzZSwgIm5hbWUiOiAicXVlcnlfYXBpX21ldGhvZCIs\nICJvcGVyYXRpb25fcGVybXMiOiB7fSwgIm9wZXJhdGlvbnMiOiBbXSwgInBsYWNlaG9sZGVyIjog\nIiIsICJwcmVmaXgiOiAicHJvcGVydGllcyIsICJyZWFkX29ubHkiOiBmYWxzZSwgInJlcXVpcmVk\nIjogImFsd2F5cyIsICJyaWNoX3RleHQiOiBmYWxzZSwgInRhZ3MiOiBbXSwgInRlbXBsYXRlcyI6\nIFtdLCAidGV4dCI6ICJRdWVyeSBBUEkgTWV0aG9kIiwgInRvb2x0aXAiOiAiU3BlY2lmeSB0cnVl\nIGlmIGVycm9ycyBvY2N1ciB3aGVuIHVzaW5nIHRoZSBkZWZhdWx0IHNlYXJjaCBjYXBhYmlsaXR5\nIiwgInR5cGVfaWQiOiA2LCAidXVpZCI6ICI3ZWQzYWYwMy1kM2IzLTQxZWQtYTcyMS05NzA1NDM0\nNjM1OTgiLCAidmFsdWVzIjogW119LCB7ImFsbG93X2RlZmF1bHRfdmFsdWUiOiBmYWxzZSwgImJs\nYW5rX29wdGlvbiI6IGZhbHNlLCAiY2FsY3VsYXRlZCI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRy\ndWUsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAi\nZGVwcmVjYXRlZCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJhY3Rpb25pbnZvY2F0aW9uL2RhdGFf\nZmVlZGVyX21pbmltdW1faW5jaWRlbnRfaWQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwg\nImlkIjogOTY2LCAiaW5wdXRfdHlwZSI6ICJudW1iZXIiLCAiaW50ZXJuYWwiOiBmYWxzZSwgImlz\nX3RyYWNrZWQiOiBmYWxzZSwgIm5hbWUiOiAiZGF0YV9mZWVkZXJfbWluaW11bV9pbmNpZGVudF9p\nZCIsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgIm9wZXJhdGlvbnMiOiBbXSwgInBsYWNlaG9sZGVy\nIjogIiIsICJwcmVmaXgiOiAicHJvcGVydGllcyIsICJyZWFkX29ubHkiOiBmYWxzZSwgInJlcXVp\ncmVkIjogImFsd2F5cyIsICJyaWNoX3RleHQiOiBmYWxzZSwgInRhZ3MiOiBbXSwgInRlbXBsYXRl\ncyI6IFtdLCAidGV4dCI6ICJNaW5pbXVtIEluY2lkZW50IElEIiwgInRvb2x0aXAiOiAiRW50ZXIg\nSW5jaWRlbnQgSUQgdG8gc3RhcnQgc3luYyBvciAwIiwgInR5cGVfaWQiOiA2LCAidXVpZCI6ICJj\nYzUzMmEyMi1lOTBmLTQ2ZTQtOTE5YS0wYWQxMjk2Nzk2YmYiLCAidmFsdWVzIjogW119LCB7ImFs\nbG93X2RlZmF1bHRfdmFsdWUiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiY2FsY3Vs\nYXRlZCI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1\nbHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZXhwb3J0\nX2tleSI6ICJpbmNpZGVudC9kZl9ob3N0IiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJp\nZCI6IDE5NDQsICJpbnB1dF90eXBlIjogInRleHQiLCAiaW50ZXJuYWwiOiBmYWxzZSwgImlzX3Ry\nYWNrZWQiOiBmYWxzZSwgIm5hbWUiOiAiZGZfaG9zdCIsICJvcGVyYXRpb25fcGVybXMiOiB7fSwg\nIm9wZXJhdGlvbnMiOiBbXSwgInBsYWN
laG9sZGVyIjogIiIsICJwcmVmaXgiOiAicHJvcGVydGll\ncyIsICJyZWFkX29ubHkiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGFncyI6IFtdLCAi\ndGVtcGxhdGVzIjogW10sICJ0ZXh0IjogIkRhdGEgRmVlZGVyIFN5bmMgSG9zdCIsICJ0b29sdGlw\nIjogIkhvc3Qgd2hpY2ggb3JpZ2luYXRlZCB0aGUgc3luYyAiLCAidHlwZV9pZCI6IDAsICJ1dWlk\nIjogIjZiZTFiNzg5LTkxM2EtNGEzNC04NTg5LTc5ZjhkYzljMGVmZCIsICJ2YWx1ZXMiOiBbXX0s\nIHsiYWxsb3dfZGVmYXVsdF92YWx1ZSI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJj\nYWxjdWxhdGVkIjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImNob3NlbiI6IGZhbHNlLCAi\nZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJkZXByZWNhdGVkIjogZmFsc2UsICJl\neHBvcnRfa2V5IjogImluY2lkZW50L2RmX2NyZWF0ZV9kYXRlIiwgImhpZGVfbm90aWZpY2F0aW9u\nIjogZmFsc2UsICJpZCI6IDE5NDUsICJpbnB1dF90eXBlIjogImRhdGV0aW1lcGlja2VyIiwgImlu\ndGVybmFsIjogZmFsc2UsICJpc190cmFja2VkIjogZmFsc2UsICJuYW1lIjogImRmX2NyZWF0ZV9k\nYXRlIiwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAib3BlcmF0aW9ucyI6IFtdLCAicGxhY2Vob2xk\nZXIiOiAiIiwgInByZWZpeCI6ICJwcm9wZXJ0aWVzIiwgInJlYWRfb25seSI6IGZhbHNlLCAicmlj\naF90ZXh0IjogZmFsc2UsICJ0YWdzIjogW10sICJ0ZW1wbGF0ZXMiOiBbXSwgInRleHQiOiAiRGF0\nYSBGZWVkZXIgU3luYyBPcmlnaW5hbCBDcmVhdGUgRGF0ZSIsICJ0b29sdGlwIjogIk9yaWdpbmFs\nIEluY2lkZW50IGNyZWF0ZSBkYXRlIiwgInR5cGVfaWQiOiAwLCAidXVpZCI6ICI5MTU2YjM3OC1l\nZmQ0LTRhNTMtOTY3YS1mNTdjNzI1NjIzOTYiLCAidmFsdWVzIjogW119LCB7ImFsbG93X2RlZmF1\nbHRfdmFsdWUiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiY2FsY3VsYXRlZCI6IGZh\nbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2Vu\nX2J5X3NlcnZlciI6IGZhbHNlLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJp\nbmNpZGVudC9kZl9pbmNfaWQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImlkIjogOTcz\nLCAiaW5wdXRfdHlwZSI6ICJudW1iZXIiLCAiaW50ZXJuYWwiOiBmYWxzZSwgImlzX3RyYWNrZWQi\nOiBmYWxzZSwgIm5hbWUiOiAiZGZfaW5jX2lkIiwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAib3Bl\ncmF0aW9ucyI6IFtdLCAicGxhY2Vob2xkZXIiOiAiIiwgInByZWZpeCI6ICJwcm9wZXJ0aWVzIiwg\nInJlYWRfb25seSI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0YWdzIjogW10sICJ0ZW1w\nbGF0ZXMiOiBbXSwgInRleHQiOiAiRGF0YSBGZWVkZXIgU3luYyBJbmNpZGVudCBJZCIsICJ0b29s\ndGlwIjogIkRhdGEgRmVlZGVyIFN5bmMgT3JpZ2luYXRpbmcgSW5jaWRlbnQgSWQiLCAidHlwZV9p\nZCI6IDAsICJ1dWlkIjogImE1ZjdhNjM3LTBkMjMtNDYzYS1iOGMyLTlhMzdjYWI0Njc4NyIsICJ2\nYWx1ZXMiOiBbXX0sIHsiYWxsb3dfZGVmYXVsdF92YWx1ZSI6IGZhbHNlLCAiYmxhbmtfb3B0aW9u\nIjogZmFsc2UsICJjYWxjdWxhdGVkIjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImNob3Nl\nbiI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJkZXByZWNhdGVk\nIjogZmFsc2UsICJleHBvcnRfa2V5IjogImluY2lkZW50L2RmX29yZ19pZCIsICJoaWRlX25vdGlm\naWNhdGlvbiI6IGZhbHNlLCAiaWQiOiA5NzIsICJpbnB1dF90eXBlIjogIm51bWJlciIsICJpbnRl\ncm5hbCI6IGZhbHNlLCAiaXNfdHJhY2tlZCI6IGZhbHNlLCAibmFtZSI6ICJkZl9vcmdfaWQiLCAi\nb3BlcmF0aW9uX3Blcm1zIjoge30sICJvcGVyYXRpb25zIjogW10sICJwbGFjZWhvbGRlciI6ICIi\nLCAicHJlZml4IjogInByb3BlcnRpZXMiLCAicmVhZF9vbmx5IjogZmFsc2UsICJyaWNoX3RleHQi\nOiBmYWxzZSwgInRhZ3MiOiBbXSwgInRlbXBsYXRlcyI6IFtdLCAidGV4dCI6ICJEYXRhIEZlZWRl\nciBTeW5jIE9yZyBJZCIsICJ0b29sdGlwIjogIkRhdGEgRmVlZGVyIFN5bmMgT3JpZ2luYXRpbmcg\nT3JnIElkIiwgInR5cGVfaWQiOiAwLCAidXVpZCI6ICJhYjYyYWU3ZS03Y2YyLTQ3YjQtODk0MC1m\nMGUxZTEzYTU4MzQiLCAidmFsdWVzIjogW119LCB7ImV4cG9ydF9rZXkiOiAiaW5jaWRlbnQvaW50\nZXJuYWxfY3VzdG9taXphdGlvbnNfZmllbGQiLCAiaWQiOiAwLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0\nIiwgImludGVybmFsIjogdHJ1ZSwgIm5hbWUiOiAiaW50ZXJuYWxfY3VzdG9taXphdGlvbnNfZmll\nbGQiLCAicmVhZF9vbmx5IjogdHJ1ZSwgInRleHQiOiAiQ3VzdG9taXphdGlvbnMgRmllbGQgKGlu\ndGVybmFsKSIsICJ0eXBlX2lkIjogMCwgInV1aWQiOiAiYmZlZWMyZDQtMzc3MC0xMWU4LWFkMzkt\nNGEwMDA0MDQ0YWExIn1dLCAiZnVuY3Rpb25zIjogW3siY3JlYXRvciI6IHsiZGlzcGxheV9uYW1l\nIjogIlJlc2lsaWVudCBTeXNhZG1pbiIsICJpZCI6IDMsICJuYW1lIjogImFAZXhhbXBsZS5jb20i
\nLCAidHlwZSI6ICJ1c2VyIn0sICJkZXNjcmlwdGlvbiI6IHsiZm9ybWF0IjogInRleHQiLCAiY29u\ndGVudCI6ICJTeW5jaHJvbml6ZSBJbmNpZGVudChzKSBhbmQgdGhlaXIgYXNzb2NpYXRlZCB0YXNr\ncywgbm90ZXMsIGF0dGFjaG1lbnRzLCBhcnRpZmFjdHMsIG1pbGVzdG9uZXMgYW5kIGFzc29jaWF0\nZWQgZGF0YXRhYmxlcyJ9LCAiZGVzdGluYXRpb25faGFuZGxlIjogImZlZWRfZGF0YV91bnVzZWQi\nLCAiZGlzcGxheV9uYW1lIjogIkRhdGEgRmVlZGVyOiBTeW5jIEluY2lkZW50cyIsICJleHBvcnRf\na2V5IjogImRhdGFfZmVlZGVyX3N5bmNfaW5jaWRlbnRzIiwgImlkIjogNDUsICJsYXN0X21vZGlm\naWVkX2J5IjogeyJkaXNwbGF5X25hbWUiOiAiUmVzaWxpZW50IFN5c2FkbWluIiwgImlkIjogMywg\nIm5hbWUiOiAiYUBleGFtcGxlLmNvbSIsICJ0eXBlIjogInVzZXIifSwgImxhc3RfbW9kaWZpZWRf\ndGltZSI6IDE2MDIyNDU0NzU0OTYsICJuYW1lIjogImRhdGFfZmVlZGVyX3N5bmNfaW5jaWRlbnRz\nIiwgInRhZ3MiOiBbXSwgInV1aWQiOiAiN2ZmZWQ0ZTUtNzJmYi00MTYyLWJkZWYtNGVhM2ViZmE4\nOWRlIiwgInZlcnNpb24iOiAyLCAidmlld19pdGVtcyI6IFt7ImNvbnRlbnQiOiAiYjgwZDExZDQt\nOWM2Yi00Y2Q3LTk1MWEtNGZlOGM1NzJjOWVmIiwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJm\naWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19pZiI6IG51bGwsICJzaG93X2xpbmtfaGVh\nZGVyIjogZmFsc2UsICJzdGVwX2xhYmVsIjogbnVsbH0sIHsiY29udGVudCI6ICJlNzgyMGU0NC00\nMDg3LTRjZTItODRhZi0wZmU5MzYzMGEwM2MiLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZp\nZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93X2lmIjogbnVsbCwgInNob3dfbGlua19oZWFk\nZXIiOiBmYWxzZSwgInN0ZXBfbGFiZWwiOiBudWxsfSwgeyJjb250ZW50IjogIjczMWU5NGZmLTgy\nMmYtNDhmMS04M2E5LWE3ODM4MGZkNjM2YiIsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiZmll\nbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNob3dfaWYiOiBudWxsLCAic2hvd19saW5rX2hlYWRl\nciI6IGZhbHNlLCAic3RlcF9sYWJlbCI6IG51bGx9XSwgIndvcmtmbG93cyI6IFt7ImFjdGlvbnMi\nOiBbXSwgImRlc2NyaXB0aW9uIjogbnVsbCwgIm5hbWUiOiAiRGF0YSBGZWVkZXI6IFN5bmMgSW5j\naWRlbnRzIiwgIm9iamVjdF90eXBlIjogImluY2lkZW50IiwgInByb2dyYW1tYXRpY19uYW1lIjog\nImRhdGFfZmVlZGVyX3N5bmNfaW5jaWRlbnRzIiwgInRhZ3MiOiBbXSwgInV1aWQiOiBudWxsLCAi\nd29ya2Zsb3dfaWQiOiA1NX1dfV0sICJnZW9zIjogbnVsbCwgImdyb3VwcyI6IG51bGwsICJpZCI6\nIDg4LCAiaW5ib3VuZF9tYWlsYm94ZXMiOiBudWxsLCAiaW5jaWRlbnRfYXJ0aWZhY3RfdHlwZXMi\nOiBbXSwgImluY2lkZW50X3R5cGVzIjogW3sidXBkYXRlX2RhdGUiOiAxNjAyMjQ3NTg5ODUwLCAi\nY3JlYXRlX2RhdGUiOiAxNjAyMjQ3NTg5ODUwLCAidXVpZCI6ICJiZmVlYzJkNC0zNzcwLTExZTgt\nYWQzOS00YTAwMDQwNDRhYTAiLCAiZGVzY3JpcHRpb24iOiAiQ3VzdG9taXphdGlvbiBQYWNrYWdl\ncyAoaW50ZXJuYWwpIiwgImV4cG9ydF9rZXkiOiAiQ3VzdG9taXphdGlvbiBQYWNrYWdlcyAoaW50\nZXJuYWwpIiwgIm5hbWUiOiAiQ3VzdG9taXphdGlvbiBQYWNrYWdlcyAoaW50ZXJuYWwpIiwgImVu\nYWJsZWQiOiBmYWxzZSwgInN5c3RlbSI6IGZhbHNlLCAicGFyZW50X2lkIjogbnVsbCwgImhpZGRl\nbiI6IGZhbHNlLCAiaWQiOiAwfV0sICJpbmR1c3RyaWVzIjogbnVsbCwgImxheW91dHMiOiBbXSwg\nImxvY2FsZSI6IG51bGwsICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFt7ImFwaV9rZXlzIjogW10s\nICJkZXN0aW5hdGlvbl90eXBlIjogMCwgImV4cGVjdF9hY2siOiB0cnVlLCAiZXhwb3J0X2tleSI6\nICJmZWVkX2RhdGFfcmVzaWxpZW50IiwgIm5hbWUiOiAiZmVlZF9kYXRhX3Jlc2lsaWVudCIsICJw\ncm9ncmFtbWF0aWNfbmFtZSI6ICJmZWVkX2RhdGFfcmVzaWxpZW50IiwgInRhZ3MiOiBbXSwgInVz\nZXJzIjogWyJhQGV4YW1wbGUuY29tIl0sICJ1dWlkIjogImZmNzE0NmU5LTZmNWItNDEyZS1hYzgw\nLWJiNDNmMmFhNDUxZCJ9LCB7ImFwaV9rZXlzIjogW10sICJkZXN0aW5hdGlvbl90eXBlIjogMCwg\nImV4cGVjdF9hY2siOiB0cnVlLCAiZXhwb3J0X2tleSI6ICJmZWVkX2RhdGFfdW51c2VkIiwgIm5h\nbWUiOiAiZmVlZF9kYXRhX3VudXNlZCIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJmZWVkX2RhdGFf\ndW51c2VkIiwgInRhZ3MiOiBbXSwgInVzZXJzIjogW10sICJ1dWlkIjogIjhjMzhiMzc3LTNhMjYt\nNDQzZi04ZjNiLWI4M2UwNjEzNmU4ZSJ9XSwgIm5vdGlmaWNhdGlvbnMiOiBudWxsLCAib3ZlcnJp\nZGVzIjogW10sICJwaGFzZXMiOiBbXSwgInJlZ3VsYXRvcnMiOiBudWxsLCAicm9sZXMiOiBbXSwg\nInNjcmlwdHMiOiBbXSwgInNlcnZlcl92ZXJzaW9uIjogeyJidWlsZF9udW1iZXIiOiAzMiwgIm1h\nam9yIjogMzUsICJtaW5vciI6IDIsICJ2ZXJzaW9uIjogIjM1LjIuMzIifSwgInRhZ3MiOiBbXSwg\nInRhc2tfb3JkZXIiOiBbXSwgInRpbWVmcmFtZXMiOiB
udWxsLCAidHlwZXMiOiBbXSwgIndvcmtm\nbG93cyI6IFt7ImFjdGlvbnMiOiBbXSwgImNvbnRlbnQiOiB7InZlcnNpb24iOiAxLCAid29ya2Zs\nb3dfaWQiOiAiZGF0YV9mZWVkZXJfc3luY19pbmNpZGVudHMiLCAieG1sIjogIjw/eG1sIHZlcnNp\nb249XCIxLjBcIiBlbmNvZGluZz1cIlVURi04XCI/PjxkZWZpbml0aW9ucyB4bWxucz1cImh0dHA6\nLy93d3cub21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQvTU9ERUxcIiB4bWxuczpicG1uZGk9XCJo\ndHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9CUE1OLzIwMTAwNTI0L0RJXCIgeG1sbnM6b21nZGM9XCJo\ndHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9EQ1wiIHhtbG5zOm9tZ2RpPVwiaHR0\ncDovL3d3dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1MjQvRElcIiB4bWxuczpyZXNpbGllbnQ9XCJo\ndHRwOi8vcmVzaWxpZW50LmlibS5jb20vYnBtblwiIHhtbG5zOnhzZD1cImh0dHA6Ly93d3cudzMu\nb3JnLzIwMDEvWE1MU2NoZW1hXCIgeG1sbnM6eHNpPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9Y\nTUxTY2hlbWEtaW5zdGFuY2VcIiB0YXJnZXROYW1lc3BhY2U9XCJodHRwOi8vd3d3LmNhbXVuZGEu\nb3JnL3Rlc3RcIj48cHJvY2VzcyBpZD1cImRhdGFfZmVlZGVyX3N5bmNfaW5jaWRlbnRzXCIgaXNF\neGVjdXRhYmxlPVwidHJ1ZVwiIG5hbWU9XCJEYXRhIEZlZWRlcjogU3luYyBJbmNpZGVudHNcIj48\nZG9jdW1lbnRhdGlvbj5TeW5jaHJvbml6ZSBJbmNpZGVudChzKSBhbmQgdGhlaXIgYXNzb2NpYXRl\nZCB0YXNrcywgbm90ZXMsIGF0dGFjaG1lbnRzLCBhcnRpZmFjdHMsIG1pbGVzdG9uZXMgYW5kIGFz\nc29jaWF0ZWQgZGF0YXRhYmxlczwvZG9jdW1lbnRhdGlvbj48c3RhcnRFdmVudCBpZD1cIlN0YXJ0\nRXZlbnRfMTU1YXN4bVwiPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMWd2bG52Zzwvb3V0Z29pbmc+\nPC9zdGFydEV2ZW50PjxzZXJ2aWNlVGFzayBpZD1cIlNlcnZpY2VUYXNrXzB5b2Y3aGlcIiBuYW1l\nPVwiRGF0YSBGZWVkZXI6IFN5bmMgSW5jaWRlbnRzXCIgcmVzaWxpZW50OnR5cGU9XCJmdW5jdGlv\nblwiPjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1aWQ9XCI3ZmZlZDRl\nNS03MmZiLTQxNjItYmRlZi00ZWEzZWJmYTg5ZGVcIj57XCJpbnB1dHNcIjp7fSxcInBvc3RfcHJv\nY2Vzc2luZ19zY3JpcHRcIjpcIiMgeyd2ZXJzaW9uJzogJzEuMCcsICdzdWNjZXNzJzogVHJ1ZSwg\nJ3JlYXNvbic6IE5vbmUsICdjb250ZW50JzogeydudW1fb2Zfc3luY19pbmNpZGVudHMnOiAyfSwg\nJ3Jhdyc6ICd7XFxcIm51bV9vZl9zeW5jX2luY2lkZW50c1xcXCI6IDJ9JywgJ2lucHV0cyc6IHsn\nZGZfbWF4X2luY2lkZW50X2lkJzogTm9uZSwgJ2RmX21pbl9pbmNpZGVudF9pZCc6IDB9LCAnbWV0\ncmljcyc6IHsndmVyc2lvbic6ICcxLjAnLCAncGFja2FnZSc6ICd1bmtub3duJywgJ3BhY2thZ2Vf\ndmVyc2lvbic6ICd1bmtub3duJywgJ2hvc3QnOiAnTWFya3MtTUJQLmZpb3Mtcm91dGVyLmhvbWUn\nLCAnZXhlY3V0aW9uX3RpbWVfbXMnOiAyMDYyLCAndGltZXN0YW1wJzogJzIwMTktMDUtMTQgMjE6\nMzc6MDUnfX1cXG5pbmNpZGVudC5hZGROb3RlKFxcXCJEYXRhIEZlZWRlciBTeW5jXFxcXG5NaW46\nIHt9IE1heDoge31cXFxcbkluY2lkZW50cyBTeW5jJ2Q6IHt9XFxcIi5mb3JtYXQoXFxuICAgICAg\nIHJlc3VsdHNbJ2lucHV0cyddWydkZl9taW5faW5jaWRlbnRfaWQnXSwgXFxuICAgICAgIHJlc3Vs\ndHNbJ2lucHV0cyddWydkZl9tYXhfaW5jaWRlbnRfaWQnXSxcXG4gICAgICAgcmVzdWx0c1snY29u\ndGVudCddWydudW1fb2Zfc3luY19pbmNpZGVudHMnXSkpXCIsXCJwcmVfcHJvY2Vzc2luZ19zY3Jp\ncHRcIjpcInRyeTpcXG4gIGlucHV0cy5kZl9taW5faW5jaWRlbnRfaWQgPSBydWxlLnByb3BlcnRp\nZXMuZGF0YV9mZWVkZXJfbWluaW11bV9pbmNpZGVudF9pZFxcbiAgaW5wdXRzLmRmX21heF9pbmNp\nZGVudF9pZCA9IHJ1bGUucHJvcGVydGllcy5kYXRhX2ZlZWRlcl9tYXhpbXVtX2luY2lkZW50X2lk\nXFxuICBpbnB1dHMuZGZfcXVlcnlfYXBpX21ldGhvZCA9IHJ1bGUucHJvcGVydGllcy5xdWVyeV9h\ncGlfbWV0aG9kXFxuZXhjZXB0OlxcbiAgaGVscGVyLmZhaWwoXFxcIlRoaXMgdmVyc2lvbiBvZiBS\nZXNpbGllbnQgY2Fubm90IHVzZSB0aGlzIGZ1bmN0aW9uXFxcIilcXG4gIFwiLFwicmVzdWx0X25h\nbWVcIjpcIlwifTwvcmVzaWxpZW50OmZ1bmN0aW9uPjwvZXh0ZW5zaW9uRWxlbWVudHM+PGluY29t\naW5nPlNlcXVlbmNlRmxvd18xZ3ZsbnZnPC9pbmNvbWluZz48b3V0Z29pbmc+U2VxdWVuY2VGbG93\nXzFnN2Q2OTc8L291dGdvaW5nPjwvc2VydmljZVRhc2s+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVl\nbmNlRmxvd18xZ3ZsbnZnXCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0\nUmVmPVwiU2VydmljZVRhc2tfMHlvZjdoaVwiLz48ZW5kRXZlbnQgaWQ9XCJFbmRFdmVudF8xdmd3\nMThmXCI+PGluY29taW5nPlNlcXVlbmNlRmxvd18xZzdkNjk3PC9pbmNvbWluZz48L2VuZEV2ZW50\nPjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3dfMWc3ZDY5N1wiIHNvdXJjZVJlZj1cIlNl\ncnZpY2VUYX
NrXzB5b2Y3aGlcIiB0YXJnZXRSZWY9XCJFbmRFdmVudF8xdmd3MThmXCIvPjx0ZXh0\nQW5ub3RhdGlvbiBpZD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIj48dGV4dD5TdGFydCB5b3Vy\nIHdvcmtmbG93IGhlcmU8L3RleHQ+PC90ZXh0QW5ub3RhdGlvbj48YXNzb2NpYXRpb24gaWQ9XCJB\nc3NvY2lhdGlvbl8xc2V1ajQ4XCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFy\nZ2V0UmVmPVwiVGV4dEFubm90YXRpb25fMWt4eGl5dFwiLz48dGV4dEFubm90YXRpb24gaWQ9XCJU\nZXh0QW5ub3RhdGlvbl8wMzMzNGNiXCI+PHRleHQ+Q3JlYXRlcyBhbiBpbmNpZGVudCBub3RlIHdp\ndGggbnVtYmVyIG9mIGluY2lkZW50cyBzeW5jaHJvbml6ZWQ8L3RleHQ+PC90ZXh0QW5ub3RhdGlv\nbj48YXNzb2NpYXRpb24gaWQ9XCJBc3NvY2lhdGlvbl8wN3RidXprXCIgc291cmNlUmVmPVwiU2Vy\ndmljZVRhc2tfMHlvZjdoaVwiIHRhcmdldFJlZj1cIlRleHRBbm5vdGF0aW9uXzAzMzM0Y2JcIi8+\nPHRleHRBbm5vdGF0aW9uIGlkPVwiVGV4dEFubm90YXRpb25fMWx2YnY2MlwiPjx0ZXh0PklucHV0\nIGZyb20gUnVsZSBhY3Rpdml0eSBmaWVsZHM8L3RleHQ+PC90ZXh0QW5ub3RhdGlvbj48YXNzb2Np\nYXRpb24gaWQ9XCJBc3NvY2lhdGlvbl8wbmlpendkXCIgc291cmNlUmVmPVwiU2VydmljZVRhc2tf\nMHlvZjdoaVwiIHRhcmdldFJlZj1cIlRleHRBbm5vdGF0aW9uXzFsdmJ2NjJcIi8+PC9wcm9jZXNz\nPjxicG1uZGk6QlBNTkRpYWdyYW0gaWQ9XCJCUE1ORGlhZ3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxh\nbmUgYnBtbkVsZW1lbnQ9XCJ1bmRlZmluZWRcIiBpZD1cIkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpC\nUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZl\nbnRfMTU1YXN4bV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIg\neD1cIjE2MlwiIHk9XCIxODhcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWln\naHQ9XCIwXCIgd2lkdGg9XCI5MFwiIHg9XCIxNTdcIiB5PVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5M\nYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJU\nZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCIgaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0X2RpXCI+\nPG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzMFwiIHdpZHRoPVwiMTAwXCIgeD1cIjk5XCIgeT1cIjI1\nNFwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIkFz\nc29jaWF0aW9uXzFzZXVqNDhcIiBpZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhfZGlcIj48b21nZGk6\nd2F5cG9pbnQgeD1cIjE2OVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjIwXCIvPjxv\nbWdkaTp3YXlwb2ludCB4PVwiMTUzXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyNTRc\nIi8+PC9icG1uZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2\naWNlVGFza18weW9mN2hpXCIgaWQ9XCJTZXJ2aWNlVGFza18weW9mN2hpX2RpXCI+PG9tZ2RjOkJv\ndW5kcyBoZWlnaHQ9XCI4MFwiIHdpZHRoPVwiMTAwXCIgeD1cIjI5MVwiIHk9XCIxNjZcIi8+PC9i\ncG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZs\nb3dfMWd2bG52Z1wiIGlkPVwiU2VxdWVuY2VGbG93XzFndmxudmdfZGlcIj48b21nZGk6d2F5cG9p\nbnQgeD1cIjE5OFwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3\nYXlwb2ludCB4PVwiMjkxXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJw\nbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9\nXCIyNDQuNVwiIHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdl\nPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiRW5kRXZlbnRfMXZndzE4ZlwiIGlkPVwi\nRW5kRXZlbnRfMXZndzE4Zl9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1c\nIjM2XCIgeD1cIjQ2OC41MjcxNjQ2ODU5MDgzXCIgeT1cIjE4OFwiLz48YnBtbmRpOkJQTU5MYWJl\nbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjQ4Ni41MjcxNjQ2\nODU5MDgzXCIgeT1cIjIyN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBl\nPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMWc3ZDY5N1wiIGlk\nPVwiU2VxdWVuY2VGbG93XzFnN2Q2OTdfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjM5MVwiIHhz\naTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNDY5\nXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+\nPG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI0MzBcIiB5PVwiMTg0\nXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT4
8YnBtbmRpOkJQTU5TaGFw\nZSBicG1uRWxlbWVudD1cIlRleHRBbm5vdGF0aW9uXzAzMzM0Y2JcIiBpZD1cIlRleHRBbm5vdGF0\naW9uXzAzMzM0Y2JfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjU5XCIgd2lkdGg9XCIxNzZc\nIiB4PVwiMzg0XCIgeT1cIjY3XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdl\nIGJwbW5FbGVtZW50PVwiQXNzb2NpYXRpb25fMDd0YnV6a1wiIGlkPVwiQXNzb2NpYXRpb25fMDd0\nYnV6a19kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMzg1XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2lu\ndFwiIHk9XCIxNzBcIi8+PG9tZ2RpOndheXBvaW50IHg9XCI0MzhcIiB4c2k6dHlwZT1cIm9tZ2Rj\nOlBvaW50XCIgeT1cIjEyNlwiLz48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBi\ncG1uRWxlbWVudD1cIlRleHRBbm5vdGF0aW9uXzFsdmJ2NjJcIiBpZD1cIlRleHRBbm5vdGF0aW9u\nXzFsdmJ2NjJfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjQ2XCIgd2lkdGg9XCIxMzNcIiB4\nPVwiMTQ1XCIgeT1cIjgyXCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJw\nbW5FbGVtZW50PVwiQXNzb2NpYXRpb25fMG5paXp3ZFwiIGlkPVwiQXNzb2NpYXRpb25fMG5paXp3\nZF9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMjk2XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwi\nIHk9XCIxNzFcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIyNDFcIiB4c2k6dHlwZT1cIm9tZ2RjOlBv\naW50XCIgeT1cIjEyOFwiLz48L2JwbW5kaTpCUE1ORWRnZT48L2JwbW5kaTpCUE1OUGxhbmU+PC9i\ncG1uZGk6QlBNTkRpYWdyYW0+PC9kZWZpbml0aW9ucz4ifSwgImNvbnRlbnRfdmVyc2lvbiI6IDEs\nICJjcmVhdG9yX2lkIjogImFAZXhhbXBsZS5jb20iLCAiZGVzY3JpcHRpb24iOiAiU3luY2hyb25p\nemUgSW5jaWRlbnQocykgYW5kIHRoZWlyIGFzc29jaWF0ZWQgdGFza3MsIG5vdGVzLCBhdHRhY2ht\nZW50cywgYXJ0aWZhY3RzLCBtaWxlc3RvbmVzIGFuZCBhc3NvY2lhdGVkIGRhdGF0YWJsZXMiLCAi\nZXhwb3J0X2tleSI6ICJkYXRhX2ZlZWRlcl9zeW5jX2luY2lkZW50cyIsICJsYXN0X21vZGlmaWVk\nX2J5IjogImFAZXhhbXBsZS5jb20iLCAibGFzdF9tb2RpZmllZF90aW1lIjogMTU5MTcyMTQzMDgy\nMSwgIm5hbWUiOiAiRGF0YSBGZWVkZXI6IFN5bmMgSW5jaWRlbnRzIiwgIm9iamVjdF90eXBlIjog\nImluY2lkZW50IiwgInByb2dyYW1tYXRpY19uYW1lIjogImRhdGFfZmVlZGVyX3N5bmNfaW5jaWRl\nbnRzIiwgInRhZ3MiOiBbXSwgInV1aWQiOiAiNDMzNzA2YTUtMmI2MS00OGQ4LTliMTMtYjQyNDYy\nYTRlOTA5IiwgIndvcmtmbG93X2lkIjogNTV9XSwgIndvcmtzcGFjZXMiOiBbXX0=\n\"\"\")", "title": "" }, { "docid": "c4e94bf3d1da46ad9a3fa3e315369a37", "score": "0.4607705", "text": "def test_user_customizations(self):\n repo = self.init_test_repo('gbp-test-native')\n\n # Non-existent customization file\n eq_(mock_ch(['--customizations=customizations.py']), 1)\n\n # Create user customizations file\n with open('customizations.py', 'w') as fobj:\n fobj.write(\"class ChangelogEntryFormatter(object):\\n\")\n fobj.write(\" @classmethod\\n\")\n fobj.write(\" def compose(cls, commit_info, **kwargs):\\n\")\n fobj.write(\" return ['- %s' % commit_info['id']]\\n\")\n\n eq_(mock_ch(['--customizations=customizations.py']), 0)\n entry = self.read_file('packaging/gbp-test-native.changes')[1]\n sha = repo.rev_parse('HEAD')\n eq_(entry, '- %s\\n' % sha)", "title": "" }, { "docid": "571201f60b15fbd173730677c62bef72", "score": "0.46030045", "text": "def create_c_dict(self):\n c_dict = super().create_c_dict()\n c_dict['VERBOSITY'] = self.config.getstr('config',\n 'LOG_TC_PAIRS_VERBOSITY',\n c_dict['VERBOSITY'])\n c_dict['ALLOW_MULTIPLE_FILES'] = True\n\n c_dict['MISSING_VAL_TO_REPLACE'] = (\n self.config.getstr('config',\n 'TC_PAIRS_MISSING_VAL_TO_REPLACE', '-99')\n )\n c_dict['MISSING_VAL'] = (\n self.config.getstr('config', 'TC_PAIRS_MISSING_VAL', '-9999')\n )\n\n # get the MET config file path or use default\n c_dict['CONFIG_FILE'] = self.get_config_file('TCPairsConfig_wrapped')\n\n self.add_met_config(name='init_beg',\n data_type='string',\n metplus_configs=['TC_PAIRS_INIT_BEG',\n 'INIT_BEG'])\n\n self.add_met_config(name='init_end',\n data_type='string',\n metplus_configs=['TC_PAIRS_INIT_END',\n 'INIT_END'])\n\n 
self.add_met_config(name='init_inc',\n data_type='list',\n metplus_configs=['TC_PAIRS_INIT_INCLUDE',\n 'TC_PAIRS_INIT_INC',\n 'INIT_INCLUDE'])\n\n self.add_met_config(name='init_exc',\n data_type='list',\n metplus_configs=['TC_PAIRS_INIT_EXCLUDE',\n 'TC_PAIRS_INIT_EXC',\n 'INIT_EXCLUDE'])\n\n self.add_met_config(name='valid_inc',\n data_type='list',\n metplus_configs=['TC_PAIRS_VALID_INCLUDE',\n 'TC_PAIRS_VALID_INC',\n 'VALID_INCLUDE'])\n\n self.add_met_config(name='valid_exc',\n data_type='list',\n metplus_configs=['TC_PAIRS_VALID_EXCLUDE',\n 'TC_PAIRS_VALID_EXC',\n 'VALID_EXCLUDE'])\n\n self.add_met_config(name='write_valid',\n data_type='list',\n metplus_configs=['TC_PAIRS_WRITE_VALID'])\n\n self.add_met_config(name='valid_beg',\n data_type='string',\n metplus_configs=['TC_PAIRS_VALID_BEG',\n 'VALID_BEG'])\n\n self.add_met_config(name='valid_end',\n data_type='string',\n metplus_configs=['TC_PAIRS_VALID_END',\n 'VALID_END'])\n\n self.add_met_config(name='dland_file',\n data_type='string',\n metplus_configs=['TC_PAIRS_DLAND_FILE'])\n\n self.add_met_config(name='model',\n data_type='list',\n metplus_configs=['TC_PAIRS_MODEL',\n 'MODEL'])\n\n self.add_met_config(name='storm_name',\n data_type='list',\n metplus_configs=['TC_PAIRS_STORM_NAME'])\n\n self._handle_consensus()\n\n self.add_met_config(name='check_dup',\n data_type='bool')\n\n self.add_met_config(name='interp12',\n data_type='string',\n extra_args={'remove_quotes': True,\n 'uppercase': True})\n\n self.add_met_config(name='match_points', data_type='bool')\n\n self._handle_diag_info_map()\n\n self._handle_diag_convert_map()\n\n # if unset, set match_points to TRUE to match old default in wrapped\n if not self.env_var_dict.get('METPLUS_MATCH_POINTS'):\n self.env_var_dict['METPLUS_MATCH_POINTS'] = 'match_points = TRUE;'\n\n c_dict['INIT_INCLUDE'] = getlist(\n self.get_wrapper_or_generic_config('INIT_INCLUDE')\n )\n c_dict['INIT_EXCLUDE'] = getlist(\n self.get_wrapper_or_generic_config('INIT_EXCLUDE')\n )\n c_dict['VALID_BEG'] = self.get_wrapper_or_generic_config('VALID_BEG')\n c_dict['VALID_END'] = self.get_wrapper_or_generic_config('VALID_END')\n c_dict['ADECK_DIR'] = self.config.getdir('TC_PAIRS_ADECK_INPUT_DIR',\n '')\n c_dict['BDECK_DIR'] = self.config.getdir('TC_PAIRS_BDECK_INPUT_DIR',\n '')\n c_dict['EDECK_DIR'] = self.config.getdir('TC_PAIRS_EDECK_INPUT_DIR',\n '')\n c_dict['OUTPUT_DIR'] = self.config.getdir('TC_PAIRS_OUTPUT_DIR', '')\n if not c_dict['OUTPUT_DIR']:\n self.log_error('TC_PAIRS_OUTPUT_DIR must be set')\n\n c_dict['READ_ALL_FILES'] = (\n self.config.getbool('config',\n 'TC_PAIRS_READ_ALL_FILES',\n False)\n )\n\n # get list of models to process\n c_dict['MODEL_LIST'] = getlist(\n self.config.getraw('config', 'MODEL', '')\n )\n # if no models are requested, set list to contain a single string\n # that is the wildcard character '*'\n if not c_dict['MODEL_LIST']:\n c_dict['MODEL_LIST'] = ['*']\n\n self._read_storm_info(c_dict)\n\n c_dict['STORM_NAME_LIST'] = getlist(\n self.config.getraw('config', 'TC_PAIRS_STORM_NAME')\n )\n c_dict['DLAND_FILE'] = self.config.getraw('config',\n 'TC_PAIRS_DLAND_FILE')\n\n c_dict['ADECK_TEMPLATE'] = (\n self.config.getraw('config',\n 'TC_PAIRS_ADECK_TEMPLATE',\n '')\n )\n\n c_dict['BDECK_TEMPLATE'] = (\n self.config.getraw('config',\n 'TC_PAIRS_BDECK_TEMPLATE')\n )\n\n c_dict['EDECK_TEMPLATE'] = (\n self.config.getraw('config',\n 'TC_PAIRS_EDECK_TEMPLATE',\n '')\n )\n\n # read optional -diag argument variables\n self._handle_diag(c_dict)\n\n # handle output template\n output_template = 
(\n self.config.getraw('config', 'TC_PAIRS_OUTPUT_TEMPLATE')\n )\n # set output name to tc_pairs if not specified\n if not output_template:\n output_template = 'tc_pairs'\n\n c_dict['OUTPUT_TEMPLATE'] = output_template\n\n c_dict['SKIP_REFORMAT'] = (\n self.config.getbool('config',\n 'TC_PAIRS_SKIP_IF_REFORMAT_EXISTS',\n False)\n )\n c_dict['SKIP_OUTPUT'] = (\n self.config.getbool('config',\n 'TC_PAIRS_SKIP_IF_OUTPUT_EXISTS',\n False)\n )\n c_dict['REFORMAT_DECK'] = self.config.getbool('config',\n 'TC_PAIRS_REFORMAT_DECK',\n False)\n c_dict['REFORMAT_DECK_TYPE'] = (\n self.config.getstr('config', 'TC_PAIRS_REFORMAT_TYPE',\n 'SBU')\n )\n c_dict['REFORMAT_DIR'] = self.config.getdir('TC_PAIRS_REFORMAT_DIR',\n '')\n if c_dict['REFORMAT_DECK'] and not c_dict['REFORMAT_DIR']:\n self.log_error('Must set TC_PAIRS_REFORMAT_DIR if '\n 'TC_PAIRS_REFORMAT_DECK is True')\n\n c_dict['GET_ADECK'] = True if c_dict['ADECK_TEMPLATE'] else False\n c_dict['GET_EDECK'] = True if c_dict['EDECK_TEMPLATE'] else False\n\n self.handle_description()\n\n c_dict['SKIP_LEAD_SEQ'] = (\n self.config.getbool('config',\n 'TC_PAIRS_SKIP_LEAD_SEQ',\n False)\n )\n\n # check for settings that cause differences moving from v4.1 to v5.0\n # warn and update run setting to preserve old behavior\n if (self.config.has_option('config', 'LOOP_ORDER') and\n self.config.getstr_nocheck('config', 'LOOP_ORDER') == 'times' and\n not self.config.has_option('config', 'TC_PAIRS_RUN_ONCE')):\n self.logger.warning(\n 'LOOP_ORDER has been deprecated. LOOP_ORDER has been set to '\n '\"times\" and TC_PAIRS_RUN_ONCE is not set. '\n 'Forcing TC_PAIRS_RUN_ONCE=False to preserve behavior prior to '\n 'v5.0.0. Please remove LOOP_ORDER and set '\n 'TC_PAIRS_RUN_ONCE=False to preserve previous behavior and '\n 'remove this warning message.'\n )\n c_dict['RUN_ONCE'] = False\n return c_dict\n\n # only run once if True\n c_dict['RUN_ONCE'] = self.config.getbool('config',\n 'TC_PAIRS_RUN_ONCE',\n True)\n return c_dict", "title": "" }, { "docid": "6e44d308ef3ed132be735fe190a623c7", "score": "0.4600397", "text": "def edit_sb_type_function_json(\n config,\n extra_config,\n app_type,\n app_path,\n):\n\n function_json_path = f\"{app_path}{app_type}/function.json\"\n\n with open(function_json_path, \"r\") as f_func_json:\n function_json = json.loads(f_func_json.read())\n\n if extra_config[\"sb_type\"].lower() == \"topic\":\n config[\"Values\"][\"SERVICE_BUS_TYPE\"] = \"TOPIC\"\n if \"binding\" in app_type:\n del config[\"Values\"][\"SERVICE_BUS_BINDING_QUEUE_NAME\"]\n del function_json[\"bindings\"][0][\"queueName\"]\n else:\n del config[\"Values\"][\"SERVICE_BUS_QUEUE_NAME\"]\n else:\n config[\"Values\"][\"SERVICE_BUS_TYPE\"] = \"QUEUE\"\n if \"binding\" in app_type:\n del function_json[\"bindings\"][0][\"topicName\"]\n del function_json[\"bindings\"][0][\"subscriptionName\"]\n del config[\"Values\"][\"SERVICE_BUS_BINDING_TOPIC_NAME\"]\n del config[\"Values\"][\"SERVICE_BUS_BINDING_SUB_NAME\"]\n else:\n del config[\"Values\"][\"SERVICE_BUS_TOPIC_NAME\"]\n del config[\"Values\"][\"SERVICE_BUS_SUB_NAME\"]\n\n with open(function_json_path, \"w\") as f_func_json:\n f_func_json.write(json.dumps(function_json, indent=JSON_STRING_INDENT))\n\n logging.info(textwrap.indent(\n text=\"CONFIGURED function.json AND local.settings.json FOR CHOSEN SERVICE BUS TYPE!\",\n prefix=' ' * PRINT_LEVEL_INDENT * 8,\n ))\n\n return config", "title": "" }, { "docid": "09a6f7bd70a877cfe1c44419dd1f8f06", "score": "0.45984218", "text": "def extended_set():\n mod = [\n ModuleInstall(\"m2r\", 
\"pip\",\n purpose=\"M2R converts a markdown file including reStructuredText (rst) markups to a valid rst format.\"),\n ModuleInstall(\"CommonMark\", \"pip\", mname=\"commonmark\",\n purpose=\"Python parser for the CommonMark Markdown spec\"),\n ModuleInstall(\"recommonmark\", \"pip\",\n purpose=\"For pymc3???\"),\n ModuleInstall(\"algopy\", \"pip\", usage=\"OPTIM\",\n purpose=\"ALGOPY: Taylor Arithmetic Computation and Algorithmic Differentiation\"),\n ModuleInstall(\"numdifftools\", \"pip\", usage=\"OPTIM\",\n purpose=\"Solves automatic numerical differentiation problems in one or more variables.\"),\n ModuleInstall(\"numpydoc\", \"pip\",\n purpose=\"Sphinx extension to support docstrings in Numpy format\"),\n ModuleInstall(\"Automat\", \"pip\", mname=\"automat\",\n purpose=\"Self-service finite-state machines for the programmer on the go.\"),\n ModuleInstall(\"guidata\", \"pip\" if not sys.platform.startswith(\"win\") else \"wheel2\",\n purpose=\"Automatic graphical user interfaces generation for easy dataset editing and display\"),\n ModuleInstall(\n \"guiqwt\", \"wheel\", purpose=\"Efficient 2D plotting Python library based on PythonQwt (Spyder)\"),\n ModuleInstall(\n \"QtAwesome\", \"pip\", mname=\"qtawesome\",\n purpose=\"QtAwesome enables iconic fonts such as Font Awesome and Elusive Icons in PyQt and PySide applications.\"),\n ModuleInstall(\n \"natgrid\", \"wheel\", mname=\"mpl_toolkits.natgrid\",\n purpose=\"Python interface to NCAR natgrid library (for matplotlib)\"),\n ModuleInstall(\n \"py\", \"pip\", purpose=\"library with cross-python path, ini-parsing, io, code, log facilities\"),\n ModuleInstall(\"pluggy\", \"pip\",\n purpose=\"plugin and hook calling mechanisms for python\"),\n ModuleInstall(\"atomicwrites\", \"pip\", purpose=\"Atomic file writes.\"),\n ModuleInstall(\"pytest\", \"pip\",\n purpose=\"pytest: simple powerful testing with Python\"),\n ModuleInstall(\"parameterized\", \"pip\",\n purpose=\"parameterized: For everything. 
Parameterized testing \"\n \"for nose, parameterized testing for py.test, parameterized testing for unittest.\"),\n ModuleInstall(\n \"blist\", \"wheel\",\n purpose=\"a list-like type with better asymptotic performance and similar performance on small lists\"),\n ModuleInstall(\n \"blz\", \"wheel\",\n purpose=\"blz: a compressed data container\"),\n ModuleInstall(\"pamela\", \"pip\",\n purpose=\"An interface to the Pluggable Authentication Modules (PAM) \" +\n \"library on linux, written in pure python (using ctypes)\")\n if not sys.platform.startswith(\"win\") else None,\n ModuleInstall(\"async_generator\", \"pip\", purpose=\"for jupyterhup\"),\n ModuleInstall(\"python_oauth2\", \"pip\", purpose=\"for jupyterhup\"),\n ModuleInstall(\n \"jupyterhub\", \"pip\", purpose=\"JupyterHub: A multi-user server for Jupyter notebooks\", usage=\"JUPYTER\")\n if not sys.platform.startswith(\"win\") else None,\n ModuleInstall('rpy2', 'wheel', purpose=\"interact with R (R_HOME needs to be set up on Linux)\",\n usage=\"DATA/ML\"),\n ModuleInstall('python-pptx', 'pip', mname=\"pptx\",\n purpose=\"read/write PowerPoint presentation\"),\n ModuleInstall(\n 'python-docx', 'pip', mname=\"docx\", purpose=\"read/write Word document\"),\n # ModuleInstall('flasksphinx', 'pip', purpose=\"serves Sphinx\n # documentation through a Flask server\"), # issue with Python 3\n ModuleInstall(\n 'cffi', 'wheel', usage=\"C++\", purpose=\"Foreign Function Interface for Python calling C code.\"),\n ModuleInstall(\n 'datashape', 'pip', purpose=\"A data description language.\"),\n ModuleInstall(\n 'ordereddict', 'pip', purpose=\"Python's collections.OrderedDict\") if sys.version_info[0] == 2 else None,\n ModuleInstall(\n 'cyordereddict', 'wheel', purpose=\"Cython implementation of Python's collections.OrderedDict\"),\n ModuleInstall('dynd', 'wheel',\n purpose=\"DyND-Python, a component of the Blaze project, \" +\n \"is the Python exposure of the DyND dynamic multi-dimensional array library.\")\n if sys.version_info[0] >= 3 else None,\n ModuleInstall(\"mpmath\", \"pip\",\n purpose=\"mpmath is a free (BSD licensed) Python library for real and complex \" +\n \"floating-point arithmetic with arbitrary precision.\"),\n ModuleInstall(\n 'sympy', 'pip', purpose=\"SymPy is a Python library for symbolic mathematics.\"),\n ModuleInstall('gmpy2', 'wheel',\n purpose=\"big real numbers (issue on Linux and Anaconda)\"),\n ModuleInstall('llvmlite', 'wheel',\n purpose=\"lightweight wrapper around basic LLVM functionality, check issue \" +\n \"https://github.com/cmderdev/cmder/issues/490 for missing api-ms-win-crt-runtime-l1-1-0.dll\"),\n ModuleInstall('numba', 'wheel', usage=\"C++\",\n purpose=\"Numba is an Open Source NumPy-aware optimizing compiler for Python sponsored by Continuum Analytics, Inc.\"),\n ModuleInstall('scikit-image', 'wheel', mname='skimage',\n purpose=\"scikit-image is a collection of algorithms for image processing.\"),\n ModuleInstall(\n 'cvxopt', 'wheel', purpose=\"linear, quadratique optimization\", usage=\"OPTIM\"),\n ModuleInstall(\n 'PyWavelets', 'wheel', mname='pywt', purpose=\"wavelets computation\", usage=\"DATA/ML\"),\n ModuleInstall('pyclustering', 'pip',\n purpose=\"many kinds of clustering (Optics, DBScan, x-means, ...)\", usage=\"DATA/ML\"),\n ModuleInstall(\n 'pycosat', 'wheel', purpose=\"PicoSAT is a popular SAT solver written by Armin Biere in pure C.\"),\n ModuleInstall('pyshp', 'pip', mname='shapefile',\n purpose=\"Pure Python read/write support for ESRI Shapefile format\"),\n ModuleInstall('descartes', 
'pip',\n purpose=\"Use Shapely or GeoJSON-like geometric objects as matplotlib paths and patches\"),\n ModuleInstall('geopandas', 'pip',\n purpose=\"GeoPandas is an open source project to make working with geospatial data in python easier. \"),\n ModuleInstall(\n 'vispy', 'pip', purpose=\"Vispy is a high-performance interactive 2D/3D data visualization library.\"),\n ModuleInstall(\n 'selenium', 'pip', purpose=\"Python wrapper for Selenium\", usage=\"NETWORK\"),\n ModuleInstall(\n 'splinter', 'pip', purpose=\"browser abstraction for web acceptance testing\", usage=\"NETWORK\"),\n ModuleInstall(\n 'pygame', 'wheel', purpose=\"GUI, interface for games (needs to be installed from www.pygame.org on Linux)\", usage=\"GUI\"),\n ModuleInstall(\n 'Kivy', 'wheel', mname='kivy', usage=\"GUI\",\n purpose=\"GUI, interface for games, mobile (use sudo apt-get install python3-kivy on Linux)\"),\n ModuleInstall('kivy-garden', 'pip', mname='kivy.garden',\n purpose=\"Garden tool for kivy flowers.\", usage=\"GUI\"),\n ModuleInstall(\n 'py4j', 'pip', purpose=\"Enables Python programs to dynamically access arbitrary Java objects\"),\n ModuleInstall(\n 'lockfile', 'pip', purpose=\"Platform-independent file locking module\"),\n ModuleInstall('python-daemon', 'pip', mname=\"daemon\",\n purpose=\"Library to implement a well-behaved Unix daemon process (for luigi)\"),\n ModuleInstall('cached_property', 'pip',\n purpose=\"A decorator for caching properties in classes (for luigi)\"),\n ModuleInstall('luigi', 'pip',\n purpose=\"workflows, data workflows\", usage=\"WORKFLOW\"),\n #\n ModuleInstall('setproctitle', 'wheel',\n purpose=\"A Python module to customize the process title\"),\n # thrift only works only for Python 2.7\n ModuleInstall(\n 'ply', 'pip', purpose=\"Python Lex & Yacc (for thrifty)\"),\n ModuleInstall(\n 'thrift', 'pip', purpose=\"Python bindings for the Apache Thrift RPC system\"),\n ModuleInstall(\n 'thriftpy', 'pip', purpose=\"pure python implemention of Apache Thrift.\"),\n # ModuleInstall('airflow', 'pip'), # does not work on Python 3\n ModuleInstall(\n 'branca', 'pip', purpose=\"\", usage=\"VIZ\"),\n ModuleInstall('folium', 'pip', usage=\"VIZ\",\n purpose=\"This library is a spinoff from folium, that would host the \" +\n \"non-map-specific features. 
It may become a HTML+JS generation \" +\n \"library in the future.\"),\n ModuleInstall(\n 'osmapi', 'pip', purpose=\"Python wrapper for the OSM API\", usage=\"VIZ\"),\n ModuleInstall('geographiclib', 'pip',\n purpose=\"This implements algorithms for geodesics (Karney, 2013) for solving \" +\n \"the direct and inverse problems for an ellipsoid of revolution.\"),\n ModuleInstall('geopy', 'pip',\n purpose=\"Python Geocoding Toolbox\", usage=\"VIZ\"),\n ModuleInstall('geojson', 'pip',\n purpose=\"Functions for encoding and decoding GeoJSON formatted data\"),\n # Deprecated\n # ModuleInstall('basemap', 'wheel', mname='mpl_toolkits.basemap',\n # purpose=\"maps extension for matplotlib\", usage=\"VIZ\"),\n ModuleInstall(\"python3-linkedin\", \"pip\", mname=\"linkedin\",\n purpose=\"Python Interface to the LinkedIn API\"),\n # access to linkedin\n ModuleInstall(\n \"oauthlib\", \"pip\", purpose=\"A generic, spec-compliant, thorough implementation of the OAuth request-signing logic\"),\n ModuleInstall(\"requests-oauthlib\", \"pip\", mname=\"requests_oauthlib\",\n purpose=\"OAuthlib authentication support for Requests.\"),\n ModuleInstall(\"antlr4-python3-runtime\", \"pip\",\n mname=\"antlr4\", purpose=\"antlr4 runtime, grammar parser\"),\n # ModuleInstall(\"unqlite\", \"pip\"), #\n # key/value store (NoSQL)\n ModuleInstall(\"pycontracts\", \"pip\", mname=\"contracts\", # version=\"1.7.6\",\n purpose=\"PyContracts is a Python package that allows to declare constraints on function parameters \" +\n \"and return values, setup for version 1.7.7 is bugged\"),\n #\n ModuleInstall(\n \"ecdsa\", \"pip\", purpose=\"ECDSA cryptographic signature library (pure python)\"),\n ModuleInstall(\"winrandom\", \"wheel\",\n purpose=\"This module gives direct access to Windows Cryptographic API CryptGetRandom() function, \" +\n \"which is cryptographically strong pseudo-random number generator (PRNG) on Windows:\"),\n ModuleInstall(\"pycrypto\", \"wheel2\", mname=\"Crypto\",\n purpose=\"Cryptographic modules for Python (not available on x64 and Python 3)\"),\n ModuleInstall(\"pycryptodomex\", \"pip\", mname=\"Cryptodome\",\n purpose=\"Cryptographic modules for Python (not available on x64 and Python 3)\"),\n ModuleInstall(\"xxhash\", \"wheel\",\n purpose=\"xxHash is an Extremely fast Hash algorithm, running at RAM speed limits.\"),\n ModuleInstall(\"cryptography\", \"pip\",\n purpose=\"cryptography is a package which provides cryptographic recipes and primitives to Python developers.\"),\n ModuleInstall(\n \"pyasn1\", \"pip\", purpose=\"ASN.1 types and codecs (for pysnmp)\"),\n ModuleInstall(\"asn1crypto\", \"pip\",\n purpose=\"Fast ASN.1 parser and serializer with definitions for private keys, \" +\n \"public keys, certificates, CRL, OCSP, CMS, PKCS#3, PKCS#7, PKCS#8, PKCS#12, PKCS#5, X.509 and TSP\"),\n ModuleInstall(\"PyNaCl\", \"pip\", mname=\"nacl\",\n purpose=\"Python binding to the Networking and Cryptography (NaCl) library\", usage=\"NETWORK\"),\n ModuleInstall(\"bcrypt\", \"pip\",\n purpose=\"Modern password hashing for your software and your servers\"),\n ModuleInstall(\"paramiko\", \"pip\",\n purpose=\"SSH2 protocol library\", usage=\"NETWORK\"),\n #\n #\n # 2015-02-05\n #\n ModuleInstall(\"autopy3\", \"wheel\",\n purpose=\"A simple, cross-platform GUI automation toolkit for Python 3 \" +\n \"(issue on Linux and Anaconda)\") if sys.version_info[0] >= 3 else None, # simulate events\n # large double\n ModuleInstall(\"bigfloat\", \"wheel\",\n purpose=\"big float (issue with Linux and Anaconda)\"),\n # convex 
optimization, depends on CVXOPT\n ModuleInstall(\n \"scs\", \"wheel\", purpose=\"Solves convex cone programs via operator splitting.\", usage=\"OPTIM\"),\n ModuleInstall(\n \"ecos\", \"wheel\", purpose=\"ECOS is a numerical software for solving convex second-order cone programs (SOCPs)\", usage=\"OPTIM\"),\n ModuleInstall(\n \"CVXcanon\", \"wheel\", purpose=\"A low-level library to perform the matrix building step in cvxpy, \" +\n \"a convex optimization modeling software.\", usage=\"OPTIM\") if sys.version_info[:2] >= (3, 5) else None,\n ModuleInstall(\n \"fastcache\", \"wheel\", purpose=\"C implementation of Python 3 lru_cache for Python.\"),\n ModuleInstall(\n \"multiprocess\", \"wheel\", purpose=\"better multiprocessing and multithreading in python\"),\n ModuleInstall(\"osqp\", \"wheel\",\n purpose=\"The OSQP (Operator Splitting Quadratic Program) solver is a numerical optimization package for solving \" +\n \"quadratic problems\") if sys.version_info[:2] >= (3, 5) else None,\n ModuleInstall(\"cvxpy\", \"pip\", usage=\"OPTIM\",\n purpose=\"linear, quadratic optimization, depends on cvxopt\") if sys.version_info[:2] >= (3, 5) else None,\n # to install packages with conda\n ModuleInstall(\"libLAS\", \"wheel\", mname=\"liblas\",\n purpose=\"libLAS is a C/C++ library for reading and writing the very common LAS LiDAR format.\"),\n ModuleInstall(\n \"liblinear\", \"wheel\", purpose=\"A Library for Large Linear Classification\"),\n ModuleInstall(\"marisa_trie\", \"wheel\",\n purpose=\"Static memory-efficient & fast Trie-like structures for Python (based on marisa-trie C++ library)\"),\n ModuleInstall(\n \"mlpy\", \"wheel\", purpose=\"mlpy is a Python module for Machine Learning built on top of NumPy/SciPy, has wavelets\"),\n ModuleInstall(\n \"pygit2\", \"wheel\", purpose=\"Pygit2 is a set of Python bindings to the libgit2 shared library, \" +\n \"libgit2 implements the core of Git.\"),\n ModuleInstall(\n \"pymongo\", \"wheel\", purpose=\"Python wrapper for MongoDB\", usage=\"NoSQL\"),\n ModuleInstall(\"psycopg2\", \"wheel\",\n purpose=\"Python-PostgreSQL Database Adapter\"),\n ModuleInstall(\n \"PyOpenGL\", \"wheel\", mname=\"OpenGL\", purpose=\"use OpenGL in Python\"),\n ModuleInstall(\n \"PyOpenGL_accelerate\", \"wheel\", mname=\"OpenGL_accelerate\", purpose=\"Acceleration code for PyOpenGL\"),\n ModuleInstall('pymc', 'wheel', web=\"https://github.com/pymc-devs/pymc\",\n purpose=\"Monte Carlo computation\", usage=\"DATA/ML\") if sys.version_info[0] >= 3 else None,\n ModuleInstall('autograd', 'pip',\n purpose=\"Efficiently computes derivatives of numpy code.\"),\n # The following package rely on theano (deprecated).\n # ModuleInstall('pymc3', 'github', \"pymc-devs\", web=\"https://github.com/pymc-devs/pymc3\",\n # purpose=\"Monte Carlo computation (Python 3 only)\", usage=\"DATA/ML\") if sys.version_info[0] >= 3 else None,\n # ModuleInstall('pysterior', 'pip',\n # purpose=\"pysterior is a machine learning library for Python which aims to make Bayesian parametric regression and \" +\n # \"classification models accessible and easy to use. 
The library allows users to construct \" +\n # \"supervised learning models using an intuitive interface similar to that used by scikit-learn.\",\n # usage=\"DATA/ML\") if sys.version_info[0] >= 3 else None,\n ModuleInstall(\n \"pyqtgraph\", \"pip\", purpose=\"Scientific Graphics and GUI Library for Python, depends on PySide\", usage=\"GUI\"),\n ModuleInstall(\"deap\", \"pip\", purpose=\"deep learning\"),\n # for gensim and distributed\n ModuleInstall(\"jmespath\", \"pip\", purpose=\"JSON Matching Expressions\"),\n # for gensim\n ModuleInstall(\"bz2file\", \"pip\", purpose=\"process bz2 files\"),\n # for gensim\n ModuleInstall(\"smart_open\", \"pip\",\n purpose=\"Utils for streaming large files (S3, HDFS, gzip, bz2...), provides the same API for many format\"),\n ModuleInstall(\"httpretty\", \"pip\",\n purpose=\"HTTP client mock for Python\"),\n ModuleInstall(\"gensim\", \"wheel\", purpose=\"genetic algorithm\"),\n # ModuleInstall(\"pybrain\", \"pip\"), # some issues with the code\n # (relative import are not well handled in version 0.3.3\n ModuleInstall(\"h5py\", \"wheel\", usage=\"DATA/ML\",\n purpose=\"The h5py package is a Pythonic interface to the HDF5 binary data format. \" +\n \"Trillion-Particle Simulation.\"),\n ModuleInstall(\"Keras-Applications\", \"pip\", mname=\"keras_applications\", usage=\"DATA/ML\",\n purpose=\"Keras Applications is the applications module of the Keras deep learning \" +\n \"library. It provides model definitions and pre-trained weights for a number of \" +\n \"popular archictures, such as VGG16, ResNet50, Xception, MobileNet, and more.\"),\n ModuleInstall(\"Keras-Preprocessing\", \"pip\", mname=\"keras_processing\", usage=\"DATA/ML\",\n purpose=\"Keras Preprocessing is the data preprocessing and data augmentation module \" +\n \"of the Keras deep learning library. 
It provides utilities for working with image \" +\n \"data, text data, and sequence data.\"),\n ModuleInstall(\"keras\", \"pip\", purpose=\"deep learning\",\n usage=\"DATA/ML\"),\n ModuleInstall(\"keras-vis\", \"pip\", mname=\"vis\", usage=\"DATA/ML\",\n purpose=\"keras-vis is a high-level toolkit for visualizing and debugging your trained keras neural net models.\"),\n # Bayesian\n ModuleInstall(\n \"bayespy\", \"pip\", purpose=\"bayesian modelling and computation\", usage=\"DATA/ML\"),\n ModuleInstall(\n \"numexpr\", \"wheel\", purpose=\"Fast numerical array expression evaluator for Python, NumPy, PyTables, pandas and more.\"),\n #\n ModuleInstall(\"glue-core\", \"pip\", mname=\"glue.core\",\n purpose=\"Multidimensional data visualzation across files\"),\n ModuleInstall(\"glue-vispy-viewers\", \"pip\", mname=\"glue_vispy_viewers\",\n purpose=\"Vispy-based viewers for Glue\"),\n ModuleInstall(\"glueviz\", \"pip\", mname=\"glue\",\n purpose=\"ploting, Multidimensional data visualzation across files\", usage=\"DATA/ML\"),\n #\n ModuleInstall(\"pandas-highcharts\", \"pip\", mname=\"pandas_highcharts\",\n purpose=\"plotting in javascript and pandas\", usage=\"VIZ\"),\n #\n ModuleInstall(\n \"heapdict\", \"pip\",\n purpose=\"a heap with decrease-key and increase-key operations\"),\n ModuleInstall(\n \"chest\", \"pip\",\n purpose=\"Simple on-disk dictionary\"),\n ModuleInstall(\n \"locket\", \"pip\",\n purpose=\"File-based locks for Python for Linux and Windows\"),\n ModuleInstall(\n \"partd\", \"pip\",\n purpose=\"Appendable key-value storage\"),\n ModuleInstall(\n \"dill\", \"pip\", purpose=\"serialize all of python (almost), Dill extends python's pickle module for serializing \" +\n \"and de-serializing python objects to the majority of the built-in python types.\"), # for dask\n ModuleInstall(\"cloudpickle\", \"pip\",\n purpose=\"Extended pickling support for Python objects\") if sys.version_info[:2] >= (3, 5) else None,\n # parallel computation\n ModuleInstall(\n \"dask\", \"pip\", purpose=\"parallization of operations with dataframe\", usage=\"DATA/ML\"),\n ModuleInstall(\n \"scoop\", \"pip\", purpose=\"SCOOP (Scalable COncurrent Operations in Python) \" +\n \"is a distributed task module allowing concurrent parallel programming on various environments, \" +\n \"from heterogeneous grids to supercomputers\", usage=\"DATA/ML\"),\n #\n ModuleInstall(\n \"docopt\", \"pip\", purpose=\"Pythonic argument parser, that will make you smile\"),\n ModuleInstall(\"pycurl\", \"wheel\",\n purpose=\"PycURL, a interface to the libcurl library. 
(for grab)\"),\n ModuleInstall(\"markdown2\", \"pip\", purpose=\"markdown parser\"),\n ModuleInstall(\n \"structures\", \"pip\", purpose=\"User-friendly library for creating data structures.\"),\n ModuleInstall(\n \"tzlocal\", \"pip\", purpose=\"tzinfo object for the local timezone\"),\n ModuleInstall(\n \"funcsigs\", \"pip\", purpose=\"Python function signatures from PEP362\"),\n ModuleInstall(\n \"apscheduler\", \"pip\", purpose=\"to schedule the execution of jobs, tasks\"),\n #\n # ModuleInstall(\"pdfminer\", \"pip\"), # PDF extraction (no python 3 version)\n # ModuleInstall(\"minecart\", \"pip\"), # PDF extraction (no python 3 version)\n #\n # ModuleInstall(\"pygauss\", \"pip\"), # molecule, bio-informatic,\n # requires PIL which is deprecated\n #\n # July 2015\n #\n ModuleInstall(\n \"pyexecjs\", \"pip\", mname=\"execjs\", purpose=\"Run JavaScript code from Python (for pyreact)\", usage=\"NETWORK\"),\n ModuleInstall(\"pyreact\", \"pip\", mname=\"react\",\n purpose=\"Python bridge to JSX & the React JavaScript library. (for pyxley)\", usage=\"NETWORK\"),\n ModuleInstall(\n \"pyxley\", \"pip\", purpose=\"A pure-Python SNMPv1/v2c/v3 library\", usage=\"NETWORK\"),\n\n #\n # 2015-08\n #\n ModuleInstall(\n \"pysmi\", \"pip\", purpose=\"SNMP SMI/MIB Parser (for pysnmp)\"),\n ModuleInstall(\n \"pysnmp\", \"pip\", purpose=\"A pure-Python SNMPv1/v2c/v3 library\", usage=\"NETWORK\"),\n # pyinstaller does not install properly on Windows\n # ModuleInstall(\n # \"pyinstaller\", \"pip\", purpose=\"Converts (packages) Python programs into stand-alone\n # executables, under Windows, Linux, Mac OS X, AIX and Solaris.\"),\n ModuleInstall(\n \"imageio\", \"pip\", purpose=\"Library for reading and writing a wide range of image, video, \" +\n \"scientific, and volumetric data formats (for moviepy)\", usage=\"VIDEO\"),\n ModuleInstall(\n \"cairocffi\", \"wheel2\", purpose=\"cairocffi is a CFFI-based drop-in replacement for Pycairo, \" +\n \"a set of Python bindings and object-oriented API for cairo\"),\n ModuleInstall(\"tinycss2\", \"pip\", purpose=\"tinycss2 is a complete yet simple CSS parser for Python. 
\" +\n \"It supports the full syntax and error handling for CSS 2.1 as well as some CSS 3 modules\"),\n ModuleInstall(\"cssselect2\", \"pip\",\n purpose=\"CSS selectors for Python ElementTree\"),\n ModuleInstall(\n \"cairosvg\", \"pip\", purpose=\"Convert your SVG files to PDF and PNG.\"),\n ModuleInstall(\n \"gizeh\", \"pip\", purpose=\"Simple Vector Graphics for Python\"),\n ModuleInstall(\"imageio-ffmpeg\", \"pip\", mname=\"imageio_ffmpeg\",\n purpose=\"ffmpeg\", usage=\"VIDEO\"),\n ModuleInstall(\n \"proglog\", \"pip\", purpose=\"Progress bar.\"),\n ModuleInstall(\n \"moviepy\", \"pip\", purpose=\"Video editing with Python\", usage=\"VIDEO\"),\n ModuleInstall(\n \"xgboost\", \"wheel\", purpose=\"Parallelized Stochastic Gradient Descent (only available on \" +\n \"Python 3 and x64)\", usage=\"DATA/ML\") if sys.version_info[0] >= 3 and is_64bit() else None,\n ModuleInstall(\n \"catboost\", \"pip\", purpose=\"CatBoost is a machine learning method based on gradient boosting \" +\n \"over decision trees.\", usage=\"DATA/ML\") if sys.version_info[0] >= 3 and is_64bit() else None,\n ModuleInstall(\n \"lightgbm\", \"pip\", purpose=\"Parallelized Stochastic Gradient Descent (only available on \" +\n \"Python 3 and x64)\", usage=\"DATA/ML\") if sys.version_info[0] >= 3 and is_64bit() else None,\n ModuleInstall(\"pygling\", \"pip\",\n purpose=\"to build makefile with python\") if sys.version_info[0] == 2 else None,\n ModuleInstall(\"cuda4py\", \"pip\",\n purpose=\"Python cffi CUDA bindings and helper classes\"),\n ModuleInstall(\"whoosh\", \"pip\", purpose=\"search engine in Python\"),\n ModuleInstall(\"pymatbridge\", \"pip\",\n purpose=\"pymatbridge is a set of python and matlab functions to allow these two systems to talk to each other\"),\n ModuleInstall(\"scilab2py\", \"pip\",\n purpose=\"Python to Scilab bridge\", usage=\"DATA/ML\"),\n # ModuleInstall(\"scilab_kernel\", \"pip\",\n # purpose=\"A Scilab kernel for IPython\", usage=\"JUPYTER\"),\n # does not work\n ModuleInstall(\"pymssql\", \"wheel\", usage=\"SQL\",\n purpose=\"A simple database interface for Python that builds on top of FreeTDS \" +\n \"to provide a Python DB-API (PEP-249) interface to Microsoft SQL Server.\"),\n\n ModuleInstall(\"PyMySQL\", \"pip\", mname=\"pymysql\",\n purpose=\"Pure-Python MySQL Driver\", usage=\"SQL\"),\n ModuleInstall(\"mysqlclient\", \"wheel\", mname=\"MySQLdb\",\n purpose=\"MySQL driver written in Python which does not depend on MySQL C client libraries and \" +\n \"implements the DB API v2.0 specification (PEP-249).\", usage=\"SQL\"),\n ModuleInstall(\"memory-profiler\", \"pip\", mname=\"memory_profiler\",\n purpose=\"A module for monitoring memory usage of a python program\", usage=\"PROFILING\"),\n ModuleInstall(\"pyinstrument_cext\", \"pip\", usage=\"PROFILING\",\n purpose=\"Pyinstrument's C extensions - reducing the overhead of statistical profilers\"),\n ModuleInstall(\"pyinstrument\", \"pip\", usage=\"PROFILING\",\n purpose=\"A Python profiler that records the call stack of the executing code, \" +\n \"instead of just the final function in it.\"),\n ModuleInstall(\"gprof2dot\", \"pip\", usage=\"PROFILING\",\n purpose=\"This is a Python script to convert the output from many profilers into a dot graph.\"),\n ModuleInstall(\"vprof\", \"pip\", usage=\"PROFILING\",\n purpose=\"vprof is a Python package providing rich and interactive visualizations for various Python \" +\n \"program characteristics such as running time and memory usage.\"),\n ModuleInstall(\"snakeviz\", \"pip\",\n purpose=\"SnakeViz 
is a browser based graphical viewer for the output of Python’s cProfile module.\", usage=\"PROFILING\"),\n ModuleInstall(\"httplib2\", \"pip\",\n purpose=\"A comprehensive HTTP client library.\"),\n ModuleInstall(\"rsa\", \"pip\",\n purpose=\"Pure-Python RSA implementation\"),\n ModuleInstall(\"oauth2client\", \"pip\",\n purpose=\"The oauth2client is a client library for OAuth 2.0.\"),\n ModuleInstall(\"uritemplate\", \"pip\",\n purpose=\"URI templates\"),\n ModuleInstall(\"jeepney\", \"pip\",\n purpose=\"This is a low-level, pure Python DBus protocol client. It has an I/O-free core, and \" + \\\n \"integration modules for different event loops.\"),\n ModuleInstall(\"secretstorage\", \"pip\",\n purpose=\"This module provides a way for securely storing passwords and other secrets.\"),\n ModuleInstall(\"keyring\", \"pip\",\n purpose=\"Store and access your passwords safely.\"),\n # ModuleInstall(\"keyrings.alt\", \"pip\", mname=\"keyrings.alt\",\n # purpose=\"Alternate keyring implementations\"),\n ModuleInstall(\"pyotp\", \"pip\",\n purpose=\"PyOTP is a Python library for generating and verifying one-time passwords.\"),\n ModuleInstall(\"param\", \"pip\",\n purpose=\"Declarative Python programming using Parameters.\"),\n ModuleInstall(\"pyviz_comms\", \"pip\", usage=\"VIZ\",\n purpose=\"Bidirectional communication for PyViz.\"),\n ModuleInstall(\"holoviews\", \"pip\", usage=\"VIZ\",\n purpose=\"Composable, declarative data structures for building complex visualizations easily.\"),\n ModuleInstall(\"geoviews\", \"pip\", usage=\"VIZ\",\n purpose=\"Composable, declarative data structures for building complex visualizations easily.\"),\n ModuleInstall(\"retrying\", \"pip\", purpose=\"Retrying is an Apache 2.0 licensed general-purpose retrying library, \" +\n \"written in Python, to simplify the task of adding retry behavior to just about anything.\"),\n ModuleInstall(\"plotly\", \"pip\", usage=\"VIZ\",\n purpose=\"Plotly's Python graphing library makes interactive, publication-quality graphs online. 
\" +\n \"Examples of how to make line plots, \" +\n \"scatter plots, area charts, bar charts, error bars, box plots, histograms, heatmaps, subplots, multiple-axes, \" +\n \"polar charts and bubble charts.\"),\n ModuleInstall(\"colorlover\", \"pip\", usage=\"VIZ\",\n purpose=\"Color scales for IPython notebook\"),\n ModuleInstall(\"TA_Lib\", \"wheel\", mname=\"talib\",\n purpose=\"This is a Python wrapper for TA-LIB based on Cython instead of SWIG.\"),\n ModuleInstall(\"cufflinks\", \"pip\", usage=\"VIZ\",\n purpose=\"Productivity Tools for Plotly + Pandas\"),\n ModuleInstall(\"lightning-python\", \"pip\", mname=\"lightning\", usage=\"VIZ\",\n purpose=\"Python client for the lightning API\"),\n ModuleInstall(\"passlib\", \"pip\",\n purpose=\"comprehensive password hashing framework supporting over 30 schemes\"),\n ModuleInstall(\"plac\", \"pip\",\n purpose=\"The smartest command line arguments parser in the world\"),\n ModuleInstall(\"pyOpenSSL\", \"pip\", mname=\"OpenSSL\",\n purpose=\"Python wrapper module around the OpenSSL library\"),\n ModuleInstall(\"w3lib\", \"pip\",\n purpose=\"Library of web-related functions\"),\n # ModuleInstall('python-cloudfiles-hubic', 'github', \"Gu1\", mname=\"cloudfiles\",\n # web=\"https://github.com/Gu1/python-cloudfiles-hubic\",\n # purpose=\"access to Hubic\"),\n # ModuleInstall('onedrive-sdk-python', 'github', \"OneDrive\", mname=\"onedrivesdk\",\n # web=\"https://github.com/Gu1/python-cloudfiles-hubic/\",\n # purpose=\"access to OneDrive\"),\n # ModuleInstall('rlpy', 'pip', usage=\"DATA/ML\",\n # purpose=\"RLPy is a framework to conduct sequential decision making\n # experiments. The current focus of this project lies on\n # value-function-based reinforcement learning, specifically using\n # linear function approximators (only Python 2.7).\"),\n ModuleInstall('wordcloud', 'wheel', usage=\"VIZ\",\n purpose=\"A little word cloud generator in Python.\"),\n ModuleInstall('pytagcloud', 'pip',\n purpose=\"Create beautiful tag clouds as images or HTML\"),\n\n # distributed\n ModuleInstall('tblib', 'pip',\n purpose=\"Traceback fiddling library. For now allows you to pickle tracebacks and raise exceptions with pickled \" +\n \"tracebacks in different processes. 
This allows better error handling when running code over \" +\n \"multiple processes (imagine multiprocessing, billiard, futures, celery etc)\"),\n ModuleInstall(\"zict\", \"pip\",\n purpose=\"The dictionary / mutable mapping interface is powerful and multi-faceted.\"),\n\n # pdf\n ModuleInstall(\"pyPdf\", \"github\", \"sdpython\", usage=\"PDF\", pipgit=True,\n branch=\"trunk\", purpose=\"read PDF\"),\n ModuleInstall(\"pdfrw\", \"pip\", usage=\"PDF\",\n purpose=\"PDF file reader/writer library\"),\n # 2016-05\n ModuleInstall(\"pydub\", \"pip\", usage=\"MUSIC\",\n purpose=\"Pydub lets you do stuff to audio in a way that isn't stupid.\"),\n ModuleInstall(\"cobble\", \"pip\", purpose=\"Cobble is a Python library that allows easy creation of data objects, \" +\n \"including implementations of common methods such as __eq__ and __repr__.\"),\n ModuleInstall(\"parsimonious\", \"pip\",\n purpose=\"(Soon to be) the fastest pure-Python PEG parser I could muster\"),\n ModuleInstall(\n \"mammoth\", \"pip\", purpose=\"Convert Word documents from docx to simple and clean HTML and Markdown\"),\n #\n # 2016-06\n #\n # ModuleInstall(\"ipython-sql\", \"pip\", purpose=\"RDBMS access via IPython\", usgae=\"JUPYTER\"),\n ModuleInstall(\"julia\", \"pip\",\n purpose=\"Julia/Python bridge with IPython support\", usage=\"DATA/ML\"),\n ModuleInstall(\"octave_kernel\", \"pip\",\n purpose=\"A Jupyter kernel for Octave.\"),\n ModuleInstall(\"oct2py\", \"pip\",\n purpose=\"Python to GNU Octave bridge --> run m-files from python.\", usage=\"DATA/ML\"),\n ModuleInstall(\"pg8000\", \"pip\",\n purpose=\"A Pure-Python PostgreSQL\", usage=\"SQL\"),\n ModuleInstall(\"PyMeta3\", \"pip\", mname=\"pymeta\",\n purpose=\"Pattern-matching language based on OMeta for Python 3 and 2\"),\n # ModuleInstall(\"ViTables\", \"pip\", mname=\"vitables\",\n # purpose=\"A viewer for PyTables package\"),\n ModuleInstall(\"pybars3\", \"pip\", mname=\"pybar\",\n purpose=\"Handlebars.js templating\"),\n ModuleInstall(\"db.py\", \"pip\", mname=\"db.tables\",\n purpose=\"db.py is an easier way to interact with your databases. It makes it easier \" +\n \"to explore tables, columns, views, etc. \" +\n \"It puts the emphasis on user interaction, information display, and providing easy to use helper functions.\"),\n ModuleInstall(\"clyent\", \"pip\",\n purpose=\"Command line client Library for windows and posix\"),\n ModuleInstall(\"chalmers\", \"pip\",\n purpose=\"Chalmers is an application that allows its users to monitor and control a number of processes on any \" +\n \"operating system (Posix and Win32 included)\"),\n ModuleInstall(\"pyct\", \"pip\", purpose=\"A utility package\"),\n ModuleInstall(\"colorcet\", \"pip\",\n purpose=\"colorcet is a collection of perceptually uniform colormaps for use with Python plotting programs \" +\n \"like bokeh, matplotlib, holoviews, and datashader.\"),\n ModuleInstall(\"datashader\", \"pip\", usage=\"VIZ\",\n purpose=\"Datashader is a graphics pipeline system for creating meaningful representations \" +\n \"of large amounts of data.\") if sys.version_info[:2] >= (3, 5) else None,\n ModuleInstall(\"dnspython\", \"pip\", usage=\"WEB\",\n purpose=\"dnspython is a DNS toolkit for Python. It supports almost all record types. It can be used for queries, \" +\n \"zone transfers, and dynamic updates. 
It supports TSIG authenticated messages and EDNS0.\"),\n ModuleInstall(\"grin\", \"pip\", usage=\"CLI\",\n purpose=\"A grep program configured the way I like it.\"),\n ModuleInstall(\"ldap3\", \"pip\", usage=\"WEB\",\n purpose=\"ldap3 is a strictly RFC 4510 conforming LDAP V3 pure Python client library.\"),\n ModuleInstall(\"mpi4py\", \"wheel\",\n purpose=\"MPI for Python\"),\n ModuleInstall(\"mss\", \"pip\", mname=\"mms\",\n purpose=\"An ultra fast cross-platform multiple screenshots module in pure python using ctypes\"),\n #\n # June 2016\n #\n ModuleInstall(\"pyglet\", \"pip\", usage=\"GUI\",\n purpose=\"a cross-platform windowing and multimedia library for Python\"),\n ModuleInstall(\"geoplotlib\", \"pip\", usage=\"VIZ\",\n purpose=\"geoplotlib is a python toolbox for visualizing geographical data and making maps\"),\n ModuleInstall(\"leather\", \"pip\", usage=\"VIZ\",\n purpose=\"Leather is the Python charting library for those who need charts now and don’t care if they’re perfect.\"),\n ModuleInstall(\"pythreejs\", \"pip\", usage=\"VIZ\",\n purpose=\"A Python / ThreeJS bridge utilizing the Jupyter widget infrastructure.\"),\n ModuleInstall(\"missingno\", \"pip\", usage=\"VIZ\",\n purpose=\"Messy datasets? Missing values? missingno provides a small toolset of flexible and easy-to-use \" +\n \"missing data visualizations and utilities that allows you to get a quick visual summary of the completeness \" +\n \"(or lack thereof) of your dataset.\"),\n ModuleInstall(\"vega\", \"pip\", usage=\"VIZ\",\n purpose=\"Python/Jupyter notebook module for Vega, and Vega-Lite, Polestar, and Voyager. Notebooks \" +\n \"with embedded visualizations can be viewed on github and nbviewer.\"),\n ModuleInstall(\"pydy\", \"pip\",\n purpose=\"Multibody Dynamics with Python\"),\n ModuleInstall(\"apache-libcloud\", \"pip\", mname=\"libcloud\",\n purpose=\"A standard Python library that abstracts away differences among multiple cloud provider APIs.\"),\n ModuleInstall(\"click-plugins\", \"pip\", mname=\"click_plugins\",\n purpose=\"An extension module for click to enable registering CLI commands via setuptools entry-points.\"),\n ModuleInstall(\"munch\", \"pip\",\n purpose=\"A dot-accessible dictionary (a la JavaScript objects).\"),\n ModuleInstall(\"cligj\", \"pip\",\n purpose=\"Click params for commmand line interfaces to GeoJSON\"),\n ModuleInstall(\"brythonmagic\", \"pip\",\n purpose=\"Magics to use brython in Jupyter notebook.\"),\n ModuleInstall(\n \"asteval\", \"pip\", purpose=\"Safe, minimalistic evaluator of python expression using ast module\"),\n ModuleInstall(\n \"uncertainties\", \"pip\", purpose=\"uncertainties allows calculations such as (2 +/- 0.1)*2 = 4 \" +\n \"+/- 0.2 to be performed transparently.\"),\n ModuleInstall(\n \"lmfit\", \"pip\", purpose=\"Least-Squares Minimization with Bounds and Constraints\", usage=\"OPTIM\"),\n #\n # August, September, October, December 2016\n #\n ModuleInstall('QInfer', 'pip', mname=\"qinfer\", usage=\"DATA/ML\",\n purpose=\"QInfer is a library using Bayesian sequential Monte Carlo for quantum parameter estimation.\"),\n ModuleInstall('pscript', 'pip', purpose=\"for flexx\"),\n ModuleInstall('webruntime', 'pip', purpose=\"for flexx\"),\n ModuleInstall('dialite', 'pip', purpose=\"for flexx\"),\n ModuleInstall('flexx', 'pip', usage=\"GUI\",\n purpose=\"Flexx is a pure Python toolkit for creating graphical user interfaces (GUI's), \" +\n \"that uses web technology for its rendering. 
Apps are written purely in Python; Flexx' \" +\n \"transpiler generates the necessary JavaScript on the fly.\"),\n ModuleInstall('pypng', 'pip',\n purpose=\"Pure Python PNG image encoder/decoder\"),\n ModuleInstall('colormath', 'pip',\n purpose=\"Color math and conversion library.\"),\n ModuleInstall('arrow', 'pip',\n purpose=\"Better dates and times for Python\"),\n ModuleInstall('custom_inherit', 'pip',\n purpose=\"A Python package that provides tools for inheriting docstrings in customizable ways.\"),\n ModuleInstall('toyplot', 'pip', usage=\"VIZ\",\n purpose=\"The kid-sized plotting toolkit for Python with grownup-sized goals.\"),\n ModuleInstall('images2gif', 'pip',\n purpose=\"Create a GIF from a list of images.\"),\n ModuleInstall('hypothesis', 'pip',\n purpose=\"Hypothesis is an advanced testing library for Python. It lets you write tests which are parametrized \" +\n \"by a source of examples, and then generates simple and comprehensible examples that make your tests fail. \" +\n \"This lets you find more bugs in your code with less work.\"),\n ModuleInstall('monotonic', 'pip',\n purpose=\"This module provides a monotonic() function which returns the value (in fractional seconds) \" +\n \"of a clock which never goes backwards. \" +\n \"It is compatible with Python 2 and Python 3.\"),\n ModuleInstall('fasteners', 'pip',\n purpose=\"A python package that provides useful locks.\"),\n ModuleInstall('asciitree', 'pip',\n purpose=\"Draws ASCII trees.\"),\n ModuleInstall('numcodecs', 'wheel',\n purpose=\"Numcodecs is a Python package providing buffer compression and transformation codecs for use \" +\n \"in data storage and communication applications.\"),\n ModuleInstall('zarr', 'pip',\n purpose=\"A minimal implementation of chunked, compressed, N-dimensional arrays for Python.\"),\n ModuleInstall('rx', 'pip',\n purpose=\"Reactive Extensions (Rx) for Python (LINQ)\"),\n ModuleInstall('PySocks', 'pip', mname=\"socks\",\n purpose=\"A semi-actively maintained SocksiPy fork. Contains many improvements to the original.\"),\n ModuleInstall('pympler', 'pip',\n purpose=\"Pympler is a development tool to measure, monitor and analyze the memory behavior of Python \" +\n \"objects in a running Python application.\"),\n ModuleInstall('fbpca', 'pip',\n purpose=\"Functions for principal component analysis (PCA) and accuracy checks.\"),\n ModuleInstall('prince', 'pip',\n purpose=\"Prince is a factor analysis library for datasets that fit in memory.\"),\n ModuleInstall('pivottablejs', 'pip',\n purpose=\"PivotTable.js integration for Jupyter/IPython Notebook.\"),\n #\n # January 2017\n #\n ModuleInstall('pystan', 'pip', usage=\"DATA/ML\",\n purpose=\"PyStan provides an interface to Stan, a package for Bayesian inference using \" +\n \"the No-U-Turn sampler, a variant of Hamiltonian Monte Carlo.\"),\n ModuleInstall('ephem', 'pip', purpose=\"for fbprophet\"),\n ModuleInstall('convertdate', 'pip', purpose=\"for fbprophet\"),\n ModuleInstall('holidays', 'pip', purpose=\"for fbprophet\"),\n ModuleInstall('lunardate', 'pip', purpose=\"for fbprophet\"),\n ModuleInstall('fbprophet', 'pip', usage=\"DATA/ML\",\n purpose=\"Prophet is a procedure for forecasting time series data. It is based on an additive \" +\n \"model where non-linear trends are fit with yearly and weekly seasonality, plus holidays. \" +\n \"It works best with daily periodicity data with at least one year of historical data. 
\" +\n \"Prophet is robust to missing data, shifts in the trend, and large outliers.\"),\n ModuleInstall('wikipedia', 'pip',\n purpose=\"Wikipedia API for Python\"),\n ModuleInstall('validate_email', 'pip',\n purpose=\"Validate_email verify if an email address is valid and really exists.\"),\n ModuleInstall('simhash', 'pip',\n purpose=\"A Python implementation of Simhash Algorithm\"),\n ModuleInstall('wptools', 'pip',\n purpose=\"Wikipedia tools (for Humans)\"),\n #\n # June 2017\n #\n ModuleInstall('pytest_runner', 'pip',\n purpose=\"Setup scripts can use pytest-runner to add setup.py test support for pytest runner.\"),\n ModuleInstall('fastparquet', 'wheel',\n purpose=\"fastparquet is a python implementation of the parquet format, aiming integrate \" +\n \"into python-based big data work-flows.\"),\n ModuleInstall('citeproc-py', 'pip', mname=\"citeproc_py\",\n purpose=\"citeproc-py is a CSL processor for Python. It aims to implement the CSL 1.0.1 specification. \" +\n \"citeproc-py can output styled citations and bibliographies in a number of different output formats. \" +\n \"Currently supported are plain text, reStructuredText and HTML. Other formats can be added easily.\"),\n ModuleInstall('duecredit', 'pip',\n purpose=\"Publications (and donations) tracer\"),\n #\n # September 2017\n #\n ModuleInstall('Brotli', 'wheel', mname=\"brotli\",\n purpose=\"Brotli is a generic-purpose lossless compression algorithm that compresses data using a \" +\n \"combination of a modern variant of the LZ77 algorithm, Huffman coding and 2nd order context modeling, \" +\n \"with a compression ratio comparable to the best currently available general-purpose compression methods. \" +\n \"It is similar in speed with deflate but offers more dense compression.\"),\n ModuleInstall('fast-histogram', 'wheel2', mname=\"fast_histogram\",\n purpose=\"Mizani is a scales package for graphics. It is written in Python and is \" +\n \"based on Hadley Wickham's Scales.\"),\n ModuleInstall('mizani', 'pip',\n purpose=\"Mizani is a scales package for graphics. 
It is written in Python and is \" +\n \"based on Hadley Wickham's Scales.\"),\n ModuleInstall('mpl-scatter-density', 'pip', mname=\"mpl_scatter_density\",\n purpose=\"Matplotlib helpers to make density scatter plots\"),\n ModuleInstall('pybind11', 'pip', usage=\"C++\",\n purpose=\"pybind11 is a lightweight header-only library that exposes C++ types in Python and vice versa, \" +\n \"mainly to create Python bindings of existing C++ code.\"),\n ModuleInstall('mypy_extensions', 'pip', purpose=\"for mypy\"),\n ModuleInstall('mypy', 'pip',\n purpose=\"Mypy is an experimental optional static type checker for Python that aims to combine the benefits of \" +\n \"dynamic (or 'duck') typing and static typing.\"),\n ModuleInstall('pypandoc', 'pip',\n purpose=\"Pypandoc provides a thin wrapper for pandoc, a universal document converter.\"),\n ModuleInstall('pocket', 'pip',\n purpose=\"Access to pocket API.\"),\n #\n # November 2017\n #\n ModuleInstall('seasonal', 'pip', purpose=\"timeseries\", usage=\"ML\"),\n #\n # December 2017\n #\n ModuleInstall('gdown', 'pip',\n purpose=\"Google Drive direct download of big files.\"),\n ModuleInstall('pytube3', 'pip',\n purpose=\"play with youtube videos\"),\n #\n # Mars, Avril 2018\n #\n ModuleInstall('pyjsparser', 'pip',\n purpose=\"Fast javascript parser (based on esprima.js)\"),\n ModuleInstall('Js2Py', 'pip', mname=\"js2py\",\n purpose=\"JavaScript to Python Translator & JavaScript interpreter written in 100% pure Python.\"),\n ModuleInstall('pythonnet', 'wheel', mname=\"clr\",\n purpose=\"Python binding for C#\"),\n #\n # July 2018\n #\n ModuleInstall('lml', 'pip',\n purpose=\"lml seamlessly finds the lml based plugins from your current python \" +\n \"environment but loads your plugins on demand. It is designed to support plugins \" +\n \"that have external dependencies, especially bulky and/or memory hungry ones. \" +\n \"lml provides the plugin management system only and the plugin interface is on your shoulder.\"),\n ModuleInstall('macropy3', 'pip',\n purpose='MacroPy is an implementation of Syntactic Macros in the Python Programming ' +\n 'Language. MacroPy provides a mechanism for user-defined functions (macros) to perform ' +\n 'transformations on the abstract syntax tree (AST) of a Python program at import time. This ' +\n 'is an easy way to enhance the semantics of a Python program in ways which are otherwise ' +\n 'impossible, for example providing an extremely concise way of declaring classes.'),\n ModuleInstall('dukpy', 'pip',\n purpose='DukPy is a simple javascript interpreter for Python built on top of duktape ' +\n 'engine without any external dependency. It comes with a bunch of common transpilers ' +\n 'built-in for convenience.'),\n ModuleInstall('javascripthon', 'pip',\n purpose='a Python 3 to ES6 JavaScript translator'),\n ModuleInstall('pyecharts', 'pip',\n purpose='pyecharts is a library to generate charts using Echarts. It simply provides ' +\n 'the interface of 28+ kinds of charts between Echarts and Python.'),\n ModuleInstall('pyecharts-javascripthon', 'pip', mname='pyecharts_javascripthon',\n purpose='pyecharts-javascripthon helps translate Python functions into javascript ones. ' +\n 'It uses javascripthon and dukpy to blend Python codes into javascript runtime. It supports ' +\n 'python 2.7, 3.4, 3.5 and 3.6. 
It works on Linux, MacOS and Windows platforms.'),\n ModuleInstall('pyecharts-snapshot', 'pip', mname='pyecharts_snapshot',\n purpose='pyecharts-snapshot renders the output of pyecharts as a png, jpeg, gif, svg image ' +\n 'or a pdf file at command line or in your code.'),\n ModuleInstall('jupyter-echarts-pypkg', 'pip', mname='jupyter_echarts_pypkg',\n purpose='The project packages jupyter-echarts and distributes it via pypi.'),\n ModuleInstall('yahoo-historical', 'pip', mname=\"yahoo_historical\",\n purpose='Python module to get stock data from Yahoo! Finance'),\n #\n # October 2019\n #\n ModuleInstall('wsproto', 'pip',\n purpose='Pure Python, pure state-machine WebSocket implementation.'),\n ModuleInstall('h11', 'pip',\n purpose='This is a little HTTP/1.1 library written from scratch in Python, '\n 'heavily inspired by hyper-h2.'),\n ModuleInstall('httptools', 'pip',\n purpose='httptools is a Python binding for nodejs HTTP parser. '\n 'It\\'s still in a very early development stage, expect '\n 'APIs to break.'),\n ModuleInstall('hypercorn', 'pip',\n purpose='Hypercorn is an ASGI web server based on the sans-io hyper, h11, '\n 'h2, and wsproto libraries and inspired by Gunicorn.'),\n ModuleInstall('priority', 'pip',\n purpose='Priority is a pure-Python implementation of the priority logic '\n 'for HTTP/2.'),\n ModuleInstall('starlette', 'pip',\n purpose='Starlette is a lightweight ASGI framework/toolkit, which is ideal '\n 'for building high performance asyncio services.'),\n ModuleInstall('uvicorn', 'pip',\n purpose='Uvicorn is a lightning-fast ASGI server implementation, '\n 'using uvloop and httptools.'),\n ModuleInstall('websockets', 'pip',\n purpose='websockets is a library for building WebSocket servers and '\n 'clients in Python with a focus on correctness and simplicity.'),\n #\n # December 2019\n #\n ModuleInstall('py-spy', 'pip',\n purpose='profiler implemented in rust, works like a C++ profiler'),\n ModuleInstall('pydicom', 'pip',\n purpose='pydicom is a pure python package for working with DICOM files. '\n 'It was made for inspecting and modifying DICOM data in an easy '\n '\"pythonic\" way. 
The modifications can be written again to a new '\n 'file.'),\n ]\n\n return [_ for _ in mod if _ is not None]", "title": "" }, { "docid": "f5de3fbd765c7dc4f5aca86ee65a18a4", "score": "0.4580654", "text": "def gen_mgmt(self):\n # call parent function to generate first mgmt interface (e1000)\n res = super(VQFX_vcp, self).gen_mgmt()\n # add virtio NIC for internal control plane interface to vFPC\n res.append(\"-device\")\n res.append(\"e1000,netdev=vcp-int,mac=%s\" % vrnetlab.gen_mac(1))\n res.append(\"-netdev\")\n res.append(\"tap,ifname=vcp-int,id=vcp-int,script=no,downscript=no\")\n\n # dummy\n for i in range(1):\n res.append(\"-device\")\n res.append(\"e1000,netdev=dummy%d,mac=%s\" % (i, vrnetlab.gen_mac(1)))\n res.append(\"-netdev\")\n res.append(\"tap,ifname=dummy%d,id=dummy%d,script=no,downscript=no\" % (i, i))\n\n return res", "title": "" }, { "docid": "e89ca212643ae8ea5567c66b877d90af", "score": "0.4570405", "text": "def create_installer(args, cat_info, env_info, module_names):\n # define the local staging directory\n stage = args.make_installer\n # remove the old staging directory if it exists\n if os.path.isdir(stage):\n shutil.rmtree(stage)\n # create some directories\n os.makedirs(os.path.join(stage, 'python-files'))\n os.makedirs(os.path.join(stage, 'xml-files'))\n # add the appropriate python files and the const data\n meta.add_python_files(module_names, os.path.join(stage, 'python-files'))\n # create the xml files\n identifiers, import_errors = mobyle.add_xml_files(\n cat_info, env_info,\n module_names, args.short_length,\n os.path.join(stage, 'xml-files'),\n args.runbsub)\n for e in import_errors:\n print e\n # copy the installer script\n shutil.copy('install-mob-tools.py',\n os.path.join(stage, 'install-mob-tools.py'))\n # copy the installer script dependency\n shutil.copy('mobenv.py',\n os.path.join(stage, 'mobenv.py'))\n # create the installer configuration\n with open(os.path.join(stage, 'install-mob-tools.conf'), 'w') as fout:\n print >> fout, '#', ' '.join(sys.argv)\n print >> fout, '\\t'.join(['auto_path', env_info.auto_path])\n print >> fout, '\\t'.join(['python_path', env_info.python_path])\n print >> fout, '\\t'.join(['mob_core', env_info.mob_core])\n print >> fout, '\\t'.join(['mob_version', args.mobyle_version])\n # create a file with a fragment that could be added to the config\n with open(os.path.join(stage, 'sys.config.fragment'), 'w') as fout:\n print >> fout, '# begin autogenerated code'\n print >> fout, 'my_pca_names = ['\n for ident in sorted(identifiers):\n print >> fout, \"\\t\\t'%s',\" % ident\n print >> fout, \"\\t\\t]\"\n dstring = \"dict((x, 'Sys') for x in my_pca_names)\"\n print >> fout, 'PARTICULAR_BATCH.update(%s)' % dstring\n print >> fout, '# end autogenerated code'\n # use subprocess instead of tarfile to create the tgz\n cmd = ['tar', 'czvf', stage + '.tgz', stage]\n subprocess.call(cmd)", "title": "" }, { "docid": "c08ce213fe0dd53b4a8b09d7635b501d", "score": "0.45670965", "text": "def _RewriteSetupPyGeneratedFile(\n self, project_definition, source_directory, source_filename,\n project_name, rpm_build_dependencies, input_file, output_file_object,\n python2_package_prefix='python-'):\n description = b''\n requires = b''\n summary = b''\n version = b''\n\n in_description = False\n in_python_package = False\n has_build_requires = False\n has_python2_package = False\n has_python3_package = False\n has_unmangled_version = False\n\n python2_only = project_definition.IsPython2Only()\n\n if project_definition.rpm_name:\n package_name = 
project_definition.rpm_name\n else:\n package_name = project_name\n\n if package_name.startswith('python-'):\n package_name = package_name[7:]\n\n unmangled_name = project_name\n\n with open(input_file, 'r+b') as input_file_object:\n for line in input_file_object.readlines():\n if line.startswith(b'%') and in_description:\n in_description = False\n\n if project_definition.description_long:\n description = b'{0:s}\\n\\n'.format(\n project_definition.description_long)\n\n output_file_object.write(description)\n\n if line.startswith(b'%prep') and in_python_package:\n in_python_package = False\n\n if in_python_package:\n continue\n\n if line.startswith(b'%define name '):\n # Need to override the project name for projects that prefix\n # their name with \"python-\" (or equivalent) in setup.py but\n # do not use it for their source package name.\n line = b'%define name {0:s}\\n'.format(project_name)\n\n elif line.startswith(b'%define version '):\n version = line[16:-1]\n if version.startswith(b'1!'):\n version = version[2:]\n\n elif line.startswith(b'%define unmangled_version '):\n # setup.py generates %define unmangled_version twice ignore\n # the second define.\n if has_unmangled_version:\n continue\n\n output_file_object.write(\n b'%define unmangled_name {0:s}\\n'.format(unmangled_name))\n\n has_unmangled_version = True\n\n elif not summary and line.startswith(b'Summary: '):\n summary = line\n\n elif line.startswith(b'Source0: '):\n if source_filename.endswith('.zip'):\n line = b'Source0: %{unmangled_name}-%{unmangled_version}.zip\\n'\n else:\n line = b'Source0: %{unmangled_name}-%{unmangled_version}.tar.gz\\n'\n\n elif line.startswith(b'BuildRoot: '):\n if project_name == 'psutil':\n line = (\n b'BuildRoot: %{_tmppath}/'\n b'%{name}-release-%{version}-%{release}-buildroot\\n')\n\n else:\n line = (\n b'BuildRoot: %{_tmppath}/'\n b'%{unmangled_name}-release-%{version}-%{release}-buildroot\\n')\n\n elif (not description and not requires and\n line.startswith(b'Requires: ')):\n requires = line\n continue\n\n elif line.startswith(b'BuildArch: noarch'):\n if project_definition.architecture_dependent:\n continue\n\n elif line.startswith(b'BuildRequires: '):\n has_build_requires = True\n line = b'BuildRequires: {0:s}\\n'.format(b', '.join(\n rpm_build_dependencies))\n\n elif line == b'\\n' and summary and not has_build_requires:\n has_build_requires = True\n line = b'BuildRequires: {0:s}\\n\\n'.format(b', '.join(\n rpm_build_dependencies))\n\n elif line.startswith(b'%description') and not description:\n in_description = True\n\n elif (line.startswith(b'%package -n python-') or\n line.startswith(b'%package -n python2-')):\n if project_name == 'plaso':\n in_python_package = True\n continue\n\n has_python2_package = True\n\n if line.startswith(b'%package -n python2-'):\n if python2_package_prefix == 'python-':\n logging.warning(\n 'rpm_python_package prefix is: \"python\" but spec file '\n 'defines: \"python2\"')\n python2_package_prefix = 'python2-'\n\n elif line.startswith(b'%package -n python3-'):\n has_python3_package = True\n\n elif line.startswith(b'%prep'):\n if project_name == 'plaso':\n requires = b'{0:s}, {1:s}-data\\n'.format(\n requires[:-1], project_name)\n\n if not has_python2_package:\n if project_name != package_name:\n python_package_name = b'{0:s}{1:s}'.format(\n python2_package_prefix, package_name)\n else:\n python_package_name = b'{0:s}%{{name}}'.format(\n python2_package_prefix)\n\n if python_package_name != b'%{name}':\n self._WritePython2PackageDefinition(\n output_file_object, 
python_package_name, summary, requires,\n description)\n\n if not python2_only and not has_python3_package:\n if project_name != package_name:\n python_package_name = b'python3-{0:s}'.format(package_name)\n else:\n python_package_name = b'python3-%{name}'\n\n # TODO: convert python 2 package names to python 3\n self._WritePython3PackageDefinition(\n output_file_object, python_package_name, summary, requires,\n description)\n\n if project_name == 'plaso':\n output_file_object.write((\n b'%package -n %{{name}}-data\\n'\n b'{0:s}'\n b'\\n'\n b'%description -n %{{name}}-data\\n'\n b'{1:s}').format(summary, description))\n\n elif line.startswith(b'%setup -n %{name}-%{unmangled_version}'):\n if project_name == 'psutil':\n line = b'%autosetup -n %{name}-release-%{unmangled_version}\\n'\n else:\n line = b'%autosetup -n %{unmangled_name}-%{unmangled_version}\\n'\n\n elif (line.startswith(b'python setup.py build') or\n line.startswith(b'python2 setup.py build') or\n line.startswith(b'%py2_build') or line.startswith(\n b'env CFLAGS=\"$RPM_OPT_FLAGS\" python setup.py build')):\n line = self._GetBuildDefinition(python2_only)\n\n elif (line.startswith(b'python setup.py install') or\n line.startswith(b'python2 setup.py install') or\n line.startswith(b'%py2_install')):\n line = self._GetInstallDefinition(project_name, python2_only)\n\n elif line == b'rm -rf $RPM_BUILD_ROOT\\n':\n line = b'rm -rf %{buildroot}\\n'\n\n elif (line.startswith(b'%files') and\n not line.startswith(b'%files -n %{name}-data')):\n break\n\n elif in_description:\n # Ignore leading white lines in the description.\n if not description and line == b'\\n':\n continue\n\n description = b''.join([description, line])\n continue\n\n output_file_object.write(line)\n\n license_line = self._GetLicenseFileDefinition(source_directory)\n\n doc_line = self._GetDocumentationFilesDefinition(source_directory)\n\n if project_name != package_name:\n python_package_name = b'{0:s}{1:s}'.format(\n python2_package_prefix, package_name)\n else:\n python_package_name = b'{0:s}%{{name}}'.format(python2_package_prefix)\n\n self._WritePython2PackageFiles(\n output_file_object, project_definition, project_name,\n python_package_name, license_line, doc_line)\n\n if not python2_only:\n if project_name != package_name:\n python_package_name = b'python3-{0:s}'.format(package_name)\n else:\n python_package_name = b'python3-%{name}'\n\n self._WritePython3PackageFiles(\n output_file_object, project_definition, project_name,\n python_package_name, license_line, doc_line)\n\n if project_name == 'plaso':\n output_file_object.write(\n b'\\n'\n b'%files -n %{name}-data\\n'\n b'%{_datadir}/%{name}/*\\n')\n\n # TODO: add bindir support.\n output_file_object.write((\n b'\\n'\n b'%exclude %{_bindir}/*\\n'))\n\n # TODO: add shared data support.\n\n self._WriteChangeLog(output_file_object, version)\n\n return True", "title": "" }, { "docid": "a94327773318baad0f5c6cd2c595006d", "score": "0.45645878", "text": "def scons_rules(self):\n self._prepare_to_generate_rule()\n\n dep_files_map = {}\n dep_files_map = self._swig_library_rules_py()\n if (hasattr(self.options, 'generate_java')\n and self.options.generate_java) or (\n self.data.get('options', {}).get('generate_java', False)):\n self._swig_library_rules_java(dep_files_map)\n if hasattr(self.options, 'generate_php') and self.options.generate_php:\n if not self.php_inc_list:\n console.error_exit(\"failed to build //%s:%s, please install php modules\" % (\n self.data['path'], self.data['name']))\n else:\n 
self._swig_library_rules_php(dep_files_map)", "title": "" }, { "docid": "22ac4a7ecce8ac79002a1a790760425d", "score": "0.4557159", "text": "def new_config(self):\n super(BaseBorderCopier, self).new_config()\n\n retargetted = \"\\n\".join(self.copy_py_code)\n retargetted = retargetted.replace(\"self.\", \"self.code.\")\n retargetted = retargetted.replace(\"write_to(\", \"write_to_current(\")\n retargetted = retargetted.replace(\"read_from_next(\", \"read_from(\")\n for dim, size_name in enumerate(self.code.acc.size_names):\n size = self.code.acc.get_size_of(dim)\n retargetted = retargetted.replace(size_name, str(size))\n for border_name, border_size in self.code.acc.border_size.iteritems():\n retargetted = retargetted.replace(border_name, str(border_size))\n if not HAVE_TUPLE_ARRAY_INDEX:\n retargetted = tuple_array_index_fixup(retargetted)\n\n exec retargetted in globals(), locals()", "title": "" }, { "docid": "3370c3500e290c64affe98c9e38c4f7f", "score": "0.4554821", "text": "def schema_config(self):\n # Construct the default ItemGrader schema\n schema = super(FormulaGrader, self).schema_config\n # Append options\n forbidden_default = \"Invalid Input: This particular answer is forbidden\"\n return schema.extend({\n Required('user_functions', default={}):\n {Extra: Any(is_callable, [is_callable], FunctionSamplingSet)},\n Required('user_constants', default={}): {Extra: Number},\n Required('blacklist', default=[]): [str],\n Required('whitelist', default=[]): [Any(str, None)],\n Required('forbidden_strings', default=[]): [str],\n Required('forbidden_message', default=forbidden_default): str,\n Required('required_functions', default=[]): [str],\n Required('tolerance', default='0.01%'): Any(PercentageString, NonNegative(Number)),\n Required('case_sensitive', default=True): bool,\n Required('metric_suffixes', default=False): bool,\n Required('samples', default=5): Positive(int),\n Required('variables', default=[]): [str],\n Required('sample_from', default={}): dict,\n Required('failable_evals', default=0): NonNegative(int)\n })", "title": "" }, { "docid": "ac83e333f8ac1a7a494cc1ad72f30d13", "score": "0.45516115", "text": "def override_clients(self, new_clients):\n\n self.RR2 = EnhancedResourceRegistryClient(new_clients.resource_registry)\n\n #shortcut names for the import sub-services\n # we hide these behind checks even though we expect them so that\n # the resource_impl_metatests will work\n if hasattr(new_clients, \"resource_registry\"):\n self.RR = new_clients.resource_registry\n\n if hasattr(new_clients, \"data_acquisition_management\"):\n self.DAMS = new_clients.data_acquisition_management\n\n if hasattr(new_clients, \"data_product_management\"):\n self.DPMS = new_clients.data_product_management\n\n if hasattr(new_clients, \"pubsub_management\"):\n self.PSMS = new_clients.pubsub_management\n\n if hasattr(new_clients, \"data_retriever\"):\n self.DRS = new_clients.data_retriever", "title": "" }, { "docid": "1a9538568f6fbc1cfff3bb4ddb46f233", "score": "0.45483053", "text": "def regenerate(context):\n run('pelican -r -s pelicanconf.py')", "title": "" }, { "docid": "2c9260be0ff06987c422886b65fc81df", "score": "0.45437306", "text": "def cb_generate_config(self, *args, **kwargs):\r\n self.linter.generate_config(skipsections=('COMMANDS',))\r\n sys.exit(0)", "title": "" }, { "docid": "9f4db0945d4c7025d74af0763f6c412f", "score": "0.45422688", "text": "def customization_data(client=None):\n\n # This import data contains:\n # Function inputs:\n # incident_id\n # task_id\n # teams_channel\n # 
teams_mrkdown\n # teams_payload\n # Message Destinations:\n # fn_teams\n # Functions:\n # teams_post_message\n # Workflows:\n # example_post_incident_to_ms_teams\n # example_post_task_to_microsoft_teams\n # Rules:\n # Example: Post a Task to Microsoft Teams\n # Example: Post an Incident to Microsoft Teams\n\n\n yield ImportDefinition(u\"\"\"\neyJ0YXNrX29yZGVyIjogW10sICJ3b3JrZmxvd3MiOiBbeyJ1dWlkIjogImIwZTE5NDY4LTkzZGQt\nNDNjMS1iNzNhLWJlNjgxODg1ODcwYyIsICJkZXNjcmlwdGlvbiI6ICJFeGFtcGxlIG9mIHBvc3Rp\nbmcgaW5jaWRlbnQgYW5kIHRhc2sgaW5mb3JtYXRpb24gdG8gVGVhbXMgYXMgdHdvIHNlY3Rpb25z\nIiwgIm9iamVjdF90eXBlIjogInRhc2siLCAiZXhwb3J0X2tleSI6ICJleGFtcGxlX3Bvc3RfdGFz\na190b19taWNyb3NvZnRfdGVhbXMiLCAid29ya2Zsb3dfaWQiOiA1MywgImxhc3RfbW9kaWZpZWRf\nYnkiOiAiYUBleGFtcGxlLmNvbSIsICJjb250ZW50IjogeyJ4bWwiOiAiPD94bWwgdmVyc2lvbj1c\nIjEuMFwiIGVuY29kaW5nPVwiVVRGLThcIj8+PGRlZmluaXRpb25zIHhtbG5zPVwiaHR0cDovL3d3\ndy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9NT0RFTFwiIHhtbG5zOmJwbW5kaT1cImh0dHA6\nLy93d3cub21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQvRElcIiB4bWxuczpvbWdkYz1cImh0dHA6\nLy93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RDXCIgeG1sbnM6b21nZGk9XCJodHRwOi8v\nd3d3Lm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9ESVwiIHhtbG5zOnJlc2lsaWVudD1cImh0dHA6\nLy9yZXNpbGllbnQuaWJtLmNvbS9icG1uXCIgeG1sbnM6eHNkPVwiaHR0cDovL3d3dy53My5vcmcv\nMjAwMS9YTUxTY2hlbWFcIiB4bWxuczp4c2k9XCJodHRwOi8vd3d3LnczLm9yZy8yMDAxL1hNTFNj\naGVtYS1pbnN0YW5jZVwiIHRhcmdldE5hbWVzcGFjZT1cImh0dHA6Ly93d3cuY2FtdW5kYS5vcmcv\ndGVzdFwiPjxwcm9jZXNzIGlkPVwiZXhhbXBsZV9wb3N0X3Rhc2tfdG9fbWljcm9zb2Z0X3RlYW1z\nXCIgaXNFeGVjdXRhYmxlPVwidHJ1ZVwiIG5hbWU9XCJFeGFtcGxlOiBQb3N0IFRhc2sgdG8gTWlj\ncm9zb2Z0IFRlYW1zXCI+PGRvY3VtZW50YXRpb24+RXhhbXBsZSBvZiBwb3N0aW5nIGluY2lkZW50\nIGFuZCB0YXNrIGluZm9ybWF0aW9uIHRvIFRlYW1zIGFzIHR3byBzZWN0aW9uczwvZG9jdW1lbnRh\ndGlvbj48c3RhcnRFdmVudCBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiPjxvdXRnb2luZz5TZXF1\nZW5jZUZsb3dfMHE1bHNoYjwvb3V0Z29pbmc+PC9zdGFydEV2ZW50PjxzZXJ2aWNlVGFzayBpZD1c\nIlNlcnZpY2VUYXNrXzE3bjY4YmZcIiBuYW1lPVwiVGVhbXMgUG9zdCBNZXNzYWdlXCIgcmVzaWxp\nZW50OnR5cGU9XCJmdW5jdGlvblwiPjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0\naW9uIHV1aWQ9XCIwYzhlNDQ5Ny1jMTMxLTRkNWQtYmRmMy0zMTUzZDMwYjliYmNcIj57XCJpbnB1\ndHNcIjp7XCI3NjAyM2NlMy1mYzE3LTQxZDEtOTAwMi0yMzkyMjgzY2UzMTVcIjp7XCJpbnB1dF90\neXBlXCI6XCJzdGF0aWNcIixcInN0YXRpY19pbnB1dFwiOntcIm11bHRpc2VsZWN0X3ZhbHVlXCI6\nW10sXCJ0ZXh0X3ZhbHVlXCI6XCJ0ZXN0Y2hhbm5lbFwifX0sXCJmYTY0YTA5OS1mM2Q0LTRjYWEt\nYmQ2NC03MmZmZGI0NjQxNGZcIjp7XCJpbnB1dF90eXBlXCI6XCJzdGF0aWNcIixcInN0YXRpY19p\nbnB1dFwiOntcImJvb2xlYW5fdmFsdWVcIjp0cnVlLFwibXVsdGlzZWxlY3RfdmFsdWVcIjpbXX19\nfSxcInByZV9wcm9jZXNzaW5nX3NjcmlwdFwiOlwiZnJvbSBqYXZhLnV0aWwgaW1wb3J0IERhdGVc\nXG5cXG5pbnB1dHMuaW5jaWRlbnRfaWQgPSBpbmNpZGVudC5pZFxcbmlucHV0cy50YXNrX2lkID0g\ndGFzay5pZFxcblxcXCJcXFwiXFxcIlxcbmZvcm1hdCBvZiBhIHBheWxvYWQuICogPSBvcHRpb25h\nbFxcbnsgXFxcInRpdGxlXFxcIio6IHh4LCBcXG4gIFxcXCJzdW1tYXJ5XFxcIjogeHgsIFxcbiAg\nXFxcInNlY3Rpb25zXFxcIjogW3sgXFxcInRpdGxlXFxcIio6IHl5LCBcXFwidGV4dFxcXCIqOiB5\neSwgXFxuICAgICAgICAgICAgICAgICAgICAgICAgXFxcImZhY3RzXFxcIio6IFt7XFxcIm5hbWVc\nXFwiOiB6eiwgXFxcInZhbHVlXFxcIjogenp9XVxcbiAgICAgICAgICAgICAgfV1cXG59XFxuXFxc\nIlxcXCJcXFwiXFxuXFxucGF5bG9hZCA9IHVcXFwiXFxcIlxcXCJ7eyBcXFwic3VtbWFyeVxcXCI6\nIFxcXCJSZXNpbGllbnQgSW5jaWRlbnRcXFwiLCBcXFwic2VjdGlvbnNcXFwiOiBbIFxcbiAge3sg\nXFxcImZhY3RzXFxcIjogWyBcXG4gICAge3sgXFxcIm5hbWVcXFwiOiBcXFwiTmFtZVxcXCIsIFxc\nXCJ2YWx1ZVxcXCI6IFxcXCJ7fVxcXCIgfX0sIFxcbiAgICB7eyBcXFwibmFtZVxcXCI6IFxcXCJE\nZXNjcmlwdGlvblxcXCIsIFxcXCJ2YWx1ZVxcXCI6IFxcXCJ7fVxcXCIgfX0sIFxcbiAgICB7eyBc\nXFwibmFtZVxcXCI6IFxcXCJJZFxcXCIsIFxcXCJ2YWx1ZVxcXCI6IFxcXCJ7fVxcXCIgfX0sIFxc\nbiAgICB7eyBc
XFwibmFtZVxcXCI6IFxcXCJPd25lclxcXCIsIFxcXCJ2YWx1ZVxcXCI6IFxcXCJ7\nfVxcXCIgfX0sIFxcbiAgICB7eyBcXFwibmFtZVxcXCI6IFxcXCJUeXBlc1xcXCIsIFxcXCJ2YWx1\nZVxcXCI6IFxcXCJ7fVxcXCIgfX0sIFxcbiAgICB7eyBcXFwibmFtZVxcXCI6IFxcXCJOSVNUIEF0\ndGFjayBWZWN0b3JzXFxcIiwgXFxcInZhbHVlXFxcIjogXFxcInt9XFxcIiB9fSwgXFxuICAgIHt7\nIFxcXCJuYW1lXFxcIjogXFxcIkNyZWF0ZSBEYXRlXFxcIiwgXFxcInZhbHVlXFxcIjogXFxcInt9\nXFxcIiB9fSwgXFxuICAgIHt7IFxcXCJuYW1lXFxcIjogXFxcIkRhdGUgT2NjdXJyZWRcXFwiLCBc\nXFwidmFsdWVcXFwiOiBcXFwie31cXFwiIH19LCBcXG4gICAge3sgXFxcIm5hbWVcXFwiOiBcXFwi\nRGlzY292ZXJlZCBEYXRlXFxcIiwgXFxcInZhbHVlXFxcIjogXFxcInt9XFxcIiB9fSwgXFxuICAg\nIHt7IFxcXCJuYW1lXFxcIjogXFxcIkNvbmZpcm1lZFxcXCIsIFxcXCJ2YWx1ZVxcXCI6IFxcXCJ7\nfVxcXCIgfX0sIFxcbiAgICB7eyBcXFwibmFtZVxcXCI6IFxcXCJTZXZlcml0eVxcXCIsIFxcXCJ2\nYWx1ZVxcXCI6IFxcXCJ7fVxcXCIgfX0gXFxuICAgXVxcbiAgfX0sXFxuICB7eyBcXFwidGV4dFxc\nXCI6IFxcXCJUYXNrXFxcIiwgXFxcImZhY3RzXFxcIjogWyBcXG4gICAge3sgXFxcIm5hbWVcXFwi\nOiBcXFwiVGFza1xcXCIsIFxcXCJ2YWx1ZVxcXCI6IFxcXCJ7fVxcXCIgfX0sIFxcbiAgICB7eyBc\nXFwibmFtZVxcXCI6IFxcXCJPd25lclxcXCIsIFxcXCJ2YWx1ZVxcXCI6IFxcXCJ7fVxcXCIgfX0s\nXFxuICAgIHt7IFxcXCJuYW1lXFxcIjogXFxcIkluc3RydWN0aW9uc1xcXCIsIFxcXCJ2YWx1ZVxc\nXCI6IFxcXCJ7fVxcXCIgfX0sXFxuICAgIHt7IFxcXCJuYW1lXFxcIjogXFxcIkR1ZSBEYXRlXFxc\nIiwgXFxcInZhbHVlXFxcIjogXFxcInt9XFxcIiB9fVxcbiAgICBdXFxuICB9fVxcbiBdIFxcbn19\nIFxcblxcXCJcXFwiXFxcIi5mb3JtYXQoaW5jaWRlbnQubmFtZSwgaW5jaWRlbnQuZGVzY3JpcHRp\nb24uY29udGVudC5yZXBsYWNlKCdcXFwiJywgJ1xcXFxcXFxcXFxcIicpIGlmIGluY2lkZW50LmRl\nc2NyaXB0aW9uIGVsc2UgXFxcIi1cXFwiLCBpbmNpZGVudC5pZCxcXG4gICBpbmNpZGVudC5vd25l\ncl9pZCBpZiBpbmNpZGVudC5vd25lcl9pZCBlbHNlIFxcXCItXFxcIixcXG4gICBcXFwiLCBcXFwi\nLmpvaW4oc3RyKHgpIGZvciB4IGluIGluY2lkZW50LmluY2lkZW50X3R5cGVfaWRzKSwgXFxcIiwg\nXFxcIi5qb2luKHN0cih4KSBmb3IgeCBpbiBpbmNpZGVudC5uaXN0X2F0dGFja192ZWN0b3JzKSxc\nXG4gICBEYXRlKGluY2lkZW50LmNyZWF0ZV9kYXRlKSwgRGF0ZShpbmNpZGVudC5zdGFydF9kYXRl\nKSBpZiBpbmNpZGVudC5zdGFydF9kYXRlIGVsc2UgXFxcIi1cXFwiLCBEYXRlKGluY2lkZW50LmRp\nc2NvdmVyZWRfZGF0ZSksXFxuICAgXFxcIlRydWVcXFwiIGlmIGluY2lkZW50LmNvbmZpcm1lZCBl\nbHNlIFxcXCJGYWxzZVxcXCIsXFxuICAgXFxcIi1cXFwiIGlmIG5vdCBpbmNpZGVudC5zZXZlcml0\neV9jb2RlIGVsc2UgaW5jaWRlbnQuc2V2ZXJpdHlfY29kZSxcXG4gICB0YXNrLm5hbWUsIHRhc2su\nb3duZXJfaWQgaWYgdGFzay5vd25lcl9pZCBlbHNlIFxcXCItXFxcIiwgdGFzay5pbnN0cnVjdGlv\nbnMuY29udGVudC5yZXBsYWNlKCdcXFwiJywgXFxcIidcXFwiKSBpZiB0YXNrLmluc3RydWN0aW9u\ncyBlbHNlIFxcXCItXFxcIiwgRGF0ZSh0YXNrLmR1ZV9kYXRlKSBpZiB0YXNrLmR1ZV9kYXRlIGVs\nc2UgXFxcIi1cXFwiXFxuICAgKVxcblxcbmlucHV0cy50ZWFtc19wYXlsb2FkID0gcGF5bG9hZFwi\nfTwvcmVzaWxpZW50OmZ1bmN0aW9uPjwvZXh0ZW5zaW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVl\nbmNlRmxvd18wcTVsc2hiPC9pbmNvbWluZz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzFqOWRhNDU8\nL291dGdvaW5nPjwvc2VydmljZVRhc2s+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18w\ncTVsc2hiXCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiU2Vy\ndmljZVRhc2tfMTduNjhiZlwiLz48ZW5kRXZlbnQgaWQ9XCJFbmRFdmVudF8xZDI2YzdyXCI+PGlu\nY29taW5nPlNlcXVlbmNlRmxvd18xajlkYTQ1PC9pbmNvbWluZz48L2VuZEV2ZW50PjxzZXF1ZW5j\nZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3dfMWo5ZGE0NVwiIHNvdXJjZVJlZj1cIlNlcnZpY2VUYXNr\nXzE3bjY4YmZcIiB0YXJnZXRSZWY9XCJFbmRFdmVudF8xZDI2YzdyXCIvPjx0ZXh0QW5ub3RhdGlv\nbiBpZD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIj48dGV4dD5TdGFydCB5b3VyIHdvcmtmbG93\nIGhlcmU8L3RleHQ+PC90ZXh0QW5ub3RhdGlvbj48YXNzb2NpYXRpb24gaWQ9XCJBc3NvY2lhdGlv\nbl8xc2V1ajQ4XCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwi\nVGV4dEFubm90YXRpb25fMWt4eGl5dFwiLz48dGV4dEFubm90YXRpb24gaWQ9XCJUZXh0QW5ub3Rh\ndGlvbl8xcThudTQwXCI+PHRleHQ+Rm9ybWF0IHRlYW1zX3BheWxvYWQgYXMgYSBqc29uIG9iamVj\ndC4gU2VlIHByZS1wcm9jZXNzb3Igc2NyaXB0IGZvciBmb3JtYXQuPC90Z
Xh0PjwvdGV4dEFubm90\nYXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMTkyNXNkdVwiIHNvdXJjZVJlZj1c\nIlNlcnZpY2VUYXNrXzE3bjY4YmZcIiB0YXJnZXRSZWY9XCJUZXh0QW5ub3RhdGlvbl8xcThudTQw\nXCIvPjwvcHJvY2Vzcz48YnBtbmRpOkJQTU5EaWFncmFtIGlkPVwiQlBNTkRpYWdyYW1fMVwiPjxi\ncG1uZGk6QlBNTlBsYW5lIGJwbW5FbGVtZW50PVwidW5kZWZpbmVkXCIgaWQ9XCJCUE1OUGxhbmVf\nMVwiPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiU3RhcnRFdmVudF8xNTVhc3htXCIg\naWQ9XCJTdGFydEV2ZW50XzE1NWFzeG1fZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjM2XCIg\nd2lkdGg9XCIzNlwiIHg9XCIxNjJcIiB5PVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdk\nYzpCb3VuZHMgaGVpZ2h0PVwiMFwiIHdpZHRoPVwiOTBcIiB4PVwiMTU3XCIgeT1cIjIyM1wiLz48\nL2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTlNoYXBlIGJw\nbW5FbGVtZW50PVwiVGV4dEFubm90YXRpb25fMWt4eGl5dFwiIGlkPVwiVGV4dEFubm90YXRpb25f\nMWt4eGl5dF9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzBcIiB3aWR0aD1cIjEwMFwiIHg9\nXCI5OVwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBt\nbkVsZW1lbnQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4XCIgaWQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4\nX2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIxNjlcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIg\neT1cIjIyMFwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjE1M1wiIHhzaTp0eXBlPVwib21nZGM6UG9p\nbnRcIiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5F\nbGVtZW50PVwiU2VydmljZVRhc2tfMTduNjhiZlwiIGlkPVwiU2VydmljZVRhc2tfMTduNjhiZl9k\naVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiODBcIiB3aWR0aD1cIjEwMFwiIHg9XCIyNTFcIiB5\nPVwiMTY2XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50\nPVwiU2VxdWVuY2VGbG93XzBxNWxzaGJcIiBpZD1cIlNlcXVlbmNlRmxvd18wcTVsc2hiX2RpXCI+\nPG9tZ2RpOndheXBvaW50IHg9XCIxOThcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIw\nNlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjI1MVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5\nPVwiMjA2XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3\naWR0aD1cIjBcIiB4PVwiMjI0LjVcIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2Jw\nbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIkVuZEV2ZW50XzFk\nMjZjN3JcIiBpZD1cIkVuZEV2ZW50XzFkMjZjN3JfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1c\nIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCI0MTVcIiB5PVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVs\nPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiNDMzXCIgeT1cIjIy\nN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVk\nZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMWo5ZGE0NVwiIGlkPVwiU2VxdWVuY2VGbG93\nXzFqOWRhNDVfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjM1MVwiIHhzaTp0eXBlPVwib21nZGM6\nUG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNDE1XCIgeHNpOnR5cGU9XCJv\nbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBo\nZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIzODNcIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQ\nTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1c\nIlRleHRBbm5vdGF0aW9uXzFxOG51NDBcIiBpZD1cIlRleHRBbm5vdGF0aW9uXzFxOG51NDBfZGlc\nIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjY4XCIgd2lkdGg9XCIxODVcIiB4PVwiMTMwXCIgeT1c\nIjY4XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwi\nQXNzb2NpYXRpb25fMTkyNXNkdVwiIGlkPVwiQXNzb2NpYXRpb25fMTkyNXNkdV9kaVwiPjxvbWdk\naTp3YXlwb2ludCB4PVwiMjcxXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIxNjZcIi8+\nPG9tZ2RpOndheXBvaW50IHg9XCIyNDlcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjEz\nNlwiLz48L2JwbW5kaTpCUE1ORWRnZT48L2JwbW5kaTpCUE1OUGxhbmU+PC9icG1uZGk6QlBNTkRp\nYWdyYW0+PC9kZWZpbml0aW9ucz4iLCAid29ya2Zsb3dfaWQiOiAiZXhhbXBsZV9wb3N0X3Rhc2tf\ndG9fbWljcm9zb2Z0X3RlYW1zIiwgInZlcnNpb24iOiA0MH0sICJsYXN0X21vZGlmaWVkX3RpbWUi\nOiAxNTY4NzY5NDg1NTQxLCAiY3JlYXRvcl9pZCI6ICJhQGV4YW1wbGUuY29tIiwgImFjdGlvbnMi\nOiBbXSwgInByb2dyYW1tYXRp
Y19uYW1lIjogImV4YW1wbGVfcG9zdF90YXNrX3RvX21pY3Jvc29m\ndF90ZWFtcyIsICJuYW1lIjogIkV4YW1wbGU6IFBvc3QgVGFzayB0byBNaWNyb3NvZnQgVGVhbXMi\nfSwgeyJ1dWlkIjogImQ1YjU4NDJjLTI3MTUtNDA4Ny1iNDM0LWRkZDk5ZTM4YzNmOCIsICJkZXNj\ncmlwdGlvbiI6ICJFeGFtcGxlIG9mIHBvc3RpbmcgaW5jaWRlbnQgZGF0YSB0byBhIE1pY3Jvc29m\ndCBUZWFtcyBjaGFubmVsLiIsICJvYmplY3RfdHlwZSI6ICJpbmNpZGVudCIsICJleHBvcnRfa2V5\nIjogImV4YW1wbGVfcG9zdF9pbmNpZGVudF90b19tc190ZWFtcyIsICJ3b3JrZmxvd19pZCI6IDUy\nLCAibGFzdF9tb2RpZmllZF9ieSI6ICJhQGV4YW1wbGUuY29tIiwgImNvbnRlbnQiOiB7InhtbCI6\nICI8P3htbCB2ZXJzaW9uPVwiMS4wXCIgZW5jb2Rpbmc9XCJVVEYtOFwiPz48ZGVmaW5pdGlvbnMg\neG1sbnM9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9CUE1OLzIwMTAwNTI0L01PREVMXCIgeG1s\nbnM6YnBtbmRpPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9ESVwiIHht\nbG5zOm9tZ2RjPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1MjQvRENcIiB4bWxu\nczpvbWdkaT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RJXCIgeG1sbnM6\ncmVzaWxpZW50PVwiaHR0cDovL3Jlc2lsaWVudC5pYm0uY29tL2JwbW5cIiB4bWxuczp4c2Q9XCJo\ndHRwOi8vd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYVwiIHhtbG5zOnhzaT1cImh0dHA6Ly93d3cu\ndzMub3JnLzIwMDEvWE1MU2NoZW1hLWluc3RhbmNlXCIgdGFyZ2V0TmFtZXNwYWNlPVwiaHR0cDov\nL3d3dy5jYW11bmRhLm9yZy90ZXN0XCI+PHByb2Nlc3MgaWQ9XCJleGFtcGxlX3Bvc3RfaW5jaWRl\nbnRfdG9fbXNfdGVhbXNcIiBpc0V4ZWN1dGFibGU9XCJ0cnVlXCIgbmFtZT1cIkV4YW1wbGU6IFBv\nc3QgSW5jaWRlbnQgdG8gTWljcm9zb2Z0IFRlYW1zXCI+PGRvY3VtZW50YXRpb24+RXhhbXBsZSBv\nZiBwb3N0aW5nIGluY2lkZW50IGRhdGEgdG8gYSBNaWNyb3NvZnQgVGVhbXMgY2hhbm5lbC48L2Rv\nY3VtZW50YXRpb24+PHN0YXJ0RXZlbnQgaWQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIj48b3V0Z29p\nbmc+U2VxdWVuY2VGbG93XzF0cWV1dWs8L291dGdvaW5nPjwvc3RhcnRFdmVudD48c2VydmljZVRh\nc2sgaWQ9XCJTZXJ2aWNlVGFza18wbnJubGthXCIgbmFtZT1cIlRlYW1zIFBvc3QgTWVzc2FnZVwi\nIHJlc2lsaWVudDp0eXBlPVwiZnVuY3Rpb25cIj48ZXh0ZW5zaW9uRWxlbWVudHM+PHJlc2lsaWVu\ndDpmdW5jdGlvbiB1dWlkPVwiMGM4ZTQ0OTctYzEzMS00ZDVkLWJkZjMtMzE1M2QzMGI5YmJjXCI+\ne1wiaW5wdXRzXCI6e1wiNzYwMjNjZTMtZmMxNy00MWQxLTkwMDItMjM5MjI4M2NlMzE1XCI6e1wi\naW5wdXRfdHlwZVwiOlwic3RhdGljXCIsXCJzdGF0aWNfaW5wdXRcIjp7XCJtdWx0aXNlbGVjdF92\nYWx1ZVwiOltdLFwidGV4dF92YWx1ZVwiOlwidGVzdGNoYW5uZWxcIn19LFwiZmE2NGEwOTktZjNk\nNC00Y2FhLWJkNjQtNzJmZmRiNDY0MTRmXCI6e1wiaW5wdXRfdHlwZVwiOlwic3RhdGljXCIsXCJz\ndGF0aWNfaW5wdXRcIjp7XCJib29sZWFuX3ZhbHVlXCI6dHJ1ZSxcIm11bHRpc2VsZWN0X3ZhbHVl\nXCI6W119fX0sXCJwcmVfcHJvY2Vzc2luZ19zY3JpcHRcIjpcImZyb20gamF2YS51dGlsIGltcG9y\ndCBEYXRlXFxuXFxuaW5wdXRzLmluY2lkZW50X2lkID0gaW5jaWRlbnQuaWRcXG5cXG5cXFwiXFxc\nIlxcXCJcXG5mb3JtYXQgb2YgYSBwYXlsb2FkLiAqID0gb3B0aW9uYWxcXG57IFxcXCJ0aXRsZVxc\nXCIqOiB4eCwgXFxuICBcXFwic3VtbWFyeVxcXCI6IHh4LCBcXG4gIFxcXCJzZWN0aW9uc1xcXCI6\nIFt7IFxcXCJ0aXRsZVxcXCIqOiB5eSwgXFxcInRleHRcXFwiKjogeXksIFxcbiAgICAgICAgICAg\nICAgICAgICAgICAgIFxcXCJmYWN0c1xcXCIqOiBbe1xcXCJuYW1lXFxcIjogenosIFxcXCJ2YWx1\nZVxcXCI6IHp6fV1cXG4gICAgICAgICAgICAgIH1dXFxufVxcblxcXCJcXFwiXFxcIlxcblxcbnBh\neWxvYWQgPSB1XFxcIlxcXCJcXFwie3sgXFxcInN1bW1hcnlcXFwiOiBcXFwiUmVzaWxpZW50IElu\nY2lkZW50XFxcIiwgXFxcInNlY3Rpb25zXFxcIjogWyBcXG4gIHt7IFxcXCJmYWN0c1xcXCI6IFsg\nXFxuICAgIHt7IFxcXCJuYW1lXFxcIjogXFxcIk5hbWVcXFwiLCBcXFwidmFsdWVcXFwiOiBcXFwi\ne31cXFwiIH19LCBcXG4gICAge3sgXFxcIm5hbWVcXFwiOiBcXFwiRGVzY3JpcHRpb25cXFwiLCBc\nXFwidmFsdWVcXFwiOiBcXFwie31cXFwiIH19LCBcXG4gICAge3sgXFxcIm5hbWVcXFwiOiBcXFwi\nSWRcXFwiLCBcXFwidmFsdWVcXFwiOiBcXFwie31cXFwiIH19LCBcXG4gICAge3sgXFxcIm5hbWVc\nXFwiOiBcXFwiT3duZXJcXFwiLCBcXFwidmFsdWVcXFwiOiBcXFwie31cXFwiIH19LCBcXG4gICAg\ne3sgXFxcIm5hbWVcXFwiOiBcXFwiVHlwZXNcXFwiLCBcXFwidmFsdWVcXFwiOiBcXFwie31cXFwi\nIH19LCBcXG4gICAge3sgXFxcIm5hbWVcXFwiOiBcXFwiTklTVCBBdHRhY2sgVmVjdG9yc1xcXCIs\nIFxcXCJ2YWx1ZVxcXCI6IFxcXCJ7fVxcXCIgfX0sIFxcbiAgICB7eyBcXFwibmFtZVxcX
CI6IFxc\nXCJDcmVhdGUgRGF0ZVxcXCIsIFxcXCJ2YWx1ZVxcXCI6IFxcXCJ7fVxcXCIgfX0sIFxcbiAgICB7\neyBcXFwibmFtZVxcXCI6IFxcXCJEYXRlIE9jY3VycmVkXFxcIiwgXFxcInZhbHVlXFxcIjogXFxc\nInt9XFxcIiB9fSwgXFxuICAgIHt7IFxcXCJuYW1lXFxcIjogXFxcIkRpc2NvdmVyZWQgRGF0ZVxc\nXCIsIFxcXCJ2YWx1ZVxcXCI6IFxcXCJ7fVxcXCIgfX0sIFxcbiAgICB7eyBcXFwibmFtZVxcXCI6\nIFxcXCJDb25maXJtZWRcXFwiLCBcXFwidmFsdWVcXFwiOiBcXFwie31cXFwiIH19LCBcXG4gICAg\ne3sgXFxcIm5hbWVcXFwiOiBcXFwiU2V2ZXJpdHlcXFwiLCBcXFwidmFsdWVcXFwiOiBcXFwie31c\nXFwiIH19IFxcbiAgIF1cXG4gIH19XFxuIF0gXFxufX0gXFxuXFxcIlxcXCJcXFwiLmZvcm1hdChp\nbmNpZGVudC5uYW1lLCBpbmNpZGVudC5kZXNjcmlwdGlvbi5jb250ZW50LnJlcGxhY2UoJ1xcXCIn\nLCAnXFxcXFxcXFxcXFwiJykgaWYgaW5jaWRlbnQuZGVzY3JpcHRpb24gZWxzZSBcXFwiLVxcXCIs\nIGluY2lkZW50LmlkLFxcbiAgIGluY2lkZW50Lm93bmVyX2lkIGlmIGluY2lkZW50Lm93bmVyX2lk\nIGVsc2UgXFxcIi1cXFwiLFxcbiAgIFxcXCIsIFxcXCIuam9pbihzdHIoeCkgZm9yIHggaW4gaW5j\naWRlbnQuaW5jaWRlbnRfdHlwZV9pZHMpLCBcXFwiLCBcXFwiLmpvaW4oc3RyKHgpIGZvciB4IGlu\nIGluY2lkZW50Lm5pc3RfYXR0YWNrX3ZlY3RvcnMpLFxcbiAgIERhdGUoaW5jaWRlbnQuY3JlYXRl\nX2RhdGUpLCBEYXRlKGluY2lkZW50LnN0YXJ0X2RhdGUpIGlmIGluY2lkZW50LnN0YXJ0X2RhdGUg\nZWxzZSBcXFwiLVxcXCIsIERhdGUoaW5jaWRlbnQuZGlzY292ZXJlZF9kYXRlKSxcXG4gICBcXFwi\nVHJ1ZVxcXCIgaWYgaW5jaWRlbnQuY29uZmlybWVkIGVsc2UgXFxcIkZhbHNlXFxcIixcXG4gICBc\nXFwiLVxcXCIgaWYgbm90IGluY2lkZW50LnNldmVyaXR5X2NvZGUgZWxzZSBpbmNpZGVudC5zZXZl\ncml0eV9jb2RlXFxuICAgKVxcblxcbmlucHV0cy50ZWFtc19wYXlsb2FkID0gcGF5bG9hZFwifTwv\ncmVzaWxpZW50OmZ1bmN0aW9uPjwvZXh0ZW5zaW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNl\nRmxvd18xdHFldXVrPC9pbmNvbWluZz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzE0cjZ5dzQ8L291\ndGdvaW5nPjwvc2VydmljZVRhc2s+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18xdHFl\ndXVrXCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiU2Vydmlj\nZVRhc2tfMG5ybmxrYVwiLz48ZW5kRXZlbnQgaWQ9XCJFbmRFdmVudF8xY3g1eW05XCI+PGluY29t\naW5nPlNlcXVlbmNlRmxvd18xNHI2eXc0PC9pbmNvbWluZz48L2VuZEV2ZW50PjxzZXF1ZW5jZUZs\nb3cgaWQ9XCJTZXF1ZW5jZUZsb3dfMTRyNnl3NFwiIHNvdXJjZVJlZj1cIlNlcnZpY2VUYXNrXzBu\ncm5sa2FcIiB0YXJnZXRSZWY9XCJFbmRFdmVudF8xY3g1eW05XCIvPjx0ZXh0QW5ub3RhdGlvbiBp\nZD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIj48dGV4dD5TdGFydCB5b3VyIHdvcmtmbG93IGhl\ncmU8L3RleHQ+PC90ZXh0QW5ub3RhdGlvbj48YXNzb2NpYXRpb24gaWQ9XCJBc3NvY2lhdGlvbl8x\nc2V1ajQ4XCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiVGV4\ndEFubm90YXRpb25fMWt4eGl5dFwiLz48dGV4dEFubm90YXRpb24gaWQ9XCJUZXh0QW5ub3RhdGlv\nbl8waW5nOHJyXCI+PHRleHQ+PCFbQ0RBVEFbRm9ybWF0IHRlYW1zX3BheWxvYWQgYXMgYSBqc29u\nIG9iamVjdC4gU2VlIHByZS1wcm9jZXNzb3Igc2NyaXB0IGZvciBmb3JtYXQuXG5dXT48L3RleHQ+\nPC90ZXh0QW5ub3RhdGlvbj48YXNzb2NpYXRpb24gaWQ9XCJBc3NvY2lhdGlvbl8xY2d6YjAzXCIg\nc291cmNlUmVmPVwiU2VydmljZVRhc2tfMG5ybmxrYVwiIHRhcmdldFJlZj1cIlRleHRBbm5vdGF0\naW9uXzBpbmc4cnJcIi8+PC9wcm9jZXNzPjxicG1uZGk6QlBNTkRpYWdyYW0gaWQ9XCJCUE1ORGlh\nZ3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBtbkVsZW1lbnQ9XCJ1bmRlZmluZWRcIiBpZD1c\nIkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTdGFydEV2ZW50\nXzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVp\nZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2MlwiIHk9XCIxODhcIi8+PGJwbW5kaTpCUE1O\nTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIwXCIgd2lkdGg9XCI5MFwiIHg9XCIxNTdcIiB5\nPVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpC\nUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCIgaWQ9XCJUZXh0\nQW5ub3RhdGlvbl8xa3h4aXl0X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzMFwiIHdpZHRo\nPVwiMTAwXCIgeD1cIjk5XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpC\nUE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9uXzFzZXVqNDhcIiBpZD1cIkFzc29jaWF0\naW9uXzFzZXVqNDhfZGlcIj48b21nZGk6d2F5
cG9pbnQgeD1cIjE2OVwiIHhzaTp0eXBlPVwib21n\nZGM6UG9pbnRcIiB5PVwiMjIwXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMTUzXCIgeHNpOnR5cGU9\nXCJvbWdkYzpQb2ludFwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1O\nU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18wbnJubGthXCIgaWQ9XCJTZXJ2aWNlVGFz\na18wbnJubGthX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI4MFwiIHdpZHRoPVwiMTAwXCIg\neD1cIjI3OFwiIHk9XCIxNjZcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2Ug\nYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMXRxZXV1a1wiIGlkPVwiU2VxdWVuY2VGbG93XzF0\ncWV1dWtfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE5OFwiIHhzaTp0eXBlPVwib21nZGM6UG9p\nbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMjc4XCIgeHNpOnR5cGU9XCJvbWdk\nYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWln\naHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIyMzhcIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5M\nYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIkVu\nZEV2ZW50XzFjeDV5bTlcIiBpZD1cIkVuZEV2ZW50XzFjeDV5bTlfZGlcIj48b21nZGM6Qm91bmRz\nIGhlaWdodD1cIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCI0NTdcIiB5PVwiMTg4XCIvPjxicG1uZGk6\nQlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiNDc1\nXCIgeT1cIjIyN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1u\nZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMTRyNnl3NFwiIGlkPVwiU2Vx\ndWVuY2VGbG93XzE0cjZ5dzRfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjM3OFwiIHhzaTp0eXBl\nPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNDU3XCIgeHNp\nOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2Rj\nOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI0MTcuNVwiIHk9XCIxODRcIi8+\nPC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJw\nbW5FbGVtZW50PVwiVGV4dEFubm90YXRpb25fMGluZzhyclwiIGlkPVwiVGV4dEFubm90YXRpb25f\nMGluZzhycl9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiODJcIiB3aWR0aD1cIjIwN1wiIHg9\nXCIxMzBcIiB5PVwiNTdcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBt\nbkVsZW1lbnQ9XCJBc3NvY2lhdGlvbl8xY2d6YjAzXCIgaWQ9XCJBc3NvY2lhdGlvbl8xY2d6YjAz\nX2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIyOTNcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIg\neT1cIjE2NlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjI3MFwiIHhzaTp0eXBlPVwib21nZGM6UG9p\nbnRcIiB5PVwiMTM5XCIvPjwvYnBtbmRpOkJQTU5FZGdlPjwvYnBtbmRpOkJQTU5QbGFuZT48L2Jw\nbW5kaTpCUE1ORGlhZ3JhbT48L2RlZmluaXRpb25zPiIsICJ3b3JrZmxvd19pZCI6ICJleGFtcGxl\nX3Bvc3RfaW5jaWRlbnRfdG9fbXNfdGVhbXMiLCAidmVyc2lvbiI6IDU5fSwgImxhc3RfbW9kaWZp\nZWRfdGltZSI6IDE1Njg3Njk0MTc0OTAsICJjcmVhdG9yX2lkIjogImFAZXhhbXBsZS5jb20iLCAi\nYWN0aW9ucyI6IFtdLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9wb3N0X2luY2lkZW50\nX3RvX21zX3RlYW1zIiwgIm5hbWUiOiAiRXhhbXBsZTogUG9zdCBJbmNpZGVudCB0byBNaWNyb3Nv\nZnQgVGVhbXMifV0sICJhY3Rpb25zIjogW3sibG9naWNfdHlwZSI6ICJhbGwiLCAibmFtZSI6ICJF\neGFtcGxlOiBQb3N0IGFuIEluY2lkZW50IHRvIE1pY3Jvc29mdCBUZWFtcyIsICJ2aWV3X2l0ZW1z\nIjogW10sICJ0eXBlIjogMSwgIndvcmtmbG93cyI6IFsiZXhhbXBsZV9wb3N0X2luY2lkZW50X3Rv\nX21zX3RlYW1zIl0sICJvYmplY3RfdHlwZSI6ICJpbmNpZGVudCIsICJ0aW1lb3V0X3NlY29uZHMi\nOiA4NjQwMCwgInV1aWQiOiAiYWI0MjM0NTctNmE4MC00M2UxLWJhNzItMjRkNzcyOTM1OWRkIiwg\nImF1dG9tYXRpb25zIjogW10sICJleHBvcnRfa2V5IjogIkV4YW1wbGU6IFBvc3QgYW4gSW5jaWRl\nbnQgdG8gTWljcm9zb2Z0IFRlYW1zIiwgImNvbmRpdGlvbnMiOiBbXSwgImlkIjogNzQsICJtZXNz\nYWdlX2Rlc3RpbmF0aW9ucyI6IFtdfSwgeyJsb2dpY190eXBlIjogImFsbCIsICJuYW1lIjogIkV4\nYW1wbGU6IFBvc3QgYSBUYXNrIHRvIE1pY3Jvc29mdCBUZWFtcyIsICJ2aWV3X2l0ZW1zIjogW10s\nICJ0eXBlIjogMSwgIndvcmtmbG93cyI6IFsiZXhhbXBsZV9wb3N0X3Rhc2tfdG9fbWljcm9zb2Z0\nX3RlYW1zIl0sICJvYmplY3RfdHlwZSI6ICJ0YXNrIiwgInRpbWVvdXRfc2Vjb25kcyI6IDg2NDAw\nLCAidXVpZCI6ICJhNGVhMTcyZS02MTAzLTRkYjEtODdhMC0xN2E1ZTJlMjdhOTciLCAiYXV0b21h\ndGl
vbnMiOiBbXSwgImV4cG9ydF9rZXkiOiAiRXhhbXBsZTogUG9zdCBhIFRhc2sgdG8gTWljcm9z\nb2Z0IFRlYW1zIiwgImNvbmRpdGlvbnMiOiBbXSwgImlkIjogNzUsICJtZXNzYWdlX2Rlc3RpbmF0\naW9ucyI6IFtdfV0sICJsYXlvdXRzIjogW10sICJleHBvcnRfZm9ybWF0X3ZlcnNpb24iOiAyLCAi\naWQiOiA5MSwgImluZHVzdHJpZXMiOiBudWxsLCAicGhhc2VzIjogW10sICJhY3Rpb25fb3JkZXIi\nOiBbXSwgImdlb3MiOiBudWxsLCAibG9jYWxlIjogbnVsbCwgInNlcnZlcl92ZXJzaW9uIjogeyJt\nYWpvciI6IDMxLCAidmVyc2lvbiI6ICIzMS4wLjQyNTQiLCAiYnVpbGRfbnVtYmVyIjogNDI1NCwg\nIm1pbm9yIjogMH0sICJ0aW1lZnJhbWVzIjogbnVsbCwgIndvcmtzcGFjZXMiOiBbXSwgImF1dG9t\nYXRpY190YXNrcyI6IFtdLCAiZnVuY3Rpb25zIjogW3siZGlzcGxheV9uYW1lIjogIlRlYW1zIFBv\nc3QgTWVzc2FnZSIsICJkZXNjcmlwdGlvbiI6IHsiY29udGVudCI6ICJQb3N0IGEgbWVzc2FnZSB0\nbyBhIE1pY3Jvc29mdCBUZWFtcyBjaGFubmVsIiwgImZvcm1hdCI6ICJ0ZXh0In0sICJjcmVhdG9y\nIjogeyJkaXNwbGF5X25hbWUiOiAiYWJsZSBiYWtlciIsICJ0eXBlIjogInVzZXIiLCAiaWQiOiAz\nLCAibmFtZSI6ICJhQGV4YW1wbGUuY29tIn0sICJ2aWV3X2l0ZW1zIjogW3sic2hvd19pZiI6IG51\nbGwsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNl\nLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiM2YzNWYxYTktZjVkNi00NDBh\nLWE4MjUtNjZhMzQwYWVhZWZlIiwgInN0ZXBfbGFiZWwiOiBudWxsfSwgeyJzaG93X2lmIjogbnVs\nbCwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2Us\nICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6ICI5NThmMDk1My04YjZmLTQ0NzIt\nYjc4Ni1iOWFlNDM1MWRkZmUiLCAic3RlcF9sYWJlbCI6IG51bGx9LCB7InNob3dfaWYiOiBudWxs\nLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNob3dfbGlua19oZWFkZXIiOiBmYWxzZSwg\nImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50IjogIjc2MDIzY2UzLWZjMTctNDFkMS05\nMDAyLTIzOTIyODNjZTMxNSIsICJzdGVwX2xhYmVsIjogbnVsbH0sIHsic2hvd19pZiI6IG51bGws\nICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNlLCAi\nZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiMTNhMjRlYjEtMWMwNC00MDA5LWE4\nMGUtODU3YTVjOGRjNDFmIiwgInN0ZXBfbGFiZWwiOiBudWxsfSwgeyJzaG93X2lmIjogbnVsbCwg\nImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2UsICJl\nbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6ICJmYTY0YTA5OS1mM2Q0LTRjYWEtYmQ2\nNC03MmZmZGI0NjQxNGYiLCAic3RlcF9sYWJlbCI6IG51bGx9XSwgImV4cG9ydF9rZXkiOiAidGVh\nbXNfcG9zdF9tZXNzYWdlIiwgInV1aWQiOiAiMGM4ZTQ0OTctYzEzMS00ZDVkLWJkZjMtMzE1M2Qz\nMGI5YmJjIiwgImxhc3RfbW9kaWZpZWRfYnkiOiB7ImRpc3BsYXlfbmFtZSI6ICJhYmxlIGJha2Vy\nIiwgInR5cGUiOiAidXNlciIsICJpZCI6IDMsICJuYW1lIjogImFAZXhhbXBsZS5jb20ifSwgInZl\ncnNpb24iOiAzLCAid29ya2Zsb3dzIjogW3siZGVzY3JpcHRpb24iOiBudWxsLCAib2JqZWN0X3R5\ncGUiOiAiaW5jaWRlbnQiLCAiYWN0aW9ucyI6IFtdLCAibmFtZSI6ICJFeGFtcGxlOiBQb3N0IElu\nY2lkZW50IHRvIE1pY3Jvc29mdCBUZWFtcyIsICJ3b3JrZmxvd19pZCI6IDUyLCAicHJvZ3JhbW1h\ndGljX25hbWUiOiAiZXhhbXBsZV9wb3N0X2luY2lkZW50X3RvX21zX3RlYW1zIiwgInV1aWQiOiBu\ndWxsfSwgeyJkZXNjcmlwdGlvbiI6IG51bGwsICJvYmplY3RfdHlwZSI6ICJ0YXNrIiwgImFjdGlv\nbnMiOiBbXSwgIm5hbWUiOiAiRXhhbXBsZTogUG9zdCBUYXNrIHRvIE1pY3Jvc29mdCBUZWFtcyIs\nICJ3b3JrZmxvd19pZCI6IDUzLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9wb3N0X3Rh\nc2tfdG9fbWljcm9zb2Z0X3RlYW1zIiwgInV1aWQiOiBudWxsfV0sICJsYXN0X21vZGlmaWVkX3Rp\nbWUiOiAxNTY4NzQ3Mjg3NTQ4LCAiZGVzdGluYXRpb25faGFuZGxlIjogImZuX3RlYW1zIiwgImlk\nIjogNDYsICJuYW1lIjogInRlYW1zX3Bvc3RfbWVzc2FnZSJ9XSwgIm5vdGlmaWNhdGlvbnMiOiBu\ndWxsLCAicmVndWxhdG9ycyI6IG51bGwsICJpbmNpZGVudF90eXBlcyI6IFt7ImNyZWF0ZV9kYXRl\nIjogMTU3MTQyNTQ4NTM4MywgImRlc2NyaXB0aW9uIjogIkN1c3RvbWl6YXRpb24gUGFja2FnZXMg\nKGludGVybmFsKSIsICJleHBvcnRfa2V5IjogIkN1c3RvbWl6YXRpb24gUGFja2FnZXMgKGludGVy\nbmFsKSIsICJpZCI6IDAsICJuYW1lIjogIkN1c3RvbWl6YXRpb24gUGFja2FnZXMgKGludGVybmFs\nKSIsICJ1cGRhdGVfZGF0ZSI6IDE1NzE0MjU0ODUzODMsICJ1dWlkIjogImJmZWVjMmQ0LTM3NzAt\nMTFlOC1hZDM5LTRhMDAwNDA0NGFhMCIsICJlbmFibGVkIjog
ZmFsc2UsICJzeXN0ZW0iOiBmYWxz\nZSwgInBhcmVudF9pZCI6IG51bGwsICJoaWRkZW4iOiBmYWxzZX1dLCAic2NyaXB0cyI6IFtdLCAi\ndHlwZXMiOiBbXSwgIm1lc3NhZ2VfZGVzdGluYXRpb25zIjogW3sidXVpZCI6ICI4ZTVmOTRlYy02\nYTM5LTQzZTgtYjI0Yy0yMmZmZjkzN2UxMjEiLCAiZXhwb3J0X2tleSI6ICJmbl90ZWFtcyIsICJu\nYW1lIjogImZuX3RlYW1zIiwgImRlc3RpbmF0aW9uX3R5cGUiOiAwLCAicHJvZ3JhbW1hdGljX25h\nbWUiOiAiZm5fdGVhbXMiLCAiZXhwZWN0X2FjayI6IHRydWUsICJ1c2VycyI6IFsiYUBleGFtcGxl\nLmNvbSJdfV0sICJpbmNpZGVudF9hcnRpZmFjdF90eXBlcyI6IFtdLCAicm9sZXMiOiBbXSwgImZp\nZWxkcyI6IFt7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQiOiAwLCAib3BlcmF0aW9uX3Blcm1z\nIjoge30sICJ0ZXh0IjogIlNpbXVsYXRpb24iLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJwcmVm\naXgiOiBudWxsLCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDUxLCAicmVhZF9vbmx5IjogdHJ1\nZSwgInV1aWQiOiAiYzNmMGUzZWQtMjFlMS00ZDUzLWFmZmItZmU1Y2EzMzA4Y2NhIiwgImNob3Nl\nbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJib29sZWFuIiwgInRvb2x0aXAiOiAiV2hldGhlciB0\naGUgaW5jaWRlbnQgaXMgYSBzaW11bGF0aW9uIG9yIGEgcmVndWxhciBpbmNpZGVudC4gIFRoaXMg\nZmllbGQgaXMgcmVhZC1vbmx5LiIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFs\nc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAiaW5jaWRlbnQvaW5jX3RyYWluaW5n\nIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJuYW1lIjogImluY190cmFpbmluZyIsICJk\nZXByZWNhdGVkIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgInZh\nbHVlcyI6IFtdfSwgeyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjogMTEsICJvcGVyYXRpb25f\ncGVybXMiOiB7fSwgInRleHQiOiAidGVhbXNfY2hhbm5lbCIsICJibGFua19vcHRpb24iOiBmYWxz\nZSwgInByZWZpeCI6IG51bGwsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogMzAzLCAicmVhZF9v\nbmx5IjogZmFsc2UsICJ1dWlkIjogIjc2MDIzY2UzLWZjMTctNDFkMS05MDAyLTIzOTIyODNjZTMx\nNSIsICJjaG9zZW4iOiBmYWxzZSwgImlucHV0X3R5cGUiOiAidGV4dCIsICJ0b29sdGlwIjogIkxv\nb2t1cCB2YWx1ZSB0byBjaGFubmVsIHRvIHBvc3QgYSBtZXNzYWdlIiwgImludGVybmFsIjogZmFs\nc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRlbXBsYXRlcyI6IFtdLCAiZXhwb3J0X2tleSI6ICJf\nX2Z1bmN0aW9uL3RlYW1zX2NoYW5uZWwiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgInBs\nYWNlaG9sZGVyIjogIiIsICJuYW1lIjogInRlYW1zX2NoYW5uZWwiLCAiZGVwcmVjYXRlZCI6IGZh\nbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJyZXF1aXJlZCI6ICJhbHdh\neXMiLCAidmFsdWVzIjogW119LCB7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQiOiAxMSwgIm9w\nZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJ0ZWFtc19wYXlsb2FkIiwgImJsYW5rX29wdGlv\nbiI6IGZhbHNlLCAicHJlZml4IjogbnVsbCwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAzMDIs\nICJyZWFkX29ubHkiOiBmYWxzZSwgInV1aWQiOiAiMTNhMjRlYjEtMWMwNC00MDA5LWE4MGUtODU3\nYTVjOGRjNDFmIiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgInRvb2x0\naXAiOiAianNvbiBvZiB0ZWFtcyBjb252ZXJzYXRpb24gbWVzc2FnZTogc2VjdGlvbnMsIHRpdGxl\nLCB0ZXh0LCBmYWN0cyIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0\nZW1wbGF0ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi90ZWFtc19wYXlsb2FkIiwg\nImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJwbGFjZWhvbGRlciI6ICIiLCAibmFtZSI6ICJ0\nZWFtc19wYXlsb2FkIiwgImRlcHJlY2F0ZWQiOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3Nl\ncnZlciI6IGZhbHNlLCAicmVxdWlyZWQiOiAiYWx3YXlzIiwgInZhbHVlcyI6IFtdfSwgeyJvcGVy\nYXRpb25zIjogW10sICJ0eXBlX2lkIjogMTEsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQi\nOiAidGFza19pZCIsICJibGFua19vcHRpb24iOiBmYWxzZSwgInByZWZpeCI6IG51bGwsICJjaGFu\nZ2VhYmxlIjogdHJ1ZSwgImlkIjogMjEyLCAicmVhZF9vbmx5IjogZmFsc2UsICJ1dWlkIjogIjk1\nOGYwOTUzLThiNmYtNDQ3Mi1iNzg2LWI5YWU0MzUxZGRmZSIsICJjaG9zZW4iOiBmYWxzZSwgImlu\ncHV0X3R5cGUiOiAibnVtYmVyIiwgInRvb2x0aXAiOiAiIiwgImludGVybmFsIjogZmFsc2UsICJy\naWNoX3RleHQiOiBmYWxzZSwgInRlbXBsYXRlcyI6IFtdLCAiZXhwb3J0X2tleSI6ICJfX2Z1bmN0\naW9uL3Rhc2tfaWQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgInBsYWNlaG9sZGVyIjog\nIiIsICJuYW1lIjogInRhc2tfaWQiLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZGVmYXVsdF9jaG9z\nZW5fYnlfc2VydmV
yIjogZmFsc2UsICJ2YWx1ZXMiOiBbXX0sIHsib3BlcmF0aW9ucyI6IFtdLCAi\ndHlwZV9pZCI6IDExLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ0ZXh0IjogImluY2lkZW50X2lk\nIiwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAicHJlZml4IjogbnVsbCwgImNoYW5nZWFibGUiOiB0\ncnVlLCAiaWQiOiAyMDgsICJyZWFkX29ubHkiOiBmYWxzZSwgInV1aWQiOiAiM2YzNWYxYTktZjVk\nNi00NDBhLWE4MjUtNjZhMzQwYWVhZWZlIiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6\nICJudW1iZXIiLCAidG9vbHRpcCI6ICIiLCAiaW50ZXJuYWwiOiBmYWxzZSwgInJpY2hfdGV4dCI6\nIGZhbHNlLCAidGVtcGxhdGVzIjogW10sICJleHBvcnRfa2V5IjogIl9fZnVuY3Rpb24vaW5jaWRl\nbnRfaWQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgInBsYWNlaG9sZGVyIjogIiIsICJu\nYW1lIjogImluY2lkZW50X2lkIiwgImRlcHJlY2F0ZWQiOiBmYWxzZSwgImRlZmF1bHRfY2hvc2Vu\nX2J5X3NlcnZlciI6IGZhbHNlLCAicmVxdWlyZWQiOiAiYWx3YXlzIiwgInZhbHVlcyI6IFtdfSwg\neyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjogMTEsICJvcGVyYXRpb25fcGVybXMiOiB7fSwg\nInRleHQiOiAidGVhbXNfbXJrZG93biIsICJibGFua19vcHRpb24iOiB0cnVlLCAicHJlZml4Ijog\nbnVsbCwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAzMDEsICJyZWFkX29ubHkiOiBmYWxzZSwg\nInV1aWQiOiAiZmE2NGEwOTktZjNkNC00Y2FhLWJkNjQtNzJmZmRiNDY0MTRmIiwgImNob3NlbiI6\nIGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJib29sZWFuIiwgInRvb2x0aXAiOiAiIiwgImludGVybmFs\nIjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRlbXBsYXRlcyI6IFtdLCAiZXhwb3J0X2tl\neSI6ICJfX2Z1bmN0aW9uL3RlYW1zX21ya2Rvd24iLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxz\nZSwgInBsYWNlaG9sZGVyIjogIiIsICJuYW1lIjogInRlYW1zX21ya2Rvd24iLCAiZGVwcmVjYXRl\nZCI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJyZXF1aXJlZCI6\nICJhbHdheXMiLCAidmFsdWVzIjogW119XSwgIm92ZXJyaWRlcyI6IFtdLCAiZXhwb3J0X2RhdGUi\nOiAxNTcxNDI1NDc4NDgzfQ==\n\"\"\"\n )", "title": "" }, { "docid": "73acc1ec32b3709506cde10d8043490b", "score": "0.45401424", "text": "def extra_prepare(self, configuration, args_dict):\n aws_syncr = args_dict.pop(\"aws_syncr\")\n\n self.configuration.update(\n { \"$@\": aws_syncr.get(\"extra\", \"\")\n , \"aws_syncr\": aws_syncr\n , \"templates\": {}\n , \"config_folder\": self.configuration_folder\n }\n , source = \"<args_dict>\"\n )", "title": "" }, { "docid": "cdfa000783f63828a792026292f5a6de", "score": "0.45377466", "text": "def custom_settings():\n\n return {}", "title": "" }, { "docid": "0a6295ee1a3561b7f9e09a90b3674070", "score": "0.452671", "text": "def module_config_template():\n\n d = {\n \"GCEBurnRatePublisher\": {\n \"module\": \"modules.GCE.publishers.GCEBurnRate_publisher\",\n \"name\": \"GCEBurnRatePublisher\",\n },\n }\n print(\"Entry in channel configuration\")\n pprint.pprint(d)\n print(\"where\")\n print(\"\\t name - name of the class to be instantiated by task manager\")\n print(\"\\t publish_to_graphite - publish to graphite if True\")\n print(\"\\t graphite_host - graphite host name\")", "title": "" }, { "docid": "04a52f326eb403379e6f880c201cd151", "score": "0.45189866", "text": "def process_schema(self):\n\n specs_df = self.get_schema_spec()\n\n # compose copo schema from cg-core spec\n df = specs_df.T.copy()\n df[\"ref\"] = list(df.index)\n\n df[\"id\"] = df['COPO_ID'].apply(lambda x: \".\".join((\"copo\", \"cgCore\", x)))\n df[\"label\"] = df['LABEL']\n df[\"help_tip\"] = df['HELP_TIP']\n df[\"dependency\"] = df['DEPENDENCY']\n df[\"control\"] = df['COPO_CONTROL']\n df[\"stage_id\"] = df['Wizard_Stage_ID']\n df[\"target_repo\"] = df['REPO']\n df[\"prefix\"] = df['REPO_PREFIX']\n df[\"data_maxItems\"] = -1\n\n # set max item for lookup control\n temp_df_1 = df[(df['control'] == 'copo-lookup2') & (df['TYPE'] == '1')]\n if len(temp_df_1):\n df.loc[temp_df_1.index, 'data_maxItems'] = 1\n\n # set cardinality\n df[\"type\"] = df['TYPE'].replace({'1': 
'string', 'm': 'array'})\n\n # set data source for relevant controls\n df['data_source'] = np.where(\n df['control'].isin(['copo-lookup2', 'copo-multi-select2', 'copo-button-list', 'copo-single-select']),\n df['COPO_DATA_SOURCE'],\n '')\n\n # reset 'type' to string for select2 controls\n temp_df_1 = df[df['control'].isin(['copo-lookup2', 'copo-multi-select2', 'copo-single-select', 'copo-select2'])]\n df.loc[temp_df_1.index, 'type'] = 'string'\n\n filtered_columns = [\"ref\", \"id\", \"label\", \"help_tip\", \"control\", \"type\", \"stage_id\", \"data_source\",\n \"data_maxItems\", \"dependency\", \"target_repo\", \"prefix\"]\n\n df = df.loc[:, filtered_columns]\n\n df[\"required\"] = False # this will be set later\n df[\"field_constraint\"] = \"optional\" # this will be set later\n\n schema_list = df.to_dict('records')\n\n # update schema in file\n cg_schema = d_utils.json_to_pytype(self.path_to_json)\n cg_schema['properties'] = schema_list\n\n with open(self.path_to_json, 'w') as fout:\n json.dump(cg_schema, fout)\n\n return True", "title": "" }, { "docid": "3d52b6a46e263e1519450be851d5c2a1", "score": "0.45179266", "text": "def __init__(self):\r\n self.label = \"Registro SAI\"\r\n self.alias = \"Registro SAI\"\r\n\r\n # List of tool classes associated with this toolbox\r\n self.tools = [Generate_data,Filters,Generate_Data_Compatible]", "title": "" }, { "docid": "c3171d6332ec12769930e42e2f88049d", "score": "0.45158106", "text": "def generate(env):\r\n try:\r\n bld = env['BUILDERS']['Slicec']\r\n except KeyError:\r\n bld = SlicecBuilder\r\n env['BUILDERS']['Slicec'] = bld\r\n \r\n env['SLICEC'] = env.Detect(protocs) or '/opt/Ice-3.3/bin/slice2cpp'\r\n env['SLICECFLAGS'] = SCons.Util.CLVar('')\r\n env['SLICECSLICEPATH'] = SCons.Util.CLVar('')\r\n env['SLICECCOM'] = '$SLICEC ${[\"-I%s\"%x for x in SLICECSLICEPATH]} $SLICECFLAGS --output-dir ${SOURCE.dir} -DA ${SOURCE}'\r\n env['SLICECOUTDIR'] = '${SOURCE.dir}'\r\n env['SLICECPYTHONOUTDIR'] = None #\"python\"\r\n env['SLICECSRCSUFFIX'] = '.ice'", "title": "" }, { "docid": "a4a554b59b0628861e2a1083ad768660", "score": "0.4512426", "text": "def do_scheme():\n\n desc = (\"Enable modular inputs to collect Kafka topic data. Use only if \"\n \"managing inputs manually from individual forwarders. 
\"\n \"See documentation for details.\")\n print \"\"\"\n <scheme>\n <title>Splunk Add-on for {}</title>\n <description>{}</description>\n <use_external_validation>true</use_external_validation>\n <streaming_mode>xml</streaming_mode>\n <use_single_instance>{}</use_single_instance>\n <endpoint>\n <args>\n <arg name=\"name\">\n <title>Kafka Data Input Name</title>\n </arg>\n <arg name=\"kafka_cluster\" required_on_create=\"true\">\n <title>Kafka cluster</title>\n </arg>\n <arg name=\"kafka_topic\" required_on_create=\"true\">\n <title>Kafka topic name</title>\n </arg>\n <arg name=\"kafka_partition\" required_on_create=\"false\">\n <title>Kafka partitions</title>\n </arg>\n <arg name=\"kafka_partition_offset\" required_on_create=\"true\">\n <title>Kafka partition offset</title>\n </arg>\n <arg name=\"kafka_topic_group\" required_on_create=\"false\">\n <title>Kafka topic group</title>\n </arg>\n </args>\n </endpoint>\n </scheme>\n \"\"\".format(c.ta_short_name, desc, kcdl.use_single_instance())", "title": "" }, { "docid": "80d027eeee4ba611537d9977e2cc00fa", "score": "0.45083037", "text": "def define(cls, spec):\n super().define(spec)\n spec.inputs['protocol'].valid_type = ChoiceType(('fast', 'moderate', 'precise', 'verification-PBE-v1'))\n spec.inputs['spin_type'].valid_type = ChoiceType((SpinType.NONE, SpinType.COLLINEAR))\n spec.inputs['relax_type'].valid_type = ChoiceType(\n (RelaxType.NONE, RelaxType.POSITIONS, RelaxType.POSITIONS_CELL, RelaxType.POSITIONS_SHAPE)\n )\n spec.inputs['electronic_type'].valid_type = ChoiceType((ElectronicType.METAL, ElectronicType.INSULATOR))\n spec.inputs['engines']['relax']['code'].valid_type = CodeType('siesta.siesta')", "title": "" }, { "docid": "e78c7496b25f7ad24243b7aea4399756", "score": "0.4506888", "text": "def _emit_minerva_basics(self, emit):\n\n\n emit(\"#ifndef read_csr\")\n emit(\"#define read_csr(reg) ({ unsigned long __tmp; \\\\\")\n emit(\" asm volatile (\\\"csrr %0, \\\" #reg : \\\"=r\\\"(__tmp)); \\\\\")\n emit(\" __tmp; })\")\n emit(\"#endif\")\n emit(\"\")\n emit(\"#ifndef write_csr\")\n emit(\"#define write_csr(reg, val) ({ \\\\\")\n emit(\" asm volatile (\\\"csrw \\\" #reg \\\", %0\\\" :: \\\"rK\\\"(val)); })\")\n emit(\"#endif\")\n emit(\"\")\n emit(\"#ifndef set_csr\")\n emit(\"#define set_csr(reg, bit) ({ unsigned long __tmp; \\\\\")\n emit(\" asm volatile (\\\"csrrs %0, \\\" #reg \\\", %1\\\" : \\\"=r\\\"(__tmp) : \\\"rK\\\"(bit)); \\\\\")\n emit(\" __tmp; })\")\n emit(\"#endif\")\n emit(\"\")\n emit(\"#ifndef clear_csr\")\n emit(\"#define clear_csr(reg, bit) ({ unsigned long __tmp; \\\\\")\n emit(\" asm volatile (\\\"csrrc %0, \\\" #reg \\\", %1\\\" : \\\"=r\\\"(__tmp) : \\\"rK\\\"(bit)); \\\\\")\n emit(\" __tmp; })\")\n emit(\"#endif\")\n emit(\"\")\n\n emit(\"#ifndef MSTATUS_MIE\")\n emit(\"#define MSTATUS_MIE 0x00000008\")\n emit(\"#endif\")\n emit(\"\")\n\n emit(\"//\")\n emit(\"// Minerva headers\")\n emit(\"//\")\n emit(\"\")\n emit(\"static inline uint32_t irq_getie(void)\")\n emit(\"{\")\n emit(\" return (read_csr(mstatus) & MSTATUS_MIE) != 0;\")\n emit(\"}\")\n emit(\"\")\n emit(\"static inline void irq_setie(uint32_t ie)\")\n emit(\"{\")\n emit(\" if (ie) {\")\n emit(\" set_csr(mstatus, MSTATUS_MIE);\")\n emit(\" } else {\")\n emit(\" clear_csr(mstatus, MSTATUS_MIE);\")\n emit(\" }\")\n emit(\"}\")\n emit(\"\")\n emit(\"static inline uint32_t irq_getmask(void)\")\n emit(\"{\")\n emit(\" return read_csr(0x330);\")\n emit(\"}\")\n emit(\"\")\n emit(\"static inline void irq_setmask(uint32_t value)\")\n emit(\"{\")\n emit(\" 
write_csr(0x330, value);\")\n emit(\"}\")\n emit(\"\")\n emit(\"static inline uint32_t pending_irqs(void)\")\n emit(\"{\")\n emit(\" return read_csr(0x360);\")\n emit(\"}\")\n emit(\"\")", "title": "" }, { "docid": "9c2f99476e3ee08bd1a1a09da5d69b06", "score": "0.45065406", "text": "def build(self):\n\n @click.group()\n @click.option('--api-key', help='Your SignalFx API key')\n @pass_config\n def cli(ctx, api_key):\n ctx.resources = self.resources\n ctx.api_key = api_key\n\n @cli.command()\n @click.option('-f', '--force', type=bool, is_flag=True, default=False,\n help='Force the creation of new resources')\n @click.option('-i', '--interactive', type=bool, is_flag=True, default=False,\n help='Interactive mode of creating new resources')\n @click.option('-d', '--dry-run', type=bool, is_flag=True, default=False,\n help='Print the configuration that would be sent to SignalFx')\n @pass_config\n def create(ctx, force, interactive, dry_run):\n for resource in ctx.resources:\n res = invoke(resource, 'create', ctx.api_key,\n force=force, interactive=interactive, dry_run=dry_run)\n pp_json(res)\n\n @cli.command()\n @click.option('--name', type=str, nargs=1, help='New Dashboard name')\n @click.option('--description', type=str, nargs=1,\n help='New Dashboard description')\n @click.option('-d', '--dry-run', type=bool, is_flag=True, default=False,\n help='Print the configuration that would be sent to SignalFx')\n @pass_config\n def update(ctx, name, description, dry_run):\n for resource in ctx.resources:\n res = invoke(resource, 'update', ctx.api_key,\n name=name, description=description, dry_run=dry_run)\n pp_json(res)\n\n @cli.command()\n @pass_config\n def read(ctx):\n for resource in ctx.resources:\n click.echo(invoke(resource, 'read', ctx.api_key))\n\n @cli.command()\n @pass_config\n def delete(ctx):\n for resource in ctx.resources:\n click.echo(invoke(resource, 'delete', ctx.api_key))\n\n return cli", "title": "" }, { "docid": "0dd02e54f74aa36c17b01887f2e17f64", "score": "0.4505322", "text": "def test_custom_css(self, app, sysadmin_env):\n # current tagline\n intro_response_html = BeautifulSoup(app.get(\"/\").body)\n style_tag = intro_response_html.select(\"head style\")\n assert len(style_tag) == 0\n # set new tagline css\n url = url_for(u\"admin.config\")\n form = {\n \"ckan.site_custom_css\": \"body {background-color:red}\",\n \"save\": \"\",\n }\n app.post(url, data=form, environ_overrides=sysadmin_env)\n\n # new tagline not visible yet\n new_intro_response_html = BeautifulSoup(app.get(\"/\").body)\n style_tag = new_intro_response_html.select(\"head style\")\n assert len(style_tag) == 1\n assert style_tag[0].string.strip() == \"body {background-color:red}\"\n\n # reset config value\n _reset_config(app)\n reset_intro_response_html = BeautifulSoup(app.get(\"/\").body)\n style_tag = reset_intro_response_html.select(\"head style\")\n assert len(style_tag) == 0", "title": "" }, { "docid": "45da1bae32c720f2febf8e0271e558ba", "score": "0.45052183", "text": "def _generate_headlines(self):\n includes = set()\n for include in self._ast.usertype_includes:\n includes.add(include)\n for include in self._pybind11_only_includes:\n includes.add(include)\n yield '#include \"third_party/pybind11/include/pybind11/complex.h\"'\n yield '#include \"third_party/pybind11/include/pybind11/functional.h\"'\n yield '#include \"third_party/pybind11/include/pybind11/native_enum.h\"'\n yield '#include \"third_party/pybind11/include/pybind11/operators.h\"'\n yield '#include 
\"third_party/pybind11/include/pybind11/smart_holder.h\"'\n yield '#include \"third_party/pybind11/include/pybind11/stl.h\"'\n yield '#include \"third_party/pybind11/include/pybind11/type_caster_pyobject_ptr.h\"' # pylint: disable=long-line\n yield ''\n yield '// See pybind11_protobuf/proto_caster_impl.h'\n yield '#if !defined(PYBIND11_PROTOBUF_UNSAFE)'\n yield I + '#define PYBIND11_PROTOBUF_UNSAFE 1'\n yield '#endif'\n yield ''\n for include in includes:\n yield f'#include \"{include}\"'\n yield f'#include \"{self._header_path}\"'\n yield '#include \"clif/pybind11/clif_type_casters.h\"'\n yield '#include \"clif/pybind11/runtime.h\"'\n yield '#include \"clif/pybind11/type_casters.h\"'\n yield '#include \"third_party/pybind11_protobuf/native_proto_caster.h\"'\n yield ''\n yield 'namespace py = pybind11;'\n yield ''", "title": "" }, { "docid": "8e1e11ccd8b37297498d1339caabb294", "score": "0.45036557", "text": "def codegen_reload_data():\n return {\n \"package\": u\"fn_soar_utils\",\n \"message_destinations\": [u\"fn_soar_utils\"],\n \"functions\": [u\"soar_utils_artifact_hash\", u\"soar_utils_attachment_hash\", u\"soar_utils_attachment_to_base64\", u\"soar_utils_attachment_zip_extract\", u\"soar_utils_attachment_zip_list\", u\"soar_utils_base64_to_artifact\", u\"soar_utils_base64_to_attachment\", u\"soar_utils_close_incident\", u\"soar_utils_create_incident\", u\"soar_utils_get_contact_info\", u\"soar_utils_search_incidents\", u\"soar_utils_soar_search\", u\"soar_utils_string_to_attachment\"],\n \"workflows\": [u\"example_soar_utilities_artifact_attachment_to_base64\", u\"example_soar_utilities_artifact_hash\", u\"example_soar_utilities_attachment_hash\", u\"example_soar_utilities_attachment_to_base64\", u\"example_soar_utilities_close_incident\", u\"example_soar_utilities_create_incident\", u\"example_soar_utilities_get_incident_contact_info\", u\"example_soar_utilities_get_task_contact_info\", u\"example_soar_utilities_search_incidents\", u\"example_soar_utilities_soar_search\", u\"example_soar_utilities_string_to_attachment\", u\"example_soar_utilities_zip_extract\", u\"example_soar_utilities_zip_extract_to_artifact\", u\"example_soar_utilities_zip_list\"],\n \"actions\": [u\"Example: SOAR Utilities (Artifact) Attachment to Base64\", u\"Example: SOAR Utilities Artifact Hash\", u\"Example: SOAR Utilities Attachment Hash\", u\"Example: SOAR Utilities Attachment to Base64\", u\"Example: SOAR Utilities Close Incident\", u\"Example: SOAR Utilities Create Incident\", u\"Example: SOAR Utilities Get Incident Contact Info\", u\"Example: SOAR Utilities Get Task Contact Info\", u\"Example: SOAR Utilities Search Incidents\", u\"Example: SOAR Utilities SOAR Search\", u\"Example: SOAR Utilities String to Attachment\", u\"Example: SOAR Utilities Zip Extract\", u\"Example: SOAR Utilities Zip Extract to Artifact\", u\"Example: SOAR Utilities Zip List\"],\n \"incident_fields\": [],\n \"incident_artifact_types\": [],\n \"incident_types\": [],\n \"datatables\": [],\n \"automatic_tasks\": [],\n \"scripts\": [],\n \"playbooks\": []\n }", "title": "" }, { "docid": "262991656a7e040c7b4a4e364ca41e4f", "score": "0.45007777", "text": "def customization(self, customization):\n\n self._customization = customization", "title": "" }, { "docid": "846c2cd8ef84c6608ab8d8efdb938ab9", "score": "0.45004988", "text": "def get_custom_types() -> Dict[str, List[ResourceFieldConfig]]:\n types = {}\n types_configs: Dict[str, Dict[str, Dict[str, Any]]] = yaml.load_file(\n get_path(\"types.yaml\")\n )\n for name, fields in 
types_configs.items():\n resolved_fields = []\n for field_name, field_config in fields.items():\n # print(f\"Resolving type field {field_name!r}\")\n # First replace whitespace from keys with underscores\n field_config = replace_whitespace_in_keys(field_config)\n # Then resolve synonyms\n field_config = resolve_synonyms(field_config)\n # Then convert the dict into a ResourceFieldConfig\n field = ResourceFieldConfig(name=field_name, **field_config)\n # Raise error if this field's type is custom\n if field.type not in NATIVE_FIELD_TYPES:\n raise ResourceFieldConfigError(\n f\"In type {name!r}: fields of custom types should be of native types only. Field {field.name!r} has type {field.type!r}.\"\n )\n # Then resolve field name shortcuts\n field = resolve_field_name_shortcuts(field)\n # Then resolve positive\n field = resolve_config_positive(field)\n # Append to the fields list\n resolved_fields.append(field)\n\n types[name] = resolved_fields\n\n return types", "title": "" }, { "docid": "27aff04de3cad88668ebdf763c57fa12", "score": "0.44997355", "text": "def get_description():\n desc = {\"description\": __doc__, \"data\": True, \"nass\": True}\n desc[\"arguments\"] = [\n {\n \"type\": \"year\",\n \"min\": 1981,\n \"default\": utc().year - 1,\n \"name\": \"year\",\n \"label\": \"Select year to display\",\n },\n {\n \"type\": \"csector\",\n \"name\": \"csector\",\n \"default\": \"IA\",\n \"label\": \"Select state/sector\",\n },\n {\n \"type\": \"select\",\n \"label\": \"Label Values?\",\n \"default\": \"yes\",\n \"options\": PDICT2,\n \"name\": \"ilabel\",\n },\n dict(type=\"cmap\", name=\"cmap\", default=\"BrBG\", label=\"Color Ramp:\"),\n ]\n return desc", "title": "" }, { "docid": "3e1e0408d10efe3d5dda8aa4f4643747", "score": "0.4495328", "text": "def create_config() -> str:\n return \"\"\"[saturnin.recipe]\nrecipe_type = bundle\nexecution_mode = normal\ndescription = Prints a text file on screen with optional syntax highlight.\napplication = 826ecaca-d3b6-11ed-97b5-5c879cc92822\n\n[bundle]\nagents = reader, writer\n\n[args]\nfilename = stdin\ncharset = utf-8\n\n[reader]\nagent = 936d2670-93d8-5c45-84a7-b8dbc799ad97\npipe = pipe-1\npipe_address = inproc://${pipe}\npipe_mode = bind\npipe_format = text/plain;charset=utf-8\nfilename = ${args:filename}\nfile_format = text/plain;charset=${args:charset}\n\n[writer]\nagent = 4e606fdf-3fa9-5d18-a714-9448a8085aab\npipe = pipe-1\npipe_address = inproc://${pipe}\npipe_mode = connect\npipe_format = text/plain;charset=utf-8\nfilename = stdout\nfile_format = text/plain;charset=utf-8\nfile_mode = write\n\"\"\"", "title": "" }, { "docid": "b730ef9f4c4158cf9ca55f290632ff1b", "score": "0.44932535", "text": "def install_config(self):\n\n # setup configured dirs\n conda.makedirs(os.path.join(self.prefix, 'var', 'cache', 'adagucserver'))\n\n # generate config\n result = templ_autoresource.render(**self.options)\n output = os.path.join(self.prefix, 'etc', 'adagucserver', 'adaguc.autoresource.xml')\n conda.makedirs(os.path.dirname(output))\n \n try:\n os.remove(output)\n except OSError:\n pass\n\n with open(output, 'wt') as fp:\n fp.write(result)\n return [output]", "title": "" }, { "docid": "0bf70c0dd96b1e2cc9cdf2a8f30c2497", "score": "0.44873747", "text": "def generate_plugin_after_install(output_file):\n\n import mpi_is_sw.brain_connectivity.plugin as plugin_main\n\n file_to_process = inspect.getsourcefile(plugin_main)\n generatePythonFilterFromFiles(script_file=file_to_process, output_file=output_file)", "title": "" }, { "docid": 
"058e5b1489035768e596fc6dfa455edd", "score": "0.44812745", "text": "def customize_grammar_rules(self, tokens, customize):\n\n is_pypy = False\n\n # For a rough break out on the first word. This may\n # include instructions that don't need customization,\n # but we'll do a finer check after the rough breakout.\n customize_instruction_basenames = frozenset(\n ('BUILD', 'CALL', 'CONTINUE', 'DELETE', 'GET',\n 'JUMP', 'LOAD', 'LOOKUP', 'MAKE',\n 'RAISE', 'UNPACK'))\n\n # Opcode names in the custom_ops_seen set have rules that get added\n # unconditionally and the rules are constant. So they need to be done\n # only once and if we see the opcode a second we don't have to consider\n # adding more rules.\n #\n # Note: BUILD_TUPLE_UNPACK_WITH_CALL gets considered by\n # default because it starts with BUILD. So we'll set to ignore it from\n # the start.\n custom_ops_seen = set(('BUILD_TUPLE_UNPACK_WITH_CALL',))\n\n # In constrast to custom_ops_seen, seen_xxx rules here are part of some\n # other rule; so if we see them a second time we still have to loop\n # over customization\n seen_LOAD_BUILD_CLASS = False\n seen_GET_AWAITABLE_YIELD_FROM = False\n\n # Loop over instructions adding custom grammar rules based on\n # a specific instruction seen.\n\n if 'PyPy' in customize:\n is_pypy = True\n self.addRule(\"\"\"\n stmt ::= assign3_pypy\n stmt ::= assign2_pypy\n assign3_pypy ::= expr expr expr store store store\n assign2_pypy ::= expr expr store store\n return_if_lambda ::= RETURN_END_IF_LAMBDA\n stmt ::= conditional_lambda\n conditional_lambda ::= expr jmp_false expr return_if_lambda\n return_lambda LAMBDA_MARKER\n \"\"\", nop_func)\n\n has_get_iter_call_function1 = False\n n = len(tokens)\n max_branches = 0\n for i, token in enumerate(tokens):\n if token == 'GET_ITER' and i < n-2 and self.call_fn_name(tokens[i+1]) == 'CALL_FUNCTION_1':\n has_get_iter_call_function1 = True\n max_branches += 1\n elif (token == 'GET_AWAITABLE' and i < n-3\n and tokens[i+1] == 'LOAD_CONST' and tokens[i+2] == 'YIELD_FROM'):\n max_branches += 1\n seen_GET_AWAITABLE_YIELD_FROM = True\n if max_branches > 2:\n break\n\n for i, token in enumerate(tokens):\n opname = token.kind\n\n # Do a quick breakout before testing potentially\n # each of the dozen or so instruction in if elif.\n if (opname[:opname.find('_')] not in customize_instruction_basenames\n or opname in custom_ops_seen):\n continue\n\n opname_base = opname[:opname.rfind('_')]\n # The order of opname listed is roughly sorted below\n if opname_base == 'BUILD_CONST_KEY_MAP':\n # This is in 3.6+\n kvlist_n = 'expr ' * (token.attr)\n rule = \"dict ::= %sLOAD_CONST %s\" % (kvlist_n, opname)\n self.addRule(rule, nop_func)\n elif opname.startswith('BUILD_LIST_UNPACK'):\n v = token.attr\n rule = ('build_list_unpack ::= ' + 'expr1024 ' * int(v//1024) +\n 'expr32 ' * int((v//32) % 32) +\n 'expr ' * (v % 32) + opname)\n self.addRule(rule, nop_func)\n rule = 'expr ::= build_list_unpack'\n self.addRule(rule, nop_func)\n elif opname_base == 'BUILD_MAP':\n kvlist_n = \"kvlist_%s\" % token.attr\n if opname == 'BUILD_MAP_n':\n # PyPy sometimes has no count. 
Sigh.\n rule = ('dictcomp_func ::= BUILD_MAP_n LOAD_FAST FOR_ITER store '\n 'comp_iter JUMP_BACK RETURN_VALUE RETURN_LAST')\n self.add_unique_rule(rule, 'dictomp_func', 1, customize)\n\n kvlist_n = 'kvlist_n'\n rule = 'kvlist_n ::= kvlist_n kv3'\n self.add_unique_rule(rule, 'kvlist_n', 0, customize)\n rule = 'kvlist_n ::='\n self.add_unique_rule(rule, 'kvlist_n', 1, customize)\n rule = \"dict ::= BUILD_MAP_n kvlist_n\"\n elif self.version >= 3.5:\n if opname != 'BUILD_MAP_WITH_CALL':\n if opname == 'BUILD_MAP_UNPACK':\n rule = kvlist_n + ' ::= ' + 'expr ' * (token.attr*2)\n self.add_unique_rule(rule, opname, token.attr, customize)\n rule = 'dict_entry ::= ' + 'expr ' * (token.attr*2)\n self.add_unique_rule(rule, opname, token.attr, customize)\n rule = 'dict ::= ' + 'dict_entry ' * token.attr\n self.add_unique_rule(rule, opname, token.attr, customize)\n rule = ('unmap_dict ::= ' +\n ('dict ' * token.attr) +\n 'BUILD_MAP_UNPACK')\n else:\n rule = kvlist_n + ' ::= ' + 'expr ' * (token.attr*2)\n self.add_unique_rule(rule, opname, token.attr, customize)\n rule = \"dict ::= %s %s\" % (kvlist_n, opname)\n else:\n rule = kvlist_n + ' ::= ' + 'expr expr STORE_MAP ' * token.attr\n self.add_unique_rule(rule, opname, token.attr, customize)\n rule = \"dict ::= %s %s\" % (opname, kvlist_n)\n self.add_unique_rule(rule, opname, token.attr, customize)\n elif opname.startswith('BUILD_MAP_UNPACK_WITH_CALL'):\n v = token.attr\n rule = ('build_map_unpack_with_call ::= ' + 'expr1024 ' * int(v//1024) +\n 'expr32 ' * int((v//32) % 32) +\n 'expr ' * (v % 32) + opname)\n elif opname_base in ('BUILD_LIST', 'BUILD_SET', 'BUILD_TUPLE'):\n v = token.attr\n\n is_LOAD_CLOSURE = False\n if opname_base == 'BUILD_TUPLE':\n # If is part of a \"load_closure\", then it is not part of a\n # \"list\".\n is_LOAD_CLOSURE = True\n for j in range(v):\n if tokens[i-j-1].kind != 'LOAD_CLOSURE':\n is_LOAD_CLOSURE = False\n break\n if is_LOAD_CLOSURE:\n rule = ('load_closure ::= %s%s' % (('LOAD_CLOSURE ' * v), opname))\n self.add_unique_rule(rule, opname, token.attr, customize)\n if not is_LOAD_CLOSURE or v == 0:\n collection = opname_base[opname_base.find('_')+1:].lower()\n rule = (('%s ::= ' % collection) + 'expr1024 ' * int(v//1024) +\n 'expr32 ' * int((v//32) % 32) +\n 'expr ' * (v % 32) + opname)\n self.add_unique_rules([\n 'expr ::= %s' % collection,\n rule], customize)\n continue\n elif opname_base == 'BUILD_SLICE':\n if token.attr == 2:\n self.add_unique_rules([\n 'expr ::= build_slice2',\n 'build_slice2 ::= expr expr BUILD_SLICE_2'\n ], customize)\n else:\n assert token.attr == 3, \"BUILD_SLICE value must be 2 or 3; is %s\" % v\n self.add_unique_rules([\n 'expr ::= build_slice3',\n 'build_slice3 ::= expr expr expr BUILD_SLICE_3',\n ], customize)\n elif (opname in frozenset(('CALL_FUNCTION',\n 'CALL_FUNCTION_EX',\n 'CALL_FUNCTION_EX_KW',\n 'CALL_FUNCTION_VAR',\n 'CALL_FUNCTION_VAR_KW'))\n or opname.startswith('CALL_FUNCTION_KW')):\n self.custom_classfunc_rule(opname, token, customize,\n seen_LOAD_BUILD_CLASS,\n seen_GET_AWAITABLE_YIELD_FROM, tokens[i+1])\n elif opname_base == 'CALL_METHOD':\n # PyPy only - DRY with parse2\n\n args_pos, args_kw = self.get_pos_kw(token)\n\n # number of apply equiv arguments:\n nak = ( len(opname_base)-len('CALL_METHOD') ) // 3\n rule = ('call ::= expr ' +\n ('pos_arg ' * args_pos) +\n ('kwarg ' * args_kw) +\n 'expr ' * nak + opname)\n self.add_unique_rule(rule, opname, token.attr, customize)\n elif opname == 'CONTINUE':\n self.addRule('continue ::= CONTINUE', nop_func)\n 
custom_ops_seen.add(opname)\n elif opname == 'CONTINUE_LOOP':\n self.addRule('continue ::= CONTINUE_LOOP', nop_func)\n custom_ops_seen.add(opname)\n elif opname == 'DELETE_ATTR':\n self.addRule('del_stmt ::= expr DELETE_ATTR', nop_func)\n custom_ops_seen.add(opname)\n elif opname == 'DELETE_DEREF':\n self.addRule(\"\"\"\n stmt ::= del_deref_stmt\n del_deref_stmt ::= DELETE_DEREF\n \"\"\", nop_func)\n custom_ops_seen.add(opname)\n elif opname == 'DELETE_SUBSCR':\n self.addRule(\"\"\"\n del_stmt ::= delete_subscr\n delete_subscr ::= expr expr DELETE_SUBSCR\n \"\"\", nop_func)\n custom_ops_seen.add(opname)\n elif opname == 'GET_ITER':\n self.addRule(\"\"\"\n expr ::= get_iter\n attribute ::= expr GET_ITER\n \"\"\", nop_func)\n custom_ops_seen.add(opname)\n elif opname == 'JUMP_IF_NOT_DEBUG':\n v = token.attr\n self.addRule(\"\"\"\n stmt ::= assert_pypy\n stmt ::= assert2_pypy\", nop_func)\n assert_pypy ::= JUMP_IF_NOT_DEBUG assert_expr jmp_true\n LOAD_ASSERT RAISE_VARARGS_1 COME_FROM\n assert2_pypy ::= JUMP_IF_NOT_DEBUG assert_expr jmp_true\n LOAD_ASSERT expr CALL_FUNCTION_1\n RAISE_VARARGS_1 COME_FROM\n assert2_pypy ::= JUMP_IF_NOT_DEBUG assert_expr jmp_true\n LOAD_ASSERT expr CALL_FUNCTION_1\n RAISE_VARARGS_1 COME_FROM,\n \"\"\", nop_func)\n custom_ops_seen.add(opname)\n elif opname == 'LOAD_BUILD_CLASS':\n seen_LOAD_BUILD_CLASS = True\n self.custom_build_class_rule(opname, i, token, tokens, customize)\n elif opname == 'LOAD_CLASSDEREF':\n # Python 3.4+\n self.addRule(\"expr ::= LOAD_CLASSDEREF\", nop_func)\n custom_ops_seen.add(opname)\n elif opname == 'LOAD_CLASSNAME':\n self.addRule(\"expr ::= LOAD_CLASSNAME\", nop_func)\n custom_ops_seen.add(opname)\n elif opname == 'LOAD_DICTCOMP':\n if has_get_iter_call_function1:\n rule_pat = (\"dict_comp ::= LOAD_DICTCOMP %sMAKE_FUNCTION_0 expr \"\n \"GET_ITER CALL_FUNCTION_1\")\n self.add_make_function_rule(rule_pat, opname, token.attr, customize)\n # listcomp is a custom Python3 rule\n elif opname == 'LOAD_ATTR':\n self.addRule(\"\"\"\n expr ::= attribute\n attribute ::= expr LOAD_ATTR\n \"\"\", nop_func)\n custom_ops_seen.add(opname)\n elif opname == 'LOAD_LISTCOMP':\n self.add_unique_rule(\"expr ::= listcomp\", opname, token.attr, customize)\n elif opname == 'LOAD_SETCOMP':\n # Should this be generalized and put under MAKE_FUNCTION?\n if has_get_iter_call_function1:\n self.addRule(\"expr ::= set_comp\", nop_func)\n rule_pat = (\"set_comp ::= LOAD_SETCOMP %sMAKE_FUNCTION_0 expr \"\n \"GET_ITER CALL_FUNCTION_1\")\n self.add_make_function_rule(rule_pat, opname, token.attr, customize)\n elif opname == 'LOOKUP_METHOD':\n # A PyPy speciality - DRY with parse3\n self.addRule(\"\"\"\n expr ::= attribute\n attribute ::= expr LOOKUP_METHOD\n \"\"\",\n nop_func)\n custom_ops_seen.add(opname)\n elif opname.startswith('MAKE_CLOSURE'):\n # DRY with MAKE_FUNCTION\n # Note: this probably doesn't handle kwargs proprerly\n\n args_pos, args_kw, annotate_args = token.attr\n\n # FIXME: Fold test into add_make_function_rule\n if self.version < 3.3:\n j = 1\n else:\n j = 2\n if is_pypy or (i >= j and tokens[i-j] == 'LOAD_LAMBDA'):\n rule_pat = ('mklambda ::= %sload_closure LOAD_LAMBDA %%s%s' %\n ('pos_arg '* args_pos, opname))\n self.add_make_function_rule(rule_pat, opname, token.attr, customize)\n\n if has_get_iter_call_function1:\n rule_pat = (\"generator_exp ::= %sload_closure load_genexpr %%s%s expr \"\n \"GET_ITER CALL_FUNCTION_1\" % ('pos_arg '* args_pos, opname))\n self.add_make_function_rule(rule_pat, opname, token.attr, customize)\n\n if 
has_get_iter_call_function1:\n if (is_pypy or (i >= j and tokens[i-j] == 'LOAD_LISTCOMP')):\n # In the tokens we saw:\n # LOAD_LISTCOMP LOAD_CONST MAKE_FUNCTION (>= 3.3) or\n # LOAD_LISTCOMP MAKE_FUNCTION (< 3.3) or\n # and have GET_ITER CALL_FUNCTION_1\n # Todo: For Pypy we need to modify this slightly\n rule_pat = ('listcomp ::= %sload_closure LOAD_LISTCOMP %%s%s expr '\n 'GET_ITER CALL_FUNCTION_1' % ('pos_arg ' * args_pos, opname))\n self.add_make_function_rule(rule_pat, opname, token.attr, customize)\n if (is_pypy or (i >= j and tokens[i-j] == 'LOAD_SETCOMP')):\n rule_pat = ('set_comp ::= %sload_closure LOAD_SETCOMP %%s%s expr '\n 'GET_ITER CALL_FUNCTION_1' % ('pos_arg ' * args_pos, opname))\n self.add_make_function_rule(rule_pat, opname, token.attr, customize)\n if (is_pypy or (i >= j and tokens[i-j] == 'LOAD_DICTCOMP')):\n self.add_unique_rule('dict_comp ::= %sload_closure LOAD_DICTCOMP %s '\n 'expr GET_ITER CALL_FUNCTION_1' %\n ('pos_arg '* args_pos, opname),\n opname, token.attr, customize)\n\n if args_kw > 0:\n kwargs_str = 'kwargs1 '\n else:\n kwargs_str = ''\n\n # Note order of kwargs and pos args changed between 3.3-3.4\n if self.version <= 3.2:\n rule = ('mkfunc ::= %s%sload_closure LOAD_CONST kwargs %s'\n % (kwargs_str, 'expr ' * args_pos, opname))\n elif self.version == 3.3:\n rule = ('mkfunc ::= %s%sload_closure LOAD_CONST LOAD_CONST %s'\n % (kwargs_str, 'expr ' * args_pos, opname))\n elif self.version >= 3.4:\n rule = ('mkfunc ::= %s%s load_closure LOAD_CONST LOAD_CONST %s'\n % ('expr ' * args_pos, kwargs_str, opname))\n\n self.add_unique_rule(rule, opname, token.attr, customize)\n rule = ('mkfunc ::= %sload_closure load_genexpr %s'\n % ('pos_arg ' * args_pos, opname))\n self.add_unique_rule(rule, opname, token.attr, customize)\n\n if self.version < 3.4:\n rule = ('mkfunc ::= %sload_closure LOAD_CONST %s'\n % ('expr ' * args_pos, opname))\n self.add_unique_rule(rule, opname, token.attr, customize)\n\n pass\n elif opname_base.startswith('MAKE_FUNCTION'):\n # DRY with MAKE_CLOSURE\n if self.version >= 3.6:\n # The semantics of MAKE_FUNCTION in 3.6 are totally different from\n # before.\n args_pos, args_kw, annotate_args, closure = token.attr\n stack_count = args_pos + args_kw + annotate_args\n rule = ('mkfunc ::= %s%s%s%s' %\n ('expr ' * stack_count,\n 'load_closure ' * closure,\n 'LOAD_CONST ' * 2,\n opname))\n self.add_unique_rule(rule, opname, token.attr, customize)\n\n if has_get_iter_call_function1:\n rule_pat = (\"generator_exp ::= %sload_genexpr %%s%s expr \"\n \"GET_ITER CALL_FUNCTION_1\" % ('pos_arg '* args_pos, opname))\n self.add_make_function_rule(rule_pat, opname, token.attr, customize)\n rule_pat = (\"generator_exp ::= %sload_closure load_genexpr %%s%s expr \"\n \"GET_ITER CALL_FUNCTION_1\" % ('pos_arg '* args_pos, opname))\n self.add_make_function_rule(rule_pat, opname, token.attr, customize)\n if is_pypy or (i >= 2 and tokens[i-2] == 'LOAD_LISTCOMP'):\n rule_pat = (\"listcomp ::= %sLOAD_LISTCOMP %%s%s expr \"\n \"GET_ITER CALL_FUNCTION_1\" % ('expr ' * args_pos, opname))\n self.add_make_function_rule(rule_pat, opname, token.attr, customize)\n\n if is_pypy or (i >= 2 and tokens[i-2] == 'LOAD_LAMBDA'):\n rule_pat = ('mklambda ::= %s%sLOAD_LAMBDA %%s%s' %\n (('pos_arg '* args_pos),\n ('kwarg '* args_kw),\n opname))\n self.add_make_function_rule(rule_pat, opname, token.attr, customize)\n continue\n\n if self.version < 3.6:\n args_pos, args_kw, annotate_args = token.attr\n else:\n args_pos, args_kw, annotate_args, closure = token.attr\n\n if self.version < 3.3:\n 
j = 1\n else:\n j = 2\n\n if has_get_iter_call_function1:\n rule_pat = (\"generator_exp ::= %sload_genexpr %%s%s expr \"\n \"GET_ITER CALL_FUNCTION_1\" % ('pos_arg '* args_pos, opname))\n self.add_make_function_rule(rule_pat, opname, token.attr, customize)\n\n if is_pypy or (i >= j and tokens[i-j] == 'LOAD_LISTCOMP'):\n # In the tokens we saw:\n # LOAD_LISTCOMP LOAD_CONST MAKE_FUNCTION (>= 3.3) or\n # LOAD_LISTCOMP MAKE_FUNCTION (< 3.3) or\n # and have GET_ITER CALL_FUNCTION_1\n # Todo: For Pypy we need to modify this slightly\n rule_pat = (\"listcomp ::= %sLOAD_LISTCOMP %%s%s expr \"\n \"GET_ITER CALL_FUNCTION_1\" % ('expr ' * args_pos, opname))\n self.add_make_function_rule(rule_pat, opname, token.attr, customize)\n\n # FIXME: Fold test into add_make_function_rule\n if is_pypy or (i >= j and tokens[i-j] == 'LOAD_LAMBDA'):\n rule_pat = ('mklambda ::= %s%sLOAD_LAMBDA %%s%s' %\n (('pos_arg '* args_pos),\n ('kwarg '* args_kw),\n opname))\n self.add_make_function_rule(rule_pat, opname, token.attr, customize)\n\n if self.version < 3.3:\n # positional args after keyword args\n rule = ('mkfunc ::= kwargs %s%s %s' %\n ('pos_arg ' * args_pos, 'LOAD_CONST ',\n opname))\n elif self.version == 3.3:\n # positional args after keyword args\n rule = ('mkfunc ::= kwargs %s%s %s' %\n ('pos_arg ' * args_pos, 'LOAD_CONST '*2,\n opname))\n elif self.version > 3.5:\n # positional args before keyword args\n rule = ('mkfunc ::= %skwargs1 %s %s' %\n ('pos_arg ' * args_pos, 'LOAD_CONST '*2,\n opname))\n elif self.version > 3.3:\n # positional args before keyword args\n rule = ('mkfunc ::= %skwargs %s %s' %\n ('pos_arg ' * args_pos, 'LOAD_CONST '*2,\n opname))\n else:\n rule = ('mkfunc ::= kwargs %sexpr %s' %\n ('pos_arg ' * args_pos, opname))\n self.add_unique_rule(rule, opname, token.attr, customize)\n if opname.startswith('MAKE_FUNCTION_A'):\n if self.version >= 3.6:\n rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST LOAD_CONST %s' %\n (('pos_arg ' * (args_pos)),\n ('call ' * (annotate_args-1)), opname))\n self.add_unique_rule(rule, opname, token.attr, customize)\n rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST LOAD_CONST %s' %\n (('pos_arg ' * (args_pos)),\n ('annotate_arg ' * (annotate_args-1)), opname))\n if self.version >= 3.3:\n # Normally we remove EXTENDED_ARG from the opcodes, but in the case of\n # annotated functions can use the EXTENDED_ARG tuple to signal we have an annotated function.\n # Yes this is a little hacky\n rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST LOAD_CONST EXTENDED_ARG %s' %\n (('pos_arg ' * (args_pos)),\n ('call ' * (annotate_args-1)), opname))\n self.add_unique_rule(rule, opname, token.attr, customize)\n rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST LOAD_CONST EXTENDED_ARG %s' %\n (('pos_arg ' * (args_pos)),\n ('annotate_arg ' * (annotate_args-1)), opname))\n else:\n # See above comment about use of EXTENDED_ARG\n rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST EXTENDED_ARG %s' %\n (('pos_arg ' * (args_pos)),\n ('annotate_arg ' * (annotate_args-1)), opname))\n self.add_unique_rule(rule, opname, token.attr, customize)\n rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST EXTENDED_ARG %s' %\n (('pos_arg ' * (args_pos)),\n ('call ' * (annotate_args-1)), opname))\n self.addRule(rule, nop_func)\n elif opname == 'RETURN_VALUE_LAMBDA':\n self.addRule(\"\"\"\n return_lambda ::= ret_expr RETURN_VALUE_LAMBDA\n \"\"\", nop_func)\n custom_ops_seen.add(opname)\n elif opname == 'RAISE_VARARGS_0':\n self.addRule(\"\"\"\n stmt ::= 
raise_stmt0\n raise_stmt0 ::= RAISE_VARARGS_0\n \"\"\", nop_func)\n custom_ops_seen.add(opname)\n elif opname == 'RAISE_VARARGS_1':\n self.addRule(\"\"\"\n stmt ::= raise_stmt1\n raise_stmt1 ::= expr RAISE_VARARGS_1\n \"\"\", nop_func)\n custom_ops_seen.add(opname)\n elif opname == 'RAISE_VARARGS_2':\n self.addRule(\"\"\"\n stmt ::= raise_stmt2\n raise_stmt2 ::= expr expr RAISE_VARARGS_2\n \"\"\", nop_func)\n custom_ops_seen.add(opname)\n elif opname_base in ('UNPACK_EX',):\n before_count, after_count = token.attr\n rule = 'unpack ::= ' + opname + ' store' * (before_count + after_count + 1)\n self.addRule(rule, nop_func)\n elif opname_base in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):\n rule = 'unpack ::= ' + opname + ' store' * token.attr\n self.addRule(rule, nop_func)\n elif opname_base == 'UNPACK_LIST':\n rule = 'unpack_list ::= ' + opname + ' store' * token.attr\n self.addRule(rule, nop_func)\n self.check_reduce['aug_assign1'] = 'AST'\n self.check_reduce['aug_assign2'] = 'AST'\n self.check_reduce['while1stmt'] = 'noAST'\n self.check_reduce['while1elsestmt'] = 'noAST'\n self.check_reduce['annotate_tuple'] = 'noAST'\n self.check_reduce['kwarg'] = 'noAST'\n # FIXME: remove parser errors caused by the below\n # self.check_reduce['while1elsestmt'] = 'noAST'\n return", "title": "" }, { "docid": "9a9b7e39cc2c1912365a8cc4514f6be8", "score": "0.44733468", "text": "def createItem(*args):\n\n itemName = cmds.textFieldGrp('create_customname', q=True, text=True)\n itemCommand = cmds.textFieldGrp('create_customCommand', q=True, text=True)\n value = cmds.radioButtonGrp('create_customSourceRadioButton', q=True, select=True)\n if not itemName or not itemCommand or not value:\n raise UserWarning('You need to fill all the fields!')\n if value == 1:\n sourceType = 'python'\n else:\n sourceType = 'mel'\n cmds.deleteUI(WINDOWNAME, window=True)\n # create the custom item\n item = cmds.menuItem(parent=PARENT, label=itemName, command=itemCommand, sourceType=sourceType)\n SETTINGS.add(item, [itemName, itemCommand, sourceType])", "title": "" }, { "docid": "2572d0a59a354c6d76ce60f779033ca7", "score": "0.44714624", "text": "def extra_configuration_collection(self, configuration):\n aws_syncr_spec = AwsSyncrSpec()\n registered = {}\n directory = pkg_resources.resource_filename(\"aws_syncr\", \"option_spec\")\n\n for location in sorted(os.listdir(directory)):\n import_name = os.path.splitext(location)[0]\n if import_name != '__pycache__':\n try:\n args = imp.find_module(import_name, [directory])\n except ImportError as error:\n raise BadImport(directory=directory, importing=import_name, error=error)\n\n try:\n module = imp.load_module(import_name, *args)\n except SyntaxError as error:\n raise BadImport(directory=self.directory, importing=self.import_name, error=error)\n\n if hasattr(module, \"__register__\"):\n registered.update(module.__register__())\n\n configuration['__registered__'] = [name for _, name in sorted(registered.keys())]\n by_name = dict((r[1], registered[r]) for r in registered)\n for thing in ['aws_syncr', 'accounts', 'templates'] + list(by_name.keys()):\n def make_converter(thing):\n def converter(p, v):\n log.info(\"Converting %s\", p)\n meta = Meta(p.configuration, [(thing, \"\")])\n configuration.converters.started(p)\n if thing in by_name:\n return by_name[thing].normalise(meta, v)\n else:\n return getattr(aws_syncr_spec, \"{0}_spec\".format(thing)).normalise(meta, v)\n return converter\n configuration.add_converter(Converter(convert=make_converter(thing), convert_path=[thing]))", "title": "" }, { 
"docid": "142c8b7e8449b27a23b85e2596f34092", "score": "0.44704252", "text": "def install(self):\n ...", "title": "" }, { "docid": "e96f01e6a848c29bb16e88877bedcee9", "score": "0.44648287", "text": "def add_style_format(config: Config, name, verbose):\n if verbose:\n click.secho(f'Server: {config.url}', bold=True, fg='black')\n click.secho('\\tAdding new classification system style ... ', bold=False, fg='black')\n\n config.service.add_style_format(name=name)\n\n click.secho('\\tFinished!', bold=False, fg='black')\n\n else:\n config.service.add_style_format(name=name)", "title": "" }, { "docid": "1a34b31a3e8a36f92131b55c536ea796", "score": "0.44635814", "text": "def gen_reactions(self):\n model = self.model\n cell = self.knowledge_base.cell\n nucleus = model.compartments.get_one(id='n')\n mitochondrion = model.compartments.get_one(id='m')\n cytosol = model.compartments.get_one(id='c')\n\n polr_occupancy_width = self.options.get('polr_occupancy_width')\n ribosome_occupancy_width = self.options['ribosome_occupancy_width']\n transcription_unit = self.options['transcription_unit']\n rna_input_seq = self.options['rna_input_seq']\n\n self.submodel.framework = onto['WC:next_reaction_method'] \n\n # Get species involved in reaction\n metabolic_participants = ['atp', 'ctp', 'gtp', 'utp', 'ppi', \n 'amp', 'cmp', 'gmp', 'ump', 'h2o', 'h', 'adp', 'pi']\n metabolites = {}\n for met in metabolic_participants:\n met_species_type = model.species_types.get_one(id=met)\n metabolites[met] = {\n 'n': met_species_type.species.get_or_create(compartment=nucleus, model=model),\n 'm': met_species_type.species.get_or_create(compartment=mitochondrion, model=model)\n }\n\n ref_polr_width = wc_lang.Reference(\n model=model,\n title='Structure and mechanism of the RNA Polymerase II transcription machinery',\n author='Steven Hahn', \n year=2004,\n type=onto['WC:article'],\n publication='Nature Structural & Molecular Biology', \n volume='11',\n issue='5',\n pages='394-403'\n )\n ref_polr_width.id = 'ref_'+str(len(model.references))\n\n ref_polr_distribution = wc_lang.Reference(\n model=model,\n title='In vivo dynamics of RNA polymerase II transcription',\n author='Xavier Darzacq, Yaron Shav-Tal, Valeria de Turris, Yehuda Brody, '\n 'Shailesh M Shenoy, Robert D Phair, Robert H Singer', \n year=2007,\n type=onto['WC:article'],\n publication='Nature Structural & Molecular Biology', \n volume='14',\n pages='796-806'\n )\n ref_polr_distribution.id = 'ref_'+str(len(model.references))\n\n ref_ribo_width = wc_lang.Reference(\n model=model,\n title='Genome-wide analysis in vivo of translation with nucleotide resolution using ribosome profiling',\n author='Nicholas T Ingolia, Sina Ghaemmaghami, John R. S. Newman, Jonathan S. 
Weissman', \n year=2009,\n type=onto['WC:article'],\n publication='Science', \n volume='324',\n issue='5924',\n pages='218-223'\n )\n ref_ribo_width.id = 'ref_'+str(len(model.references)) \n\n print('Start generating transcription submodel...') \n \n # Create for each RNA polymerase a reaction of binding to non-specific site \n nuclear_genome_length = 0\n mitochondrial_genome_length = 0\n for chromosome in cell.species_types.get(__type=wc_kb.core.DnaSpeciesType):\n if 'M' in chromosome.id:\n mitochondrial_genome_length += len(chromosome.get_seq())\n else:\n nuclear_genome_length += len(chromosome.get_seq())\n self._mitochondrial_max_binding_sites = math.floor(\n mitochondrial_genome_length/polr_occupancy_width)\n self._nuclear_max_binding_sites = math.floor(\n nuclear_genome_length/polr_occupancy_width)\n\n self._total_polr = {}\n self._gene_bound_polr = {}\n rna_pol_pair = self.options.get('rna_pol_pair')\n for polr in set(rna_pol_pair.values()):\n\n self._gene_bound_polr[polr] = []\n \n if 'mito' in polr:\n transcription_compartment = mitochondrion\n genome_sites = self._mitochondrial_max_binding_sites\n else:\n transcription_compartment = nucleus\n genome_sites = self._nuclear_max_binding_sites\n \n polr_complex = model.species_types.get_one(name=polr)\n polr_complex_species = model.species.get_one(\n species_type=polr_complex, compartment=transcription_compartment)\n conc_free_polr = model.distribution_init_concentrations.get_one(\n species=polr_complex_species)\n self._total_polr[polr] = conc_free_polr.mean\n conc_free_polr.mean = math.floor(0.75*conc_free_polr.mean)\n conc_free_polr.comments = 'The free pool is estimated to be three quarters of the total concentration'\n conc_free_polr.references.append(ref_polr_distribution)\n \n polr_non_specific_binding_site_st = model.species_types.get_or_create(\n id='polr_non_specific_binding_site',\n name='non-specific binding site of RNA polymerase',\n type=onto['WC:pseudo_species'],\n )\n polr_non_specific_binding_site_st.structure = wc_lang.ChemicalStructure(\n empirical_formula = EmpiricalFormula(),\n molecular_weight = 0.,\n charge = 0)\n polr_non_specific_binding_site_species = model.species.get_or_create(\n species_type=polr_non_specific_binding_site_st, compartment=transcription_compartment)\n polr_non_specific_binding_site_species.id = polr_non_specific_binding_site_species.gen_id()\n\n conc_model = model.distribution_init_concentrations.get_or_create(\n species=polr_non_specific_binding_site_species,\n mean=genome_sites,\n units=unit_registry.parse_units('molecule'),\n comments='Set to genome length divided by {} bp to allow '\n 'queueing of RNA polymerase during transcription'.format(polr_occupancy_width),\n references=[ref_polr_width],\n )\n conc_model.id = conc_model.gen_id()\n\n polr_bound_non_specific_species_type = model.species_types.get_or_create(\n id='{}_bound_non_specific_site'.format(polr_complex.id),\n name='{}-bound non-specific site'.format(polr_complex.id),\n type=onto['WC:pseudo_species'], \n )\n polr_bound_non_specific_species_type.structure = wc_lang.ChemicalStructure(\n empirical_formula = polr_complex.structure.empirical_formula,\n molecular_weight = polr_complex.structure.molecular_weight,\n charge = polr_complex.structure.charge)\n polr_bound_non_specific_species = model.species.get_or_create(\n species_type=polr_bound_non_specific_species_type, compartment=transcription_compartment)\n polr_bound_non_specific_species.id = polr_bound_non_specific_species.gen_id()\n\n conc_model = 
model.distribution_init_concentrations.get_or_create(\n species=polr_bound_non_specific_species,\n mean=math.floor(self._total_polr[polr]*0.2475),\n units=unit_registry.parse_units('molecule'),\n comments='Approximately 24.75 percent of RNA polymerase is bound to non-specific site',\n references=[ref_polr_distribution])\n conc_model.id = conc_model.gen_id()\n\n ns_binding_reaction = model.reactions.create(\n submodel=self.submodel, id='non_specific_binding_{}'.format(polr_complex.id),\n name='non-specific binding of {} in {}'.format(polr, transcription_compartment.name),\n reversible=False)\n \n ns_binding_reaction.participants.append(\n polr_complex_species.species_coefficients.get_or_create(\n coefficient=-1))\n ns_binding_reaction.participants.append(\n polr_non_specific_binding_site_species.species_coefficients.get_or_create(\n coefficient=-1))\n ns_binding_reaction.participants.append(\n polr_bound_non_specific_species.species_coefficients.get_or_create(\n coefficient=1))\n \n # Create initiation and elongation reactions for each RNA\n init_el_rxn_no = 0\n transcribed_genes = [i for i in cell.loci.get(__type=wc_kb.eukaryote.GeneLocus) \\\n if i.transcripts]\n self._initiation_polr_species = {}\n self._elongation_modifier = {}\n self._allowable_queue_len = {} \n for gene in transcribed_genes:\n \n transcription_compartment = mitochondrion if 'M' in gene.polymer.id else nucleus\n translation_compartment = mitochondrion if 'M' in gene.polymer.id else cytosol\n\n len_add_rna = 0\n if len(gene.transcripts) == 1:\n rna_kb = gene.transcripts[0]\n add_seq = {} \n else:\n rna_kb = [i for i in gene.transcripts if i.id in transcription_unit][0]\n add_seq = {'A': 0, 'C': 0, 'G': 0, 'U': 0, 'len': 0}\n for add_transcript in transcription_unit[rna_kb.id]:\n len_add_rna += 1\n if add_transcript in gvar.transcript_ntp_usage:\n add_count = gvar.transcript_ntp_usage[add_transcript]\n else:\n if add_transcript in rna_input_seq:\n seq = rna_input_seq[add_transcript]\n else:\n seq = cell.species_types.get_one(id=add_transcript).get_seq() \n add_count = gvar.transcript_ntp_usage[add_transcript] = {\n 'A': seq.upper().count('A'),\n 'C': seq.upper().count('C'),\n 'G': seq.upper().count('G'),\n 'U': seq.upper().count('U'),\n 'len': len(seq),\n }\n add_seq = {k:v+add_count[k] for k,v in add_seq.items()}\n \n # Create initiation reaction\n polr_complex = model.species_types.get_one(name=rna_pol_pair[rna_kb.id])\n polr_complex_species = model.species.get_one(\n species_type=polr_complex, compartment=transcription_compartment)\n self._initiation_polr_species[rna_kb.id] = polr_complex_species\n \n polr_bound_non_specific_species_type = model.species_types.get_one(\n id='{}_bound_non_specific_site'.format(polr_complex.id))\n polr_bound_non_specific_species = model.species.get_one(\n species_type=polr_bound_non_specific_species_type, compartment=transcription_compartment)\n \n polr_non_specific_binding_site_st = model.species_types.get_one(\n id='polr_non_specific_binding_site')\n polr_non_specific_binding_site_species = model.species.get_one(\n species_type=polr_non_specific_binding_site_st, compartment=transcription_compartment)\n \n polr_binding_site_st = model.species_types.get_or_create(\n id='{}_binding_site'.format(gene.id),\n name='binding site of {}'.format(gene.name),\n type=onto['WC:pseudo_species'],\n )\n polr_binding_site_st.structure = wc_lang.ChemicalStructure(\n empirical_formula = EmpiricalFormula(),\n molecular_weight = 0.,\n charge = 0)\n polr_binding_site_species = model.species.get_or_create(\n 
species_type=polr_binding_site_st, compartment=transcription_compartment)\n polr_binding_site_species.id = polr_binding_site_species.gen_id()\n\n gene_seq = gene.get_seq()\n gene_len = len(gene_seq) + (add_seq['len'] if add_seq else 0)\n conc_model = model.distribution_init_concentrations.create(\n species=polr_binding_site_species,\n mean=math.floor(gene_len/polr_occupancy_width) + 1,\n units=unit_registry.parse_units('molecule'),\n comments='Set to gene length divided by {} bp to allow '\n 'queueing of RNA polymerase during transcription'.format(polr_occupancy_width),\n references=[ref_polr_width] \n )\n conc_model.id = conc_model.gen_id()\n self._allowable_queue_len[rna_kb.id] = (polr_binding_site_species, conc_model.mean)\n\n polr_bound_species_type = model.species_types.get_or_create(\n id='{}_bound_{}'.format(polr_complex.id, gene.id),\n name='{} bound {}'.format(polr_complex.name, gene.name),\n type=onto['WC:pseudo_species'], \n )\n polr_bound_species_type.structure = wc_lang.ChemicalStructure(\n empirical_formula = polr_complex.structure.empirical_formula,\n molecular_weight = polr_complex.structure.molecular_weight,\n charge = polr_complex.structure.charge)\n polr_bound_species = model.species.get_or_create(\n species_type=polr_bound_species_type, compartment=transcription_compartment)\n polr_bound_species.id = polr_bound_species.gen_id()\n self._elongation_modifier[rna_kb.id] = polr_bound_species\n self._gene_bound_polr[rna_pol_pair[rna_kb.id]].append(polr_bound_species)\n\n conc_model = model.distribution_init_concentrations.create(\n species=polr_bound_species,\n units=unit_registry.parse_units('molecule'),\n )\n conc_model.id = conc_model.gen_id()\n\n init_reaction = model.reactions.create(\n submodel=self.submodel, id='transcription_initiation_' + rna_kb.id,\n name='transcription initiation of ' + rna_kb.name,\n reversible=False, comments='Set to irreversible to model only the net flux')\n \n init_reaction.participants.append(\n polr_bound_non_specific_species.species_coefficients.get_or_create(\n coefficient=-1))\n init_reaction.participants.append(\n polr_binding_site_species.species_coefficients.get_or_create(\n coefficient=-1))\n init_reaction.participants.append(\n polr_bound_species.species_coefficients.get_or_create(\n coefficient=1))\n init_reaction.participants.append(\n polr_non_specific_binding_site_species.species_coefficients.get_or_create(\n coefficient=1))\n\n # Add ATP hydrolysis requirement for DNA melting and promoter escape by RNA polymerase II\n if 'RNA Polymerase II' in rna_pol_pair[rna_kb.id]:\n init_reaction.participants.append(metabolites['atp'][\n transcription_compartment.id].species_coefficients.get_or_create(\n coefficient=-2))\n init_reaction.participants.append(metabolites['h2o'][\n transcription_compartment.id].species_coefficients.get_or_create(\n coefficient=-2))\n init_reaction.participants.append(metabolites['adp'][\n transcription_compartment.id].species_coefficients.get_or_create(\n coefficient=2))\n init_reaction.participants.append(metabolites['pi'][\n transcription_compartment.id].species_coefficients.get_or_create(\n coefficient=2))\n init_reaction.participants.append(metabolites['h'][\n transcription_compartment.id].species_coefficients.get_or_create(\n coefficient=2))\n\n # Create elongation reaction\n rna_model = model.species_types.get_one(id=rna_kb.id).species[0]\n reaction = model.reactions.get_or_create(\n submodel=self.submodel, id='transcription_elongation_' + rna_kb.id,\n name='transcription elongation of ' + rna_kb.name,\n 
reversible=False, comments='Lumped reaction') \n\n if rna_kb.gene.strand == wc_kb.core.PolymerStrand.positive:\n pre_rna_seq = gene_seq.transcribe()\n else:\n pre_rna_seq = gene_seq.reverse_complement().transcribe()\n pre_rna_count = {\n 'A': pre_rna_seq.upper().count('A'),\n 'C': pre_rna_seq.upper().count('C'),\n 'G': pre_rna_seq.upper().count('G'),\n 'U': pre_rna_seq.upper().count('U'),\n 'N': pre_rna_seq.upper().count('N'),\n 'len': len(pre_rna_seq),\n }\n \n if rna_kb.id in gvar.transcript_ntp_usage:\n ntp_count = gvar.transcript_ntp_usage[rna_kb.id]\n else:\n if rna_kb.id in rna_input_seq:\n seq = rna_input_seq[rna_kb.id]\n else: \n seq = rna_kb.get_seq()\n ntp_count = gvar.transcript_ntp_usage[rna_kb.id] = {\n 'A': seq.upper().count('A'),\n 'C': seq.upper().count('C'),\n 'G': seq.upper().count('G'),\n 'U': seq.upper().count('U'),\n 'len': len(seq),\n }\n\n if add_seq:\n pre_rna_count = {k:(v+add_seq[k] if k in add_seq else v) for k,v in pre_rna_count.items()}\n ntp_count = {k:v+add_seq[k] for k,v in ntp_count.items()} \n\n # Adding participants to LHS\n reaction.participants.append(\n polr_bound_species.species_coefficients.get_or_create(\n coefficient=-1))\n reaction.participants.append(metabolites['atp'][\n transcription_compartment.id].species_coefficients.get_or_create(\n coefficient=-pre_rna_count['A']))\n reaction.participants.append(metabolites['ctp'][\n transcription_compartment.id].species_coefficients.get_or_create(\n coefficient=-pre_rna_count['C']))\n reaction.participants.append(metabolites['gtp'][\n transcription_compartment.id].species_coefficients.get_or_create(\n coefficient=-pre_rna_count['G']))\n reaction.participants.append(metabolites['utp'][\n transcription_compartment.id].species_coefficients.get_or_create(\n coefficient=-pre_rna_count['U']))\n reaction.participants.append(metabolites['h2o'][\n transcription_compartment.id].species_coefficients.get_or_create(\n coefficient=-(pre_rna_count['len']-pre_rna_count['N']+len_add_rna\n -ntp_count['len']+1)))\n \n # Adding participants to RHS\n if rna_kb.id in transcription_unit:\n for add_transcript in transcription_unit[rna_kb.id]:\n add_rna_model = model.species_types.get_one(id=add_transcript).species[0]\n reaction.participants.append(\n add_rna_model.species_coefficients.get_or_create(\n coefficient=1))\n\n reaction.participants.append(\n rna_model.species_coefficients.get_or_create(\n coefficient=1))\n reaction.participants.append(metabolites['ppi'][\n transcription_compartment.id].species_coefficients.get_or_create(\n coefficient=pre_rna_count['len']-pre_rna_count['N']))\n reaction.participants.append(metabolites['amp'][\n transcription_compartment.id].species_coefficients.get_or_create(\n coefficient=pre_rna_count['A']-ntp_count['A']))\n reaction.participants.append(metabolites['cmp'][\n transcription_compartment.id].species_coefficients.get_or_create(\n coefficient=pre_rna_count['C']-ntp_count['C']))\n reaction.participants.append(metabolites['gmp'][\n transcription_compartment.id].species_coefficients.get_or_create(\n coefficient=pre_rna_count['G']-ntp_count['G']))\n reaction.participants.append(metabolites['ump'][\n transcription_compartment.id].species_coefficients.get_or_create(\n coefficient=pre_rna_count['U']-ntp_count['U']))\n reaction.participants.append(metabolites['h'][\n transcription_compartment.id].species_coefficients.get_or_create(\n coefficient=pre_rna_count['len']-pre_rna_count['N']+len_add_rna\n -ntp_count['len']+1))\n reaction.participants.append(\n 
polr_complex_species.species_coefficients.get_or_create(\n coefficient=1))\n reaction.participants.append(\n polr_binding_site_species.species_coefficients.get_or_create(\n coefficient=1))\n\n all_transcripts = [rna_kb]\n if rna_kb.id in transcription_unit:\n for add_transcript in transcription_unit[rna_kb.id]:\n all_transcripts.append(cell.species_types.get_one(id=add_transcript))\n \n for rna in all_transcripts:\n if rna.type==wc_kb.eukaryote.TranscriptType.mRna:\n ribo_binding_site_st = model.species_types.get_or_create(\n id='{}_ribosome_binding_site'.format(rna.id),\n name='ribosome binding site of {}'.format(rna.name),\n type=onto['WC:pseudo_species'],\n )\n ribo_binding_site_st.structure = wc_lang.ChemicalStructure(\n empirical_formula = EmpiricalFormula(),\n molecular_weight = 0.,\n charge = 0)\n ribo_binding_site_species = model.species.get_or_create(\n species_type=ribo_binding_site_st, compartment=translation_compartment)\n ribo_binding_site_species.id = ribo_binding_site_species.gen_id()\n\n site_per_rna = math.floor(gvar.transcript_ntp_usage[rna.id]['len'] / \\\n ribosome_occupancy_width) + 1\n reaction.participants.append(\n ribo_binding_site_species.species_coefficients.get_or_create(\n coefficient=site_per_rna))\n\n rna_model = model.species_types.get_one(id=rna.id).species[0]\n rna_init_conc = model.distribution_init_concentrations.get_one(\n species=rna_model).mean\n conc_model = model.distribution_init_concentrations.create(\n species=ribo_binding_site_species,\n mean=site_per_rna * rna_init_conc,\n units=unit_registry.parse_units('molecule'),\n comments='Set to mRNA length divided by {} bp to allow '\n 'queueing of ribosome during translation'.format(ribosome_occupancy_width),\n references=[ref_ribo_width] \n )\n conc_model.id = conc_model.gen_id()\n\n init_el_rxn_no += 1\n\n print('{} reactions each for initiation and elongation have been generated'.format(\n init_el_rxn_no))", "title": "" }, { "docid": "9e76f54528f5855fcb12f59638ed7312", "score": "0.44531295", "text": "def define_components(reqs):\n define_common(reqs)\n define_mercury(reqs)\n define_pmix(reqs)\n\n isal_build = ['./autogen.sh ',\n './configure --prefix=$ISAL_PREFIX --libdir=$ISAL_PREFIX/lib',\n 'make $JOBS_OPT', 'make install']\n reqs.define('isal',\n retriever=GitRepoRetriever(\n 'https://github.com/01org/isa-l.git'),\n commands=isal_build,\n required_progs=['nasm', 'yasm'],\n libs=[\"isal\"])\n\n\n retriever = GitRepoRetriever(\"https://github.com/pmem/pmdk.git\")\n\n pmdk_build = [\"make \\\"BUILD_RPMEM=n\\\" \\\"NDCTL_ENABLE=n\\\" \"\n \"\\\"NDCTL_DISABLE=y\\\" \" \"$JOBS_OPT install \"\n \"prefix=$PMDK_PREFIX\"]\n\n reqs.define('pmdk',\n retriever=retriever,\n commands=pmdk_build,\n libs=[\"pmemobj\"])\n\n retriever = GitRepoRetriever(\"https://github.com/pmodels/argobots.git\",\n True)\n reqs.define('argobots',\n retriever=retriever,\n commands=['git clean -dxf ',\n './autogen.sh',\n './configure --prefix=$ARGOBOTS_PREFIX CC=gcc',\n 'make $JOBS_OPT',\n 'make $JOBS_OPT install'],\n libs=['abt'],\n headers=['abt.h'])\n\n retriever = GitRepoRetriever(\"https://review.hpdd.intel.com/daos/iof\",\n True)\n reqs.define('iof',\n retriever=retriever,\n commands=[\"scons $JOBS_OPT \"\n \"OMPI_PREBUILT=$OMPI_PREFIX \"\n \"CART_PREBUILT=$CART_PREFIX \"\n \"FUSE_PREBUILT=$FUSE_PREFIX \"\n \"PREFIX=$IOF_PREFIX \"\n \"USE_INSTALLED=\" + ','.join(reqs.installed) + ' ' +\n \"install\"],\n headers=['cnss_plugin.h'],\n requires=['cart', 'fuse', 'ompi'])\n\n retriever = 
GitRepoRetriever(\"https://github.com/daos-stack/daos\",\n True)\n reqs.define('daos',\n retriever=retriever,\n commands=[\"scons $JOBS_OPT \"\n \"OMPI_PREBUILT=$OMPI_PREFIX \"\n \"CART_PREBUILT=$CART_PREFIX \"\n \"PREFIX=$DAOS_PREFIX \"\n \"USE_INSTALLED=\" + ','.join(reqs.installed) + ' ' +\n \"install\"],\n headers=['daos.h'],\n requires=['cart', 'ompi'])\n\n retriever = GitRepoRetriever('https://github.com/libfuse/libfuse')\n reqs.define('fuse',\n retriever=retriever,\n commands=['meson $FUSE_SRC --prefix=$FUSE_PREFIX' \\\n ' -D udevrulesdir=$FUSE_PREFIX/udev' \\\n ' -D disable-mtab=True' \\\n ' -D utils=False',\n '$ninja -v $JOBS_OPT',\n '$ninja install'],\n libs=['fuse3'],\n defines=[\"FUSE_USE_VERSION=32\"],\n required_progs=['libtoolize', NINJA_PROG],\n headers=['fuse3/fuse.h'],\n out_of_src_build=True)\n\n retriever = GitRepoRetriever(\"https://github.com/daos-stack/cart\",\n True)\n reqs.define('cart',\n retriever=retriever,\n commands=[\"scons $JOBS_OPT \"\n \"OMPI_PREBUILT=$OMPI_PREFIX \"\n \"MERCURY_PREBUILT=$MERCURY_PREFIX \"\n \"PMIX_PREBUILT=$PMIX_PREFIX \"\n \"PREFIX=$CART_PREFIX \"\n \"USE_INSTALLED=\" + ','.join(reqs.installed) + ' ' +\n \"install\"],\n headers=[\"cart/api.h\", \"gurt/list.h\"],\n libs=[\"cart\", \"gurt\"],\n requires=['mercury', 'uuid', 'crypto', 'ompi',\n 'pmix', 'boost', 'yaml'],\n package='cart-devel' if inst(reqs, 'cart') else None)\n\n reqs.define('fio',\n retriever=GitRepoRetriever(\n 'https://github.com/axboe/fio.git'),\n commands=['git checkout fio-3.3',\n './configure --prefix=\"$FIO_PREFIX\"',\n 'make $JOBS_OPT', 'make install'],\n progs=['genfio', 'fio'])\n\n retriever = GitRepoRetriever(\"https://github.com/spdk/spdk.git\", True)\n reqs.define('spdk',\n retriever=retriever,\n commands=['./configure --prefix=\"$SPDK_PREFIX\" ' \\\n ' --with-fio=\"$FIO_SRC\"',\n 'make $JOBS_OPT', 'make install',\n 'mkdir -p \"$SPDK_PREFIX/share/spdk\"',\n 'cp -r include scripts examples/nvme/fio_plugin ' \\\n '\"$SPDK_PREFIX/share/spdk\"'],\n libs=['spdk'],\n requires=['fio'])\n\n url = 'https://github.com/protobuf-c/protobuf-c/releases/download/' \\\n 'v1.3.0/protobuf-c-1.3.0.tar.gz'\n web_retriever = WebRetriever(url, \"08804f8bdbb3d6d44c2ec9e71e47ef6f\")\n reqs.define('protobufc',\n retriever=web_retriever,\n commands=['./configure --prefix=$PROTOBUFC_PREFIX '\n '--disable-protoc', 'make $JOBS_OPT',\n 'make install'],\n libs=['protobuf-c'],\n headers=['protobuf-c/protobuf-c.h'])", "title": "" }, { "docid": "df53c7b01224cd8f9acbae64e266aa93", "score": "0.44493562", "text": "def test_customize_customize_before_call():\n cs = customize('test', 'm_name')\n owner = FalseOwner()\n with pytest.raises(RuntimeError):\n cs.customize(owner, 'dd')\n\n cs = customize('test', 'm_name', ('remove',))\n owner = FalseOwner()\n cs.customize(owner, 'dd')\n assert owner.test.args == ('m_name', None, ('remove',), 'custom')", "title": "" }, { "docid": "8cc207721b6ec40e578497d2dd2e2cea", "score": "0.4445085", "text": "def schema_custom(name: str, **kwargs) -> dict:\n unknown = kwargs.get(\"unknown\", \"custom\")\n adapter_name = kwargs.get(\"adapter_name\", unknown)\n adapter_name_raw = kwargs.get(\"adapter_name_raw\", unknown)\n adapter_title = kwargs.get(\"adapter_title\", unknown.capitalize())\n adapter_prefix = kwargs.get(\"adapter_name_raw\", unknown[:2])\n title = name.capitalize()\n column_name = kwargs.get(\"column_name\", f\"{adapter_name}:{name}\")\n column_title = kwargs.get(\"column_title\", f\"{adapter_title}: {title}\")\n ftype = kwargs.get(\"type\", \"string\")\n 
ftype_norm = kwargs.get(\"type_name\", \"string\")\n return {\n \"adapter_name_raw\": adapter_name_raw,\n \"adapter_name\": adapter_name,\n \"adapter_title\": adapter_title,\n \"adapter_prefix\": adapter_prefix,\n \"column_name\": column_name,\n \"column_title\": column_title,\n \"sub_fields\": [],\n \"is_complex\": False,\n \"is_list\": False,\n \"is_root\": True,\n \"parent\": \"root\",\n \"name\": name,\n \"name_base\": name,\n \"name_qual\": name,\n \"title\": title,\n \"type\": ftype,\n \"type_norm\": ftype_norm,\n \"selectable\": False,\n \"is_agg\": False,\n \"expr_field_type\": \"agg\",\n \"is_details\": False,\n \"is_all\": False,\n }", "title": "" }, { "docid": "534d87009f378a1da7fa5b5a7b976442", "score": "0.44434965", "text": "def _build_configs(self):\n raise NotImplementedError", "title": "" }, { "docid": "bb5007f10b3b4fc155ae0b7d0a88e36f", "score": "0.44434765", "text": "def custom_rules():\r\n ws4 = wb.create_sheet(title='Custom System Rules')\r\n rule_cols = ['name', 'rule', 'resource filter', 'actions', 'disabled']\r\n systemrules = get_qlik_sense.get_systemrules('Custom')\r\n num_of_systemrules = len(systemrules)\r\n r = 1\r\n for item in rule_cols:\r\n ws4.cell(row=1, column=r).value = item\r\n r += 1\r\n \r\n for row in range(num_of_systemrules):\r\n ws4.append(systemrules[row])", "title": "" }, { "docid": "ae62aa1e0cd3e0add27e873411753abe", "score": "0.44414645", "text": "def _GenerateClientAuxMethods(self, ident, file):\n decl = \"\"\"\nfunc %(camel)sByName(c contrail.ApiClient, fqn string) (*%(camel)s, error) {\n obj, err := c.FindByName(\"%(typeid)s\", fqn)\n if err != nil {\n return nil, err\n }\n return obj.(*%(camel)s), nil\n}\n\nfunc %(camel)sByUuid(c contrail.ApiClient, uuid string) (*%(camel)s, error) {\n obj, err := c.FindByUuid(\"%(typeid)s\", uuid)\n if err != nil {\n return nil, err\n }\n return obj.(*%(camel)s), nil\n}\n\"\"\" % {'camel': ident.getCppName(), 'typeid': ident.getName()}\n file.write(decl)", "title": "" }, { "docid": "fbf337a4179b39717787adfd211ca3a4", "score": "0.44413272", "text": "def proto_file(self, context):\n return ProtoFile(InfraRepoConfig().to_recipes_cfg(self.path))", "title": "" } ]
a27a6263310b83f334a5c20a33dfbdea
Try to find a handler for the specified action.
[ { "docid": "f10ca768c00b50727e194b3f09686196", "score": "0.58375084", "text": "def get_action(self, action):\n if not hasattr(self, action):\n return None\n\n x = getattr(self, action)\n if not hasattr(x, 'chat_action'):\n return None\n\n return x", "title": "" } ]
[ { "docid": "507c1d6f9f4e70d5e40e29ca55edcb2f", "score": "0.79287875", "text": "def get_handler_for_action(action: str) -> Union[HandlerFuncType, None]:\n return _registry.get(action)", "title": "" }, { "docid": "9011100e1235f2fbb8172ea80d5b24a7", "score": "0.7242842", "text": "def get_handler(request_id, action_id):\r\n return _handlers.get((request_id, action_id), NotImplemented)", "title": "" }, { "docid": "4bce0c979c95e0286b1448d488c2f0f7", "score": "0.6306563", "text": "def _get_handler(self, check):\n # get handler\n for handler in self._handlers:\n if issubclass(check.__class__, handler.handles):\n return handler\n\n # default\n return None", "title": "" }, { "docid": "3e1b5e577a37442518228cbb28249bd1", "score": "0.6238827", "text": "def get_action_by_id(self, action_id):\n for action in self.actions:\n if action.action_id == action_id:\n return action\n return None", "title": "" }, { "docid": "595ec2059660503dca4b2f549be0ac52", "score": "0.6127543", "text": "def maybe_handle(self):\n request = self.request\n table_name, action_name, obj_id = self.check_handler(request)\n if table_name == self.name and action_name:\n action_names = [action.name for action in\n self.base_actions.values() if not action.preempt]\n # do not run preemptive actions here\n if action_name in action_names:\n return self.take_action(action_name, obj_id)\n return None", "title": "" }, { "docid": "d70c9af280b93363886c376b567ba233", "score": "0.6119552", "text": "def find_handler(self, *l):\n if l:\n ps = '/' + '/'.join(filter(None, l))\n method_name = 'index'\n while ps:\n c = controllers_path.get(ps)\n if c:\n method = getattr(c, method_name, None)\n if method:\n exposed = getattr(method, 'exposed', False)\n if exposed == 'json':\n _logger.debug(\"Dispatch json to %s %s %s\", ps, c, method_name)\n return lambda request: JsonRequest(request).dispatch(method)\n elif exposed == 'http':\n _logger.debug(\"Dispatch http to %s %s %s\", ps, c, method_name)\n return lambda request: HttpRequest(request).dispatch(method)\n ps, _slash, method_name = ps.rpartition('/')\n if not ps and method_name:\n ps = '/'\n return None", "title": "" }, { "docid": "3dbcaeab58b50733a624d36178f115b5", "score": "0.5984032", "text": "def get_action(cls, action_name):\n\n # first search in default actions\n action_obj = cls.default_actions.get(action_name)\n\n if action_obj:\n return action_obj\n\n # now search in actions db\n action_obj = action.Action.get(name=action_name)\n\n model_class_path = action_obj.model_path\n\n if not model_class_path:\n raise InvalidActionClass(model_class_path)\n\n module_name, class_name = model_class_path.rsplit('.', 1)\n loaded_module = importlib.import_module(module_name)\n\n return getattr(loaded_module, class_name)", "title": "" }, { "docid": "fd3668edd4a8b82160fa595c3df1f44b", "score": "0.58897626", "text": "def handler_for(self, command):\n try:\n if command.handler() != None:\n return command.handler()\n except AttributeError:\n pass\n\n try:\n return getattr(\n self._getmodule(command), command.__class__.__name__ + \"Handler\"\n )\n except AttributeError:\n return None", "title": "" }, { "docid": "772931c8c48e34761349de7da55bedfa", "score": "0.58750755", "text": "def FindById(cls, action_id):\r\n return cls._action_map.get(action_id)", "title": "" }, { "docid": "48497e32629a6d960e3d66448939d18c", "score": "0.58478004", "text": "def find_handler(self, path_info):\n\t\timport routes\n\n\t\trequest = cherrypy.request\n\t\t\n\t\tconfig = routes.request_config()\n\t\tconfig.mapper = self.mapper\n\t\tif 
hasattr(cherrypy.request, 'wsgi_environ'):\n\t\t\tconfig.environ = cherrypy.request.wsgi_environ\n\n\t\tconfig.host = request.headers.get('Host', None)\n\t\tconfig.protocol = request.scheme\n\t\tconfig.redirect = self.redirect\n\n\t\tresult = self.mapper.match(path_info)\n\t\t_log.debug('find %s = %s' %(path_info,result))\n\t\tconfig.mapper_dict = result\n\t\tparams = {}\n\t\tif result:\n\t\t\tparams = result.copy()\n\t\telse:\n\t\t\t_log.debug(\"%s not in Routes\" % path_info)\n\n\t\tif not self.full_result:\n\t\t\tparams.pop('controller', None)\n\t\t\tparams.pop('action', None)\n\t\t\t# routes add id attribute if when routes not explicit.\n\t\t\t# we remove id form result, to avoid attribute error\n\t\t\tif not params.get('id',None):\n\t\t\t\tparams.pop('id', None)\n\n\t\tparams.update(request.params)\n\t\trequest.params.update(params)", "title": "" }, { "docid": "69c6bf0c0f0f09f40dad04fc992f2c0c", "score": "0.5768871", "text": "def act(action=\"\"):\n actions.get(action.lower(), partial(print, \"No such action...\"))()\n return actions.get(action.lower(), None)", "title": "" }, { "docid": "8acdb8840be139cf04846d9d27bb1ae7", "score": "0.5749196", "text": "def requested_action(self) -> Callable[[], Optional[Dict[str, str]]]:\n for name, action in self.available_actions.items():\n was_requested = getattr(self, name)\n if was_requested:\n return action", "title": "" }, { "docid": "cb0a3c1c663c3af6ed4a582fbbbcc968", "score": "0.5638102", "text": "def get_handler(self, req):\n handler_name = 'do_%s' % req.method.lower()\n return getattr(self, handler_name, None)", "title": "" }, { "docid": "cb0a3c1c663c3af6ed4a582fbbbcc968", "score": "0.5638102", "text": "def get_handler(self, req):\n handler_name = 'do_%s' % req.method.lower()\n return getattr(self, handler_name, None)", "title": "" }, { "docid": "ec92f0f8f5a5b44a1c98ac3863a37d88", "score": "0.56313825", "text": "def find_handler(self, path_info):\r\n import routes\r\n \r\n request = cherrypy.serving.request\r\n \r\n config = routes.request_config()\r\n config.mapper = self.mapper\r\n if hasattr(request, 'wsgi_environ'):\r\n config.environ = request.wsgi_environ\r\n config.host = request.headers.get('Host', None)\r\n config.protocol = request.scheme\r\n config.redirect = self.redirect\r\n \r\n result = self.mapper.match(path_info)\r\n \r\n config.mapper_dict = result\r\n params = {}\r\n if result:\r\n params = result.copy()\r\n if not self.full_result:\r\n params.pop('controller', None)\r\n params.pop('action', None)\r\n request.params.update(params)\r\n \r\n # Get config for the root object/path.\r\n request.config = base = cherrypy.config.copy()\r\n curpath = \"\"\r\n \r\n def merge(nodeconf):\r\n if 'tools.staticdir.dir' in nodeconf:\r\n nodeconf['tools.staticdir.section'] = curpath or \"/\"\r\n base.update(nodeconf)\r\n \r\n app = request.app\r\n root = app.root\r\n if hasattr(root, \"_cp_config\"):\r\n merge(root._cp_config)\r\n if \"/\" in app.config:\r\n merge(app.config[\"/\"])\r\n \r\n # Mix in values from app.config.\r\n atoms = [x for x in path_info.split(\"/\") if x]\r\n if atoms:\r\n last = atoms.pop()\r\n else:\r\n last = None\r\n for atom in atoms:\r\n curpath = \"/\".join((curpath, atom))\r\n if curpath in app.config:\r\n merge(app.config[curpath])\r\n \r\n handler = None\r\n if result:\r\n controller = result.get('controller')\r\n controller = self.controllers.get(controller, controller)\r\n if controller:\r\n if isinstance(controller, (type, types.ClassType)):\r\n controller = controller()\r\n # Get config from the 
controller.\r\n if hasattr(controller, \"_cp_config\"):\r\n merge(controller._cp_config)\r\n \r\n action = result.get('action')\r\n if action is not None:\r\n handler = getattr(controller, action, None)\r\n # Get config from the handler \r\n if hasattr(handler, \"_cp_config\"): \r\n merge(handler._cp_config)\r\n else:\r\n handler = controller\r\n \r\n # Do the last path atom here so it can\r\n # override the controller's _cp_config.\r\n if last:\r\n curpath = \"/\".join((curpath, last))\r\n if curpath in app.config:\r\n merge(app.config[curpath])\r\n \r\n return handler", "title": "" }, { "docid": "ab65e6ad166cbfc2bfb757ea40306e35", "score": "0.562774", "text": "def _get_handlers(self, task, action, normalized_retcode, results,\n **kwargs):\n if action == 'resolve':\n return self.resolved\n elif action == 'raise':\n return self.raised\n else:\n raise ValueError(\"Unrecognized action '%s'\" % action)", "title": "" }, { "docid": "e7c497f5aa72f7612eeee39214f2d0ea", "score": "0.56210166", "text": "def _take_action(self, action):\n pass", "title": "" }, { "docid": "55dbc46cf2528b59bf0cc56a946d968c", "score": "0.56094635", "text": "def get_action(self, action_type):\n return action_type.objects.get(action_ptr=self.pk)", "title": "" }, { "docid": "17a76a308e4c6eeeeecf6c1c1526e2aa", "score": "0.5549011", "text": "def get_handler(requested):\n\n if not __inited__:\n init()\n if requested not in chksum_types:\n raise MissingChksumHandler(\"no handler for %s\" % requested)\n return chksum_types[requested]", "title": "" }, { "docid": "91b75ef6dceb4799d2b9043bc77572cd", "score": "0.55445707", "text": "def get_action(action):\n ACTIONS = [0, 2, 3]\n return ACTIONS[action]", "title": "" }, { "docid": "d2e3b45cf895c174a553521cb843fd35", "score": "0.55409783", "text": "def register_handler(self, action, handler=None):\n if action.action in self.actions:\n raise InvalidActionError(\n \"Trying to register a handler for an action that already \"\n \"has a handler.\"\n )\n\n self.actions[action.action] = action\n if handler is not None:\n self.handlers[action.action] = handler\n else:\n # Used as decorator, return wrapper function\n def _register_handler(func):\n self.handlers[action.action] = func\n\n return func\n\n return _register_handler", "title": "" }, { "docid": "c5eccd25248958546898cda632c8b2b4", "score": "0.5540687", "text": "def findAction(self, view, name):\n return self.actions[view].get(name, None)", "title": "" }, { "docid": "f2dd1087d2af77a9a0070fbae4cb3dd6", "score": "0.5539927", "text": "def prepare_dispatch(self):\n uri = web.ctx.env['REQUEST_URI']\n uribase = web.ctx.env['SCRIPT_NAME']\n assert uri[0:len(uribase)] == uribase\n uri = uri[len(uribase):]\n\n for pattern, handler in rules:\n m = pattern.match(uri)\n if m:\n return handler(), m.groups()\n raise core.NotFound('%s does not map to any REST API.' 
% uri)", "title": "" }, { "docid": "7d6c3331e1e0566f6100727861c1d3a9", "score": "0.5537576", "text": "def get(self,action):\n h = hashify(action)\n if h in self.tbl:\n return self.tbl[hashify(action)]\n else:\n return 0", "title": "" }, { "docid": "f938677c8bf0cd4f66a85fb70c2a5d10", "score": "0.5537385", "text": "def __dispatch__(self, path=None):\n try:\n match, handler = self.__dict__[\"url\"].__match__(path)\n if match.groups():\n return handler(**match.groupdict())\n else:\n return handler()\n except Http404, e:\n # Dispatch to fileserver if we have it\n return self._handle(path)", "title": "" }, { "docid": "4aa6ad5475b5696d5d3121efbc3c3763", "score": "0.55246216", "text": "def check_handler(cls, request):\n if request.method == \"POST\" and \"action\" in request.POST:\n table, action, obj_id = cls.parse_action(request.POST[\"action\"])\n elif \"table\" in request.GET and \"action\" in request.GET:\n table = request.GET[\"table\"]\n action = request.GET[\"action\"]\n obj_id = request.GET.get(\"obj_id\", None)\n else:\n table = action = obj_id = None\n return table, action, obj_id", "title": "" }, { "docid": "2cc634fdc462f6d89704e38db26ce43f", "score": "0.5494449", "text": "def _on_action_triggered(self, action: str, args: dict):\n action_obj = utils.find_method(module=action, obj=self)\n if action_obj:\n action_obj(**args)", "title": "" }, { "docid": "0f6fb59c566de8b794d6958f88a4dfa2", "score": "0.5479455", "text": "def get_url_handler(self, url):\n handler = self.hosts.get(url.host())\n\n if handler is None:\n for root, _handler in self.root_paths.items():\n if url.path().split('/')[1] == root:\n handler = _handler\n\n return handler", "title": "" }, { "docid": "b65091a21cbe92c3bd797a586e8c5912", "score": "0.5476138", "text": "def handler(self):\n if not self.handler_name:\n return None\n handler = logging._handlers.get(self.handler_name, None)\n expected_attr = ('resource', 'labels')\n if handler and not all(hasattr(handler, ea) for ea in expected_attr):\n handler = self.prepare_handler(handler)\n return handler", "title": "" }, { "docid": "47c8df808c1d4a2a2899ca3ccf57b13c", "score": "0.5472504", "text": "def get_handler(self, target):\n with log_exception(self._logger, to_suppress=(IndexError, ), format=f\"No hadnler for target {target}\"):\n target_parsers = ( [i for i in self.handlers() if i.target == target])\n out = target_parsers.pop(0)\n self._logger.debug(f\"got handler {out} for {target}\")\n return out", "title": "" }, { "docid": "22eac9e3bd3fc448fe64bf899bb84032", "score": "0.5472141", "text": "def find_operation(self, command, action):\n if command not in self.ops:\n raise ValueError(f\"Command not found: {command}\")\n\n command_dict = self.ops[command]\n\n if action in command_dict:\n return command_dict[action]\n\n # Find the matching alias\n for op in command_dict.values():\n if action in op.action_aliases:\n return op\n\n # Fail if no matching alias was found\n raise ValueError(f\"No action {action} for command {command}\")", "title": "" }, { "docid": "94fa8fd00776d542a2893ecc1b1a32de", "score": "0.546054", "text": "def get_action(self, action_name: str, exact_match: bool = False) -> Dict[str, Any]:\n _, current_action = super().get(self._actions, action_name, exact_match)\n if not current_action:\n raise (Exception(\"There is no action with name {} in the system under test (Exact match = {})\".format(\n action_name, exact_match)))\n return current_action", "title": "" }, { "docid": "c28b8ae521852ed7d34cf32d72ab9617", "score": "0.5438951", "text": "def 
_get_action_func(self):\n try:\n func = self.action_funcs[self.action]\n except KeyError:\n raise RunnerInvalidActionError(\n \"Unknown action: %(action)r\" % vars(self))\n return func", "title": "" }, { "docid": "c108cf3a3b3589cc47d0e40486603900", "score": "0.54321635", "text": "def get_action_parser(\n self, *, script: str, action: BaseSchemaAction | BaseMorphAction | BaseCategoryAction = None\n ) -> ActionParser | MorphParser | CategoryParser:\n if not action:\n action = self.get_action(script=script)\n # Test for each of BaseSchemaAction | BaseMorphAction | BaseCategoryAction\n if isinstance(action, BaseSchemaAction):\n return ActionParser\n if isinstance(action, BaseMorphAction):\n return MorphParser\n if isinstance(action, BaseCategoryAction):\n return CategoryParser\n # Action unknown\n raise ValueError(f\"Script cannot be parsed ({script}).\")", "title": "" }, { "docid": "d1f9478e9d84810e969a2b252841a3ba", "score": "0.5417408", "text": "def _get_handler_instance(request, routes):\n resource_type_arr = request.resource_type.split(\"::\")\n # If the first component of the resource_type is 'Custom', delete it\n if resource_type_arr[0].lower() == \"custom\":\n del resource_type_arr[0]\n\n resource_type = \"::\".join(resource_type_arr)\n print(\"Looking up handler class for resource type `%s`\"% resource_type)\n print(routes)\n handler_cls = routes.get(resource_type, None)\n print(\"Looking up handler class `%s`\"% handler_cls)\n\n if not handler_cls:\n handler_not_found(resource_type)\n raise HandlerError(\"Can't find handler class for %s!\"% resource_type)\n else:\n # Handler class found, initialize it\n handler = handler_cls(request)\n return(handler)", "title": "" }, { "docid": "25e7fb1966ff1653e54bba01b0f99d4f", "score": "0.54111534", "text": "def add_handler(self, action, callback):\n if callable(callback):\n self._handlers[action] = callback\n else:\n raise ValueError", "title": "" }, { "docid": "b55cfc81877c9816a3f1984eef56951d", "score": "0.5383757", "text": "def get_handler(split_mode=True, control_scheme=default_scheme):\r\n if not split_mode:\r\n return ActionHandler()\r\n if control_scheme == default_scheme:\r\n return SplitActionHandler()\r\n return DynamicActionHandler(control_scheme)", "title": "" }, { "docid": "f84782f1e7cfd9685e1740f9f03128b8", "score": "0.53071696", "text": "def create_handler(name):\n try:\n return globals()['%sHandler' % name.capitalize()]()\n except KeyError:\n return None", "title": "" }, { "docid": "8d54bdc3999d13128abde8e8ebba6300", "score": "0.5305581", "text": "def resolveActionInState(self, state, action):\n if not self.mapping.has_key(state):\n return None\n \n resulting_state = None\n existing_mappings = self.mapping[state]\n \n for mapping in existing_mappings:\n if mapping[0] == action:\n resulting_state = mapping[1]\n \n \n return resulting_state", "title": "" }, { "docid": "b0db1e642547f56111f9d31706997b5e", "score": "0.529863", "text": "def handle_action(self, action: Optional[Action]) -> bool:\r\n if action is None:\r\n return False\r\n \"\"\"\r\n print(\"action is None\")\r\n self.state = MainGameEventHandler(self.engine)\r\n return MainGameEventHandler(self.engine)\r\n \"\"\"\r\n try:\r\n action.perform()\r\n except exceptions.Impossible as exc:\r\n self.engine.message_log.add_message(exc.args[0], colors.impossible)\r\n return False # Skip enemy turn on exceptions\r\n\r\n self.engine.handle_enemy_turns()\r\n self.engine.update_fov()\r\n #self.state = MainGameEventHandler(self.engine)\r\n return MainGameEventHandler(self.engine)", 
"title": "" }, { "docid": "e872785d7229b3ba06e5c0998de1291c", "score": "0.5297683", "text": "def handler_not_found(resource_type):\n error_msg = \"Handler not found for resource type '%s'\"% resource_type\n log.error(error_msg)\n raise HandlerError(error_msg)", "title": "" }, { "docid": "bef16b5d63a838664f4efb33fc8aacc4", "score": "0.52919066", "text": "def labelForAction(self, action):\n for label in self.actionLabels():\n if label.action() == action:\n return label\n return None", "title": "" }, { "docid": "5ce9bde1f86801f5832de3562b6e37dd", "score": "0.5287658", "text": "def match(self, environ, path=None, method=None):\n if path == None:\n path = urllib_parse.unquote(environ['PATH_INFO'])\n if method == None:\n method = environ['REQUEST_METHOD']\n fallback_handler = self.app.notFound\n matched_handlers = []\n for priority, regexp, params_name, converters, method_dict in self._handlers:\n match = regexp.search(path)\n if not match:\n continue\n # We check if there was a handler registered for the HTTP\n # request which name corresponds to method (GET, POST, ...)\n method_handler = method_dict.get(method)\n if method_handler != None:\n variables = match.groupdict()\n # We convert the variables to the proper type using the\n # converters\n for key in list(variables.keys()):\n variables[key] = converters[key](variables[key])\n # We return the handler along with the variables\n matched_handlers.append(\n (priority, method_handler, variables, params_name))\n if not matched_handlers:\n fallback_handler = self.app.notSupported\n if matched_handlers:\n # NOTE: Was matched_handlers.sort(lambda a,b:cmp(b[0],a[0]))\n # Make sure this is the same order.\n matched_handlers.sort(key=lambda _: _[0], reverse=True)\n matched_handlers.append((-1, fallback_handler, {}, None))\n return matched_handlers\n elif path and path[0] == \"/\":\n # If we didn't found any matching handler, we try without the\n # / prefix\n return self.match(environ, path[1:], method)\n else:\n return [(0, fallback_handler, {}, None)]", "title": "" }, { "docid": "b381bac4e7bcf650bb736d89f8d0f7bc", "score": "0.5269366", "text": "def dispatch_action(self, action, element=None):\n action_type = \"Not-Instantiated\"\n try:\n action_type = action[ACTION_KEY]\n getattr(self, action_type)(action, element)\n\n except SeleniumHelperExceptions as selenium_error:\n message = \"A Selenium error was caught while performing a '{}' action\".format(selenium_error, action_type)\n raise ActionException(message)\n\n except KeyError as key_error:\n message = \"A KeyError Exception for the key named {} was raised while performing a '{}' action! \" \\\n \"Check the spelling of the key and/or update the configuration schema\" \\\n \"to include a check for the proper key's existence\".format(key_error, action_type)\n raise ActionException(message)\n\n except AttributeError as attr_error:\n message = \"An AttributeError Exception was raised while performing a '{}' action! Check the spelling \" \\\n \"of the action and/or update the configuration schema to include a check \" \\\n \"for the proper naming of this action. 
Error: {}\".format(action_type, attr_error)\n raise ActionException(message)\n\n except Exception as actions_error:\n raise ActionException(\"An error occurred while performing a '{}' \"\n \"action on {} | {}\".format(action_type, self.sh.get_current_url or \"No-URL\",\n actions_error))", "title": "" }, { "docid": "997c9af6894803cd234cb8321a9400bb", "score": "0.52608097", "text": "def __get_action_index(self, agt_action):\n\n for (i, action) in enumerate(self.feasible_actions):\n if agt_action == action:\n return i\n\n raise Exception(\"The agent response does not exist\")", "title": "" }, { "docid": "fbbf7dbd9373598985aa4533e806b4f3", "score": "0.5260242", "text": "def get(identifier: str) -> ActionScheme:\n if identifier not in _registry.keys():\n raise KeyError(f'Identifier {identifier} is not associated with any `ActionScheme`.')\n\n return _registry[identifier]()", "title": "" }, { "docid": "3181ed2776a99b1723c7b3c82872e0d2", "score": "0.52555656", "text": "def getHandler(self, method):\n\t\tret = self._handlers.get(method)\n\n\t\t# This is crazy Python magic, but it's doing something rather simple.\n\t\t# First look at the handler we got. If it turns out to be an unbound method,\n\t\t# the we need to fill in the self object. We use the current self as the\n\t\t# self object....\n\t\tif ret and (type(ret) is types.MethodType) and (ret.im_self is None):\n\t\t\tret = ret.__get__(self, self.__class__)\n\n\t\treturn ret", "title": "" }, { "docid": "748282fa6f0564d8fd71e267301bb575", "score": "0.5248759", "text": "def getAction(self, actionName):\n try:\n return self._actions[actionName][0]\n except KeyError:\n raise NameError, \"Action '%s' not provided by any behaviour\" % \\\n actionName", "title": "" }, { "docid": "1693bb620129711ed3f11a0acef82290", "score": "0.52408224", "text": "def action_call(self):\n #Check if the action exists\n if self.action_exists() is False:\n return self.catch()\n \n # Check permissions\n if self.user_can_access(self.get_action_method_name()):\n #Run the action method\n return getattr(self, self.get_action_method_name())()\n else:\n self.output_type = '403'", "title": "" }, { "docid": "ea22fec02efe4af7d56ad088780976e6", "score": "0.5227941", "text": "def _take_action(self, action):\n # action_type = ACTION_LOOKUP[action[0]]\n # if action_type == hfo_py.DASH:\n # self.env.act(action_type, action[1], action[2])\n # elif action_type == hfo_py.TURN:\n # self.env.act(action_type, action[3])\n # elif action_type == hfo_py.KICK:\n # self.env.act(action_type, action[4], action[5])\n # else:\n # print('Unrecognized action %d' % action_type)\n # self.env.act(hfo_py.NOOP)", "title": "" }, { "docid": "51d4b73a89dd416c840a5ce45f3d8c99", "score": "0.5210179", "text": "def load_from_action(cls, action_id, client=None):\n # Check if we are using the global client\n if client is None:\n client = configuration.__GLOBAL_CLIENT__\n cls._Resource__validate_client(client)\n\n try:\n response = client.get(Action.endpoint, \"%s/path\" % action_id)\n return cls.set_response(response)\n # TODO: add exceptions and such for unauthed states when we add in more permissions\n except NotFound:\n raise DoesNotExist(\"Path for action '%s' does not exist on the server.\" % str(action_id))", "title": "" }, { "docid": "689cbc5e4e1c3cb498792853c9244d8d", "score": "0.51852465", "text": "def get_handler(self):\n try:\n return self._handler\n except AttributeError:\n self._handler = self.get_handler_class()(self)\n return self._handler", "title": "" }, { "docid": "f92fe95bd346a694f14a29044f6237c6", 
"score": "0.5177346", "text": "def _get_action(arg):\n\n try:\n return {\n 'set': Set,\n 'show': Show,\n 'addproj': AddProject,\n 'lsproj': ListProjects\n }[arg.lower()]()\n except (KeyError, AttributeError):\n raise InvalidActionException(\"Action \" + str(arg) + \" does not exist.\")", "title": "" }, { "docid": "8d6ae558b7c62c9ece474cb3f45b04aa", "score": "0.5175863", "text": "def handles(action: str) -> Callable[[HandlerFuncType], Any]:\n def wrapper(func: HandlerFuncType):\n @wraps(func)\n def wrapped_func(command: Command):\n if not isinstance(command, Command):\n raise RuntimeError(\"{} is not an instance of Command\"\n .format(command))\n return func(command)\n\n # Register just the first function and ensure that the wrapped\n # function is registered, not the raw one\n if action not in _registry:\n _registry[action] = wrapped_func\n\n return wrapped_func\n return wrapper", "title": "" }, { "docid": "b8a16c06f0468742277f2b40eef86a0c", "score": "0.5149893", "text": "def _get_action(self):\n return self.__action", "title": "" }, { "docid": "b8a16c06f0468742277f2b40eef86a0c", "score": "0.5149893", "text": "def _get_action(self):\n return self.__action", "title": "" }, { "docid": "50aeeb6e8dd55c52054c747c5e926f20", "score": "0.5146281", "text": "def getHandler(self, element):\n if element.name in self.skip_handlers:\n return None\n elif element.name in self.auto_handlers:\n log.info(\"Found auto handler for '%s' ('%s')\" % (element.name, self))\n return self.createExtractHandler(element.name)\n elif element.name in self.auto_class_handlers:\n log.info(\"Found auto handler for '%s' ('%s')\" % (element.name, self))\n #\n obj_class, add_to = self.auto_class_handlers[element.name]\n\n if obj_class == self.__class__:\n # Oops, recursive handling - we should handle the sub elements\n def class_handler(elem):\n for sub_element in elem.elements:\n self.handleSubObject(sub_element, obj_class, add_to)\n else:\n def class_handler(elem):\n self.handleSubObject(elem, obj_class, add_to)\n\n return class_handler\n\n try:\n return getattr(self, \"handle_%s\" % element.name)\n except AttributeError:\n return None", "title": "" }, { "docid": "a908bbb678b536a42022db6eccb09293", "score": "0.5131127", "text": "def get_named_handler(logger, name):\n while logger:\n for handler in logger.handlers:\n if handler.name == name:\n return handler\n logger = logger.parent", "title": "" }, { "docid": "e62a886f7876352b2008bdf15f0014e0", "score": "0.5120676", "text": "def get_intent_handler(intent_name):\n possibles = globals().copy()\n possibles.update(locals())\n intent_handler = possibles.get(intent_name)\n if not intent_handler:\n raise NotImplementedError('Intent with name ' + intent_name + ' not supported')\n return intent_handler", "title": "" }, { "docid": "0c9ff496700043d352b2700fda824815", "score": "0.5104001", "text": "def _match(\n self, key: str, *args: Any, **kwargs: Any\n ) -> Union[bool, None]:\n func = self.actions.get(key)\n return func(*args, **kwargs) if func else None", "title": "" }, { "docid": "8c18109442ff5f46cb367e836ad94c02", "score": "0.5101227", "text": "def get_handler():\n return _CURRENT_HANDLER", "title": "" }, { "docid": "ddb5c9403e1b978ed88a4aae6f65ff14", "score": "0.5093709", "text": "def route(self, path):\n # TODO: This will be slow for many URLs but we're unlikely to see that.\n for pattern, handler in self.map.items():\n params = match_path(pattern, path)\n if params is not None:\n return handler, params\n raise KeyError('Path not found')", "title": "" }, { "docid": 
"7cc8bcc739abb11a6e8f67979e4cbb12", "score": "0.5088519", "text": "def _handle(self):\n action = '{0}_handler'.format(self.job.action)\n action_handler = getattr(self, action, None)\n if not asyncio:\n msg = \"No such handler: {}\".format(action)\n raise TMQNoNamespaceHandler(msg)\n yield from action_handler(**self.job.data)", "title": "" }, { "docid": "36bb4e55c9074cbc3e3b627d307289f6", "score": "0.50778705", "text": "def main(args = None):\n if args is None:\n args = sys.argv[1:]\n try:\n args[0]\n except IndexError:\n help()\n else:\n try:\n handler = handlers[args[0]][0]\n except KeyError:\n print(\"Error: Unknown action {0}\".format(args[0]))\n else:\n try:\n handler(args[1:])\n finally:\n if session:\n session.remove()", "title": "" }, { "docid": "58cc77dbc196f2f4ae3e0a00abc875f0", "score": "0.50755286", "text": "def __get_action_from_string(self, action_name):\n\n #let s check if the action name is actually valid otherwise skip\n if not self.__fix_action_name(action_name) in self.__actions_dict:\n return\n\n my_action = self.__actions_dict[self.__fix_action_name(action_name)]\n my_action = imp.load_source(self.__fix_action_name(action_name, 0), \\\n my_action)\n return my_action", "title": "" }, { "docid": "e03f7005df84ffeb7da766994097f0d6", "score": "0.5073169", "text": "def _get_if_else_action_class(cls, action_dict: Dict) -> Type['BaseAction']:\n from shortcuts.actions import IfAction, ElseAction, EndIfAction\n return cls._get_action_class_by_wf_control_flow(\n from_classes=(IfAction, ElseAction, EndIfAction),\n action_dict=action_dict,\n )", "title": "" }, { "docid": "8c73823e9b072d1e1a8b38963da3fe00", "score": "0.5072288", "text": "def get_parser_action_by_dest(parser, dest):\n for act in parser._actions:\n if act.dest == dest:\n return act\n return None", "title": "" }, { "docid": "8d5dfc9ba68a88b9a7ecb17a809ffae3", "score": "0.5050585", "text": "def find_next_action(self, child):\r\n # The target action must be tested for membership against the\r\n # current actions on the menu itself, since this method may be\r\n # called after a child is added, but before the actions for the\r\n # child have actually been added to the menu.\r\n index = self.index_of(child)\r\n if index != -1:\r\n actions = set(self.widget().actions())\r\n for child in self.children()[index + 1:]:\r\n target = None\r\n if isinstance(child, QtMenu):\r\n target = child.widget().menuAction()\r\n elif isinstance(child, QtAction):\r\n target = child.widget()\r\n elif isinstance(child, QtActionGroup):\r\n acts = child.actions()\r\n target = acts[0] if acts else None\r\n if target in actions:\r\n return target", "title": "" }, { "docid": "8cc41db5d76459419feaa9a68ec7304b", "score": "0.50498277", "text": "def find_next_action(self, child):\r\n # The target action must be tested for membership against the\r\n # current actions on the menu bar itself, since this method may\r\n # be called after a child is added, but before the actions for\r\n # the child have actually added to the menu.\r\n index = self.index_of(child)\r\n if index != -1:\r\n actions = set(self.widget().actions())\r\n for child in self.children()[index + 1:]:\r\n if isinstance(child, QtMenu):\r\n target = child.widget().menuAction()\r\n if target in actions:\r\n return target", "title": "" }, { "docid": "09ae66a8ed50112ec5db4d7aa760dcbc", "score": "0.5049186", "text": "def get_a_action(base, action_id):\n res = base.client.get_obj('actions', action_id)\n return res['body']", "title": "" }, { "docid": "a3b095f65d13cb0620ec1b7779e0290b", "score": 
"0.5043162", "text": "def init_module():\n original_module, module_path, handler_name = import_original_module()\n try:\n handler = original_module\n for name in module_path.split('.')[1:] + [handler_name]:\n handler = getattr(handler, name)\n return handler\n except AttributeError:\n raise AttributeError(\n 'No handler {} in module {}'.format(handler_name, module_path)\n )", "title": "" }, { "docid": "c8b30afe55c830823ec62b60c3dede43", "score": "0.503716", "text": "def find_child(self, action):\n\n candi = [c for c in self.children if c.action == action]\n if candi:\n return np.random.choice(candi)\n else:\n return None", "title": "" }, { "docid": "b3f815448f1ce52aec5ffb9c03c8120f", "score": "0.5027835", "text": "async def handle_page_action(self, request=None):\n if self.use_fastapi:\n msg = await self.get_request_json(request)\n else:\n msg = self.get_request_json(request)\n if 'args' not in msg:\n msg['args'] = []\n response = callbackRegistry.make_callback(msg['cb_uuid'], msg['args'])\n if response is not None:\n return self.jsonify(response)\n else:\n return ErrorResponse(\"No Action\", error_type=\"204\").as_dict()", "title": "" }, { "docid": "97d68799784de9a2633322158f69e6a1", "score": "0.5025627", "text": "def get_action(self, input_val, last_output):\n\n for action in self.actions:\n if action.read(input_val, last_output):\n return action\n\n return None", "title": "" }, { "docid": "cff35063af55ddee3a82e6858fca8a29", "score": "0.50181794", "text": "def action(self, name):\n return self.actions.get(name, None)", "title": "" }, { "docid": "147312187267c5f267e7b78cfed7c79a", "score": "0.50167", "text": "def execute_action(self, action):\n pass", "title": "" }, { "docid": "32f807d9618dcfcd467955683f477dbe", "score": "0.50139964", "text": "def check_action(self, action):\r\n if action == options[8]:\r\n self.action_activation(action, '')\r\n elif action == options[10]:\r\n self.action_activation(action, '')\r\n elif action == options[11]:\r\n self.action_activation(action, '')\r\n elif action == options[1]:\r\n self.disconnect_client()\r\n else:\r\n arguments = self.get_arguments()\r\n self.action_activation(action, arguments)", "title": "" }, { "docid": "55e762d4476b2bf70cf840da118caa4d", "score": "0.5004226", "text": "def get_filename_handler(name):\n return HANDLERS[name or DEFAULT_HANDLER]", "title": "" }, { "docid": "88894b4e41224f149512ea7b2924b21b", "score": "0.50022376", "text": "def dialog_action_functions(action: str):\n action_mappings = {\n SLACK_COMMAND_ASSIGN_ROLE_SLUG: [handle_assign_role_action],\n SLACK_COMMAND_ENGAGE_ONCALL_SLUG: [handle_engage_oncall_action],\n SLACK_COMMAND_REPORT_EXECUTIVE_SLUG: [handle_executive_report_create],\n SLACK_COMMAND_REPORT_TACTICAL_SLUG: [handle_tactical_report_create],\n }\n\n # this allows for unique action blocks e.g. 
invite-user or invite-user-1, etc\n for key in action_mappings.keys():\n if key in action:\n return action_mappings[key]\n return []", "title": "" }, { "docid": "e198df0a4cdda142402710f4ddcd78a5", "score": "0.5001393", "text": "def index(self, action):\n return self.actions.index(action)", "title": "" }, { "docid": "47d2b5a535a7ad88e779ad7f955c5b50", "score": "0.49962696", "text": "def find_next_action(self, child):\r\n index = self.index_of(child)\r\n if index != -1:\r\n for child in self.children()[index + 1:]:\r\n target = None\r\n if isinstance(child, (WxMenu, WxAction)):\r\n target = child.widget()\r\n elif isinstance(child, WxActionGroup):\r\n acts = child.actions()\r\n target = acts[0] if acts else None\r\n if target is not None:\r\n return target", "title": "" }, { "docid": "cbfc2b987989642385e6083ab60ac432", "score": "0.49944967", "text": "def get_action_for(self, dest):\n return self._get_or_none(dest, \"action\")", "title": "" }, { "docid": "efb91cc9e3926378874f962278eb5f41", "score": "0.49930412", "text": "def on_action_set_handler(self, content):\r\n self._handler = content['handler']\r\n self.refresh_traits_widget()", "title": "" }, { "docid": "dc11547c469136d71a13c44b839e6c29", "score": "0.49928248", "text": "def get_action(self):\n pass", "title": "" }, { "docid": "ab7631179b32c5c5413db27e521ea41f", "score": "0.4988951", "text": "def handler(request_id, action_id=0):\r\n def define_handler(function):\r\n _handlers[request_id, action_id] = function\r\n return function\r\n return define_handler", "title": "" }, { "docid": "57647e671589b55c67f087b130f8a577", "score": "0.49852052", "text": "def has_method(self, method_action):\n method = None\n try:\n method = self.lookup_method(method_action)\n except KubernetesException:\n try:\n method = self.lookup_method(method_action, namespace='namespace')\n except KubernetesException:\n return False\n return method is not None", "title": "" }, { "docid": "7c23d54dcbaf424da4f24a299d272628", "score": "0.4971053", "text": "def find_next_action(self, child):\r\n index = self.index_of(child)\r\n if index != -1:\r\n for child in self.children()[index + 1:]:\r\n target = None\r\n if isinstance(child, WxAction):\r\n target = child.widget()\r\n elif isinstance(child, WxActionGroup):\r\n acts = child.actions()\r\n target = acts[0] if acts else None\r\n if target is not None:\r\n return target", "title": "" }, { "docid": "0b4423c879d50e6b58615a460286ea22", "score": "0.497046", "text": "def __get_action(self, name):\n if name.startswith(self.GET_ACTION_KEY):\n return self.GET_ACTION_KEY\n elif name.startswith(self.SET_ACTION_KEY):\n return self.SET_ACTION_KEY\n else:\n raise KeyError('%s is not a member of the object.' 
% name)", "title": "" }, { "docid": "107702bca6089553527e2bd16467047a", "score": "0.49644879", "text": "def handle_action(self, param):\n # Get the action that we are supposed to execute for this App Run\n action = self.get_action_identifier()\n ret_val = phantom.APP_SUCCESS\n\n self.debug_print(\"action_id: {}\".format(self.get_action_identifier()))\n\n # Dictionary mapping each action with its corresponding actions\n action_mapping = {\n \"test_connectivity\": self._handle_test_connectivity,\n \"get_policy_event\": self._handle_get_policy_event,\n \"get_message\": self._handle_get_message,\n \"remediate_message\": self._handle_remediate_message,\n \"list_policy_events\": self._handle_list_policy_events,\n \"list_messages\": self._handle_list_messages,\n \"on_poll\": self._handle_on_poll,\n }\n\n if action in action_mapping.keys():\n action_function = action_mapping[action]\n ret_val = action_function(param)\n\n return ret_val", "title": "" }, { "docid": "3df72f6e72f1988da64dc31ca491bc06", "score": "0.49156085", "text": "def dispatch_request(self):\n try:\n endpoint, values = self.match_request()\n return self.view_functions[endpoint](**values)\n except HTTPException, e:\n handler = self.error_handlers.get(e.code)\n if handler is None:\n return e\n return handler(e)\n except Exception, e:\n handler = self.error_handlers.get(500)\n if self.debug or handler is None:\n raise\n return handler(e)", "title": "" }, { "docid": "524e49df53c9ca10b4797097002078ff", "score": "0.49155393", "text": "def _execute_catching_error(self, handler, evt):\n try:\n return handler(evt)\n except Exception:\n self.logger.exception(f'Error in handler while processing {evt}')\n return None", "title": "" }, { "docid": "f7ddfe5e9023849e57f0fe46738cf1b9", "score": "0.49130142", "text": "def process(self, packet):\n packet_type = packet.get_header().get_type()\n action = self.lookup(packet_type)\n return action(packet) if (action is not None) else None", "title": "" }, { "docid": "57c333e5315ec4cecf20efeebfc18a35", "score": "0.4910071", "text": "def get_key(action: str) -> Any:\n if action in keymapping:\n result = keymapping[action]\n elif action in keymapping['direction'].values():\n directions = {v: k for k, v in keymapping['direction'].items()}\n result = int(directions[action])\n return result", "title": "" }, { "docid": "cbfa3275283c3be5d2da807c8852faa3", "score": "0.4908421", "text": "def handleRequest(self, xml):\r\n handler = None\r\n iq = parseXml(xml)\r\n for queryString, method in self.service.iqHandlers.iteritems():\r\n if xpath.internQuery(queryString).matches(iq):\r\n handler = getattr(self.service, method)\r\n\r\n if handler:\r\n d = defer.maybeDeferred(handler, iq)\r\n else:\r\n d = defer.fail(NotImplementedError())\r\n\r\n return d", "title": "" }, { "docid": "e7c5464e8990fbc353520f386370c34c", "score": "0.4898006", "text": "def receive_action(self, action, content):\r\n if self._initialized:\r\n dispatch_action(self, action, content)", "title": "" }, { "docid": "1967278d1bde3974db8c3c9ca054ed37", "score": "0.48838708", "text": "def action(self, action, id=None, data = None):\r\n\r\n #Define the actions that are possible, and map them to functions\r\n action_dict = {\r\n 'get' : self.get,\r\n 'post' : self.post,\r\n 'update' : self.update,\r\n 'delete' : self.delete,\r\n }\r\n #Check to see if action is possible\r\n if action not in action_dict:\r\n error = \"Could not find action {0} in registered actions.\".format(action)\r\n log.info(error)\r\n raise InvalidValueException(error)\r\n\r\n #Check to 
see if id is provided for update and delete\r\n if action in ['update', 'delete'] and id is None:\r\n error = \"Need to provide an id along with action {0}.\".format(action)\r\n log.info(error)\r\n raise InvalidValueException(error)\r\n\r\n #check to see if data is provided for update and post\r\n if action in ['update', 'post'] and data is None:\r\n error = \"Need to provide data along with action {0}.\".format(action)\r\n log.info(error)\r\n raise InvalidValueException(error)\r\n\r\n #Perform the action\r\n result = action_dict[action](data=data, id=id)\r\n return result", "title": "" }, { "docid": "3f900525da7be2a66bc97d9f46087a14", "score": "0.48797157", "text": "def of(action, sys, tsk):\n for event_type in EventType:\n if event_type.act is action and event_type.sys is sys and event_type.tsk is tsk:\n return event_type\n raise KeyError(\"Cannot find event type for action={}, scope={}, task={}\".format(action, sys, tsk))", "title": "" }, { "docid": "bfc3f5dbb5c949d86eeb48d67e69d530", "score": "0.486273", "text": "def _get_handler(self, exec_server):\n if exec_server == 'proxy':\n return StorletProxyHandler\n elif exec_server == 'object':\n return StorletObjectHandler\n else:\n raise ValueError(\n 'configuration error: execution_server must be either proxy'\n ' or object but is %s' % exec_server)", "title": "" }, { "docid": "2558f24da30f65d641643ef45c206759", "score": "0.48576942", "text": "def find_matching_url(self, request):\n allowed_methods = set()\n for (regex, re_match, methods, callback, status) in self.routes:\n m = re_match(request.path)\n if m:\n if request.method in methods:\n return (callback, m.groupdict(), status)\n allowed_methods.update(methods)\n if allowed_methods:\n raise MethodNotAllowed(\"The HTTP request method '%s' is \"\n \"not supported.\" % request.method)\n raise NotFound(\"Sorry, nothing here.\")", "title": "" }, { "docid": "9c7f9307a4352024d34ebfedac40e24b", "score": "0.48572624", "text": "def act(self, action) -> None:\n pass", "title": "" } ]
3b6c297c202100584aa08d25d0940cb9
Uses GATK HaplotypeCaller to identify SNPs and Indels and writes a gVCF. Calls per-sample genotyper to genotype gVCF.
[ { "docid": "8c96fdfc9302cbcaa2784a0bd993bae6", "score": "0.6022498", "text": "def haplotype_caller(job, shared_ids, input_args):\n work_dir = job.fileStore.getLocalTempDir()\n input_files = ['ref.fa', 'ref.fa.fai', 'ref.dict', 'toil.bam', 'toil.bam.bai']\n read_from_filestore_hc(job, work_dir, shared_ids, *input_files)\n output = '%s.raw.BOTH%s.gvcf' % (input_args['uuid'],\n input_args['suffix'])\n \n # Call GATK -- HaplotypeCaller\n command = ['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY', # RISKY! (?) See #189\n '-nct', str(input_args['cpu_count']),\n '-R', 'ref.fa',\n '-T', 'HaplotypeCaller',\n '--genotyping_mode', 'Discovery',\n '--emitRefConfidence', 'GVCF',\n '-I', 'toil.bam',\n '-o', output,\n '-variant_index_type', 'LINEAR',\n '-variant_index_parameter', '128000',\n '--annotation', 'QualByDepth',\n '--annotation', 'DepthPerSampleHC',\n '--annotation', 'FisherStrand',\n '--annotation', 'ReadPosRankSumTest']\n try:\n inputs=input_files\n outputs={output: None}\n docker_call(work_dir = work_dir,\n env={'JAVA_OPTS':'-Xmx%sg' % input_args['memory']},\n parameters = command,\n tool = 'quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',\n inputs=inputs,\n outputs=outputs,\n sudo = input_args['sudo'])\n except:\n sys.stderr.write(\"Running haplotype caller with %s in %s failed.\" % (\n \" \".join(command), work_dir))\n raise\n\n # Update fileStore and spawn child job\n shared_ids[output] = job.fileStore.writeGlobalFile(os.path.join(work_dir, output))\n\n # upload gvcf\n upload_or_move_hc(work_dir, input_args, output)\n\n # call variants prior to vqsr\n job.addChildJobFn(genotype_gvcf, shared_ids, input_args, cores = input_args['cpu_count'])", "title": "" } ]
[ { "docid": "af8695a76a8c8ada581f2704922dd564", "score": "0.61019504", "text": "def genotype_gvcf(job, shared_ids, input_args):\n\n work_dir = job.fileStore.getLocalTempDir()\n input_files = ['%s.raw.BOTH%s.gvcf' % (input_args['uuid'],\n input_args['suffix']),\n 'ref.fa', 'ref.fa.fai', 'ref.dict']\n read_from_filestore_hc(job, work_dir, shared_ids, *input_files)\n output = 'unified.raw.BOTH.gatk.vcf'\n \n command = ['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY', # RISKY! (?) See #189\n '-nt', str(input_args['cpu_count']),\n '-R', 'ref.fa',\n '-T', 'GenotypeGVCFs',\n '--variant', '%s.raw.BOTH.gatk.gvcf' % input_args['uuid'],\n '--out', output,\n '-stand_emit_conf', '10.0',\n '-stand_call_conf', '30.0']\n\n try:\n inputs=input_files\n outputs={output: None}\n docker_call(work_dir = work_dir,\n env={'JAVA_OPTS':'-Xmx%sg' % input_args['memory']},\n parameters = command,\n tool = 'quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',\n inputs=inputs,\n outputs=outputs,\n sudo = input_args['sudo'])\n except:\n sys.stderr.write(\"Running GenotypeGVCFs with %s in %s failed.\" % (\n \" \".join(command), work_dir))\n raise\n\n # Update fileStore and spawn child job\n shared_ids[output] = job.fileStore.writeGlobalFile(os.path.join(work_dir, output))\n\n # run vqsr\n job.addChildJobFn(vqsr_snp, shared_ids, input_args, cores = input_args['cpu_count'])\n job.addChildJobFn(vqsr_indel, shared_ids, input_args, cores = input_args['cpu_count'])", "title": "" }, { "docid": "8f0cc31c2c01afb8aae690cb15e1c3ad", "score": "0.6015409", "text": "def generate(\n input_maf,\n reference_fasta,\n gbcms_path,\n patient_id,\n standard_bam,\n duplex_bam,\n simplex_bam,\n filter_duplicate,\n fragment_count,\n mapping_quality,\n threads\n):\n logger_output = pathlib.Path.cwd().joinpath(\"genotype_variants.log\")\n fh = logging.FileHandler(logger_output)\n formatter = logging.Formatter(\n fmt=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n datefmt=\"%m/%d/%Y %I:%M:%S %p\",\n )\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n logger.info(\"====================================================\")\n logger.info(\">>> Running genotype_variants for small variants <<<\")\n logger.info(\"====================================================\")\n t1_start = time.perf_counter()\n t2_start = time.process_time()\n if standard_bam or duplex_bam or simplex_bam:\n pass\n else:\n logger.error(\n \"Required to specify at-least one input BAM file option. 
Please refer to the README for more information\"\n )\n exit(1)\n\n logger.info(\"small_variants: Patient ID: %s\", patient_id)\n logger.info(\"small_variants: Input MAF: %s\", input_maf)\n logger.info(\"small_variants: Reference FASTA: %s\", reference_fasta)\n if standard_bam:\n logger.info(\"small_variants: Standard BAM: %s\", standard_bam)\n if duplex_bam:\n logger.info(\"small_variants: Duplex BAM: %s\", duplex_bam)\n if simplex_bam:\n logger.info(\"small_variants: Simplex BAM: %s\", simplex_bam)\n logger.info(\"small_variants: GetBaseCountMultiSample -> Path: %s\", gbcms_path)\n logger.info(\n \"small_variants: GetBaseCountMultiSample -> Filter Duplicate: %s\",\n str(filter_duplicate),\n )\n logger.info(\n \"small_variants: GetBaseCountMultiSample -> Fragment Count: %s\",\n str(fragment_count),\n )\n logger.info(\n \"small_variants: GetBaseCountMultiSample -> Mapping Quality: %s\",\n str(mapping_quality),\n )\n logger.info(\"small_variants: GetBaseCountMultiSample -> Threads: %s\", str(threads))\n\n # Run GetBaseMultisampleCount for each available bam file\n std_output_maf = None\n duplex_output_maf = None\n simplex_output_maf = None\n p1, p2, p3 = None, None, None\n if standard_bam:\n btype = \"STANDARD\"\n (cmd, std_output_maf) = generate_gbcms_cmd(\n input_maf,\n btype,\n reference_fasta,\n gbcms_path,\n patient_id,\n standard_bam,\n filter_duplicate,\n fragment_count,\n mapping_quality,\n threads,\n )\n p1 = run_cmd(cmd)\n logger.info(\n \"small_variants: Done running gbcms on %s and data has been written to %s\",\n standard_bam, std_output_maf,\n )\n\n if duplex_bam:\n btype = \"DUPLEX\"\n (cmd, duplex_output_maf) = generate_gbcms_cmd(\n input_maf,\n btype,\n reference_fasta,\n gbcms_path,\n patient_id,\n duplex_bam,\n filter_duplicate,\n fragment_count,\n mapping_quality,\n threads,\n )\n p2 = run_cmd(cmd)\n logger.info(\n \"small_variants: Done running gbcms on %s and data has been written to %s\",\n duplex_bam, duplex_output_maf,\n )\n\n if simplex_bam:\n btype = \"SIMPLEX\"\n (cmd, simplex_output_maf) = generate_gbcms_cmd(\n input_maf,\n btype,\n reference_fasta,\n gbcms_path,\n patient_id,\n simplex_bam,\n filter_duplicate,\n fragment_count,\n mapping_quality,\n threads,\n )\n p3 = run_cmd(cmd)\n logger.info(\n \"small_variants: Done running gbcms on %s and data has been written to %s\",\n simplex_bam, simplex_output_maf,\n )\n\n # merge if duplex and simplex bam present\n if duplex_bam and simplex_bam:\n merge_maf(patient_id, input_maf, duplex_output_maf, simplex_output_maf)\n \n logger.info(\"small_variants: Completed processing based on the given instructions\")\n\n t1_stop = time.perf_counter()\n t2_stop = time.process_time()\n logger.info(\"--------------------------------------------------\")\n logger.info(\"Elapsed time: %.1f [min]\" % ((t1_stop - t1_start) / 60))\n logger.info(\"CPU process time: %.1f [min]\" % ((t2_stop - t2_start) / 60))\n logger.info(\"--------------------------------------------------\")\n return", "title": "" }, { "docid": "c24dcb5737823d049a9e414a18804cef", "score": "0.5657658", "text": "def run_whatshap(\n variant_file: str,\n output: TextIO = sys.stdout,\n chromosomes: Optional[List[str]] = None,\n indels: bool = True,\n mapping_quality: int = 20,\n max_coverage: int = 15,\n ped: Optional[str] = None,\n recombrate: float = 1.26,\n genmap: Optional[str] = None,\n genetic_haplotyping: bool = True,\n default_gq: int = 30,\n use_ped_samples: bool = False,\n tag: str = \"PS\",\n):\n timers = StageTimer()\n logger.info(f\"This is WhatsHap 
{__version__} running under Python {platform.python_version()}\")\n numeric_sample_ids = NumericSampleIds()\n command_line: Optional[str]\n command_line = None\n read_merger = DoNothingReadMerger()\n with ExitStack() as stack:\n try:\n vcf_writer = stack.enter_context(\n PhasedVcfWriter(\n command_line=command_line,\n in_path=variant_file,\n out_file=output,\n tag=tag,\n indels=indels,\n )\n )\n except (OSError, VcfError) as e:\n raise CommandLineError(e)\n\n phased_input_reader = stack.enter_context(\n PhasedInputReader(\n variant_file,\n numeric_sample_ids,\n mapq_threshold=mapping_quality,\n indels=indels,\n )\n )\n show_phase_vcfs = phased_input_reader.has_vcfs\n\n # Only read genotype likelihoods from VCFs when distrusting genotypes\n # vcf_reader = stack.enter_context(\n # VcfReader(variant_file, indels=indels)\n # )\n # samples = vcf_reader.samples\n\n # if --use-ped-samples is set, use only samples from PED file\n if ped and use_ped_samples:\n samples = PedReader(ped).samples()\n\n # raise_if_any_sample_not_in_vcf(vcf_reader, samples)\n\n # recombination_cost_computer = make_recombination_cost_computer(ped, genmap, recombrate)\n\n families, family_trios = setup_families(samples, ped, max_coverage)\n del samples\n for trios in family_trios.values():\n for trio in trios:\n # Ensure that all mentioned individuals have a numeric id\n _ = numeric_sample_ids[trio.child]\n\n read_list = None\n\n with timers(\"parse_phasing_vcfs\"):\n # TODO should this be done in PhasedInputReader.__init__?\n phased_input_reader.read_vcfs()\n\n superreads: Dict[str, ReadSet]\n components: Dict\n for variant_table in timers.iterate(\"parse_vcf\", vcf_reader):\n chromosome = variant_table.chromosome\n if (not chromosomes) or (chromosome in chromosomes):\n logger.info(\"======== Working on chromosome %r\", chromosome)\n else:\n logger.info(\n \"Leaving chromosome %r unchanged (present in VCF but not requested by option --chromosome)\",\n chromosome,\n )\n with timers(\"write_vcf\"):\n superreads, components = dict(), dict()\n vcf_writer.write(chromosome, superreads, components)\n continue\n\n # These two variables hold the phasing results for all samples\n superreads, components = dict(), dict()\n\n # Iterate over all families to process, i.e. a separate DP table is created\n # for each family.\n # TODO: Can the body of this loop be factored out into a phase_family function?\n for representative_sample, family in sorted(families.items()):\n if len(family) == 1:\n logger.info(\"---- Processing individual %s\", representative_sample)\n else:\n logger.info(\"---- Processing family with individuals: %s\", \",\".join(family))\n max_coverage_per_sample = max(1, max_coverage // len(family))\n logger.info(\"Using maximum coverage per sample of %dX\", max_coverage_per_sample)\n trios = family_trios[representative_sample]\n assert len(family) == 1 or len(trios) > 0\n\n homozygous_positions, phasable_variant_table = find_phaseable_variants(\n family, trios, variant_table\n )\n\n # Get the reads belonging to each sample\n readsets = dict() # TODO this could become a list\n for sample in family:\n with timers(\"read_bam\"):\n readset, vcf_source_ids = phased_input_reader.read(\n chromosome, phasable_variant_table.variants, sample\n )\n\n # TODO: Read selection done w.r.t. 
all variants, where using heterozygous\n # variants only would probably give better results.\n with timers(\"select\"):\n readset = readset.subset(\n [i for i, read in enumerate(readset) if len(read) >= 2]\n )\n logger.info(\n \"Kept %d reads that cover at least two variants each\", len(readset)\n )\n merged_reads = read_merger.merge(readset)\n selected_reads = select_reads(\n merged_reads,\n max_coverage_per_sample,\n preferred_source_ids=vcf_source_ids,\n )\n\n readsets[sample] = selected_reads\n\n all_reads = merge_readsets(readsets)\n\n # Determine which variants can (in principle) be phased\n accessible_positions = sorted(all_reads.get_positions())\n logger.info(\n \"Variants covered by at least one phase-informative \"\n \"read in at least one individual after read selection: %d\",\n len(accessible_positions),\n )\n if len(family) > 1 and genetic_haplotyping:\n # In case of genetic haplotyping, also retain all positions homozygous\n # in at least one individual (because they might be phased based on genotypes)\n accessible_positions = sorted(\n set(accessible_positions).union(homozygous_positions)\n )\n logger.info(\n \"Variants either covered by phase-informative read or homozygous \"\n \"in at least one individual: %d\",\n len(accessible_positions),\n )\n\n # Keep only accessible positions\n phasable_variant_table.subset_rows_by_position(accessible_positions)\n assert len(phasable_variant_table.variants) == len(accessible_positions)\n\n pedigree = create_pedigree(\n default_gq,\n family,\n numeric_sample_ids,\n phasable_variant_table,\n trios,\n )\n # recombination_costs = recombination_cost_computer.compute(accessible_positions)\n\n # Finally, run phasing algorithm\n # with timers(\"phase\"):\n # problem_name = \"MEC\" if len(family) == 1 else \"PedMEC\"\n # logger.info(\n # \"Phasing %d sample%s by solving the %s problem ...\",\n # len(family),\n # plural_s(len(family)),\n # problem_name,\n # )\n\n # dp_table: Union[HapChatCore, PedigreeDPTable]\n\n # dp_table = PedigreeDPTable(\n # all_reads,\n # recombination_costs,\n # pedigree,\n # accessible_positions,\n # )\n\n # superreads_list, transmission_vector = dp_table.get_super_reads()\n # logger.info(\"%s cost: %d\", problem_name, dp_table.get_optimal_cost())\n\n # with timers(\"components\"):\n # overall_components = compute_overall_components(\n # accessible_positions,\n # all_reads,\n # family,\n # genetic_haplotyping,\n # homozygous_positions,\n # numeric_sample_ids,\n # superreads_list,\n # )\n # log_component_stats(overall_components, len(accessible_positions))\n # Superreads in superreads_list are in the same order as individuals were added to the pedigree\n # for sample, sample_superreads in zip(family, superreads_list):\n # superreads[sample] = sample_superreads\n # assert len(sample_superreads) == 2\n # assert (\n # sample_superreads[0].sample_id\n # == sample_superreads[1].sample_id\n # == numeric_sample_ids[sample]\n # )\n # # identical for all samples\n # components[sample] = overall_components\n\n logger.debug(\"Chromosome %r finished\", chromosome)\n\n log_time_and_memory_usage(timers, show_phase_vcfs=show_phase_vcfs)", "title": "" }, { "docid": "dccadf8640442c944be44c3ea58b6b6d", "score": "0.5654805", "text": "def get_genotype(self,index=0,min_gq=0,min_per_ad=float(0),min_tot_dp=0,het_binom_p=False,return_flags=False): #### working on function to accomodate hets\n\n\t\tgenotype = self.genotypes[index]\n\t\tparsed_genotype = genotype.split(':')[0]\n\n\t\t#print(\"The following is the parsed 
genotype:\")\n\t\t#print(parsed_genotype)\n\t\t#print('The following is the genotype:')\n\t\t#print(genotype)\n\n\t\t#Flag that indicates if the sample is diploid.\n\t\tdip_flag = False\n\t\tif len(parsed_genotype) == 3:\n\t\t\tdip_flag = True\n\n\n\t\tparsed_genotype_list = [parsed_genotype,0,0,0,0]\n\n\t\t#GQ Filter\n\t\ttry:\n\t\t\tgq = self.get_GQ(parsed_genotype,index)\n\t\t\tif int(gq) < int(min_gq):\n\t\t\t\tparsed_genotype = '.'\n\t\t\t\tif dip_flag == True:\n\t\t\t\t\tparsed_genotype = './.'\n\t\t\t\tparsed_genotype_list[1] = 1\n\t\texcept:\n\t\t\tpass\n\n\t\t#AD Filter\n\t\ttry:\n\t\t\t#Compute the right allele fraction.\n\t\t\tif dip_flag == True:\n\t\t\t\tpercent_ad = self.get_percent_AD_dip(index)\n\t\t\telse:\n\t\t\t\tpercent_ad = self.get_percent_AD_hap(index)\n\t\t\t#Apply the filter\n\t\t\tif float(percent_ad) < min_per_ad:\n\t\t\t\tparsed_genotype = '.'\n\t\t\t\tif dip_flag == True:\n\t\t\t\t\tparsed_genotype = './.'\n\t\t\t\tparsed_genotype_list[2] = 1\n\t\texcept:\n\t\t\tpass\n\n\t\t#Depth coverage filter\n\t\ttry:\n\t\t\ttotal_dp = self.get_total_DP(index)\n\t\t\tif int(total_dp) < int(min_tot_dp):\n\t\t\t\tparsed_genotype = '.'\n\t\t\t\tif dip_flag == True:\n\t\t\t\t\tparsed_genotype = './.'\n\t\t\t\tparsed_genotype_list[3] = 1\n\t\texcept:\n\t\t\tpass\n\n\t\t#Binomial AD test filter\n\t\thet_flag = self.is_het(index)\n\t\tif het_binom_p and het_flag:\n\t\t\ttry:\n\t\t\t\tp = self.get_AD_binomial_p(index)\n\t\t\t\tif p < float(het_binom_p):\n\t\t\t\t\tparsed_genotype = './.'\n\t\t\t\t\tparsed_genotype_list[4] = 1\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\tif not return_flags:\n\t\t\treturn parsed_genotype\n\t\telse:\n\t\t\tparsed_genotype_list[0] = parsed_genotype\n\t\t\treturn parsed_genotype_list", "title": "" }, { "docid": "b1cd67a2425f16b1b54f30f94dadd1b6", "score": "0.5609641", "text": "def add_genpept_to_genbank(gb_file, gp_file, output_gb, feature_gene, prot_file_type='genpept'):\n gb_parsed = SeqIO.read(gb_file, 'gb') \n cds_loc = [i for i in gb_parsed.features if i.type == 'CDS']\n if len(cds_loc) == 1:\n start = int(cds_loc[0].location.start)\n end = int(cds_loc[0].location.end)\n else:\n raise IOError('CDS Sequence not available or incorrect file format')\n domains = get_domain_details_from_protein(gp_file, file_type=prot_file_type)\n feature_count = 0\n for domain in domains:\n gb_parsed.features.append(\n SeqFeature(\n FeatureLocation(\n start + (domain['start'])*3, \n start + (domain['end']+1)*3,\n ), type=\"misc_feature\",\n qualifiers=OrderedDict([\n ('gene', [feature_gene]),\n ('note', [domain['feature']])\n ])\n ))\n feature_count += 1\n print('{0} features added'.format(feature_count))\n with open(output_gb, 'w') as output_fh:\n SeqIO.write(gb_parsed, output_fh, 'gb')", "title": "" }, { "docid": "7de33c878faee4305318544bfcb743ec", "score": "0.5574189", "text": "def init():\n global CHR20_FASTA\n global CHR20_BAM\n global CHR20_BAM_FIRST_HALF\n global CHR20_BAM_SECOND_HALF\n global NOCHR20_BAM\n global CHR20_CRAM\n global GOLDEN_TRAINING_EXAMPLES\n global GOLDEN_CALLING_CANDIDATES\n global GOLDEN_CANDIDATE_POSITIONS\n global GOLDEN_CALLING_EXAMPLES\n global CONFIDENT_REGIONS_BED\n global TRUTH_VARIANTS_VCF\n global TRUTH_VARIANTS_VCF_WITH_TYPES\n global GOLDEN_POSTPROCESS_INPUT\n global GOLDEN_POSTPROCESS_OUTPUT\n global GOLDEN_POSTPROCESS_OUTPUT_PASS_ONLY\n global GOLDEN_POSTPROCESS_OUTPUT_COMPRESSED\n global GOLDEN_POSTPROCESS_GVCF_INPUT\n global GOLDEN_POSTPROCESS_GVCF_OUTPUT\n global GOLDEN_POSTPROCESS_GVCF_OUTPUT_COMPRESSED\n global 
GOLDEN_MAKE_EXAMPLES_RUN_INFO\n global WS_ALLELE_COUNT_LINEAR_MODEL\n global WS_ALLELE_COUNT_LINEAR_MODEL_PCKL\n global WS_VARIANT_READS_THRESHOLD_MODEL\n global GOLDEN_VCF_CANDIDATE_IMPORTER_POSTPROCESS_INPUT\n global GOLDEN_VCF_CANDIDATE_IMPORTER_POSTPROCESS_OUTPUT\n\n CHR20_FASTA = deepvariant_testdata('input/ucsc.hg19.chr20.unittest.fasta.gz')\n CHR20_BAM = deepvariant_testdata('input/NA12878_S1.chr20.10_10p1mb.bam')\n # # Here is how \"NA12878_S1.chr20.10_10p1mb.first_half.bam\"\n # # and \"NA12878_S1.chr20.10_10p1mb.second_half.bam\" are split\n # # from NA12878_S1.chr20.10_10p1mb.bam.\n # READS_FIRST_HALF=${TESTDATA_DIR}/NA12878_S1.chr20.10_10p1mb.first_half.bam\n # READS_SECOND_HALF=${TESTDATA_DIR}/NA12878_S1.chr20.10_10p1mb.second_half.bam\n # READS=${TESTDATA_DIR}/NA12878_S1.chr20.10_10p1mb.bam\n # samtools view -H ${READS} > /tmp/f1.sam\n # cp /tmp/f1.sam /tmp/f2.sam\n # # Because ${READS} has total of 52035 lines, we split in roughly half.\n # samtools view ${READS} | head -26000 >> /tmp/f1.sam\n # samtools view ${READS} | tail -26035 >> /tmp/f2.sam\n # samtools view -S -b /tmp/f1.sam > ${READS_FIRST_HALF}\n # samtools view -S -b /tmp/f2.sam > ${READS_SECOND_HALF}\n # samtools index ${READS_FIRST_HALF}\n # samtools index ${READS_SECOND_HALF}\n CHR20_BAM_FIRST_HALF = deepvariant_testdata(\n 'input/NA12878_S1.chr20.10_10p1mb.first_half.bam')\n CHR20_BAM_SECOND_HALF = deepvariant_testdata(\n 'input/NA12878_S1.chr20.10_10p1mb.second_half.bam')\n # # Here is how the \"HG002_NIST_150bp_downsampled_30x.chr20.10_10p1mb.bam\"\n # # file was created.\n # samtools view -hb HG002_NIST_150bp_downsampled_30x.bam \\\n # 20:10,000,000-10,100,000 \\\n # > HG002_NIST_150bp_downsampled_30x.chr20.10_10p1mb.bam\n # samtools index HG002_NIST_150bp_downsampled_30x.chr20.10_10p1mb.bam\n NOCHR20_BAM = deepvariant_testdata(\n 'input/HG002_NIST_150bp_downsampled_30x.chr20.10_10p1mb.bam')\n CHR20_CRAM = deepvariant_testdata('input/NA12878_S1.chr20.10_10p1mb.cram')\n GOLDEN_TRAINING_EXAMPLES = deepvariant_testdata(\n 'golden.training_examples.tfrecord.gz')\n GOLDEN_CALLING_CANDIDATES = deepvariant_testdata(\n 'golden.calling_examples.tfrecord.gz')\n GOLDEN_CANDIDATE_POSITIONS = deepvariant_testdata(\n 'golden.candidate_positions')\n GOLDEN_CALLING_EXAMPLES = deepvariant_testdata(\n 'golden.calling_examples.tfrecord.gz')\n CONFIDENT_REGIONS_BED = deepvariant_testdata(\n 'input/test_nist.b37_chr20_100kbp_at_10mb.bed')\n TRUTH_VARIANTS_VCF = deepvariant_testdata(\n 'input/test_nist.b37_chr20_100kbp_at_10mb.vcf.gz')\n TRUTH_VARIANTS_VCF_WITH_TYPES = deepvariant_testdata(\n 'input/with_types.test_nist.b37_chr20_4kbp_at_10mb.vcf.gz')\n GOLDEN_POSTPROCESS_INPUT = deepvariant_testdata(\n 'golden.postprocess_single_site_input.tfrecord.gz')\n GOLDEN_POSTPROCESS_OUTPUT = deepvariant_testdata(\n 'golden.postprocess_single_site_output.vcf')\n GOLDEN_POSTPROCESS_OUTPUT_PASS_ONLY = deepvariant_testdata(\n 'golden.postprocess_single_site_output.pass_only.vcf')\n GOLDEN_POSTPROCESS_OUTPUT_COMPRESSED = deepvariant_testdata(\n 'golden.postprocess_single_site_output.vcf.gz')\n GOLDEN_POSTPROCESS_GVCF_INPUT = deepvariant_testdata(\n 'golden.postprocess_gvcf_input.tfrecord.gz')\n GOLDEN_POSTPROCESS_GVCF_OUTPUT = deepvariant_testdata(\n 'golden.postprocess_gvcf_output.g.vcf')\n GOLDEN_MAKE_EXAMPLES_RUN_INFO = deepvariant_testdata(\n 'golden.training_examples.tfrecord.gz.run_info.pbtxt')\n WS_ALLELE_COUNT_LINEAR_MODEL = deepvariant_testdata(\n 'obsolete/window_selector_allele_count_linear.pbtxt')\n 
WS_ALLELE_COUNT_LINEAR_MODEL_PCKL = deepvariant_testdata(\n 'obsolete/window_selector_allele_count_linear.pckl')\n WS_VARIANT_READS_THRESHOLD_MODEL = deepvariant_testdata(\n 'obsolete/window_selector_variant_read_threshold.pbtxt')\n GOLDEN_VCF_CANDIDATE_IMPORTER_POSTPROCESS_INPUT = deepvariant_testdata(\n 'golden.vcf_candidate_importer_postprocess_single_site_input.tfrecord.gz')\n GOLDEN_VCF_CANDIDATE_IMPORTER_POSTPROCESS_OUTPUT = deepvariant_testdata(\n 'golden.vcf_candidate_importer_postprocess_single_site_output.vcf')\n\n # For CustomizedClassesVariantLabeler:\n global CUSTOMIZED_CLASSES_GOLDEN_TRAINING_EXAMPLES\n CUSTOMIZED_CLASSES_GOLDEN_TRAINING_EXAMPLES = deepvariant_testdata(\n 'customized_classes.golden.training_examples.tfrecord.gz')\n\n # For VcfCandidateImporter:\n global GOLDEN_VCF_CANDIDATE_IMPORTER_TRAINING_EXAMPLES\n global GOLDEN_VCF_CANDIDATE_IMPORTER_CALLING_EXAMPLES\n global VCF_CANDIDATE_IMPORTER_VARIANTS\n GOLDEN_VCF_CANDIDATE_IMPORTER_TRAINING_EXAMPLES = deepvariant_testdata(\n 'golden.vcf_candidate_importer.training_examples.tfrecord.gz')\n GOLDEN_VCF_CANDIDATE_IMPORTER_CALLING_EXAMPLES = deepvariant_testdata(\n 'golden.vcf_candidate_importer_calling_examples.tfrecord')\n VCF_CANDIDATE_IMPORTER_VARIANTS = deepvariant_testdata(\n 'input/vcf_candidate_importer.indels.chr20.vcf.gz')\n\n # For alt-aligned pileups:\n global ALT_ALIGNED_ROWS_EXAMPLES\n global ALT_ALIGNED_DIFF_CHANNELS_EXAMPLES\n ALT_ALIGNED_ROWS_EXAMPLES = deepvariant_testdata(\n 'golden.alt_aligned_pileup_rows_examples.tfrecord.gz')\n ALT_ALIGNED_DIFF_CHANNELS_EXAMPLES = deepvariant_testdata(\n 'golden.alt_aligned_pileup_diff_channels_examples.tfrecord.gz')\n\n # For runtime-by-region in make_examples:\n global RUNTIME_BY_REGION\n global RUNTIME_BY_REGION_SHARDED\n RUNTIME_BY_REGION = deepvariant_testdata('input/make_examples_runtime.tsv')\n RUNTIME_BY_REGION_SHARDED = deepvariant_testdata(\n 'input/[email protected]')\n\n # For allele_frequency with GRCh38:\n global VCF_WITH_ALLELE_FREQUENCIES\n global GRCH38_FASTA\n global AF_VCF_CHR20\n global AF_VCF_CHR21\n global AF_VCF_CHR20_21_WILDCARD\n global AF_VCF_CHR20_AND_21\n global GRCH38_CHR20_AND_21_BAM\n global GOLDEN_ALLELE_FREQUENCY_EXAMPLES\n VCF_WITH_ALLELE_FREQUENCIES = deepvariant_testdata(\n 'input/allele_frequencies_vcf.vcf.gz')\n\n # Fasta filtered to regions: chr20:1-10000000 and chr21:1-10000000.\n GRCH38_FASTA = deepvariant_testdata('input/grch38.chr20_and_21_10M.fa.gz')\n # VCFs filtered to chr20:1-100000 and chr21:5100000-5200000.\n AF_VCF_CHR20 = deepvariant_testdata('input/cohort-chr20_100k.vcf.gz')\n AF_VCF_CHR21 = deepvariant_testdata('input/cohort-chr21_100k.vcf.gz')\n AF_VCF_CHR20_AND_21 = deepvariant_testdata(\n 'input/cohort-chr20_and_chr21_100k.vcf.gz')\n AF_VCF_CHR20_21_WILDCARD = deepvariant_testdata(\n 'input/cohort-chr2?_100k.vcf.gz')\n # This bam filtered to regions: chr20:61001-62000 and chr21:5114000-5114999\n # and header is edited with the following to match the GRCH38_FASTA:\n # @SQ SN:chr20 LN:10000000\n # @SQ SN:chr21 LN:10000000\n GRCH38_CHR20_AND_21_BAM = deepvariant_testdata(\n 'input/grch38_1k_subset_chr20_and_chr21.bam')\n GOLDEN_ALLELE_FREQUENCY_EXAMPLES = deepvariant_testdata(\n 'golden.allele_frequency_examples.tfrecord.gz')", "title": "" }, { "docid": "9700defaff852e135d0a7c3ea8a3df4f", "score": "0.5562371", "text": "def handle(self, *args, **opts):\n sample = get_sample(opts['sample_tag'])\n insert_gene_annotations(opts, sample)", "title": "" }, { "docid": "95d7ed193f388144af9a19bae9cec914", "score": 
"0.556113", "text": "def build_output_record(single_caller_variants, output_vcf, sample_names, hotspot=False):\n output_record = output_vcf.new_record()\n # Set consistent attributes\n liftover_record = single_caller_variants[0].record\n output_record.chrom = liftover_record.chrom\n output_record.alleles = liftover_record.alleles\n output_record.id = liftover_record.id\n output_record.start = liftover_record.start\n output_record.stop = liftover_record.stop\n\n # For each caller, get information required for format fields for this variant\n for variant in single_caller_variants:\n variant.samples = [Sample(name, variant.record, variant.caller)\n for name in sample_names]\n\n variant_lookup = {v.caller: v for v in single_caller_variants}\n\n for index in range(len(sample_names)):\n GT_list = []\n AD_list = []\n AF_list = []\n DP_list = []\n for caller in CALLER_NAMES:\n try:\n targ_var = variant_lookup[caller.lower()]\n except KeyError:\n for flist in (GT_list, AD_list, AF_list, DP_list):\n flist.append('.')\n continue\n targ_sample = targ_var.samples[index]\n\n GT_list.append(str(targ_sample.GT))\n AD_list.append(stringify(targ_sample.AD))\n AF_list.append('{:0.4f}'.format(targ_sample.AF))\n DP_list.append(targ_sample.DP)\n\n GT = stringify(GT_list, FORMAT_JOIN)\n AD = stringify(AD_list, FORMAT_JOIN)\n AF = stringify(AF_list, FORMAT_JOIN)\n DP = stringify(DP_list, FORMAT_JOIN)\n\n output_record.samples[index]['GTC'] = GT\n output_record.samples[index]['ADC'] = AD\n output_record.samples[index]['AFC'] = AF\n output_record.samples[index]['DPC'] = DP\n\n consensus_gt, gt_tag = get_gt_consensus(GT_list)\n dp_range = get_range(DP_list)\n af_range = get_range(AF_list)\n ad_range_1 = get_range([item.split(',')[0] for item in AD_list])\n ad_range_2 = get_range([item.split(',')[-1] for item in AD_list])\n\n output_record.samples[index]['GT'] = tuple([int(i) for i in consensus_gt.split('/')])\n output_record.samples[index]['GT_STATUS'] = gt_tag\n output_record.samples[index]['AD'] = get_ad_consensus(AD_list)\n output_record.samples[index]['AF'] = get_af_consensus(AF_list)\n output_record.samples[index]['DP'] = get_dp_consensus(DP_list)\n output_record.samples[index]['ADR'] = (ad_range_1, ad_range_2)\n output_record.samples[index]['AFR'] = af_range\n output_record.samples[index]['DPR'] = dp_range\n\n output_record.info['CAL'] = ','.join([c for c in CALLER_NAMES if c.lower() in variant_lookup])\n\n output_record.info['HotSpotAllele'] = (int(hotspot),)\n\n output_record.filter.add('PASS')\n\n return output_record", "title": "" }, { "docid": "832fafd66059110f982ff517a0420511", "score": "0.55050075", "text": "def main():\n\n\n #################################################################\n # Argument and Options Parsing\n #################################################################\n\n p = optparse.OptionParser(__doc__)\n p.add_option(\"-o\", \"--output\", dest=\"filename\", help=\"write \\\n report to FILE\")\n p.add_option(\"-G\", \"--genotype\", dest=\"G\", help=\\\n \"Use imputed/genotypes if available, should be in VCF file format\")\n p.add_option(\"-v\", \"--vcf_file\", action=\"store_true\", dest=\"inputisvcfile\",\n help=\"the input is a VCF file\")\n p.add_option(\"-q\", \"--quality_threshold\", type=\"int\", dest=\"qual\",\n help=\"base quality threshold to take allele counts from\")\n p.add_option(\"-p\", \"--pileup\", action=\"store_true\", dest=\"p\",\n help= \"Input files are pileup files\")\n p.add_option(\"-D\", \"--debug\", action=\"store_true\", dest=\"D\", help=\"debug\")\n 
p.add_option(\"-c\", \"--count-threshold\", action=\"store\", type=\"int\",\n dest=\"c\", help=\"Set the count threshold for making AEI calls\")\n p.add_option(\"-V\", \"--output_vcf\", action=\"store_true\", dest=\"outputVCF\",\n help=\"Output the results to a VCF file\")\n p.add_option(\"-A\", \"--auto_parse\", action=\"store_true\", dest=\"auto\",\n help=\"Autoparse readgroups, if set to false will assume a\\\n single sample in each file\")\n\n options, args = p.parse_args()\n if options.qual: pass\n else: options.qual = 20\n\n\n # For testing purposes\n debug = 1\n output = open(options.filename, 'wb')\n\n vcf = VCF(args[0])\n\n # A tab delimited file mapping sample names to bams ############# \n bam_inputs = open(args[1], 'rU')\n sample_to_file = {}\n for line in bam_inputs:\n line = line.split(\"\\t\")\n sample_to_file[line[0]] = line[1].rstrip(\"\\n\")\n\n\n # Handling of multiple BAM/SAM inputs\n\n\n INDEX_BASE = ['A', 'C', 'G', 'T']\n if options.c:\n count_threshold = options.c\n else:\n count_threshold = 20\n\n multi_tuples = []\n # Temproary change the column names\n vcf.vcf.rename(columns=sample_to_file, inplace=True)\n for i in vcf.vcf.columns[9:]:\n multi_tuples.append(i)\n multi_tuples.append(i)\n multi_tuples.append(i)\n multi_tuples.append(i)\n multi = zip(multi_tuples, [0,1,2,3]*len(vcf.vcf.columns[9:]))\n multi_index = pd.MultiIndex.from_tuples(multi, names=['sample', 'alleles'])\n\n counts_matrix = pd.DataFrame(np.zeros((vcf.vcf.shape[0], len(vcf.vcf.columns[9:])*4), dtype=np.int16),\n index=vcf.vcf.index, columns=multi_index)\n\n counts_fixed = functools.partial(counts_for_individuals, c_m=counts_matrix,\n chrm=vcf.vcf['CHROM'], pos=vcf.vcf['POS'])\n vcf.vcf.ix[:, 9:].apply(counts_fixed, axis=0)\n counts_matrix.to_csv(options.filename)\n pickle.p", "title": "" }, { "docid": "806834d17edb9441ef8e733712dd51a6", "score": "0.54660696", "text": "def main():\n\n\n #################################################################\n # Argument and Options Parsing\n #################################################################\n\n p = optparse.OptionParser(__doc__)\n p.add_option(\"-o\", \"--output\", dest=\"filename\", help=\"write \\\n report to FILE\")\n p.add_option(\"-G\", \"--genotype\", dest=\"G\", help=\\\n \"Use imputed/genotypes if available, should be in VCF file format\")\n p.add_option(\"-v\", \"--vcf_file\", action=\"store_true\", dest=\"inputisvcfile\",\n help=\"the input is a VCF file\")\n p.add_option(\"-q\", \"--quality_threshold\", type=\"int\", dest=\"qual\",\n help=\"base quality threshold to take allele counts from\")\n p.add_option(\"-p\", \"--pileup\", action=\"store_true\", dest=\"p\",\n help= \"Input files are pileup files\")\n p.add_option(\"-D\", \"--debug\", action=\"store_true\", dest=\"D\", help=\"debug\")\n p.add_option(\"-c\", \"--count-threshold\", action=\"store\", type=\"int\",\n dest=\"c\", help=\"Set the count threshold for making AEI calls\")\n p.add_option(\"-V\", \"--output_vcf\", action=\"store_true\", dest=\"outputVCF\",\n help=\"Output the results to a VCF file\")\n\n options, args = p.parse_args()\n if options.qual: pass\n else: options.qual = 20\n\n\n # For testing purposes\n debug = 1\n\n # Open the bedfile/vcf file\n # file_a = csv.reader(open(args[0], \"rU\"), delimiter=\"\\t\")\n # Right now defaulting to VCF file, since speed is an issue\n\n \"\"\"\n if options.inputisvcfile: file_a = VCFfile(args[0])\n else: file_a = BEDfile(args[0])\n \"\"\"\n file_a = open(args[0],\"rb\")\n\n # Handling of multiple BAM/SAM inputs\n 
bam_Names = args[1:]\n bam_files = []\n for filename in bam_Names:\n bam_files.append(pysam.Samfile(filename,\"rb\"))\n # Creates a dictionary with the bam_file name as the key, and the samples\n # by readgroup as the value i.e. {\"bamfile\":[RG1, RG2, RG3]\n # \"bamfile2\":[RG4,RG5]\"\n\n # Also creates a read group sample dictionary\n readGroup_sample = {}\n bam_ReadGroups = {}\n for bam_file, bamName in map(None, bam_files, bam_Names):\n samples = []\n # Grab only the header information\n m = re.compile('@RG.*')\n readGroups = m.findall(bam_file.text)\n for r in readGroups:\n r = r.split('\\t')\n for i in r:\n if i[0:3] == \"ID:\": ID = i[3:]\n elif i[0:3] == \"SM:\": SM = i[3:]\n else : pass\n readGroup_sample[ID] = SM\n samples.append(ID)\n\n if len(bam_files) == 1 and len(readGroups) == 0:\n readGroup_sample['No_SM'] = 'No_ID'\n samples.append('No_SM')\n bam_ReadGroups[bamName] = samples\n break\n elif len(bam_files) == 1 and len(readGroups) == 0:\n print('If you have more than 1 bam file, all the bam files \\\n need sample information from the read group')\n sys.exit()\n else:\n bam_ReadGroups[bamName] == samples\n\n bam_ReadGroups[bamName] = samples\n\n\n # Print the header\n header = [\"chr\", \"pos\", \"rsID\"]\n for i in bam_Names:\n ReadGroupsinBam = bam_ReadGroups[i]\n for t in ReadGroupsinBam:\n header.append(readGroup_sample[t])\n header.append(\"Genotype(Maj/Min)\")\n header.append(\"Ratio\")\n print(\"\\t\".join(header))\n\n INDEX_BASE = ['A', 'C', 'G', 'T']\n if options.c:\n count_threshold = options.c\n else:\n count_threshold = 30\n if options.G != None:\n geno = open(options.G, \"rU\")\n # :TODO ensure allow different well known file formats and also check\n geno_samples = geno.next().strip('\\n').split('\\t')[4:]\n # Initialize first row\n geno_line = geno.next().strip('\\n').split('\\t')\n # match geno_samples to bam_samples\n geno_to_bam_ind = []\n else: pass\n\n for line in file_a:\n line = line.strip('\\n').split('\\t')\n counts = []\n # Counts is a list of numpy arrays\n\n ##################################################################\n #\n # Grab the information for bam files or :TODO pileups. 
Seems like\n # something that Hadoop will be very good at.\n #\n ##################################################################\n\n region = str(line[0])\n position = int(line[2])\n\n for bamfile, bamNames in map(None, bam_files, bam_Names):\n # :TODO in the VCF and bed files make sure to type the attributes\n variant = lociInformation(region, position,\n samples=bam_ReadGroups[bamNames],\n phredThreshold=options.qual)\n\n bamfile.pileup(variant.region, variant.position,\n variant.position+1, callback=variant)\n for i in variant.samples:\n counts.append(variant.allele_counts[i])\n # First determines if any of the samples meet the read threshold\n # Secondly determines if there are any heterozygotes in the sample (if\n # there aren't any it skips printing that line.\n # There are several ways it calculates this, if imputed genotypes are\n # given it will use use that,\n # otherwise the posterior probability of being a heterozygote\n # given the data is calculated.\n # Need to map genotype names in the files with the bamfiles or sample\n # Need to move to the same position as where the SNP is.\n # Ideally all this information would be loaded into a database.\n # Maybe load into memory?\n # convert into VCF file\n if options.G != None:\n while geno_line[2] < position:\n # Need to handle edge cases at the end of chromosomes\n geno_line = geno.next().strip('\\n').split('\\t')\n if region == \"Stuff\":\n pass\n if geno_line[2] == position: pass\n # Reorder line to match samples\n\n if threshold_counts(counts, threshold=count_threshold):\n p_values = []\n for sample_c in counts:\n ind = sample_c.argsort()[::-1]\n # Should it also return the value of the estimate?\n any_hets = []\n if sample_c.sum() >= count_threshold:\n if lf.isHet(sample_c):\n p_values.append(lf.ratioLik(sample_c))\n p_values.append(\"%s:%s\" % (INDEX_BASE[ind[0]],\n INDEX_BASE[ind[1]]))\n any_hets.append(True)\n else:\n p_values.append(\"HOMO\")\n p_values.append(str(INDEX_BASE[ind[0]]))\n any_hets.append(False)\n p_values.append(\"%s:%s\" % (sample_c[ind[0]],\n sample_c[ind[1]]))\n else:\n p_values.append(\"NA\")\n p_values.append(str(INDEX_BASE[ind[0]]))\n p_values.append(\"%s:%s\" % (sample_c[ind[0]],\n sample_c[ind[1]]))\n any_hets.append(False)\n if any(any_hets):\n # Only print if there are heterozygotes\n print(\"\\t\".join([variant.region,\n str(variant.position),\"\\t\"]) +\n \"\\t\".join([str(i) for i in list(p_values)]))\n else: pass\n\n # For testing purposes\n if options.D:\n if debug > 2000: break\n else: debug += 1\n else:pass", "title": "" }, { "docid": "64195769fa277e044e927086cbfdb809", "score": "0.53954417", "text": "def sample(self):\n\n raise NotImplementedError(\n \"The sample method must be defined according each data type handler\"\n ) # pragma: no cover", "title": "" }, { "docid": "d33714ccedb68335c9df73df6b6b6b7e", "score": "0.5361301", "text": "def read_vcf(self):\n # Insert VCF Records\n for record in self.records:\n # Get annotation, filter, comment\n annotation = self.get_annotation(record)\n feature = self.get_feature(record.INFO['FeatureType'])\n record_filters = self.get_filter(record.FILTER)\n variant = {}\n variant['filter_id'] = record_filters.pk\n # Store variant confidence\n AD = f'{record.gt_ref_depths[0]},{record.gt_alt_depths[0]}'\n PL = f'{record.gt_phred_ll_homref[0]}, {record.gt_phred_ll_het[0]}'\n try:\n QD = f'{record.INFO[\"QD\"]:.2f}'\n except KeyError:\n QD = '.'\n variant['AC'] = record.INFO.get('AC'),\n variant['AD'] = AD,\n variant['AF'] = record.INFO['AF'],\n 
variant['DP'] = record.INFO['DP'],\n variant['GQ'] = f'{record.gt_quals[0]:.2f}',\n variant['GT'] = f'{record.gt_depths[0]}',\n variant['MQ'] = f'{record.INFO[\"MQ\"]:.2f}',\n variant['PL'] = PL,\n variant['QD'] = QD,\n variant['quality'] = f'{record.QUAL:.2f}'\n\n if record.is_snp:\n comment = self.get_comment(record.INFO['Comments'][0])\n try:\n snp_obj = self.all_snps[(\n record.POS,\n record.REF,\n str(record.ALT[0])\n )]\n except KeyError:\n snp_obj = self.get_snp(record, self.reference, annotation,\n feature)\n variant['comment'] = comment.pk\n variant['annotation_id'] = snp_obj.annotation.pk\n variant['snp_id'] = snp_obj.pk\n self.snps.append(variant)\n else:\n key = (record.POS, record.REF, str(record.ALT[0]))\n self.indel_positions.append(record.POS)\n self.indel_queries.append({\n key: {\n 'record': record,\n 'reference': self.reference,\n 'annotation': annotation,\n 'feature': feature,\n }\n })\n self.temp_indels.append({'key': key, 'data': variant})", "title": "" }, { "docid": "3c02dfa3d040b5e346616f2052003c10", "score": "0.53564554", "text": "def VST(self,conf):\n #appname,apppath,genome,appparams = conf[0]\n #print conf[0]\n #conf = [('VarScan.jar mpileup2snp', '/bluegill/version/app/varscan/2.3.3/', 'hg19', '--output-vcf 1')]\n #current = []\n #mainapp,subapp = appname.split(' ')\n\n m = self._get_app(conf,'GVF')\n app = m[0]\n \n current = []\n\n \n if not self.control_case:\n raise Exception('Application [VST] require both control and case samples')\n \n #print self.products\n \n _control,_case = self.products\n #print 'PRODUCTS:',_control,_case\n #_normal = _control[0] \n #_tumor = _case[0]\n \n tmp = []\n fout = os.path.commonprefix(_control).strip('_')\n if not fout:\n fout = 'tmp.'+str(time.time())\n time.sleep(1)\n foutput = fout+'.cdr'\n \n if len(_control) >1:\n cmd = \"%s -o 'I(0..%d)' -b %s %s > %s\" % (app,len(_control)-1,self.genome,' '.join(_control),foutput)\n else: \n cmd = \"%s -o 'I(0)' -b %s %s > %s\" % (app,self.genome,' '.join(_control),foutput) \n self.commands.append(cmd) \n tmp.append(foutput)\n current.append(tmp)\n \n tmp = []\n fout = os.path.commonprefix(_case).strip('_')\n if not fout:\n fout = 'tmp.'+str(time.time())\n time.sleep(1)\n foutput = fout+'.cdr'\n if len(_case) >1:\n cmd = \"%s -o 'I(0..%d)' -b %s %s > %s\" % (app,len(_case)-1,self.genome,' '.join(_case),foutput)\n else: \n cmd = \"%s -o 'I(0)' -b %s %s > %s\" % (app,self.genome,' '.join(_case),foutput) \n self.commands.append(cmd) \n tmp.append(foutput)\n current.append(tmp)\n \n self.products = current", "title": "" }, { "docid": "61ba644dd9f2ae4282569ebc2dc27e12", "score": "0.5311768", "text": "def SigSNPs(args):\n p = OptionParser(SigSNPs.__doc__)\n p.add_option('--MeRatio', default = '1',\n help = \"specify the ratio of independent SNPs, maize is 0.32, sorghum is 0.53\")\n p.add_option('--chromosome', default = 'all',\n help = \"specify chromosome, such 1, 2, 'all' means genome level\")\n p.add_option('--software', default = 'mvp', choices=('gemma', 'gapit', 'farmcpu', 'mvp'),\n help = 'specify which software generates the GWAS result')\n opts, args = p.parse_args(args)\n\n if len(args) == 0:\n sys.exit(not p.print_help())\n gwas,output, = args\n if opts.software == 'gemma':\n df = pd.read_csv(gwas, delim_whitespace=True, usecols=['chr', 'rs', 'ps', 'p_lrt'])\n cutoff = 0.05/(float(opts.MeRatio) * df.shape[0])\n print('significant cutoff: %s'%cutoff)\n df['chr'] = df['chr'].astype('str')\n df = df if opts.chromosome=='all' else df[df['chr']==opts.chromosome]\n df = df[['rs', 
'chr', 'ps', 'p_lrt']]\n df[df['p_lrt'] < cutoff].to_csv(output, index=False, sep='\\t')\n\n elif opts.software == 'mvp':\n df = pd.read_csv(gwas)\n cutoff = 0.05/(float(opts.MeRatio) * df.shape[0])\n print('significant cutoff: %s'%cutoff)\n df['Chrom'] = df['Chrom'].astype('str')\n df = df if opts.chromosome=='all' else df[df['Chrom']==opts.chromosome]\n df[df.iloc[:,4] < cutoff].to_csv(output, index=False, sep='\\t')\n\n elif opts.software == 'farmcpu':\n df = pd.read_csv(gwas, usecols=['SNP', 'Chromosome', 'Position', 'P.value'])\n cutoff = 0.05/(float(opts.MeRatio) * df.shape[0])\n print('significant cutoff: %s'%cutoff)\n df['Chromosome'] = df['Chromosome'].astype('str')\n df = df if opts.chromosome=='all' else df[df['chr']==opts.chromosome]\n df[df['P.value'] < cutoff].to_csv(output, index=False, sep='\\t')\n\n elif opts.software == 'gapit':\n df = pd.read_csv(gwas, usecols=['SNP', 'Chromosome', 'Position ', 'P.value'])\n cutoff = 0.05/(float(opts.MeRatio) * df.shape[0])\n print('significant cutoff: %s'%cutoff)\n df['Chromosome'] = df['Chromosome'].astype('str')\n df = df if opts.chromosome=='all' else df[df['chr']==opts.chromosome]\n df[df['P.value'] < cutoff].to_csv(output, index=False, sep='\\t')\n else:\n sys.exit('specify which software you use: mvp, gemma, farmcpu, gapit.')\n print('Done! Check %s'%output)", "title": "" }, { "docid": "224b925f74f60a9f244b769bc304b1b4", "score": "0.52129555", "text": "def initGenotypes(hyperGenotype): #### KEY FUNCTION.\r\n return singleGenotype(hyperGenotype) # only one genotype. \r", "title": "" }, { "docid": "2250c71fb07df28d1675142194ed0f7e", "score": "0.5201189", "text": "def callSnvs(self):\r\n if len(self.pool.samples) == 0:\r\n raise LookupError(\"no samples for executing samtools\")\r\n \r\n if self.chromosome not in self.pool.vcf:\r\n self.pool.vcf[self.chromosome] = VcfFile.VcfFile(self.pool, chrom=self.chromosome, bcf=True)\r\n inputFileString = \"\"\r\n for sample in self.pool.samples:\r\n inputFileString = inputFileString + \" \" + sample.bam.getFile()\r\n \r\n outputFile = self.pool.vcf[self.chromosome].fileName\r\n \r\n cmd = Program.config.getPath(\"samtools\") + \" mpileup \" + self.getProgramArguments(\"samtools mpileup\")\r\n if self.chromosome !=None:\r\n cmd = cmd + \" -r \" + self.chromosome\r\n cmd = cmd + \" -gf \" + os.path.abspath(Program.config.getPath(\"refGenome\")) + inputFileString + \" > \" + outputFile\r\n \r\n self.execute(cmd, \"samtools mpileup\", self.pool.vcf[self.chromosome])\r\n \r\n self._filterVcf(self.pool.vcf[self.chromosome])", "title": "" }, { "docid": "c67f9e6c4469dec8858372fec395ba4b", "score": "0.5201109", "text": "def _callable_from_gvcf(data, vrn_file, out_dir):\n methods = {\"freebayes\": \"freebayes\", \"platypus\": \"platypus\",\n \"gatk-haplotype\": \"gatk\"}\n gvcf_type = methods.get(dd.get_variantcaller(data))\n if gvcf_type:\n out_file = os.path.join(out_dir, \"%s-gcvf-coverage.bed\" %\n utils.splitext_plus(os.path.basename(vrn_file))[0])\n if not utils.file_uptodate(out_file, vrn_file):\n with file_transaction(data, out_file) as tx_out_file:\n cmd = (\"gvcf_regions.py --gvcf_type {gvcf_type} {vrn_file} \"\n \"| bedtools merge > {tx_out_file}\")\n do.run(cmd.format(**locals()), \"Convert gVCF to BED file of callable regions\")\n return out_file", "title": "" }, { "docid": "42a44f029310ce7b71ed6c532f1e67c9", "score": "0.5197337", "text": "def cli(input_vcf, vcf_type, output_dire):\n # read strelka results\n df = read_vcf(input_vcf)\n\n # first filters\n chroms = [str(x) for x in range(1, 
23, 1)]\n chroms.extend(['X', 'Y'])\n df['#CHROM'].astype(str)\n df = df[df[\"#CHROM\"].isin(chroms)]\n\n df = df[(df.FILTER == 'PASS') | (df.FILTER == 'DP')]\n\n # mut type processing\n if vcf_type == 'snvs':\n df_proc = df.progress_apply(lambda x: snvs_processing(x), axis=1)\n elif vcf_type == 'indels':\n df_proc = df.apply(lambda x: indels_processing(x), axis=1)\n else:\n print(\"Wrong vcf_type string. Write 'snvs' or 'indels' depending on the mut type\")\n\n # write results\n df_proc[['n_alt_reads', 'n_ref_reads',\n 't_alt_reads', 't_ref_reads', 'POS']] = df_proc[['n_alt_reads', 'n_ref_reads', 't_alt_reads',\n 't_ref_reads', 'POS']].astype(int)\n df_proc = df_proc[['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', 'NORMAL', 'TUMOR', 'DP_tumor',\n 't_alt_reads', 't_ref_reads', 'DP_normal', 'n_alt_reads', 'n_ref_reads', 'mut_type', 'GT_normal',\n 'GT_tumor']]\n\n file = input_vcf.split(\"/\")[-1]\n file = file.replace(\".vcf.gz\", \"\")\n\n print(len(df_proc))\n\n df_proc.to_csv(os.path.join(output_dire, file + \".maf\"), sep='\\t', index=False)", "title": "" }, { "docid": "1eebffe6cc928b97d9c5e3ad8bac567e", "score": "0.51970387", "text": "def sample_fx(self):\n raise NotImplementedError", "title": "" }, { "docid": "a3b04835f0b13ce5cf4215825e4b99fa", "score": "0.5171285", "text": "def __init__(self,\n chromosome,\n source,\n feature,\n start,\n end,\n score,\n strand,\n frame,\n gene_id,\n gene_biotype,\n gene_name):\n\n # Basic gtf info [Required]\n self.chromosome = str(chromosome)\n self.source = str(source)\n self.feature = str(feature)\n self.start = int(start)\n self.end = int(end)\n self.score = str(score)\n self.strand = str(strand)\n self.frame = str(frame)\n\n # Extra gtf info [Required]\n self.gene_id = str(gene_id)\n # in ENSEMBL is gene_biotype, in gencode is gene_type\n self.gene_biotype = str(gene_biotype)\n self.gene_name = str(gene_name)\n\n # dictionary that contains as key exon coordinates in the format:\n # chromosome:start:exon:end:strand\n # and as values a list of exon ids\n self.exon_coordinates_dict = dict()\n\n # Dictionary of known transcripts.\n self.transcripts = dict()\n\n # list of novel transcritps (that occur from novel splicing)\n self.novel_transcripts = list()\n\n # list of novel transcripts (that occur from read through)\n self.novel_transcripts_readthrough = list()\n\n # list of annotated terminal exons\n self.annotated_terminal_exons = list()\n\n # list of annotated intermediate exons\n self.annotated_intermediate_exons = list()\n\n # list of background regions\n self.background = list()\n\n # list of annotated introns without polya sites\n self.annotated_introns_without_polya_site = list()\n\n # list of annotated introns with polya sites\n self.annotated_introns_with_polya_site = list()\n\n # list of potential novel exons\n self.potential_novel_exons = list()\n\n # list of potential novel readthough exons\n self.potential_novel_readthroughs = list()\n\n # intron length\n self.intron_length = 0\n\n # intronic reads\n self.intron_reads = 0\n\n # union exon length (UnionExonLength)\n self.union_exon_length = 0\n\n # number of reads that fall in the gene (TotalGeneReads)\n self.total_reads = 0\n\n # GeneExpressionPerKBApproximated\n self.GeneExpressionPerKBApproximated = 0\n\n # BackgroundPerKB\n self.BackgroundPerKB = 0\n\n # GeneExpressionPerKBInclBackground\n self.GeneExpressionPerKBInclBackground = 0\n\n # GeneExpressionBackgroundPerKB\n self.GeneExpressionBackgroundPerKB = 0\n\n # GeneExpressionPerKBwithoutBackground\n 
self.GeneExpressionPerKBwithoutBackground = 0\n\n # BackgroundFraction\n self.BackgroundFraction = 0\n\n # Flag to specify if the gene overlaps with some other gene\n # default True\n self.overlaps_with_other_gene = True\n\n # Dictionary that contains as a key the novel transcript\n # ids and as value the list with all transcirpt ids\n # from which the novel transcript can originate from.\n self.mother_transcripts_of_novel_transcripts = defaultdict(list)", "title": "" }, { "docid": "a1743910fb51d267355f1be72548cd88", "score": "0.515055", "text": "def _post(self, *args, **kwargs):\n return _blocks_swig5.sample_and_hold_ff_sptr__post(self, *args, **kwargs)", "title": "" }, { "docid": "f4ec3f417c7276a91088385163a9428f", "score": "0.5149947", "text": "def coregister(insrc, inref, band, bandref, resample=1, step=256, minstep=16, minsiftpoints=40, iterate=1, prec=3, mode=2, datadir=None, pattern='*STACK.tif', datatype='S2', writeFeatures=False, workingDirectory=None):\n from Common.FileUtils import ensure_dir\n pathWd = os.path.dirname(insrc) if not workingDirectory else workingDirectory\n if os.path.exists(pathWd) == False :\n ensure_dir(pathWd)\n\n srcClip = os.path.join(pathWd,'tempSrcClip.tif')\n extractROIApp = OtbAppBank.CreateExtractROIApplication({\"in\": insrc,\n \"mode\": \"fit\",\n \"mode.fit.im\": inref,\n \"out\": srcClip,\n \"pixType\": \"uint16\"})\n extractROIApp.ExecuteAndWriteOutput()\n # #SensorModel generation\n SensorModel = os.path.join(pathWd,'SensorModel.geom')\n PMCMApp = OtbAppBank.CreatePointMatchCoregistrationModel({\"in\": srcClip,\n \"band1\": band,\n \"inref\": inref,\n \"bandref\": bandref,\n \"resample\": resample,\n \"precision\": str(prec),\n \"mfilter\": \"1\",\n \"backmatching\": \"1\",\n \"outgeom\": SensorModel,\n \"initgeobinstep\": str(step),\n \"mingeobinstep\": str(minstep),\n \"minsiftpoints\": str(minsiftpoints),\n \"iterate\": iterate\n })\n PMCMApp.ExecuteAndWriteOutput()\n\n # mode 1 : application on the source image\n if mode == 1 or mode == 3:\n outSrc = os.path.join(pathWd, 'temp_file.tif')\n io_Src = str(srcClip + '?&skipcarto=true&geom=' + SensorModel)\n ds = gdal.Open(srcClip)\n prj = ds.GetProjection()\n gt = ds.GetGeoTransform()\n srs = osr.SpatialReference()\n srs.ImportFromWkt(prj)\n code = srs.GetAuthorityCode(None)\n gsp = str(int(2 * round(max(abs(gt[1]), abs(gt[5])))))\n ds = None\n orthoRecApp = OtbAppBank.CreateOrthoRectification({\"in\": io_Src,\n \"io.out\": outSrc,\n \"map\": \"epsg\",\n \"map.epsg.code\": code,\n \"opt.gridspacing\": gsp,\n \"pixType\": \"uint16\"\n })\n\n if writeFeatures:\n orthoRecApp[0].ExecuteAndWriteOutput()\n else:\n orthoRecApp[0].Execute()\n\n ext = os.path.splitext(insrc)[1]\n finalOutput = os.path.join(pathWd,os.path.basename(insrc.replace(ext, ext.replace('.', '_COREG.'))))\n superImposeApp = OtbAppBank.CreateSuperimposeApplication({\"inr\": srcClip,\n \"inm\": orthoRecApp[0],\n \"out\": finalOutput,\n \"pixType\": \"uint16\"})\n superImposeApp[0].ExecuteAndWriteOutput()\n\n shutil.move(finalOutput,insrc.replace(ext, ext.replace('.', '_COREG.')))\n shutil.move(finalOutput.replace(ext, '.geom'),insrc.replace(ext, '_COREG.geom'))\n\n # Mask registration if exists\n masks = glob.glob(os.path.dirname(insrc) + os.sep + 'MASKS' + os.sep + '*BINARY_MASK' + ext)\n if len(masks) != 0:\n for mask in masks:\n srcClip = os.path.join(pathWd,'tempSrcClip.tif')\n extractROIApp = OtbAppBank.CreateExtractROIApplication({\"in\": mask,\n \"mode\": \"fit\",\n \"mode.fit.im\": inref,\n \"out\": srcClip,\n \"pixType\": 
\"uint16\"})\n extractROIApp.ExecuteAndWriteOutput()\n outSrc = os.path.join(pathWd, 'temp_file.tif')\n io_Src = str(mask + '?&skipcarto=true&geom=' + SensorModel)\n orthoRecApp = OtbAppBank.CreateOrthoRectification({\"in\": io_Src,\n \"io.out\": outSrc,\n \"map\": \"epsg\",\n \"map.epsg.code\": code,\n \"opt.gridspacing\": gsp,\n \"pixType\": \"uint16\"\n })\n if writeFeatures:\n orthoRecApp[0].ExecuteAndWriteOutput()\n else:\n orthoRecApp[0].Execute()\n\n ext = os.path.splitext(insrc)[1]\n finalMask = os.path.join(pathWd,os.path.basename(mask.replace(ext, ext.replace('.', '_COREG.'))))\n superImposeApp= OtbAppBank.CreateSuperimposeApplication({\"inr\": mask,\n \"inm\": orthoRecApp[0],\n \"out\": finalMask,\n \"pixType\": \"uint16\"})\n superImposeApp[0].ExecuteAndWriteOutput()\n\n if finalMask != mask.replace(ext, ext.replace('.', '_COREG.')) :\n shutil.move(finalMask,mask.replace(ext, ext.replace('.', '_COREG.')))\n shutil.move(finalMask.replace(ext, '.geom'),mask.replace(ext, '_COREG.geom'))\n\n if mode == 3:\n folders = glob.glob(os.path.join(datadir,'*'))\n vhr_ref = inref\n if datatype in ['S2','S2_S2C']:\n dates = [os.path.basename(fld).split('_')[1].split(\"-\")[0] for fld in folders]\n ref_date = os.path.basename(insrc).split('_')[1].split(\"-\")[0]\n elif datatype in ['L5','L8']:\n dates = [os.path.basename(fld).split('_')[3] for fld in folders]\n ref_date = os.path.basename(insrc).split('_')[3]\n dates.sort()\n ref_date_ind = dates.index(ref_date)\n bandref = band\n clean_dates = [ref_date]\n for date in reversed(dates[:ref_date_ind]):\n inref = glob.glob(os.path.join(datadir,'*'+clean_dates[-1]+'*',pattern))[0]\n insrc = glob.glob(os.path.join(datadir,'*'+date+'*',pattern))[0]\n srcClip = os.path.join(pathWd,'srcClip.tif')\n extractROIApp = OtbAppBank.CreateExtractROIApplication({\"in\": insrc,\n \"mode\": \"fit\",\n \"mode.fit.im\": inref,\n \"out\": srcClip,\n \"pixType\": \"uint16\"})\n extractROIApp.ExecuteAndWriteOutput()\n outSensorModel = os.path.join(pathWd,'SensorModel_%s.geom'%date)\n try :\n PMCMApp = OtbAppBank.CreatePointMatchCoregistrationModel({\"in\": srcClip,\n \"band1\": band,\n \"inref\": inref,\n \"bandref\": bandref,\n \"resample\": resample,\n \"precision\": str(prec),\n \"mfilter\": \"1\",\n \"backmatching\": \"1\",\n \"outgeom\": outSensorModel,\n \"initgeobinstep\": str(step),\n \"mingeobinstep\": str(minstep),\n \"minsiftpoints\": str(minsiftpoints),\n \"iterate\": iterate\n })\n PMCMApp.ExecuteAndWriteOutput()\n except RuntimeError :\n shutil.copy(SensorModel,outSensorModel)\n logger.warning('Coregistration failed, %s will be process with %s' %(insrc, outSensorModel))\n continue\n\n outSrc = os.path.join(pathWd, 'temp_file.tif')\n io_Src = str(srcClip + '?&skipcarto=true&geom=' + outSensorModel)\n ds = gdal.Open(srcClip)\n prj = ds.GetProjection()\n gt = ds.GetGeoTransform()\n srs = osr.SpatialReference()\n srs.ImportFromWkt(prj)\n code = srs.GetAuthorityCode(None)\n gsp = str(int(2 * round(max(abs(gt[1]), abs(gt[5])))))\n ds = None\n try :\n orthoRecApp = OtbAppBank.CreateOrthoRectification({\"in\": io_Src,\n \"io.out\": outSrc,\n \"map\": \"epsg\",\n \"map.epsg.code\": code,\n \"opt.gridspacing\": gsp,\n \"pixType\": \"uint16\"\n })\n\n if writeFeatures:\n orthoRecApp[0].ExecuteAndWriteOutput()\n else:\n orthoRecApp[0].Execute()\n except RuntimeError :\n os.remove(outSensorModel)\n shutil.copy(SensorModel,outSensorModel)\n logger.warning('Coregistration failed, %s will be process with %s' %(insrc, outSensorModel))\n orthoRecApp = 
OtbAppBank.CreateOrthoRectification({\"in\": io_Src,\n \"io.out\": outSrc,\n \"map\": \"epsg\",\n \"map.epsg.code\": code,\n \"opt.gridspacing\": gsp,\n \"pixType\": \"uint16\"\n })\n continue\n\n if writeFeatures:\n orthoRecApp[0].ExecuteAndWriteOutput()\n else:\n orthoRecApp[0].Execute()\n\n ext = os.path.splitext(insrc)[1]\n finalOutput = os.path.join(pathWd, os.path.basename(insrc.replace(ext, ext.replace('.', '_COREG.'))))\n superImposeApp = OtbAppBank.CreateSuperimposeApplication({\"inr\": srcClip,\n \"inm\": orthoRecApp[0],\n \"out\": finalOutput,\n \"pixType\": \"uint16\"})\n superImposeApp[0].ExecuteAndWriteOutput()\n\n shutil.move(finalOutput,insrc.replace(ext, ext.replace('.', '_COREG.')))\n shutil.move(finalOutput.replace(ext, '.geom'),insrc.replace(ext, '_COREG.geom'))\n\n # Mask registration if exists\n masks = glob.glob(os.path.dirname(insrc) + os.sep + 'MASKS' + os.sep + '*BINARY_MASK' + ext)\n if len(masks) != 0:\n for mask in masks:\n srcClip = os.path.join(pathWd,'srcClip.tif')\n extractROIApp = OtbAppBank.CreateExtractROIApplication({\"in\": mask,\n \"mode\": \"fit\",\n \"mode.fit.im\": inref,\n \"out\": srcClip,\n \"pixType\": \"uint16\"})\n extractROIApp.ExecuteAndWriteOutput()\n outSrc = os.path.join(pathWd, 'temp_file.tif')\n io_Src = str(srcClip + '?&skipcarto=true&geom=' + outSensorModel)\n orthoRecApp = OtbAppBank.CreateOrthoRectification({\"in\": io_Src,\n \"io.out\": outSrc,\n \"map\": \"epsg\",\n \"map.epsg.code\": code,\n \"opt.gridspacing\": gsp,\n \"pixType\": \"uint16\"\n })\n if writeFeatures:\n orthoRecApp[0].ExecuteAndWriteOutput()\n else:\n orthoRecApp[0].Execute()\n\n ext = os.path.splitext(insrc)[1]\n finalMask = os.path.join(pathWd, os.path.basename(mask.replace(ext, ext.replace('.', '_COREG.'))))\n superImposeApp= OtbAppBank.CreateSuperimposeApplication({\"inr\": srcClip,\n \"inm\": orthoRecApp[0],\n \"out\": finalMask,\n \"pixType\": \"uint16\"})\n superImposeApp[0].ExecuteAndWriteOutput()\n\n shutil.move(finalMask,mask.replace(ext, ext.replace('.', '_COREG.')))\n shutil.move(finalMask.replace(ext, '.geom'),mask.replace(ext, '_COREG.geom'))\n \n if not writeFeatures and os.path.exists(outSensorModel):\n os.remove(outSensorModel)\n\n if datatype in ['S2','S2_S2C']:\n mtd_file = glob.glob(os.path.join(os.path.dirname(insrc),'*_MTD_ALL*'))[0]\n cloud_clear = get_S2_Tile_Cloud_Cover(mtd_file)\n cover = get_S2_Tile_Coverage(mtd_file)\n if cloud_clear > 0.6 and cover > 0.8 :\n clean_dates.append(date)\n elif datatype in ['L5','L8']:\n mlt_file = glob.glob(os.path.join(os.path.dirname(insrc),'*_MTL*'))[0]\n cloud_clear = get_L8_Tile_Cloud_Cover(mlt_file)\n if cloud_clear > 0.6 :\n clean_dates.append(date)\n\n clean_dates = [ref_date]\n for date in dates[ref_date_ind+1:]:\n inref = glob.glob(os.path.join(datadir,'*'+clean_dates[-1]+'*',pattern))[0]\n insrc = glob.glob(os.path.join(datadir,'*'+date+'*',pattern))[0]\n srcClip = os.path.join(pathWd,'srcClip.tif')\n extractROIApp = OtbAppBank.CreateExtractROIApplication({\"in\": insrc,\n \"mode\": \"fit\",\n \"mode.fit.im\": inref,\n \"out\": srcClip,\n \"pixType\": \"uint16\"})\n extractROIApp.ExecuteAndWriteOutput()\n outSensorModel = os.path.join(pathWd,'SensorModel_%s.geom'%date)\n try :\n PMCMApp = OtbAppBank.CreatePointMatchCoregistrationModel({\"in\": srcClip,\n \"band1\": band,\n \"inref\": inref,\n \"bandref\": bandref,\n \"resample\": resample,\n \"precision\": str(prec),\n \"mfilter\": \"1\",\n \"backmatching\": \"1\",\n \"outgeom\": outSensorModel,\n \"initgeobinstep\": str(step),\n 
\"mingeobinstep\": str(minstep),\n \"minsiftpoints\": str(minsiftpoints),\n \"iterate\": iterate\n })\n PMCMApp.ExecuteAndWriteOutput()\n except RuntimeError :\n shutil.copy(SensorModel,outSensorModel)\n logger.warning('Coregistration failed, %s will be process with %s' %(insrc, outSensorModel))\n continue\n\n outSrc = os.path.join(pathWd,'temp_file.tif')\n io_Src = str(srcClip + '?&skipcarto=true&geom=' + outSensorModel)\n ds = gdal.Open(srcClip)\n prj = ds.GetProjection()\n gt = ds.GetGeoTransform()\n srs = osr.SpatialReference()\n srs.ImportFromWkt(prj)\n code = srs.GetAuthorityCode(None)\n gsp = str(int(2 * round(max(abs(gt[1]), abs(gt[5])))))\n ds = None\n try :\n orthoRecApp = OtbAppBank.CreateOrthoRectification({\"in\": io_Src,\n \"io.out\": outSrc,\n \"map\": \"epsg\",\n \"map.epsg.code\": code,\n \"opt.gridspacing\": gsp,\n \"pixType\": \"uint16\"\n })\n\n if writeFeatures:\n orthoRecApp[0].ExecuteAndWriteOutput()\n else:\n orthoRecApp[0].Execute()\n except RuntimeError :\n os.remove(outSensorModel)\n shutil.copy(SensorModel,outSensorModel)\n orthoRecApp = OtbAppBank.CreateOrthoRectification({\"in\": io_Src,\n \"io.out\": outSrc,\n \"map\": \"epsg\",\n \"map.epsg.code\": code,\n \"opt.gridspacing\": gsp,\n \"pixType\": \"uint16\"\n })\n continue\n\n if writeFeatures:\n orthoRecApp[0].ExecuteAndWriteOutput()\n else:\n orthoRecApp[0].Execute()\n\n ext = os.path.splitext(insrc)[1]\n finalOutput = os.path.join(pathWd, os.path.basename(insrc.replace(ext, ext.replace('.', '_COREG.'))))\n superImposeApp = OtbAppBank.CreateSuperimposeApplication({\"inr\": srcClip,\n \"inm\": orthoRecApp[0],\n \"out\": finalOutput,\n \"pixType\": \"uint16\"})\n superImposeApp[0].ExecuteAndWriteOutput()\n\n shutil.move(finalOutput,insrc.replace(ext, ext.replace('.', '_COREG.')))\n shutil.move(finalOutput.replace(ext, '.geom'),insrc.replace(ext, '_COREG.geom'))\n\n # Mask registration if exists\n masks = glob.glob(os.path.dirname(insrc) + os.sep + 'MASKS' + os.sep + '*BINARY_MASK' + ext)\n if len(masks) != 0:\n for mask in masks:\n srcClip = os.path.join(pathWd,'srcClip.tif')\n extractROIApp = OtbAppBank.CreateExtractROIApplication({\"in\": mask,\n \"mode\": \"fit\",\n \"mode.fit.im\": inref,\n \"out\": srcClip,\n \"pixType\": \"uint16\"})\n extractROIApp.ExecuteAndWriteOutput()\n outSrc = os.path.join(pathWd, 'temp_file.tif')\n io_Src = str(srcClip + '?&skipcarto=true&geom=' + outSensorModel)\n orthoRecApp = OtbAppBank.CreateOrthoRectification({\"in\": io_Src,\n \"io.out\": outSrc,\n \"map\": \"epsg\",\n \"map.epsg.code\": code,\n \"opt.gridspacing\": gsp,\n \"pixType\": \"uint16\"\n })\n if writeFeatures:\n orthoRecApp[0].ExecuteAndWriteOutput()\n else:\n orthoRecApp[0].Execute()\n\n ext = os.path.splitext(insrc)[1]\n finalMask = os.path.join(pathWd, os.basename(mask.replace(ext, ext.replace('.', '_COREG.'))))\n superImposeApp= OtbAppBank.CreateSuperimposeApplication({\"inr\": srcClip,\n \"inm\": orthoRecApp[0],\n \"out\": finalMask,\n \"pixType\": \"uint16\"})\n superImposeApp[0].ExecuteAndWriteOutput()\n\n shutil.move(finalMask,mask.replace(ext, ext.replace('.', '_COREG.')))\n shutil.move(finalMask.replace(ext, '.geom'),mask.replace(ext, '_COREG.geom'))\n \n if writeFeatures == False and os.path.exists(outSensorModel):\n os.remove(outSensorModel)\n\n if datatype in ['S2','S2_S2C']:\n mtd_file = glob.glob(os.path.join(os.path.dirname(insrc),'*_MTD_ALL*'))[0]\n cloud_clear = get_S2_Tile_Cloud_Cover(mtd_file)\n cover = get_S2_Tile_Coverage(mtd_file)\n if cloud_clear > 0.6 and cover > 0.8 :\n 
clean_dates.append(date)\n elif datatype in ['L5','L8']:\n mlt_file = glob.glob(os.path.join(os.path.dirname(insrc),'*_MTL*'))[0]\n cloud_clear = get_L8_Tile_Cloud_Cover(mlt_file)\n if cloud_clear > 0.6 :\n clean_dates.append(date)\n\n if not writeFeatures and os.path.exists(SensorModel):\n os.remove(SensorModel)\n # mode 2 : application on the time series\n elif mode == 2:\n ext = os.path.splitext(insrc)[1]\n file_list = glob.glob(datadir + os.sep + '*' + os.sep + pattern)\n for insrc in file_list:\n srcClip = os.path.join(pathWd,'tempSrcClip.tif')\n extractROIApp = OtbAppBank.CreateExtractROIApplication({\"in\": insrc,\n \"mode\": \"fit\",\n \"mode.fit.im\": inref,\n \"out\": srcClip,\n \"pixType\": \"uint16\"})\n extractROIApp.ExecuteAndWriteOutput()\n outSrc = os.path.join(pathWd,'temp_file.tif')\n io_Src = str(srcClip + '?&skipcarto=true&geom=' + SensorModel)\n ds = gdal.Open(srcClip)\n prj = ds.GetProjection()\n gt = ds.GetGeoTransform()\n srs = osr.SpatialReference()\n srs.ImportFromWkt(prj)\n code = srs.GetAuthorityCode(None)\n gsp = str(int(2 * round(max(abs(gt[1]), abs(gt[5])))))\n ds = None\n orthoRecApp = OtbAppBank.CreateOrthoRectification({\"in\": io_Src,\n \"io.out\": outSrc,\n \"map\": \"epsg\",\n \"map.epsg.code\": code,\n \"opt.gridspacing\": gsp,\n \"pixType\": \"uint16\"\n })\n\n if writeFeatures:\n orthoRecApp[0].ExecuteAndWriteOutput()\n else:\n orthoRecApp[0].Execute()\n\n ext = os.path.splitext(insrc)[1]\n finalOutput = os.path.join(pathWd, os.path.basename(insrc.replace(ext, ext.replace('.', '_COREG.'))))\n superImposeApp = OtbAppBank.CreateSuperimposeApplication({\"inr\": srcClip,\n \"inm\": orthoRecApp[0],\n \"out\": finalOutput,\n \"pixType\": \"uint16\"})\n superImposeApp[0].ExecuteAndWriteOutput()\n\n shutil.move(finalOutput,insrc.replace(ext, ext.replace('.', '_COREG.')))\n shutil.move(finalOutput.replace(ext, '.geom'),insrc.replace(ext, '_COREG.geom'))\n\n # Mask registration if exists\n masks = glob.glob(os.path.dirname(insrc) + os.sep + 'MASKS' + os.sep + '*BINARY_MASK*' + ext)\n if len(masks) != 0:\n for mask in masks:\n srcClip = os.path.join(pathWd,'tempSrcClip.tif')\n extractROIApp = OtbAppBank.CreateExtractROIApplication({\"in\": mask,\n \"mode\": \"fit\",\n \"mode.fit.im\": inref,\n \"out\": srcClip,\n \"pixType\": \"uint16\"})\n extractROIApp.ExecuteAndWriteOutput()\n outSrc = os.path.join(pathWd,'temp_file.tif')\n io_Src = str(srcClip + '?&skipcarto=true&geom=' + SensorModel)\n orthoRecApp = OtbAppBank.CreateOrthoRectification({\"in\": io_Src,\n \"io.out\": outSrc,\n \"map\": \"epsg\",\n \"map.epsg.code\": code,\n \"opt.gridspacing\": gsp,\n \"pixType\": \"uint16\"\n })\n if writeFeatures:\n orthoRecApp[0].ExecuteAndWriteOutput()\n else:\n orthoRecApp[0].Execute()\n\n ext = os.path.splitext(insrc)[1]\n finalMask = os.path.join(pathWd,os.path.basename(mask.replace(ext, ext.replace('.', '_COREG.'))))\n superImposeApp= OtbAppBank.CreateSuperimposeApplication({\"inr\": srcClip,\n \"inm\": orthoRecApp[0],\n \"out\": finalMask,\n \"pixType\": \"uint16\"})\n superImposeApp[0].ExecuteAndWriteOutput()\n\n shutil.move(finalMask,mask.replace(ext, ext.replace('.', '_COREG.')))\n shutil.move(finalMask.replace(ext, '.geom') ,mask.replace(ext, '_COREG.geom'))\n\n os.remove(srcClip)\n if not writeFeatures and os.path.exists(SensorModel):\n os.remove(SensorModel)\n\n return None", "title": "" }, { "docid": "39ce30313a5677b2cf04c747c6a4b308", "score": "0.51326454", "text": "def createvcf(ctx, input_file,reference_file, output_file):\n \"\"\" Exact information which 
is collected as keys in read_input() is retrieved and matched\"\"\"\n \"\"\" Compare the matching keys in both files and extract additional information \"\"\"\n start_time = datetime.now()\n vcf_op = vcfheader(output_file)\n #AF = read_input(input_file)\n AF = ctx.invoke(readinput,input_file=input_file)\n filtered_variants = []\n for variant in VCF(reference_file):\n \"\"\" skipping transctipt_ids \"\"\"\n gene_symbol = re.sub(r'(.*)(_ENST.*)',r'\\1', variant.INFO.get('GENE'))\n vcf_id = variant.ID + ':' + gene_symbol + ':' + variant.INFO.get('AA')\n vcf_id_value = AF.get(vcf_id)\n if vcf_id_value:\n #click.echo('key is:' + str(vcf_id) +';' +'value is:'+ str(vcf_id_value))\n af, VARIANT_TYPE, AA_HGVS = vcf_id_value.split(';')\n variant_info = str(variant).split('\\t')\n \"\"\" Ensembl transcript IDs and CNTs values are not required; remove it\"\"\"\n \"\"\" Convert af values to fraction and join all relevant information \"\"\"\n variant_info = [re.sub(r'(.*)(_ENST\\d+;)',r'\\1;', i) for i in variant_info]\n variant_info = [re.sub(r'(.*)(;CNT=\\d+\\n)',r'\\1', i) for i in variant_info]\n add_info = \";\".join([\"VARIANT_TYPE=\"+ str(VARIANT_TYPE), \"AA_HGVS=\"+str(AA_HGVS),\"AF=\"+str(round(float(af)/100,5))])\n info = \"\\t\".join(variant_info) + ';' + add_info\n if info not in filtered_variants:\n \t#click.echo(info)\n filtered_variants.append(info)\n vcf_op.write(info + '\\n')\n click.echo(\"vcf file created\")\n end_time = datetime.now()\n click.echo(\"Runtime:\" + '{}'.format(end_time - start_time))\n return filtered_variants", "title": "" }, { "docid": "13ad548913a223c187734cc89f235882", "score": "0.51289076", "text": "def genotype(\n input_bam, reference, input_vcf,\n output_vcf, output_csv, tempdir,\n cell_id, docker_image=None\n):\n helpers.makedirs(tempdir)\n\n cmd = ['svtyper-sso',\n '--input_vcf', input_vcf,\n '--bam', input_bam,\n '--ref_fasta', reference,\n '-o', output_vcf]\n\n pypeliner.commandline.execute(*cmd, docker_image=docker_image)\n\n base_data = parse_vcf(output_vcf, None, return_pandas=True)\n\n svtype_annotations = extract_svtyper_info(base_data)\n\n base_data = base_data.iloc[:, :-2] # assumes svtyper info in last 2 cols\n\n output = pd.concat([base_data, svtype_annotations], axis=1)\n\n output['cell_id'] = cell_id\n\n csvutils.write_dataframe_to_csv_and_yaml(output, output_csv, write_header=True)", "title": "" }, { "docid": "f06cdd9534e5e2105f8529adc2774a89", "score": "0.5105667", "text": "def _gen_master_psf(self):\n\n ############################################################\n # !!!!!!!!! USER DEFINED CUBE NOT FULLY TESTED !!!!!!!!!!! #\n # !!!!!!! Analytic still using Airy (x) Gaussian !!!!!!!!! #\n # !!!!!!!!!!!!!!! Implement Moffat PSF !!!!!!!!!!!!!!!!!!! #\n ############################################################\n\n self.psf_size = self.cmds[\"SIM_PSF_SIZE\"]\n\n # Make a PSF for the main mirror. 
If there is one on file, read it in\n # otherwise generate an Airy+Gaussian (or Moffat, Oliver?)\n\n if self.cmds[\"SCOPE_PSF_FILE\"] is None:\n warnings.warn(\"\"\"\n SCOPE_PSF_FILE == None.\n Generating Moffat profile from with FWHM = OBS_SEEING\"\"\")\n logging.debug(\"No PSF Given: making Seeing PSF\")\n\n hdulist = fits.HDUList()\n for lam in self.cmds.lam_bin_centers:\n\n psf_mo = psf.seeing_psf(fwhm=self.cmds[\"OBS_SEEING\"],\n size=self.cmds[\"SIM_PSF_SIZE\"],\n pix_res=self.cmds[\"SIM_DETECTOR_PIX_SCALE\"],\n psf_type=\"moffat\", filename=None)\n\n psf_mo[0].header[\"WAVELENG\"] = lam\n hdulist.append(psf_mo[0])\n\n psf_m1 = psf.UserPSFCube(hdulist, self.lam_bin_centers)\n\n\n elif isinstance(self.cmds[\"SCOPE_PSF_FILE\"], psf.PSFCube):\n psf_m1 = self.cmds[\"SCOPE_PSF_FILE\"]\n #logging.debug(\"Using PSF: \" + self.cmds[\"SCOPE_PSF_FILE\"])\n\n elif isinstance(self.cmds[\"SCOPE_PSF_FILE\"], str):\n if self.cmds.verbose:\n print(\"Using PSF:\", self.cmds[\"SCOPE_PSF_FILE\"])\n\n if os.path.exists(self.cmds[\"SCOPE_PSF_FILE\"]):\n #logging.debug(\"Using PSF: \" + self.cmds[\"SCOPE_PSF_FILE\"])\n\n hdr = fits.getheader(self.cmds[\"SCOPE_PSF_FILE\"], 0)\n if \"ETYPE\" in hdr and hdr[\"ETYPE\"] == \"FVPSF\":\n fname = self.cmds[\"SCOPE_PSF_FILE\"]\n psf_m1 = psf.FieldVaryingPSF(filename=fname,\n **self.cmds.cmds)\n\n else:\n psf_m1 = psf.UserPSFCube(self.cmds[\"SCOPE_PSF_FILE\"],\n self.lam_bin_centers)\n\n if psf_m1[0].pix_res != self.pix_res:\n psf_m1.resample(self.pix_res)\n else:\n warnings.warn(\"\"\"\n Couldn't resolve SCOPE_PSF_FILE.\n Returning an Delta function for SCOPE_PSF_FILE\"\"\")\n\n psf_m1 = psf.DeltaPSFCube(self.lam_bin_centers,\n pix_res=self.pix_res,\n size=9)\n logging.debug(\"Couldn't resolve given PSF: making Delta PSF\")\n\n return psf_m1", "title": "" }, { "docid": "462004899a036f7e972e91b903a4de96", "score": "0.51041055", "text": "def batch_run_sample(bam_fname, target_bed, antitarget_bed, ref_fname,\n output_dir, male_reference=False, scatter=False,\n diagram=False, rlibpath=None, by_count=False):\n # ENH - return probes, segments (cnarr, segarr)\n echo(\"Running the CNVkit pipeline on\", bam_fname, \"...\")\n sample_id = core.fbase(bam_fname)\n sample_pfx = os.path.join(output_dir, sample_id)\n\n raw_tgt = do_coverage(target_bed, bam_fname, by_count)\n raw_tgt.write(sample_pfx + '.targetcoverage.cnn')\n\n raw_anti = do_coverage(antitarget_bed, bam_fname, by_count)\n raw_anti.write(sample_pfx + '.antitargetcoverage.cnn')\n\n cnarr = do_fix(raw_tgt, raw_anti, CNA.read(ref_fname))\n cnarr.write(sample_pfx + '.cnr')\n\n echo(\"Segmenting\", sample_pfx + '.cnr ...')\n segments = segmentation.do_segmentation(sample_pfx + '.cnr', False, 'cbs',\n rlibpath)\n segments.write(sample_pfx + '.cns')\n\n if scatter:\n do_scatter(cnarr, segments)\n pyplot.savefig(sample_pfx + '-scatter.pdf', format='pdf',\n bbox_inches=\"tight\")\n echo(\"Wrote\", sample_pfx + '-scatter.pdf')\n\n if diagram:\n from cnvlib import diagram\n outfname = sample_pfx + '-diagram.pdf'\n diagram.create_diagram(cnarr, segments, 0.6, outfname, male_reference)\n echo(\"Wrote\", outfname)", "title": "" }, { "docid": "58e771ac5adfbf734081ed8bdc27548d", "score": "0.51030767", "text": "def add_call_to_variant(variant, predictions, qual_filter=0, sample_name=None):\n call = variant_utils.only_call(variant)\n n_alleles = len(variant.alternate_bases) + 1\n index, genotype = most_likely_genotype(predictions, n_alleles=n_alleles)\n gq, variant.quality = compute_quals(predictions, index)\n call.call_set_name = 
sample_name\n variantcall_utils.set_gt(call, genotype)\n variantcall_utils.set_gq(call, gq)\n gls = [genomics_math.perror_to_bounded_log10_perror(gp) for gp in predictions]\n variantcall_utils.set_gl(call, gls)\n uncall_gt_if_no_ad(variant)\n variant.filter[:] = compute_filter_fields(variant, qual_filter)\n uncall_homref_gt_if_lowqual(variant, FLAGS.cnn_homref_call_min_gq)\n return variant", "title": "" }, { "docid": "a4d3b5b5680f6edd94c5b80a10bb5580", "score": "0.5100759", "text": "def do_stdpractice_gst(dataFilenameOrSet,targetGateFilenameOrSet,\n prepStrsListOrFilename, effectStrsListOrFilename,\n germsListOrFilename, maxLengths, modes=\"TP,CPTP,Target\",\n gaugeOptSuite=('single','unreliable2Q'),\n gaugeOptTarget=None, modelsToTest=None, comm=None, memLimit=None,\n advancedOptions=None, output_pkl=None, verbosity=2):\n printer = _objs.VerbosityPrinter.build_printer(verbosity, comm)\n if modelsToTest is None: modelsToTest = {}\n \n #Get/load target gateset\n gs_target = _load_gateset(targetGateFilenameOrSet)\n\n #Get/load fiducials and germs\n prepStrs, effectStrs, germs = _load_fiducials_and_germs(\n prepStrsListOrFilename,\n effectStrsListOrFilename,\n germsListOrFilename)\n \n #Get/load dataset\n ds = _load_dataset(dataFilenameOrSet, comm, printer)\n\n ret = None\n modes = modes.split(\",\")\n with printer.progress_logging(1):\n for i,mode in enumerate(modes):\n printer.show_progress(i, len(modes), prefix='-- Std Practice: ', suffix=' (%s) --' % mode)\n\n #prepare advanced options dictionary\n if advancedOptions is not None:\n advanced = advancedOptions.get('all',{})\n advanced.update( advancedOptions.get(mode,{}) )\n else: advanced = {}\n\n if mode == \"Target\":\n est_label = mode\n tgt = gs_target.copy() #no parameterization change\n tgt.default_gauge_group = _objs.TrivialGaugeGroup(tgt.dim) #so no gauge opt is done\n advanced.update( {'appendTo': ret, 'estimateLabel': est_label,\n 'onBadFit': []} )\n ret = do_model_test(gs_target, ds, tgt, prepStrs, \n effectStrs, germs, maxLengths, False, advanced,\n comm, memLimit, None, printer-1)\n \n elif mode in ('full','TP','CPTP','H+S','S','static'): # mode is a parameterization \n est_label = parameterization = mode #for now, 1-1 correspondence\n tgt = gs_target.copy(); tgt.set_all_parameterizations(parameterization)\n advanced.update( {'appendTo': ret, 'estimateLabel': est_label } )\n ret = do_long_sequence_gst(ds, tgt, prepStrs, effectStrs, germs,\n maxLengths, False, advanced, comm, memLimit,\n None, printer-1)\n elif mode in modelsToTest:\n est_label = mode\n tgt = gs_target.copy() #no parameterization change\n tgt.default_gauge_group = _objs.TrivialGaugeGroup(tgt.dim) #so no gauge opt is done\n advanced.update( {'appendTo': ret, 'estimateLabel': est_label } )\n ret = do_model_test(modelsToTest[mode], ds, tgt, prepStrs,\n effectStrs, germs, maxLengths, False, advanced,\n comm, memLimit, None, printer-1)\n else:\n raise ValueError(\"Invalid item in 'modes' argument: %s\" % mode)\n\n #Get gauge optimization dictionary\n assert(not printer.is_recording()); printer.start_recording()\n gaugeOptSuite_dict = gaugeopt_suite_to_dictionary(gaugeOptSuite, tgt,\n advancedOptions, printer-1)\n\n if gaugeOptTarget is not None:\n assert(isinstance(gaugeOptTarget,_objs.GateSet)),\"`gaugeOptTarget` must be None or a GateSet\"\n for goparams in gaugeOptSuite_dict.values():\n goparams_list = [goparams] if hasattr(goparams,'keys') else goparams\n for goparams_dict in goparams_list:\n if 'targetGateset' in goparams_dict:\n 
_warnings.warn((\"`gaugeOptTarget` argument is overriding\"\n \"user-defined targetGateset in gauge opt\"\n \"param dict(s)\"))\n goparams_dict.update( {'targetGateset': gaugeOptTarget } )\n \n #Gauge optimize to list of gauge optimization parameters\n for goLabel,goparams in gaugeOptSuite_dict.items():\n \n printer.log(\"-- Performing '%s' gauge optimization on %s estimate --\" % (goLabel,est_label),2)\n gsStart = ret.estimates[est_label].get_start_gateset(goparams)\n ret.estimates[est_label].add_gaugeoptimized(goparams, None, goLabel, comm, printer-3)\n\n #Gauge optimize data-scaled estimate also\n for suffix in ROBUST_SUFFIX_LIST:\n if est_label + suffix in ret.estimates:\n gsStart_robust = ret.estimates[est_label+suffix].get_start_gateset(goparams)\n if gsStart_robust.frobeniusdist(gsStart) < 1e-8:\n printer.log(\"-- Conveying '%s' gauge optimization to %s estimate --\" % (goLabel,est_label+suffix),2)\n params = ret.estimates[est_label].goparameters[goLabel] #no need to copy here\n gsopt = ret.estimates[est_label].gatesets[goLabel].copy()\n ret.estimates[est_label + suffix].add_gaugeoptimized(params, gsopt, goLabel, comm, printer-3)\n else:\n printer.log(\"-- Performing '%s' gauge optimization on %s estimate --\" % (goLabel,est_label+suffix),2)\n ret.estimates[est_label + suffix].add_gaugeoptimized(goparams, None, goLabel, comm, printer-3)\n \n # Add gauge optimizations to end of any existing \"stdout\" meta info\n if 'stdout' in ret.estimates[est_label].meta:\n ret.estimates[est_label].meta['stdout'].extend(printer.stop_recording())\n else:\n ret.estimates[est_label].meta['stdout'] = printer.stop_recording()\n\n #Write results to a pickle file if desired\n if output_pkl and (comm is None or comm.Get_rank() == 0):\n if _compat.isstr(output_pkl):\n with open(output_pkl, 'wb') as pklfile:\n _pickle.dump(ret, pklfile)\n else:\n _pickle.dump(ret, output_pkl)\n\n return ret", "title": "" }, { "docid": "55aa8bf74b670f46c79ea481d5355846", "score": "0.50940514", "text": "def gwas(batch, vcf, phenotypes):\n cores = 2\n g = batch.new_job(name='run-gwas')\n g.image('us-docker.pkg.dev/<MY_PROJECT>/1kg-gwas:latest')\n g.cpu(cores)\n g.declare_resource_group(ofile={\n 'bed': '{root}.bed',\n 'bim': '{root}.bim',\n 'fam': '{root}.fam',\n 'assoc': '{root}.assoc'\n })\n g.command(f'''\npython3 /run_gwas.py \\\n --vcf {vcf} \\\n --phenotypes {phenotypes} \\\n --output-file {g.ofile} \\\n --cores {cores}\n''')\n return g", "title": "" }, { "docid": "6bdfaba5f38de280d4c2ee70dce7c405", "score": "0.50940377", "text": "def post_action_for_cpp ( ) :\n\n from Gaudi.Configuration import log\n from Configurables import ApplicationMgr\n app = ApplicationMgr( OutputLevel = 3 )\n app.EvtMax = 0\n app.EvtSel = 'NONE'\n\n\n from Configurables import LHCb__ParticlePropertySvc as PPSvc\n from Configurables import DetDataSvc\n from Configurables import LoKiSvc\n\n #\n ## some reshuffling of order of services is needed\n # in particular DOD should come after PPSVC, LoKiSvc and ToolSvc\n #\n\n services = app.ExtSvc\n app.ExtSvc = [ DetDataSvc('DetectorDataSvc'), PPSvc() ,\n LoKiSvc(Welcome=False) ] + services\n\n #\n ## suppress some prints\n #\n from Configurables import TimingAuditor\n timer = TimingAuditor()\n from Configurables import SequencerTimerTool\n timer.addTool ( SequencerTimerTool , 'TIMER' )\n timer.TIMER.OutputLevel = 5\n\n # suppress printout of various summaries from algorithms.\n from Gaudi.Configuration import allConfigurables\n for conf in allConfigurables.itervalues():\n for opt in ('StatPrint', 
'ErrorsPrint', 'HistoPrint'):\n if opt in conf.__slots__:\n setattr(conf, opt, False)\n\n # ensure that prints from the main tools/factories are not suppressed\n import Configurables\n from Configurables import LoKi__Hybrid__CoreFactory as CoreFactory\n from Configurables import LoKi__Hybrid__Tool as HybridFactory\n from Configurables import LoKi__Hybrid__HltFactory as HltFactory\n for Factory, names in {HltFactory : (\"HltFactory\",\n \"Hlt1HltFactory\",\n \"Hlt2HltFactory\"),\n CoreFactory : (\"Hlt1CoreFactory\",\n \"Hlt2CoreFactory\",\n \"Hlt1Factory\"),\n HybridFactory : (\"Hlt1HybridFactory\", \"Hlt2HybridFactory\")}.iteritems():\n for name in names:\n f = Factory(name)\n f.OutputLevel = 2\n f.StatPrint = True", "title": "" }, { "docid": "ea38b0c86e669658eff78ba273e66fe1", "score": "0.50843084", "text": "def run(items):\n assert len(items) == 1, \"Expect one input to MetaSV ensemble calling\"\n data = items[0]\n work_dir = _sv_workdir(data)\n out_file = os.path.join(work_dir, \"variants.vcf.gz\")\n cmd = _get_cmd() + [\"--sample\", dd.get_sample_name(data), \"--reference\", dd.get_ref_file(data),\n \"--bam\", dd.get_align_bam(data), \"--outdir\", work_dir]\n methods = []\n for call in data.get(\"sv\", []):\n if call[\"variantcaller\"] in SUPPORTED and call[\"variantcaller\"] not in methods:\n methods.append(call[\"variantcaller\"])\n cmd += [\"--%s_vcf\" % call[\"variantcaller\"], call.get(\"vcf_file\", call[\"vrn_file\"])]\n if len(methods) >= MIN_CALLERS:\n if not utils.file_exists(out_file):\n tx_work_dir = utils.safe_makedir(os.path.join(work_dir, \"raw\"))\n ins_stats = shared.calc_paired_insert_stats_save(dd.get_align_bam(data),\n os.path.join(tx_work_dir, \"insert-stats.yaml\"))\n cmd += [\"--workdir\", tx_work_dir, \"--num_threads\", str(dd.get_num_cores(data))]\n cmd += [\"--spades\", utils.which(\"spades.py\"), \"--age\", utils.which(\"age_align\")]\n cmd += [\"--assembly_max_tools=1\", \"--assembly_pad=500\"]\n cmd += [\"--boost_sc\", \"--isize_mean\", ins_stats[\"mean\"], \"--isize_sd\", ins_stats[\"std\"]]\n do.run(cmd, \"Combine variant calls with MetaSV\")\n filters = (\"(NUM_SVTOOLS = 1 && ABS(SVLEN)>50000) || \"\n \"(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_FLANK_PERCENT>80) || \"\n \"(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_NUM_GOOD_REC=0) || \"\n \"(ABS(SVLEN)<4000 && BA_NUM_GOOD_REC>2)\")\n filter_file = vfilter.cutoff_w_expression(out_file, filters,\n data, name=\"ReassemblyStats\", limit_regions=None)\n effects_vcf, _ = effects.add_to_vcf(filter_file, data, \"snpeff\")\n data[\"sv\"].append({\"variantcaller\": \"metasv\",\n \"vrn_file\": effects_vcf or filter_file})\n return [data]", "title": "" }, { "docid": "25c85fd351c9ca7dfa4a2ad0f9bda0ca", "score": "0.5080464", "text": "def batch_run_sample(\n bam_fname,\n target_bed,\n antitarget_bed,\n ref_fname,\n output_dir,\n is_haploid_x_reference,\n diploid_parx_genome,\n plot_scatter,\n plot_diagram,\n rscript_path,\n by_count,\n skip_low,\n seq_method,\n segment_method,\n processes,\n do_cluster,\n fasta=None,\n):\n # ENH - return probes, segments (cnarr, segarr)\n logging.info(\"Running the CNVkit pipeline on %s ...\", bam_fname)\n sample_id = core.fbase(bam_fname)\n sample_pfx = os.path.join(output_dir, sample_id)\n\n raw_tgt = coverage.do_coverage(target_bed, bam_fname, by_count, 0, processes, fasta)\n tabio.write(raw_tgt, sample_pfx + \".targetcoverage.cnn\")\n\n raw_anti = coverage.do_coverage(\n antitarget_bed, bam_fname, by_count, 0, processes, fasta\n )\n tabio.write(raw_anti, sample_pfx + 
\".antitargetcoverage.cnn\")\n\n cnarr = fix.do_fix(\n raw_tgt,\n raw_anti,\n read_cna(ref_fname),\n diploid_parx_genome,\n do_gc=True,\n do_edge=(seq_method == \"hybrid\"),\n do_rmask=True,\n do_cluster=do_cluster,\n )\n tabio.write(cnarr, sample_pfx + \".cnr\")\n\n logging.info(\"Segmenting %s.cnr ...\", sample_pfx)\n segments = segmentation.do_segmentation(\n cnarr,\n segment_method,\n diploid_parx_genome,\n rscript_path=rscript_path,\n skip_low=skip_low,\n processes=processes,\n **({\"threshold\": 1e-6} if seq_method == \"wgs\" else {}),\n )\n\n logging.info(\"Post-processing %s.cns ...\", sample_pfx)\n # TODO/ENH take centering shift & apply to .cnr for use in segmetrics\n seg_metrics = segmetrics.do_segmetrics(\n cnarr,\n segments,\n interval_stats=[\"ci\"],\n alpha=0.5,\n smoothed=True,\n skip_low=skip_low,\n )\n tabio.write(seg_metrics, sample_pfx + \".cns\")\n\n # Remove likely false-positive breakpoints\n seg_call = call.do_call(seg_metrics, method=\"none\", filters=[\"ci\"])\n # Calculate another segment-level test p-value\n seg_alltest = segmetrics.do_segmetrics(\n cnarr, seg_call, location_stats=[\"p_ttest\"], skip_low=skip_low\n )\n # Finally, assign absolute copy number values to each segment\n seg_alltest.center_all(\"median\", diploid_parx_genome=diploid_parx_genome)\n seg_final = call.do_call(seg_alltest, method=\"threshold\")\n tabio.write(seg_final, sample_pfx + \".call.cns\")\n\n # Test for single-bin CNVs separately\n seg_bintest = bintest.do_bintest(cnarr, seg_call, target_only=True)\n tabio.write(seg_bintest, sample_pfx + \".bintest.cns\")\n\n if plot_scatter:\n scatter.do_scatter(cnarr, seg_final)\n pyplot.savefig(sample_pfx + \"-scatter.png\", format=\"png\", bbox_inches=\"tight\")\n logging.info(\"Wrote %s-scatter.png\", sample_pfx)\n\n if plot_diagram:\n is_xx = cnarr.guess_xx(is_haploid_x_reference, diploid_parx_genome)\n outfname = sample_pfx + \"-diagram.pdf\"\n diagram.create_diagram(\n cnarr.shift_xx(is_haploid_x_reference, is_xx, diploid_parx_genome),\n seg_final.shift_xx(is_haploid_x_reference, is_xx, diploid_parx_genome),\n 0.5,\n 3,\n outfname,\n )\n logging.info(\"Wrote %s\", outfname)", "title": "" }, { "docid": "27f155338d2f4d5508430902eb8db9ab", "score": "0.5079436", "text": "def batch_run_sample(bam_fname, target_bed, antitarget_bed, ref_fname,\n output_dir, male_reference=False, scatter=False,\n diagram=False, rlibpath=None, by_count=False):\n # ENH - return probes, segments (cnarr, segarr)\n echo(\"Running the CNVkit pipeline on\", bam_fname, \"...\")\n sample_id = core.fbase(bam_fname)\n sample_pfx = os.path.join(output_dir, sample_id)\n\n raw_tgt = do_coverage(target_bed, bam_fname, by_count)\n raw_tgt.write(sample_pfx + '.targetcoverage.cnn')\n\n raw_anti = do_coverage(antitarget_bed, bam_fname, by_count)\n raw_anti.write(sample_pfx + '.antitargetcoverage.cnn')\n\n cnarr = do_fix(raw_tgt, raw_anti, _CNA.read(ref_fname))\n cnarr.write(sample_pfx + '.cnr')\n\n echo(\"Segmenting\", sample_pfx + '.cnr ...')\n segments = segmentation.do_segmentation(sample_pfx + '.cnr', 'cbs',\n rlibpath=rlibpath)\n segments.write(sample_pfx + '.cns')\n\n if scatter:\n do_scatter(cnarr, segments)\n pyplot.savefig(sample_pfx + '-scatter.pdf', format='pdf',\n bbox_inches=\"tight\")\n echo(\"Wrote\", sample_pfx + '-scatter.pdf')\n\n if diagram:\n from cnvlib import diagram\n outfname = sample_pfx + '-diagram.pdf'\n diagram.create_diagram(cnarr, segments, 0.5, 3, outfname,\n male_reference)\n echo(\"Wrote\", outfname)", "title": "" }, { "docid": 
"86ea09fef38688b7721c511f4ac8f4c9", "score": "0.50791883", "text": "def init_genotypes(self):\n atom = tables.Int8Atom()\n self.genotype = self.h5_file.createCArray(self.h5_file.root, 'genotype', atom,\n (self.num_probes, self.num_individuals),\n title='Genotype', filters=self.pytable_filters)", "title": "" }, { "docid": "b622762a835071a14263b0fb1087994a", "score": "0.50739306", "text": "def Gen(self, *args):\n return _snap.TSFltVV_Gen(self, *args)", "title": "" }, { "docid": "369760f41943e67c7002d8033e96958a", "score": "0.5065871", "text": "def process_vcf(self, inputfile):\n # initialize reference genome\n fasta_reader = Fasta(self.args.fastafile, read_ahead=1000000)\n\n # initialize vcf reader\n if self.args.samplefile:\n keep_samples = parseSampleFile(self.args.samplefile)\n\n vcf_reader = VCF(\n inputfile,\n mode='rb',\n gts012=True,\n lazy=True,\n samples=keep_samples)\n else:\n vcf_reader = VCF(inputfile, mode='rb', gts012=True, lazy=True)\n\n nbp = (self.args.length - 1) // 2\n\n # index samples\n if (self.args.samplefile and self.args.groupvar):\n all_samples = vcf_reader.samples\n\n sg_dict = indexGroups(self.args.samplefile, self.args.groupvar)\n samples = sorted(list(set(sg_dict.values())))\n\n # get boolean vector of samples that are in sample file\n samples_keep_match = np.isin(all_samples, list(sg_dict.keys()))\n\n # get indices of matching samples\n samples_keep_idx = np.where(samples_keep_match)\n\n # get list of individual sample ids to keep\n samples_keep = sorted(list(set(sg_dict.keys())))\n\n util_log.debug(\"%s samples will be pooled into %s groups: %s\",\n len(all_samples), len(samples), \",\".join(samples))\n else:\n samples = vcf_reader.samples\n\n samples_dict = {}\n for i, sample in enumerate(samples):\n samples_dict[sample] = i\n\n # Query records in VCF and build matrix\n M = np.zeros((len(samples), len(self.subtypes_dict)))\n numsites_keep = 0\n numsites_skip = 0\n chrseq = '0'\n chr_check = \"none\"\n\n for record in vcf_reader:\n\n # Filter by SNP status, # alt alleles, and FILTER column\n if (not record.is_snp or len(record.ALT) != 1\n or record.FILTER is not None):\n numsites_skip += 1\n continue\n\n # Filter by allele count\n if record.INFO['AC'] > self.args.maxac > 0:\n numsites_skip += 1\n continue\n\n row_chr = record.CHROM\n\n # check chromosome formatting matches between MAF and fasta files\n if numsites_keep == 0:\n if \"chr1\" in fasta_reader and \"chr\" not in row_chr:\n chr_check = \"add\"\n util_log.debug(\n \"formatting mismatch: 'chr' only in fasta file\")\n elif \"chr1\" not in fasta_reader and \"chr\" in row_chr:\n chr_check = \"delete\"\n util_log.debug(\n \"formatting mismatch: 'chr' only in MAF file\")\n else:\n util_log.debug(\"chromosome formatting matches\")\n\n if chr_check == \"add\":\n row_chr = \"chr\" + row_chr\n elif chr_check == \"delete\":\n row_chr = row_chr.replace('chr', '')\n\n if row_chr != chrseq:\n sequence = fasta_reader[row_chr]\n chrseq = row_chr\n\n # check and update chromosome sequence\n # if record.CHROM != chrseq:\n # sequence = fasta_reader[record.CHROM]\n # chrseq = record.CHROM\n\n lseq = sequence[record.POS - (nbp + 1):record.POS + nbp].seq\n\n mu_type = record.REF + str(record.ALT[0])\n category = getCategory(mu_type)\n motif_a = getMotif(lseq)\n subtype = str(category + \".\" + motif_a)\n\n if subtype not in self.subtypes_dict:\n numsites_skip += 1\n continue\n\n st = self.subtypes_dict[subtype]\n\n # currently only works with singletons--\n if (self.args.samplefile and self.args.groupvar):\n\n gt_new = 
record.gt_types\n\n if (self.args.impute and 3 in gt_new):\n gt_complete = gt_new[gt_new != 3]\n freq = sum(gt_complete) / len(gt_complete)\n gt_new[gt_new == 3] = freq\n\n else:\n gt_new[gt_new == 3] = 0\n\n # if not any(\"/\" in b for b in record.gt_bases):\n if self.args.haploid:\n gt_new = np.divide(gt_new, 2.)\n\n # get array of genotypes only for samples in samplefile\n gt_sub = gt_new[samples_keep_idx]\n\n if gt_sub.sum() == 0:\n numsites_skip += 1\n continue\n\n # initialize dict of group allele counts = 0\n sg_counts = {k: 0 for k in sorted(list(set(sg_dict.values())))}\n\n # initialize dict of allele counts per sample\n d2 = dict(zip(samples_keep, gt_sub))\n\n # iterate per-sample counts and update per-group counts\n for key, value in d2.items():\n sg_counts[sg_dict[key]] += value\n\n # add to matrix\n M[:, st] = M[:, st] + list(sg_counts.values())\n numsites_keep += 1\n\n else:\n gt_new = record.gt_types\n if (self.args.impute and 3 in gt_new):\n gt_complete = gt_new[gt_new != 3]\n freq = sum(gt_complete) / len(gt_complete)\n gt_new[gt_new == 3] = freq\n\n else:\n gt_new[gt_new == 3] = 0\n\n # if not any(\"/\" in b for b in record.gt_bases):\n if self.args.haploid:\n gt_new = np.divide(gt_new, 2.)\n\n M[:, st] = M[:, st] + gt_new\n numsites_keep += 1\n # util_log.debug(gt_new)\n\n if numsites_keep % 100000 != 0:\n continue\n util_log.debug(\"%s : %s sites counted\", inputfile, numsites_keep)\n\n util_log.debug(\"%s : %s sites counted\", inputfile, numsites_keep)\n util_log.debug(\"%s : %s sites skipped\", inputfile, numsites_skip)\n\n out = collections.namedtuple('Out', ['M', 'samples'])(M, samples)\n if self.par:\n out = M\n\n return out", "title": "" }, { "docid": "f4759de3cb2203701157434ad00e9704", "score": "0.50571954", "text": "def annotate_effects(orig_file, snpeff_file, genome_file, config):\n broad_runner = broad.runner_from_config(config)\n out_file = \"%s-annotated%s\" % os.path.splitext(orig_file)\n # Avoid generalization since 2.0.3 is not working\n #snpeff_file = _general_snpeff_version(snpeff_file)\n variant_regions = config[\"algorithm\"].get(\"variant_regions\", None)\n if not file_exists(out_file):\n with file_transaction(out_file) as tx_out_file:\n params = [\"-T\", \"VariantAnnotator\",\n \"-R\", genome_file,\n \"-A\", \"SnpEff\",\n \"--variant\", orig_file,\n \"--snpEffFile\", snpeff_file,\n \"--out\", tx_out_file]\n broad_runner.run_gatk(params)\n if variant_regions:\n params += [\"-L\", variant_regions, \"--interval_set_rule\", \"INTERSECTION\"]\n return out_file", "title": "" }, { "docid": "289db3beaed5bbe11a9e6f6cb182bd54", "score": "0.50569683", "text": "def on_post_sample(self, fit, **kwargs):\n print(\"In OtherDummyPlugin `on_post_sample`.\")\n return fit", "title": "" }, { "docid": "9df1e1e4ae990b74df8222ff843c8d53", "score": "0.5055952", "text": "def format_pindel_vcf(input_vcf: str, output_vcf: str) -> None:\n logger = Logger.get_logger(\"format_pindel_vcf\")\n logger.info(\"Formats Pindel VCFs.\")\n\n # setup\n total = 0\n reader = pysam.VariantFile(input_vcf)\n header = get_header(reader.header)\n mode = get_pysam_outmode(output_vcf)\n writer = pysam.VariantFile(output_vcf, mode=mode, header=header)\n\n # Process\n try:\n for record in reader.fetch():\n total += 1\n\n tgt = record.samples[\"TUMOR\"][\"GT\"]\n flag = tgt == (0, 0)\n if flag:\n record.samples[\"TUMOR\"][\"GT\"] = (0, 1)\n # Info\n new_info = get_info(record, flag)\n\n # New record\n new_record = writer.new_record()\n new_record.contig = record.contig\n new_record.alleles = 
record.alleles\n new_record.start = record.start\n new_record.stop = record.stop\n new_record.id = record.id\n new_record.qual = record.qual\n\n for f in record.filter:\n new_record.filter.add(f)\n\n for i in new_info:\n new_record.info[i[0]] = i[1]\n\n for i, sample in enumerate(record.samples):\n for k, v in record.samples[sample].items():\n new_record.samples[i][k] = v\n writer.write(new_record)\n\n finally:\n reader.close()\n writer.close()\n\n if mode == \"wz\":\n logger.info(\"Creating tabix index...\")\n tbx = pysam.tabix_index(output_vcf, preset=\"vcf\", force=True)\n\n logger.info(\"Processed {} records.\".format(total))", "title": "" }, { "docid": "56f2f66bb5eb6a7ff14397d4c156060a", "score": "0.50403845", "text": "def main(features_path, output_filepath, type_feature):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from normed data')\n\n feat_extractor = {\n 'maxpeak_argmax': find_maxpeak_argmax,\n 'maxpeak_2d': find_maxpeak_2d,\n 'maxpeak_n_argmax': gen_n_peak(10, \"argmax\"),\n 'maxpeak_n_2d': gen_n_peak(5, \"amp\"),\n 'eng': extract_features,\n 'fit': fit_params,\n 'clean': clean_spec\n }\n\n for type in ['labeled', 'unlabeled']:\n feat_extractor[type_feature](os.path.join(features_path, type), os.path.join(output_filepath, type))", "title": "" }, { "docid": "f9cc2268b407058dc6674616719b881f", "score": "0.5036633", "text": "def prepare_input(GLat: np.ndarray, GLon: np.ndarray, MLat: np.ndarray, MLon: np.ndarray, MLT: np.ndarray, DOY: np.ndarray, \n SYMH: np.ndarray, P107: np.ndarray, Kp: np.ndarray) -> (np.ndarray, np.ndarray):\n # CONFIGURE THE PANDAS DATAFRAME FOR THE F2-PEAK:\n df_F2peak = pd.DataFrame(columns = ['GLat', 'GLon','MLat', 'MLon', 'MLT', 'DOY', 'SYM-H', 'P10.7', 'Kp'],\n data = np.column_stack((GLat, GLon, MLat, MLon, MLT, DOY, SYMH, P107, Kp)))\n # Adding Fourier features:\n df_F2peak_with_FFT = Add_FFT(df_F2peak, ['GLat','MLat'], [360,360], 2)\n df_F2peak_with_FFT = Add_FFT(df_F2peak_with_FFT, ['GLon','MLon','DOY'], [360,360,365], 3)\n df_F2peak_with_FFT = Add_FFT(df_F2peak_with_FFT, ['MLT'], [24], 4)\n # The correct order of columns to run NmF2 and hmF2 models:\n column_names_peak = ['SYM-H', 'P10.7', 'Kp', 'sin_1_GLat', 'cos_1_GLat', 'sin_2_GLat',\n 'cos_2_GLat', 'sin_1_MLat', 'cos_1_MLat', 'sin_2_MLat', 'cos_2_MLat',\n 'sin_1_GLon', 'cos_1_GLon', 'sin_2_GLon', 'cos_2_GLon', 'sin_3_GLon',\n 'cos_3_GLon', 'sin_1_MLon', 'cos_1_MLon', 'sin_2_MLon', 'cos_2_MLon',\n 'sin_3_MLon', 'cos_3_MLon', 'sin_1_DOY', 'cos_1_DOY', 'sin_2_DOY',\n 'cos_2_DOY', 'sin_3_DOY', 'cos_3_DOY', 'sin_1_MLT', 'cos_1_MLT',\n 'sin_2_MLT', 'cos_2_MLT', 'sin_3_MLT', 'cos_3_MLT', 'sin_4_MLT', 'cos_4_MLT']\n\n # CONFIGURE THE PANDAS DATAFRAME FOR THE TOPSIDE:\n df_topside = pd.DataFrame(columns = ['GLat', 'GLon', 'toplat','toplon', 'MLat', 'MLon', 'MLT', 'DOY', 'SYM-H', 'P10.7', 'Kp'],\n data = np.column_stack((GLat, GLon, GLat, GLon, MLat, MLon, MLT, DOY, SYMH, P107, Kp))) # During the model training, toplat and toplon variables from RO files were used due to the fact that COSMIC profiles are not exactly vertical, but during the usual model runs we assume the vertical profiles and therefore toplat=GLat and toplon=GLon\n \n #Adding Fourier features:\n df_topside_with_FFT = Add_FFT(df_topside, ['GLat','MLat'], [360,360], 2)\n df_topside_with_FFT = Add_FFT(df_topside_with_FFT, ['GLon','MLon','DOY'], [360,360,365], 3)\n df_topside_with_FFT = Add_FFT(df_topside_with_FFT, ['MLT'], [24], 4)\n df_topside_with_FFT = Add_FFT(df_topside_with_FFT, ['toplon'], [360], 1)\n # 
The correct order of columns to run H0 and dHs/dh models:\n column_names_topside=['toplat', 'SYM-H', 'P10.7', 'Kp', 'sin_1_GLat', 'cos_1_GLat',\n 'sin_2_GLat', 'cos_2_GLat', 'sin_1_MLat', 'cos_1_MLat', 'sin_2_MLat',\n 'cos_2_MLat', 'sin_1_GLon', 'cos_1_GLon', 'sin_2_GLon', 'cos_2_GLon',\n 'sin_3_GLon', 'cos_3_GLon', 'sin_1_MLon', 'cos_1_MLon', 'sin_2_MLon',\n 'cos_2_MLon', 'sin_3_MLon', 'cos_3_MLon', 'sin_1_DOY', 'cos_1_DOY',\n 'sin_2_DOY', 'cos_2_DOY', 'sin_3_DOY', 'cos_3_DOY', 'sin_1_MLT',\n 'cos_1_MLT', 'sin_2_MLT', 'cos_2_MLT', 'sin_3_MLT', 'cos_3_MLT', 'sin_4_MLT',\n 'cos_4_MLT', 'sin_1_toplon', 'cos_1_toplon']\n \n return df_F2peak_with_FFT[column_names_peak].to_numpy(), df_topside_with_FFT[column_names_topside].to_numpy()", "title": "" }, { "docid": "4bc48f7092769e8209db1644235a3dbe", "score": "0.503415", "text": "def handle_chromosome_and_variants(\n chr: SeqIO.SeqRecord,\n vcf_file: VCF,\n sample_list: List[str],\n outdir: Path,\n io_mode: Literal[\"single_txt\", \"jsonl\", \"multiple_txt\"] = \"single_txt\",\n):\n samples = \",\".join(sample_list)\n\n if io_mode == \"single_txt\":\n filename = outdir / f\"{chr.id}.{samples}.documents.txt\"\n with filename.open(mode=\"w\") as out:\n for document in tqdm(generate_consensus_documents(chr, vcf_file, sample_list), desc=chr.description):\n out.write(document.to_text())\n out.write(\"\\n\")\n elif io_mode == \"jsonl\":\n filename = outdir / f\"{chr.id}.{samples}.documents.jsonl\"\n with filename.open(mode=\"w\") as out:\n for document in tqdm(generate_consensus_documents(chr, vcf_file, sample_list), desc=chr.description):\n out.write(document.to_jsonl())\n out.write(\"\\n\")\n elif io_mode == \"multiple_txt\":\n for idx, document in enumerate(generate_consensus_documents(chr, vcf_file, sample_list)):\n filename = outdir / f\"{chr.id}.{samples}.document_{idx}.txt\"\n with filename.open(mode=\"w\") as out:\n out.write(document.to_text())", "title": "" }, { "docid": "9083d144029673bedc455460d29f8b03", "score": "0.50340605", "text": "def __init__(self,method,ischain=True,isfunc=None,\n thinlen=0.0,burnlen=0.0,\n ndim=None, kmax= 5, \n priorvolume=1,debug=False,\n nsample=None,\n nbatch=1,\n brange=None,\n bscale='',\n verbose=1,args={},\n **gdkwargs):\n #\n self.verbose=verbose\n if debug or verbose>1: logging.basicConfig(level=logging.DEBUG)\n if verbose==0: logging.basicConfig(level=logging.WARNING) \n self.logger = logging.getLogger(__name__)\n \n self.info={}\n #\n self.nbatch=nbatch\n self.brange=brange #todo: check for [N] \n self.bscale=bscale if not isinstance(self.brange,int) else 'constant'\n \n # The arrays of powers and nchain record the number of samples \n # that will be analysed at each iteration. 
\n #idtrial is just an index\n self.idbatch=np.arange(self.nbatch,dtype=int)\n self.powers = np.zeros(self.nbatch)\n self.bsize = np.zeros(self.nbatch,dtype=int)\n self.nchain = np.zeros(self.nbatch,dtype=int) \n #\n self.kmax=max(2,kmax)\n self.priorvolume=priorvolume\n #\n self.ischain=ischain\n #\n self.fname=None\n #\n if ischain:\n \n if isinstance(method,str):\n self.fname=method \n self.logger.debug('Using chains: ',method)\n else:\n self.logger.debug('dictionary of samples and loglike array passed')\n \n else: #python class which includes a method called sampler\n \n if nsample is None:\n self.nsample=100000\n else:\n self.nsample=nsample\n \n #given a class name, get an instance\n if isinstance(method,str):\n XClass = getattr(sys.modules[__name__], method)\n else:\n XClass=method\n \n if hasattr(XClass, '__class__'):\n self.logger.debug(__name__+': method is an instance of a class')\n self.method=XClass\n else:\n self.logger.debug(__name__+': method is class variable .. instantiating class')\n self.method=XClass(*args) \n #if passed class has some info, display it\n try:\n print()\n msg=self.method.info() \n print()\n except:\n pass \n # Now Generate samples.\n # Output should be dict - {'chains':,'logprob':,'weight':} \n method=self.method.Sampler(nsamples=self.nsamples) \n \n #======== By this line we expect only chains either in file or dict ====\n self.gd = MCSamples(method,debug=verbose>1,**gdkwargs)\n\n if burnlen>0:\n _=self.gd.removeBurn(remove=burnlen)\n if thinlen>0:\n if thinlen<1:\n self.logger.info('calling poisson_thin ..')\n _=self.gd.thin_poisson(thinlen)\n else:\n _=self.gd.thin(nthin=thinlen) \n\n if isfunc:\n #try:\n self.gd.importance_sample(isfunc)\n #except:\n # self.logger.warn('Importance sampling failed. Make sure getdist is installed.')\n \n self.info['NparamsMC']=self.gd.nparamMC\n self.info['Nsamples_read']=self.gd.get_shape()[0]\n self.info['Nparams_read']=self.gd.get_shape()[1]\n #\n\n #after burn-in and thinning\n self.nsample = self.gd.get_shape()[0] \n if ndim is None: ndim=self.gd.nparamMC \n self.ndim=ndim \n #\n self.info['NparamsCosmo']=self.ndim\n self.info['Nsamples']=self.nsample\n #\n #self.info['MaxAutoCorrLen']=np.array([self.gd.samples.getCorrelationLength(j) for j in range(self.ndim)]).max()\n\n #print('***** ndim,nparamMC,MaxAutoCorrLen :',self.ndim,self.nparamMC,self.info['MaxAutoCorrLen'])\n \n #print('init minmax logl',method['lnprob'].min(),method['lnprob'].max()) \n self.logger.info('chain array dimensions: %s x %s ='%(self.nsample,self.ndim))\n \n #\n self.set_batch()", "title": "" }, { "docid": "1a3ce620a376339ccc31b48a0d2ff7c0", "score": "0.5031658", "text": "def gather_snp_info( pops, pop2snpInfoFN, pop2ancFreqFN, pop2sampleSizeFN, getio = None ):\n \n if getio: return dict( depends_on = list(pop2snpInfoFN.values()), creates = ( pop2ancFreqFN, pop2sampleSizeFN ),\n attrs = dict( pop = pops ) )\n\n\n pop2ancFreq = pd.DataFrame( data =\n dict([ ( pop, pd.read_csv( pop2snpInfoFN[ pop ],\n sep = '\\t', usecols = ( 'SNP pos (bases)', 'Ancestral Freq' ),\n index_col = 'SNP pos (bases)' )[ 'Ancestral Freq' ] )\n for pop in pops ]) )\n\n pop2ancFreq.dropna( inplace = True )\n pop2ancFreq.to_csv( pop2ancFreqFN, sep = '\\t', index_label = 'pos' )\n\n def getSampleSize( pop ):\n z = pd.read_csv( pop2snpInfoFN[ pop ],\n sep = '\\t', usecols = ( 'A0count', 'A1count' ), nrows = 1 )\n return z.at[ 0, 'A0count' ] + z.at[ 0, 'A1count' ]\n\n pop2sampleSize = pd.Series( dict([ ( pop, getSampleSize( pop ) ) for pop in pops ]),\n name = 
'sampleSize' )\n pop2sampleSize.to_csv( pop2sampleSizeFN, sep = '\\t', header = True, index_label = 'pop' )", "title": "" }, { "docid": "44554d22257048fc9089ad91e08c3f79", "score": "0.5026197", "text": "def _process_genotype_by_class(self, rec, genotype_class, genotype_type, alt):\n if genotype_class == 'SNP':\n position_based_vcf = (rec.rec_ref, alt, self._genotype_indexer(genotype_type))\n self._update_dictionary(rec.rec_pos + self.vcf_offset, position_based_vcf, SNP)\n elif genotype_class == 'DEL':\n list_of_records = self._handle_delete(rec.rec_pos, rec.rec_ref, alt, genotype_type)\n for record in list_of_records:\n pos, ref_seq, alt_seq, genotype = record\n position_based_vcf = (ref_seq, alt_seq, self._genotype_indexer(genotype))\n self._update_dictionary(pos, position_based_vcf, DEL)\n elif genotype_class == 'IN':\n if len(rec.rec_ref) > 1:\n rec.rec_ref, alt = self._trim_insert_sequences(rec.rec_ref, alt)\n\n position_based_vcf = (rec.rec_ref, alt, self._genotype_indexer(genotype_type))\n self._update_dictionary(rec.rec_pos + self.vcf_offset, position_based_vcf, IN)", "title": "" }, { "docid": "052547fac2898affd070210ece13fe0e", "score": "0.5023955", "text": "def phenotype_microarray_spec_sens(ref, df) -> tuple:\n carbapenem_genes = (\"OXA\", \"VIM\", \"NDM\", \"CTX\", \"SHV\", \"KPC\", \"ACT\", \"ADC\", \"CMH\", \"VEB\", \"PAL\")\n samples = tuple(ref[\"Strain_ID\"])\n pheno_tp, pheno_fp, pheno_fn, pheno_tn= 0, 0, 0, 0\n micro_tp= 0\n\n for sample in samples:\n phenotype = ref[ref['Strain_ID'] == sample].iloc[0,12]\n microarray = ref[ref['Strain_ID'] == sample].iloc[0,3].split(\", \")\n subset_df = df[df[\"Sample\"] == sample]\n carba_genes_found = 0\n #check carbapenemase genes detected\n for gene in carbapenem_genes:\n if any(gene in pred for pred in list(subset_df[\"Best_hit\"])):\n carba_genes_found += 1\n if carba_genes_found != 0 and phenotype == \"POS\":\n pheno_tp += 1\n elif carba_genes_found == 0 and phenotype == \"NEG\":\n pheno_tn += 1\n elif carba_genes_found != 0 and phenotype == \"NEG\":\n pheno_fp += 1\n else:\n pheno_fn += 1\n #check if all gene fams detected by micrarray are present\n for gene_fam in microarray:\n if any(gene_fam in pred for pred in list(subset_df[\"Best_hit\"])):\n micro_tp += 1\n\n return (round((pheno_tp/(pheno_tp+pheno_fn))*100), round((pheno_tn/(pheno_tn+pheno_fp))*100), round((micro_tp/33)*100), len(samples), pheno_tp+pheno_fn)", "title": "" }, { "docid": "fce89b0ede10976f544e7e809cdabed9", "score": "0.50163066", "text": "def setupGeneralG():\n\n inputFC = ARCPY.GetParameterAsText(0) \n varName = ARCPY.GetParameterAsText(1).upper() \n displayIt = ARCPY.GetParameter(2) \n\n #### Parse Space Concept ####\n spaceConcept = ARCPY.GetParameterAsText(3).upper().replace(\" \", \"_\")\n if spaceConcept == \"INVERSE_DISTANCE_SQUARED\":\n exponent = 2.0\n else:\n exponent = 1.0\n try:\n spaceConcept = WU.convertConcept[spaceConcept] \n wType = WU.weightDispatch[spaceConcept]\n except:\n ARCPY.AddIDMessage(\"Error\", 723)\n raise SystemExit()\n\n #### EUCLIDEAN or MANHATTAN ####\n distanceConcept = ARCPY.GetParameterAsText(4).upper().replace(\" \", \"_\")\n concept = WU.conceptDispatch[distanceConcept]\n\n #### Row Standardized ####\n rowStandard = ARCPY.GetParameterAsText(5).upper()\n if rowStandard == 'ROW':\n rowStandard = True\n else:\n rowStandard = False\n\n #### Distance Threshold ####\n threshold = UTILS.getNumericParameter(6)\n\n #### Spatial Weights File ####\n weightsFile = UTILS.getTextParameter(7) \n if weightsFile == None and wType == 8:\n 
ARCPY.AddIDMessage(\"ERROR\", 930)\n raise SystemExit()\n if weightsFile and wType != 8:\n ARCPY.AddIDMessage(\"WARNING\", 925)\n weightsFile = None\n\n #### Create a Spatial Stats Data Object (SSDO) ####\n ssdo = SSDO.SSDataObject(inputFC, useChordal = True)\n\n #### Set Unique ID Field ####\n masterField = UTILS.setUniqueIDField(ssdo, weightsFile = weightsFile)\n\n #### Populate SSDO with Data ####\n if WU.gaTypes[spaceConcept]:\n ssdo.obtainDataGA(masterField, [varName], minNumObs = 3, \n warnNumObs = 30)\n else:\n ssdo.obtainData(masterField, [varName], minNumObs = 3, \n warnNumObs = 30)\n\n #### Run High-Low Clustering ####\n gg = GeneralG(ssdo, varName, wType, weightsFile = weightsFile, \n concept = concept, rowStandard = rowStandard, \n threshold = threshold, exponent = exponent)\n\n #### Report and Set Parameters ####\n ggString, zgString, pvString = gg.report()\n try:\n ARCPY.SetParameterAsText(8, ggString)\n ARCPY.SetParameterAsText(9, zgString)\n ARCPY.SetParameterAsText(10, pvString)\n except:\n ARCPY.AddIDMessage(\"WARNING\", 902)\n\n #### Create HTML Output ####\n if displayIt:\n htmlOutFile = gg.reportHTML(htmlFile = None)\n ARCPY.SetParameterAsText(11, htmlOutFile)", "title": "" }, { "docid": "55c494549f50f7cf9b45079b9059a697", "score": "0.5010238", "text": "def capture_data( self, sampling_time_secs, trigger_type = 6, sample_freq_hz = 80e6, ht40_mode = 0 ):\n #if self.capture_data is None:\n capture_data = self.hwd.LP_VsaDataCapture\n capture_data.restype = ctypes.c_int\n capture_data.argtypes = [ ctypes.c_double, ctypes.c_int, ctypes.c_double, ctypes.c_int, ctypes.c_int ]\n\n rc = capture_data( sampling_time_secs, trigger_type, sample_freq_hz , ht40_mode, 0 )\n if rc != 0:\n #raise Exception( rc, self.get_error_string(rc) )\n pass", "title": "" }, { "docid": "6e32f6516887e1afb634997a2c7e5052", "score": "0.50086665", "text": "def varcalls_to_svtyper_input(input, vcf, tempdir, caller):\n\n helpers.makedirs(tempdir)\n\n csv = VarcallLoader(input, caller)\n num_records = len(csv.data.index)\n\n chrom = csv[\"CHROM\"]\n\n pos = csv[\"POS\"]\n\n ref = [\"N\"] * num_records\n\n qual = [\".\"] * num_records\n\n vcf_filter = [\"Pass\"] * num_records\n\n sv_type = csv[\"TYPE\"]\n\n vcf_id = [None] * num_records\n alt = [None] * num_records\n info = [None] * num_records\n\n for i in range(num_records):\n if sv_type[i] == \"BND\":\n vcf_id[i] = str(i) + \"_1\"\n alt[i] = make_alt(\n csv[\"STRAND\"][i],\n pos[i],\n chrom[i],\n ref[i]\n )\n else:\n vcf_id[i] = str(i)\n alt[i] = \"<\" + str(sv_type[i]) + \">\"\n\n info[i] = expand_info_section(\n sv_type[i], csv, i\n )\n\n new_data = pd.DataFrame(\n {'#CHROM': chrom, 'POS': pos, \"ID\": vcf_id,\n \"REF\": ref, 'ALT': alt, 'QUAL': qual,\n 'FILTER': vcf_filter, 'INFO': info\n },\n columns=[\n '#CHROM', 'POS', 'ID', 'REF',\n 'ALT', 'QUAL', 'FILTER', 'INFO'\n ]\n )\n\n new_data = add_bnd_mates(new_data, csv, num_records, caller)\n\n new_data[\"INFO\"] = [info_tostr(info) for info in new_data[\"INFO\"]]\n\n new_data.to_csv(vcf, sep=\"\\t\", index=False)", "title": "" }, { "docid": "66c02eb810fd5724b41678b676e7ebed", "score": "0.5004976", "text": "def Gen(self, *args):\n return _snap.TFltVVV_Gen(self, *args)", "title": "" }, { "docid": "efc14cf7b2047030b52da216bae415d7", "score": "0.49995863", "text": "def on_post_sample(self, fit, **kwargs):\n print(\"In DummyPlugin `on_post_sample`.\")\n return fit", "title": "" }, { "docid": "e273288d5fb7ff2e2731f245f7d76027", "score": "0.49947253", "text": "def Gen(self, *args):\n return 
_snap.TChVV_Gen(self, *args)", "title": "" }, { "docid": "8c97a454fa806cd5e6ae1c41f72db0d6", "score": "0.49908382", "text": "def process_variant(\n vcf_file_objs, qc_counts, overall_counts, args, out_files\n):\n high_qual_present = any([\n v.has_curr_variant == 'high_qual' for v in vcf_file_objs\n ])\n files_per_variant = {} # (REF, ALT, GT): list of VCF file objs\n\n if high_qual_present:\n for v in vcf_file_objs:\n if v.has_curr_variant in ['low_qual', 'high_qual']:\n chrom = parse_chromosome(v.curr_rec.CHROM)\n pos = v.curr_rec.POS\n for v in vcf_file_objs:\n if v.has_curr_variant in ['low_qual', 'high_qual']:\n call = v.curr_call\n record = v.curr_rec\n ref = str(record.REF)\n alt = str(record.ALT[0])\n gt = call['GT']\n\n if (ref, alt, gt) in files_per_variant:\n files_per_variant[(ref, alt, gt)].append(v)\n else:\n files_per_variant[(ref, alt, gt)] = [v]\n\n # Output each (REF, ALT, GT) combination at given position\n # on chromosome\n for x in files_per_variant:\n output_ref_alt_gt(\n x, out_files, vcf_file_objs, overall_counts, qc_counts,\n args, files_per_variant\n )\n\n # Update plot data\n if args.create_qc_plots:\n multiple_variants = [\n 'high_qual' in [v.has_curr_variant for v in files_per_variant[x]]\n for x in files_per_variant\n ].count(True) > 1\n\n all_agree = (\n all(\n v.has_curr_variant != 'absent' for v in\n vcf_file_objs\n ) and (not multiple_variants)\n )\n\n for v in vcf_file_objs:\n if v.has_curr_variant not in ['low_qual', 'high_qual']:\n continue\n gt_type = v.curr_call.gt_type\n cat = 'undefined'\n if all_agree:\n if gt_type == 1:\n cat = 'HET all agree'\n elif gt_type == 2:\n cat = 'HOM all agree'\n else:\n if gt_type == 1:\n cat = 'HET disagree'\n elif gt_type == 2:\n cat = 'HOM disagree'\n if cat != 'undefined':\n update_qc_counts(\n qc_counts, v, cat, len([\n v for v in vcf_file_objs if\n v.has_curr_variant in ['low_qual', 'high_qual']\n ])\n )", "title": "" }, { "docid": "882f4dc731d17c4ac2e7b4883eec8678", "score": "0.49895492", "text": "def allsamples(cls):", "title": "" }, { "docid": "fae6f0e4f25e10c6686c13eb21678ead", "score": "0.49852818", "text": "def postcall_annotate(in_file, bam_file, ref_file, vrn_files, config):\n #out_file = _check_file_gatk_merge(in_file)\n out_file = annotation.annotate_nongatk_vcf(in_file, bam_file, vrn_files.dbsnp,\n ref_file, config)\n return out_file", "title": "" }, { "docid": "bbc29e0a116fae0cabf0d51e4d5c904c", "score": "0.49691328", "text": "def test_sample__process(clin_class):\n expected_sampledf = pd.DataFrame(\n dict(\n SAMPLE_ID=[\n \"GENIE-SAGE-ID1-1\",\n \"GENIE-SAGE-ID2-1\",\n \"GENIE-SAGE-ID3-1\",\n \"GENIE-SAGE-ID4-1\",\n \"GENIE-SAGE-ID5-1\",\n ],\n PATIENT_ID=[\n \"GENIE-SAGE-ID1\",\n \"GENIE-SAGE-ID2\",\n \"GENIE-SAGE-ID3\",\n \"GENIE-SAGE-ID4\",\n \"GENIE-SAGE-ID5\",\n ],\n AGE_AT_SEQ_REPORT=[100000, 100000, 100000, 100000, 100000],\n ONCOTREE_CODE=[\"AMPCA\", \"UNKNOWN\", \"AMPCA\", \"AMPCA\", \"AMPCA\"],\n SAMPLE_TYPE=[\"Test\", \"Why\", \"foo\", \"Me\", \"Me\"],\n CENTER=[\"SAGE\", \"SAGE\", \"SAGE\", \"SAGE\", \"SAGE\"],\n SAMPLE_TYPE_DETAILED=[\"non\", \"asdf\", \"asdf\", \"asdff\", \"asdff\"],\n SEQ_ASSAY_ID=[\"SAGE-1\", \"SAGE-1\", \"SAGE-1\", \"SAGE-1\", \"SAGE-1\"],\n SEQ_DATE=[\"Jan-2012\", \"Apr-2013\", \"Jul-2014\", \"Oct-2015\", \"Release\"],\n SEQ_YEAR=[2012, 2013, 2014, 2015, float(\"nan\")],\n )\n )\n sample_cols = [\n \"SAMPLE_ID\",\n \"PATIENT_ID\",\n \"AGE_AT_SEQ_REPORT\",\n \"ONCOTREE_CODE\",\n \"SAMPLE_TYPE\",\n \"SEQ_ASSAY_ID\",\n \"SEQ_DATE\",\n \"SAMPLE_TYPE_DETAILED\",\n 
\"SEQ_YEAR\",\n ]\n\n clinical_template = pd.DataFrame(columns=sample_cols)\n # patient = False\n sampledf = pd.DataFrame(\n dict(\n SAMPLE_ID=[\n \"GENIE-SAGE-ID1-1\",\n \"GENIE-SAGE-ID2-1\",\n \"GENIE-SAGE-ID3-1\",\n \"GENIE-SAGE-ID4-1\",\n \"GENIE-SAGE-ID5-1\",\n ],\n PATIENT_ID=[\n \"GENIE-SAGE-ID1\",\n \"GENIE-SAGE-ID2\",\n \"GENIE-SAGE-ID3\",\n \"GENIE-SAGE-ID4\",\n \"GENIE-SAGE-ID5\",\n ],\n Age_AT_SEQ_REPORT=[100000, 100000, 100000, 100000, 100000],\n ONCOTree_CODE=[\"AMPCA\", \" UNKNOWN\", \"AMPCA\", \"AMPCA\", \"AMPCA\"],\n SAMPLE_TYPE=[1, 2, 3, 4, 4],\n SEQ_ASSAY_ID=[\"SAGE-1\", \"SAGE-1\", \"SAGE-1\", \"SAGE-1\", \"SAGE-1\"],\n SEQ_DATE=[\"Jan-2012\", \"Apr-2013\", \"JUL-2014\", \"Oct-2015\", \"release\"],\n )\n )\n\n new_sampledf = clin_class._process(sampledf, clinical_template)\n assert new_sampledf.columns.isin(expected_sampledf.columns).all()\n assert expected_sampledf.equals(new_sampledf[expected_sampledf.columns])", "title": "" }, { "docid": "98834c9316aa4014eab0ea0b16859b5e", "score": "0.4967973", "text": "def lotus_mixed_model_gwas(phenotype_id=4, phen_file = '/home/bjarni/LotusGenome/cks/Lotus31012019/20181113_136LjAccessionData.csv', \n gt_file = '/home/bjarni/LotusGenome/cks/Lotus31012019/all_chromosomes_binary.csv', \n pvalue_file='mm_results.pvals', manhattan_plot_file='mm_manhattan.png', qq_plot_file_prefix='mm_qq'):\n import linear_models as lm\n import kinship\n import gwaResults as gr\n import dataParsers as dp\n # Load genotypes\n sd = dp.parse_snp_data(gt_file)\n\n # Load phenotypes\n import phenotypeData as pd\n phend = pd.parse_phenotype_file(phen_file, with_db_ids=False)\n \n # Coordinate phenotype of interest and genotypes. This filters the genotypes and \n # phenotypes, leaving only accessions (individuals) which overlap between both, \n # and SNPs that are polymorphic in the resulting subset.\n sd.coordinate_w_phenotype_data(phend, phenotype_id)\n\n # Calculate kinship (IBS)\n K = kinship.calc_ibs_kinship(sd.get_snps())\n\n # Perform mixed model GWAS\n mm_results = lm.emmax(sd.get_snps(), phend.get_values(phenotype_id), K)\n\n # Construct a results object\n res = gr.Result(scores=mm_results['ps'], snps_data=sd)\n\n # Save p-values to file\n res.write_to_file(pvalue_file)\n\n # Plot Manhattan plot\n res.plot_manhattan(png_file=manhattan_plot_file, percentile=90, plot_bonferroni=True,\n neg_log_transform=True)\n # Plot a QQ-plot\n res.plot_qq(qq_plot_file_prefix)", "title": "" }, { "docid": "4da15fe05eb48e993958bdf74beda41c", "score": "0.49638417", "text": "def update_vcf_variant_dict(variant_dict):\n\n ref, alt = variant_dict['REF'], variant_dict['ALT']\n\n for i, d in variant_dict['sample'].items():\n\n if 'GT' in d:\n d['genotype'] = get_vcf_genotype(ref, alt, d['GT'])", "title": "" }, { "docid": "77873b21f8e3118f7f96c4e171beda63", "score": "0.4958868", "text": "def predict_genes(self):\n # Filter contigs by coverage\n contig_coverage_cutoff = 3.0\n\n prodigal_infile = os.path.join(self.assembly_dir, 'all_contigs.fa')\n with open(prodigal_infile, 'w') as outfile:\n for function in sorted(self.assembly.contigs.keys()):\n for contig in sorted(self.assembly.contigs[function].keys()):\n if self.assembly.contigs[function][\n contig\n ].get_coverage() >= contig_coverage_cutoff:\n outfile.write('>' + function + '|' + contig + '\\n')\n outfile.write(self.assembly.contigs[function][contig].sequence + '\\n')\n\n # Run Prodigal\n prodigal_outfile = os.path.join(self.assembly_dir, 'all_contigs.prodigal.out.faa')\n run_prodigal(prodigal_infile, prodigal_outfile, 
self.project.config.prodigal_path)\n\n with open(prodigal_outfile, 'r') as infile:\n current_id = None\n sequence = ''\n for line in infile:\n line = line.rstrip('\\n\\r')\n if line.startswith('>'):\n if current_id:\n line_tokens = current_id.split(' # ')\n function_id, contig_id, _ = parse_gene_id(line_tokens[0])\n gene = Gene(contig_id=contig_id,\n gene_id=line_tokens[0],\n sequence=sequence,\n start=line_tokens[1],\n end=line_tokens[2],\n strand=line_tokens[3])\n self.assembly.contigs[function_id][contig_id].add_gene(gene)\n line_tokens = line.split(' ')\n current_id = line[1:] # line_tokens[0][1:]\n sequence = ''\n else:\n sequence += line\n line_tokens = current_id.split(' # ')\n function_id, contig_id, _ = parse_gene_id(line_tokens[0])\n gene = Gene(contig_id=contig_id,\n gene_id=line_tokens[0],\n sequence=sequence,\n start=line_tokens[1],\n end=line_tokens[2],\n strand=line_tokens[3])\n self.assembly.contigs[function_id][contig_id].add_gene(gene)", "title": "" }, { "docid": "324646c6f3fedb1ca1c144bf5ed09b55", "score": "0.49536267", "text": "def get_EH_genotypes(sample_list, file_format='vcf'):\n # List of genotypes for each sample for each repeat_id\n # genotype[sample][repeat_id] = \"genotype1/genotype2\"\n genotypes = {}\n for sample in sample_list:\n if sample[2] == None:\n sample_genotypes = None\n elif file_format == 'json':\n eh_json = json.load(open(sample[2]))\n sample_genotypes = {repeat_id: eh_json[repeat_id]['Genotype']\n for repeat_id in eh_json if repeat_id != 'BamStats'}\n sample_genotypes = {repeat_id: [int(g) for g in sample_genotypes[repeat_id].split(\n '/') if g != ''] for repeat_id in sample_genotypes}\n elif file_format == 'tsv':\n graphEH_out = [l.strip().split() for l in open(sample[2])]\n if len(graphEH_out) == 0 or len(graphEH_out[0]) == 3:\n sample_genotypes = {l[1]:l[2] for l in graphEH_out}\n sample_genotypes = {repeat_id: [int(g) for g in sample_genotypes[repeat_id].split(\n '/') if g != ''] for repeat_id in sample_genotypes}\n else:\n sample_genotypes = defaultdict(lambda:[], {})\n for l in graphEH_out:\n sample_genotypes[l[1]].append((l[2], [int(gt) for gt in l[3].split('/')]))\n elif file_format in ['vcf', 'v3', 'v2.5', 'v3.0.0-rc1']:\n sample_genotypes = defaultdict(lambda: [-1, -1], {})\n if os.path.splitext(sample[2])[1] == '.vcf':\n for l in open(sample[2]):\n ll = l.strip().split()\n if len(ll) == 0 or ll[0][0] == '#':\n continue\n info_dict = {f.split('=')[0]:f.split('=')[1] for f in ll[7].split(';')}\n if 'VARID' in info_dict:\n site = info_dict['VARID']\n elif 'REPID' in info_dict:\n site = info_dict['REPID']\n else:\n continue\n if 'REF' in info_dict:\n try:\n refgt = int(info_dict['REF'])\n except:\n continue\n else:\n continue\n gtlist = [refgt] + [int(f.strip('<>STR')) for f in ll[4].split(',') if f != '.']\n feature_dict = {f[0]:f[1] for f in zip(ll[8].split(':'), ll[9].split(':'))}\n if 'GT' in feature_dict:\n genotype = [gtlist[0] if g == '.' 
else gtlist[int(g)] for g in feature_dict['GT'].split('/')]\n else:\n continue\n genotype.sort()\n sample_genotypes[site] = genotype\n else:\n sample_genotypes = defaultdict(lambda: [-1, -1], {})\n if file_format in ['v3', 'v3.0.0-rc1']:\n eh_json = json.load(open(sample[2]))[\"LocusResults\"]\n for locus_id in eh_json.keys():\n if 'Variants' not in eh_json[locus_id]:\n continue\n for variant_id in eh_json[locus_id][\"Variants\"].keys():\n if 'Genotype' not in eh_json[locus_id][\"Variants\"][variant_id]:\n continue\n sample_genotypes[variant_id] = eh_json[locus_id][\"Variants\"][variant_id]['Genotype']\n else:\n eh_json = json.load(open(sample[2]))\n sample_genotypes = {repeat_id: eh_json[repeat_id]['Genotype']\n for repeat_id in eh_json if repeat_id != 'BamStats'} \n sample_genotypes = {repeat_id: [int(g) for g in sample_genotypes[repeat_id].split(\n '/') if g != ''] for repeat_id in sample_genotypes} \n genotypes[sample[0]] = sample_genotypes\n return genotypes", "title": "" }, { "docid": "d19ed42b2853f7a413e63e5eb0c6470e", "score": "0.49531755", "text": "def Gen(self, *args):\n return _snap.TFltVV_Gen(self, *args)", "title": "" }, { "docid": "5f61b803fe25436b08c43bfda6c22519", "score": "0.49494767", "text": "def produce_r_input(self):\n\n # SNP POC output\n # dist matrice\n self.poc_snp_df.to_csv(os.path.join(self.output_dir_18s, 'pocillopora_snp_biallelic.dist.csv'), index=False, header=False)\n # meta df\n meta_df = self.sample_provenance_df.loc[self.poc_snp_df.index]\n meta_df = meta_df[meta_df['SAMPLE PROTOCOL LABEL, level 2'] == 'CS4L']\n meta_df = meta_df.loc[:,['ISLAND#', 'SITE#']]\n meta_df.rename(columns={'ISLAND#':'ISLAND', 'SITE#':'SITE'}, inplace=True)\n meta_df.to_csv(os.path.join(self.output_dir_18s, 'pocillopora_snp_biallelic.meta.csv'), index_label='sample_id', index=True, header=True)\n\n # SNP POR output\n # dist matrice\n self.por_snp_df.to_csv(os.path.join(self.output_dir_18s, 'porites_snp_biallelic.dist.csv'), index=False, header=False)\n # meta df\n meta_df = self.sample_provenance_df.loc[self.por_snp_df.index]\n meta_df = meta_df[meta_df['SAMPLE PROTOCOL LABEL, level 2'] == 'CS4L']\n meta_df = meta_df.loc[:,['ISLAND#', 'SITE#']]\n meta_df.rename(columns={'ISLAND#':'ISLAND', 'SITE#':'SITE'}, inplace=True)\n meta_df.to_csv(os.path.join(self.output_dir_18s, 'porites_snp_biallelic.meta.csv'), index_label='sample_id', index=True, header=True)\n\n # 18S\n # POC BrayCurtis\n dist_file_name = f'Pocillopora_True_True_True_False_biallelic_braycurtis_dist_10000_pwr_False_0.08_200_3.dist.gz'\n dist_df, meta_df = self._get_dist_meta_df(dist_path=os.path.join(self.output_dir_18s, dist_file_name), abundance_dict_file_name=dist_file_name.replace('.dist.gz', '_abundance_dict.p.bz'), genus='Pocillopora')\n dist_df.to_csv(os.path.join(self.output_dir_18s, 'pocillopora_18S_braycurtis_0.08_200.dist.csv'), index=False, header=False)\n meta_df.to_csv(os.path.join(self.output_dir_18s, 'pocillopora_18S_braycurtis_0.08_200.meta.csv'), index=True, header=True, index_label='sample_id')\n\n # POC Unifrac\n dist_file_name = f'Pocillopora_True_True_True_False_biallelic_unifrac_dist_1000_pwr_False_0.08_200_3.dist.gz'\n dist_df, meta_df = self._get_dist_meta_df(dist_path=os.path.join(self.output_dir_18s, dist_file_name), abundance_dict_file_name=dist_file_name.replace('.dist.gz', '_abundance_dict.p.bz'), genus='Pocillopora')\n dist_df.to_csv(os.path.join(self.output_dir_18s, 'pocillopora_18S_unifrac_0.08_200.dist.csv'), index=False, header=False)\n meta_df.to_csv(os.path.join(self.output_dir_18s, 
'pocillopora_18S_unifrac_0.08_200.meta.csv'), index=True, header=True, index_label='sample_id')\n\n # POR BrayCurtis\n dist_file_name = f'Porites_True_True_True_False_biallelic_braycurtis_dist_10000_pwr_False_0.2_80_3.dist.gz'\n dist_df, meta_df = self._get_dist_meta_df(dist_path=os.path.join(self.output_dir_18s, dist_file_name), abundance_dict_file_name=dist_file_name.replace('.dist.gz', '_abundance_dict.p.bz'), genus='Pocillopora')\n dist_df.to_csv(os.path.join(self.output_dir_18s, 'porites_18S_braycurtis_0.2_80.dist.csv'), index=False, header=False)\n meta_df.to_csv(os.path.join(self.output_dir_18s, 'porites_18S_braycurtis_0.2_80.meta.csv'), index=True, header=True, index_label='sample_id')\n\n # POR Unifrac\n dist_file_name = f'Pocillopora_True_True_True_False_biallelic_unifrac_dist_1000_pwr_False_0.66_100_3.dist.gz'\n dist_df, meta_df = self._get_dist_meta_df(dist_path=os.path.join(self.output_dir_18s, dist_file_name), abundance_dict_file_name=dist_file_name.replace('.dist.gz', '_abundance_dict.p.bz'), genus='Pocillopora')\n dist_df.to_csv(os.path.join(self.output_dir_18s, 'porites_18S_unifrac_0.66_100.dist.csv'), index=False, header=False)\n meta_df.to_csv(os.path.join(self.output_dir_18s, 'porites_18S_unifrac_0.66_100.meta.csv'), index=True, header=True, index_label='sample_id')\n\n foo = 'bar'", "title": "" }, { "docid": "f2308626b5e520b563baeb136de7eb16", "score": "0.4944352", "text": "def annotate_nongatk_vcf(orig_file, bam_files, dbsnp_file, ref_file, config):\n broad_runner = broad.runner_from_config(config)\n out_file = \"%s-gatkann%s\" % os.path.splitext(orig_file)\n if not file_exists(out_file):\n with file_transaction(out_file) as tx_out_file:\n annotations = get_gatk_annotations(config)\n params = [\"-T\", \"VariantAnnotator\",\n \"-R\", ref_file,\n \"--variant\", orig_file,\n \"--dbsnp\", dbsnp_file,\n \"--out\", tx_out_file,\n \"-L\", orig_file]\n for bam_file in bam_files:\n params += [\"-I\", bam_file]\n for x in annotations:\n params += [\"-A\", x]\n broad_runner.run_gatk(params)\n return out_file", "title": "" }, { "docid": "404e832cf57d79315ae15ad52db53bd0", "score": "0.494358", "text": "def write_variants(out, phylo, contribs, obs_tab, args):\n haplogroups = [con[1] for con in contribs]\n variants = collections.defaultdict(list)\n for hap in haplogroups:\n for var in phylo.hap_var[hap]:\n pos = phylotree.pos_from_var(var)\n variants[pos].append(\"%s:%s\" % (hap, var))\n\n polymorphic = set(phylo.polymorphic_sites(haplogroups))\n for ref_pos in range(len(phylo.refseq)):\n obs = obs_tab.obs_at(ref_pos)\n\n samp_status = \"sample_fixed\"\n threshold = max(args.min_var_reads, obs_tab.total_obs(pos) * args.frac_var_reads)\n if sum(obs[base] >= threshold for base in 'ACGT') > 1:\n samp_status = \"variant\"\n\n phy_status = \"fixed\"\n if ref_pos in polymorphic:\n phy_status = \"polymorphic\"\n\n out.write(\"%d\\t%s\\t%s\\t%s\\t%s\\n\" % (ref_pos + 1,\n '\\t'.join([str(obs[base])\n for base in 'ACGT']),\n phy_status, samp_status,\n ','.join(variants[ref_pos])))\n return", "title": "" }, { "docid": "20602da27cb530b469d39bc47ce30907", "score": "0.49253082", "text": "def posterior_genotypes_values(factorList, ALPHABET,samplenames,bedstring,fh):\n genotype_factors=factorList[0:len(samplenames)]\n sample_factorObj_zip=zip(samplenames, genotype_factors)\n #print bedstring\n for sample, f in sample_factorObj_zip:\n #print sample, \": \"\n #values=f.getVal().tolist()\n \n #prob_val_normalized=( lognormalize( f.getVal() ) )\n prob_val_normalized=f.getVal()/np.sum(f.getVal())\n 
#print sample\n #val=f.getVal()\n #print np.sum(val)\n #print val/np.sum(val)\n #pdb.set_trace()\n #print prob_val_normalized.tolist()\n #genotype_probZip=zip(ALPHABET,values)\n posteriors=[]\n #print prob_val_normalized.tolist()\n for posterior_val in prob_val_normalized.tolist():\n #for posterior_val in values:\n posteriors.append(str(posterior_val))\n #posteriors.append(str(round(posterior_val,5) ))\n #print posteriors\n gstring=\"\\t\".join(posteriors)\n #print gstring\n outstring=\"\\t\".join([bedstring, sample,gstring])\n \n fh.write(outstring + \"\\n\")", "title": "" }, { "docid": "4ebc845b5689e96eaa8ac20dacfcf590", "score": "0.4916762", "text": "def extend_vcf_annotations(query_vcf, gvanno_db_directory, lof_prediction = 0, oncogenicity_annotation = 0, regulatory_annotation = 0, debug = 0):\n\n ## read VEP and PCGR tags to be appended to VCF file\n vcf_infotags_meta = annoutils.read_infotag_file(logger, os.path.join(gvanno_db_directory,'gvanno_infotags.tsv'))\n gvanno_xref_map = annoutils.read_genexref_namemap(logger, os.path.join(gvanno_db_directory,'gvanno_xref', 'gvanno_xref_namemap.tsv'))\n out_vcf = re.sub(r'\\.vcf(\\.gz){0,}$','.annotated.vcf',query_vcf)\n\n meta_vep_dbnsfp_info = annoutils.vep_dbnsfp_meta_vcf(query_vcf, vcf_infotags_meta)\n dbnsfp_prediction_algorithms = meta_vep_dbnsfp_info['dbnsfp_prediction_algorithms']\n vep_csq_fields_map = meta_vep_dbnsfp_info['vep_csq_fieldmap']\n vcf = VCF(query_vcf)\n for tag in vcf_infotags_meta:\n if lof_prediction == 0 and regulatory_annotation == 0:\n if not tag.startswith('LoF') and not tag.startswith('REGULATORY_'):\n vcf.add_info_to_header({'ID': tag, 'Description': str(vcf_infotags_meta[tag]['description']), \\\n 'Type':str(vcf_infotags_meta[tag]['type']), 'Number': str(vcf_infotags_meta[tag]['number'])})\n elif lof_prediction == 1 and regulatory_annotation == 0:\n if not tag.startswith('REGULATORY_'):\n vcf.add_info_to_header({'ID': tag, 'Description': str(vcf_infotags_meta[tag]['description']), \\\n 'Type':str(vcf_infotags_meta[tag]['type']), 'Number': str(vcf_infotags_meta[tag]['number'])})\n elif lof_prediction == 0 and regulatory_annotation == 1:\n if not tag.startswith('LoF'):\n vcf.add_info_to_header({'ID': tag, 'Description': str(vcf_infotags_meta[tag]['description']), \\\n 'Type':str(vcf_infotags_meta[tag]['type']), 'Number': str(vcf_infotags_meta[tag]['number'])})\n else:\n vcf.add_info_to_header({'ID': tag, 'Description': str(vcf_infotags_meta[tag]['description']), \\\n 'Type':str(vcf_infotags_meta[tag]['type']), 'Number': str(vcf_infotags_meta[tag]['number'])})\n\n\n w = Writer(out_vcf, vcf)\n current_chrom = None\n num_chromosome_records_processed = 0\n num_records_filtered = 0\n\n cancer_hotspots = annoutils.read_cancer_hotspots(logger, os.path.join(gvanno_db_directory,'cancer_hotspots', 'cancer_hotspots.tsv'))\n \n vcf_info_element_types = {}\n for e in vcf.header_iter():\n header_element = e.info()\n if 'ID' in header_element and 'HeaderType' in header_element and 'Type' in header_element:\n identifier = str(header_element['ID'])\n fieldtype = str(header_element['Type'])\n vcf_info_element_types[identifier] = fieldtype\n\n for rec in vcf:\n if current_chrom is None:\n current_chrom = str(rec.CHROM)\n num_chromosome_records_processed = 0\n else:\n if str(rec.CHROM) != current_chrom:\n logger.info('Completed summary of functional annotations for ' + str(num_chromosome_records_processed) + ' variants on chromosome ' + str(current_chrom))\n current_chrom = str(rec.CHROM)\n num_chromosome_records_processed = 0\n if 
rec.INFO.get('CSQ') is None:\n num_records_filtered = num_records_filtered + 1\n #logger.warning('Variant record ' + str(variant_id) + ' does not have CSQ tag from Variant Effect Predictor (--vep_skip_intergenic or --vep_coding_only turned ON?) - variant will be skipped')\n continue\n num_chromosome_records_processed += 1\n gvanno_xref = annoutils.make_transcript_xref_map(rec, gvanno_xref_map, xref_tag = \"GVANNO_XREF\")\n\n if regulatory_annotation == 1:\n csq_record_results_all = annoutils.parse_vep_csq(rec, gvanno_xref, vep_csq_fields_map, logger, pick_only = False, csq_identifier = 'CSQ')\n\n if 'picked_gene_csq' in csq_record_results_all:\n vep_csq_records_all = csq_record_results_all['picked_gene_csq']\n rec.INFO['REGULATORY_ANNOTATION'] = annoutils.map_regulatory_variant_annotations(vep_csq_records_all)\n\n vep_csq_record_results = annoutils.parse_vep_csq(rec, gvanno_xref, vep_csq_fields_map, logger, pick_only = True, csq_identifier = 'CSQ', debug = debug)\n\n \n principal_hgvsp = '.'\n principal_hgvsc = '.'\n if 'picked_csq' in vep_csq_record_results:\n csq_record = vep_csq_record_results['picked_csq']\n for k in csq_record:\n if k in vcf_info_element_types:\n if vcf_info_element_types[k] == \"Flag\":\n #rec.INFO[k] = False\n if csq_record[k] == \"1\":\n rec.INFO[k] = True \n else:\n if not csq_record[k] is None:\n rec.INFO[k] = csq_record[k]\n\n if k == 'HGVSp_short':\n principal_hgvsp = csq_record[k]\n \n if k == 'HGVSc':\n principal_hgvsc = csq_record[k].split(':')[1]\n #else:\n # print(\"missing\\t\" + str(k))\n\n if 'all_csq' in vep_csq_record_results:\n rec.INFO['VEP_ALL_CSQ'] = ','.join(vep_csq_record_results['all_csq'])\n annoutils.map_cancer_hotspots(vep_csq_record_results['all_csq'], cancer_hotspots, rec, principal_hgvsp, principal_hgvsc)\n\n if not rec.INFO.get('DBNSFP') is None:\n annoutils.map_variant_effect_predictors(rec, dbnsfp_prediction_algorithms)\n\n if oncogenicity_annotation == 1:\n oncogenicity.assign_oncogenicity_evidence(rec, tumortype = \"Any\")\n w.write_record(rec)\n w.close()\n logger.info('Completed summary of functional annotations for ' + str(num_chromosome_records_processed) + ' variants on chromosome ' + str(current_chrom))\n vcf.close()\n logger.info(\"Number of variant calls filtered by VEP (No CSQ tag, '--vep_coding_only' / '--vep_skip_intergenic'): \" + str(num_records_filtered))\n\n if os.path.exists(out_vcf):\n if os.path.getsize(out_vcf) > 0:\n os.system('bgzip -f ' + str(out_vcf))\n os.system('tabix -f -p vcf ' + str(out_vcf) + '.gz')\n annotated_vcf = out_vcf + '.gz'\n annoutils.write_pass_vcf(annotated_vcf, logger)\n else:\n annoutils.error_message('No remaining PASS variants found in query VCF - exiting and skipping STEP 4 (gvanno-writer)', logger)\n else:\n annoutils.error_message('No remaining PASS variants found in query VCF - exiting and skipping STEP 4 (gvanno-writer)', logger)", "title": "" }, { "docid": "c3d0bcfc4f2dcf624a1c5bf400029a9c", "score": "0.49090862", "text": "def __init__(self, **kwargs):\n self.COMPFILES = sorted(glob.glob(os.path.join(os.environ[\"PYSYN_CDBS\"],\"mtab\",\"*tmc.fits\")))\n self.GRAPHFILES = sorted(glob.glob(os.path.join(os.environ[\"PYSYN_CDBS\"],\"mtab\",\"*tmg.fits\")))\n self.THERMFILES = sorted(glob.glob(os.path.join(os.environ[\"PYSYN_CDBS\"],\"mtab\",\"*tmt.fits\")))\n\n if 'logger' in kwargs:\n self.logger = kwargs['logger']\n else:\n self.logger = logging.getLogger('__stips__')\n log_level = SelectParameter('log_level', kwargs)\n self.logger.setLevel(getattr(logging, log_level))\n if not 
len(self.logger.handlers):\n stream_handler = logging.StreamHandler(sys.stderr)\n format = '%(asctime)s %(levelname)s: %(message)s'\n stream_handler.setFormatter(logging.Formatter(format))\n self.logger.addHandler(stream_handler)\n\n self.out_path = SelectParameter('out_path', kwargs)\n self.prefix = kwargs.get('prefix', '')\n self.cat_type = SelectParameter('cat_type', kwargs)\n self.flatfile = GetStipsData(os.path.join(\"residual_files\", self.FLATFILE))\n self.darkfile = GetStipsData(os.path.join(\"residual_files\", self.DARKFILE))\n self.oversample = SelectParameter('oversample', kwargs)\n self.psf_grid_size = SelectParameter('psf_grid_size', kwargs)\n self.seed = SelectParameter('seed', kwargs)\n self.imgbase = kwargs.get('imgbase', '')\n self.ra = kwargs.get('ra', 0.)\n self.dec = kwargs.get('dec', 0.)\n self.pa = kwargs.get('pa', 0.)\n self.distortion = SelectParameter('distortion', kwargs)\n self.exptime = kwargs.get('exptime', 1.)\n self.small_subarray = kwargs.get('small_subarray', False)\n self.filter = None\n self.detectors = None\n self.psf_commands = kwargs.get('psf_commands', None)\n self.instrument = kwargs.get('instrument', \"\")\n self.background_value = SelectParameter('background', kwargs)\n self.background_location = SelectParameter('jbt_location', kwargs)\n self.custom_background = kwargs.get('custom_background', 0.)\n self.CENTRAL_OFFSET = (0., 0., 0.)\n self.convolve_size = SelectParameter('convolve_size', kwargs)\n self.memmap = SelectParameter('memmap', kwargs)\n self.set_celery = kwargs.get('set_celery', None)\n self.get_celery = kwargs.get('get_celery', None)\n\n #Adjust # of detectors based on keyword:\n n_detectors = int(kwargs.get('detectors', len(self.DETECTOR_OFFSETS)))\n self.DETECTOR_OFFSETS = self.DETECTOR_OFFSETS[:n_detectors]\n self.OFFSET_NAMES = self.OFFSET_NAMES[:n_detectors]\n if hasattr(self, \"N_OFFSET\"):\n self.CENTRAL_OFFSET = self.N_OFFSET[n_detectors]\n msg = \"{} with {} detectors. 
Central offset {}\"\n self._log('info', msg.format(self.DETECTOR, n_detectors, \n self.CENTRAL_OFFSET))", "title": "" }, { "docid": "fe2cc7de270405c81c8582adb4662132", "score": "0.49085012", "text": "def sample(self):\n raise NotImplementedError", "title": "" }, { "docid": "1c28dc184d3b24685bdf4aff3701c85d", "score": "0.49067336", "text": "def process(sample, pipeline_config, args):\n\n\tprint(\"Start processing sample %s.\" % sample.sample_name)\n\n\t# for path in [\"sample_root\"] + sample.paths.__dict__.keys():\n\t# \tif not os.path.exists(sample.paths[path]):\n\t# \t\ttry:\n\t# \t\t\tos.mkdir(sample.paths[path])\n\t# \t\texcept OSError(\"Cannot create '%s' path: %s\" % (path, sample.paths[path])):\n\t# \t\t\traise\n\n\t# Start Pypiper object\n\tpm = pypiper.PipelineManager(\"rnaKallisto\", sample.paths.sample_root, args=args)\n\n\tprint \"\\nPipeline configuration:\"\n\tprint(pm.config)\n\ttools = pm.config.tools # Convenience alias\n\tparam = pm.config.parameters\n\tresources = pm.config.resources\n\n\traw_folder = os.path.join(sample.paths.sample_root, \"raw\")\n\tfastq_folder = os.path.join(sample.paths.sample_root, \"fastq\")\n\n\tsample.paired = False\n\tif args.single_or_paired == \"paired\":\n\t\tsample.paired = True\n\n\t# Create a ngstk object\n\tngstk = pypiper.NGSTk(pm=pm)\n\n\t# Convert bam to fastq\n\tpm.timestamp(\"Converting to Fastq format\")\n\n\tlocal_input_files = ngstk.merge_or_link([args.input, args.input2], raw_folder, args.sample_name)\n\tcmd, out_fastq_pre, unaligned_fastq = ngstk.input_to_fastq(local_input_files, args.sample_name, sample.paired, fastq_folder)\n\tpm.run(cmd, unaligned_fastq, \n\t\tfollow=ngstk.check_fastq(local_input_files, unaligned_fastq, sample.paired))\n\tpm.clean_add(out_fastq_pre + \"*.fastq\", conditional=True)\n\n\tpm.report_result(\"File_mb\", ngstk.get_file_size(local_input_files))\n\tpm.report_result(\"Read_type\", args.single_or_paired)\n\tpm.report_result(\"Genome\", args.genome_assembly)\n\n\tsample.fastq = out_fastq_pre + \"_R1.fastq\"\n\tsample.trimmed = out_fastq_pre + \"_R1_trimmed.fastq\"\n\tsample.fastq1 = out_fastq_pre + \"_R1.fastq\" if sample.paired else None\n\tsample.fastq2 = out_fastq_pre + \"_R2.fastq\" if sample.paired else None\n\tsample.trimmed1 = out_fastq_pre + \"_R1_trimmed.fastq\" if sample.paired else None\n\tsample.trimmed1Unpaired = out_fastq_pre + \"_R1_unpaired.fastq\" if sample.paired else None\n\tsample.trimmed2 = out_fastq_pre + \"_R2_trimmed.fastq\" if sample.paired else None\n\tsample.trimmed2Unpaired = out_fastq_pre + \"_R2_unpaired.fastq\" if sample.paired else None\n\n\t#if not sample.paired:\n\t#\tpm.clean_add(sample.fastq, conditional=True)\n\t#if sample.paired:\n\t#\tpm.clean_add(sample.fastq1, conditional=True)\n\t#\tpm.clean_add(sample.fastq2, conditional=True)\n\t#\tpm.clean_add(sample.fastqUnpaired, conditional=True)\n\n\t# Trim reads\n\tpm.timestamp(\"Trimming adapters from sample\")\n\tif pipeline_config.parameters.trimmer == \"trimmomatic\":\n\n\t\tinputFastq1 = sample.fastq1 if sample.paired else sample.fastq\n\t\tinputFastq2 = sample.fastq2 if sample.paired else None\n\t\toutputFastq1 = sample.trimmed1 if sample.paired else sample.trimmed\n\t\toutputFastq1unpaired = sample.trimmed1Unpaired if sample.paired else None\n\t\toutputFastq2 = sample.trimmed2 if sample.paired else None\n\t\toutputFastq2unpaired = sample.trimmed2Unpaired if sample.paired else None\n\n\t\tPE = sample.paired\n\t\tpe = \"PE\" if PE else \"SE\"\n\t\tcmd = tools.java + \" -Xmx\" + str(pm.mem) + \" -jar \" + 
tools.trimmomatic\n\t\tcmd += \" {0} -threads {1} {2}\".format(pe, args.cores, inputFastq1)\n\t\tif PE:\n\t\t\tcmd += \" {0}\".format(inputFastq2)\n\t\tcmd += \" {0}\".format(outputFastq1)\n\t\tif PE:\n\t\t\tcmd += \" {0} {1} {2}\".format(outputFastq1unpaired, outputFastq2, outputFastq2unpaired)\n\t\tif args.quantseq: cmd += \" HEADCROP:6\"\n\t\tcmd += \" ILLUMINACLIP:\" + resources.adapters + \":2:10:4:1:true\"\n\t\tif args.quantseq: cmd += \" ILLUMINACLIP:\" + \"/data/groups/lab_bsf/resources/trimmomatic_adapters/PolyA-SE.fa\" + \":2:30:5:1:true\"\n\t\tcmd += \" SLIDINGWINDOW:4:1\"\n\t\tcmd += \" MAXINFO:16:0.40\"\n\t\tcmd += \" MINLEN:21\"\n\n\n\t\tpm.run(cmd, sample.trimmed1 if sample.paired else sample.trimmed, shell=True, nofail=True,\n\t\t\tfollow = ngstk.check_trim(sample.trimmed, sample.paired, sample.trimmed2,\n\t\t\t\tfastqc_folder = os.path.join(sample.paths.sample_root, \"fastqc/\")))\n\t\tif not sample.paired:\n\t\t\tpm.clean_add(sample.trimmed, conditional=True)\n\t\telse:\n\t\t\tpm.clean_add(sample.trimmed1, conditional=True)\n\t\t\tpm.clean_add(sample.trimmed1Unpaired, conditional=True)\n\t\t\tpm.clean_add(sample.trimmed2, conditional=True)\n\t\t\tpm.clean_add(sample.trimmed2Unpaired, conditional=True)\n\n\telif pipeline_config.parameters.trimmer == \"skewer\":\n\t\tskewer_dirpath = os.path.join(sample.paths.sample_root, \"skewer\")\n\t\tngstk.make_dir(skewer_dirpath)\n\t\tsample.trimlog = os.path.join(skewer_dirpath, \"trim.log\")\n\t\tcmd = ngstk.skewer(\n\t\t\tinputFastq1=sample.fastq1 if sample.paired else sample.fastq,\n\t\t\tinputFastq2=sample.fastq2 if sample.paired else None,\n\t\t\toutputPrefix=os.path.join(sample.paths.sample_root, \"fastq/\", sample.sample_name),\n\t\t\toutputFastq1=sample.trimmed1 if sample.paired else sample.trimmed,\n\t\t\toutputFastq2=sample.trimmed2 if sample.paired else None,\n\t\t\tlog=sample.trimlog,\n\t\t\tcpus=args.cores,\n\t\t\tadapters=pipeline_config.resources.adapters\n\t\t)\n\t\tpm.run(cmd, sample.trimmed1 if sample.paired else sample.trimmed, shell=True, nofail=True, \n\t\t\tfollow = ngstk.check_trim(sample.trimmed, sample.paired, sample.trimmed2,\n\t\t\t\tfastqc_folder = os.path.join(sample.paths.sample_root, \"fastqc/\")))\n\t\tif not sample.paired:\n\t\t\tpm.clean_add(sample.trimmed, conditional=True)\n\t\telse:\n\t\t\tpm.clean_add(sample.trimmed1, conditional=True)\n\t\t\tpm.clean_add(sample.trimmed2, conditional=True)\n\n\t# With kallisto from unmapped reads\n\tpm.timestamp(\"Quantifying read counts with kallisto\")\n\n\tinputFastq = sample.trimmed1 if sample.paired else sample.trimmed\n\tinputFastq2 = sample.trimmed1 if sample.paired else None\n\ttranscriptomeIndex = os.path.join(\tpm.config.resources.genomes, \n\t\t\t\t\t\t\t\t\t\tsample.transcriptome,\n\t\t\t\t\t\t\t\t\t\t\"indexed_kallisto\",\n\t\t\t\t\t\t\t\t\t\tsample.transcriptome + \"_kallisto_index.idx\")\n\n\tbval = 0 # Number of bootstrap samples (default: 0)\n\tsize = 50 # Estimated average fragment length\n\tsdev = 20 # Estimated standard deviation of fragment length\n\tsample.paths.quant = os.path.join(sample.paths.sample_root, \"kallisto\")\n\tsample.kallistoQuant = os.path.join(sample.paths.quant,\"abundance.h5\")\n\tcmd1 = tools.kallisto + \" quant -b {0} -l {1} -s {2} -i {3} -o {4} -t {5}\".format(bval, size, sdev, transcriptomeIndex, sample.paths.quant, args.cores)\n\tif not sample.paired:\n\t\tcmd1 += \" --single {0}\".format(inputFastq)\n\telse:\n\t\tcmd1 += \" {0} {1}\".format(inputFastq, inputFastq2)\n\tcmd2 = tools.kallisto + \" h5dump -o {0} 
{0}/abundance.h5\".format(sample.paths.quant)\n\n\tpm.run([cmd1,cmd2], sample.kallistoQuant, shell=True, nofail=True)\n\n\tpm.stop_pipeline()\n\tprint(\"Finished processing sample %s.\" % sample.sample_name)", "title": "" }, { "docid": "ad7454e043010b83421cafdc37c3befc", "score": "0.4906048", "text": "def create_sample(self):\n pass", "title": "" }, { "docid": "44789b642abe61500ff9057c8af18ff5", "score": "0.49051026", "text": "def get_genotypes(chrom, start, end, sampleArray):\n\tvcf_reader = get_vcf_reader()\n\tgt_dict = {}\n\tfor i in sampleArray:\n\t\tgt = []\n\t\tfor record in vcf_reader.fetch(chrom, int(start), int(end)):\n\t\t\tfor sample in record.samples:\n\t\t\t\tif (i == sample.sample):\n\t\t\t\t\tgt.append(sample['GT'])\t\t\n\t\tgt_dict[i] = gt\n\t#test_answers(chrom, start, end)\n\treturn gt_dict", "title": "" }, { "docid": "58f7de5a37a6f10c4f20736e8b2b7614", "score": "0.49006668", "text": "def get_sample_info(self, accession=None, fo=None):\n \n if not fo:\n if not accession:\n sys.stderr.write(\"Please supply either accession or fo\")\n sys.exit(1)\n # Dowload GEO SOFT file\n soft = GEOFTP_URLBASE.format(accession)\n try:\n fh = urlopen(soft)\n except:\n sys.stderr.write(\"Could not open SOFT URL {0}\\n\".format(soft))\n sys.exit(1)\n fo = StringIO.StringIO(fh.read())\n \n # Parse gzipped SOFT file\n g = gzip.GzipFile(fileobj=fo)\n record = self._soft_read(g)\n \n self.gse = record['SERIES'].keys()[0]\n \n for gsm, data in record['SAMPLE'].items():\n #for k,v in data.items():\n # print k,v\n sample = {'gsm':gsm}\n sample['tax_id'] = int(data['Sample_taxid_ch1'][0])\n sample['sra'] = []\n sample['name'] = data['Sample_title'][0]\n sample['library'] = data['Sample_library_strategy'][0]\n sample['info'] = data['Sample_characteristics_ch1']\n for sra_link in [x for x in data['Sample_relation'] if x.startswith(\"SRA\")]:\n sample['sra'].append(sra_link)\n yield sample", "title": "" }, { "docid": "522bcf8f5c49fee6b0b0a1bd3b587b90", "score": "0.4885065", "text": "def __init__(self,\n start_chr, start_pos, n_loci,\n whole_genome = True,\n span_chromosomes = True,\n verbose = True):\n\n self.n_loci = n_loci\n self.whole_genome = whole_genome\n self.span_chromosomes = span_chromosomes\n self.verbose = verbose\n\n # data locations\n mums_dir = \"/data/safe/paa/analysis/mums\"\n fasta = mums_dir + \"/hg19/chrAll.fa\"\n if whole_genome:\n counts_name = \"genome1M.bed\"\n families_file = mums_dir + \"/wg-families.txt\"\n samples_dir = mums_dir + \"/wg-output/samples\"\n bed_file = mums_dir + \"/\" + counts_name\n else:\n counts_name = \"target50.bed\"\n families_file = mums_dir + \"/families.txt\"\n samples_dir = mums_dir + \"/output/samples\"\n bed_file = mums_dir + \"/\" + counts_name\n\n # objects to load\n pop = Population(families_file)\n ref = Reference(fasta)\n mapp = Mappability(fasta)\n\n # locus and sample counts\n n_samples = pop.n_samples()\n if self._minimal_load:\n n_samples = self._samples_show\n if n_loci > self._loci_show:\n n_loci = self._loci_show\n\n # just to get loci_left in chromosome or genome\n counts_dir = pop.mumdex_name(samples_dir, 0) + \"/\" + counts_name\n sample_counts = Counts(bed_file, counts_dir)\n sample_counts.load_position(start_chr, start_pos)\n if span_chromosomes:\n loci_left = sample_counts.positions_to_genome_end()\n else:\n loci_left = sample_counts.positions_to_next_chromosome()\n if n_loci > loci_left:\n n_loci = loci_left\n\n # create empty numpy arrays\n samples_type = np.dtype({'names':['fam_id', 'fam_size',\n 'sample_id', 'rtp', 
'gender'],\n 'formats':['S12', 'uint8',\n 'S8', 'S7', 'S3']})\n samples = np.empty((n_samples,), dtype = samples_type)\n positions_type = {'names':['chrom', 'pos', 'abs_pos', 'v1',\n 'ref_base', 'low_map', 'high_map'],\n 'formats':['S25', 'int', 'uint', 'int',\n 'S1', 'uint8', 'uint8']}\n positions = np.empty((n_loci,), dtype = positions_type)\n shape = (2, n_samples, n_loci)\n refs = np.empty(shape, np.uint16)\n anchors = np.empty(shape, np.uint16)\n position_coverage = np.zeros((2, n_loci), np.float64)\n sample_coverage = np.zeros((2, n_samples), np.float64)\n \n # load data\n next_chr = \"\"\n next_pos = 0;\n chr = start_chr\n chr_index = ref.index(chr)\n abspos_offset = ref.offset(chr_index)\n for s in xrange(0, n_samples):\n if verbose:\n print >> sys.stderr, \\\n \"\\rReading\", s + 1, \"of\", n_samples, \"samples for\", \\\n n_loci, \"loci\",\n sys.stdout.flush()\n samples[s] = tuple(\n [pop.family(pop.sample_family(s)), pop.n_members(s),\n pop.sample(s), pop.member(s), pop.sex(s)])\n counts_dir = pop.mumdex_name(samples_dir, s) + \"/\" + counts_name\n sample_counts = Counts(bed_file, counts_dir)\n sample_counts.load_position(start_chr, start_pos)\n last_chr = \"\"\n for n in xrange(0, n_loci):\n if span_chromosomes:\n chr = sample_counts.chromosome();\n if last_chr != chr:\n chr_index = ref.index(chr)\n abspos_offset = ref.offset(chr_index)\n last_chr = chr\n if s == 0:\n pos = sample_counts.position()\n abspos = pos + abspos_offset\n positions[n] = (chr, pos, abspos, 0,\n ref.base_index(chr_index, pos),\n mapp.low_map(abspos),\n mapp.high_map(abspos)) \n for o in xrange(0, 2):\n refs[o][s][n] = sample_counts.reference(o)\n anchors[o][s][n] = sample_counts.anchor(o)\n coverage = float(sample_counts.reference(o)) + \\\n float(sample_counts.anchor(o))\n position_coverage[o][n] += coverage\n sample_coverage[o][s] += coverage\n \n if n + 1 != n_loci:\n sample_counts.load_next()\n elif s == 0 and sample_counts.positions_to_genome_end() - 1:\n sample_counts.load_next()\n next_chr = sample_counts.chromosome()\n next_pos = sample_counts.position()\n\n for o in xrange(0, 2):\n for s in xrange(0, n_samples):\n sample_coverage[o][s] /= n_loci\n for n in xrange(0, n_loci):\n position_coverage[o][n] /= n_samples\n\n if verbose:\n print >> sys.stderr\n\n self.samples = samples\n self.positions = positions\n self.refs = refs\n self.anchors = anchors\n self.sample_coverage = sample_coverage\n self.position_coverage = position_coverage\n self.next_chr = next_chr\n self.next_pos = next_pos", "title": "" }, { "docid": "83b6a034d42ddda36673520894d7a550", "score": "0.48809007", "text": "def _actionAfterReading(self):\r\n for contig in self.contigs:\r\n contig.constructHaplotypes()", "title": "" }, { "docid": "ac141416c815f56464440b560ee034c5", "score": "0.48730397", "text": "def gethists(self, *args, **kwargs):\n verbosity = LOG.getverbosity(kwargs)\n if verbosity>=1:\n print \">>> gethists\"\n variables, selection, issingle = unwrap_gethist_args(*args)\n datavars = filter(lambda v: v.data,variables) # filter out gen-level variables\n dodata = kwargs.get('data', True ) # create data hists\n domc = kwargs.get('mc', True ) # create expected (SM background) hists\n doexp = kwargs.get('exp', domc ) # create expected (SM background) hists\n dosignal = kwargs.get('signal', domc and self.sigsamples ) # create signal hists (for new physics searches)\n weight = kwargs.get('weight', \"\" ) # extra weight (for MC only)\n dataweight = kwargs.get('dataweight', \"\" ) # extra weight for data\n replaceweight = 
kwargs.get('replaceweight', None ) # replace substring of weight\n split = kwargs.get('split', True ) # split samples into components\n blind = kwargs.get('blind', True ) # blind data in some given range: blind={xvar:(xmin,xmax)}\n scaleup = kwargs.get('scaleup', 0.0 ) # scale up histograms\n reset = kwargs.get('reset', False ) # reset scales\n parallel = kwargs.get('parallel', False ) # create and fill hists in parallel\n tag = kwargs.get('tag', \"\" )\n method = kwargs.get('method', None ) # data-driven method; 'QCD_OSSS', 'QCD_ABCD', 'JTF', 'FakeFactor', ...\n imethod = kwargs.get('imethod', -1 ) # position on list; -1 = last (bottom of stack)\n filters = kwargs.get('filter', None ) or [ ] # filter these samples\n vetoes = kwargs.get('veto', None ) or [ ] # filter out these samples\n #makeJTF = kwargs.get('JTF', False ) and data\n #nojtf = kwargs.get('nojtf', makeJTF ) and data\n #keepWJ = kwargs.get('keepWJ', False )\n #makeQCD = kwargs.get('QCD', False ) and data and not makeJTF\n #ratio_WJ_QCD = kwargs.get('ratio_WJ_QCD_SS', False )\n #QCDshift = kwargs.get('QCDshift', 0.0 )\n #QCDrelax = kwargs.get('QCDrelax', False )\n #JTFshift = kwargs.get('JTFshift', [ ] )\n sysvars = kwargs.get('sysvars', { } ) # list or dict to be filled up with systematic variations\n addsys = kwargs.get('addsys', True )\n task = kwargs.get('task', \"Creating histograms\" ) # task title for loading bar\n #saveto = kwargs.get('saveto', \"\" ) # save to TFile\n #file = createFile(saveto,text=cuts) if saveto else None\n filters = ensurelist(filters)\n vetoes = ensurelist(vetoes)\n if method and not hasattr(self,method):\n ensuremodule(method,'Plotter.methods') # load SampleSet class method\n \n # FILTER\n samples = [ ]\n for sample in self.samples:\n if not dosignal and sample.issignal: continue\n if not dodata and sample.isdata: continue\n if split and sample.splitsamples:\n subsamples = sample.splitsamples\n else:\n subsamples = [sample] # sample itself\n for subsample in subsamples:\n if filters and not subsample.match(*filters): continue\n if vetoes and subsample.match(*vetoes): continue\n samples.append(subsample)\n #if nojtf:\n # samples = [s for s in samples if not ((not keepWJ and s.match('WJ',\"W*J\",\"W*j\")) or \"gen_match_2==6\" in s.cuts or \"genPartFlav_2==0\" in s.cuts)]\n \n # INPUT / OUTPUT\n mcargs = (variables,selection)\n dataargs = (datavars, selection)\n expkwargs = { 'tag':tag, 'weight': weight, 'replaceweight': replaceweight, 'verbosity': verbosity, } #'nojtf': nojtf \n sigkwargs = { 'tag':tag, 'weight': weight, 'replaceweight': replaceweight, 'verbosity': verbosity, 'scaleup': scaleup }\n datakwargs = { 'tag':tag, 'weight': dataweight, 'verbosity': verbosity, 'blind': blind, 'parallel': parallel }\n result = HistSet(variables,dodata,doexp,dosignal) # container for dictionaries of histogram (list): data, exp, signal\n if not variables:\n LOG.warning(\"Sample.gethists: No variables to make histograms for...\")\n return result\n \n # PRINT\n bar = None\n if verbosity>=2:\n if not ('QCD' in task or 'JFR' in task):\n LOG.header(\"Creating histograms for %s\"%selection) #.title\n print \">>> variables: '%s'\"%(\"', '\".join(v.filename for v in variables))\n #print \">>> split=%s, makeQCD=%s, makeJTF=%s, nojtf=%s, keepWJ=%s\"%(split,makeQCD,makeJTF,nojtf,keepWJ)\n print '>>> with extra weights \"%s\" for MC and \"%s\" for data'%(weight,dataweight)\n elif self.loadingbar and verbosity<=1:\n bar = LoadingBar(len(samples),width=16,pre=\">>> %s: \"%(task),counter=True,remove=True) # %s: 
selection.title\n \n # GET HISTOGRAMS (PARALLEL)\n if parallel:\n expproc = MultiProcessor()\n sigproc = MultiProcessor()\n dataproc = MultiProcessor()\n for sample in samples:\n if reset: sample.resetscale()\n if sample.name in self.ignore: continue\n if dosignal and sample.issignal: # SIGNAL\n sigproc.start(sample.gethist,mcargs,sigkwargs,name=sample.title)\n elif doexp and sample.isexp: # EXPECTED (SM BACKGROUND)\n expproc.start(sample.gethist,mcargs,expkwargs,name=sample.title)\n elif dodata and sample.isdata: # DATA\n dataproc.start(sample.gethist,dataargs,datakwargs,name=sample.title)\n for dtype, processor, varset in [('exp',expproc,variables),('sig',sigproc,variables),('data',dataproc,datavars)]:\n for process in processor:\n if bar: bar.message(process.name)\n newhists = process.join()\n for var, hist in zip(varset,newhists): # assume match variables -> histograms\n if dtype=='data':\n getattr(result,dtype)[var] = hist\n else:\n getattr(result,dtype)[var].append(hist)\n if bar: bar.count(\"%s done\"%process.name)\n \n # GET HISTOGRAMS (SEQUENTIAL)\n else:\n for sample in samples:\n if bar: bar.message(sample.title)\n if reset: sample.resetscale()\n if sample.name in self.ignore:\n if bar: bar.count(\"%s skipped\"%sample.title)\n continue\n if dosignal and sample.issignal: # SIGNAL\n hists = sample.gethist(*mcargs,**sigkwargs)\n for var, hist in zip(variables,hists):\n result.signal[var].append(hist)\n elif doexp and sample.isexp: # EXPECTED (SM BACKGROUND)\n hists = sample.gethist(*mcargs,**expkwargs)\n for var, hist in zip(variables,hists):\n result.exp[var].append(hist)\n elif dodata and sample.isdata: # DATA\n hists = sample.gethist(*mcargs,**datakwargs)\n for var, hist in zip(datavars,hists):\n result.data[var] = hist\n if bar: bar.count(\"%s done\"%sample.title)\n \n # EXTRA METHODS\n if method:\n hists = getattr(self,method)(*dataargs,**kwargs)\n for var, hist in zip(datavars,hists):\n idx = imethod if imethod>=0 else len(result.exp[var])+1+imethod\n result.exp[var].insert(idx,hist)\n \n ## ADD QCD\n #if makeJTF:\n # hists = self.jetTauFake(*argsD,tag=tag,weight=weight,replaceweight=replaceweight,verbosity=verbosity,saveToFile=file,parallel=parallel,shift=JTFshift,sysvars=sysvars,addsys=addsys)\n # for var, hist in zip(variablesD,hists):\n # result.exp[var].insert(0,hist)\n #elif makeQCD:\n # hists = self.QCD(*argsD,tag=tag,weight=weight,replaceweight=replaceweight,verbosity=verbosity,shift=QCDshift,ratio_WJ_QCD_SS=ratio_WJ_QCD,saveToFile=file,parallel=parallel)\n # for var, hist in zip(variablesD,hists):\n # result.exp[var].insert(0,hist)\n \n ## SAVE histograms\n #if file:\n # file.cd()\n # for hist in histsD + result.exp + result.exp:\n # hist.GetXaxis().SetTitle(var)\n # hist.Write(hist.GetName())\n # #file.Write(hist.GetName())\n # file.Close()\n \n # YIELDS\n if verbosity>=2 and len(variables)>0:\n var = variables[0]\n print \">>> selection:\"\n print \">>> %r\"%(selection.selection)\n print \">>> yields: \"\n TAB = LOG.table(\"%11.1f %11.2f %r\")\n TAB.printheader(\"entries\",\"integral\",\"hist name\")\n totint = 0\n totent = 0\n if dodata:\n TAB.printrow(result.data[var].Integral(),result.data[var].GetEntries(),result.data[var].GetName())\n for hist in result.exp[var]:\n totint += hist.Integral()\n totent += hist.GetEntries()\n TAB.printrow(hist.Integral(),hist.GetEntries(),hist.GetName())\n TAB.printrow(totint,totent,\"total exp.\")\n if dosignal:\n for hist in result.signal[var]:\n TAB.printrow(hist.Integral(),hist.GetEntries(),hist.GetName())\n \n if issingle:\n 
result.setsingle()\n return result\n return result", "title": "" }, { "docid": "ad4d1ee340fd366f9163ef482ec40a14", "score": "0.4869646", "text": "def _generate_250K_2010_FLC_data_(impute=True):\n\timport phenotypeData as pd\n\timport env\n\t\n\tphed = pd.readPhenotypeFile(\"/Users/bjarnivilhjalmsson/Projects/Data/phenotypes/FLC_phenotypes_011710.tsv\") \n\t\n\td2010_file = env.home_dir+\"Projects/Data/2010/2010_imputed_012610.csv\"\n\td2010_sd = dataParsers.parse_snp_data(d2010_file,id=\"2010_data\")\n\td2010_sd.filter_accessions(phed.accessions)\n\td2010_sd.filter_na_snps()\n\td2010_sd.filter_maf_snps(0.05)\n\n\t#d250k_file = env.home_dir+\"Projects/Data/250k/250K_t54.csv\"\n\td250k_file = env.home_dir+\"Projects/Data/250k/250K_192_043009.csv\"\n\td250k_sd = dataParsers.parse_snp_data(d250k_file)\n\td250k_sd.filter_accessions(phed.accessions)\n\td250k_sd.filter_maf_snps(0.05)\n\t\n\td250k_sd.merge_snps_data(d2010_sd)\n\td250k_sd.filter_na_accessions()\n\td250k_sd.filter_na_snps(0.7)\t\n\td250k_sd.filter_monomorphic_snps()\n\t\n\n\tref_seq_name = \"raw_ref_col-0\"\n\tref_start = 3170501\n\tref_chr = 5\n\tseq_file = env.home_dir+\"Projects/FLC_analysis/flc_seqs_aln_merged_050410.fasta\"\n\tad = sequences.readFastaAlignment(seq_file,ref_seq_name=ref_seq_name,ref_start=ref_start,\n\t\t\tref_chr=ref_chr,alignment_type=\"muscle\",ref_direction=1)\n#\tref_start = 3170500\n#\tad2 = sequences.readFastaAlignment(seq_file,ref_seq_name=ref_seq_name,ref_start=ref_start,\n#\t\t\tref_chr=ref_chr,alignment_type=\"muscle\",ref_direction=1)\n#\tref_start = 3170502\n#\tad3 = sequences.readFastaAlignment(seq_file,ref_seq_name=ref_seq_name,ref_start=ref_start,\n#\t\t\tref_chr=ref_chr,alignment_type=\"muscle\",ref_direction=1)\n\tpdb.set_trace()\n\tr = ad.get_snps(type=0)\n\tseq_snpsd1 = r['snpsd']\n\tseq_snpsd1.merge_data(r['indels'],error_threshold=0.0)\n\n#\tr2 = ad2.get_snps(type=0)\n#\tseq_snpsd2 = r2['snpsd']\n#\tseq_snpsd2.merge_data(r2['indels'],error_threshold=0.0)\n#\n#\tr3 = ad3.get_snps(type=0)\n#\tseq_snpsd3 = r3['snpsd']\n#\tseq_snpsd3.merge_data(r3['indels'],error_threshold=0.0)\n\t\n\t\n\tprint \"Now merging data..\"\n\n\td250k_sd.snpsDataList[4].compareWith(seq_snpsd1)\n#\td250k_sd.snpsDataList[4].compareWith(seq_snpsd2)\n#\td250k_sd.snpsDataList[4].compareWith(seq_snpsd3)\n\td250k_sd.snpsDataList[4].merge_data(seq_snpsd1,union_accessions=False)\n\td250k_sd.filter_na_accessions()\n\td250k_sd.filter_na_snps(0.7)\t\n\td250k_sd.filter_monomorphic_snps()\t\n\td250k_sd.snpsDataList[4].impute_data()\n\td250k_sd.writeToFile(\"/tmp/test.csv\")\n\tprint \"YEAH!\"", "title": "" }, { "docid": "0b02c1ce5cd0c6b47395a5e49ca5c925", "score": "0.4865275", "text": "def __init__(self, *args):\n _snap.TFfGGen_swiginit(self,_snap.new_TFfGGen(*args))", "title": "" }, { "docid": "b1c5454d8d322baf17fddc2903f07a2c", "score": "0.48639834", "text": "def run(protocol):\n #Load Tips\n\n tips20 = [protocol.load_labware('opentrons_96_tiprack_20ul', '1')]\n\n #Load Pipettes\n #p20Single = protocol.load_instrument('p20_single_gen2', 'right', tip_racks=tips20)\n p20Multi = protocol.load_instrument('p20_multi_gen2', 'right', tip_racks=tips20)\n \n #load labware\n #Transformation wellplates\n transformation = protocol.load_labware(\"corning_96_wellplate_360ul_flat\", '2')\n\n \n #Agar plates\n \n agar1 = protocol.load_labware(\"biorad_96_wellplate_200ul_pcr\", \"4\")\n agar2 = protocol.load_labware(\"biorad_96_wellplate_200ul_pcr\", \"5\")\n\n\n def spot(dest, spot_vol):\n \"\"\"Takes a diluted transformed culture and spots the 
defined volume onto agar \n in a Nunc omnitray\"\"\"\n\n\n SAFE_HEIGHT = 15 \n spotting_dispense_rate=0.25 \n\n p20Multi.move_to(dest.top(SAFE_HEIGHT))\n protocol.max_speeds[\"Z\"] = 50\n p20Multi.move_to(dest.top(2))\n p20Multi.dispense(volume=spot_vol, rate=spotting_dispense_rate)\n p20Multi.move_to(dest.top(0))\n del protocol.max_speeds[\"Z\"]\n \n\n for i in range(1,6):\n w1 = \"A\" + str(i)\n w2 = \"A\" + str(i + 6)\n p20Multi.pick_up_tip()\n p20Multi.mix(3, 20, transformation[w1])\n p20Multi.aspirate(10, transformation[w1])\n spot(agar1[w1], 5)\n spot(agar1[w2], 5)\n p20Multi.drop_tip()", "title": "" }, { "docid": "907a786dd6d47676ca8137dec3aa9f46", "score": "0.48559937", "text": "def run_gixs(t=0.5, user_name=\"XXX\"):\n # define names of samples on sample bar\n sample_list = [\n \"S1_P3PS_1p10_40mgml_Drop\",\n ]\n # define piezo-x-postion\n x_list = [\n -33000,\n ]\n assert len(x_list) == len(sample_list), f\"Sample name/position list is borked\"\n inc_angles = np.array(\n [\n 0.05,\n 0.08,\n 0.1,\n 0.12,\n 0.2,\n 0.3,\n ]\n ) # incident angles\n # waxs_angle_array = np.linspace(0, 84, 15)\n waxs_angle_array = np.linspace(\n 0, 19.5, 4\n ) # q=4*3.14/0.77*np.sin((max angle+3.5)/2*3.14159/180)\n # if 12, 3: up to q=2.199\n # if 18, 4: up to q=3.04\n # if 24, 5: up to q=3.87\n # if 30, 6: up to q=4.70\n # 52/6.5 +1 =8\n max_waxs_angle = np.max(waxs_angle_array)\n inverse_angle = False\n for x, sample in zip(x_list, sample_list): # loop over samples on bar\n yield from bps.mv(piezo.x, x) # move to next sample\n yield from alignement_gisaxs(0.1) # run alignment routine\n th_meas = (\n inc_angles + piezo.th.position\n ) # np.array([0.10 + piezo.th.position, 0.20 + piezo.th.position])\n th_real = inc_angles\n det_exposure_time(t, t)\n if inverse_angle:\n Twaxs_angle_array = waxs_angle_array[::-1]\n else:\n Twaxs_angle_array = waxs_angle_array\n for waxs_angle in Twaxs_angle_array: # loop through waxs angles\n yield from bps.mv(waxs, waxs_angle)\n if waxs_angle == max_waxs_angle:\n dets = [\n \"pil300KW\",\n \"pil1M\",\n ] # waxs, maxs, saxs = [pil300KW, rayonix, pil1M]\n print(\"Meausre both saxs and waxs here for w-angle=%s\" % waxs_angle)\n else:\n dets = [\"pil300KW\"]\n for i, th in enumerate(th_meas): # loop over incident angles\n yield from bps.mv(piezo.th, th)\n x_meas = x + (1 + i) * 200 # move the x-position\n yield from bps.mv(piezo.x, x_meas)\n if inverse_angle:\n name_fmt = \"{sample}_{th:5.4f}deg_waxsN{waxs_angle:05.2f}_x{x}_expt{t}s_sid{scan_id:08d}\"\n else:\n name_fmt = \"{sample}_{th:5.4f}deg_waxsP{waxs_angle:05.2f}_x{x}_expt{t}s_sid{scan_id:08d}\"\n sample_name = name_fmt.format(\n sample=sample,\n th=th_real[i],\n waxs_angle=waxs_angle,\n x=x_meas,\n t=t,\n scan_id=RE.md[\"scan_id\"],\n )\n sample_id(user_name=user_name, sample_name=sample_name)\n print(f\"\\n\\t=== Sample: {sample_name} ===\\n\")\n yield from bp.count(dets, num=1)\n inverse_angle = not inverse_angle\n sample_id(user_name=\"test\", sample_name=\"test\")\n det_exposure_time(0.5)", "title": "" }, { "docid": "2c28df7ac36d1623722bea786d90da6d", "score": "0.4851177", "text": "def main():\n\n logging.basicConfig(format=\"[%(asctime)s] %(levelname)s: %(message)s\",\n level=logging.DEBUG)\n\n logging.debug(\"----------- STARTING ----------\")\n\n args = vars(get_args())\n\n logging.debug(\"Args: %s\", args)\n\n # read in the phe metadata fromt he civet input file, like region etc.\n phe_metadata = {}\n with open(args['phemetadata'], 'r') as phemd:\n reader = csv.DictReader(phemd)\n for row in reader:\n 
phe_metadata[row['name']] = {'country': row['region'], 'casecontact': row['casecontact']}\n\n # read in cogmetadata and store uklin and epiweek per samples\n qry_samples = {}\n with open(args['cogmetadata'], 'r') as cogmetadata:\n reader = csv.DictReader(cogmetadata)\n for row in reader:\n for query_id in phe_metadata:\n if query_id in row['sequence_name']:\n uklin = row['uk_lineage']\n epiweek = int(row['epi_week'])\n qry_samples[query_id] = {'uklin': uklin, 'epiweek': epiweek}\n\n logging.debug(\"Read %i query samples\", len(qry_samples))\n\n # output a table\n sys.stdout.write(\"#query_id\\tregion\\tcasecontact\\tuklin\\ttotal\\twk_b4\\t2wk_b4\\t2prec_wks\\t4prec_wks\\tmin_d\\t2follow\\t4follow\\tPreceding Dist\\n\")\n\n # open cog metadata again\n with open(args['cogmetadata'], 'r') as mdfile:\n reader = csv.DictReader(mdfile)\n\n # iterate through samples\n for qryid in qry_samples:\n # reset values that are outputted for each query sequence.\n cnt = 0\n wkb4_1 = 0\n wkb4_2 = 0\n prec_wks2 = 0\n prec_wks4 = 0\n foll_wks2 = 0\n foll_wks4 = 0\n\n # get the seq of the query samples from the alignment\n qry_seq = get_seq_from_aln([qryid], args['cogalign'])\n assert len(qry_seq) == 1 # better check ...\n\n this_sample_epi_week = qry_samples[qryid]['epiweek']\n\n # iterate through the COG metadata\n beforeseqnames = []\n foll2wks_seqnames = []\n foll4wks_seqnames = []\n for row in reader:\n if row['uk_lineage'] == qry_samples[qryid]['uklin']:\n # count samples w/ same lineage\n cnt += 1\n row_epi_week = int(row['epi_week'])\n # count all older samples by epiweek\n if row_epi_week <= this_sample_epi_week - 1:\n wkb4_1 += 1\n # count all samples that are two weeks older\n if row_epi_week <= this_sample_epi_week - 2:\n wkb4_2 += 1\n # count all samples in the preceeding 2 epi weeks\n if row_epi_week < this_sample_epi_week and row_epi_week >= this_sample_epi_week - 2:\n prec_wks2 += 1\n # count all samples in the preceeding 4 epi weeks\n if row_epi_week < this_sample_epi_week and row_epi_week >= this_sample_epi_week - 4:\n prec_wks4 += 1\n # store other seqname\n beforeseqnames.append(row['sequence_name'])\n # count all samples in the following 2 epi weeks\n if row_epi_week > this_sample_epi_week and row_epi_week <= this_sample_epi_week + 2:\n foll2wks_seqnames.append(row['sequence_name'])\n # count all samples in the following 4 epi weeks\n if row_epi_week > this_sample_epi_week and row_epi_week <= this_sample_epi_week + 4:\n foll4wks_seqnames.append(row['sequence_name'])\n\n # calculate minimum distance to preceding samples\n mindis = -1\n minoth = 1000\n prec4 = \"\"\n if len(beforeseqnames) > 0:\n # get stored min dist from for this query sample\n otherseqs = get_seq_from_aln(beforeseqnames, args['cogalign'])\n y = list(qry_seq.values())[0]\n mindis = min([calc_pw_dist(y, x) for x in otherseqs.values()])\n #look at all seq in the preceeding 4 weeks\n if len(otherseqs) <= 1000:\n for ky1, vl1 in otherseqs.items():\n minoth = 10000\n for ky2, vl2 in otherseqs.items():\n if ky2 != ky1:\n odist = calc_pw_dist(vl1, vl2)\n if odist < minoth:\n minoth = odist\n if minoth < 10000:\n prec4 += \"{},\".format(minoth)\n else:\n prec4 = \"-1\"\n\n\n\n # calculate how many samples are within 2 SNPs in the follwing 2 and 4 weeks\n foll_wks2 = 0\n foll_wks4 = 0\n if len(foll4wks_seqnames) > 0:\n # logging.debug(\"%i in the following 4 weeks\", len(foll4wks_seqnames))\n # logging.debug(\"%i in the following 2 weeks\", len(foll2wks_seqnames))\n # get all seqs in the 4 weeks after from alignment - 2 weeks are 
a strict subset\n otherseqs = get_seq_from_aln(foll4wks_seqnames, args['cogalign'])\n # y is now the query sequence\n y = list(qry_seq.values())[0]\n all_dists = {}\n wks2_dists = []\n for (nme, sq1) in otherseqs.items():\n nme = nme.replace(\">\", \"\").strip()\n dst = calc_pw_dist(y, sq1)\n # all_dists now contains all seqs from the 4 weeks after\n all_dists[nme] = dst\n if nme in foll2wks_seqnames:\n # wks2_dists contains a subset of those\n wks2_dists.append(dst)\n # logging.debug(\"4 weeks all dists: %s\", all_dists.values())\n # logging.debug(\"2 weeks all dists: %s\", wks2_dists)\n foll_wks4 = sum([1 if d <= 2 else 0 for d in all_dists.values()])\n foll_wks2 = sum([1 if d <= 2 else 0 for d in wks2_dists])\n\n # output table row for this query sample\n # logging.debug(\"%i samples with UK lineage %s, %i earlier ones, same as %s\", cnt, qry_samples[qryid]['uklin'], earlier, qryid)\n sys.stdout.write(\"%s\\t%s\\t%s\\t%s\\t%i\\t%i\\t%i\\t%i\\t%i\\t%i\\t%i\\t%i\\t%s\\n\" % (qryid,\n phe_metadata[qryid]['country'],\n phe_metadata[qryid]['casecontact'],\n qry_samples[qryid]['uklin'],\n cnt,\n wkb4_1,\n wkb4_2,\n prec_wks2,\n prec_wks4,\n mindis,\n foll_wks2,\n foll_wks4,\n prec4))\n\n # reset the iterator of the COG metadatafile for next query sample\n mdfile.seek(0)\n next(reader)\n\n logging.debug(\"----------- FINISHED ----------\")\n return 0", "title": "" }, { "docid": "24eb178cb9d46a2f6bc25aa6ce634871", "score": "0.48431617", "text": "def main():\n parser = argparse.ArgumentParser(description=\"Build sample-specific co-expression networks\" + \\\n \"for a sampled LOSO on the RFS data\",\n add_help=True)\n parser.add_argument(\"data_dir\", help=\"Path to the data\")\n parser.add_argument(\"repeat\", help=\"Index of the repeat\", type=int)\n args = parser.parse_args()\n\n outDir = '%s/outputs/U133A_combat_RFS/sampled_loso/repeat%d' % (args.data_dir, args.repeat)\n \n # Create outDir if it does not exist\n if not os.path.isdir(outDir):\n sys.stdout.write(\"Creating %s\\n\" % outDir)\n try: \n os.makedirs(outDir)\n except OSError:\n if not os.path.isdir(outDir):\n raise\n\n # Get expression data, sample labels.\n # Do not normalize the data while loading it (so as not to use test data for normalization).\n f = h5py.File(\"%s/ACES/experiments/data/U133A_combat.h5\" % args.data_dir)\n expressionData = np.array(f['U133A_combat_RFS']['ExpressionData'])\n sampleLabels = np.array(f['U133A_combat_RFS']['PatientClassLabels'])\n sampleAccess = np.array(f['U133A_combat_RFS']['PatientLabels']).tolist()\n f.close()\n \n # Map the indices to the studies\n studyDict = {} # studyId:[sampleIdx]\n\n gse_rfs_dir = '%s/GSE_RFS/' % args.data_dir\n for studyFile in os.listdir(gse_rfs_dir):\n studyPath = '%s/%s' % (gse_rfs_dir, studyFile)\n print studyPath\n with open(studyPath, 'r') as f:\n gsmNames = set([x.split()[0] for x in f.readlines()])\n f.close()\n gsmNames = gsmNames.intersection(set(sampleAccess))\n studyDict[studyFile.split(\".\")[0]] = [sampleAccess.index(gsm) for gsm in gsmNames]\n \n studyList = studyDict.keys()\n numStudies = len(studyList)\n print \"Found %d studies\" % numStudies\n\n np.random.seed(seed=args.repeat)\n for foldNr in range(numStudies):\n # Training data:\n # randomly sample 50% of each study that is not foldNr\n trIndices = []\n for studyId in [x for x in studyList if x!=foldNr]:\n studyIndices = np.random.choice(studyDict[studyId],\n size=len(studyDict[studyId])/2,\n replace=False)\n trIndices.extend(studyIndices)\n # studyIndices = studyDict[studyId]\n # 
random.shuffle(studyIndices)\n # n = len(studyIndices)\n # trIndices.extend(studyIndices[:(n/2)])\n \n # Test data:\n # the data from foldNr\n teIndices = studyDict[studyList[foldNr]]\n \n # Create output directory\n foldDir = \"%s/fold%d\" % (outDir, foldNr)\n try: \n os.makedirs(foldDir)\n except OSError:\n if not os.path.isdir(foldDir):\n raise\n \n # Save train indices to file\n trIndicesF = '%s/train.indices' % foldDir\n np.savetxt(trIndicesF, trIndices, fmt='%d')\n sys.stdout.write(\"Wrote training indices for fold %d to %s\\n\" % (foldNr, trIndicesF))\n\n # Save test indices to file\n teIndicesF = '%s/test.indices' % foldDir\n np.savetxt(teIndicesF, teIndices, fmt='%d')\n sys.stdout.write(\"Wrote test indices for fold %d to %s\\n\" % (foldNr, teIndicesF))\n\n # Save train labels to file\n trLabelsF = '%s/train.labels' % foldDir\n np.savetxt(trLabelsF, np.array(sampleLabels[trIndices], dtype='int'),\n fmt='%d')\n sys.stdout.write(\"Wrote training labels for fold %d to %s\\n\" % (foldNr, trLabelsF))\n\n # Save test labels to file\n teLabelsF = '%s/test.labels' % foldDir\n np.savetxt(teLabelsF, np.array(sampleLabels[teIndices], dtype='int'),\n fmt='%d')\n sys.stdout.write(\"Wrote test labels for fold %d to %s\\n\" % (foldNr, teLabelsF))", "title": "" }, { "docid": "62efdd299a933dafe72b8b6b894ef469", "score": "0.48404464", "text": "def generate_geno_annot_split(path_geno, path_annot, ancestries, args, nbins):\n logging.info('Generating genotype files...')\n # obtain genotype data\n geno_full_path = MT_TEMP_BUCKET + 'initial_genotypes_all_ancestry.mt'\n if hl.hadoop_exists(geno_full_path):\n logging.info('Genotype (prefiltered by pops and withdrawal) MatrixTable found and loaded.')\n mt = hl.read_matrix_table(geno_full_path)\n else:\n logging.info('Creating genotype (prefiltered by pops and withdrawal) MatrixTable...')\n mt = get_mt_filtered_by_pops(pops=ancestries, entry_fields=('GT',))\n if args.checkpoint:\n mt = mt.checkpoint(geno_full_path, overwrite=True)\n\n if args.verbose:\n print('Imported per-population genotype Ns:')\n print_pop_Ns(mt)\n\n MAFRANGE = MAFRANGE_2 if args.maf_bins_2 else MAFRANGE_5\n \n geno_filtered_path = MT_TEMP_BUCKET + 'filtered_genotypes_all_ancestry.mt'\n if hl.hadoop_exists(geno_filtered_path):\n logging.info('Genotype MatrixTable, filtered, loaded.')\n mt_filt = hl.read_matrix_table(geno_filtered_path)\n else:\n logging.info('Filtering genotype MatrixTable...')\n\n # remove relateds\n mt_nonrel = mt.filter_cols(~mt.related)\n\n # filter MAF > cutoff (in all populations) and is defined (all populations)\n custom_af_ht_path = MT_TEMP_BUCKET + 'rhemc_custom_af_ht.ht'\n if hl.hadoop_is_file(custom_af_ht_path + '/_SUCCESS'):\n af_ht = hl.read_table(custom_af_ht_path)\n else:\n af_mt = mt_nonrel.group_cols_by(mt_nonrel.pop).aggregate(call_info = hl.agg.call_stats(mt_nonrel.GT, mt_nonrel.alleles))\n af_mt = af_mt.annotate_entries(**{x: af_mt.call_info[x] for x in af_mt.call_info.keys()}).drop('call_info')\n af_ht = af_mt.localize_entries('col_info', 'pops')\n af_ht = af_ht.annotate(af_dict = hl.dict(hl.zip(af_ht.pops.pop, af_ht.col_info.AF, fill_missing=True))).drop('col_info')\n af_ht = af_ht.annotate(af = af_ht.af_dict.map_values(lambda x: x[1]))\n af_ht = af_ht.checkpoint(custom_af_ht_path, overwrite=True)\n \n af_ht_f = af_ht.filter(hl.all(lambda x: hl.is_defined(af_ht.af[x]), \n hl.literal(ancestries)))\n af_ht_f = af_ht_f.filter(hl.all(lambda x: (af_ht_f.af[x] >= args.maf) & \\\n (af_ht_f.af[x] <= (1-args.maf)), \n hl.literal(ancestries)))\n mt_maf = 
mt_nonrel.semi_join_rows(af_ht_f)\n\n # compute phwe, remove those with p < 1e-7\n mt_nonrel_hwe = mt_maf.annotate_rows(**{'hwe_' + anc.lower(): \n hl.agg.filter(mt_maf.pop == anc, \n hl.agg.hardy_weinberg_test(mt_maf.GT)) \n for anc in ancestries})\n ancestries_tf = [mt_nonrel_hwe['hwe_' + anc.lower()].p_value >= PHWE for anc in ancestries]\n mt_nonrel_hwe = mt_nonrel_hwe.filter_rows(hl.all(lambda x: x, ancestries_tf))\n\n # remove MHC region\n mt_filt = hl.filter_intervals(mt_nonrel_hwe, [MHC], keep=False)\n\n if args.checkpoint:\n mt_filt = mt_filt.checkpoint(geno_filtered_path, overwrite=True)\n\n logging.info('Filtering complete.')\n\n if args.verbose:\n print('Post-filtering per-population genotype Ns:')\n _ = print_pop_Ns(mt_filt)\n\n # output results as plink files\n output_files = get_geno_split_names(ancestries, dictout=True, plinkprefix=True)\n output_files_plink = get_geno_split_names(ancestries, dictout=True, plinkprefix=False)\n for anc in ancestries:\n plinkfiles = output_files_plink[anc]\n if all([hl.hadoop_exists(path_geno + x) for x in plinkfiles]):\n print('PLINK files for ' + anc + ' already exist. Skipping generation...')\n else:\n logging.info('Saving PLINK files for ' + anc + '...')\n mt_filt_anc = mt_filt.filter_cols(mt_filt.pop == anc)\n hl.export_plink(mt_filt_anc, path_geno+output_files[anc], ind_id = mt_filt_anc.s)\n\n # import LD score information & arrange to have the same order as genotype data\n # then produce MAF bins and LD score bins\n logging.info('Constructing annotation files...')\n snps_out = mt_filt.rows().select('varid')\n output_files_annot = get_annot_split_names(ancestries, dictout=True, n_annot=nbins)\n output_files_annot_noextn = get_annot_split_names(ancestries, dictout=True, n_annot=nbins, suffix_incl=False)\n for anc in ancestries:\n ht_anc = hl.read_table(get_ld_score_ht_path(pop=anc))\n ht_anc_expr = ht_anc[snps_out.key]\n this_tab = snps_out.annotate(ld_score = ht_anc_expr.ld_score,\n af = ht_anc_expr.AF)\n this_tab = this_tab.annotate(maf = 0.5 - hl.abs(0.5 - this_tab.af))\n LD_bin_anc = make_quantiles(this_tab.ld_score, nq=args.num_ld_bins, \n approx=args.approx_quantiles, ht=this_tab)\n this_tab = this_tab.select(maf_bin = discretize(this_tab.maf, MAFRANGE),\n LD_bin = discretize(this_tab.ld_score, LD_bin_anc))\n exploded_tab = explode_bins(this_tab, ['maf_bin','LD_bin'], args.verbose)\n\n # back up annotations as a hail table\n exploded_tab.write(path_annot+'ht/'+output_files_annot_noextn[anc] + '.ht',\n overwrite=True)\n\n # output\n _write_flat_without_key(exploded_tab, path_annot+output_files_annot[anc], \n delimiter=' ', header=False)\n logging.info('Annotation files created.')", "title": "" }, { "docid": "5ee153d04174bfe0caebdc2e5094f64c", "score": "0.48386616", "text": "def run(CONFIG, fname_spike_train, fname_templates):\n\n logger = logging.getLogger(__name__)\n\n logger.info('GENERATTING PHY files')\n\n # set root directory for output\n root_dir = CONFIG.data.root_folder\n fname_standardized = os.path.join(os.path.join(os.path.join(\n root_dir,'tmp'),'preprocess'),'standardized.bin')\n\n #\n n_channels = CONFIG.recordings.n_channels\n n_times = CONFIG.recordings.sampling_rate//1000 * CONFIG.recordings.spike_size_ms +1\n\n # output folder\n output_directory = os.path.join(root_dir, 'phy')\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n \n # pca # of components \n n_components = 3\n\n # cluster id for each spike; [n_spikes]\n #spike_train = np.load(root_dir + '/tmp/spike_train.npy')\n #spike_train = 
np.load(root_dir + '/tmp/final_deconv/deconv/spike_train.npy')\n spike_train = np.load(fname_spike_train)\n spike_clusters = spike_train[:,1]\n np.save(os.path.join(root_dir,'phy','spike_clusters.npy'), spike_clusters)\n\n # spike times for each spike: [n_spikes]\n spike_times = spike_train[:,0]\n np.save(os.path.join(root_dir,'phy','spike_times.npy'), spike_times)\n\n # save templates; not sure why this is required?!\n np.save(os.path.join(root_dir,'phy','spike_templates.npy'), spike_clusters)\n\n # save geometry\n chan_pos = np.loadtxt(os.path.join(root_dir,CONFIG.data.geometry))\n np.save(os.path.join(root_dir,'phy','channel_positions.npy'), chan_pos)\n\n # sequential channel order\n channel_map = np.arange(chan_pos.shape[0])\n np.save(os.path.join(root_dir,'phy','channel_map.npy'), channel_map)\n\n # pick largest SU channels for each unit; [n_templates x n_channels_loc]; \n # gives # of channels of the corresponding columns in pc_features, for each spike.\n n_idx_chans = 7\n templates = np.load(fname_templates).transpose(1,2,0)\n print (\"PHY loaded templates: \", templates.shape)\n ptps = templates.ptp(0)\n pc_feature_ind = ptps.argsort(0)[::-1][:n_idx_chans].T\n np.save(os.path.join(root_dir,'phy','pc_feature_ind.npy'),pc_feature_ind)\n\n # \n n_channels = templates.shape[1]\n n_times = templates.shape[0]\n units = np.arange(templates.shape[2])\n\n # unit templates [n_units, times, n_chans]\n temps = templates.transpose(2,0,1)\n np.save(os.path.join(root_dir,\"phy\",\"templates.npy\"),temps)\n\n # *********************************************\n # ************** SAVE params.py file **********\n # *********************************************\n fname_out = os.path.join(output_directory, 'params.py')\n fname_bin = os.path.join(root_dir,CONFIG.data.recordings)\n #\n f= open(fname_out,\"w+\")\n f.write(\"dat_path = '%s'\\n\" % fname_bin)\n f.write(\"n_channels_dat = %i\\n\" % n_channels)\n f.write(\"dtype = 'int16'\\n\")\n f.write(\"offset = 0\\n\")\n f.write(\"sample_rate = %i\\n\" % CONFIG.recordings.sampling_rate)\n f.write(\"hp_filtered = False\")\n f.close()\n \n # *********************************************\n # ************** GET PCA OBJECTS **************\n # *********************************************\n fname_out = os.path.join(output_directory,'pc_objects.npy')\n if os.path.exists(fname_out)==False:\n pc_projections = get_pc_objects(root_dir, pc_feature_ind, n_channels,\n n_times, units, n_components, CONFIG, spike_train)\n np.save(fname_out, pc_projections)\n else:\n pc_projections = np.load(fname_out,allow_pickle=True)\n \n \n # *********************************************\n # ******** GENERATE PC PROJECTIONS ************\n # *********************************************\n fname_out = os.path.join(output_directory, 'pc_features.npy')\n if os.path.exists(fname_out)==False:\n pc_projections = compute_pc_projections(root_dir, templates, spike_train, \n pc_feature_ind, fname_standardized, n_channels, \n n_times, units, pc_projections, n_idx_chans,\n n_components, CONFIG) \n \n \n # *********************************************\n # ******** GENERATE SIMILARITY MATRIX *********\n # *********************************************\n print (\"... 
making similarity matrix\")\n # Cat: TODO: better similarity algorithms/metrics available in YASS\n\n\n similar_templates = np.zeros((temps.shape[0],temps.shape[0]),'float32')\n \n fname_out = os.path.join(os.path.join(root_dir,'phy'),'similar_templates.npy')\n if os.path.exists(fname_out)==False:\n\n if CONFIG.resources.multi_processing==False:\n for k in tqdm(range(temps.shape[0])):\n for p in range(k,temps.shape[0]):\n temp1 = temps[k].T.ravel()\n results=[]\n for z in range(-1,2,1):\n temp_temp = np.roll(temps[p].T,z,axis=0).ravel()\n results.append(cos_sim(temps[k].T.ravel(),temp_temp))\n\n similar_templates[k,p] = np.max(results)\n else:\n units_split = np.array_split(np.arange(temps.shape[0]), CONFIG.resources.n_processors)\n res = parmap.map(similarity_matrix_parallel, units_split, temps, similar_templates,\n processes=CONFIG.resources.n_processors,\n pm_pbar=True)\n \n print (res[0].shape)\n similar_templates = res[0]\n for k in range(1, len(res),1):\n similar_templates+=res[k]\n \n similar_templates = symmetrize(similar_templates)\n np.save(fname_out,similar_templates)\n\n return", "title": "" }, { "docid": "725377d6f64eb5dca549fad507ddf858", "score": "0.4838503", "text": "def test_srnaseq_annotation_genomic_bam(self):\n with make_workdir():\n clcode = [\"mirtop\",\n \"gff\",\n \"--sps\", \"hsa\", \"--add-extra\", \"--genomic\",\n \"--hairpin\", \"../../data/examples/annotate/hairpin.fa\",\n \"--gtf\", \"../../data/db/mirbase/hsa.gff3\",\n \"-o\", \"test_out_mirs\",\n \"../../data/examples/annotate/hsa-let-7a-nm.sam\"]\n print(\"\")\n print(\" \".join(clcode))\n subprocess.check_call(clcode)", "title": "" }, { "docid": "3de48da6f8a9a910f68c71e70ef9db47", "score": "0.48355767", "text": "def __init__(\n self,\n pop_size=100,\n mu=0,\n sigma=0.15,\n indpb=0.06,\n tournsize=4,\n cxpb=0.9,\n mutpb=0.45,\n ngen=200,\n interactive=True,\n show_plotting=True,\n ):\n self.awg = devices.TektronixAWG7122B(\"GPIB1::1::INSTR\")\n self.osc = devices.Agilent86100C(\"GPIB1::7::INSTR\")\n\n # setup oscilloscope for measurement\n self.osc.set_acquire(average=True, count=30, points=1350)\n self.osc.set_timebase(position=2.4e-8, range_=30e-9)\n self.T = np.linspace(start=0, stop=30e-9, num=1350, endpoint=False)\n\n # find rise-time ref values\n self.osc.set_timebase(position=2.4e-8, range_=15e-9)\n time.sleep(1)\n self.awg.send_waveform([-ss_amplitude] * 120 + [ss_amplitude] * 120)\n time.sleep(6)\n res = self.osc.measurement(1)\n self.osc.set_timebase(position=2.4e-8, range_=30e-9)\n time.sleep(1)\n self.ss_low = res[0]\n self.ss_high = res[-1]\n\n creator.create(\"Fitness\", base.Fitness, weights=(-1.0,))\n creator.create(\"Individual\", list, fitness=creator.Fitness)\n\n self.toolbox = base.Toolbox()\n initial = lambda: [random.uniform(-1, 0) for _ in range(30)] + [\n random.uniform(0, 1) for _ in range(90)\n ]\n # fmt: off\n self.toolbox.register(\"ind\", tools.initIterate, creator.Individual, initial)\n self.toolbox.register(\"population\", tools.initRepeat, list, self.toolbox.ind, n=pop_size)\n self.toolbox.register(\"evaluate\", self.SOA_fitness)\n self.toolbox.register(\"mate\", tools.cxTwoPoint)\n self.toolbox.register(\"mutate\", tools.mutGaussian, mu=mu, sigma=sigma, indpb=indpb)\n self.toolbox.register(\"select\", tools.selTournament, tournsize=tournsize)\n self.toolbox.register(\"eaSimple\", eaSimple, cxpb=cxpb, mutpb=mutpb, ngen=ngen, interactive=interactive, show_plotting=show_plotting)\n # fmt: on", "title": "" }, { "docid": "6f9201bc492590047af0392688ff6dcd", "score": "0.48348257", 
"text": "def _write_samples(self, samples, *args):\n now = time.gmtime()\n sec_of_day = lambda x: 3600*x.tm_hour + 60*x.tm_min + x.tm_sec\n dt_reached = self._options.dt != 0 and self._start_ts is not None and sec_of_day(now)//self._options.dt != sec_of_day(self._start_ts)//self._options.dt\n if self._start_ts is None or (self._options.filename == '' and dt_reached):\n self._start_ts = now\n self._start_time = time.time()\n # Write a static WAV header\n with open(self._get_output_filename(), 'wb') as fp:\n _write_wav_header(fp, 100, int(self._output_sample_rate), self._num_channels, self._options.is_kiwi_wav)\n if self._options.is_kiwi_tdoa:\n # NB for TDoA support: MUST be a print (i.e. not a logging.info)\n print(\"file=%d %s\" % (self._options.idx, self._get_output_filename()))\n else:\n logging.info(\"Started a new file: %s\" % self._get_output_filename())\n with open(self._get_output_filename(), 'ab') as fp:\n if self._options.is_kiwi_wav:\n gps = args[0]\n self._gnss_performance.analyze(self._get_output_filename(), gps)\n fp.write(struct.pack('<4sIBBII', b'kiwi', 10, gps['last_gps_solution'], 0, gps['gpssec'], gps['gpsnsec']))\n sample_size = samples.itemsize * len(samples)\n fp.write(struct.pack('<4sI', b'data', sample_size))\n # TODO: something better than that\n samples.tofile(fp)\n self._update_wav_header()", "title": "" }, { "docid": "db6f324c453caa2f5bb20933cd6c34a8", "score": "0.48200825", "text": "def sample_func(args, vmf, mvg, prior, req_samples):\n prior_samples = prior.sample(req_samples)\n\n # Sample the chirp mass and mass_ratio from the mvg\n if 'chirp_mass' not in args.set_to_true_value:\n masses = mvg.sample((10 * req_samples,)).squeeze(1)\n masses, _ = torch.sort(masses, dim=1)\n masses = masses[masses[:, 0] > args.mass_minimum]\n masses = masses[masses[:, 1] < args.mass_maximum]\n masses = masses[:req_samples]\n prior_samples['mass_1'] = masses[:, 1].numpy()\n prior_samples['mass_2'] = masses[:, 0].numpy()\n prior_samples['mass_ratio'] = bilby.gw.conversion.component_masses_to_mass_ratio(masses[:, 1], masses[:, 0]).numpy()\n prior_samples['chirp_mass'] = bilby.gw.conversion.component_masses_to_chirp_mass(masses[:, 1], masses[:, 0]).numpy()\n else:\n total_mass = bilby.gw.conversion.chirp_mass_and_mass_ratio_to_total_mass(prior_samples['chirp_mass'], prior_samples['mass_ratio'])\n prior_samples['mass_1'], prior_samples['mass_2'] = bilby.gw.conversion.total_mass_and_mass_ratio_to_component_masses(prior_samples['mass_ratio'], total_mass)\n\n # Sample the vmf for the sky coordinates\n xyz = vmf.sample(req_samples)\n prior_samples['dec'], prior_samples['ra'] = utils_train.xyz2decra(xyz)\n prior_samples['dec'], prior_samples['ra'] = prior_samples['dec'].numpy(), prior_samples['ra'].numpy()\n prior_samples['x'] = xyz[:, 0].numpy()\n prior_samples['y'] = xyz[:, 1].numpy()\n prior_samples['z'] = xyz[:, 2].numpy()\n\n # Convert to pandas dataframe\n return pd.DataFrame().from_dict(prior_samples), xyz", "title": "" }, { "docid": "37f43987e30370086d58b9769eac774f", "score": "0.48199853", "text": "def _post(self, *args, **kwargs):\n return _blocks_swig5.sample_and_hold_ii_sptr__post(self, *args, **kwargs)", "title": "" }, { "docid": "229d8748801302fd23f4b0a3e42d54b4", "score": "0.4817984", "text": "def make_pacbio6fm_gene_grouped(iso_annot, ensg_gene, sample_fasta, output_fasta):\n # get associated gene for each pb acc, for final write-out (below)\n # pb_gene is pb_acc -> gene dictionary\n df = pd.read_table(iso_annot)[['isoform', 'associated_gene']]\n df2 = 
pd.read_table(ensg_gene, header=None)\n df2.columns = ['associated_gene', 'gene']\n df3 = pd.merge(df, df2, on='associated_gene', how='left').fillna('NOVEL')[['isoform', 'gene']]\n pb_gene = pd.Series(df3.gene.values, index=df3.isoform).to_dict()\n\n gene_seqs = defaultdict(lambda: set()) # gene -> pacbio sequences as list\n\n for rec in SeqIO.parse(sample_fasta, 'fasta'):\n pb_id = rec.id.split('|')[0] \n gene = pb_gene[pb_id]\n F1 = rec.seq.translate()\n F2 = rec.seq[1:].translate()\n F3 = rec.seq[2:].translate()\n R1 = rec.seq.reverse_complement().translate()\n R2 = rec.seq.reverse_complement()[1:].translate()\n R3 = rec.seq.reverse_complement()[2:].translate()\n translations = [F1, F2, F3, R1, R2, R3]\n for tr in translations:\n orfs = set(return_all_orfs(tr))\n gene_seqs[gene].update(orfs)\n\n\n # write out fasta file in which entries represent each gene and the\n # pacbio-derived protein \"space\"\n with open(output_fasta, 'w') as ofile:\n for gene, orfs in gene_seqs.items():\n ofile.write('>' + gene + '\\n' + '-'.join(orfs) + '\\n')", "title": "" }, { "docid": "0f344ca757507722cefe5bbdbe8076e6", "score": "0.4816679", "text": "def Gen(self, *args):\n return _snap.TFltVQ_Gen(self, *args)", "title": "" }, { "docid": "0e86e638f773fd8a6404e5930acd8a07", "score": "0.48112854", "text": "def get_vcf_genotype(ref, alt, gt=None, format_=None, sample=None):\n\n gt = gt.replace('/', '|')\n\n ref_alts = [ref] + alt.split(',')\n\n return [ref_alts[int(a_gt)] for a_gt in gt.split('|')]", "title": "" }, { "docid": "320f65498b246235d06649410292540b", "score": "0.4807533", "text": "def start(input, phased_proximal_variants_vcf, samplename, alleles, epitope_lengths, prediction_algorithms, output,\n peptide_sequence_length, net_chop_method, netmhc_stab, pass_only, top_score_metric,\n binding_threshold, allele_specific_cutoffs, minimum_fold_change,\n normal_cov, tdna_cov, trna_cov, normal_vaf, tdna_vaf, trna_vaf, maximum_transcript_support_level,\n expn_val, net_chop_threshold, fasta_size, iedb_retries, iedb_install_dir,\n downstream_sequence_length, keep_tmp_files):\n if type(epitope_lengths) == list:\n epitope_lengths = ','.join(str(item) for item in epitope_lengths)\n if type(alleles) == list:\n alleles = ','.join(str(item) for item in alleles)\n if type(prediction_algorithms) == list:\n prediction_algorithms = ','.join(str(item) for item in prediction_algorithms)\n command = [\n 'pvacseq',\n 'run',\n input,\n samplename,\n alleles\n ]\n for algo in prediction_algorithms.split(','):\n command.append(algo)\n command += [\n output,\n '-e', epitope_lengths,\n '-l', str(peptide_sequence_length),\n '-m', top_score_metric,\n '-b', str(binding_threshold),\n '-c', str(minimum_fold_change),\n '--normal-cov', str(normal_cov),\n '--tdna-cov', str(tdna_cov),\n '--trna-cov', str(trna_cov),\n '--normal-vaf', str(normal_vaf),\n '--tdna-vaf', str(tdna_vaf),\n '--trna-vaf', str(trna_vaf),\n '--expn-val', str(expn_val),\n '--maximum-transcript-support-level', str(maximum_transcript_support_level),\n '-s', str(fasta_size),\n '-r', str(iedb_retries),\n '-d', str(downstream_sequence_length)\n ]\n if len(net_chop_method):\n command += [\n '--net-chop-method', net_chop_method,\n '--net-chop-threshold', str(net_chop_threshold)\n ]\n if netmhc_stab:\n command.append('--netmhc-stab')\n if allele_specific_cutoffs:\n command.append('--allele-specific-binding-thresholds')\n if keep_tmp_files:\n command.append('-k')\n if pass_only:\n command.append('--pass-only')\n if len(iedb_install_dir):\n command += [\n 
'--iedb-install-directory',\n iedb_install_dir\n ]\n if len(phased_proximal_variants_vcf):\n command +=[\n '--phased-proximal-variants-vcf',\n phased_proximal_variants_vcf,\n ]\n\n # stdout and stderr from the child process will be directed to this file\n logfile = os.path.join(output, 'pVAC-Seq.log')\n with current_app.config['storage']['synchronizer']:\n data = current_app.config['storage']['loader']()\n data['processid']+=1\n os.makedirs(os.path.dirname(logfile), exist_ok = True)\n current_app.config['storage']['children'][data['processid']] = subprocess.Popen(\n command,\n stdout=open(logfile, 'w'), # capture stdout in the logfile\n stderr=subprocess.STDOUT,\n # isolate the child in a new process group\n # this way it will remainin running no matter what happens to this process\n preexec_fn=os.setpgrp\n )\n # Store some data about the child process\n data.addKey(\n 'process-%d'%(data['processid']),\n {\n 'command': \" \".join([quote(token) for token in command]),\n 'logfile':logfile,\n 'pid':current_app.config['storage']['children'][data['processid']].pid,\n 'status': 0,\n 'files':{},\n 'output':os.path.abspath(output)\n },\n current_app.config['files']['processes']\n )\n if 'reboot' not in data:\n data.addKey(\n 'reboot',\n current_app.config['reboot'],\n current_app.config['files']['processes']\n )\n data.save()\n return data['processid']", "title": "" }, { "docid": "32aca2cc5a03f4210bd1b6a15c49a3a1", "score": "0.48062894", "text": "def _cmd_export_vcf(args):\n segments = _CNA.read(args.segments)\n is_sample_female = segments.guess_xx(args.male_reference, verbose=False)\n if args.gender:\n is_sample_female_given = (args.gender in [\"f\", \"female\"])\n if is_sample_female != is_sample_female_given:\n print(\"Sample gender specified as\", args.gender,\n \"but chrX copy number looks like\",\n \"female\" if is_sample_female else \"male\",\n file=sys.stderr)\n is_sample_female = is_sample_female_given\n print(\"Treating sample gender as\",\n \"female\" if is_sample_female else \"male\",\n file=sys.stderr)\n\n header, body = export.export_vcf(segments, args.ploidy, args.male_reference,\n is_sample_female, args.sample_id)\n core.write_text(args.output, header, body)", "title": "" }, { "docid": "2803e591219e3050827341907ca91038", "score": "0.48055786", "text": "def load_gi_example(frac, data_path, side_path, cluster=None):\n path_dataset = data_path\n side_dataset = side_path\n\n M = pd.read_csv(path_dataset,index_col=0)\n side = pd.read_csv(side_dataset,index_col=0)\n\n if cluster:\n linkage_ = linkage(M, method=cluster) #average, ward, single, complete, centroid, weighted, median\n dendrogram_ = dendrogram(linkage_, no_plot=True)\n clust_index = dendrogram_['leaves']\n M = M.iloc[clust_index,clust_index]\n side = side.iloc[clust_index,:]\n\n #Two functions for sampling from the phenotype matrix\n random.seed(30)\n\n #Sample a percentage of the genes\n M_train, M_test, S_train, S_test = sample_mask(M,frac,use_upper=True)\n M_train = M_train.values\n M_test = M_test.values\n S_train = S_train.values\n S_test = S_test.values\n\n return [M,S_train,S_test,M_train,M_test],side", "title": "" }, { "docid": "6c52d4a82dd814dc50ad8678460fa1ac", "score": "0.4793248", "text": "def write_output_header(output_vcf, sample_list, contig_list, hotspot_source=None):\n # Version is set to VCF4.2 on creation\n # Add reference? date?\n\n # Hotspot source as of 01/2021 was\n # Memorial Sloan Kettering Cancer Center\n # based on Chang et al. 
2017; see https://www.cancerhotspots.org\n if hotspot_source:\n hotspot_string = ' as defined by %s' % hotspot_source\n else:\n hotspot_string = ''\n\n output_vcf.header.info.add('MQ',1,'Integer',\n 'RMS mapping quality (normal sample)')\n output_vcf.header.info.add('MQ0',1,'Integer',\n 'Number of MAPQ=0 reads (normal sample)')\n output_vcf.header.info.add('CAL','.','String',\n 'List of callers making this call')\n output_vcf.header.info.add(\"HotSpotAllele\",'A','Integer',\n 'Included by exception to consensus rule due to hotspot status%s' % hotspot_string)\n output_vcf.header.formats.add('GT', '1', 'String',\n 'Consensus genotype')\n output_vcf.header.formats.add('AD', 'R', 'Integer',\n 'Consensus depths for the ref and alt alleles in the order listed')\n output_vcf.header.formats.add('AF', 'A', 'Float',\n 'Consensus allele frequency')\n output_vcf.header.formats.add('DP', '1', 'Integer',\n 'Consensus depth')\n output_vcf.header.formats.add('GTC', '.', 'String',\n 'Genotypes for %s' % FORMAT_JOIN.join(CALLER_NAMES))\n output_vcf.header.formats.add('GT_STATUS', '.', 'String',\n (\"Degree of unanimity of genotype: 'unanimous', 'majority', or 'deadlock' if\"\n \" equally supported by individual callers\"))\n output_vcf.header.formats.add('ADC', '.', 'String',\n 'Allele depths for %s' % FORMAT_JOIN.join(CALLER_NAMES))\n output_vcf.header.formats.add('DPC', '.', 'String',\n 'Read depths for %s' % FORMAT_JOIN.join(CALLER_NAMES))\n output_vcf.header.formats.add('AFC', '.', 'String',\n 'Allele frequencies for %s' % FORMAT_JOIN.join(CALLER_NAMES))\n output_vcf.header.formats.add('ADR', 'R', 'Integer',\n 'Difference between highest and lowest AD')\n output_vcf.header.formats.add('AFR', 'A', 'Float',\n 'Difference between highest and lowest AF')\n output_vcf.header.formats.add('DPR', '1', 'Integer',\n 'Difference between highest and lowest DP')\n for contig in sorted(contig_list, key=lambda x: x.id):\n if contig.name in ALLOWED_CHROMS:\n output_vcf.header.contigs.add(contig.name, contig.length)\n for sample in sample_list:\n output_vcf.header.add_sample(sample)", "title": "" }, { "docid": "7ecfc5509402a2b7ba9067338a299521", "score": "0.47866887", "text": "def subsample_genomes(options):\n\n\targ = '-c' if options.subsample_genomes.get('picloud', False) else ''\n\targ += ' -f' if options.subsample_genomes.get('forcenew', False) else ''\n\n\n\tfor org in GENOMES:\n\t\t#only do if missing or FORCING\n\t\tsh('python subsample.py %(arg)s %(name)s' % {'arg':arg, 'name':org})", "title": "" }, { "docid": "b7f4e194499828255a8348dc932ea074", "score": "0.4781615", "text": "def main(file, psf, outdir):\n # Setup a parser to take command line arguments\n\n config = desc.imsim.read_config(None) \n\n logger = desc.imsim.get_logger(\"INFO\")\n\n # Get the number of rows to read from the instance file. Use\n # default if not specified.\n \n numRows = None\n sensor = None\n # The PhoSim instance file contains both pointing commands and\n # objects. 
The parser will split them and return a both phosim\n # command dictionary and a dataframe of objects.\n commands, phosim_objects = \\\n desc.imsim.parsePhoSimInstanceFile(file, numRows)\n\n phosim_objects = \\\n desc.imsim.validate_phosim_object_list(phosim_objects).accepted\n\n # Build the ObservationMetaData with values taken from the\n # PhoSim commands at the top of the instance file.\n obs_md = desc.imsim.phosim_obs_metadata(commands)\n #print (commands)\n #obs_md.OpsimMetaData['altitude' ] = 20\n camera = LsstSimMapper().camera\n\n # Sub-divide the source dataframe into stars and galaxies.\n if sensor is not None:\n # Trim the input catalog to a single chip.\n phosim_objects['chipName'] = \\\n chipNameFromRaDec(phosim_objects['raICRS'].values,\n phosim_objects['decICRS'].values,\n parallax=phosim_objects['parallax'].values,\n camera=camera, obs_metadata=obs_md,\n epoch=2000.0)\n\n starDataBase = \\\n phosim_objects.query(\"galSimType=='pointSource' and chipName=='%s'\"\n % sensor)\n galaxyDataBase = \\\n phosim_objects.query(\"galSimType=='sersic' and chipName=='%s'\"\n % sensor)\n else:\n starDataBase = \\\n phosim_objects.query(\"galSimType=='pointSource'\")\n galaxyDataBase = \\\n phosim_objects.query(\"galSimType=='sersic'\")\n\n # Simulate the objects in the Pandas Dataframes.\n\n # First simulate stars\n phoSimStarCatalog = desc.imsim.ImSimStars(starDataBase, obs_md)\n phoSimStarCatalog.photParams = desc.imsim.photometricParameters(commands)\n\n # Add noise and sky background\n # The simple code using the default lsst-GalSim interface would be:\n #\n # PhoSimStarCatalog.noise_and_background = ExampleCCDNoise(addNoise=True,\n # addBackground=True)\n #\n # But, we need a more realistic sky model and we need to pass more than\n # this basic info to use Peter Y's ESO sky model.\n # We must pass obs_metadata, chip information etc...\n phoSimStarCatalog.noise_and_background = ESOSkyModel(obs_md, addNoise=True,\n addBackground=True)\n\n # Add a PSF.\n if psf.lower() == \"doublegaussian\":\n # This one is taken from equation 30 of\n # www.astro.washington.edu/users/ivezic/Astr511/LSST_SNRdoc.pdf .\n #\n # Set seeing from self.obs_metadata.\n phoSimStarCatalog.PSF = \\\n SNRdocumentPSF(obs_md.OpsimMetaData['FWHMgeom'])\n elif psf.lower() == \"kolmogorov\":\n # This PSF was presented by David Kirkby at the 23 March 2017\n # Survey Simulations Working Group telecon\n #\n # https://confluence.slac.stanford.edu/pages/viewpage.action?spaceKey=LSSTDESC&title=SSim+2017-03-23\n\n # equation 3 of Krisciunas and Schaefer 1991\n airmass = 1.0/np.sqrt(1.0-0.96*(np.sin(0.5*np.pi-obs_md.OpsimMetaData['altitude']))**2)\n\n phoSimStarCatalog.PSF = \\\n Kolmogorov_and_Gaussian_PSF(airmass=airmass,\n rawSeeing=obs_md.OpsimMetaData['rawSeeing'],\n band=obs_md.bandpass)\n else:\n raise RuntimeError(\"Do not know what to do with psf model: \"\n \"%s\" % psf)\n\n phoSimStarCatalog.camera = camera\n phoSimStarCatalog.get_fitsFiles()\n\n # Now galaxies\n phoSimGalaxyCatalog = desc.imsim.ImSimGalaxies(galaxyDataBase, obs_md)\n phoSimGalaxyCatalog.copyGalSimInterpreter(phoSimStarCatalog)\n phoSimGalaxyCatalog.PSF = phoSimStarCatalog.PSF\n phoSimGalaxyCatalog.noise_and_background = phoSimStarCatalog.noise_and_background\n phoSimGalaxyCatalog.get_fitsFiles()\n\n # Write out the fits files\n outdir = outdir\n if not os.path.isdir(outdir):\n os.makedirs(outdir)\n prefix = config['persistence']['eimage_prefix']\n phoSimGalaxyCatalog.write_images(nameRoot=os.path.join(outdir, prefix) +\n str(commands['obshistid']))", 
"title": "" }, { "docid": "b191b4165355f4f65317b908367af0d9", "score": "0.47794884", "text": "def run_gixs_simulate(t=0.5, user_name=\"XXX\"):\n # define names of samples on sample bar\n sample_list = [\n \"Sample1\",\n \"Sample2\",\n \"Sample3\",\n ]\n # define piezo-x-postion\n x_list = [\n -30000,\n -24000,\n -10000,\n ]\n assert len(x_list) == len(sample_list), f\"Sample name/position list is borked\"\n # inc_angles = np.array([ 0.05, 0.08, 0.1, 0.12, 0.2, 0.3, ]) # incident angles\n inc_angles = np.array(\n [\n 0.05,\n 0.1,\n 0.15,\n ]\n ) # incident angles\n # waxs_angle_array = np.linspace(0, 84, 15)\n waxs_angle_array = np.linspace(\n 0, 19.5, 4\n ) # q=4*3.14/0.77*np.sin((max angle+3.5)/2*3.14159/180)\n # if 12, 3: up to q=2.199\n # if 18, 4: up to q=3.04\n # if 24, 5: up to q=3.87\n # if 30, 6: up to q=4.70\n # 52/6.5 +1 =8\n max_waxs_angle = np.max(waxs_angle_array)\n inverse_angle = False\n for x, sample in zip(x_list, sample_list): # loop over samples on bar\n # yield from bps.mv(piezo.x, x) #move to next sample\n # yield from alignement_gisaxs(0.1) #run alignment routine\n th_meas = inc_angles # + piezo.th.position #np.array([0.10 + piezo.th.position, 0.20 + piezo.th.position])\n th_real = inc_angles\n # det_exposure_time(t,t)\n if inverse_angle:\n Twaxs_angle_array = waxs_angle_array[::-1]\n else:\n Twaxs_angle_array = waxs_angle_array\n for waxs_angle in Twaxs_angle_array: # loop through waxs angles\n # yield from bps.mv(waxs, waxs_angle)\n if waxs_angle == max_waxs_angle:\n dets = [\n \"pil300KW\",\n \"pil1M\",\n ] # waxs, maxs, saxs = [pil300KW, rayonix, pil1M]\n print(\"Meausre both saxs and waxs here for w-angle=%s\" % waxs_angle)\n else:\n dets = [\"pil300KW\"]\n for i, th in enumerate(th_meas): # loop over incident angles\n # yield from bps.mv(piezo.th, th)\n x_meas = x + (1 + i) * 200 # move the x-position\n # yield from bps.mv(piezo.x, x_meas)\n if inverse_angle:\n name_fmt = \"{sample}_{th:5.4f}deg_waxsN{waxs_angle:05.2f}_x{x}_expt{t}s_sid{scan_id:08d}\"\n else:\n name_fmt = \"{sample}_{th:5.4f}deg_waxsP{waxs_angle:05.2f}_x{x}_expt{t}s_sid{scan_id:08d}\"\n sample_name = name_fmt.format(\n sample=sample,\n th=th_real[i],\n waxs_angle=waxs_angle,\n x=x_meas,\n t=t,\n scan_id=123,\n )\n # sample_id(user_name=user_name, sample_name=sample_name)\n print(f\"\\n\\t=== Sample: {sample_name} ===\\n\")\n # yield from bp.count(dets, num=1)\n inverse_angle = not inverse_angle\n # sample_id(user_name='test', sample_name='test')\n # det_exposure_time(0.5)", "title": "" }, { "docid": "bd4b66b827c95741ef5dd18a38f1094d", "score": "0.47780436", "text": "def create_example(self, data_sample):\n\n \traise NotImplementedError", "title": "" }, { "docid": "ae3fe8baba1fd6e16715a10efc48eabd", "score": "0.4776441", "text": "def __init__(self, gtfpath):\n\n if gtfpath.endswith('.gtf.gz'):\n opener = gzip.open(gtfpath, 'rt')\n else:\n opener = open(gtfpath, 'r')\n\n self.genes = []\n with opener as gtf:\n for row in gtf:\n row = row.strip().split('\\t')\n\n if row[0][0] == '#': continue # skip header\n\n chrom = row[0]\n annot_type = row[2]\n start_pos = int(row[3])\n end_pos = int(row[4])\n strand = row[6]\n\n attributes = defaultdict(list)\n for a in row[8].replace('\"', '').replace('_biotype', '_type').split(';')[:-1]:\n kv = a.strip().split(' ')\n if kv[0]!='tag':\n attributes[kv[0]] = kv[1]\n else:\n attributes['tags'].append(kv[1])\n\n if annot_type == 'gene':\n assert 'gene_id' in attributes\n if 'gene_name' not in attributes:\n attributes['gene_name'] = attributes['gene_id']\n gene_id 
= attributes['gene_id']\n g = Gene(gene_id, attributes['gene_name'], attributes['gene_type'],\n chrom, strand, start_pos, end_pos)\n g.source = row[1]\n g.phase = row[7]\n g.attributes_string = row[8].replace('_biotype', '_type')\n self.genes.append(g)\n\n elif annot_type == 'transcript':\n assert 'transcript_id' in attributes\n if 'transcript_name' not in attributes:\n attributes['transcript_name'] = attributes['transcript_id']\n transcript_id = attributes['transcript_id']\n t = Transcript(attributes.pop('transcript_id'), attributes.pop('transcript_name'),\n attributes.pop('transcript_type'), g, start_pos, end_pos)\n t.attributes = attributes\n g.transcripts.append(t)\n\n elif annot_type == 'exon':\n if 'exon_id' in attributes:\n e = Exon(attributes['exon_id'], attributes['exon_number'], t, start_pos, end_pos)\n else:\n e = Exon(str(len(t.exons)+1), len(t.exons)+1, t, start_pos, end_pos)\n t.exons.append(e)\n\n if len(self.genes) % 1000 == 0:\n print(f'\\rParsing GTF: {len(self.genes)} genes processed', end='')\n print(f'\\rParsing GTF: {len(self.genes)} genes processed')\n\n self.genes = np.array(self.genes)", "title": "" } ]
c565d045277814380a3e3596a51179dd
Given the name of a senator, compares his/her voting record to the voting records of all senators whose names are in sen_set, computing a dot product for each, and then returns the average dot product.
[ { "docid": "5fa5a3767610b7c38df1320b6ce2deb1", "score": "0.7502001", "text": "def find_average_similarity(sen, sen_set, voting_dict):\n sum = 0\n for s in sen_set:\n sum += policy_compare(sen, s, voting_dict)\n avg = sum / len(sen_set)\n return avg", "title": "" } ]
[ { "docid": "0856858641cf367033ed245076e621b2", "score": "0.6240421", "text": "def distance_sen_dem_rep(votes, senator_name):\n # Get democrats and republicans lists\n democrat_names_list = list(votes[votes[\"party\"] == \"D\"].name)\n republican_names_list = list(votes[votes[\"party\"] == \"R\"].name)\n\n # Computing the mean distance between Angus and these two party\n democrat_distances = []\n republican_distances = []\n for democrat in democrat_names_list:\n democrat_distances.append(calculate_distance_name(votes, democrat, senator_name))\n for republican in republican_names_list:\n republican_distances.append(calculate_distance_name(votes, republican, senator_name))\n democrat_mean_distance = np.mean(democrat_distances)\n republican_mean_distance = np.mean(republican_distances)\n\n return democrat_mean_distance, republican_mean_distance", "title": "" }, { "docid": "b808ead04d6b352791d265b99957cc02", "score": "0.5817774", "text": "def calculate_distance_name(votes, senator_name_1, senator_name_2):\n senator_1_votes = votes[votes[\"name\"] == senator_name_1].iloc[0, 3:].reshape(1, -1)\n senator_2_votes = votes[votes[\"name\"] == senator_name_2].iloc[0, 3:].reshape(1, -1)\n distance = float(euclidean_distances(senator_1_votes, senator_2_votes)[0])\n return distance", "title": "" }, { "docid": "8aaf48946f0d546669523ab015833e10", "score": "0.5765176", "text": "def policy_compare(sen_a, sen_b, voting_dict):\n dot_product = 0\n for i in range(0, len(voting_dict[sen_a])):\n dot_product += voting_dict[sen_a][i] * voting_dict[sen_b][i]\n return dot_product", "title": "" }, { "docid": "5718f83235d801d7680b8a15dccdc996", "score": "0.5574725", "text": "def vote(results):\n return np.mean(results, axis=0)", "title": "" }, { "docid": "aad191810e1b26a81d8778da0b3c1674", "score": "0.5253092", "text": "def most_similar(sen, voting_dict):\n max = 0\n #del voting_dict[sen]\n for comp_sen in voting_dict.keys():\n if comp_sen != sen:\n dot_product = policy_compare(sen, comp_sen, voting_dict)\n if dot_product > max:\n max = dot_product\n most_similar = comp_sen\n return most_similar + ' is the most similar to ' + sen + ' with a degree of ' + str(max) + '.'", "title": "" }, { "docid": "3ce0b52adf90d376f58d278b6caeb70a", "score": "0.51992637", "text": "def score_sentences(sen1, sen2):\n\n s1 = set(sen1.lower().split())\n s2 = set(sen2.lower().split())\n score = 0\n if s1 and s2:\n avg = len(s1) + len(s2) / 2.0\n score = len(s1.intersection(s2)) / avg\n return score", "title": "" }, { "docid": "e8291d28e42fca61c2c8878e05f226f0", "score": "0.51552117", "text": "def usr_avg(user_ratings):\n return round(sum(user_ratings[movie] for movie in user_ratings) / len(user_ratings), 3)", "title": "" }, { "docid": "442ae11634cfba41926b6c896956be5a", "score": "0.51381004", "text": "def compute_user_similarity(d1, d2, ave_rat1, ave_rat2):\n # Your code here\n k1 = d1.keys()\n k2 = d2.keys()\n common = list(set(k1) & set(k2))\n if not common:\n return 0.0\n else:\n numerator_sum = []\n denominator_sum1 = []\n denominator_sum2 = []\n for movie in common:\n numerator_sum.append((d1[movie] - ave_rat1) * (d2[movie] - ave_rat2))\n denominator_sum1.append((d1[movie] - ave_rat1) ** 2)\n denominator_sum2.append((d2[movie] - ave_rat2) ** 2)\n if sum(denominator_sum1) == 0 or sum(denominator_sum2) == 0:\n return 0.0\n else:\n return sum(numerator_sum) / math.sqrt(sum(denominator_sum1) * sum(denominator_sum2))", "title": "" }, { "docid": "52671e7747b47c1842fd6971fc4f09fa", "score": "0.50017273", "text": "def evaluate(group_members, 
recommendation, test_ratings): \n prec_mean=0\n rec_mean=0\n for i in range(len(group_members)):\n actual_recom, false_positives = generate_recommendations(group_members[i],test_ratings,4)\n prec, rec, tp, fp = calculate_metrics(actual_recom, recommendation[i],false_positives)\n prec_mean+=prec\n rec_mean+=rec\n prec_mean=prec_mean/len(group_members)\n rec_mean=rec_mean/len(group_members)\n \n return prec_mean, rec_mean", "title": "" }, { "docid": "7f46a007c7e06b29fc35722252a9cb06", "score": "0.4897609", "text": "def _avg_victim(self, attr):\n l = [getattr(m, attr) for m in self.get_messages('victim')]\n return sum(l) / len(l) if l else 0.0", "title": "" }, { "docid": "c82bbb470baad24fe6524ec0a42a299f", "score": "0.4886269", "text": "def plurality(voters):\n votes = np.zeros(len(voters[0]))\n for voter in voters:\n votes[np.argmax(voter)] += 1\n return np.argmax(votes)", "title": "" }, { "docid": "326909f35d0e5fc465e087ced02091e4", "score": "0.48783973", "text": "def mse(submission, use_centroid=True):\n if 'instructorScore' in submission:\n ins = submission['instructorScore']\n elif use_centroid:\n ins = centroid(submission)\n else:\n return None\n\n devs = []\n for r in submission['reviewScores']:\n devs += [(r[q]-ins[q])**2.0 for q in ins]\n\n return sum(devs)/len(devs)", "title": "" }, { "docid": "5e945887487c660b02d8bb0f9b2c7b84", "score": "0.48637134", "text": "def gendervote(neighbours):\r\n neighbourGenders = []\r\n for n in neighbours:\r\n neighbourGenders.append(n.gender)\r\n return Counter(neighbourGenders).most_common(1)[0][0]", "title": "" }, { "docid": "cbee42cd7abe58b02df141c61b81ee42", "score": "0.4861897", "text": "def compute_profile_means(self):\n for user in self.get_users():\n if user not in self._profile_means:\n mean_user_rating = self.get_profile(user)['rating'].mean()\n self._profile_means[user] = mean_user_rating", "title": "" }, { "docid": "f20798882bf7ad87ebbb31b70270ef7f", "score": "0.4853021", "text": "def make_predictions(movies, ratings_train, ratings_test):\n ###TODO\n result = list()\n for i,row in ratings_test.iterrows():\n # b = movies.loc[movies['movieId'] == row['movieId']].squeeze()['features']\n b = movies.loc[movies.movieId == row.movieId]['features'].squeeze()\n trainMovie = ratings_train.loc[ratings_train['userId'] == row['userId']]\n \n weighted_avg = 0\n weight_sum=0\n div=0\n \n for i1, row1 in trainMovie.iterrows():\n # a = movies.loc[movies['movieId'] == row1['movieId']].squeeze()['features']\n a = movies.loc[movies.movieId == row1.movieId]['features'].squeeze()\n cosSim = cosine_sim(b, a)\n if cosSim > 0:\n div = div + cosSim\n weight_sum = weight_sum + cosSim*row1.rating\n if(div>0):\n avg=(weight_sum/div)\n result.append(avg)\n else:\n user_rating=np.mean(trainMovie.rating)\n result.append(user_rating)\n result=np.array(result)\n return result", "title": "" }, { "docid": "d6878c31153e7a2ece9816962aeb8094", "score": "0.48468754", "text": "def _call_ev(self, g):\n final_rating = 0\n\n for opponent_name in g.players_in_round:\n if opponent_name != self.name and self._pa(opponent_name):\n final_rating += self._call_vs(self.opponents[opponent_name], g)\n final_rating = final_rating / (len(g.players_in_round) - 1)\n\n return final_rating", "title": "" }, { "docid": "15eb72400835efd77ffe017d6526a596", "score": "0.48286423", "text": "def calculate_voting_predictions(self, members, league):\r\n votes = np.array([ get_score_resources(m).league_predictions[league].astype(int) for m in members ])\r\n predictions = np.apply_along_axis(lambda x: 
np.argmax(np.bincount(x)), axis=0, arr=votes)\r\n return predictions", "title": "" }, { "docid": "612b5ef1cdfcd05a99526cfd3e62b2d7", "score": "0.48156798", "text": "def avg_group_similarity(self, group_ids, alpha=1):\n user_similarities = np.zeros(len(group_ids))\n for user1 in group_ids:\n curr_similarities = []\n for user2 in group_ids:\n similarity = self.__cosine_sim__(user1, user2)\n curr_similarities.append(similarity)\n user_similarities += np.array(curr_similarities)\n user_similarities /= len(group_ids)\n avg_similarity = np.mean(user_similarities)\n return user_similarities, avg_similarity", "title": "" }, { "docid": "9a803076566a197d6f90c845b213220c", "score": "0.4797836", "text": "def predict(self, users_ratings):\n predictions = defaultdict(dict)\n for user, items in users_ratings.iteritems():\n for item, _ in items.iteritems():\n total, denom = 0.0, 0.0\n if item not in self.similar_items_:\n predictions[user][item] = None\n continue\n for item2, similarity in self.similar_items_[item].iteritems():\n if item2 in self.X_train_[user]:\n total += self.X_train_[user][item2] * similarity\n denom += similarity\n if denom == 0:\n predictions[user][item] = None\n continue\n predictions[user][item] = total/denom\n return predictions", "title": "" }, { "docid": "5797ecc40887fa3f3ad3247b1a929731", "score": "0.47719222", "text": "def evaluate(self, s, t):\n s1 = self.generate_ngrams(s)\n s2 = self.generate_ngrams(t)\n intersection = s1.intersection(s2)\n union = s1.union(s2)\n if len(union) == 0:\n return 1\n else:\n return len(intersection) / len(union)", "title": "" }, { "docid": "58d51805ed063f552c82f2a0a8c98799", "score": "0.47653434", "text": "def calcUserSim(userVec):\n bestSim = 0.0\n bestSimUser = None\n IdTranslate = icamf_recommender.rating_object.items_ids\n users = icamf_recommender.rating_object.user_rated_item_in_ctx_multimap\n\n # convert values from DB format to Dictionary format\n poi_id_rating_tuple_list = [(IdTranslate[poi], rating) for poi, rating in userVec]\n\n for u in users:\n DBuser = users[u]\n matches = 0\n sqdiffs = 0.0\n\n for userval in poi_id_rating_tuple_list:\n itemID = userval[0]\n\n if itemID in DBuser:\n ratedItem = DBuser[itemID]\n matches += 1\n count = 0\n temp = 0\n\n for context in ratedItem:\n # If there is multiple ratings in different contexts -> average\n temp += ratedItem[context]\n count += 1\n normDB = norm(temp / count)\n normUser = norm(userval[1])\n # temp/count = average\n sqdiffs += (normDB - normUser) * (normDB - normUser)\n if matches == 0:\n continue\n MSD = sqdiffs / matches\n\n lenDBuser = len(DBuser)\n lenUserVec = len(userVec)\n jaccard = matches / (lenDBuser + lenUserVec - matches)\n\n tempSim = jaccard * (1 - MSD)\n\n if tempSim > bestSim:\n bestSim = tempSim\n bestSimUser = u\n bestSimUser = icamf_recommender.rating_object.ids_user[bestSimUser]\n\n return bestSimUser, bestSim", "title": "" }, { "docid": "29c9b33a4e79481cfc2a56d0408e58f5", "score": "0.47649845", "text": "def _apply_votes(self) -> None:\r\n candidates = list(self._all_selected_candidates.values())\r\n candidate_positions = []\r\n for candidate in candidates:\r\n index = self._VSD.get_index_of_candidate_name(candidate)\r\n if index == -1:\r\n candidate_positions.append([])\r\n else:\r\n candidate_positions.append([index])\r\n if len(candidate_positions) < 4:\r\n while len(candidate_positions) != 4:\r\n candidate_positions.append([])\r\n self._VSD.apply_student_votes(candidate_positions[0], candidate_positions[1], candidate_positions[2],\r\n 
candidate_positions[3])", "title": "" }, { "docid": "c88b39638f7e542f9821e5e7e8967121", "score": "0.47182646", "text": "def compute_sentence_vector(self, sentence):\n return np.average(np.array([self.compute_word_vector(w) for w in sentence.split()]), axis=0)", "title": "" }, { "docid": "444833046d4afd3868bc5598b6683868", "score": "0.47182104", "text": "def make_predictions(movies, ratings_train, ratings_test):\n ###TODO\n predicted = []\n for test_index,test_row in ratings_test.iterrows(): \n #the p+ve similatiry store weighted ratings while n-ve only ratings\n weights = {'p_ratings':[],'p_weights':[],'n':[]}\n feature_test = movies[movies.movieId==test_row['movieId']].get_values()[0][4]\n for train_index,train_row in ratings_train[ratings_train.userId==test_row['userId']].iterrows():\n feature_train = movies[movies.movieId==train_row['movieId']].get_values()[0][4]\n #find cosine/weight\n sim_m=cosine_sim(feature_test,feature_train)\n if sim_m>0:\n weights['p_ratings'].append(train_row['rating'])\n weights['p_weights'].append(sim_m)\n else:\n weights['n'].append(train_row['rating'])\n if len(weights['p_ratings'])!=0 : \n #weighted average\n predicted.append(np.dot(np.array(weights['p_ratings']),np.array(weights['p_weights'])) /sum(weights['p_weights']))\n else :\n predicted.append(np.mean(weights['n']))\n return np.array(predicted)", "title": "" }, { "docid": "2e3fa63f0fb5e3873880d294446839eb", "score": "0.47119394", "text": "def compute_average_user_ratings(user_ratings):\n ave_ratings = {}\n # Your code here\n for key in user_ratings:\n ratings = user_ratings[key].values()\n ave_ratings[key] = sum(ratings) / len(ratings)\n return ave_ratings", "title": "" }, { "docid": "6181d27dba2d6d92c49f4dd687fcbbf3", "score": "0.47059372", "text": "def calculate_voting_evaluation(self, member, base_members, league):\r\n scorer = ScoreQuery(member).get_metric()\r\n\r\n data = member.get_simulation().get_training_data()\r\n features = data.features\r\n x = data.x\r\n y = data.y\r\n\r\n base_predictions = self.calculate_voting_predictions(base_members, league)\r\n base_score = scorer.score(y, base_predictions)\r\n\r\n combined_members = [ member ] + base_members\r\n combined_predictions = self.calculate_voting_predictions(combined_members, league)\r\n combined_score = scorer.score(y, combined_predictions)\r\n\r\n score_boost = combined_score - base_score\r\n\r\n voting_evaluation = VotingEvaluation()\r\n voting_evaluation.base_score = base_score\r\n voting_evaluation.combined_score = combined_score\r\n voting_evaluation.score_boost = score_boost\r\n voting_evaluation.evaluated = True\r\n return voting_evaluation", "title": "" }, { "docid": "e7901d8fb0abb0dc7524487688f8eaaf", "score": "0.4694233", "text": "def f1(predictions, gold):\n if len(gold) == 0:\n return 1 if len(predictions) == 0 else 0\n nom = 2 * len(set(predictions).intersection(set(gold)))\n denom = len(set(predictions)) + len(set(gold))\n return nom / denom", "title": "" }, { "docid": "a43bf8fa782b45066a7f37d706f8fc95", "score": "0.46906427", "text": "def calculate_average_mark(student: Dict) -> float:", "title": "" }, { "docid": "ea55d276622d0cc845b1d7c8ea75cb31", "score": "0.46749783", "text": "def _normed_vect(self, tokens):\n totalLength = 0\n for token in tokens:\n totalLength+= math.pow(float(token['Rating']),2)\n totalLength = math.sqrt(totalLength)\n\n vect = {}\n for token in tokens:\n vect[token['BeerId']] = float(token['Rating'])/totalLength\n return vect", "title": "" }, { "docid": "35b36da751356a2580561b7e9a65492c", "score": 
"0.46590158", "text": "def least_similar(sen, voting_dict):\n min = 9999\n #del voting_dict[sen]\n for comp_sen in voting_dict.keys():\n if comp_sen != sen:\n dot_product = policy_compare(sen, comp_sen, voting_dict)\n if dot_product < min:\n min = dot_product\n most_similar = comp_sen\n return most_similar + ' is the least similar to ' + sen + ' with a degree of ' + str(min) + '.'", "title": "" }, { "docid": "e327a59eee6777f69547ae7ee1adb630", "score": "0.46032223", "text": "def evaluate_fitness(self, trainers_set):\n for predictor in self.population:\n total = 0\n reduced_train_x, reduced_train_y = self.predictor_data(\n predictor.genes)\n\n for trainer in trainers_set.population:\n f_predicted = trainers_set.fitness(trainer, reduced_train_x, reduced_train_y)\n\n total += (abs(trainer.fitness - f_predicted)/trainer.fitness)*100\n\n predictor.fitness = total/len(trainers_set.population)", "title": "" }, { "docid": "5b6db9d79ece9403e1638f4ec288ae02", "score": "0.4598225", "text": "def transform_sentence(self, sentence):\n\n s = tokenize(sentence, tknzr=self.tokenizer, to_lower=True)\n tokens = s.split()\n vectors = [self.transform_word(token) for token in tokens]\n return np.average(vectors, axis=0)", "title": "" }, { "docid": "ef385a36e53fbeaf391d57f8ffe7c637", "score": "0.45914635", "text": "def sentence_similarity(sentence1, sentence2):\n # Tokenize and tag\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n \n\n # Get the synsets for the tagged words\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n \n\n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n \n score, count = 0.0, 0\n \n # For each word in the first sentence\n for synset in synsets1:\n\n temp = [synset.path_similarity(ss) for ss in synsets2]\n\n \n #print(\"Adding None now\")\n temp = [0.0 if v is None else v for v in temp]\n \n #print(temp)\n # Get the similarity value of the most similar word in the other sentence\n #print(len(temp))\n\n if (len(temp)) :\n best_score = max(temp)\n else:\n best_score = 0.0\n count=1\n # Check that the similarity could have been computed\n if best_score is not 0.0:\n score += best_score\n count += 1\n \n # Average the values\n score /= count\n return score", "title": "" }, { "docid": "a6681840babc59ca3087a2975662696f", "score": "0.45904684", "text": "def _calculateSimilarity(self, rssUser, rssOther, multSum): #done\r\n\r\n return multSum / (rssUser * rssOther)", "title": "" }, { "docid": "2b7847b1bf76ecf2b6b45c4968efa5e0", "score": "0.45895416", "text": "def similarity(self, sv1, sv2):\n return ((sv1-sv2)**2).sum()/sv1.size", "title": "" }, { "docid": "79b59c44973bd6d473912b1d93aca8e6", "score": "0.45877436", "text": "def vote(self, distances):\n \n distance = np.argsort(distances)\n votes = distance[:self.k]\n prediction = {} # There are two alternatives for the vote, person 1 or person 2.\n for i in votes:\n label = self.labels[i]\n prediction[i] = label\n \n cnt = Counter()\n for p in prediction:\n cnt[p] += 1\n maxVote = cnt.most_common(1) # One is always best. 
Or at least got most votes (think Trump)..\n \n return self.labels[maxVote[0][0]]", "title": "" }, { "docid": "5291c66818867cc28efeba9f4c795657", "score": "0.45698467", "text": "def annotate(self, mentions):\n\n for m in mentions:\n m.candidate_entities = []\n\n empty_mentions = copy.deepcopy(mentions)\n\n for i in range(len(self.annotators)):\n mentions_ = copy.deepcopy(empty_mentions)\n\n # Annotate all mentions using this annotator\n self.annotators[i].annotate(mentions_)\n\n # Multiply prior_probs with weight\n for m in mentions_:\n for c in m.candidate_entities:\n c.prior_prob *= self.weights[i]\n \n # Add candidates to original mentions object, and check for duplicates\n for mention in mentions:\n\n mention_ = [m for m in mentions_ if m.substring == mention.substring]\n if(len(mention_) > 0): # If any entities were found, otherwise the mention is removed\n mention_ = mention_[0]\n \n for c in mention_.candidate_entities:\n\n # If duplicate, just add on to prior_prob\n if(c in mention.candidate_entities):\n c2 = next(x for x in mention.candidate_entities if x == c)\n c2.prior_prob += c.prior_prob\n else:\n # Else just add the new candidate\n mention.candidate_entities.append(c)\n\n to_remove = []\n for mention in mentions:\n if not mention.candidate_entities:\n # Remove mentions where no annotator found any candidates\n to_remove.append(mention)\n else:\n # If any annotator did not find any candidates for a mention, we need to renormalize\n Z = sum([c.prior_prob for c in mention.candidate_entities])\n for c in mention.candidate_entities:\n c.prior_prob /= Z\n\n for obj in to_remove:\n mentions.remove(obj)", "title": "" }, { "docid": "cf1ccebd47296d18c1b3ca0b4f0048fd", "score": "0.4564405", "text": "def get_ratings(df, user_id, curr_user_data):\n print(\"Using userId {}\".format(user_id))\n\n # Get the users that have seen the same movies as the selected user\n user_subset = df[(df['movieId'].isin(curr_user_data['movieId'].tolist())) & (df.userId != user_id)]\n # Sort them by the amount of movies that they have in common with the selected user\n sizes = user_subset.groupby(['userId']).userId.agg('count').to_frame('c').reset_index()\n sizes = sizes.sort_values(by=\"c\", ascending=False)[0:K * 2]\n\n # Get all users that are in the selected list\n user_subset = user_subset[user_subset['userId'].isin(sizes[\"userId\"].tolist())]\n user_subset_group = user_subset.groupby(['userId'])\n\n # Store the Pearson Correlation in a dictionary, where the key is the user Id and the value is\n # the coefficient\n pearson_correlation_dict = {}\n\n # Implementation based on: https://medium.com/swlh/how-to-build-simple-recommender-systems-in-python-647e5bcd78bd\n # For every user group in our subset\n for name, group in user_subset_group:\n # Let's start by sorting the input and current user group so the values aren't mixed up later on\n group = group.sort_values(by='movieId')\n input_movies = curr_user_data.sort_values(by='movieId')\n # Get the N for the formula\n n_ratings = len(group)\n # Get the review scores for the movies that they both have in common\n temp_df = input_movies[input_movies['movieId'].isin(group['movieId'].tolist())]\n # And then store them in a temporary buffer variable in a list format to facilitate future calculations\n temp_rating_list = temp_df['rating'].tolist()\n # Let's also put the current user group reviews in a list format\n temp_group_list = group['rating'].tolist()\n sum_rating = sum(temp_rating_list)\n sum_group = sum(temp_group_list)\n # Now let's calculate the pearson 
correlation between two users, so called, x and y\n # Sxx = sum(i ** 2 for i in temp_rating_list) - pow(sum_rating, 2) / float(n_ratings)\n # Syy = sum(i ** 2 for i in temp_group_list) - pow(sum_group, 2) / float(n_ratings)\n # Sxy = sum(i * j for i, j in zip(temp_rating_list, temp_group_list)) - sum_rating * sum_group / float(n_ratings)\n Sxx = sum([i ** 2 for i in temp_rating_list]) - pow(sum(temp_rating_list), 2) / float(n_ratings)\n Syy = sum([i ** 2 for i in temp_group_list]) - pow(sum(temp_group_list), 2) / float(n_ratings)\n Sxy = sum(i * j for i, j in zip(temp_rating_list, temp_group_list)) - sum(temp_rating_list) * sum(\n temp_group_list) / float(n_ratings)\n # If the denominator is different than zero, then divide, else, 0 correlation.\n if Sxx != 0 and Syy != 0:\n # Clamp to counter rounding errors\n pearson_correlation_dict[name] = min(1, max( -1, Sxy / sqrt(Sxx * Syy)))\n else:\n pearson_correlation_dict[name] = 0\n\n # Convert the dict to a DataFrame\n pearson_df = pd.DataFrame.from_dict(pearson_correlation_dict, orient='index')\n pearson_df.columns = ['similarityIndex']\n pearson_df['userId'] = pearson_df.index\n pearson_df.index = range(len(pearson_df))\n top_users = pearson_df.sort_values(by='similarityIndex', ascending=False)[0:K]\n \n # We only consider users with at least some similar taste i.e. similarityIndex > 0\n top_users = top_users[top_users[\"similarityIndex\"] > 0]\n\n top_users_rating = top_users.merge(df, left_on='userId', right_on='userId', how='inner')\n\n # Multiplies the similarity by the user's ratings\n top_users_rating['weightedRating'] = top_users_rating['similarityIndex'] * top_users_rating['rating']\n\n # Applies a sum to the top_users after grouping it up by userId\n temp_top_users_rating = top_users_rating.groupby(\"movieId\").sum()[[\"similarityIndex\", \"weightedRating\"]]\n temp_top_users_rating.columns = [\"sum_similarityIndex\", \"sum_weightedRating\"]\n\n # Filter out entries where the similarity index is zero\n temp_top_users_rating = temp_top_users_rating[~(temp_top_users_rating[\"sum_similarityIndex\"] == 0)]\n\n recommendation_df = pd.DataFrame()\n\n # Now we take the weighted average\n recommendation_df['rating'] = temp_top_users_rating['sum_weightedRating'] / temp_top_users_rating[\n 'sum_similarityIndex']\n\n recommendation_df['movieId'] = temp_top_users_rating.index\n\n # recommendation_df['rating'] = recommendation_df['rating'].clip(1, 5)\n\n return recommendation_df", "title": "" }, { "docid": "178773d8df6d342265166aa08e018f6b", "score": "0.45620382", "text": "def calcUserSim( moviesDict1, moviesDict2, intersection, lenIntersection ):\n\n ratings1 = []\n ratings2 = []\n\n for movieID in intersection:\n ratings1.append(moviesDict1[movieID])\n ratings2.append(moviesDict2[movieID])\n\n # user of moviesDict1 is the Y-VALUE (ie the predicted value, later)\n slope, intercept, r_value, p_value, std_err = stats.linregress(ratings2, ratings1)\n\n return [r_value, slope, intercept, lenIntersection]", "title": "" }, { "docid": "ef0499e259fc4549b65adbe04c2801d1", "score": "0.45527816", "text": "def author_proportions(self, considerate_admin=True):\n\n # Fetch all the authors who participated in the pad\n authors = self.authors[:]\n\n # Delete the admin if needed\n if not considerate_admin and 'Etherpad_admin' in authors:\n authors = list(np.delete(authors, authors.index('Etherpad_admin')))\n # Initialize the number of letters written by each authors\n author_lengths = np.zeros(len(authors))\n # increment the participation accordingly\n for op 
in self.operations:\n op_author = op.author\n # Skip the incrementation if needed\n if considerate_admin or op_author != 'Etherpad_admin':\n author_lengths[authors.index(op_author)] += abs(op.get_length_of_op())\n\n # Compute the overall participation\n overall_length = sum(author_lengths)\n if overall_length:\n proportions = author_lengths / overall_length\n else:\n proportions = np.zeros(len(authors))\n return authors, proportions", "title": "" }, { "docid": "12f62107175c6e66790e544a90aa724f", "score": "0.45474437", "text": "def match_users_approx(self):\n\n print('Matching the users')\n\n # Add the users to the matcher\n self.users['ba'] = pd.read_csv(self.data_folder + 'ba/users.csv')\n self.users['rb'] = pd.read_csv(self.data_folder + 'rb/users.csv')\n\n # Compute the lowercase letter of usernames\n for key in ['ba', 'rb']:\n low = [x.lower() for x in self.users[key]['user_name']]\n self.users[key].loc[:, 'user_name_lower'] = low\n\n # Get the small and big df\n if len(self.users['ba']) < len(self.users['rb']):\n small = 'ba'\n big = 'rb'\n else:\n small = 'rb'\n big = 'ba'\n\n corpus = list(set(self.users['ba']['user_name_lower']) | set(self.users['rb']['user_name_lower']))\n\n # Vectorize and get the vocabulary\n tfidf_vect = TfidfVectorizer(analyzer='char', ngram_range=(2, 2))\n tfidf_vect.fit_transform(corpus)\n vocabulary = tfidf_vect.get_feature_names()\n\n # Prepare the JSON DF\n df_json = {}\n for key1 in ['ba', 'rb']:\n df_json[key] = {}\n for col in list(self.users[key].columns):\n df_json[key][col] = []\n\n df_json['scores'] = {'sim': []}\n\n for i in self.users[small].index:\n row_small = self.users[small].ix[i]\n\n subset_big = self.users[big][self.users[big]['location'] == row_small['location']]\n subset_big.index = range(len(subset_big))\n\n if len(subset_big) > 0:\n\n # Train the TF-IDF Vectorizer\n tfidf_vect = TfidfVectorizer(vocabulary=vocabulary, analyzer='char', ngram_range=(2, 2))\n tfidf_train = tfidf_vect.fit_transform([row_small['user_name_lower']])\n\n # And test it\n tfidf_test = tfidf_vect.transform(subset_big['user_name_lower'])\n\n # Get matrix of cosine similarity\n sim = cosine_similarity(tfidf_test, tfidf_train)\n sim = sim[:, 0]\n\n nbr_matches = 1\n\n sim_idx = (-sim).argsort()[:nbr_matches]\n\n score = sim[sim_idx[0]]\n\n row_big = subset_big.ix[sim_idx[0]]\n\n if score >= 0.8:\n # Add small\n for col in self.users[small].columns:\n df_json[small][col].append(row_small[col])\n\n # Add big\n for col in self.users[big].columns:\n df_json[big][col].append(row_big[col])\n\n df_json['scores']['sim'].append(score)\n\n # Create the pandas DF from the dict\n df = pd.DataFrame.from_dict({(i, j): df_json[i][j]\n for i in df_json.keys()\n for j in df_json[i].keys()})\n\n # Save it\n df.to_csv(self.data_folder + 'matched/users_approx.csv', index=False)", "title": "" }, { "docid": "616873045d80c5d8830d48d45e6c431f", "score": "0.45448261", "text": "def score(self, hypothesis_tokens):\n pass #to be implemented in sublcass", "title": "" }, { "docid": "5cf93155f4c0e69efb5ad12aee1c6769", "score": "0.45406258", "text": "def _aggregate_one(self, segmentations: annotations.SEGMENTATIONS) -> annotations.SEGMENTATION:\n size = len(segmentations)\n segmentations = np.stack(segmentations.values)\n weights = np.full(size, 1 / size)\n for _ in range(self.n_iter):\n mv = self._segmentation_weighted_majority_vote(segmentations, weights)\n weights = self._calculate_weights(segmentations, mv)\n return mv", "title": "" }, { "docid": "0112d95c27dda3f1688614f39170dba9", "score": 
"0.45248756", "text": "def vote_based_on_means(res_single, res_pairs):\n #first, vote for yourself\n for i in res_single.keys():\n for i_comp in res_single[i].keys():\n res_single[i][i_comp][\"mean_votes\"] = 0\n res_single[i][i_comp][\"freq_votes\"] = 0\n \n \n for pair in res_pairs.keys():\n for p_comp in res_pairs[pair].keys():\n res_pairs[pair][p_comp][\"mean_votes\"] = 0\n res_pairs[pair][p_comp][\"freq_votes\"] = 0\n \n #next, take into account votes of pairs\n for key, key_log in res_pairs.items():\n mean_score_table = {}\n freq_score_table = {}\n \n for ind, i in enumerate(key):\n mean_score_table[i] = {}\n freq_score_table[i] = {}\n for i_comp in res_single[i].keys(): \n for count, key_comp in enumerate(key_log.keys()):\n if (key_comp < i_comp):\n continue\n single_means = res_single[i][i_comp][\"means\"]\n dim = res_single[i][i_comp][\"means\"].shape[1]\n pair_means = key_log[key_comp][\"means\"][:, ind*dim : (ind+1)*dim]\n score, freq_score, arrows = compute_mean_fit(single_means, pair_means)\n #print(arrows)\n mean_score_table[i][(i_comp, key_comp)] = score\n freq_score_table[i][(i_comp, key_comp)] = freq_score\n #res_pairs[pair][p_comp][\"mean_arrows\"][i][i_comp] = arrows\n\n \n \n \n #compute combined score\n\n best_mean_fit = dict(sorted(mean_score_table[i].items(), \n key=lambda x: (10*x[1]+freq_score_table[i][x[0]]))[:6])\n \n _, high_mean = max(best_mean_fit.items(), key=lambda x: x[1])\n \n for p in best_mean_fit.keys():\n \n score_silh = res_single[i][p[0]][\"score\"]*res_pairs[key][p[1]][\"score\"]\n res_single[i][p[0]][\"mean_votes\"] += max([0, (1 - best_mean_fit[p])*score_silh])\n res_pairs[key][p[1]][\"mean_votes\"] += max([0, (1 - best_mean_fit[p])*score_silh])\n \n return 0", "title": "" }, { "docid": "68dd5747faf8dfde6bbf04d16418e844", "score": "0.45243806", "text": "def average_preference_distance(matching):\n return np.average([np.where(woman == matching[i])[0][0] \n for i, woman in enumerate(PREFERENCES_MATRIX)])", "title": "" }, { "docid": "ed8c2ad5347922590837c9e0848967b2", "score": "0.452286", "text": "def main():\n number_of_students = int(input())\n students = {}\n for _ in range(number_of_students):\n input_string = input()\n input_list = input_string.split()\n students[input_list[0]] = input_list[1:]\n chosen_one = input()\n chosen_sum = sum(float(x) for x in students[chosen_one])\n chosen_average = round(chosen_sum / len(students[chosen_one]), 2)\n print(\"{0:.2f}\".format(chosen_average))", "title": "" }, { "docid": "e08055ae99571982fe8a8d92f966e905", "score": "0.4521667", "text": "def aggregated_voting(users_list, Fridge=False):\n fridge = []\n group_users = idToUser(users_list)\n if Fridge:\n # TODO call other service, this is just a test without using the service. 
to delete later\n # group_users[0].add_ingredients_fridge([\"chocolate\", \"banana\", \"eggs\"])\n # get the list of ingredients contained in the fridge using connection with the users' server\n if len(users_list) > 1:\n fridge = ast.literal_eval(get_group_profils(users_list))['fridge']\n else:\n fridge = ast.literal_eval(get_profil(users_list[0]))['fridge']\n all_rec = get_all_recommendations(recommendations)\n\n group_rec = get_group_recommendations(group_users, all_rec)\n dislikes = union_group_undesirable(group_users)\n union_rec = union_recommendations(group_rec)\n if not Fridge:\n all_df_rows = get_line_vote(group_users, group_rec)\n else:\n all_df_rows = get_line_vote_fridge(group_users, group_rec, fridge)\n df = get_pandas_df(all_df_rows, union_rec)\n df.loc['s'] = df.sum()\n df = df.sort_values(by='s', axis=1, ascending=False)\n print(df)\n recommended_sorted = df.columns[1:].values\n test = recipe_from_id(recommended_sorted)\n # if recommendation should consider only the fridge\n # if fridge:\n # test = filter_fridge(test, fridge)\n # if there are people in the group who have dislikes or allergies, remove recipes that contains those ingredients\n if dislikes:\n test = remove_dislikes(test, dislikes)\n #print(test[\"Ingredients\"].values)\n res = list(test.T.to_dict().values())\n return res", "title": "" }, { "docid": "9a8d2ca45ee4f1de21ca470fc4309f6c", "score": "0.45203292", "text": "def similarity(sentence1, sentence2):\n return (sentence_similarity(sentence1, sentence2) + sentence_similarity(sentence2, sentence1)) / 2", "title": "" }, { "docid": "36e4841f63fc3ad2fbf7e82e560b53dc", "score": "0.4517766", "text": "def verify(model, name, candidates):\n global rating_percentage\n print(\"#### verification for %s ####\" % (name) )\n rating_total = 0\n similar_rating = 0\n for c in candidates:\n if(c == name):\n continue\n if(name not in all_names):\n print(\"%s is not in the reviews file\" %(name))\n continue\n if(c not in all_names):\n print(\"%s is not in the reviews file\" %(c))\n continue\n\n temp = uv[[name, c]]\n temp = temp.loc[(temp[name]>0) & (temp[c]>0)]\n if(temp.shape[0] == 0):\n print(\"there is not enough reviews for %s and %s\" % (name, c))\n continue\n rating_total += temp.shape[0]\n\n #similar rating:\n temp = temp.loc[abs(temp[name] - temp[c]) <= 1]\n similar_rating += temp.shape[0]\n if(rating_total == 0):\n print(\"there is not enough reviews for %s\" % (name))\n else:\n rating_percentage[name] = similar_rating/rating_total\n print(\"all: %d, same ratings: %d, percentage: %.2f\" % (rating_total, similar_rating, similar_rating/rating_total))", "title": "" }, { "docid": "60ab4087dfb38f0a2068dcdf03df232d", "score": "0.45170176", "text": "def recalculate_statistics(self, donor_name):\n try:\n self.database.connect()\n self.database.execute_sql(self.fk)\n # Vars for calculations to be fed back into a query\n donation_sum = 0\n\n # for donation in Donation.select().where(Donation.donor == donor_name):\n # donation_sum += donation.donation_amt\n # counter += 1\n \n x = Donation.select().where(Donation.donor == donor_name)\n counter = x.count()\n for item in x:\n donation_sum += int(item.donation_amt)\n\n logger.info(f\"{donor_name} | {donation_sum} | {counter}\")\n\n # Update donor totals\n with self.database.transaction():\n donor = Donor.get(Donor.name == donor_name)\n donor.num_donations = counter\n donor.total_donation_amt = donation_sum\n donor.avg_donation = donation_sum / counter\n donor.save()\n\n except Exception as e:\n logger.error(e)\n finally:\n 
self.database.close()", "title": "" }, { "docid": "373e5fefd228a4788b2d0b8ba3c5620c", "score": "0.45164928", "text": "def update(self,new_sen,new_ans):\n # if float(self.get_similar_sen(new_sen,1)[0][2]) < 0.9:\n self.data.append(new_sen)\n new_prced_sen = self.pre_process(new_sen)\n self.corpus.append(new_prced_sen)\n self.ans_list.append(new_ans)\n self.corpus_tfidf_matrix = self.vect.fit_transform(self.corpus)\n print(\"Updated new sentences\")", "title": "" }, { "docid": "83092c9f74dea92f3fae24ee1790cab0", "score": "0.45150056", "text": "def average(self):\n if len(self.donations) == 0:\n return 0\n\n return self.sum() / len(self.donations)", "title": "" }, { "docid": "e73bbd76a9e5dd0a63f1f39b7cbb8f5b", "score": "0.4501361", "text": "def __predict_ratings(self, user_and_movie_df):\n\n predicted_df = self.model.transform(user_and_movie_df)\n predicted_df= predicted_df.join(self.movies_rating_counts_df, on='movieId')\n\n return predicted_df", "title": "" }, { "docid": "7739e8212eba5c19ca286e6701cdfba7", "score": "0.44895586", "text": "def calculate_initial_average_similarity(self, seeds):\n n_accounts, n_hashes = self.active_signatures.shape\n n_communities = len(seeds)\n\n # get the average similarity between a community and all other accounts\n average_similarities = np.zeros(shape=(n_communities, n_accounts))\n index = []\n for set_num, candidates in enumerate(seeds.iteritems()):\n # store the individual similarities for each account\n name, accounts = candidates\n index.append(name)\n similarities = np.zeros(shape=(len(accounts), n_accounts))\n for similarity_idx, account_idx in enumerate(accounts):\n # convert to an index into just the active signatures\n row_idx = self.lsh_candidates.get_active_idx(account_idx)\n similarities[similarity_idx, :] = self.get_account_similarities(self.active_signatures,\n self.active_signatures[row_idx, :])\n\n average_similarities[set_num, :] = similarities.mean(0)\n df = pd.DataFrame(data=average_similarities, index=index)\n return df", "title": "" }, { "docid": "e06d8bbe1acae28bf556ffad9e29a9a4", "score": "0.44881544", "text": "def get_similar_sen(self,new_sen,num_sens):\n\n tfidf_matrix = self.corpus_tfidf_matrix\n data = self.data\n ans_list = self.ans_list\n corpus = self.corpus\n vect = self.vect\n\n similar_sen_list = []\n tfidf_new = self.get_new_sen_tfidf(new_sen,vect)\n tfidf_new = [tfidf_new]\n t0 = time()\n cos_sim = cosine_similarity(tfidf_new, tfidf_matrix)\n #print(\"Calculate cosine similar in %f s\" %(time()-t0))\n cos_sim = cos_sim[0]\n for i in xrange(num_sens):\n max_val = max(cos_sim)\n max_idx = np.where(cos_sim == max_val)\n max_idx = max_idx[0][0]\n raw_sen = data[max_idx]\n sen = corpus[max_idx]\n ans = ans_list[max_idx]\n similar_sen_list.append([raw_sen, sen, unicode(max_val), ans])\n cos_sim[max_idx] = -1\n return (similar_sen_list)", "title": "" }, { "docid": "dc01da11bd998513168af321a7b2def7", "score": "0.4476649", "text": "def run_similarity(self, df):\n\n comments1, comments2, word_counts, name_similarities = self.features(df)\n return np.array(list(self.model.predict([comments1, comments2, word_counts, name_similarities]).ravel()))", "title": "" }, { "docid": "75dc4e4b31e66eb57ce0ff0ba8cce918", "score": "0.44755682", "text": "def get_avg_smape_for_pollutant(results_dict, city_name, pollutant_name):\n smape_values = []\n for key, metrics in results_dict.items():\n if city_name in key and pollutant_name in key:\n smape_values.append(metrics['smape_score'])\n\n return np.mean(smape_values)", "title": "" }, { "docid": 
"c508213b7fb1191eb1ad8a640c6b6b03", "score": "0.44704187", "text": "def cosine(self, u, v):\n dot_prod = 0\n overlap = self.get_overlap(u, v)\n \n if overlap == set():\n return 0\n for movieId in overlap:\n # TODO Rest of implementation\n \n u_rating = self.get_rating(u,movieId)\n v_rating = self.get_rating(v,movieId)\n \n dot_prod += u_rating * v_rating\n \n \n return dot_prod/(self.get_profile_length(u)*self.get_profile_length(v))", "title": "" }, { "docid": "db01bbaec7ebe06e7189578288a9616f", "score": "0.44672376", "text": "def MeanAveragePrecisionK(rating_true, rating_pred, k=5):\n df_hit, df_hit_count, n_users = _GetHitDF(rating_true, rating_pred, k)\n\n if df_hit.shape[0] == 0:\n return 0.0\n\n # Calculate Reciprocal Rank\n df_hit_sorted = df_hit.copy()\n df_hit_sorted[\"rRank\"] = (\n df_hit_sorted.groupby(\"USER\").cumcount() + 1\n ) / df_hit_sorted[\"rank\"]\n df_hit_sorted = df_hit_sorted.groupby(\"USER\").agg({\"rRank\": \"sum\"}).reset_index()\n\n # Calculate Mean Averate Precision\n df_merge = pd.merge(df_hit_sorted, df_hit_count, on=\"USER\")\n return (df_merge[\"rRank\"] / df_merge[\"actual\"]).sum() / n_users", "title": "" }, { "docid": "43626b6cc0cd41e3c9206a2a545c803c", "score": "0.44470027", "text": "def evaluate(self):\n best_fitness = float(\"-inf\")\n worst_fitness = float(\"inf\")\n self.average_fitness = 0\n # Para cada individuo...\n for ind in self.set:\n # Lo evaluamos\n ind.evaluate()\n # Actualizamos mejor y peor individuo del set junto al fitness medio.\n self.average_fitness += ind.fitness\n if ind.fitness > best_fitness:\n self.best_individual = ind\n best_fitness = ind.fitness\n if ind.fitness < worst_fitness:\n self.worst_individual = ind\n worst_fitness = ind.fitness\n self.average_fitness /= self.size()", "title": "" }, { "docid": "cd325f5ce198efc050a8f336ffdde823", "score": "0.4444773", "text": "def k_fold_predict(self, users_ratings):\n predictions = defaultdict(dict)\n for user, items in users_ratings.iteritems():\n for item, v in items.iteritems():\n if v is None:\n total, denom = 0.0, 0.0\n if item not in self.similar_items_:\n predictions[user][item] = None\n continue\n for item2, similarity in self.similar_items_[item].iteritems():\n if item2 in users_ratings[user] and users_ratings[user][item2] != None:\n total += users_ratings[user][item2] * similarity\n denom += similarity\n if denom == 0:\n predictions[user][item] = None\n continue\n predictions[user][item] = total/denom\n return predictions", "title": "" }, { "docid": "3ff2c99d23977f3ead180ea4659db79b", "score": "0.4443649", "text": "def sentence_similarity(sentence1, sentence2):\n # Tokenize and tag\n sentence3 = []\n sentence2 = sentence2.split(' ')\n for i in range(len(sentence2)):\n if sentence2[i].isalpha():\n sentence3.append(sentence2[i])\n str1 = \"\"\n for i in sentence3:\n str1 += i + ','\n #print(\"Here I am\",sentence1)\n sentence1 = pos_tag(word_tokenize(sentence1))\n str1 = pos_tag(word_tokenize(str1))\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in str1]\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n\n score, count = 0.0, 0\n \n for synset in synsets1:\n # Get the similarity value of the most similar word in the other sentence\n simlist = [synset.path_similarity(ss) for ss in synsets2 if synset.path_similarity(ss) is not None]\n if not simlist:\n continue;\n best_score = max(simlist)\n # Check that the similarity could have been computed\n score += 
best_score\n count += 1\n if count!=0:\n score /= count\n return score", "title": "" }, { "docid": "149b13557d92c025d56bd91be3ee27c7", "score": "0.44338435", "text": "def res_studs(self):\n\n #list of all advisors and the number of students they review\n alladv = self.obj.alladvis\n countlist = self.rev_count(alladv)\n\n while np.max(countlist)-np.min(countlist) > 1:\n\n#get the advisors with most and least reviews\n maxrev, = np.where(countlist == np.max(countlist))\n minrev, = np.where(countlist == np.min(countlist))\n\n#get random index number from the maximums and the mins\n maxdex = randint(0,maxrev.size-1)\n mindex = randint(0,minrev.size-1)\n\n#get name of old and new reviewer\n oldrev = alladv[maxrev[maxdex]]\n newrev = alladv[minrev[mindex]]\n \n#Student to remove from advisors list\n moving_student = self.obj.classadv[oldrev].students[-1]\n#make sure the new advisor is not the same as the first advisor\n if newrev not in self.obj.classreu[moving_student].revs: \n#remove last assigned student (will take 2nd choices away fist because order of adding)\n self.obj.classadv[oldrev].rem_reu(moving_student)\n self.obj.classreu[moving_student].rem_rev(oldrev)\n#assign student to new reviewer\n self.obj.classadv[newrev].add_reu(moving_student)\n self.obj.classreu[moving_student].add_rev(newrev)\n \n#update the count list with the new assignment \n countlist = self.rev_count(alladv)", "title": "" }, { "docid": "1688f1a8350ebf76d03d4ece94ae40fd", "score": "0.4433001", "text": "def sentence_similarity(sentence1, sentence2):\r\n # Tokenize and tag\r\n sentence1 = pos_tag(word_tokenize(sentence1))\r\n sentence2 = pos_tag(word_tokenize(sentence2))\r\n \r\n # Get the synsets for the tagged words\r\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\r\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\r\n \r\n # Filter out the Nones\r\n synsets1 = [ss for ss in synsets1 if ss]\r\n synsets2 = [ss for ss in synsets2 if ss]\r\n \r\n score, count = 0.0, 0\r\n \r\n # For each word in the first sentence\r\n for synset in synsets1:\r\n # Get the similarity value of the most similar word in the other sentence\r\n best_score = max([synset.path_similarity(ss) for ss in synsets2])\r\n \r\n # Check that the similarity could have been computed\r\n if best_score is not None:\r\n score += best_score\r\n count += 1\r\n \r\n # Average the values\r\n score /= count\r\n return score", "title": "" }, { "docid": "1688f1a8350ebf76d03d4ece94ae40fd", "score": "0.4433001", "text": "def sentence_similarity(sentence1, sentence2):\r\n # Tokenize and tag\r\n sentence1 = pos_tag(word_tokenize(sentence1))\r\n sentence2 = pos_tag(word_tokenize(sentence2))\r\n \r\n # Get the synsets for the tagged words\r\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\r\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\r\n \r\n # Filter out the Nones\r\n synsets1 = [ss for ss in synsets1 if ss]\r\n synsets2 = [ss for ss in synsets2 if ss]\r\n \r\n score, count = 0.0, 0\r\n \r\n # For each word in the first sentence\r\n for synset in synsets1:\r\n # Get the similarity value of the most similar word in the other sentence\r\n best_score = max([synset.path_similarity(ss) for ss in synsets2])\r\n \r\n # Check that the similarity could have been computed\r\n if best_score is not None:\r\n score += best_score\r\n count += 1\r\n \r\n # Average the values\r\n score /= count\r\n return score", "title": "" }, { "docid": "1a9f7a5e03cd3fc05d93d1d0c290ce41", 
"score": "0.4419066", "text": "def calculate_user_name_metrics(subname,\n aff_word,\n w2u_path,\n u2w_path,\n total_users,\n intercepting_words=True):\n num_of_users = []\n aff_value = []\n slang_to_user_wc = 0\n \n w2u = pickle_load(w2u_path)\n u2w = pickle_load(u2w_path)\n \n u2w, w2u = remove_intercepting_words(u2w, w2u)\n \n for w in aff_word: \n if w in w2u:\n num_of_users.append(len(w2u[w]))\n slang_to_user_wc += 1\n else:\n num_of_users.append(0)\n \n mean = np.mean(num_of_users)\n std = np.std(num_of_users)\n user_percent = np.sum(num_of_users)/total_users\n \n return [mean, std, slang_to_user_wc, user_percent]", "title": "" }, { "docid": "5e3c291f41165d1c40de0eb0d16e66c6", "score": "0.44172013", "text": "def evaluate(self, s, t):\n s_set = set(s.split(\" \"))\n k = 0\n\n t_tf = self.compute_tf(t)\n for s_word in s_set:\n if s_word not in t_tf.keys():\n continue\n else:\n freq = t_tf[s_word]\n k += freq * self.tfidf[(s_word, s)]\n return k", "title": "" }, { "docid": "7d2c837d20955423789b0ff108d657f3", "score": "0.4400692", "text": "def eval_avg(self, setting=1):\n LogInfo.begin_track(\"Eval on ROC using average word representations using setting %d...\", setting)\n correct = 0\n for i in range(0, 1871):\n ask4 = self.copa_ground[i][0]\n sentence, option1, option2 = self.copa_data[i]\n sent_vec = self.get_repr(sentence, ask4, setting, 'q')\n opt1_vec = self.get_repr(option1, ask4, setting, 'o')\n opt2_vec = self.get_repr(option2, ask4, setting, 'o')\n score1 = self.get_similarity(sent_vec, opt1_vec)\n score2 = self.get_similarity(sent_vec, opt2_vec)\n truth = self.copa_ground[i][1]\n if score1 > score2:\n if truth == 1:\n # LogInfo.logs(\"[%d] ret: %d(%.2f>%.2f), truth: %d. [T]\", i+1, 1, score1, score2, truth)\n correct += 1\n # else:\n # LogInfo.logs(\"[%d] ret: %d(%.2f>%.2f), truth: %d. [F]\", i+1, 1, score1, score2, truth)\n else:\n if truth == 2:\n # LogInfo.logs(\"[%d] ret: %d(%.2f<%.2f), truth: %d. [T]\", i+1, 2, score1, score2, truth)\n correct += 1\n # else:\n # LogInfo.logs(\"[%d] ret: %d(%.2f<%.2f), truth: %d. [F]\", i+1, 2, score1, score2, truth)\n\n LogInfo.logs(\"[summary] accuracy: %.4f(%d/%d).\", float(correct)/1871, correct, 1871)\n LogInfo.end_track()", "title": "" }, { "docid": "939e1bf9143b3564a930c935d563e663", "score": "0.43958008", "text": "def calculate_similarity(merged_metadata: pd.DataFrame) -> np.ndarray:\n soup = (\n merged_metadata['keywords'] +\n merged_metadata['cast'] +\n merged_metadata['director'] +\n merged_metadata['genres']\n ).apply(' '.join)\n count = CountVectorizer(analyzer='word', ngram_range=(1, 2), min_df=0, stop_words='english')\n count_matrix = count.fit_transform(soup)\n return linear_kernel(count_matrix, count_matrix)", "title": "" }, { "docid": "c8f5b16c920ac88ec33688552ceebf7c", "score": "0.4390826", "text": "def cal_movies_similarities(movie_ratings_df: DataFrame):\n # Find all pair of different movies watched by the same person.\n # func.col('mr1.movie_id') < func.col('mr2.movie_id') to avoid duplication.\n # Parenthesis is mandatory for combined condition (e.g. &, |)\n movie_ratings_df = movie_ratings_df.repartition(100)\n ratings_pairs_df = movie_ratings_df.alias('mr1'). 
\\\n join(movie_ratings_df.alias('mr2'),\n (func.col('mr1.user_id') == func.col('mr2.user_id')) & (func.col('mr1.movie_id') < func.col('mr2.movie_id'))).\\\n select(\n func.col('mr1.movie_id').alias('movie_id_1'),\n func.col('mr2.movie_id').alias('movie_id_2'),\n func.col('mr1.rating').alias('rating_1'),\n func.col('mr2.rating').alias('rating_2')\n )\n\n # Calculate dot product (numerator) and magnitude (denominator) of cosine similarity equation.\n # Each movie is considered a vector of its ratings.\n ratings_pairs_df = ratings_pairs_df.groupBy('movie_id_1', 'movie_id_2'). \\\n agg(func.sum(func.col('rating_1') * func.col('rating_2')).alias('sim_dot_product'),\n (func.sqrt(func.sum(func.pow(func.col('rating_1'), 2))) * func.sqrt(func.sum(func.pow(func.col('rating_2'), 2)))).alias('sim_magnitude'),\n func.count(func.col('movie_id_1')).alias('co_occurrence_count')\n )\n\n # Calculate cosine similarity as a new column:\n # (doc product of two movie ratings / doc product of two magnitude ratings)\n movies_similarities_df = ratings_pairs_df.\\\n withColumn('similarity_score',\n func.when(func.col('sim_magnitude') != 0,\n func.col('sim_dot_product') / func.col('sim_magnitude')).otherwise(0)\n ).select('movie_id_1', 'movie_id_2', 'similarity_score', 'co_occurrence_count')\n\n return movies_similarities_df", "title": "" }, { "docid": "e40881083b8334640c886bd861d3dfcf", "score": "0.43905997", "text": "def manhatten(rating1, rating2):\n distance = 0\n num = 0\n for key in rating1:\n if key in rating2:\n distance += abs(rating1[key] - rating2[key])\n num += 1\n \n if flag > 0:\n return distance / num\n else:\n return -1", "title": "" }, { "docid": "b2f3df8c13098cff989bcd6bd8ba8af4", "score": "0.43900508", "text": "def get_sim(self,new_sen):\n sen_list = self.get_similar_sen(new_sen,4)\n arr = []\n for sen in sen_list:\n if float(sen[2]) > 0.5:\n arr.append([sen[0],sen[3]])\n # return [[sen[0],sen[3]] for sen in sen_list]\n print arr\n return arr", "title": "" }, { "docid": "3d712c43e34c5fc77c108914b93e52d4", "score": "0.43875924", "text": "def student_call():\n return face_analyzer.student_attentiveness()", "title": "" }, { "docid": "e66fbe058c397d41da2681a50ce15f24", "score": "0.4382787", "text": "def sentence_similarity(sentence1, sentence2):\n # Tokenize and tag\n NoneType = type(None)\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence1nv = []\n sentence2 = pos_tag(word_tokenize(sentence2))\n sentence2nv = []\n # Get the synsets for the tagged words\n synsets1 = []\n count1 = 0\n count2 = 0\n tup = ()\n synsetnone1 = []\n for tagged_word in sentence1:\n if type(tagged_to_synset(*tagged_word)) == NoneType and tagged_word[0][0].isupper() and penn_to_wn(\n tagged_word[1]) == 'n':\n count1 += 1\n tup = (tagged_word[0], tagged_word[1])\n sentence1nv.append(tup)\n synsetnone1.append(tup)\n continue\n if penn_to_wn(tagged_word[1]) == 'n':\n count1 += 1\n tup = (tagged_word[0], tagged_word[1])\n sentence1nv.append(tup)\n synsets2 = []\n tup = ()\n synsetnone2 = []\n for tagged_word in sentence2:\n if type(tagged_to_synset(*tagged_word)) == NoneType and tagged_word[0][0].isupper() and penn_to_wn(\n tagged_word[1]) == 'n':\n count2 += 1\n tup = (tagged_word[0], tagged_word[1])\n sentence2nv.append(tup)\n synsetnone2.append(tup)\n continue\n if penn_to_wn(tagged_word[1]) == 'n':\n count2 += 1\n tup = (tagged_word[0], tagged_word[1])\n sentence2nv.append(tup)\n synsets1 = [tagged_to_synset(tagged_word, tag) for tagged_word, tag in sentence1nv]\n synsets2 = [tagged_to_synset(tagged_word, tag) for 
tagged_word, tag in sentence2nv]\n\n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if not type(ss) == NoneType]\n synsets2 = [ss for ss in synsets2 if not type(ss) == NoneType]\n score = 0.0\n # For each word in the first sentence\n for synset in synsets1:\n # Get the similarity value of the most similar word in the other sentence\n if not synsets2 == []:\n best_score = max([pathsim(synset, ss) for ss in synsets2])\n # Check that the similarity could have been computed\n if best_score is not None:\n score += best_score\n for synset in synsets2:\n # Get the similarity value of the most similar word in the other sentence\n if not synsets1 == []:\n best_score = max([pathsim(synset, ss) for ss in synsets1])\n # Check that the similarity could have been computed\n if best_score is not None:\n score += best_score\n for synsetnone in synsetnone2:\n if synsetnone in synsetnone1:\n score += 1\n for synsetnone in synsetnone1:\n if synsetnone in synsetnone2:\n score += 1\n # Average the values\n score /= (count1 + count2)\n return score", "title": "" }, { "docid": "d31848a787006dca59f6ef176b0858ac", "score": "0.43765646", "text": "def predictRatings(s, usrId):\n\n #----- Find similar users ----\n\n similarUsrs = s.similarUsrSearcher.findSimilarUsers(usrId)\n\n #----- Predict user ratings ----\n\n ratings = s.predicter.getRatings(usrId, similarUsrs)\n\n return ratings", "title": "" }, { "docid": "509545d8dcc8dfcc2da687fc2c5d4268", "score": "0.43743914", "text": "def calculate(self, results):\n self.results = results\n\n for i, result in enumerate(self.results):\n if isinstance(result, FailedAttackResult):\n continue\n elif isinstance(result, SkippedAttackResult):\n continue\n else:\n self.original_candidates.append(result.original_result.attacked_text)\n self.successful_candidates.append(result.perturbed_result.attacked_text)\n\n use_scores = []\n for c in range(len(self.original_candidates)):\n use_scores.append(\n self.use_obj._sim_score(\n self.original_candidates[c], self.successful_candidates[c]\n ).item()\n )\n\n self.all_metrics[\"avg_attack_use_score\"] = round(\n sum(use_scores) / len(use_scores), 2\n )\n\n return self.all_metrics", "title": "" }, { "docid": "cd70341632640399f09d33dd6e7f38a2", "score": "0.43720952", "text": "def adaptive_voting(s,a,c):\n # Define boundaries and sort\n K = len(s)\n v=[]\n\n for i in range(len(s)):\n v.append(s[i]-a[i]*c)\n v.append(s[i]+a[i]*c)\n v=sorted(v)\n\n # Compute middle points\n m=[]\n for i in range(2*K-1):\n m.append((v[i]+v[i+1])/2.)\n\n # voting\n S=[]\n for i in range(2*K-1):\n S.append([])\n\n for k in range(K):\n\n if m[i]<= s[k] + a[k]*c and m[i]>= s[k] - a[k]*c:\n S[i].append(k)\n\n # Enumerate consensus sets and return best\n f_list=[]\n for i in range(2*K-1):\n if S[i]:\n ww=0.\n ss = 0.\n for k in S[i]:\n ww+=1./(a[k]**2)\n ss+=s[k]/(a[k]**2)\n s_est = ss/ww\n f_list.append((s_est,f(s,a,s_est,c)))\n else:\n s_est = m[i]\n f_list.append((s_est,f(s,a,s_est,c)))\n\n f_list = sorted(f_list, key=lambda s:s[1])\n return f_list[0][0]", "title": "" }, { "docid": "826d0fb0c29a7b3aa11787740635d2e7", "score": "0.43715566", "text": "def score(self, sentence):\n return sum(map(lambda token: math.log(self.get_prob(token)), sentence))", "title": "" }, { "docid": "94f9881c42672ce0760707ccfaf3ae9a", "score": "0.43627062", "text": "def avg_precision(labeled_results, k=10):\n labeled_results = labeled_results[:k]\n result_count = 0\n _sum = 0\n\n for idx, val in enumerate(labeled_results):\n if val == 1:\n result_count += 1\n _sum += 
result_count / (idx + 1)\n score = _sum / k\n return score", "title": "" }, { "docid": "71db80177380115b0e4e6810ca41e5db", "score": "0.43614137", "text": "def _calc_mean(movies):\n\n movie_scores = [movie.score for movie in movies]\n return round(sum(movie_scores) / len(movies), 1)", "title": "" }, { "docid": "20364d732eb90f322087bb097623e4c6", "score": "0.4361278", "text": "def compute_mu(self, dict_u, dict_v):\n list1 = list(dict_u.keys())\n list2 = list(dict_v.keys())\n list1 = [int(x) for x in list1]\n list2 = [int(x) for x in list2]\n\n # fill shortest list with 0s\n new_list1, new_list2 = self.get_lists_same_size(list1, list2)\n # compute cosine similarity among the 2 lists (transformed in np.arrays)\n mu = cosine_similarity(new_list1.reshape(1, -1), new_list2.reshape(1, -1))\n return mu[0][0] # access first element of a list of lists", "title": "" }, { "docid": "718ce03a72de99cda2ebd666d2bdf8c2", "score": "0.4351764", "text": "def _raise_ev(self, g):\n final_rating = 0\n final_amount = 0\n total_amount_ratings = 0\n\n for opponent_name in g.players_in_round:\n if opponent_name != self.name and self._pa(opponent_name):\n rating, amount, amount_rating = self._raise_vs(self.opponents[opponent_name], g)\n final_rating += rating\n final_amount += round(amount * amount_rating)\n total_amount_ratings += amount_rating\n final_amount = final_amount / max(1, abs(total_amount_ratings))\n\n return (final_rating, final_amount)", "title": "" }, { "docid": "81beb6a7b994821170ea264d847fcfd6", "score": "0.4350956", "text": "def sentence_similarity(\n self, sentence: str, other_sentences: List[str], *, model: Optional[str] = None\n ) -> List[float]:\n response = self.post(\n json={\"inputs\": {\"source_sentence\": sentence, \"sentences\": other_sentences}},\n model=model,\n task=\"sentence-similarity\",\n )\n return response.json()", "title": "" }, { "docid": "a3237ac46e0ce8c3e7ecf9fcabc9149f", "score": "0.43508443", "text": "def main():\n args = parse_argument()\n train_file = args['train'][0]\n test_file = args['test'][0]\n print train_file, test_file\n # your code here\n train_user_ratings, train_movie_ratings = parse_file(train_file)\n ave_ratings = compute_average_user_ratings(train_user_ratings)\n test_user_ratings, test_movie_ratings = parse_file(test_file)\n similarity_dict = {}\n predictions = {}\n for user in test_user_ratings:\n for movie in test_user_ratings[user]:\n other_users = train_movie_ratings[movie].keys()\n sum_numerator = []\n sum_denominator = []\n for other_user in other_users:\n if (user, other_user) in similarity_dict:\n similarity = similarity_dict[(user, other_user)]\n elif (other_user, user) in similarity_dict:\n similarity = similarity_dict[(other_user, user)]\n else:\n similarity_dict[(user, other_user)] = compute_user_similarity(train_user_ratings[user], train_user_ratings[other_user], ave_ratings[user], ave_ratings[other_user])\n similarity = similarity_dict[(user, other_user)]\n sum_numerator.append(similarity * (train_user_ratings[other_user][movie] - ave_ratings[other_user]))\n sum_denominator.append(abs(similarity))\n if sum(sum_denominator) == 0:\n prediction = ave_ratings[user]\n else:\n prediction = ave_ratings[user] + (1 / sum(sum_denominator)) * (sum(sum_numerator))\n if user in predictions:\n predictions[user][movie] = prediction\n else:\n predictions[user] = coll.defaultdict(list)\n predictions[user][movie] = prediction\n with open(test_file) as f:\n lines = csv.reader(f)\n f2 = open('predictions.txt', 'w')\n sum_rmse = []\n sum_mae = []\n for line in 
lines:\n user = float(line[1])\n movie = float(line[0])\n observed = float(line[2])\n sum_rmse.append((observed - predictions[user][movie]) ** 2)\n sum_mae.append(abs(observed - predictions[user][movie]))\n f2.write(','.join(line) + ','+ str(predictions[user][movie]) + '\\n')\n f2.close()\n rmse = math.sqrt(sum(sum_rmse) / len(sum_rmse))\n mae = sum(sum_mae) / len(sum_mae)\n print \"RMSE \" + str(rmse)\n print \"MAE \" + str(mae)", "title": "" }, { "docid": "07f7521f70dd5a4fc04c35b8d72a25f8", "score": "0.4347808", "text": "def _avg_betrayer(self, attr):\n l = [getattr(m, attr) for m in self.get_messages('betrayer')]\n return sum(l) / len(l) if l else 0.0", "title": "" }, { "docid": "acfa6d87bc9689d0d996b2e3bc6f89fb", "score": "0.4340766", "text": "def names(listnames,listmarks):\r\n riskstudents = []\r\n tempmean = mean(listmarks)\r\n standard = standarddev(listmarks)\r\n for i in range(len(listmarks)):\r\n if listmarks[i] < (tempmean-standard): # If the mark is less than one standard deviation of the mean of the list of numbers\r\n riskstudents.append(listnames[i])\r\n return riskstudents", "title": "" }, { "docid": "f87d3f815eff11982dea98b3a5531045", "score": "0.4338428", "text": "def _score_members(self):\n\n self.scores = []\n\n if self.adaptive:\n for member in self.population:\n input_data = self._create_data_subset(member)\n clf = self._create_classifier(input_data)\n score = clf.score() - (0.01 * len(member))\n\n # Apply a penalty for two wavelengths being within ~20nm of each other\n for i in range(len(member)):\n for j in range(i + 1, len(member)):\n if abs(member[i] - member[j]) <= 10:\n score = score * 0.5\n\n self.scores.append(score)\n\n else:\n for member in self.population:\n input_data = self._create_data_subset(member)\n clf = self._create_classifier(input_data)\n score = clf.score()\n\n # Apply a penalty for two wavelengths being within ~20nm of each other\n # (this is used for the secondary fitness function)\n '''for i in range(len(member)):\n for j in range(i+1, len(member)):\n if abs(member[i] - member[j]) <= 10:\n score = score * 0.5'''\n\n self.scores.append(score)", "title": "" }, { "docid": "b7d246d9bfcbfd77a9460df590038503", "score": "0.433835", "text": "def get_average_name_per_taxon(self):\n from sequana.lazy import numpy as np\n\n return np.mean([len(values) for values in self._group_name.values()])", "title": "" }, { "docid": "4c4d523909b4e61c0c8f412fca5c6d39", "score": "0.43366697", "text": "def calculateStats(self, X, memberships):\n \n N = X.shape[0]\n weights = np.sum(memberships, axis = 0)/N\n means = np.zeros([X.shape[1],memberships.shape[1]])\n cov = np.zeros([memberships.shape[1], X.shape[1], X.shape[1]]) #3D array containing the memberships\n \n for i in np.arange( memberships.shape[1] ):\n \n Ni = weights[i]*N\n means[:,i] = np.sum( (X.T*memberships[:,i]).T, axis = 0 ) /(Ni)\n cov[i,:,:] = ((memberships[:,i])*X.T@X)/Ni - np.expand_dims(means[:,i], axis = 1)@np.expand_dims(means[:,i], axis = 1).T +np.eye(X.shape[1])*10**(-4)\n cov[i, :, :] = np.linalg.inv( cov[i, :, :] ) \n \n return weights, means.T, cov", "title": "" }, { "docid": "28fff21a10e30b7732b1eb43d531c1d9", "score": "0.43256733", "text": "def calculate_score(gram_output, key_phrases_as_tuples_to_rating, normalizing_factor):\n # normalizing factor should be the length of the original gram before adding the synonyms duplicates\n score = 0\n for gram in gram_output:\n if gram in key_phrases_as_tuples_to_rating:\n print(f'{gram}: {key_phrases_as_tuples_to_rating[gram]}')\n score += 
key_phrases_as_tuples_to_rating[gram]\n # normalize scores, so longer tags wont have an advantage\n if gram_output:\n score = score / normalizing_factor\n return score", "title": "" }, { "docid": "7b9032d4eadba8d1ab4ad2482415662a", "score": "0.43231615", "text": "def calc_sensitivity(self, count_true_positives, count_false_negatives):\n return count_true_positives / (count_true_positives + count_false_negatives)", "title": "" }, { "docid": "3e55c8804a2f3098cd0085bbe175d27a", "score": "0.43185732", "text": "def empirically_results(numberOfSets, numberOfPoints):\r\n\r\n aux = 0\r\n for i in range(numberOfSets):\r\n aux += multivariate_gaussian_distribution(numberOfPoints)\r\n\r\n per = aux/numberOfSets\r\n return per", "title": "" }, { "docid": "f603239eb9461814d1fa44791f5a6063", "score": "0.4316839", "text": "def compute_similarity(self, data_point):\n result = []\n\n for i, feat in enumerate(self._indexing_structure):\n total = 0\n for j, val in enumerate(data_point):\n total += math.pow(val - feat[j], 2)\n\n if total == 0.0:\n result.append(Stat(self._id_map[i], 1.0))\n\n return result", "title": "" }, { "docid": "8bc157a6807681c3d1157fe761aa1c4b", "score": "0.43119085", "text": "def score(self, sentence):\n score = 0.0\n previousWord = \"\"\n v = len(self.unigramCount.items())\n for word in sentence:\n count = self.bigramCount[previousWord][word] + 1\n tot = self.unigramCount[word] + v\n score += math.log(count/tot)\n previousWord = word\n return score", "title": "" }, { "docid": "d7876c7de6f6ac18f7e446a298df6dc5", "score": "0.43083557", "text": "def calc_ravens_DV(df):\n df = df.query('stim_response == stim_response').reset_index()\n dvs = calc_common_stats(df)\n dvs['score'] = df['score_response'].sum()\n description = 'Score is the number of correct responses out of 18'\n return dvs,description", "title": "" }, { "docid": "9d4a1c9d91283d8f060e8316e7048276", "score": "0.4307877", "text": "def similarity_scores(self, other):\r\n a = compare_dictionaries(other.words, self.words)\r\n b = compare_dictionaries(other.word_lengths, self.word_lengths)\r\n c = compare_dictionaries(other.stems, self.stems)\r\n d = compare_dictionaries(other.sentence_lengths, self.sentence_lengths)\r\n e = compare_dictionaries(other.first_person, self.first_person)\r\n return [a,b,c,d,e]", "title": "" }, { "docid": "4a7bd400b05c08d333d5cee0b6820de2", "score": "0.4305049", "text": "def _compute_rating(self, cand):\n return cand.gaussfit.mu", "title": "" }, { "docid": "bb8e5b893aa1fa7807d21b6240cb2126", "score": "0.43044126", "text": "def avg_currents(currents, voltages):\n u_voltages = unique(voltages)\n u_current = []\n for v in u_voltages:\n u_indices = [i for i, x in enumerate(voltages) if x == v]\n u_current.append(np.mean(currents[u_indices]))\n return np.asarray([u_current, u_voltages])", "title": "" }, { "docid": "4df411e59da343ba66cf215748b93d20", "score": "0.43038478", "text": "def calc_dale_score(doc_tokens, doc_sent_cnt, dale_list):\n simple_cnt = len([x for x in doc_tokens if x.lower() in dale_list])\n total_cnt = len(doc_tokens) * 1.0\n if total_cnt == 0:\n return 0, 0\n\n pdw = 1 - simple_cnt / total_cnt\n asl = total_cnt / doc_sent_cnt\n score = 0.1579 * pdw + 0.0496 * asl\n if pdw > 0.05:\n score = score + 3.6365\n\n text = ''.join(doc_tokens)\n sy_cnt = syllable_count(text)\n asw = sy_cnt / total_cnt\n # score2 = 206.835 - 1.015*asl-84.6*asw\n\n return (score, asw)", "title": "" }, { "docid": "e5b4d4a5e7d12161266f034c6342e3f9", "score": "0.43017104", "text": "def assist_difference(data):\r\n 
winner = tools.winner_loser(data)[0]\r\n loser = tools.winner_loser(data)[1]\r\n win = winner.groupby(\"match_id\")[\"assists\"].mean()\r\n lose = loser.groupby(\"match_id\")[\"assists\"].mean()\r\n difference = (win - lose).mean()\r\n return round(difference)", "title": "" } ]
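Note: as an aside to the similarity-scoring passages listed above, here is a minimal standalone sketch of cosine similarity over two sparse rating dictionaries. The helper name and the dict-of-ratings representation are assumptions for illustration only and are not taken from any passage in this set.

import math

def cosine_similarity(ratings_a: dict, ratings_b: dict) -> float:
    # Dot product over the items both users rated, divided by the product of magnitudes.
    shared = set(ratings_a) & set(ratings_b)
    dot = sum(ratings_a[m] * ratings_b[m] for m in shared)
    mag_a = math.sqrt(sum(v * v for v in ratings_a.values()))
    mag_b = math.sqrt(sum(v * v for v in ratings_b.values()))
    return dot / (mag_a * mag_b) if mag_a and mag_b else 0.0

# Example usage:
# cosine_similarity({"m1": 4, "m2": 5}, {"m1": 5, "m3": 3})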
cfbeb513e67f6e0c0a154e6f1fb50c54
Get an 8 bit signed integer
[ { "docid": "d32642cc31fd09a40962ab0b8d4a519a", "score": "0.64202243", "text": "def get_s8(self):\n val = self._data[self._idx]\n self._idx += 1\n return val", "title": "" } ]
[ { "docid": "873e8c65023e1951d2bc1b4b6b02b33a", "score": "0.7569413", "text": "def int8(self) -> int:\n raise NotImplementedError(\"int8 not implemented\")", "title": "" }, { "docid": "f767d287d3deabfa858f95dfa2f88309", "score": "0.71418214", "text": "def uint8(self) -> int:\n raise NotImplementedError(\"uint8 not implemented\")", "title": "" }, { "docid": "bf2db25352ca6fc65c4d871941ca9b97", "score": "0.71186", "text": "def signed_int(x):\n if (x&0x8):\n return (x - 16)\n else:\n return x", "title": "" }, { "docid": "4001137948dfd2e5678e547c501399dd", "score": "0.70181096", "text": "def read_signed_byte(self) -> int:\n value = self.read_bits(8)\n if value & 0x80:\n # 2er complement\n return -((~value & 0xFF) + 1)\n else:\n return value", "title": "" }, { "docid": "bfbcf25bc8c53e9ffd02f3a832d206a8", "score": "0.68518984", "text": "def int8_from_byte(byte):\n if byte > 127:\n return (256 - byte) * (-1)\n else:\n return byte", "title": "" }, { "docid": "e22b406587b326c951236ed1948a9ea2", "score": "0.68179125", "text": "def convert_int8(self, input_value):\n return self._convert_integer(\n format_string='!b',\n input_value=input_value\n )", "title": "" }, { "docid": "875b7e0a40b2f74fa56dd60121809c29", "score": "0.66726047", "text": "def int_(value: int) -> bytearray:\n value_ = abs(value * 2)\n if value < 0:\n value_ -= 1\n return uint(value_)", "title": "" }, { "docid": "3fcb608a2d09f4f4d68de8f687b970f6", "score": "0.6591929", "text": "def get_varint(data):\n\tscale = ret = 0\n\tbyte = 128\n\twhile byte > 127:\n\t\tbyte = data.get(1)[0]\n\t\tret |= (byte&127) << scale\n\t\tscale += 7\n\treturn ret", "title": "" }, { "docid": "aab404201b4a0e4bf6addb468aaade28", "score": "0.6573359", "text": "def byte8(value):\n return int.to_bytes(value & 0xFF, 1, byteorder=\"little\")", "title": "" }, { "docid": "3a4f70eedec7ef536b657eba319de4ba", "score": "0.64857", "text": "def _readS8(self, register):\n result = self._readU8(register)\n if result > 127:\n result -= 256\n return result", "title": "" }, { "docid": "d601f26ae7a5e08d1a1d27e9cd1e787e", "score": "0.6461508", "text": "def sign_bit(size):\n\n return 1 << (size - 1)", "title": "" }, { "docid": "b3e8abdb874fb2f5436436c569026447", "score": "0.6312502", "text": "def read8(self):\n self._input_mask = 0xFF\n self._output = 0\n self._write()\n self._read()\n return self._input", "title": "" }, { "docid": "2e270d99b08481173a38e4b2d598c9a1", "score": "0.630126", "text": "def extract_uint8(self):\n self.__unpack_number()", "title": "" }, { "docid": "0c1b002d37638fcd670332c24ee02c30", "score": "0.629708", "text": "def get_u8(self):\n val = struct.unpack('<B', self._data[self._idx:self._idx+1])[0]\n self._idx += 1\n return val", "title": "" }, { "docid": "fb31f14f1430636da7d6ecd1d661aec7", "score": "0.62826276", "text": "def _extract_8_bits(long_value, shift=1):\n\n bitmask = (1 << 8 * shift) - 1\n\n return (long_value & bitmask) >> (8 * (shift-1))", "title": "" }, { "docid": "700dc2d9b9142e4b5df3fdaebf55e554", "score": "0.6228148", "text": "def read_unsigned_byte(self) -> int:\n return self.read_bits(8)", "title": "" }, { "docid": "175dd17eb23582cb6417d3809cbef398", "score": "0.62197787", "text": "def asInt(f):\n bytes = [ord(c) for c in struct.pack(\">f\", f)]\n v = 0\n while bytes:\n v = (v << 8) | bytes.pop(0)\n if v >= 0x80000000:\n v = 0x80000000 - v\n return v", "title": "" }, { "docid": "da599aad75dd0352a3f9a2a45aaa77f3", "score": "0.62167776", "text": "def _getint(self):\n ui = self.uint\n if ui < (1 << (self.len - 1)):\n # Top bit not set - must be 
positive\n return ui\n tmp = (~(ui - 1)) & ((1 << self.len) - 1)\n return -tmp", "title": "" }, { "docid": "34b32416e3fdeb4fe7eaeac6ecbba726", "score": "0.62122643", "text": "def get_byte(number: int, i: int):\n return (number & (0xff << (i * 8))) >> (i * 8)", "title": "" }, { "docid": "1ae6c5cb8b2d71f0ce7cee3f714aa066", "score": "0.61896867", "text": "def int(msb, lsb):\n\txx = (msb << 8) + lsb\n\treturn xx - 0xffff if xx > 0x7fff else xx", "title": "" }, { "docid": "22ea5ad492636a676fed7fb0ff01ac85", "score": "0.6186212", "text": "def int_from_bytes(value, signed=False):\n\n if value == b'':\n return 0\n\n num = long(value.encode(\"hex\"), 16) # noqa\n\n if not signed:\n return num\n\n # Check for sign bit and handle two's complement\n if ord(value[0:1]) & 0x80:\n bit_len = len(value) * 8\n return num - (1 << bit_len)\n\n return num", "title": "" }, { "docid": "7d467fa55b88a088629d5721e2be4299", "score": "0.6182359", "text": "def data2uint8(self) -> int:\n return struct.unpack(\">B\", self.data)[0]", "title": "" }, { "docid": "ced3c9431486ddb1d7295a9f8591c54e", "score": "0.61489403", "text": "def convert_uint8(self, input_value):\n return self._convert_integer(\n format_string='!B',\n input_value=input_value\n )", "title": "" }, { "docid": "b617a24f47f3b429364eab690475af77", "score": "0.61355186", "text": "def read_signed_long(self) -> int:\n value = self.read_unsigned_long()\n if value & 0x80000000:\n # 2er complement\n return -((~value & 0xFFFFFFFF) + 1)\n else:\n return value", "title": "" }, { "docid": "f37976642280344281d577afcf41f01e", "score": "0.6116668", "text": "def binary_to_int(b, signed=False):\n\n return b.int if signed else b.uint", "title": "" }, { "docid": "a788af9abc9d5342fa90a29f1ccac92e", "score": "0.609523", "text": "def _read_uint8(chunk: Chunk) -> int:\n return struct.unpack('B', chunk.read(1))[0]", "title": "" }, { "docid": "3a0970a5eb38e84d4eac58ed37164df6", "score": "0.60910374", "text": "def read_short_int(self):\n # My *guess* is that they're all signed\n t = self.r.read(2)\n if len(t) != 2:\n self.eof = True\n return 0\n return struct.unpack(\"<h\", t)[0]", "title": "" }, { "docid": "2bcdb33caeb1b270eca5fc61d9624c7d", "score": "0.6085187", "text": "def read_int(self):\n # My *guess* is that they're all signed\n t = self.r.read(4)\n if len(t) != 4:\n self.eof = True\n return 0\n return struct.unpack(\"<i\", t)[0]", "title": "" }, { "docid": "a958aa0f038405842a3d536e46bae66d", "score": "0.607047", "text": "def unsigned_16_to_signed(value):\n return ((value) & 0x7FFF) - (0x8000 & (value))", "title": "" }, { "docid": "df56b859c62602dbfc57d33675d56755", "score": "0.60553396", "text": "def unpack_int8(self, offset):\n o = self._offset + offset\n try:\n return struct.unpack_from(\"<b\", self._buf, o)[0]\n except struct.error:\n raise OverrunBufferException(o, len(self._buf))", "title": "" }, { "docid": "c6898637e709a9119d150175011e1c33", "score": "0.6013701", "text": "def WriteInt8(self, value, endian=\"<\"):\n return self.pack('%sb' % endian, value)", "title": "" }, { "docid": "21c4858280c1b3d09007b72b93a6224c", "score": "0.60106254", "text": "def Get8(self):\n if self.idx >= self.limit:\n raise message.DecodeError('truncated')\n c = self.buf[self.idx]\n self.idx += 1\n return c", "title": "" }, { "docid": "779d72a33d93cad42c47ac19cf19d829", "score": "0.59961575", "text": "def mapIntensityToUint8(self, I):\n return np.where(I <= 0, 0, np.where(I < self._uint8Max, I, self._uint8Max))", "title": "" }, { "docid": "65b5d748b6a577f7fd5a3997a1cf85da", "score": 
"0.5979041", "text": "def _get_int(self, pop=True):\n x = self._stack[-1]\n if pop:\n self._stack.pop()\n\n if isinstance(x, bytes):\n if x:\n # Handle bitcoin's weird negative representation\n negative = x[-1] & 0x80\n _x = x[0:-1] + bytes([x[-1] & 0x7f])\n x = int.from_bytes(_x, byteorder='little')\n if negative:\n x = -x\n else:\n # Null-length byte vector is positive 0\n x = 0\n\n return x", "title": "" }, { "docid": "178a655d6ce65fb88f24ac93a3cca334", "score": "0.5970958", "text": "def convertToUINT8(x):\r\n\treturn skimage.img_as_ubyte(x / np.amax(x))", "title": "" }, { "docid": "5562128823273205e794ef9230424d6b", "score": "0.5943872", "text": "def revert_int8(self, input_value):\n return self._revert_integer(\n format_string='!b',\n input_value=input_value\n )", "title": "" }, { "docid": "174428233ac2989dd06a6736ad597185", "score": "0.5887253", "text": "def valuebits(value):\n if value >= 0 or type(value) != int:\n raise RuntimeError(\"valuebits only on -ve int at moment\")\n\n if value == -1: # always 0xFF, so always needs exactly 2 bits to represent (sign and value)\n return 2 # bits required\n # #trace(\"valuebits of:%d\" % value)\n # Turn into a 2's complement representation\n MAXBYTES = 15\n MAXBITS = 1 << (MAXBYTES * 8)\n # TODO: check for truncation?\n value = value & MAXBITS - 1\n # #trace(\"hex:%s\" % hex(value))\n highz = Value.highestClearBit(value, MAXBYTES * 8)\n # #trace(\"highz at bit:%d\" % highz)\n # allow for a sign bit, and bit numbering from zero\n neededbits = highz + 2\n\n # #trace(\"needed bits:%d\" % neededbits)\n return neededbits", "title": "" }, { "docid": "7e6dce736f3239ddd2a4fe4fd3d01698", "score": "0.5878506", "text": "def _eight_byte_real(value):\n byte1 = 0\n byte2 = 0\n short3 = 0\n long4 = 0\n if value != 0:\n if value < 0:\n byte1 = 0x80\n value = -value\n exponent = int(numpy.floor(numpy.log2(value) * 0.25))\n mantissa = long(value * 16L ** (14 - exponent))\n while mantissa >= 72057594037927936L:\n exponent += 1\n mantissa = long(value * 16L ** (14 - exponent))\n byte1 += exponent + 64\n byte2 = (mantissa // 281474976710656L)\n short3 = (mantissa % 281474976710656L) // 4294967296L\n long4 = mantissa % 4294967296L\n return struct.pack(\">HHL\", byte1 * 256 + byte2, short3, long4)", "title": "" }, { "docid": "7d4f6d15748d6ebd8eb95bb56d39d1fd", "score": "0.58707607", "text": "def int_get_bit(value, pos):\n \n assert(pos <= value.bit_length)\n bit = (value >> pos) % 2\n return bit", "title": "" }, { "docid": "2fa9e63655ca8dda5cb62071ab2eb241", "score": "0.5869416", "text": "def int8(context=None):\n if context is not None:\n return Type(lib.LLVMInt8TypeInContext(context))\n else:\n return Type(lib.LLVMInt8Type())", "title": "" }, { "docid": "f985a75d74583d06e1ac6a1f4cfe6eaf", "score": "0.58480257", "text": "def int_from_bytes(value, signed=False):\n\n return int.from_bytes(value, 'big', signed=signed)", "title": "" }, { "docid": "f69f86b774676c6894a803ca6450d8f8", "score": "0.58458906", "text": "def signed2int(num, nbits=32):\r\n if signed_is_negative(num, nbits):\r\n return num - 2 ** nbits\r\n else:\r\n return num", "title": "" }, { "docid": "1be52cb400e78442c8de78d4443899cf", "score": "0.5844063", "text": "def uint8(self, bitstream=None):\n if bitstream is None:\n bitstream = self.bitstream\n\n if bitstream is None:\n raise HKEBitstreamError\n\n uint8 = bitstream.read('uintle:8')\n return uint8", "title": "" }, { "docid": "1985b256f832608947b242bbefdde6a3", "score": "0.5827225", "text": "def int2signed(num, nbits=32):\r\n if num < 0:\r\n return 2 
** nbits + num\r\n else:\r\n return num", "title": "" }, { "docid": "0090e5e21e5f8605b798b0f2a105b907", "score": "0.58154464", "text": "def _lsbToInt(self,lsbValue):\n return struct.unpack('<h',lsbValue)[0]", "title": "" }, { "docid": "c89152ed96aab38213a09ae7f24ba968", "score": "0.58068717", "text": "def uint(value: int) -> bytearray:\n if value < 0:\n error = 'unsigned int cannot be negative: {0}'.format(value)\n logging.exception(error)\n raise PackingError(error)\n result = bytearray()\n result.insert(0, value & 127)\n value >>= 7\n while value:\n result.insert(0, value & 127 | 128)\n value >>= 7\n return result", "title": "" }, { "docid": "b1868cb93f109723fb22b561137b2f4e", "score": "0.5783882", "text": "def read_signed_short(self) -> int:\n value = self.read_unsigned_short()\n if value & 0x8000:\n # 2er complement\n return -((~value & 0xFFFF) + 1)\n else:\n return value", "title": "" }, { "docid": "e30494e1356c3650387db8c0e9c57f95", "score": "0.5773407", "text": "def decode_signed_int(string):\n print(string)\n hex_code = bytes.fromhex(string)\n unpacked = struct.unpack('>i', hex_code)\n return unpacked[0]", "title": "" }, { "docid": "602167b7ac2e9e5a5e4f4ffb06bb603b", "score": "0.57654387", "text": "def byteValue(self, pos: int) -> int:\n\n if pos < 0:\n raise ValueError(\"bit position must be non-negative\")\n\n if self.value == 0:\n return 0\n return 2**pos", "title": "" }, { "docid": "bd0f20e67cf273fb31e8ba2030cbde28", "score": "0.5762882", "text": "def int2fixed(i):\n return i << 16", "title": "" }, { "docid": "0b78039321d909ed8ef64dacb5d5d7ad", "score": "0.5751555", "text": "def binaryToInt(value):\n return int(value, 2)", "title": "" }, { "docid": "6fad7e05a68049ce1570f1851e5aef1a", "score": "0.5750566", "text": "def int_to_bytes(value, signed=False, width=None):\n\n if width is None:\n if signed:\n if value < 0:\n bits_required = abs(value + 1).bit_length()\n else:\n bits_required = value.bit_length()\n if bits_required % 8 == 0:\n bits_required += 1\n else:\n bits_required = value.bit_length()\n width = math.ceil(bits_required / 8) or 1\n return value.to_bytes(width, byteorder='big', signed=signed)", "title": "" }, { "docid": "2eb702679aeb6733f6774b0a88ce3914", "score": "0.57162386", "text": "def byte2int(b):\n return struct.unpack('B', b)[0]", "title": "" }, { "docid": "7999b98830a61c6be020ad75b1da1b65", "score": "0.57134986", "text": "def _to_int(s):\n value = 0\n for offset, c in enumerate(iterbytes(s[::-1])):\n value += c << offset * 8\n return value", "title": "" }, { "docid": "326342126a1f2289bf964ff048a0211b", "score": "0.5701947", "text": "def _to_8bit(self, image):\n\n image = (image - image.min())/image.ptp()\n image *= 255\n return image.astype('uint8')", "title": "" }, { "docid": "a3f688a40a23c18c1fe7878c21f669b2", "score": "0.56976086", "text": "def convert_bytes_to_bits(_bytes):\n if bytes is None:\n return None\n return int(_bytes) * 8", "title": "" }, { "docid": "258190e4b56c9cfcdcb15219750a6e1b", "score": "0.56889373", "text": "def signed(value, depth):\r\n mask = (1 << (depth*8)) - 1\r\n if value > ((1 << (depth*8)-1) - 1):\r\n return -(~(value-1) & mask)\r\n else:\r\n return value", "title": "" }, { "docid": "b43d5db0e87c3ba1978819e423c4c704", "score": "0.56843114", "text": "def __int__(self):\n return int.from_bytes(self, byteorder=\"big\")", "title": "" }, { "docid": "8e209362e40c10d0dcf776c641e50b44", "score": "0.5675738", "text": "def VarIntToInt(d):\n\tb = d[:8]\n\tlen_hdr = bin(b[0])[2:].rjust(8,\"0\")\n\tl = len_hdr.find(\"1\") + 1\n\tvarint = 
b[:l]\n\ti_s = bytes([varint[0] ^ (1 << (8 - l))]) + varint[1:l]\n\ti = int.from_bytes(i_s, \"big\")\n\treturn i, l", "title": "" }, { "docid": "49909c880a7bbae0ccb6f4d959a594bf", "score": "0.5670728", "text": "def unsigned2signed(uint, bitnum):\n return uint - (uint >> (bitnum-1)) * 2**bitnum", "title": "" }, { "docid": "b7ab303dc82e800de9d5b0cc6f5718b8", "score": "0.5664659", "text": "def bytes_to_int_big_endian(data: bytes) -> int:\n res = 0\n for x in data:\n res = (res << 8) | x\n return res", "title": "" }, { "docid": "4591b1012f3b76d9614b4d1c758b0353", "score": "0.56620586", "text": "def getUnsignedValue(value: java.lang.Number) -> java.lang.Number:\n ...", "title": "" }, { "docid": "681e8345aed00c066a0098a763d7ba25", "score": "0.5660721", "text": "def signed1(value):\r\n return signed(value, 1)", "title": "" }, { "docid": "fd6096bf34dfdb5eab666e11ea0e8894", "score": "0.5646786", "text": "def _intToUnsignedLong(x):\n if x < 0:\n x += (sys.maxint + 1) << 1\n return x", "title": "" }, { "docid": "6e4a4b9847821b7648a9850811ce2e48", "score": "0.5638674", "text": "def _to_int(x):\n if x > 0x7FFFFFFF:\n return int(x - 0x100000000)\n else:\n return int(x)", "title": "" }, { "docid": "3b3b001c86955cb4756d11b72c858721", "score": "0.5636391", "text": "def bytes_to_int(bytes_data):\n result = struct.unpack('<i', bytes_data)[0]\n return result", "title": "" }, { "docid": "c3f6f428ad3ada38867f72bd92d15323", "score": "0.56336844", "text": "def packIntegerAsULong(value):\n return struct.pack('H', value)", "title": "" }, { "docid": "b71354300ed6ea130fa75ba2bfc7e8b9", "score": "0.56324893", "text": "def int_to_bytes(value, signed=False, width=None):\n\n if value == 0 and width == 0:\n return b''\n\n # Handle negatives in two's complement\n is_neg = False\n if signed and value < 0:\n is_neg = True\n bits = int(math.ceil(len('%x' % abs(value)) / 2.0) * 8)\n value = (value + (1 << bits)) % (1 << bits)\n\n hex_str = '%x' % value\n if len(hex_str) & 1:\n hex_str = '0' + hex_str\n\n output = hex_str.decode('hex')\n\n if signed and not is_neg and ord(output[0:1]) & 0x80:\n output = b'\\x00' + output\n\n if width is not None:\n if len(output) > width:\n raise OverflowError('int too big to convert')\n if is_neg:\n pad_char = b'\\xFF'\n else:\n pad_char = b'\\x00'\n output = (pad_char * (width - len(output))) + output\n elif is_neg and ord(output[0:1]) & 0x80 == 0:\n output = b'\\xFF' + output\n\n return output", "title": "" }, { "docid": "955d8f97dd3fc56f64a8aa237a5b4597", "score": "0.5621043", "text": "def counter_wrap(self):\n v = self.read(self.value_offset+0x8)\n return v&0x3FFFFFFF", "title": "" }, { "docid": "564e8a0dc583c56798e288ec277dc8f6", "score": "0.5615564", "text": "def min(cls) -> int:\n if cls.signed():\n return -(2 ** (cls.bit_size() - 1))\n else:\n return 0", "title": "" }, { "docid": "9f3ad1b89cff7f8d23311396b919432a", "score": "0.5610093", "text": "def unsigned(number):\n return number * 2 + 1", "title": "" }, { "docid": "0c5a2073ebf1f59786d491758f8e95c8", "score": "0.5598671", "text": "def test_int_to_bytes(self):\n byte1 = int_to_bytes(1)\n #if we unpack the bytes as a unsigned integer, we should get the same value\n self.assertEqual(unpack('I', byte1)[0], 1)\n #test out 0\n byte0 = int_to_bytes(0)\n self.assertEqual(unpack('I', byte0)[0], 0)\n #test out max signed 32 bit int\n byte_max_32 = int_to_bytes(2**31 -1)\n self.assertEqual(unpack('I', byte_max_32)[0], 2**31 -1)\n #test out max unsigned 32 bit int\n byte_max_u32 = int_to_bytes(2**32 -1)\n self.assertEqual(unpack('I', 
byte_max_u32)[0], 2**32 -1)", "title": "" }, { "docid": "0c5a2073ebf1f59786d491758f8e95c8", "score": "0.5598671", "text": "def test_int_to_bytes(self):\n byte1 = int_to_bytes(1)\n #if we unpack the bytes as a unsigned integer, we should get the same value\n self.assertEqual(unpack('I', byte1)[0], 1)\n #test out 0\n byte0 = int_to_bytes(0)\n self.assertEqual(unpack('I', byte0)[0], 0)\n #test out max signed 32 bit int\n byte_max_32 = int_to_bytes(2**31 -1)\n self.assertEqual(unpack('I', byte_max_32)[0], 2**31 -1)\n #test out max unsigned 32 bit int\n byte_max_u32 = int_to_bytes(2**32 -1)\n self.assertEqual(unpack('I', byte_max_u32)[0], 2**32 -1)", "title": "" }, { "docid": "523136d41295e711f83bfb50329c261c", "score": "0.5596242", "text": "def as_uint8(arr):\n return np.uint8(255*(arr - arr.min())/arr.ptp())", "title": "" }, { "docid": "9859df6d2abf58b009ea2f67bf4ee848", "score": "0.55923146", "text": "def _as_unsigned(inp: np.ndarray):\n dtype = inp.dtype\n if dtype not in _SIGNED_TO_UNSIGNED:\n return inp\n lastbit = np.iinfo(dtype).max + 1\n return inp.astype(_SIGNED_TO_UNSIGNED[dtype]) ^ lastbit", "title": "" }, { "docid": "3f4b784ddb6077b885f5f53e6065460d", "score": "0.5592244", "text": "def read_short_int(self):\n data = self.read_bytes(2)\n if len(data) < 2:\n print(\"Data read not long enough, returning 0\")\n return 0\n return struct.unpack(\"<H\", data)[0]", "title": "" }, { "docid": "4442458cb3dbdb9accb2a37b6028dcae", "score": "0.5581766", "text": "def _getuint(self):\n if not self:\n raise ValueError(\n \"An empty BitString cannot be interpreted as an integer.\"\n )\n # Special case if the datastore is only one byte long.\n if self._datastore.bytelength == 1:\n mask = ((1 << self.len) - 1) << (8 - self.len - self._offset)\n val = self._datastore[0] & mask\n val >>= 8 - self._offset - self.len\n return val\n # Take the bits in the first byte and shift them to their final position\n firstbits = 8 - self._offset\n mask = (1 << firstbits) - 1\n shift = self.len - firstbits\n val = (self._datastore[0] & mask) << shift\n # For the middle of the data we use struct.unpack to do the conversion\n # as it's more efficient. 
This loop only gets invoked if the BitString's\n # data is more than 10 bytes.\n j = 1\n structsize = struct.calcsize(\"Q\")\n end = self._datastore.bytelength - 1\n # TODO: This loop could be done with a single struct.unpack (probably more efficient).\n while j + structsize < end:\n shift -= 8 * structsize\n # Convert next 8 bytes to an int, then shift it to proper place\n # and add it\n d = self._datastore[j : j + structsize].tostring()\n val += struct.unpack(\">Q\", d)[0] << shift\n j += structsize\n # Do the remaining bytes, except for the final one\n while j < end:\n shift -= 8\n val += self._datastore[j] << shift\n j += 1\n # And the very final byte\n assert shift <= 8\n bitsleft = (self._offset + self.len) % 8\n if bitsleft == 0:\n bitsleft = 8\n lastbyte = self._datastore[-1]\n mask = 255 - ((1 << (8 - bitsleft)) - 1)\n val += (lastbyte & mask) >> (8 - bitsleft)\n return val", "title": "" }, { "docid": "b2b53219641f151f2fedbc9eaa7b1355", "score": "0.55765456", "text": "def long_long_int(value):\n try:\n return 8, struct.unpack('>q', value[0:8])[0]\n except TypeError:\n raise ValueError('Could not unpack data')", "title": "" }, { "docid": "6e718660128284243e41a5b68dce9a3d", "score": "0.5572538", "text": "def sign_extend(num):\n num |= (-1 & ~0xFF) if (num & 0x80) else 0\n return num", "title": "" }, { "docid": "c91eaa6eef55f6ef3008131925a9c9b7", "score": "0.5556319", "text": "def PackInt1(i):\n return chr(i & 255)", "title": "" }, { "docid": "4ab7124c117183f2d8e8175691fd069f", "score": "0.55555654", "text": "def bits2int(bits):\r\n x = 0\r\n for i in range(len(bits)):\r\n x |= bits[i] << i\r\n return x", "title": "" }, { "docid": "c930e8965f08456c817fc996ce376892", "score": "0.5550721", "text": "def i32le(c, o=0):\n return unpack_from(\"<I\", c, o)[0]", "title": "" }, { "docid": "7dd95bcaa9eb1ff037846ae3d1085fc4", "score": "0.55475175", "text": "def _get_bit_value(n, p):\r\n return (n >> p) & 1", "title": "" }, { "docid": "b9eb73222098ed21983230d44ddfed35", "score": "0.5540538", "text": "def tobyte(i):\n i = int(i)\n if i < 0: i = 0\n if i > 255: i = 255\n return i", "title": "" }, { "docid": "9059726dfbeaf2072498edf571a848f9", "score": "0.5534853", "text": "def h2i(a):\n return int(a, 16)", "title": "" }, { "docid": "4d8761a440010810669656bd239754b0", "score": "0.5534257", "text": "def revert_uint8(self, input_value):\n return self._revert_integer(\n format_string='!B',\n input_value=input_value\n )", "title": "" }, { "docid": "1684e81ba1cf0707a87eb163115bc656", "score": "0.5533726", "text": "def __bytes__(self):\n if self == 0:\n return self.to_bytes(1, 'little')\n\n value = int(self)\n bytes_ = b''\n\n while True:\n temp = value & 0b01111111\n value = rshift(value, 7)\n\n if value:\n temp |= 0b10000000\n\n bytes_ += temp.to_bytes(1, 'little')\n\n if not value:\n break\n\n return bytes_", "title": "" }, { "docid": "9e18c88d5194de802c48e2f83b1d16b3", "score": "0.5533305", "text": "def get_bit(byte, bit_num):\r\n return (byte & (1 << bit_num)) >> bit_num", "title": "" }, { "docid": "96771f3546897c45c4aed2fdd4b5a2a7", "score": "0.5530059", "text": "def uint32(x):\n return x & 0xffffffff", "title": "" }, { "docid": "ee48e9f85f666c2827a4e70474612ca4", "score": "0.5526275", "text": "def bin2int(s, signed=False):\n\t# check signed / unsigned flag\n\tif signed:\n\t\t# check the most significant bit to test negativity\n\t\tif s[0] == \"1\":\n\t\t\tinverse = \"\"\n\t\t\t# invert all bits\n\t\t\tfor c in s[1:]:\n\t\t\t\tif c == \"0\":\n\t\t\t\t\tinverse += 
\"1\"\n\t\t\t\telse:\n\t\t\t\t\tinverse += \"0\"\n\t\t\t# convert to int and add 1\n\t\t\tval = int(inverse,2) + 1\n\t\t\t# return negative value\n\t\t\treturn -val\n\t\telse:\n\t\t\treturn int(s,2)\n\telse:\n\t\treturn int(s,2)", "title": "" }, { "docid": "e5c036ca6392df563fd8fea03a790c7a", "score": "0.5521852", "text": "def getElement(self, index: 'uint32_t') -> \"signed char\":\n return _PolyVoxCore.Vector3Dint8_t_getElement(self, index)", "title": "" }, { "docid": "6f7e2e70be2c6e48a993f7ad6eed5cf8", "score": "0.55200213", "text": "def int2byte(i):\n return struct.pack('B', i)", "title": "" }, { "docid": "68fa04215c1a271c4aee426ccd3716ba", "score": "0.55184144", "text": "def get_bit(byte, bit_num):\n return (byte & (1 << bit_num)) >> bit_num", "title": "" }, { "docid": "bd13286ad46247eb3b45a0edd1a3a551", "score": "0.55049324", "text": "def __int_to_bytes(n):\n # This is similar to Java's ByteBuffer#putLong\n return n.to_bytes(8, \"big\")", "title": "" }, { "docid": "f8ffc72490f9e04d59c39ad269b37b8f", "score": "0.54943144", "text": "def read_unsigned_short(self) -> int:\n if self.bit_index & 7:\n s1 = self.read_bits(8)\n s2 = self.read_bits(8)\n else: # aligned data\n s1, s2 = self.read_aligned_bytes(2)\n return (s2 << 8) + s1", "title": "" }, { "docid": "8713b09905b8221a8a264e8f6e63b7fb", "score": "0.5493285", "text": "def popcount32b(self, x: int) -> int:\n x -= (x >> 1) & 0x55555555\n x = (x & 0x33333333) + ((x >> 2) & 0x33333333)\n x = (x + (x >> 4)) & 0x0f0f0f0f\n x += x >> 8\n x += x >> 16\n return x & 0x7f", "title": "" }, { "docid": "096dcf1b9d20e190eaf4957dc8d517fa", "score": "0.5481578", "text": "def bsr(value, bits):\n minint = -2147483648\n if bits == 0:\n return value\n elif bits == 31:\n if value & minint:\n return 1\n else:\n return 0\n elif bits < 0 or bits > 31:\n raise ValueError('bad shift count')\n tmp = (value & 0x7FFFFFFE) // 2**bits\n if (value & minint):\n return (tmp | (0x40000000 // 2**(bits-1)))\n else:\n return tmp", "title": "" }, { "docid": "096dcf1b9d20e190eaf4957dc8d517fa", "score": "0.5481578", "text": "def bsr(value, bits):\n minint = -2147483648\n if bits == 0:\n return value\n elif bits == 31:\n if value & minint:\n return 1\n else:\n return 0\n elif bits < 0 or bits > 31:\n raise ValueError('bad shift count')\n tmp = (value & 0x7FFFFFFE) // 2**bits\n if (value & minint):\n return (tmp | (0x40000000 // 2**(bits-1)))\n else:\n return tmp", "title": "" }, { "docid": "68b5bd65b3421809dbe00c3d5a659b3a", "score": "0.5475897", "text": "def GetVarInt64(self):\n b = self.Get8()\n if b >= _OFFSET and b <= _POS_OFFSET:\n return b - _OFFSET + _MIN_INLINE\n if b == 0:\n return None\n\n if b < _OFFSET:\n negative = True\n num_bytes = _OFFSET - b\n else:\n negative = False\n num_bytes = b - _POS_OFFSET\n\n ret = 0\n for _ in range(num_bytes):\n b = self.Get8()\n if negative:\n b = _MAX_UNSIGNED_BYTE - b\n ret = ret << 8 | b\n\n if negative:\n return _MIN_INLINE - ret\n else:\n return ret + _MAX_INLINE", "title": "" }, { "docid": "18c2323cad5499ee1dafe55cf7cdb5c9", "score": "0.5475547", "text": "def int_to_bytes(n, minlen=0): # Helper function\n nbits = n.bit_length() + (1 if n < 0 else 0) # +1 for any sign bit.\n nbytes = (nbits+7) // 8 # Number of whole bytes.\n b = bytearray()\n for _ in range(nbytes):\n b.append(n & 0xff)\n n >>= 8\n if minlen and len(b) < minlen: # Zero padding needed?\n b.extend([0] * (minlen-len(b)))\n return bytearray(reversed(b)) # High bytes first", "title": "" } ]
142f468e0a664f8f5b2526e1efbf8876
Requests the service to restart after a specified delay, in seconds
[ { "docid": "611a590f87f39170cdf624474b8a1340", "score": "0.7568714", "text": "def restart():\n info = request.get_json() or {}\n delay_secs = int(info.get('delay', 0))\n\n t = threading.Timer(delay_secs, update_trigger_file)\n t.start()\n\n return jsonify(SUCCESS)", "title": "" } ]
[ { "docid": "ac9fe64f3c901f266ac194c718e1cf1d", "score": "0.68044424", "text": "def restart():\n restart_service(SERVICE)\n status()", "title": "" }, { "docid": "3178091b7c5a01776e80f1a9c9a4af66", "score": "0.67897755", "text": "def restart(service, timeout=120):\n service.restart()\n sleep(5) # Wait for service to notice restart\n secs = 0\n while secs < timeout:\n try:\n service.login() # Awake yet?\n return\n except:\n sleep(2)\n secs -= 2 # Approximately\n raise Exception, \"Operation timed out.\"", "title": "" }, { "docid": "1196f8fa279e4d25de68de4af55e92f2", "score": "0.67354226", "text": "def delayed_restart(cls,minutes, message=\"\"):\n\n minutes = int(minutes)\n\n if minutes not in [5,10,30,60]:\n err = 'Unable to do delayed restart. Valid waiting times: 5, 10, 30, 60'\n out(err)\n return False, err\n\n\n def delayed_message(minutes,message=\"\"):\n if minutes == 60:\n cls.broadcast(Lang.get('restart_default').format('60 minutes',message), cls.response_callback_response_only)\n time.sleep(30*60)\n minutes = 30\n\n if minutes == 30:\n cls.broadcast(Lang.get('restart_default').format('30 minutes',message), cls.response_callback_response_only)\n time.sleep(20*60)\n minutes = 10\n\n if minutes == 10:\n cls.broadcast(Lang.get('restart_default').format('10 minutes',message), cls.response_callback_response_only)\n time.sleep(5*60)\n\n cls.broadcast(Lang.get('restart_default').format('5 minutes',message), cls.response_callback_response_only)\n time.sleep(4*60)\n\n cls.broadcast(Lang.get('restart_default').format('60 seconds',message), cls.response_callback_response_only)\n time.sleep(50)\n\n cls.broadcast(Lang.get('restart_default').format('10 seconds',message), cls.response_callback_response_only)\n time.sleep(10)\n\n Storage.restart_timestamp = None\n ServerControl.restart_server()\n\n\n Storage.restart_timestamp = floor(time.time() + (minutes*60))\n callback = lambda:delayed_message(minutes,message)\n ThreadHandler.create_thread(callback,looping=False)\n return True, None", "title": "" }, { "docid": "36d6be13ad7b23e48b6887f2b0d68372", "score": "0.66950524", "text": "def ScheduleRestart(self):\n self.is_restart_requested = True", "title": "" }, { "docid": "2da6b020cce5f48641270a7dbb5a9040", "score": "0.6633449", "text": "async def restart(ctx):\n await ctx.send('I am back in a sec', delete_after=5)\n restart_program()", "title": "" }, { "docid": "9f36d39b6798992df017887fa0c9ff45", "score": "0.634167", "text": "def restart(self) -> None:\n self._user.request('POST', self.url + 'restart/')", "title": "" }, { "docid": "bc71fba39c0f8f44b76510ced45f67b9", "score": "0.62856174", "text": "def Restart(self):\n self.Stop()\n return self.Start()", "title": "" }, { "docid": "e4faf3bf8999d612cba8fed61f069944", "score": "0.6284264", "text": "def reboot_system(self, delay=3):\n return self.check_result(self._transfer(RebootSystem(delay=delay)))", "title": "" }, { "docid": "871e2a0d2dd8d18e6fcf23ced9151981", "score": "0.62767345", "text": "def bugRestartCllbck(req):\n global _to_restart\n _to_restart = True\n print 'service called'\n #this uses the API of the roslaunch package to restart the\n # specified package\n return []", "title": "" }, { "docid": "3a3d0d56307fe95d7a95ce444fd1eb05", "score": "0.62758905", "text": "def restart_server(self, service_name):\n return self.add_task(service_name, \"restart\")", "title": "" }, { "docid": "db4b79f4b3f08799fefd322ce8b96c8a", "score": "0.6206763", "text": "def tick(self):\n BaseServer.tick(self)\n if restarter.should_restart():\n log.info(\"restarting\")\n 
raise SystemExit(75) # will trigger self.stop via atexit", "title": "" }, { "docid": "ca55dcdb22fe0ae682e8ce84ab5074d6", "score": "0.6199386", "text": "def restart():\n stop()\n run()", "title": "" }, { "docid": "5fe4623e84b3e9d4fa8f7c7442f05a28", "score": "0.618777", "text": "def restart():\n execute(stop)\n execute(start)", "title": "" }, { "docid": "5fa206405191b9980ffd42dfaa46b49a", "score": "0.6184564", "text": "def rerun(self, seconds):\n self._rerun = seconds", "title": "" }, { "docid": "fa7e9597325265b52432076155ab0f73", "score": "0.6170405", "text": "def service_restart(name):\n r = salt.utils.http.query(\n __context__[\"rest_sample\"][\"url\"] + \"service/restart/\" + name,\n decode_type=\"json\",\n decode=True,\n )\n return r[\"dict\"]", "title": "" }, { "docid": "5e60b798a184b382cc97bc123e0680ed", "score": "0.61665374", "text": "def Reboot(self, timeout=None, retries=None):\n self._RunDeviceFastbootCommand(['reboot'])", "title": "" }, { "docid": "318c3ada73d6ca5248208c98042fb70c", "score": "0.61394703", "text": "def restart_services(tag, servicename):\n result = True\n t.log(\"inside def\")\n device = t.get_handle(resource=tag)\n if device is None:\n return False\n response = device.shell(command=\"service \" +str(servicename)+ \" restart\")\n stdout = response.response()\n t.log(\"Service \" +str(servicename) +\" Restarted Successfully\")\n result = True\n return result", "title": "" }, { "docid": "c8de58466c733ecc7e858d863f92a8ee", "score": "0.61103904", "text": "def _reboot_server(cs, s):\n s.reboot()\n sleep(90)\n return True", "title": "" }, { "docid": "6fb9a7bbdfc83e18c0f4f35945cf188d", "score": "0.6076012", "text": "def restart_server():\n stop_server()\n start_server()", "title": "" }, { "docid": "9b5e091640b109a1b27fb6e82c753797", "score": "0.6070804", "text": "def sleep(seconds=2):\n time.sleep(seconds)", "title": "" }, { "docid": "5ee83bd489cf8c3000381e91e7aa4fa0", "score": "0.60666317", "text": "def restart(self, force=False, wait_for_available=True,\n operation_timeout=None):\n body = {'force': force}\n self.manager.session.post(self.uri + '/operations/restart', body=body)\n if wait_for_available:\n time.sleep(10)\n self.manager.client.wait_for_available(\n operation_timeout=operation_timeout)", "title": "" }, { "docid": "96a7881d517689763d67791d9024c8c2", "score": "0.6044264", "text": "def _restart(self):\n self._thread = None\n try:\n response = self._stub.WatchCommand(\n controller_pb2.MachineId(name=self._machine_config.name))\n self._thread = threading.Thread(\n target=self._watch, kwargs={\n 'response': response\n })\n self._thread.start()\n\n self.update_all_status()\n except grpc.RpcError:\n if not self._stopped:\n threading.Timer(self._GRPC_RECONNECT_INTERVAL, self._restart).start()", "title": "" }, { "docid": "734a0e7f5822c33c41ee4e7a1338bbfb", "score": "0.60232157", "text": "async def action_restart(self):\n # 1. Connect to CRUX. Success is expected.\n host = self.services[\"host\"]\n host.max_attempts = 10\n host.timeout = 2\n await host.build_SSL()\n await host.connect_to_CRUX()\n\n if not host.connected:\n self.operations = MUDOp.OFFLINE\n self.status = MUDStatus.OFFLINE\n self.logger.warning(\"The portal seems to be off, the game isn't running.\")\n return\n\n # 2. Send the 'restart_game' command\n self.logger.info(\"Game stopping ...\")\n self.operations = (MUDOp.RELOADING |\n MUDOp.PORTAL_ONLINE | MUDOp.GAME_ONLINE)\n self.status = MUDStatus.ALL_ONLINE\n await host.send_cmd(host.writer, \"restart_game\", dict(announce=True))\n\n # 3. 
The portal should stop the game process...\n # ... and restart it.\n # 4. Listen for the 'stopped_game' command.\n success, args = await host.wait_for_cmd(host.reader, \"game_stopped\",\n timeout=10)\n if not success:\n self.operations = MUDOp.PORTAL_ONLINE | MUDOp.GAME_ONLINE\n self.status = MUDStatus.ALL_ONLINE\n self.logger.warning(\"The game is still running.\")\n return\n\n self.operations = MUDOp.RELOADING | MUDOp.PORTAL_ONLINE\n self.status = MUDStatus.PORTAL_ONLINE\n self.logger.info(\"... game stopped.\")\n self.logger.info(\"Start game ...\")\n # 5. The game process will send a 'register_game' command to CRUX.\n # 6. ... so wait for the 'registered_game' command to be received.\n success, args = await host.wait_for_cmd(host.reader, \"registered_game\",\n timeout=10)\n if success:\n self.operations = MUDOp.PORTAL_ONLINE | MUDOp.GAME_ONLINE\n self.status = MUDStatus.ALL_ONLINE\n game_id = args.get(\"game_id\", \"UNKNOWN\")\n self.logger.info(f\"... game started (id={game_id}).\")\n else:\n self.operations = MUDOp.PORTAL_ONLINE\n self.status = MUDStatus.PORTAL_ONLINE\n self.logger.error(\n \"The game hasn't started. See logs/game.log \"\n \"for more information.\"\n )", "title": "" }, { "docid": "4d018c340212c607a8a268dc0b641663", "score": "0.59919256", "text": "def restart():\n\n\t\tcheck_call(\"sudo reboot\", shell=True)", "title": "" }, { "docid": "c088110ad91124b57f686418d6d959ff", "score": "0.59882647", "text": "def delay(ms):\r\n TI.sleep(ms / 1000.0)", "title": "" }, { "docid": "d4e22c5500f6a31a5aaee86a47276317", "score": "0.5975329", "text": "def _restart(self):\n\t\ttry:\n\t\t\tself._kill_all_units()\n\t\t\tself._controller.step(2)\n\t\texcept (protocol.ProtocolError, protocol.ConnectionError):\n\t\t\tself.full_restart()", "title": "" }, { "docid": "cf7d162e51008573550a58e9fa4e2c30", "score": "0.59612155", "text": "async def sleep(self, seconds): # pragma: no cover\n self.slept = seconds", "title": "" }, { "docid": "9037b951f656f2ded5c906955bdcc891", "score": "0.5958282", "text": "def test_reboot_with_delay():\n cmd_mock = MagicMock(return_value=\"A\")\n with patch.dict(system.__salt__, {\"cmd.run\": cmd_mock}):\n assert system.reboot(at_time=5) == \"A\"\n cmd_mock.assert_called_with([\"shutdown\", \"-r\", \"5\"], python_shell=False)", "title": "" }, { "docid": "59f4d99398fcc34c671f3809f536e08f", "score": "0.59512913", "text": "def reboot(self, **params):\n\n logger.info('Executing power cycle for: {}'.format(params['sockets']))\n self.turn_off(params['sockets'])\n time.sleep(10)\n self.turn_on(params['sockets'])\n time.sleep(5)", "title": "" }, { "docid": "808d82e00dfd5f55f0b8d7891df76043", "score": "0.59244436", "text": "def restart(self):\n self.target_time = time.monotonic() + self.duration\n logger.debug(f'Restarting {self.name}')", "title": "" }, { "docid": "586339db9c7374f8d5d167ec1c5ec153", "score": "0.5904935", "text": "def request_restart(self, req, msg):\n if self._restart_queue is None:\n raise FailReply(\"No restart queue registered -- cannot restart.\")\n # .put should never block because the queue should have no size limit.\n self._restart_queue.put(self)\n # this message makes it through because stop\n # only registers in .run(...) 
after the reply\n # has been sent.\n return req.make_reply(\"ok\")", "title": "" }, { "docid": "b219ba74944b327e6fb961911eb558bf", "score": "0.5884368", "text": "def sleep(self, delay):\n if delay is not None and delay>0:\n time.sleep(delay)", "title": "" }, { "docid": "0ea2f5438f624410af5683929db2ff6f", "score": "0.58835256", "text": "def restart_vpn():\n command = \"service openvpn restart; sleep 10; ifconfig tun0\"\n\n exec_console_command(command)\n\n return \"VPN restarted successfully.\"", "title": "" }, { "docid": "bcdb08cb945d0cd28a55a14bdf2bf606", "score": "0.5880627", "text": "def sleep(seconds):\n return time.sleep(seconds)", "title": "" }, { "docid": "511a9b1031de48879200e6d9c4222065", "score": "0.5871849", "text": "def restart(self):\n self.stop()\n self.start()", "title": "" }, { "docid": "4c9d8ef45c304cf1112533799b8b8b5b", "score": "0.58617693", "text": "def reboot_cycle(conn, options):\n\n plug = options[\"--plug\"]\n return_code = 99\n out = \"\"\n err = \"\"\n\n (return_code, out, err) = send_sbd_message(conn, options, plug, \"reset\")\n return not bool(return_code)", "title": "" }, { "docid": "1e969e3a170c78f57dd313ef13c7a5a2", "score": "0.58598816", "text": "def sleep(duration):\n origtime.sleep(duration / 60.0)", "title": "" }, { "docid": "ae18fea4476ba98ead1c9f8b14245dd6", "score": "0.5858565", "text": "def restart_logstash_service_for_pay():\n restart_logstash_process()", "title": "" }, { "docid": "a4c68dfdae954f04f319343dc45ce21b", "score": "0.58514047", "text": "def sleep(self, duration):\n self.backend.do_request(\"sleep\",\n { 'duration': duration,\n })\n return True", "title": "" }, { "docid": "ef4e41c64a4681c12bb9fc32797f581c", "score": "0.58488774", "text": "def restart_bgp(duthost):\n duthost.shell('systemctl restart bgp')\n time.sleep(60)", "title": "" }, { "docid": "2fe0506f4f29a84d21482d819756a819", "score": "0.5830381", "text": "def sleep(self, *args, seconds):\n return deferLater(reactor, seconds, lambda: None)", "title": "" }, { "docid": "1dab822969091a3a484f8d0093814ca0", "score": "0.5827086", "text": "def delay(ms):\n time.sleep(ms / 1000.0)", "title": "" }, { "docid": "f9426ecb0ede7772f610bc390b82bdfa", "score": "0.5822858", "text": "def _sleep_wrapper(seconds):\n time.sleep(seconds)", "title": "" }, { "docid": "dc8e213c360b5f82d05303a03ab39300", "score": "0.5820755", "text": "def restart(self,\n start_timeout=defaults.START_TIMEOUT,\n start_interval=defaults.START_INTERVAL,\n stop_timeout=defaults.STOP_TIMEOUT,\n stop_interval=defaults.STOP_INTERVAL):\n\n self.stop(timeout=stop_timeout,\n interval=stop_interval)\n self.start(timeout=start_timeout,\n interval=start_interval)", "title": "" }, { "docid": "f1febfe5cff87489225ac25fa1a0c138", "score": "0.5819088", "text": "def restart_ssh_service(self):\n return 0", "title": "" }, { "docid": "c18de6d95f4907b624ba6854e5326596", "score": "0.58148897", "text": "def restart_control():\n execute(\"restart_control_node\", env.host_string)", "title": "" }, { "docid": "61f0b9a41c4883a959a68469116ce733", "score": "0.58112633", "text": "def test_restart_one(self):\n self.start_supervisord(autostart=True)\n ret = self.run_function(\n \"supervisord.restart\",\n [\"sleep_service\"],\n conf_file=self.supervisor_conf,\n bin_env=self.venv_dir,\n )\n self.assertEqual(ret, \"sleep_service: stopped\\nsleep_service: started\")", "title": "" }, { "docid": "0d27df3f6fbcd954e630a86c8b57445d", "score": "0.5805034", "text": "def restart(self, **kwargs):\n self.stop(**kwargs)\n self.start(**kwargs)", "title": "" }, { "docid": 
"1aa4835794885fb1fba43719ffc5c874", "score": "0.5803045", "text": "def reboot(self, port, delay=5):\n # Make sure this is a valid port number.\n self.validatePort(port)\n self.log.info(\"--------- Cycling port %d -----------\" % port)\n self.off(port)\n time.sleep(delay)\n self.on(port)\n self.log.info(\"--------- Port %d cycled ------------\" % port)", "title": "" }, { "docid": "f95475c52112c4615e8a1658354bb583", "score": "0.5802521", "text": "def reboot(self, timeout=5.0, delay_check=0.1):\n if self.type() != 'electric':\n return self._capablity_warning('reboot')\n\n _cmd_result = self._cmd_reboot(block=True, timeout=timeout)\n rospy.sleep(delay_check)\n if self.error():\n if not self.reset(block=True, timeout=timeout):\n rospy.logerr(\"Failed to reset gripper error after reboot.\")\n return False\n self.set_parameters(defaults=True)\n return True", "title": "" }, { "docid": "0c54d893cf8e6143643044746d33adcb", "score": "0.57946473", "text": "def trigger_delay(self, delay: float) -> None:", "title": "" }, { "docid": "6fb29f1338e9e6d68bf9a31ebbc2cc51", "score": "0.57803863", "text": "def server_restart(self, server):", "title": "" }, { "docid": "5410efa282f3e84acab0d2b4bbfc3bc9", "score": "0.5769368", "text": "def upstart_restart(name):\r\n\twith fabric.api.settings(warn_only=True):\r\n\t\tstatus = sudo(\"service %s status\" % name)\r\n\tif status.failed:\r\n\t\treturn sudo(\"service %s start\" % name)\r\n\telse:\r\n\t\tstatus = sudo(\"service %s restart\" % name)\r\n\t\tif status.failed:\r\n\t\t\tsudo(\"service %s stop\" % name)\r\n\t\t\treturn sudo(\"service %s start\" % name)\r\n\t\telse:\r\n\t\t\treturn status", "title": "" }, { "docid": "12fb2590ea31077368495c1697bf2557", "score": "0.57629883", "text": "def _sleep():", "title": "" }, { "docid": "a2839e917d84389159e4e6c0e2fc6bcc", "score": "0.5756049", "text": "def restart():\n\n require(\"project_name\")\n\n utils.supervisorctl(\"restart\", \"play2-%s\" % env.project_name)", "title": "" }, { "docid": "fff081432e5f0ecc9284b1a1678c14bb", "score": "0.57509273", "text": "def sleep():\n sleep_time = 1\n if request.args.get('timeout'):\n sleep_time = int(request.args.get('timeout'))\n time.sleep(sleep_time)\n return \"Done\", 200", "title": "" }, { "docid": "f273afffd1f49c39f03155a63bafeae1", "score": "0.5748332", "text": "def restart(self):\n self.reset()\n self.start()", "title": "" }, { "docid": "4e33060817f1a2eacb7efab0d0faa3b4", "score": "0.57329303", "text": "async def async_command_reboot(call):\n client.reboot()", "title": "" }, { "docid": "65ab101a42ba2872acbdcaeea863248a", "score": "0.57271785", "text": "def sleep(cls, seconds):\n return None", "title": "" }, { "docid": "85b05d6411eefaa699493e071f50a675", "score": "0.5723999", "text": "def restart(self, ):\n self.running = True", "title": "" }, { "docid": "3cb157aa94b10d78469784e2f0aaf8da", "score": "0.57065797", "text": "def restart(self, immediate: bool = True) -> None:\n from bacommon.servermanager import ShutdownCommand, ShutdownReason\n\n self._enqueue_server_command(\n ShutdownCommand(\n reason=ShutdownReason.RESTARTING, immediate=immediate\n )\n )\n\n # If we're asking for an immediate restart but don't get one within\n # the grace period, bring down the hammer.\n if immediate:\n self._subprocess_force_kill_time = (\n time.time() + self.IMMEDIATE_SHUTDOWN_TIME_LIMIT\n )", "title": "" }, { "docid": "33ac451773d82bb93904b981db158cfd", "score": "0.5684372", "text": "def reboot(self, **params):\n raise NotImplementedError", "title": "" }, { "docid": 
"b3470b1d223416e2ff9e9244542c1a39", "score": "0.56710976", "text": "def ui_command_restart(self):\n if not self.confirm_app_is_published():\n return\n self.extend_autostop(minutes=cfg.defaultAppExpireTime)\n try:\n rClient.restart_application(self.appId)\n except:\n print(\"\\nProblem restarting application!\\n\")\n raise\n print(c.yellow(\"\\nApplication now restarting\"))\n rCache.purge_app_cache(self.appId)\n self.loop_query_status(desiredState='STARTED')", "title": "" }, { "docid": "eab8a57cafd8a6231b1e981539b4e4be", "score": "0.5658123", "text": "def _wakeup(self, timeout, delay=1):", "title": "" }, { "docid": "99e35f903af776c9cf505616af8cc983", "score": "0.56411684", "text": "def full_restart(self):\n\t\tself._sc2_proc.close()\n\t\tself._launch()\n\t\tself.force_restarts += 1", "title": "" }, { "docid": "986bc44f1e2d6e71470f1edc04eb4247", "score": "0.563913", "text": "def _restart():\n print('Restarting REST server...')\n pid = _getPID()\n\n if -2 == pid:\n # PIDFile not found, thus, server is not running.\n print('REST server does not currently appear to be running.')\n _start()\n return\n elif -1 == pid:\n print(\"PIDFile '%s' has been corrupted.\" % _src_dir + '/' +_pidfile)\n return\n\n call(['kill', '-1', str(pid)])\n return", "title": "" }, { "docid": "ecef3de2621265121219fd2de9003e2b", "score": "0.5636643", "text": "def parameterized_delay_rerun(*args):\n ignored(args)\n time.sleep(sleep_seconds)\n return True", "title": "" }, { "docid": "415af0dc412f0879ccf89f8d26a5a1b0", "score": "0.5624509", "text": "def restart_phone():\n pass", "title": "" }, { "docid": "8601559c7d283e4d98c312906ea98cb2", "score": "0.56172645", "text": "def rolling_reboot():\n add_rs_to_draining()\n _hbase_gstop()\n reboot_server()\n time.sleep(300)\n count = region_count()\n if (count == -1):\n time.sleep(60)\n hadoop_start()\n time.sleep(10)\n hbase_start()\n time.sleep(10)\n thrift_start()\n time.sleep(60)\n count = region_count()\n if (count == -1):\n abort(\"RS did NOT reboot/restart correctly.\")\n clear_rs_from_draining()", "title": "" }, { "docid": "5341d19386e5681c1268ac5777e522ef", "score": "0.5612768", "text": "def restart(self):\n self.update_server()\n self.update_device()", "title": "" }, { "docid": "166bb20f370f9b681ecd1e1430f04398", "score": "0.56126654", "text": "def RestartDown(self):\n payload = { \"Arg1\": self }\n return self._execute('restartDown', payload=payload, response_object=None)", "title": "" }, { "docid": "16ee4ea3027772cbaa401b75063fc266", "score": "0.55997247", "text": "def psw_restart(self, wait_for_completion=True, operation_timeout=None):\n result = self.manager.session.post(\n self.uri + '/operations/psw-restart', resource=self,\n wait_for_completion=wait_for_completion,\n operation_timeout=operation_timeout)\n return result", "title": "" }, { "docid": "8a22a4f347cf9241457a9bed8405d896", "score": "0.5576189", "text": "def restart():\n print 'Restarting'\n command = '/usr/bin/sudo /sbin/shutdown -r now'\n import subprocess\n process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)\n output = process.communicate()[0]\n print output", "title": "" }, { "docid": "79be1e38dcef093a0a37ac65fa4e904c", "score": "0.5575904", "text": "def restart_logstash_service_for_activity():\n restart_logstash_process()", "title": "" }, { "docid": "f62c34b3d94bcac6533498acabd43590", "score": "0.5573555", "text": "def restart_hook(self):", "title": "" }, { "docid": "ad151b280d84cca439a55923ed4910a1", "score": "0.55711806", "text": "def restart(name):\n try:\n print('Restarting node 
%s' % name)\n n = grow.node_factory.get_node_from_state(name)\n n.restart()\n grow.node_factory.save()\n time.sleep(0.2)\n grow.node_factory.get_status(name)\n except ValueError as e:\n print(e)", "title": "" }, { "docid": "51d05215eb28dc29655f2aecb5844164", "score": "0.55705434", "text": "def restart(self):\n return self._invoke('restart', None)", "title": "" }, { "docid": "24c140c8acff4e9be884fc928211be6a", "score": "0.556958", "text": "def response_delay(t1):\n response_time = time.time() - t1\n wait_time = randint(5, 10) - response_time\n time.sleep(wait_time if wait_time > 7 else randint(3, 7))", "title": "" }, { "docid": "b4b35fbccd92794dd4ba44b39318c82d", "score": "0.55640566", "text": "def redis_restart():\n redis_stop()\n redis_run()", "title": "" }, { "docid": "be1670d8689520841a7298414e61751e", "score": "0.55624586", "text": "def _reset_retry_interval(self):\n self._retry_interval = 0.5", "title": "" }, { "docid": "7494d1f7938f74541d30e7ff3af317d8", "score": "0.5562243", "text": "def sleep_in_seconds(self, seconds=1):\n time.sleep(seconds)", "title": "" }, { "docid": "3b2444898d576df7b293cc3a227ee3f4", "score": "0.5558786", "text": "def test_successful_restart(self):\n self.successful_restart()", "title": "" }, { "docid": "3b2444898d576df7b293cc3a227ee3f4", "score": "0.5558786", "text": "def test_successful_restart(self):\n self.successful_restart()", "title": "" }, { "docid": "ae32f16517975af815b7c57896306422", "score": "0.55523294", "text": "def some_processing(duration):\n time.sleep(duration)", "title": "" }, { "docid": "c5a7a9e53b72b9a8901c6444ffad6a85", "score": "0.55519724", "text": "async def _retry_delay(self, current_retry: int) -> None:\n await asyncio.sleep(current_retry ** 2)", "title": "" }, { "docid": "d234fa9a41b86233808e79a9f9362039", "score": "0.55505043", "text": "def callback_restart(cls,*args):\n out('Issuing IMMEDIDATE server restart')\n ServerControl.restart_server()", "title": "" }, { "docid": "9f10c6f69c019030372ec4ae80742580", "score": "0.5549776", "text": "def smart_sleep(self, timeout):\n\n start_time = datetime.now()\n new_time = datetime.now()\n diff = (new_time - start_time).seconds + ((new_time - start_time).microseconds / 1000000.0)\n\n while (diff < timeout):\n try:\n notify = self.drone_connection.waitForNotifications(0.1)\n except:\n color_print(\"reconnecting to wait\", \"WARN\")\n self._reconnect(3)\n\n new_time = datetime.now()\n diff = (new_time - start_time).seconds + ((new_time - start_time).microseconds / 1000000.0)", "title": "" }, { "docid": "b4941bcfe290bedc1b3683111adca98f", "score": "0.55327797", "text": "def restart_logstash_service_for_ask():\n restart_logstash_process()", "title": "" }, { "docid": "b8902fa38821cddf7aab6ea945b6999c", "score": "0.55188954", "text": "def restart_collector():\n execute('restart_collector_node', env.host_string)", "title": "" }, { "docid": "b3ada62781be0449223e2cf996702921", "score": "0.5518614", "text": "def test_restart_one_not_running(self):\n self.start_supervisord(autostart=False)\n ret = self.run_function(\n \"supervisord.restart\",\n [\"sleep_service\"],\n conf_file=self.supervisor_conf,\n bin_env=self.venv_dir,\n )\n self.assertIn(\"sleep_service: ERROR (not running)\", ret)\n self.assertIn(\"sleep_service: started\", ret)", "title": "" }, { "docid": "88591e721ea9aa5039311afca102e067", "score": "0.5512845", "text": "def restart_service(args):\n try:\n service_stop('algorand')\n service_start('algorand')\n except CalledProcessError as e:\n action_set({'output': e.output})\n action_fail('Failed 
to start algorand service after force_boot')\n return False", "title": "" }, { "docid": "be96af7bc5fe59c18619b6b3007e015a", "score": "0.550772", "text": "def RestartFleetspeakGrrService(grr_id: Text) -> None:\n restart_req = fs_system_pb2.RestartServiceRequest(name=\"GRR\")\n fs_msg = fs_common_pb2.Message()\n fs_msg.message_type = \"RestartService\"\n fs_msg.destination.client_id = GRRIDToFleetspeakID(grr_id)\n fs_msg.destination.service_name = \"system\"\n fs_msg.data.Pack(restart_req)\n\n fleetspeak_connector.CONN.outgoing.InsertMessage(fs_msg)", "title": "" }, { "docid": "b511ba2dbcca4d35ab0eaed8fbffdf4e", "score": "0.55070883", "text": "def RebootBootloader(self, timeout=None, retries=None):\n self._RunDeviceFastbootCommand(['reboot-bootloader'])", "title": "" }, { "docid": "9416a19f9dcce4161337e1b68d65ee42", "score": "0.55031854", "text": "def treade_notification_deamon(sec=0, minutes=0, hours=0):\n while True:\n sleep_time = sec + (minutes * 60) + (hours * 3600)\n time.sleep(sleep_time)\n enable_notifications()\n print \"Refresh enable notification\"", "title": "" }, { "docid": "d2d4068ade2b1b67242f117f75c5c757", "score": "0.5498996", "text": "def restart(self):\n for restart_command in self._HTTPD_RESTART_COMMANDS:\n deployment.log.message(\"\\nTrying to restart httpd with '%s'\" % restart_command)\n p = subprocess.Popen(restart_command, shell=True)\n p.wait()\n if p.returncode == 0:\n deployment.log.message(\"httpd restarted, I think.\")\n return\n else:\n deployment.log.message(\"Command failed with code %d. Trying another way.\" % p.returncode)\n deployment.log.message(\"Giving up trying to restart httpd. You'll have to do it yourself.\")", "title": "" }, { "docid": "75a04f8b9624912a7c63621d3f688c27", "score": "0.5491014", "text": "def _retransmit_timer(p):\n\t\tp._time_up = True", "title": "" }, { "docid": "3fbcabb8db809f4f0ecbf387224f04df", "score": "0.5483154", "text": "def schedule_timer(self, call_key, delay):\n self.decisions.start_timer(timer_id=timer_key(call_key),\n start_to_fire_timeout=str(delay))", "title": "" }, { "docid": "33040a957584315e847a5594dbb5b993", "score": "0.54811025", "text": "def reset_timeout(self):\n return 3000", "title": "" }, { "docid": "ec4a2751f86f4099c2f8050537f7b6b7", "score": "0.547816", "text": "def _increase_delay(self) -> None:\n assert self.current_reconnect_delay is None or self.current_reconnect_delay >= 0\n\n if self.current_reconnect_delay is None:\n self.current_reconnect_delay = self.initial_reconnect_delay # type: ignore\n else:\n self.current_reconnect_delay = min(self.current_reconnect_delay * 2, SIGNING_SERVICE_MAXIMUM_RECONNECT_TIME)", "title": "" }, { "docid": "0eba925721a176677e670edc5401262d", "score": "0.54730046", "text": "def _cmd_reboot(self, block=True, timeout=5.0):\n if self.type() != 'electric':\n return self._capablity_warning('reboot')\n\n cmd = EndEffectorCommand.CMD_REBOOT\n success = self.command(\n cmd,\n block,\n test=lambda: (self._state.enabled == True and\n self._state.ready == True),\n timeout=timeout\n )\n rospy.sleep(0.5) # Allow extra time for reboot to complete\n self.set_parameters(defaults=True)\n return success", "title": "" } ]
977574b9ad89fd6fc241218b438a7c4b
The function creates a segment tree in O(n) time
[ { "docid": "b60935cd3960c70ff30657d3664a4acd", "score": "0.7451623", "text": "def segment_tree_creation(arr):\n n = len(arr)\n\n # for n leaf nodes, there are maximum double of 2**max_height-1 nodes, adding extra 0th elem for ease of calculations\n height = ceil(log2(n))\n m = 2 * (2 ** height)\n tree = [0] * m\n\n # filling the second half of the tree list (leaf nodes) with the array elements\n for i in range(n):\n index = i + int(m / 2)\n tree[index] = arr[i]\n\n # creating the internal nodes, calculating the from leaf to the root\n for i in range(int(m / 2) - 1, 0, -1):\n tree[i] = tree[2 * i] + tree[2 * i + 1]\n\n return tree", "title": "" } ]
[ { "docid": "65e049501dc3abc075cf63ac3bc25d92", "score": "0.6716978", "text": "def __init__(self, nums):\n n = len(nums)\n if n == 0: return\n max_size = 2 * pow(2, int(math.ceil(math.log(n, 2)))) - 1\n self.seg_tree = [0 for i in xrange(max_size)]\n self.nums = nums[:]\n self.build_tree(0, n-1, 0)", "title": "" }, { "docid": "4f9deff340b137fe2e987e6c1d0749a0", "score": "0.59964675", "text": "def __init__(self, data, node_type):\n # The space complexity of a segment tree is O(2^log(n)). Use math.ceil() \n # to round decimal logarithms up to the nearest integer. Since math.ceil()\n # is used, reducing 2^log(n) to n is not mathematically valid. Reducing\n # the math can result in 'index out of range' errors.\n self.tree = [None]*2*int(math.pow(2, math.ceil(math.log(len(data), 2))))\n self.data = data\n self.node_type = node_type\n # The build function takes three parameters: the start and end indices of the \n # interval, and the tree index of the next node to build. Since this is the\n # initialization, we are building the root node which contains the entire\n # data set as its interval, and the root node is always located at position zero.\n self.build(0, len(data)-1, 0)", "title": "" }, { "docid": "09b34e1b4d6f2f5d753fae4e54d1e016", "score": "0.586452", "text": "def get_minimal_tree(array, start, end):\n if start > end:\n return None\n mid = int((end + start) / 2)\n print(mid)\n node = Tree(array[mid])\n node.left = get_minimal_tree(array, start, mid - 1)\n node.right = get_minimal_tree(array, mid + 1, end)\n return node", "title": "" }, { "docid": "dee21a59e5360c7d267555f19270a806", "score": "0.58227617", "text": "def create_state_space_tree(\n sequence: List[Any], current_subsequence: List[Any], index: int\n) -> None:\n\n if index == len(sequence):\n print(current_subsequence)\n return\n\n create_state_space_tree(sequence, current_subsequence, index + 1)\n current_subsequence.append(sequence[index])\n create_state_space_tree(sequence, current_subsequence, index + 1)\n current_subsequence.pop()", "title": "" }, { "docid": "7af122e9f27401b195afe632aed37a51", "score": "0.5800427", "text": "def mktree(array):\n\n if array != []:\n root = TreeNode(array[0])\n \n for n in array[1:]:\n root.insert(n)\n \n return root", "title": "" }, { "docid": "7739c6ef4b47e27ff38f1299bfbb65e3", "score": "0.57798916", "text": "def depthFirstSet(node1,node2,root,segList,distDict):\n to_visit = [] # a list can be used as a stack in Python\n visited=[]\n node1.setWeight(node2.getWeight()+distDict[(node1.getID(),node2.getID())])\n to_visit.append(node1) # Start with root\n while len(to_visit)!= 0:\n v = to_visit.pop()\n #print v\n if v not in visited:\n visited.append(v)\n vNeighbors=[]\n #print segList[v.getID()]\n for seg in segList[v.getID()]:\n firstNode,secondNode=seg.getNodes()\n if firstNode==root or secondNode==root :\n continue\n if firstNode==v and secondNode not in visited:\n vNeighbors.append(secondNode)\n secondNode.setWeight(v.getWeight()+distDict[(v.getID(),secondNode.getID())])\n if secondNode==v and firstNode not in visited:\n vNeighbors.append(firstNode)\n firstNode.setWeight(v.getWeight()+distDict[(v.getID(),firstNode.getID())])\n to_visit.extend(vNeighbors)", "title": "" }, { "docid": "52bcdec7b60cfea2e3d0030c2bed24bf", "score": "0.57758105", "text": "def partition_tree(n, m):\r\n if n == 0:\r\n return tree(True)\r\n elif n < 0 or m == 0:\r\n return tree(False)\r\n else:\r\n left = partition_tree(n-m, m)\r\n right = partition_tree(n, m-1)\r\n return tree(m, [left, right])", "title": "" }, { 
"docid": "41c3e7985d647ccccbdffdd1bd0e3fa3", "score": "0.5733492", "text": "def create_i(n, t):\n r = Node(n)\n new = Tree(r)\n for tree in t:\n s = tree.root()\n r.add(s)\n\n return new", "title": "" }, { "docid": "ef4ff1f52bd4f0d3894f30bdfc0b3cca", "score": "0.57142776", "text": "def create_segment_object():\n return Segment()", "title": "" }, { "docid": "220b2c07868974d2fb9c06acbe98dcf3", "score": "0.56920505", "text": "def create(self, nodelist):\n if not nodelist:\n sys.stderr.write(\"empty nodelist\\n\")\n exit(0)\n self.root = 0\n self.size = len(nodelist)\n self.nodelist = nodelist\n parent_index = self.root\n s = []\n s.append(parent_index)\n p = 1\n while(len(s) > 0):\n # left children\n while(parent_index != None and \n p < self.size and\n (not self.nodelist[parent_index].isLeaf()) and\n self.nodelist[parent_index].find(self.nodelist[p].content) >= 0):\n # insert\n self.nodelist[parent_index].left = p\n self.nodelist[p].parent = parent_index \n # go to left child\n parent_index = self.nodelist[parent_index].left\n s.append(parent_index)\n p += 1\n # is leaf, so don't go into left child\n if len(self.nodelist[parent_index].content.strip()) <=1:\n self.nodelist[parent_index].leaf = True\n break\n\n # right child\n if(len(s) > 0):\n parent_index = s.pop()\n if (p < self.size and\n (not self.nodelist[parent_index].isLeaf()) and\n self.nodelist[parent_index].find(self.nodelist[p].content) >= 0):\n # insert\n self.nodelist[parent_index].right = p\n self.nodelist[p].parent = parent_index\n # go to right child\n parent_index = self.nodelist[parent_index].right\n s.append(parent_index)\n p += 1\n # is leaf\n if len(self.nodelist[parent_index].content.strip()) <=1:\n self.nodelist[parent_index].leaf = True\n else:\n # go to right child\n parent_index = self.nodelist[parent_index].right", "title": "" }, { "docid": "ba128ef3709e4307924ed474dc0f8789", "score": "0.56796217", "text": "def _create_btree(root, i, n, nums: List[int]) -> BinaryTree:\n if i < n:\n root = BinaryTree(nums[i]) if nums[i] is not None else None\n if root is not None:\n root.left = _create_btree(root.left, 2 * i + 1, n, nums)\n root.right = _create_btree(root.right, 2 * i + 2, n, nums)\n return root", "title": "" }, { "docid": "00986b362a67f90382c9f6312c132235", "score": "0.5669074", "text": "def create_large_tree():\n value_of_nodes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'a', 'b', 'c', 'd', 'e']\n tree = ''\n depth = 0\n count = 0\n\n while depth < 4:\n if depth == 0:\n tree = [value_of_nodes[0], [], []]\n depth += 1\n count += 1\n elif depth == 1:\n for i in [1,2]:\n tree[i] = [value_of_nodes[count], [], []]\n count += 1\n depth += 1\n elif depth == 2:\n for i,j in itertools.product([1,2], repeat=depth):\n tree[i][j] = [value_of_nodes[count], [], []]\n count += 1\n depth += 1\n elif depth == 3:\n for i, j, k in itertools.product([1,2], repeat=depth):\n tree[i][j][k] = [value_of_nodes[count], [], []]\n count += 1\n depth += 1\n return tree", "title": "" }, { "docid": "fe86edc273fea909368d6aaa2b38b778", "score": "0.56437284", "text": "def BSTMakeFromSortedArray(tab):\n if len(tab)<=0:\n return None\n mid = len(tab)//2\n root=Node(tab[mid])\n root.left=BSTMakeFromSortedArray(tab[:mid])\n root.right=BSTMakeFromSortedArray(tab[mid+1:])\n return root", "title": "" }, { "docid": "16c5a55259087d0011f955891579b8fb", "score": "0.5641156", "text": "def generatetree(pred):\n\n # instantiate tree lookup\n tree = {}\n\n # iterate over the list of predecessor vertices\n for i, p in enumerate(pred):\n\n # if the route begins/ends with 
itself set the\n # root vertex and continue to next iteration\n if p == -1:\n\n # tree keyed by root vertex with root vertex as path\n tree[i] = [i]\n continue\n\n # set the initial vertex `p` as `idx`\n idx = p\n # and add it as the first vertex in the path\n path = [idx]\n\n # iterate through the path until back to home vertex\n while idx >= 0:\n # set the next vertex on the path\n next_vertex = pred[idx]\n # and redeclare the current `idx`\n idx = next_vertex\n\n # add the vertex to path while not at home vertex\n if idx >= 0:\n path.append(next_vertex)\n\n # tree keyed by root vertex with network vertices as path\n tree[i] = path\n\n return tree", "title": "" }, { "docid": "70b4709bdea62b5d18a6aff8613a888d", "score": "0.56278557", "text": "def tree1():\n ivs = [make_iv(*iv, label=True) for iv in [\n [1, 2],\n [4, 7],\n [5, 9],\n [6, 10],\n [8, 10],\n [8, 15],\n [10, 12],\n [12, 14],\n [14, 15],\n ]]\n t = IntervalTree(ivs)\n return t", "title": "" }, { "docid": "4efdad46aa0f860d9c720fea3962d101", "score": "0.5593676", "text": "def construct_tree(n, tree, rule):\n node = Node(rule=rule)\n successors = []\n if n == 1: # leaf\n for t in tree:\n successors.append(Node(rule=1-rule, is_leaf=True, value=t))\n else: # sub-tree\n for t in tree:\n successors.append(construct_tree(n-1, t, 1-rule))\n node.successor = successors\n return node", "title": "" }, { "docid": "324951999846a3fb279d5ecb50be07c8", "score": "0.55640537", "text": "def ConstructTree(self):\n step = 0\n totalNodes = 0\n while step <= self.__steps:\n self.__nodes[step] = {}\n nUps = 0\n while nUps <= totalNodes:\n combins = BinomialOptionModel.__nCr(totalNodes, nUps)\n self.__nodes[step][nUps] = BinomNode(self.__underlyingStart, nUps, totalNodes - nUps, step, combins)\n nUps += 1\n totalNodes += 1\n step += 1\n # Price the option at each node:\n self.__CalcOptionPrices()\n # Determine asset prices at each node:\n self.__CalcAssetPrices()\n # Compute all the hedge ratios at each node:\n self.__ComputeSCHRs()\n # Compute all stock + bond replicating portfolio hedge ratios at each node:\n self.__ComputeSBHRs()", "title": "" }, { "docid": "c6587b09854bddf7e2fe9b4ec5db5aec", "score": "0.5560939", "text": "def make_tree(list, category):\n\t#takes in a list separated by + and - and puts it into a very left-heavy tree\n\ttree = list[0]\n\tlist.pop(0)\n\tfor i in range(0, len(list)):\n\t\tif list[i] in category:\n\t\t\tsubtree = list[i+1]\n\t\t\tif subtree.count('*') or subtree.count('/'):\n\t\t\t\tsubtree = split_ops(subtree, ['*', '/'])\n\t\t\t\tsubtree = make_tree(subtree, ['*', '/'])\n\t\t\ttree = [list[i], tree, subtree] \n\treturn tree", "title": "" }, { "docid": "a2cb763c374dd1e8f68230c54f3a79ed", "score": "0.5530317", "text": "def generate_perfect_balanced_tree(t: int, h: int):\n return nx.to_numpy_matrix(nx.balanced_tree(t, h), dtype=np.int64)", "title": "" }, { "docid": "b4fdacee20f8c76af3b791739fa21ac9", "score": "0.5528751", "text": "def make_tree(q, data, depthlimit, num_per, weak_learner):\n for _ in range(num_per):\n q.put(Tree(data, depthlimit, weak_learner))", "title": "" }, { "docid": "9e84c25909d2823358ad48263b7f5761", "score": "0.55186176", "text": "def build():\n _1= TreeNode(1)\n _2= TreeNode(2)\n _3= TreeNode(3)\n _1.left = _2\n _2.left= _3\n return _1\n\n\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.left.left = TreeNode(3)\n root.left.right = TreeNode(4)\n root.right = TreeNode(5)\n root.right.right = TreeNode(6)\n return root", "title": "" }, { "docid": "72fea86a19369c122230ad94ddd8608b", "score": 
"0.55108917", "text": "def generate_tree(dim=2, depth=6, gamma=0.8, p=0.5):\n leaves = list()\n k_leaf = [0]\n def attach_random_nodes(d, lims):\n tree = BinaryTree()\n if d == 0:\n tree.lims = lims\n leaves.append(tree)\n tree.k_leaf = k_leaf[0]\n k_leaf[0] = k_leaf[0] + 1\n # print(\"limits\",\n # [[\"{:.2f}\".format(x), \"{:.2f}\".format(y)]\n # for x, y in tree.lims])\n return tree\n tree.split_dim = np.random.randint(dim)\n # Possible to split while favoring the first dimensions\n tree.split_val = np.random.uniform(*lims[tree.split_dim])\n # print(tree.split_dim, tree.split_val)\n tree.split_orient = np.random.binomial(1, 0.5)\n\n upper_corner = [lims[i] if i != tree.split_dim\n else [tree.split_val, lims[i][1]]\n for i in range(dim)]\n lower_corner = [lims[i] if i != tree.split_dim\n else [lims[i][0], tree.split_val]\n for i in range(dim)]\n\n if tree.split_orient:\n left_lims = upper_corner\n right_lims = lower_corner\n else:\n left_lims = lower_corner\n right_lims = upper_corner\n\n tree.left = attach_random_nodes(d-1, left_lims)\n tree.right = attach_random_nodes(d-1, right_lims)\n return tree\n\n lims = [[0, 1]]*(dim//2) + [[-1, 1]]*(dim//2)\n tree = attach_random_nodes(depth, lims)\n\n tree.gamma = gamma\n tree.leaves = leaves\n tree.n_leaves = len(leaves) # L\n l_tot = tree.n_leaves\n\n tree.delta_l_pos = np.array([gamma**(-l/l_tot)\n for l in range(0, tree.n_leaves)])\n tree.delta_l_pos = tree.delta_l_pos/np.sum(tree.delta_l_pos)\n tree.delta_l_neg = np.array([gamma**(l/l_tot)\n for l in range(0, tree.n_leaves)])\n tree.delta_l_neg = tree.delta_l_neg/np.sum(tree.delta_l_neg)\n\n arg_sort = np.argsort(tree.delta_l_pos/tree.delta_l_neg)[::-1]\n sorted_pos = tree.delta_l_pos[arg_sort]\n sorted_neg = tree.delta_l_neg[arg_sort]\n\n tree.auc = (np.sum(sorted_pos*sorted_neg)/2\n + np.triu(sorted_pos.reshape((-1, 1)).dot(\n sorted_neg.reshape((1, -1))), k=1).sum())\n\n resort = np.array(range(tree.n_leaves))\n np.random.shuffle(resort)\n tree.delta_l_pos = tree.delta_l_pos[resort]\n tree.delta_l_neg = tree.delta_l_neg[resort]\n\n # print(\"AUC = {:.2f}\".format(tree.auc))\n\n tree.p = p\n tree.dim = dim\n tree.depth = dim\n return tree", "title": "" }, { "docid": "7fd702562e5293a46a758a00be995ff9", "score": "0.5509312", "text": "def createTreeTable(self): \n\n self.coutnProb()\n\n for element in self.slownik: #wstawianie drzew do tablicy\n self.treeTable.append(Tree(int(element[1]), str(element[0])))", "title": "" }, { "docid": "5ef2689375e26828c991eb437a4a15ae", "score": "0.5489918", "text": "def make_segments(array, num_segments):\r\n \r\n #Stores the array length (for calculations)\r\n array_length = len(array)\r\n #Stores the length of the smaller segments and the larger segments\r\n small_segment_size = array_length // num_segments\r\n large_segment_size = small_segment_size + 1\r\n #Stores the number of segments that will be \"larger\" (have one more term)\r\n num_larger_segments = array_length % num_segments\r\n #Stores the segments (output)\r\n segments = [None] * num_segments\r\n #Shuffles the array and stores this in a new array\r\n shuffled_array = array.copy()\r\n shuffle(shuffled_array)\r\n \r\n #To be used in the loop\r\n num_elements_used = 0\r\n #Generates the larger segments\r\n for i in range(num_larger_segments):\r\n #Stores the end index for the elements that will go into the segment.\r\n #We do not need a variable for the start element index because this is\r\n #equivalent to num_elements_used\r\n end_element_index = num_elements_used + large_segment_size\r\n 
#Adds the segment to the segments array\r\n segments[i] = shuffled_array[num_elements_used:end_element_index]\r\n #Updates the number of elements used\r\n num_elements_used = end_element_index\r\n \r\n #Generates the smaller segments\r\n for i in range(num_larger_segments, num_segments):\r\n #Stores the end index for the elements that will go into the segment.\r\n #We do not need a variable for the start element index because this is\r\n #equivalent to num_elements_used\r\n end_element_index = num_elements_used + small_segment_size\r\n #Adds the segment to the segments array\r\n segments[i] = shuffled_array[num_elements_used:end_element_index]\r\n #Updates the number of elements used\r\n num_elements_used = end_element_index\r\n \r\n #Returns the segments array\r\n return segments", "title": "" }, { "docid": "a82f3af61cc939de75a4ede1eee76a4e", "score": "0.5475522", "text": "def createTree(self, partitioner, maxRange):\n\t\tstart = 0\n\t\tend = maxRange\n\t\ttreeRoot = MerkleTreeNode(start, end)\n\t\ttreeRoot.setLeftChild(MerkleTreeNode(start, treeRoot.getNodeRoot()))\n\t\ttreeRoot.setRightChild(MerkleTreeNode(treeRoot.getNodeRoot()+1, end))\n\t\tlchild = treeRoot.getLeftChild()\n\t\trchild = treeRoot.getRightChild()\n\t\tprint \"l: \" + str(lchild)\n\n\n\t\t# for node in self.partitionList:\n\t\t# \tself.hashList[node].append('')\n\t\t# \tself.hashList[node].append('')", "title": "" }, { "docid": "ac324d3322dac6acaa06b4cc82a3c72e", "score": "0.546282", "text": "def build_simple_tree():\n node = TreeNode(1)\n node.left = TreeNode(2)\n node.right = TreeNode(3)\n node.right.left = TreeNode(4)\n node.right.right = TreeNode(5)\n return node", "title": "" }, { "docid": "4ac38786df74ce298212a88a257da3f3", "score": "0.539437", "text": "def __init__(self):\n self.root = TreeNode()\n self.size = 0", "title": "" }, { "docid": "0d83c48caa3df402ed4c988d528bfc18", "score": "0.53929365", "text": "def usr_create_tree_node(self, p_cont):\n _node = TreeNode(p_cont)\n\n for i in self.__state_list:\n\n if p_cont['id'] == i['pid']:\n\n t_node = self.usr_create_tree_node(i)\n _node.append_node(t_node)\n\n return _node", "title": "" }, { "docid": "f1c55d6e7220538793eeab970369e3b5", "score": "0.5379197", "text": "def build_tree(self):\n raise NotImplementedError", "title": "" }, { "docid": "18132e1df19134591d1ecf9e65703546", "score": "0.53785294", "text": "def partition(vec, eqPred):\n size = len(vec)\n nodes = [None] * size\n # The first O(N) pass: create N single-vertex trees\n for i in range(size):\n nodes[i] = dict()\n nodes[i]['parent'] = -1\n nodes[i]['rank'] = 0\n # The main O(N^2) pass: merge connected components\n for i in range(size):\n root1 = i\n # find root of i's tree\n while nodes[root1]['parent'] >= 0:\n root1 = nodes[root1]['parent']\n\n for j in range(size):\n if i != j and eqPred(vec[i], vec[j]):\n root2 = j\n # find root of j's tree\n while nodes[root2]['parent'] >= 0:\n root2 = nodes[root2]['parent']\n \n if root2 != root1:\n # unite both trees\n rank1 = nodes[root1]['rank']\n rank2 = nodes[root2]['rank']\n if rank1 > rank2:\n nodes[root2]['parent'] = root1\n else:\n nodes[root1]['parent'] = root2\n if rank1 == rank2:\n nodes[root2]['rank'] += 1\n root1 = root2;\n if nodes[root1]['parent'] >= 0:\n print \"Assertion failed: nodes[root1][PARENT] < 0\"\n \n # compress the path from node j to root1\n k = j\n parent = nodes[k]['parent']\n while parent >= 0:\n nodes[k]['parent'] = root1\n k = parent\n parent = nodes[k]['parent']\n\n # compress the path from node i to root\n k = i\n parent = 
nodes[k]['parent']\n while parent >= 0:\n nodes[k]['parent'] = root1\n k = parent\n parent = nodes[k]['parent']\n\n\n # Final O(N) pass: enumerate classes\n labels = np.zeros( (size, 1) ).astype(np.int32) * -1\n nclasses = 1\n \n for i in range(size):\n root = i\n while nodes[root]['parent'] >= 0:\n root = nodes[root]['parent']\n # re-use the rank as the class label\n if nodes[root]['rank'] >= 0:\n nodes[root]['rank'] = -nclasses\n nclasses += 1\n labels[i, 0] = -nodes[root]['rank']\n return labels", "title": "" }, { "docid": "678696d07e49f4953298eb1fd26c4c8f", "score": "0.5374816", "text": "def small_parsimony(t):\n visited = []\n s = {}\n for v in t.nodes:\n if len(t.edges[v]) == 1:\n visited.append(v)\n s[v] = {}\n for k in alphabet:\n if v.label == k:\n s[v][k] = 0\n else:\n s[v][k] = float(\"inf\")\n v.s = s[v]\n v.b = []\n while len(visited) < len(t.nodes):\n for v in [x for x in t.nodes if x not in visited][:]:\n children = []\n for edge in t.edges[v]:\n node = edge[0]\n if node in visited:\n children.append(node)\n if len(children) < 2:\n continue\n visited.append(v)\n s[v] = {}\n mins = []\n min_score = float(\"inf\")\n for k in alphabet:\n score = 0\n for node in children:\n minimum = float(\"inf\")\n for i in alphabet:\n if s[node][i] + int(i != k) < minimum:\n minimum = s[node][i] + int(i != k)\n score += minimum\n if score < min_score:\n min_score = score\n mins = [k]\n elif score == min_score:\n mins.append(k)\n s[v][k] = score\n v.s = s[v]\n v.b = mins\n # print(v.b)\n if len(v.b) == 1:\n v.label = v.b[0]\n if len(t.edges[t.root]) != 2 and len(t.edges[t.root]) < len(t.edges[v]):\n t.root = v\n\n t.root.label = min(t.root.s, key=t.root.s.get)\n for child in t.edges[t.root]:\n backtrack(t, [t.root], t.root, child[0])\n\n return t", "title": "" }, { "docid": "b3066d5fcbc69a227535411717562787", "score": "0.536612", "text": "def make_tree(self):\n self.tree.sort() # we order the nodes\n # careful! 
some ids may not exist!\n ids, feats, cuts, lefts, rights, covers = \\\n [np.array(x) for x in zip(*self.tree)]\n max_id = np.max(ids)\n self.feats = - np.ones(max_id + 1, dtype=np.int32)\n self.cuts = - np.ones(max_id + 1, dtype=np.float64)\n self.lefts = - np.ones(max_id + 1, dtype=np.int32)\n self.rights = - np.ones(max_id + 1, dtype=np.int32)\n self.covers = - np.ones(max_id + 1, dtype=np.int64)\n for a, b in [(self.feats, feats),\n (self.cuts, cuts),\n (self.lefts, lefts),\n (self.rights, rights),\n (self.covers, covers)]:\n a[ids] = b\n self.means = np.zeros_like(self.cuts)\n self._loop_means(0)", "title": "" }, { "docid": "08d08e494b80fd96793cc7367d4d2cfe", "score": "0.5362628", "text": "def build():\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.right = TreeNode(3)\n root.right.right = TreeNode(7)\n root.left.left = TreeNode(4)\n root.left.right = TreeNode(5)\n return root", "title": "" }, { "docid": "1b548cc063fcd0b7d05d3a0a8428fd4d", "score": "0.53575826", "text": "def EnumerateAllBinaryTrees(n):\n if n == 0:\n yield []\n for i in xrange(n):\n for left_child in EnumerateAllBinaryTrees(i):\n for right_child in EnumerateAllBinaryTrees(n - i - 1):\n yield [left_child, right_child]", "title": "" }, { "docid": "6e3e14b69b6da9d3d609f680744266d1", "score": "0.53409064", "text": "def _create_tree_topology(self, pattern: Pattern, statistics: Dict, leaves: List[TreePlanNode]):\n raise NotImplementedError()", "title": "" }, { "docid": "6c392e65ae6004a0a6a7bdc76850abb3", "score": "0.53219587", "text": "def build_tree() -> Tuple[List[int], int]:\n tree = [-1]*101\n cat_node = int(input())\n line = input()\n while line != \"-1\":\n parent, *children = line.split()\n parent = int(parent)\n for child in children:\n tree[int(child)] = parent\n line = input()\n\n return tree, cat_node", "title": "" }, { "docid": "ac3bb4b247e049f043e6b6bfc7c146e6", "score": "0.5315277", "text": "def indexed_tree_list(to):\n return [[]] + [enumerate_rooted_trees(i) for i in range(1, to)]", "title": "" }, { "docid": "371b471d26f6ea8149d728a341bbbc47", "score": "0.5313309", "text": "def _build_tree(nodes):\n\n max_generational_depth = max([n.generation for n in nodes])\n\n # put the start vertex into the queue, and the marked list\n\n root = nodes[0]\n\n queue = [root]\n marked = [root]\n y_pos = [0]\n\n # store the line coordinates that need to be plotted\n edges = []\n annotations = []\n\n # now step through\n while queue:\n\n # pop the root from the tree\n node = queue.pop(0)\n y = y_pos.pop(0)\n\n # TODO(arl): sync this with layer coloring\n depth = float(node.generation) / max_generational_depth\n edge_color = turbo.map(depth)[0] * 255\n\n # draw the root of the tree\n edges.append(([y, y], [node.t[0], node.t[-1]], edge_color))\n\n # mark if this is an apoptotic tree\n if node.is_leaf:\n annotations.append((y, node.t[-1], str(node.ID), WHITE))\n\n if node.is_root:\n annotations.append((y, node.t[0], str(node.ID), WHITE))\n\n children = [t for t in nodes if t.ID in node.children]\n\n for child in children:\n if child not in marked:\n\n # mark the children\n marked.append(child)\n queue.append(child)\n\n # calculate the depth modifier\n depth_mod = 2.0 / (2.0 ** (node.generation))\n\n if child == children[0]:\n y_pos.append(y + depth_mod)\n else:\n y_pos.append(y - depth_mod)\n\n # plot a linking line to the children\n edges.append(([y, y_pos[-1]], [node.t[-1], child.t[0]], \"w\"))\n annotations.append(\n (\n y_pos[-1],\n child.t[-1] - (child.t[-1] - child.t[0]) / 2.0,\n str(child.ID),\n WHITE,\n )\n )\n\n # 
now that we have traversed the tree, calculate the span\n tree_span = []\n for edge in edges:\n tree_span.append(edge[0][0])\n tree_span.append(edge[0][1])\n\n # # work out the span of the tree, we can modify positioning here\n # min_x = min(tree_span)\n # max_x = max(tree_span)\n\n return edges, annotations", "title": "" }, { "docid": "9f54b88090296c0d64d16a729febfa12", "score": "0.52925956", "text": "def build():\n root = TreeNode(4)\n root.left = TreeNode(2)\n root.left.left = TreeNode(1)\n root.left.right = TreeNode(3)\n root.right = TreeNode(6)\n root.right.right = TreeNode(7)\n root.right.left = TreeNode(5)\n return root, 2", "title": "" }, { "docid": "6588c65dcb838f897aeebb66deace6c5", "score": "0.52903324", "text": "def split(self):\n child_bounds = split_bounds(self.bounds)\n for bounds in child_bounds:\n self.children.append(\n TreeNode(bounds, self.depth+1, self.qvalue, self.n_visits)\n )", "title": "" }, { "docid": "f5f7334e7298b1ffbfd1a355f5d281fa", "score": "0.5280533", "text": "def make_table_of_segments(self):\n if self.vrb & DEBUG: print('In %s' % (sys._getframe().f_code.co_name))\n\n npoints = self.arr.shape[0]\n nsegs = npoints//4\n self.arr_segs = np.empty(shape=(nsegs, 4, 5), dtype=np.int64)\n\n npoints = nsegs*4\n for i in range(npoints):\n nseg = i//4 # [0, npoints/4]\n npoi = i%4 # [0,3]\n #print 'XXX nseg: %d npoi: %d' % (nseg, npoi)\n self.arr_segs[nseg, npoi,:] = self.arr[i,:]\n\n if self.vrb & DEBUG: print(self.arr_segs)", "title": "" }, { "docid": "b4154c9048e2914094430a624b447300", "score": "0.52746373", "text": "def genDistinct(n):\n\n leafnode = '(.)'\n dp = []\n newset = set()\n newset.add(leafnode)\n dp.append(newset)\n\n for i in range(1, n):\n newset = set()\n for j in range(i):\n for leftchild in dp[j]:\n for rightchild in dp[i - j - 1]:\n newset.add('(' + '.' 
+ leftchild + rightchild + ')')\n dp.append(newset)\n\n return dp[-1]", "title": "" }, { "docid": "98c6a9a94b7f386e00cfc5e1a4b3f707", "score": "0.5269709", "text": "def new_tree_graph(h, m, predicates):\n def rec(D, h, m, t, predicates):\n if h > 1:\n s = str(t)\n for j in range(1,m+1):\n for p in predicates:\n o = t+'.'+str(j)\n D.addNode(s,p,o)\n rec(D, h-1, m, t+'.'+str(j), predicates)\n\n graph = DataGraph()\n rec(graph, h, m, 'root', predicates)\n return graph", "title": "" }, { "docid": "7e6058d1d677e07c1e630a127d0bd8f1", "score": "0.52681804", "text": "def create_binary_tree(words):\n tree = LinkedBST()\n for word in words:\n tree.add(word)\n return tree", "title": "" }, { "docid": "9c4cefbff91630fea94c80b50cf9723c", "score": "0.5265912", "text": "def hsdag(self):\n\n tree = self.tree\n max_time = self.max_time\n self.start_time = time.time()\n\n if tree is None:\n self.tree = tree = HsGraph()\n root = Node(0, None, 0, 0)\n root.tree = tree\n root.h = frozenset()\n tree.add_node(root)\n\n if tree.worklist is None:\n tree.worklist = [root]\n\n worklist = tree.worklist\n if max_time:\n end_time = time.time() + max_time\n else:\n end_time = None\n\n while 1:\n node = worklist[0]\n del worklist[0]\n if end_time and time.time() > end_time:\n self.timeout = True\n break\n if node.tree and node.state == Node.State.OPEN:\n worklist += self.process_node(node)\n if len(worklist) == 0:\n break\n if self.max_num_solutions is not None and len(self.time_map) >= self.max_num_solutions:\n break\n\n return tree", "title": "" }, { "docid": "d039955989178bdf5b361a346f1675c2", "score": "0.5260701", "text": "def build_tree(self):\n stack = []\n self._handle_solo_node_case()\n while self.root_hash == None:\n if len(stack) >= 2 and stack[-1].height == stack[-2].height:\n left = stack.pop()\n right = stack.pop()\n parent_hash = self._md5sum(left.hash + right.hash)\n parent = self.Node(left, right, parent_hash)\n self.node_table[parent_hash] = parent\n left.parent = parent\n right.parent = parent\n\n if parent.height == self.max_height:\n self.root_hash = parent.hash\n\n stack.append(parent)\n elif len(self.leaves) > 0:\n leaf = self.leaves.pop()\n self.node_table[leaf.hash] = leaf\n stack.append(leaf)\n # Handle case where last 2 nodes do not match in height by increasing height of last node.\n\n else:\n stack[-1].height += 1\n self.is_built = True", "title": "" }, { "docid": "2fd99fef53e2b3d7e62976abe64d339c", "score": "0.5254922", "text": "def __init__(self, n):\n if isinstance(n, list):\n self.tree = [0]*(len(n)+1)\n for idx, data in enumerate(n):\n self.incr(idx, data)\n else:\n self.tree = [0]*(n+1)", "title": "" }, { "docid": "f91c34e53028d20f0d0604ece7ab527a", "score": "0.52500314", "text": "def draw_random_bst(n, balanced=False):\n from random import randint\n nums = set()\n max_num = 10 * n\n if 0 < n < MAX_HEIGHT:\n while len(nums) != n:\n nums.add(randint(1, max_num))\n\n draw_bst(list(nums), balanced=balanced)", "title": "" }, { "docid": "fb7ca8ade0be5a4d9f9c448d608f41da", "score": "0.52449954", "text": "def build():\n root = TreeNode(3)\n root.left = TreeNode(9)\n root.right = TreeNode(20)\n root.right.right = TreeNode(7)\n root.right.left = TreeNode(15)\n return root", "title": "" }, { "docid": "88f096c6d9bc50c677fa9ff68d2818de", "score": "0.52428603", "text": "def make_tree(file_content):\n file_tokens = make_file_tokens(file_content)\n root = Node(0)\n tip = root\n branch_point_stack = []\n move_number = 1\n for token in file_tokens:\n if token == '(':\n branch_point_stack.append(tip)\n 
branch_point_stack.append(move_number)\n elif token == ')':\n move_number = branch_point_stack.pop()\n tip = branch_point_stack.pop()\n else:\n new_move = create_move(token, tip, move_number)\n if new_move.color != 'E':\n move_number += 1\n tip.add_child(new_move)\n tip = new_move\n root = root.get_child(0)\n root.parent = 0\n return root", "title": "" }, { "docid": "7637af850e2c95a17bec441dfef5e67c", "score": "0.52326053", "text": "def create_naive_graph(segments):\r\n g = nx.MultiDiGraph()\r\n for segment in segments:\r\n for i in range(len(segment) - 1):\r\n g.add_edge(segment[i], segment[i + 1])\r\n if len(segment) == 1:\r\n g.add_node(segment[0])\r\n return g", "title": "" }, { "docid": "0217409a7bd56da64c55e4710d7b3364", "score": "0.5229722", "text": "def _create_new_nodes(self, level, n):\n if (level + 1) == len(self._node_list):\n self._node_list.append([])\n\n split_val = self._node_list[level][n].get_split()\n idx = self._node_list[level][n].get_col()\n\n # Split data\n lower_x_data, lower_y_data, upper_x_data, upper_y_data = self._split_data(level, n, idx, split_val)\n\n # Now check if all the same in lower/upper\n # Do not change y_data to average over all values\n if (lower_x_data.shape[0] > 1) and ((lower_x_data - lower_x_data[0, :]) == 0).all():\n lower_x_data = lower_x_data[[0], :]\n if (upper_x_data.shape[0] > 1) and ((upper_x_data - upper_x_data[0, :]) == 0).all():\n upper_x_data = upper_x_data[[0], :]\n # Make lower node if one can\n if lower_x_data.shape[0] > 0:\n lower_curr_index = len(self._node_list[level + 1])\n self._node_list[level + 1].append(self._create_node(lower_x_data, lower_y_data))\n self._node_list[level][n].set_lower_split_index(lower_curr_index)\n else:\n lower_curr_index = None\n # Make upper node\n if upper_x_data.shape[0] > 0:\n upper_curr_index = len(self._node_list[level + 1])\n self._node_list[level + 1].append(self._create_node(upper_x_data, upper_y_data))\n self._node_list[level][n].set_upper_split_index(upper_curr_index)\n else:\n upper_curr_index = None\n\n return [level + 1, lower_curr_index], [level + 1, upper_curr_index]", "title": "" }, { "docid": "712c4c4f5ae77eca82a8fd12450612f9", "score": "0.5223884", "text": "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n frontera, nodosvisitados = inicializar(\"dfs\", False, problem)\n\n #return recursivo(problem, frontera, nodosvisitados) #Descomentar esta linea y comentar la siguiente\n return iterativo(problem, frontera, nodosvisitados)\n #return ['West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'South', 'South', 'South', 'South', 'South', 'South', 'South', 'South', 'South', 'East', 'East', 'East', 'North', 'North', 'North', 'North', 'North', 'North', 'North', 'East', 'East', 'South', 'South', 'South', 'South', 'South', 'South', 'East', 'East', 'North', 'North', 'North', 'North', 'North', 'North', 'East', 'East', 'South', 'South', 'South', 'South', 'East', 'East', 'North', 'North', 'East', 'East', 'East', 'East', 'East', 'East', 'East', 'East', 'South', 'South', 'South', 'East', 'East', 'East', 'East', 'East', 'East', 'East', 'South', 'South', 'South', 'South', 'South', 'South', 'South', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'West', 'South', 'West', 'West', 'West', 'West', 'West', 
'West', 'West', 'West', 'West']", "title": "" }, { "docid": "9cbc01e1b9b5c9f3c5d33b0a5cda348c", "score": "0.5215099", "text": "def tree(node):\n return treant.tree(node, node_constructor=treant.n)", "title": "" }, { "docid": "7863c7e5c451749251c11bf993816101", "score": "0.51980656", "text": "def heuristic_tree(points, iterations=1000, alpha=0.01):\n size = points.shape[0]\n adjacentcy_list = kruskal(points)\n points, adjacentcy_list = add_steiner_points(points, adjacentcy_list)\n points, adjacentcy_list = move_steiner_points(points, adjacentcy_list, size, iterations=iterations, alpha=alpha)\n return points, adjacentcy_list", "title": "" }, { "docid": "d1103144a32aa6521179e7e658e11958", "score": "0.5197796", "text": "def _create_tree(self, input_array = None, hash_function = None):\n if input_array == None or hash_function == None:\n raise ValueError(\"input_array or hash_funciton cannot be None.\")\n \n number_of_points = input_array.shape[0]\n binary_hashes = []\n for i in range(number_of_points):\n binary_hashes.append(self._hash(input_array[i], hash_function))\n \n binary_hashes = np.array(binary_hashes)\n o_i = np.argsort(binary_hashes)\n return o_i, np.sort(binary_hashes)", "title": "" }, { "docid": "91678995591f372149f82837e20f6790", "score": "0.5192925", "text": "def make_binary_tree(L, randoms = Random()):\n if len(L) == 0:\n return Tree(None)\n else:\n left = randoms.randint(0, len(L)-1)\n return Tree(L[left], make_binary_tree(L[0:left], randoms),\n make_binary_tree(L[left+1:], randoms))", "title": "" }, { "docid": "6197cef41b2c2dea73a47ea6343fa3c5", "score": "0.51922995", "text": "def create_pos_graph(segments):\r\n for i in range(len(segments)):\r\n # transform words to their associated parts of speech\r\n segments[i] = nltk.pos_tag(segments[i])\r\n for j in range(len(segments[i])):\r\n segments[i][j] = segments[i][j][1]\r\n g = create_naive_graph(segments)\r\n return g", "title": "" }, { "docid": "d094640df57c4f8be79e5610280c8027", "score": "0.5190494", "text": "def build_tree(data, init_pop, this_node):\n \n # Check if this node ends here or has ramifications\n end_branch = check_end_branch(data, init_pop)\n this_node.death_proba = get_proba(data, \"status\", 0)\n this_node.ques, this_node.answ = 0,0\n if end_branch == False :\n best_ques, best_answ = get_best_split(data)\n #print best_ques, best_answ\n if best_ques == 0:\n end_branch = True\n\n if end_branch == False: # If it does not end here, then split data\n true_data, false_data = split_data(data, best_ques, best_answ) \n this_node.ques = best_ques\n this_node.answ = best_answ \n this_node.true_child = Node()\n this_node.false_child = Node()\n build_tree(true_data, init_pop, this_node.true_child)\n build_tree(false_data, init_pop, this_node.false_child)", "title": "" }, { "docid": "2678b07f0e784ad6cdafdd7eb5d01b7b", "score": "0.51724285", "text": "def build_tree(self, depth=2):\n total_nodes = np.sum([2 ** x for x in range(depth)])\n nodes = list(range(total_nodes))\n nodes_per_level = np.cumsum([2 ** x for x in range(depth - 1)])\n nodes_level = [x.tolist() for x in np.array_split(nodes, nodes_per_level)]\n\n adj_list = dict((idx, {}) for idx in nodes)\n for fr in nodes_level[:-1]:\n for i in fr:\n i_list = adj_list.get(i, {})\n # the connected nodes always follows this pattern\n i_list[\"left\"] = i * 2 + 1\n i_list[\"right\"] = i * 2 + 2\n adj_list[i] = i_list.copy()\n return adj_list", "title": "" }, { "docid": "2678b07f0e784ad6cdafdd7eb5d01b7b", "score": "0.51724285", "text": "def build_tree(self, depth=2):\n 
total_nodes = np.sum([2 ** x for x in range(depth)])\n nodes = list(range(total_nodes))\n nodes_per_level = np.cumsum([2 ** x for x in range(depth - 1)])\n nodes_level = [x.tolist() for x in np.array_split(nodes, nodes_per_level)]\n\n adj_list = dict((idx, {}) for idx in nodes)\n for fr in nodes_level[:-1]:\n for i in fr:\n i_list = adj_list.get(i, {})\n # the connected nodes always follows this pattern\n i_list[\"left\"] = i * 2 + 1\n i_list[\"right\"] = i * 2 + 2\n adj_list[i] = i_list.copy()\n return adj_list", "title": "" }, { "docid": "7a316777996af1c4ee3e2b1dcd341a0c", "score": "0.5172313", "text": "def _make_tree(centroids) -> KDTree:\n val = np.sum(np.isnan(centroids), 1) < 1\n return KDTree(centroids[val])", "title": "" }, { "docid": "5a7da53c39e3b7747aa1da90078476c5", "score": "0.5170584", "text": "def dfs(tree):\n new_tree={}\n for key in tree:\n subtree=tree[key]\n key='/'+key\n new_tree[key]=subtree\n stack = [new_tree]\n train_queue_with_num = []\n while len(stack) > 0:\n subtree = stack.pop(0)\n queue = []\n for key in subtree:\n if key == 'num':\n continue\n else:\n type_path=key.split('/')\n if len(type_path)==1:\n key='/'+key\n new_tree = {}\n for sub_key in subtree[key]:\n if sub_key == 'num':\n continue\n else:\n new_tree[key + '/' + sub_key] = subtree[key][sub_key]\n stack.append(new_tree)\n queue.append((key, subtree[key]['num']))\n if len(queue) > 0:\n queue = sorted(queue, key=operator.itemgetter(1))\n train_queue_with_num.append(queue)\n return train_queue_with_num", "title": "" }, { "docid": "0396d09974242d4a83fb6ba776c9b2ca", "score": "0.51704305", "text": "def encode_splits(tree, create_dict=True, delete_outdegree_one=True):\n taxon_set = tree.taxon_set\n if taxon_set is None:\n taxon_set = tree.infer_taxa()\n if create_dict:\n tree.split_edges = {}\n split_map = tree.split_edges\n # if tree.is_rooted:\n # tree.split_edges = {}\n # else:\n # atb = taxon_set.all_taxa_bitmask()\n # d = containers.NormalizedBitmaskDict(mask=atb)\n # tree.split_edges = d\n # split_map = tree.split_edges\n if not tree.seed_node:\n return\n\n if delete_outdegree_one:\n sn = tree.seed_node\n if not tree.is_rooted:\n if len(sn.child_nodes()) == 2:\n tree.deroot()\n while len(sn.child_nodes()) == 1:\n c = sn.child_nodes()[0]\n if len(c.child_nodes()) == 0:\n break\n try:\n sn.edge.length += c.edge.length\n except:\n pass\n sn.remove_child(c)\n for gc in c.child_nodes():\n sn.add_child(gc)\n\n for edge in tree.postorder_edge_iter():\n cm = 0\n h = edge.head_node\n child_nodes = h.child_nodes()\n nc = len(child_nodes)\n if nc > 0:\n if nc == 1 and delete_outdegree_one and edge.tail_node:\n p = edge.tail_node\n assert(p)\n c = child_nodes[0]\n try:\n c.edge.length += edge.length\n except:\n pass\n pos = p.child_nodes().index(h)\n p.add_child(c, pos=pos)\n p.remove_child(h)\n else:\n for child in child_nodes:\n cm |= child.edge.split_bitmask\n else:\n t = edge.head_node.taxon\n if t:\n cm = taxon_set.taxon_bitmask(t)\n edge.split_bitmask = cm\n if create_dict:\n split_map[cm] = edge\n # create normalized bitmasks, where the full (tree) split mask is *not*\n # all the taxa, but only those found on the tree\n if not tree.is_rooted:\n mask = tree.seed_node.edge.split_bitmask\n d = containers.NormalizedBitmaskDict(mask=mask)\n for k, v in tree.split_edges.items():\n d[k] = v\n tree.split_edges = d", "title": "" }, { "docid": "e070c71467bff927a8e6761c44d4dd3b", "score": "0.5167843", "text": "def get_tree(X, y, node, discrete, random_features=None, depth=1):\n\n # Base case: all the labels are the 
same: in this case\n # we found a perfect slipt and we don't need to split the \n # tree anymore\n if np.unique(y).size == 1:\n node.cl = np.unique(y)[0]\n node.is_leaf = True \n node.depth = depth\n else:\n \n if random_features:\n # In this case we need to use the random_features generator\n # to sample a subset of features and obtain the best_partition\n # in this subsample. The attribute returned must be considered\n # as the index of the chosen attribute in the features subset\n features = random_features.sample()\n att_index, val, indexes = best_partition(X[:,features], y, discrete[features])\n att = features[att_index]\n else:\n att, val, indexes = best_partition(X, y, discrete)\n \n # Recursive step: \n if indexes.any():\n X1, y1 = X[indexes], y[indexes]\n X2, y2 = X[~indexes], y[~indexes]\n node.att = att\n node.val = val\n node.discrete = discrete[att]\n node.child1 = Node()\n node.child2 = Node()\n get_tree(X1, y1, node.child1, discrete, random_features, depth+1)\n get_tree(X2, y2, node.child2, discrete, random_features, depth+1)\n \n # Could be the case that the labels are not unique but it's\n # not possible to find a good split of the tree (for example, \n # if we have many instances that are equal to each other but \n # have different classes): in this case a good euristich is to\n # stop to split the tree and take the most frequent label\n else:\n node.cl = np.bincount(y).argmax()\n node.is_leaf=True\n node.depth = depth", "title": "" }, { "docid": "a37e206abde728b53c68d001ee08e979", "score": "0.51677036", "text": "def get_sub_tree(pair, index):\n node = []\n left, right = pair\n l_tree = get_sub_node(left, index)\n if l_tree[0]:\n r_tree = get_sub_node(right, l_tree[1])\n if r_tree[0]:\n if l_tree[2] == 0:\n node.extend(l_tree[0])\n else:\n node.append(l_tree[0])\n if r_tree[2] == 0:\n node.extend(r_tree[0])\n else:\n node.append(r_tree[0])\n return (node, r_tree[1])\n else:\n return (False, r_tree[1])\n else:\n return (False, l_tree[1])", "title": "" }, { "docid": "346dc95372312ad017e57feb4cd6ea87", "score": "0.5166625", "text": "def _calc_spanning_tree():\n\n def flip(link):\n return Discovery.Link(link.dpid2, link.port2, link.dpid1, link.port1, link.link_type,link.available)\n\n adj = defaultdict(lambda: defaultdict(lambda: []))\n switches = set()\n # Add all links and switches\n for l in generator_for_link('lldp'):\n adj[l.dpid1][l.dpid2].append(l)\n switches.add(l.dpid1)\n switches.add(l.dpid2)\n\n # Cull links -- we want a single symmetric link connecting nodes\n for s1 in switches:\n for s2 in switches:\n if s2 not in adj[s1]:\n continue\n if not isinstance(adj[s1][s2], list):\n continue\n assert s1 is not s2\n good = False\n for l in adj[s1][s2]:\n if flip(l) in core.openflow_discovery.adjacency:\n # This is a good one\n adj[s1][s2] = l.port1\n adj[s2][s1] = l.port2\n good = True\n break\n if not good:\n del adj[s1][s2]\n if s1 in adj[s2]:\n # Delete the other way too\n del adj[s2][s1]\n q = []\n more = set(switches)\n\n done = set()\n tree = defaultdict(set)\n while True:\n q = sorted(list(more)) + q\n more.clear()\n if len(q) == 0: break\n v = q.pop(False)\n if v in done: continue\n done.add(v)\n for w, p in adj[v].iteritems():\n if w in tree: continue\n more.add(w)\n tree[v].add((w, p))\n tree[w].add((v, adj[w][v]))\n if False:\n log.debug(\"*** SPANNING TREE ***\")\n for sw, ports in tree.iteritems():\n # print \" \", dpidToStr(sw), \":\", sorted(list(ports))\n # print \" \", sw, \":\", [l[0] for l in sorted(list(ports))]\n log.debug((\" %i : \" % sw) + \" 
\".join([str(l[0]) for l in\n sorted(list(ports))]))\n log.debug(\"*********************\")\n return tree", "title": "" }, { "docid": "6fe2b37c51b4db555d8b6c545be7456d", "score": "0.5157645", "text": "def hailstone_tree(n, h):\n if h == 0:\n return tree(n)\n else:\n branches = [hailstone_tree(2 * n, h - 1)]\n if ((n - 1) % 3 == 0) and (n not in [1, 4]):\n branches.append(hailstone_tree((n - 1) // 3, h - 1))\n return tree(n, branches)", "title": "" }, { "docid": "25df189bf54cd60de53fc2a86497dbf0", "score": "0.51531", "text": "def square_tree(t):\r\n return tree(label(t)**2, [square_tree(b) for b in branches(t)])", "title": "" }, { "docid": "eaf6211ecfec4dc91718070f52fe970d", "score": "0.5152186", "text": "def __init__(self, size : int) -> None: \n self.root = [idx for idx in range(size)]", "title": "" }, { "docid": "9e4aac53a5789e39255150f647864372", "score": "0.5151803", "text": "def build_tree(start=(0, 0), branch_len=150, angle=270, use_random=True):\n params = {\n \"length\": (0.45, 0.825),\n \"angle\": (-65, 65),\n \"branches\": [2, 2, 2, 3, 3, 3, 4, 5]\n }\n\n if branch_len <= 3:\n return []\n else:\n tree = []\n x_end = start[0] + (branch_len * math.cos(math.radians(angle)))\n y_end = start[1] + (branch_len * math.sin(math.radians(angle)))\n tree.append((start[0], start[1], x_end, y_end))\n\n if use_random:\n for _ in range(random.choice(params[\"branches\"])):\n tree += build_tree(\n (x_end, y_end),\n branch_len * random.uniform(\n params[\"length\"][0],\n params[\"length\"][1]\n ),\n angle + random.randrange(params[\"angle\"][0], params[\"angle\"][1]),\n use_random=use_random\n )\n else:\n tree += build_tree(\n (x_end, y_end),\n branch_len * 0.61,\n angle - 45,\n use_random=use_random\n )\n tree += build_tree(\n (x_end, y_end),\n branch_len * 0.61,\n angle + 45,\n use_random=use_random\n )\n\n return tree", "title": "" }, { "docid": "dcf28bae4d551e53d6651e4bad81db55", "score": "0.5144912", "text": "def d_algo(arr, start_node, end_node, size):\n start_x, start_y = start_node\n\n arr[start_x][start_y] = 0 #Initialise start node in array\n current_node = (start_x, start_y) #Initialise current_node (x,y)\n absorbed_nodes = [current_node] #Initialise absorbed nodes [(x,y)]\n heap = min_heap([]) #Initialise heap structure\n\n while(current_node != end_node):\n current_x, current_y = current_node #x,y coordinates of current node\n current_value = arr[current_x][current_y] #Value of current node\n\n #Obtain list of nodes that are surrounding current nodes\n surrounding_nodes = check_surroundings(arr, absorbed_nodes, current_node, size)\n\n #For each surrounding node, check if it is in heap and check if it needs updating\n for node in surrounding_nodes:\n x, y = node #x,y coordinates of surrounding_nodes\n\n #Node has not been added to heap before, so we add it in, and we update in arr\n if arr[x][y] == float(\"inf\"):\n arr[x][y] = current_value + 1 #Updates arr\n node_with_value = (x, y, arr[x][y])\n heap.heap_insert(node_with_value) #Updates heap\n\n #Value of node <= value of current_node + 1, then we leave it alone\n elif arr[x][y] <= current_value + 1:\n pass\n\n #Value of node > value of current_node + 1, then we want to update node value in heap\n else:\n node_with_value = (x, y, arr[x][y])\n heap_index = heap.find_index(node_with_value)\n #Update the heap_index with appropriate value\n heap.update_heap(heap_index, current_value + 1)\n \n min_node = heap.extract_min() #Obtain smallest node\n if min_node == -1: #Checks if there are any remaining nodes\n return -1\n current_node = 
(min_node[0], min_node[1]) #update current_node\n absorbed_nodes.append(current_node) #Update absorbed_nodes\n \n #If while loop condition is met, it means the current_node == end_node, and we found the shortest path\n return 0", "title": "" }, { "docid": "76218afbe9f759a7a42ee56fa0f02c2b", "score": "0.5141121", "text": "def constructTree(serialization):\n\tif not serialization:\n\t\treturn None\n\n\t_stack = []\n\troot = TreeNode(serialization.pop(0))\n\t_stack.append(root)\n\n\twhile _stack:\n\t\tif not serialization:\n\t\t\tbreak\n\t\tnode = _stack.pop(0)\n\t\tn_left = serialization.pop(0)\n\t\tif n_left == '#':\n\t\t\tnode.left = None\n\t\telse:\n\t\t\tnode.left = TreeNode(n_left)\n\t\t\t_stack.append(node.left)\n\t\tn_right = serialization.pop(0)\n\t\tif n_right == '#':\n\t\t\tnode.right = None\n\t\telse:\n\t\t\tnode.right = TreeNode(n_right)\n\t\t\t_stack.append(node.right)\n\n\treturn root", "title": "" }, { "docid": "e103dcbd0b9cfb8e143a397ce00f918f", "score": "0.5138649", "text": "def cut_tree_n(n_clusters,clusters):\n aux_cluster = clusters[n_clusters][1]\n n = clusters[\"n_cluster\"]\n assigments=np.zeros(n)\n for i,c in enumerate(aux_cluster):\n if len(c)>1:\n for j in c.split(\",\"):\n assigments[int(j)-1]=i\n else:\n assigments[int(c)-1] = i \n return assigments", "title": "" }, { "docid": "a74140d51e6acb2610b8ad80a6aaf261", "score": "0.51384014", "text": "def make_tree(data, labels):\n _, n = np.shape(data)\n if n == 1:\n counter = Counter()\n for value in data.flatten().A[0]: counter[value] += 1\n return counter.most_common(1)[0][0]\n if len( set(data[:, -1].flatten().A[0]) ) == 1:\n return data[0, -1]\n\n best_feature = choose_best_feature(data)\n tree = { labels[best_feature]: {} }\n\n for value in set(data[:, best_feature].flatten().A[0]):\n lines = range(best_feature) + range(best_feature+1, n)\n tree[labels[best_feature]][value] = \\\n make_tree(data[ data[:, best_feature].flatten().A[0] == value ][:, lines],\n labels[lines])\n return tree", "title": "" }, { "docid": "44215dd5b351f6dc5e660ac6b6fa2f08", "score": "0.5137877", "text": "def findSegments(self,linkSegments=1,nodeID=0,segments=()):\n #print \"Calling find segments with nodeID %d\" % nodeID\n\n if linkSegments and nodeID>0:\n thisPath = [self.nodes[nodeID].parent]\n else:\n thisPath = []\n\n if isinstance(nodeID,int):\n nodeID = [nodeID]\n\n while len(nodeID)==1:\n #print \"appending node %d\" % nodeID[0]\n thisPath.append(nodeID[0])\n nodeID = self.nodes[nodeID[0]].children\n \n segments = segments + (thisPath,) #Store this segment\n\n #Go into the branches with a recursive call\n for thisNode in nodeID:\n segments=self.findSegments(linkSegments,thisNode,segments)\n\n return segments", "title": "" }, { "docid": "8717cd95d00582ccb3daef6f85af58c9", "score": "0.5131499", "text": "def __init__(self):\n # 小顶堆存储较大的数\n self.lager = []\n # 大顶堆存储较小的数\n self.smaller = []", "title": "" }, { "docid": "b86193be7853fd2bfecb441e817ce4b8", "score": "0.5125732", "text": "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #state = (position, direction ,cost)\n #node = (state, parent)\n solution = []\n explored = {}\n stack = util.Stack()\n node = [problem.getStartState(), None, 0, None]\n stack.push(node)\n while 1:\n if stack.isEmpty():\n return []\n node = stack.pop()\n if problem.isGoalState(node[0]): # node[0]: position\n while node[3] is not None: # node[3]: parent\n solution.append(node[1])\n node = explored[node[3]]\n solution = solution[::-1]\n return solution\n explored.update({node[0]:node})\n for neighbor 
in problem.getSuccessors(node[0]):\n if neighbor[0] not in explored:\n expand_node = list(neighbor)\n expand_node.append(node[0]) # the parent of expand_node is node\n # explored.update({expand_node[0]: expand_node}) !!different from BFS\n stack.push(expand_node)", "title": "" }, { "docid": "d04c4ec572bd3896d68bda9b8150dd5a", "score": "0.51217824", "text": "def set_build_tree(self,node):\n\n # tree with MAX_LEAFS leafs is constructed.\n # For sklearn add to each leaf its cluster label based on the children in the tree object from sklearn AgglomerativeClustering\n self.leaf_nodes = []\n if self.mode == \"sklearn\":\n tree = self._build_tree(node) # construct whole tree\n tree = self._get_cluster_labels_for_leafs(tree) # get labels for leafs\n tree = self._cut_nodes_from_leafs(tree) # cut nodes from bottom of the tree until only leafs with a unique cluster_label exist (Number leaves = MAX_LEAFS)\n elif self.mode == \"FBE\":\n self.temp_n_leafs = 1\n self.maxDepth = 0\n self._get_maxDepth(0, 0)\n depth = 0\n #print(\"maxDepth: {}\".format(self.maxDepth))\n # build tree by level: create first all children for level 1, then level 2...\n # Prevents that a tree creates children just in one branch and always goes deeper in case of a max number of leavese\n while self.temp_n_leafs < MAX_LEAFS and depth <= self.maxDepth:\n print(\"\\n\\ndepth: {}, temp_n_leafs: {}, maxDepth: {}\".format(depth, self.temp_n_leafs, self.maxDepth))\n tree = self._build_tree(node, depth)\n depth += 1\n\n assert isinstance(tree, Node)\n self.tree = tree\n print(\"Count nodes: {}; leafs: {}\".format(self.count_nodes(), self.count_leafs()))", "title": "" }, { "docid": "836cf1f675120f1fc2a19f15cc966d7d", "score": "0.5121536", "text": "def sampleTree1():\n n5 = TreeNode(None, None)\n n3b = TreeNode(None, None)\n n3a = TreeNode(n5, n3b)\n n9 = TreeNode(None, None)\n n2 = TreeNode(None, n9)\n n1 = TreeNode(n3a, n2)\n return n1, 4", "title": "" }, { "docid": "dc03815f53770438be334cccbad367fb", "score": "0.5114194", "text": "def generate_treemap(self, rect):\n # Read the handout carefully to help get started identifying base cases,\n # and the outline of a recursive step.\n #\n # Programming tip: use \"tuple unpacking assignment\" to easily extract\n # coordinates of a rectangle, as follows.\n # x, y, width, height = rect\n\n if self.data_size == 0:\n return []\n # if self is a leaf\n elif self._subtrees == []:\n return [(rect, self.colour)]\n else:\n x, y, width, height = rect\n rect_lst = []\n pos_x = 0\n pos_y = 0\n\n subtrees_copy = []\n\n for tree in self._subtrees:\n subtrees_copy.append(tree)\n\n # incase there are empty folders with are present in the end\n # because they will create black spaces\n while subtrees_copy[len(subtrees_copy)-1].data_size == 0:\n subtrees_copy.pop()\n if len(subtrees_copy) == 1:\n break\n\n for i in range(len(subtrees_copy)):\n subtree = subtrees_copy[i]\n percent_area = subtree.data_size/self.data_size\n\n # set the last rectangle to fill the remaining space\n if i == len(subtrees_copy) - 1:\n assert subtree.data_size > 0\n if width > height:\n rect_lst.extend(subtree.generate_treemap((x + pos_x, y, width - pos_x, height)))\n else:\n rect_lst.extend(subtree.generate_treemap((x, y + pos_y, width, height - pos_y)))\n else:\n if width > height:\n new_width = math.floor(percent_area * width)\n rect_lst.extend(subtree.generate_treemap((x + pos_x, y, new_width, height)))\n pos_x += new_width\n else:\n new_height = math.floor(percent_area * height)\n rect_lst.extend(subtree.generate_treemap((x, y + 
pos_y, width, new_height)))\n pos_y += new_height\n\n assert pos_x <= width, str(pos_x) + ' ' + str(width)\n assert pos_y <= height, str(pos_y) + ' ' + str(height)\n\n return rect_lst", "title": "" }, { "docid": "c3d7764aca68f83c512aa6e54971ff06", "score": "0.5112601", "text": "def small_tree():\n tree = KTree()\n tree.root = Node(5)\n tree.root.children = [Node(9), Node(3)]\n return tree", "title": "" }, { "docid": "3b6f0150a0c6a35d46dbf0c21ae15564", "score": "0.5108453", "text": "def _insert_level_order(arr, root, i, n):\n # Base case for recursion\n if i < n:\n temp = Node(arr[i])\n root = temp\n # insert left child\n root.left = _insert_level_order(arr, root.left,\n 2 * i + 1, n)\n # insert right child\n root.right = _insert_level_order(arr, root.right,\n 2 * i + 2, n)\n return root", "title": "" }, { "docid": "8fb31aa4033fd2e5e4a5399a65b51042", "score": "0.51042324", "text": "def make_hierarchical_blockmodel_graphon(n, heights):\n heights = _np.asarray(heights)\n def graphon(x, y):\n ix = _find_buckets(x, y, n)\n return heights[ix]\n return graphon", "title": "" }, { "docid": "8ff3ca5bcf7d9a0ce4cee8f88989cc4c", "score": "0.51022243", "text": "def _traverse_quadtree_to_collect_planned_parcels_and_street_segments(self, node):\n if len(node.children) == 0 and node.width != 1:\n w = int(node.center[0] - node.width * 0.5)\n e = int(node.center[0] + node.width * 0.5)\n n = int(node.center[1] - node.width * 0.5)\n s = int(node.center[1] + node.width * 0.5)\n self._planned_parcels.append((w, n, node.width))\n self._planned_street_segments['ns'][(w, n)] = (w, s)\n self._planned_street_segments['ns'][(e, n)] = (e, s)\n self._planned_street_segments['ew'][(w, n)] = (e, n)\n self._planned_street_segments['ew'][(w, s)] = (e, s)\n for child in node.children:\n self._traverse_quadtree_to_collect_planned_parcels_and_street_segments(node=child)", "title": "" }, { "docid": "78470b5fba662a153da185aeaabd7dcc", "score": "0.50944746", "text": "def __init__(self):\n self.N = 26\n self.M = self.N + 1\n self.root = [0]*self.M", "title": "" }, { "docid": "33fda6bd5105678709e3122b864eb9bd", "score": "0.5090144", "text": "def buildTree(self):\n self.createTreeTable()\n\n while len(self.treeTable) >= 2: #dopoki jest wiecej niz jeden element w tablicy\n #zdjecie 2 elementow\n tmp1 = self.treeTable.pop(0)\n tmp2 = self.treeTable.pop(0)\n #polaczenie w nowe dwrzewko\n newTree = Tree(tmp1.key + tmp2.key, tmp1.key + tmp2.key)\n newTree.left = tmp1\n newTree.right = tmp2\n #wstawienie drzewka z powrotem do listy\n self.treeTable.insert(0, newTree)\n #gotowe drzewo\n self.drzewokodowe = self.treeTable[0]", "title": "" }, { "docid": "ae8f58e2190367d3b53677695edfa56f", "score": "0.50749475", "text": "def create_lemma_graph(segments):\r\n segments = lemmatize(segments)\r\n g = create_naive_graph(segments)\r\n return g", "title": "" }, { "docid": "4fe2ca0f497866eeeebbe867ce58e7d2", "score": "0.50699234", "text": "def insert(self, n):\n if type(n).__name__ is not \"node\":\n raise TypeError('Not of node type')\n if self.root is None:\n self.root = n\n self.head.nxt = n\n self.head.pre = n\n n.pre = self.head\n n.nxt = self.head\n return\n tmp = self.lookup(n.getKey())\n if (tmp is not None):\n # key already in, change value\n tmp.setValue(n.getValue())\n return\n cur = self.root\n level = 0\n while(cur is not None):\n if n.key < cur.key:\n if cur.childL is None:\n n.nxt = cur\n n.pre = cur.pre\n cur.pre.nxt = n\n cur.pre = n\n cur.childL = n\n n.parent = cur\n level += 1\n # increment depth if creating a new layer\n if 
cur.childR is None and level > self.depth:\n self.depth = level\n break\n level += 1\n cur = cur.childL\n if n.key >= cur.key:\n if cur.childR is None:\n n.nxt = cur.nxt\n n.pre = cur\n cur.nxt.pre = n\n cur.nxt = n\n cur.childR = n\n n.parent = cur\n level += 1\n # increment depth if creating a new layer\n if cur.childL is None and level > self.depth:\n self.depth = level\n break\n level += 1\n cur = cur.childR\n self.itemCount += 1", "title": "" }, { "docid": "276a63badd23d012695e9bc11be3e156", "score": "0.5067928", "text": "def dfs():", "title": "" }, { "docid": "51aab82de1eb1652c8c8f86ba2ba0628", "score": "0.50676584", "text": "def build_perfectly_balanced_bst(keys=[]):\n n = len(keys)\n\n assert(keys), \"keys cannot be empty\"\n assert(sorted(keys)), \"keys are not sorted\"\n assert(n%2!=0), \"number of keys need to be odd\"\n \n median_pos = n // 2 \n median = keys[median_pos]\n first_half = keys[:median_pos]\n second_half = keys[median_pos+1:]\n\n b = BST()\n b.put(keys[median_pos], \"\")\n i = 0\n j = len(first_half)-1\n print(\"items are \", first_half, second_half)\n while j >= 0:\n b.put(second_half[i], \"\")\n b.put(first_half[j],\"\")\n i += 1\n j -= 1\n return b", "title": "" }, { "docid": "83c637e077896bda78690dbbd043d0e6", "score": "0.5067221", "text": "def ninsert(self, value):\n height = 0\n root = self.root\n before = root\n while root:\n before = root\n if root.key <= value:\n root = root.right\n else:\n root = root.left\n height += 1\n if before.key <= value:\n before.right = Node(value)\n else:\n before.left = Node(value)\n if self.height < height:\n self.height = height", "title": "" }, { "docid": "e50de05d8f6dc13af2e64aea990741ff", "score": "0.5067141", "text": "def sampleTree4():\n n6 = TreeNode(None, None)\n n5 = TreeNode(n6, None)\n n3a = TreeNode(n5, None)\n n7 = TreeNode(None, None)\n n9 = TreeNode(None, n7)\n n2 = TreeNode(None, n9)\n n1 = TreeNode(n3a, n2)\n return n1, 8", "title": "" }, { "docid": "384dc4e8ce281ae6bcd534222138b1bf", "score": "0.505431", "text": "def _init_path_graph(self):\n\n def add_scaffold_node(name, path, length):\n # prepare scaffold information\n scaff_start = self.hic.cut_intervals[contig_path[0]][1]\n scaff_end = self.hic.cut_intervals[contig_path[-1]][2]\n #length = scaff_end - scaff_start\n attr = {'name': prev_label,\n 'path': contig_path[:],\n 'length': length,\n 'start': scaff_start,\n 'end': scaff_end,\n 'direction': \"+\"}\n self.scaffold.add_node(prev_label, **attr)\n\n contig_path = []\n prev_label = None\n self.matrix = self.hic.matrix.copy()\n self.matrix_bins = PathGraph()\n self.scaffold = PathGraph()\n self.bin_id_to_scaff = OrderedDict()\n self.total_length = 0\n scaff_length = 0\n for idx, interval in enumerate(self.hic.cut_intervals):\n label, start, end, coverage = interval\n length = end - start\n if prev_label is not None and prev_label != label:\n self.matrix_bins.add_path(contig_path, name=prev_label)\n add_scaffold_node(prev_label, contig_path, scaff_length)\n contig_path = []\n scaff_length = 0\n\n scaff_length += length\n attr = {'name': label,\n 'start': start,\n 'end': end,\n 'coverage': coverage,\n 'length': length}\n\n self.matrix_bins.add_node(idx, **attr)\n self.bin_id_to_scaff[idx] = label\n self.total_length += length\n contig_path.append(idx)\n prev_label = label\n if prev_label is not None:\n add_scaffold_node(prev_label, contig_path, scaff_length)\n\n if len(contig_path) > 1:\n self.matrix_bins.add_path(contig_path, name=label)\n\n # before any merge is done, pg_base == pg_matrix.bins\n 
self.pg_base = copy.deepcopy(self.matrix_bins)", "title": "" }, { "docid": "3446fa1eb60d451cff8a100eca0e5bc3", "score": "0.5051613", "text": "def fibonacci_tree(n):\n fibonacci = [1, 1]\n for i in range(2, n):\n sum_list = fibonacci[-2] + fibonacci[-1]\n fibonacci.append(sum_list)\n return fibonacci", "title": "" }, { "docid": "f9b55c248faec4dadc3532c899ff452f", "score": "0.50495696", "text": "def get_tree(self, tree, A, a, i):\n if A != []:\n for j in a[i]:\n if j in A:\n tree[i]['children'].append(j)\n tree[j]['father'] = i\n A.remove(j)\n tree, A = self.get_tree(tree, A, a, j)\n return tree, A", "title": "" }, { "docid": "2f2d59e44abefd85747de9d559384953", "score": "0.50495434", "text": "def build_tree(data, impurity, chi_value=1):\n \n thresh, feature, left_Child, right_Child = bestFeature(data,impurity)\n root = DecisionNode(feature, thresh)\n instance_num, nonzero_num, zero_num = numInstances(data)\n root.split[0] = zero_num\n root.split[1] = nonzero_num\n root.split[\"total\"] = instance_num\n \n build_rec(root, data, impurity, chi_value)\n return root", "title": "" }, { "docid": "018c88f84461ceae2081e78b58eb1f4f", "score": "0.50488335", "text": "def build_tree(train, max_depth=None, min_size=1):\r\n # create a root node split by calling get_best_split on the full training set\r\n root = get_best_split(train)\r\n # now build the tree using run_split\r\n run_split(root, max_depth, min_size, 1)\r\n return root", "title": "" }, { "docid": "0213f9cecad18e07dad7d0f404e957b9", "score": "0.50434434", "text": "def pack_tree(self, root, obj, node, idx=0, height=0):\n while idx < len(obj):\n node.page = obj[idx : idx+self.cap]\n node.set_up(height)\n node.new_mbr()\n self.root.page.append(node)\n self.root.set_up(height+1)\n node = Node()\n idx += self.cap\n\n if len(root.page) > self.cap:\n root.set_up(height)\n root.new_mbr()\n node = Node()\n parents = self.root\n self.root = Node()\n self.pack_tree(self.root, parents.page, node, 0, height+1)\n self.root.new_mbr()\n self.height = self.root.level\n return", "title": "" }, { "docid": "47840d04b7f0264f1cfa5dd232d808fe", "score": "0.50387365", "text": "def segment(segment):\n return {'segment': segment}", "title": "" } ]
cf84055bc026711dba27ebb401e4cb2a
Regrid to 2deg (180lon90lat) horizontal resolution Input
[ { "docid": "9ccee3ab882e0228bda38a0e6f71da74", "score": "0.0", "text": "def Regrid2deg(d, var):\n tgrid = grid.create_uniform_grid(-89, 89, 2.0, 0.0, 358., 2.0)\n drg = d.regridder.horizontal(var, tgrid, tool=\"xesmf\", method=\"conservative_normed\",periodic=True)[var]\n \n print(\"Complete regridding from\", d[var].shape, \"to\", drg.shape)\n return drg", "title": "" } ]
[ { "docid": "f815fa237589ae2cb01084b4ca4ace69", "score": "0.58806723", "text": "def setup_grid():\n i0t,imt,j0t,jmt = (0000 ,8640, 0, 4320)\n incr = 360.0/imt\n jR = np.arange(j0t, jmt)\n iR = np.arange(i0t, imt)\n latvec = ( 90 - jR*incr - incr/2)[::-1]\n lonvec = (-180 + iR*incr + incr/2)\n lons,lats = np.meshgrid(lonvec, latvec)\n grid = pr.geometry.GridDefinition(lons=lons, lats=lats)\n grid.ivec = np.arange(grid.shape[1])\n grid.jvec = np.arange(grid.shape[0])\n grid.iarr,grid.jarr = np.meshgrid(grid.ivec, grid.jvec)\n return grid", "title": "" }, { "docid": "b8d2378cb53d6670154c546fb9ef128f", "score": "0.5798142", "text": "def test_genreggrid():\n grid = grids.genreg_grid()\n assert grid.shape == (180, 360)\n lon, lat = grid.gpi2lonlat(3)\n assert lon == -176.5\n assert lat == 89.5\n lon, lat = grid.gpi2lonlat(360)\n assert lon == -179.5\n assert lat == 88.5", "title": "" }, { "docid": "3254fd96d20f0d4723a44db59209140a", "score": "0.5780261", "text": "def getGridCC(lon, lat):\n dx = lon[2]-lon[1]\n dy = lat[2]-lat[1]\n lon = np.append(lon, lon[-1] + dx)\n lat = np.append(lat, lat[-1] + dy)\n lon -= dx/2.\n lat -= dy/2.\n LON, LAT = np.meshgrid(lon, lat)\n return LON, LAT", "title": "" }, { "docid": "a47d6793efd2d9cbdbd4956e7ee71234", "score": "0.56477916", "text": "def fix_data(self, cube):\n rlat = cube.coord('grid_latitude').points\n rlon = cube.coord('grid_longitude').points\n\n # Guess coordinate bounds in rlat, rlon (following BCC-CSM2-MR-1).\n rlat_idx_bnds = np.zeros((len(rlat), 2))\n rlat_idx_bnds[:, 0] = np.arange(len(rlat)) - 0.5\n rlat_idx_bnds[:, 1] = np.arange(len(rlat)) + 0.5\n rlat_idx_bnds[0, 0] = 0.\n rlat_idx_bnds[len(rlat) - 1, 1] = len(rlat)\n rlon_idx_bnds = np.zeros((len(rlon), 2))\n rlon_idx_bnds[:, 0] = np.arange(len(rlon)) - 0.5\n rlon_idx_bnds[:, 1] = np.arange(len(rlon)) + 0.5\n\n # Calculate latitude/longitude vertices by interpolation\n lat_vertices = []\n lon_vertices = []\n for (i, j) in [(0, 0), (0, 1), (1, 1), (1, 0)]:\n (rlat_v, rlon_v) = np.meshgrid(rlat_idx_bnds[:, i],\n rlon_idx_bnds[:, j],\n indexing='ij')\n lat_vertices.append(\n map_coordinates(cube.coord('latitude').points,\n [rlat_v, rlon_v],\n mode='nearest'))\n lon_vertices.append(\n map_coordinates(cube.coord('longitude').points,\n [rlat_v, rlon_v],\n mode='wrap'))\n lat_vertices = np.array(lat_vertices)\n lon_vertices = np.array(lon_vertices)\n lat_vertices = np.moveaxis(lat_vertices, 0, -1)\n lon_vertices = np.moveaxis(lon_vertices, 0, -1)\n\n # Copy vertices to cube\n cube.coord('latitude').bounds = lat_vertices\n cube.coord('longitude').bounds = lon_vertices\n return cube", "title": "" }, { "docid": "aa463dac8ba5a0acc03fc3cb7d8e61bd", "score": "0.5620822", "text": "def set_latitude_grid(self, degrees):\n # Skip -90 and 90, which are the fixed limits.\n grid = np.arange(-90 + degrees, 90, degrees)\n self.yaxis.set_major_locator(FixedLocator(np.deg2rad(grid)))\n self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))", "title": "" }, { "docid": "36aac25432857763bbe605a3b55f6066", "score": "0.55989915", "text": "def regrid_uniform_cc(data, input_lat, input_lon, output_lat, output_lon):\n \n input_grid = create_xesmf_grid_uniform_cc(input_lat, input_lon)\n \n output_grid = create_xesmf_grid_uniform_cc(output_lat, output_lon)\n \n regridder = xesmf.Regridder(input_grid, output_grid, 'conservative')\n regridded = regridder( data )\n \n return regridded", "title": "" }, { "docid": "12fe4dcbf28c0d8c1a96768403db41c5", "score": "0.55584645", "text": "def grid_size_lat(self):\n return 720", 
"title": "" }, { "docid": "18a868ca3f7f2f3ea6236fe482a27c6c", "score": "0.5543468", "text": "def make_equiangular_grid(resolution):\n longitude = np.linspace(0, 2*np.pi, resolution, endpoint=False)\n colatitude = np.linspace(0, np.pi, resolution)\n return np.meshgrid(longitude, colatitude)", "title": "" }, { "docid": "2ea6611c7011ab59a2391a6b96348129", "score": "0.55357724", "text": "def to_res(self, new_res):\n new_ll_corner = (self.ll_corner[0] - self.res/2 + new_res/2, self.geo_transform[0] + new_res/2)\n new_ur_corner = (self.geo_transform[3] - new_res/2, self.ur_corner[1] + self.res/2 - new_res/2)\n\n return GeoGrid(new_ll_corner, new_ur_corner, new_res, geo_type=self.type)", "title": "" }, { "docid": "10d9e54c40798b00463fd38cabac675a", "score": "0.5518346", "text": "def _lat_to_gridsquare(lat):\n lat = lat + 90\n field, lat = divmod(lat, 10)\n square, lat = divmod(lat, 1)\n subsq = lat * 24\n return (string.ascii_uppercase[int(field)], int(\n square), string.ascii_lowercase[int(subsq)])", "title": "" }, { "docid": "b3483589ddadc383a6ce44195bb5f632", "score": "0.548117", "text": "def deflections_2d_from_grid(self, grid):\r\n deflection_y = -np.multiply(self.magnitude, grid[:, 0])\r\n deflection_x = np.multiply(self.magnitude, grid[:, 1])\r\n return self.rotate_grid_from_reference_frame(\r\n np.vstack((deflection_y, deflection_x)).T\r\n )", "title": "" }, { "docid": "da69538b7a3c4857c02d191d0bdf62aa", "score": "0.5478338", "text": "def in_grid(self):\n self.xgrid = self.x//block_size\n self.ygrid = self.y//block_size", "title": "" }, { "docid": "3c2a580e981df9cb6da71147cd8fc947", "score": "0.546436", "text": "def init_grid():\n lat0 = iemre.SOUTH\n lat1 = iemre.NORTH\n lon0 = iemre.WEST\n lon1 = iemre.EAST\n x0, y0 = lalo2pt(lat1, lon0)\n x1, y1 = lalo2pt(lat0, lon1)\n\n fp = \"/home/ldm/data/gis/images/4326/q2/p48h.png\"\n q2 = gdal.Open(fp, 0)\n q2d = numpy.flipud( q2.ReadAsArray()[y0:y1:22,x0:x1:25] )\n\n return q2d / 25.4 # hard code snow ratio!", "title": "" }, { "docid": "9c94dbaf551e99033b20005a397e9869", "score": "0.5456555", "text": "def regridCoords(self):\n\n g5 = self.linear\n di = MxD03Handle('DISORT')\n ns, nf, km = g5.DELP.shape\n\n # Get GEOS-Edge coordinates\n # -------------------------\n if self.verb:\n print(\" - Getting GEOS-5 edge coordinates\")\n g5.PE, g5.ZE = getedgecoords(g5.T,g5.QV,g5.DELP,g5.ZS,self.PTOP)\n\n g5.logPE = None # compute this on demand later\n\n # Get DISORT edge pressure\n # ------------------------\n if self.verb:\n print(\" - Getting DISORT edge pressure\")\n di.ZE = 1000 * DISORT_LEVELS # in meters\n di.PE,di.PS,di.KS,rc = interppressure(di.ZE,self.ZS,g5.PE,g5.ZE)\n if rc:\n raise MCSError('Error on return from interpPressure(), rc = <%d>'%rc)\n\n di.DELP = di.PE[:,:,1:]-di.PE[:,:,0:-1]\n\n di.logPE = None # compute this on demand later\n\n # Adjust skin temperature for terrain\n # -----------------------------------\n g5.TS = g5.TS * (di.PS/g5.PS)**MAPL_KAPPA\n\n # Notice that *ks* is the surface level, 1-offset as in Fortran\n # ps is the surface pressure which has been adjusted for terrain height\n\n self.di = di", "title": "" }, { "docid": "848373467b4ac1cbb4aff0d063d3d1fa", "score": "0.5446151", "text": "def nsidc_polar_lonlat(longitude, latitude, grid, hemisphere):\n\n true_scale_lat = 70\n re = 6378.273\n e = 0.081816153\n\n if grid != 6.25 and grid != 12.5 and grid != 25:\n raise ValueError(\"Legal grid value are 6.25, 12.5, or 25\")\n \n if hemisphere >= 0:\n delta = 45\n imax = 1216\n jmax = 1792\n xmin = -3850 + grid/2\n ymin = 
-5350 + grid/2\n else:\n delta = 0\n imax = 1264\n jmax = 1328\n xmin = -3950 + grid/2\n ymin = -3950 + grid/2\n\n if grid == 12.5:\n imax = imax//2\n jmax = jmax//2\n elif grid == 25:\n imax = imax//4\n jmax = jmax//4\n\n xy = polar_lonlat_to_xy(longitude + delta, np.abs(latitude),\n true_scale_lat, re, e, hemisphere)\n i = (np.round((xy[0] - xmin)/grid)).astype(int) + 1\n j = (np.round((xy[1] - ymin)/grid)).astype(int) + 1\n # Flip grid orientation in the 'y' direction\n j = jmax - j + 1\n return [i, j]", "title": "" }, { "docid": "e78084cd6233b2efca384f8da94e6c1a", "score": "0.5442287", "text": "def normalize_coords(grid):\n assert grid.size(1) == 2\n h, w = grid.size()[2:]\n grid[:, 0, :, :] = 2 * (grid[:, 0, :, :].clone() / (w - 1)) - 1 # x: [-1, 1]\n grid[:, 1, :, :] = 2 * (grid[:, 1, :, :].clone() / (h - 1)) - 1 # y: [-1, 1]\n grid = grid.permute((0, 2, 3, 1)) # [B, H, W, 2]\n return grid", "title": "" }, { "docid": "c34bd7bc28e87a73e262662873385635", "score": "0.5430857", "text": "def compute_mapgrid_resolution(self):\n print 'Computing grid resolution of map...'\n grid_factor = math.cos(math.radians(\n min(abs(self.lat_range[0]),\n abs(self.lat_range[1])))) * \\\n 40024000.0 / 360.0 / self.map_resolution\n\n self.lat_map_resolution = int(abs(self.lat_range[0] - \n self.lat_range[1]) * grid_factor)\n self.lon_map_resolution = int(abs(self.lon_range[0] - \n self.lon_range[1]) * grid_factor)", "title": "" }, { "docid": "38168e79e1fa9435015e57fd40c0abb2", "score": "0.54299414", "text": "def geo_to_grid(self,lon_mat,lat_mat):\r\n if self.coord_sys==\"grid\":\r\n print(\"\\n Warning: already in grid coordinates\")\r\n else:\r\n # Read extreme coordinates\r\n lon_max=np.max(lon_mat)\r\n lon_min=np.min(lon_mat)\r\n lat_max=np.max(lat_mat)\r\n lat_min=np.min(lat_mat)\r\n # Converts to grid coordinate\r\n if ((self.x> lon_max) or (self.x< lon_min) or (self.y>lat_max) or (self.y<lat_min)):\r\n print(\"\\n Longitude or latitude out of range, must be between \", lon_min, \" & \", lon_max, \" for longitude and \", lat_min, \" & \", lat_max, \" for latitude \\n\")\r\n else:\r\n h_x=(lon_max-lon_min)/len(lon_mat)\r\n h_y=(lat_max-lat_min)/len(lat_mat)\r\n self.x=(self.x-lon_min)/h_x\r\n self.y=(self.y-lat_min)/h_y\r\n self.coord_sys=\"grid\"", "title": "" }, { "docid": "c9000d12da7fa5c7ae92f4292ab0ad63", "score": "0.5429126", "text": "def test_WGS84_values(self):\n\n lon, lat = 2, 66 # Station M\n x0, y0 = 207.924459, 115.383632 # from proj4\n x, y = self.map1.ll2grid(lon, lat)\n self.assertAlmostEqual(x, x0, places=6) \n self.assertAlmostEqual(y, y0, places=6) \n\n lon, lat = 5.323333, 60.3925 # Bergen\n x, y = self.map1.ll2grid(lon, lat)\n x0, y0 = 167.482134, 66.054642 # from proj4\n self.assertAlmostEqual(x, x0, places=6)\n self.assertAlmostEqual(y, y0, places=6)", "title": "" }, { "docid": "2c7470d44381ea31ac3ed5373d94ec14", "score": "0.5416999", "text": "def latlng2w(self, latitude, longitude):\n\n # // The mapping between latitude, longitude and pixels is defined by the web\n # // mercator projection.\n\n siny = math.sin(latitude * math.pi / 180)\n\n # // Truncating to 0.9999 effectively limits latitude to 89.189. 
This is\n # // about a third of a tile past the edge of the world tile.\n siny = min(max(siny, -0.9999), 0.9999)\n\n x = self.tile_size * (0.5 + longitude / 360)\n y = self.tile_size * (0.5 - math.log((1 + siny) / (1 - siny)) / (4 * math.pi))\n\n return x, y", "title": "" }, { "docid": "1836ff8472d0e6c2466d9ee8e0e2404d", "score": "0.53747594", "text": "def ul(xtile, ytile, zoom):\n\n n = 2.0 ** zoom\n lon_deg = xtile / n * 360.0 - 180.0\n lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))\n lat_deg = math.degrees(lat_rad)\n return LngLat(lon_deg, lat_deg)", "title": "" }, { "docid": "672d106a858d871446816b8eada96ad3", "score": "0.53573245", "text": "def geo_to_grid(lon,lat,lon_mat,lat_mat):\r\n # Read extreme coordinates\r\n lon_max=np.max(lon_mat)\r\n lon_min=np.min(lon_mat)\r\n lat_max=np.max(lat_mat)\r\n lat_min=np.min(lat_mat)\r\n # Converts to grid coordinate\r\n if ((lon> lon_max) or (lon< lon_min) or (lat>lat_max) or (lat<lat_min)):\r\n print(\"\\n Longitude or latitude out of range, must be between \", lon_min, \" & \", lon_max, \" for longitude and \", lat_min, \" & \", lat_max, \" for latitude \\n\")\r\n else:\r\n h_x=(lon_max-lon_min)/len(lon_mat)\r\n h_y=(lat_max-lat_min)/len(lat_mat)\r\n i1 = int((lon-lon_min)/h_x)\r\n i2 = i1 + 1\r\n I = [i1,i2]\r\n i0 = np.argmin([abs(lon - lon_mat[i1]), abs(lon - lon_mat[i2])])\r\n i = I[i0]\r\n j1 = int((lat-lat_min)/h_y)\r\n j2 = j1 + 1\r\n J = [j1,j2]\r\n j0 = np.argmin([abs(lat - lat_mat[j1]), abs(lat - lat_mat[j2])])\r\n j = J[j0]\r\n return i,j", "title": "" }, { "docid": "552c52a1d578665e093bf6f42480379f", "score": "0.5334705", "text": "def regrid_maps(oldlat,oldlon,data,newlat,newlon,method='cubic'):\n X, Y = np.meshgrid(oldlon, oldlat)\n XX, YY = np.meshgrid(newlon, newlat)\n regrided = np.empty((data.shape[0],newlat.size,newlon.size))\n for n in range(data.shape[0]):\n regrided[n,...] 
= interp.griddata((X.flatten(),Y.flatten()),\n data[n].flatten(),\n (XX,YY),\n method=method)\n return regrided", "title": "" }, { "docid": "88fff08fa98afb53f5ed5466f54ce251", "score": "0.53304243", "text": "def equi_coord_fixed_resoltuion(pano_W,pano_H,k_W,k_H,u,v,pano_Hf = -1, pano_Wf=-1):\n pano_Hf = pano_H if pano_Hf<=0 else pano_H/pano_Hf\n pano_Wf = pano_W if pano_Wf<=0 else pano_W/pano_Wf\n fov_w = k_W * np.deg2rad(360./float(pano_Wf))\n focal = (float(k_W)/2) / np.tan(fov_w/2)\n c_x = 0\n c_y = 0\n\n u_r, v_r = u, v \n u_r, v_r = u_r-float(pano_W)/2.,v_r-float(pano_H)/2.\n phi, theta = u_r/(pano_W) * (np.pi) *2, -v_r/(pano_H) * (np.pi)\n\n ROT = rotation_matrix((0,1,0),phi)\n ROT = np.matmul(ROT,rotation_matrix((1,0,0),theta))#np.eye(3)\n\n h_range = np.array(range(k_H))\n w_range = np.array(range(k_W))\n w_ones = (np.ones(k_W))\n h_ones = (np.ones(k_H))\n h_grid = np.matmul(np.expand_dims(h_range,-1),np.expand_dims(w_ones,0))+0.5-float(k_H)/2\n w_grid = np.matmul(np.expand_dims(h_ones,-1),np.expand_dims(w_range,0))+0.5-float(k_W)/2\n \n K=np.array([[focal,0,c_x],[0,focal,c_y],[0.,0.,1.]])\n inv_K = np.linalg.inv(K)\n rays = np.stack([w_grid,h_grid,np.ones(h_grid.shape)],0)\n rays = np.matmul(inv_K,rays.reshape(3,k_H*k_W))\n rays /= np.linalg.norm(rays,axis=0,keepdims=True)\n rays = np.matmul(ROT,rays)\n rays=rays.reshape(3,k_H,k_W)\n \n phi = np.arctan2(rays[0,...],rays[2,...])\n theta = np.arcsin(np.clip(rays[1,...],-1,1))\n x = (pano_W)/(2.*np.pi)*phi +float(pano_W)/2.\n y = (pano_H)/(np.pi)*theta +float(pano_H)/2.\n \n roi_y = h_grid+v_r +float(pano_H)/2.\n roi_x = w_grid+u_r +float(pano_W)/2.\n\n new_roi_y = (y) \n new_roi_x = (x) \n\n offsets_x = (new_roi_x - roi_x)\n offsets_y = (new_roi_y - roi_y)\n\n return offsets_x, offsets_y", "title": "" }, { "docid": "83b81a21d8dd00d45d3762721c14f363", "score": "0.53298587", "text": "def __transform_xy(self, x, y):\n width, height = self.display_info['width'], self.display_info['height']\n # if self.display_info['orientation'] in [1, 3]:\n # width, height = height, width\n\n nx = x * self.max_x / width\n ny = y * self.max_y / height\n\n # print(nx, ny, self.max_x, self.max_y, width, height)\n\n return nx, ny", "title": "" }, { "docid": "269095af7de7ac07e849a5b014db8b01", "score": "0.53293175", "text": "def lonlat2px_rearr(img, lon, lat):\n\n w, h = img.size\n mw = w / 360.0\n mh = h / 180.0\n\n row = h / 2 - lat * mh\n\n if lon >= 0:\n col = 0 + lon * mw\n elif lon < 0:\n col = w + lon * mw\n\n return row, col", "title": "" }, { "docid": "4d90818f47f35880f0b83aa4d9f82883", "score": "0.5329141", "text": "def set_longitude_grid(self, degrees):\n # Skip -180 and 180, which are the fixed limits.\n grid = np.arange(-180 + degrees, 180, degrees)\n self.xaxis.set_major_locator(FixedLocator(np.deg2rad(grid)))\n self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))", "title": "" }, { "docid": "2bdade5f40c5a8501586fd28cb3d169b", "score": "0.531967", "text": "def define_grid(x,y,hres,buffer,lbound,rbound,tbound,bbound):\n\n\n grid_x, grid_y = points.generate_grid(hres, points.get_boundary_coords(x, y),\n buffer)\n grid_x, grid_y = points.trim_grid(grid_x, grid_y, lbound, rbound, tbound, bbound)\n \n return (grid_x, grid_y)", "title": "" }, { "docid": "8ad744a390d60648f5572d992f5b20f5", "score": "0.531518", "text": "def regrid(anomaly, ref_cube):\n\n lat_bounds = anomaly.coord('latitude').bounds\n lat_diffs = numpy.apply_along_axis(lambda x: x[1] - x[0], 1, lat_bounds)\n anomaly_scaled = anomaly / lat_diffs\n\n ref_points = [('latitude', 
ref_cube.coord('latitude').points)]\n anomaly_regridded = anomaly_scaled.interpolate(ref_points, iris.analysis.Linear()) \n\n ref_lat_bounds = ref_cube.coord('latitude').bounds\n ref_lat_diffs = numpy.apply_along_axis(lambda x: x[1] - x[0], 1, ref_lat_bounds)\n new_anomaly = anomaly_regridded * ref_lat_diffs\n\n return new_anomaly", "title": "" }, { "docid": "877eeda03e93f5087052db5f370131fc", "score": "0.5306936", "text": "def ll2pixel(lon_ifg,lat_ifg,lon,lat):\n x_pts = list()\n y_pts = list()\n\n\n \n if np.isscalar(lon):\n if np.nanmean(lon_ifg) * lon <0:\n print('WARNING: you may need to subtract 360')\n \n a = abs(lat_ifg-lat)\n b = abs(lon_ifg-lon)\n c = a+b\n y,x = np.where(c==c.min()) # y is rows, x is columns\n \n if not np.isscalar(x):\n x=x[0];y=y[0]\n \n x_pts.append(x)\n y_pts.append(y)\n \n else:\n if np.nanmean(lon_ifg) * lon[0] <0:\n print('WARNING: you may need to subtract 360')\n for ii in np.arange(0,len(lat)):\n a = abs(lat_ifg-lat[ii])\n b = abs(lon_ifg-lon[ii])\n c = a+b\n y,x = np.where(c==c.min()) # y is rows, x is columns\n \n if not np.isscalar(x):\n x=x[0];y=y[0]\n \n x_pts.append(x)\n y_pts.append(y)\n return y_pts,x_pts", "title": "" }, { "docid": "f44917ebaac45be8c7d3a370abe28095", "score": "0.53035235", "text": "def to_grid(number):\n x = number % 8\n y = math.trunc(number / 8)\n return x, y", "title": "" }, { "docid": "fb48137a6752589d21bf7ec47aae8ee7", "score": "0.52939916", "text": "def make_xy(ur_row,ll_row,ll_col,ur_col,transform):\n rownums=np.arange(ur_row,ll_row)\n colnums=np.arange(ll_col,ur_col)\n xline=[]\n yline=[]\n for the_col in colnums:\n x,y = transform*(the_col,0)\n xline.append(x)\n for the_row in rownums:\n x,y= transform*(0,the_row)\n yline.append(y)\n xline,yline=np.array(xline),np.array(yline)\n xvals, yvals = np.meshgrid(xline,yline)\n return xvals,yvals", "title": "" }, { "docid": "fd41122bad06d56769fbc8a74486a040", "score": "0.5280526", "text": "def direction_grid_shape(self) -> Tuple[int, int, int, int]:\n\n return (self.latlng_slices, self.latlng_slices, self.pressure_slices,\n self.time_slices)", "title": "" }, { "docid": "2b1e65b8189f5c18a1e0d57428eb8f7b", "score": "0.5276462", "text": "def point_to_grid(point, grid):\n\torigin = grid.info.origin\n\tres = grid.info.resolution\n\tx = (point.x - origin.position.x) // res\n\ty = (point.y - origin.position.y) // res\n\treturn (int(x),int(y))", "title": "" }, { "docid": "ebb25b042b4552b6d649f24a6a092244", "score": "0.52662224", "text": "def grid_size_lon(self):\n return 8640", "title": "" }, { "docid": "66e68128681d39e831b413f050730dc9", "score": "0.52641684", "text": "def _get_rotated_grid(self, grid=None):\n if not grid: grid = self.grid\n size = self.size\n return [[grid[col][size-row-1] for col in range(size)] for row in \\\n range(size)]", "title": "" }, { "docid": "a39d54a8a3c3f2be573cc1326f7604df", "score": "0.52540016", "text": "def forward_mercator(lonlat):\n x = lonlat[0] * 20037508.34 / 180\n n = math.tan((90 + lonlat[1]) * math.pi / 360)\n if n <= 0:\n y = float(\"-inf\")\n else:\n y = math.log(n) / math.pi * 20037508.34\n return (x, y)", "title": "" }, { "docid": "0abcd531b2678f66ef56449dbe2e283b", "score": "0.52418274", "text": "def shiftgrid(lon0,datain,lonsin,start=True,cyclic=360.0):\n if np.fabs(lonsin[-1]-lonsin[0]-cyclic) > 1.e-4:\n # Use all data instead of raise ValueError, 'cyclic point not included'\n start_idx = 0\n else:\n # If cyclic, remove the duplicate point\n start_idx = 1\n if lon0 < lonsin[0] or lon0 > lonsin[-1]:\n raise ValueError('lon0 outside of 
range of lonsin')\n i0 = np.argmin(np.fabs(lonsin-lon0))\n i0_shift = len(lonsin)-i0\n if ma.isMA(datain):\n dataout = ma.zeros(datain.shape,datain.dtype)\n else:\n dataout = np.zeros(datain.shape,datain.dtype)\n if ma.isMA(lonsin):\n lonsout = ma.zeros(lonsin.shape,lonsin.dtype)\n else:\n lonsout = np.zeros(lonsin.shape,lonsin.dtype)\n if start:\n lonsout[0:i0_shift] = lonsin[i0:]\n else:\n lonsout[0:i0_shift] = lonsin[i0:]-cyclic\n dataout[...,0:i0_shift] = datain[...,i0:]\n if start:\n lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]+cyclic\n else:\n lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]\n dataout[...,i0_shift:] = datain[...,start_idx:i0+start_idx]\n return dataout,lonsout", "title": "" }, { "docid": "d1c8398a10a1c686ac471f002fc6724d", "score": "0.5229547", "text": "def make_grid(self):\n\n segmentation = [ ]\n for dec in range(-90, 90, int(self.entry_height.get())):\n segmentation.append(dec)\n\n dists = [ ]\n for dec in segmentation[1::]:\n d = self.__ra0ra1(float(self.entry_width.get()), float(self.entry_height.get()), float(self.entry_height.get()))\n dists.append(d)\n\n ra_grid = []\n for dist in dists:\n ra = (np.arange(0,360, dist))\n ra_grid.append(ra)\n\n dec_grid = segmentation[1::]\n\n dec_columns =[]\n merges = []\n\n #file grid of the sky\n f=open('GWsky_grid.dat','ab')\n\n for ra, dec in zip(ra_grid, dec_grid):\n \n dec_column = np.repeat(dec, len(ra), )\n \n dec_columns.append(dec_column)\n\n merge = list(zip(ra,dec_column))\n np.savetxt(f, np.c_[ra, dec_column])\n merges.append(merge)", "title": "" }, { "docid": "3be62837000bf8eb2dceab6016a50ffe", "score": "0.5215434", "text": "def loft_to_grid(base_line, dir, width):\n assert isinstance(dir, Direction)\n base_line = line(base_line)\n\n log.info(\"Lofting {0} to the {1} to align to the grid\"\n .format(to_string(base_line), dir))\n (p1, p2) = base_line.boundary\n if dir.is_cardinal():\n divisor = 10\n else:\n divisor = 5\n\n (p1, p2) = endpoints_by_direction(base_line, dir.rotate(90))\n\n if dir.vector[0] < 0:\n x1 = math.floor(p1.x / divisor) * divisor\n x2 = math.floor(p2.x / divisor) * divisor\n elif dir.vector[0] > 0:\n x1 = math.ceil(p1.x / divisor) * divisor\n x2 = math.ceil(p2.x / divisor) * divisor\n else:\n x1 = round(p1.x / divisor) * divisor\n x2 = round(p2.x / divisor) * divisor\n if dir.vector[1] < 0:\n y1 = math.floor(p1.y / divisor) * divisor\n y2 = math.floor(p2.y / divisor) * divisor\n elif dir.vector[1] > 0:\n y1 = math.ceil(p1.y / divisor) * divisor\n y2 = math.ceil(p2.y / divisor) * divisor\n else:\n y1 = round(p1.y / divisor) * divisor\n y2 = round(p2.y / divisor) * divisor\n p1 = point(x1, y1)\n p2 = point(x2, y2)\n\n candidate_1 = point_sweep(p1, dir.rotate(-90), width)\n candidate_2 = point_sweep(p2, dir.rotate(90), width)\n # TODO - intermediate possibilities?\n while (sweep(candidate_1, dir, 50)[0].crosses(base_line) or\n sweep(candidate_1, dir, 50)[0].contains(base_line)):\n candidate_1 = translate(candidate_1, dir, 10)\n while (sweep(candidate_2, dir, 50)[0].crosses(base_line) or\n sweep(candidate_2, dir, 50)[0].contains(base_line)):\n candidate_2 = translate(candidate_2, dir, 10)\n\n poly1 = loft(base_line, candidate_1)\n poly2 = loft(base_line, candidate_2)\n\n log.debug(\"First candidate: {0}, {1}\"\n .format(to_string(candidate_1), poly1.area))\n log.debug(\"Second candidate: {0}, {1}\"\n .format(to_string(candidate_2), poly2.area))\n\n if (poly1.area < poly2.area or (poly1.area == poly2.area and\n poly1.length < poly2.length)):\n candidate_line = candidate_1\n poly = 
poly1\n else:\n candidate_line = candidate_2\n poly = poly2\n\n log.info(\"New line is {0}\".format(to_string(candidate_line)))\n return (candidate_line, poly)", "title": "" }, { "docid": "d6687bbba94cdf3d559264f347b4a1a3", "score": "0.52151495", "text": "def weather_file_grid(weather_dataset):\n # longitude = np.load(Path('data') / 'longitudes.npy')\n # latitude = np.load(Path('data') / 'latitudes.npy')\n # weather_dataset = xr.open_dataset(weather_file)\n longitude = np.array(weather_dataset.longitude)\n latitude = np.array(weather_dataset.latitude)\n\n # Since we want a WGS84 grid, fold values >180 into range -180 to 0\n # longitude -= 180\n # To avoid making a mess when we then turn the points into grid squares need to re-order\n # the longitude so that it goes from min to max\n longitude[longitude > 180] -= 360\n # longitude = np.concatenate([longitude[longitude < 0], longitude[longitude >= 0]])\n longitude = np.sort(longitude)\n latitude = np.sort(latitude)\n\n return generate_polygon_points(longitude, latitude)", "title": "" }, { "docid": "4b793651cd097fe98dd384b4e81bc980", "score": "0.5207646", "text": "def coord_rot90(row: int, col: int, m: int, n: int, r: int = 1) -> (int, int):\n\tr %= 4\n\tif r == 0:\n\t\treturn row, col\n\tif r == 1:\n\t\treturn n - (col + 1), row\n\tif r == 2:\n\t\treturn m - (row + 1), n - (col + 1)\n\tif r == 3:\n\t\treturn col, m - (row + 1)", "title": "" }, { "docid": "e247a8b2a5b2429c0f4f03990689d59e", "score": "0.520418", "text": "def euler2rot(lon, lat, omega):\n###############################################################################\n\n import numpy as np\n\n wx=np.cos(np.radians(lat))*np.cos(np.radians(lon))*np.radians(omega)*1.0E-6\n wy=np.cos(np.radians(lat))*np.sin(np.radians(lon))*np.radians(omega)*1.0E-6\n wz=np.sin(np.radians(lat))*np.radians(omega)*1.0E-6\n\n return (wx,wy,wz)", "title": "" }, { "docid": "c926aca4792753c25d1de1ee907a2472", "score": "0.5202612", "text": "def x2lon(xx):\r\n\treturn xx*180.0/np.pi/r", "title": "" }, { "docid": "0786c11761ce79b89744ce9ed76f07bb", "score": "0.51994944", "text": "def to_grid(self, original_grid, mapping):\n self.rows=len(original_grid)\n self.cols=len(original_grid[0])\n grid = []\n for x in range(self.rows):\n line = []\n for y in range(self.cols):\n line += [mapping[x,y]]#\n grid.append(line)\n\n return grid", "title": "" }, { "docid": "47782d1e6ca900831a36347923975090", "score": "0.519501", "text": "def mk_map_grid(centre, shape, spacing) :\n\n dec = centre[1] + spacing*sp.arange(-(shape[1]-1.)/2., shape[1]/2.)\n ra = centre[0] + (spacing/sp.cos(centre[1]*sp.pi/180.) 
*\n sp.arange(-(shape[0]-1.)/2., shape[0]/2.))\n\n grid_ra, grid_dec = sp.meshgrid(ra, dec)\n\n return grid_ra, grid_dec", "title": "" }, { "docid": "ddd15dba6399ebf33f04cd8ed842c3c9", "score": "0.5185551", "text": "def transform(self, lon, lat, inverse=False):\n\n # calculate trig terms relating to longitude\n lonpole = self.lonpole\n\n if inverse:\n lonpole += 180.\n lon += self.polerotate\n\n dlonr = (lon - lonpole) * dtor\n cosdlonr = math.cos(dlonr)\n sindlonr = math.sin(dlonr)\n\n # likewise for latitude\n latr = lat * dtor\n coslatr = math.cos(latr)\n sinlatr = math.sin(latr)\n\n # now the main caluculation\n dlonrotr, latrotr = \\\n rotgrid_core(self.coslatpole, self.sinlatpole,\n cosdlonr, sindlonr,\n coslatr, sinlatr)\n\n lonrot = lonpole + dlonrotr * rtod\n latrot = latrotr * rtod\n\n if not inverse:\n lonrot -= self.polerotate\n\n # put lonrot back in range\n while lonrot < self.lonMin:\n lonrot += 360.\n\n while lonrot >= self.lonMax:\n lonrot -= 360.\n\n # print \"Transform returning (%s, %s)\" % (lonrot, latrot)\n return (lonrot, latrot)", "title": "" }, { "docid": "f4bcdb252e509ca6caa0d9e55b25617c", "score": "0.5182906", "text": "def rotateGrid(self, grid: List[List[int]], k: int) -> List[List[int]]:\n def extract_layer(topleft, h, w):\n ## Implementation01\n #L = [grid[topleft][topleft + j] for j in range(w)]\n #L += [grid[topleft + i][topleft + (w-1)] for j in range(1, h)]\n #L += [grid[topleft + (h-1)][topleft + j] for j in reversed(range(w-1))]\n #L += [grid[topleft + i][topleft] for i in reversed(range(1, h-1))]\n\n # Implementation02\n L = [grid[topleft][topleft + j] for j in range(w)]\n L += [grid[topleft + i][topleft + (w-1)] for i in range(h)][1:]\n L += [grid[topleft + (h-1)][topleft + j] for j in reversed(range(w))][1:]\n L += [grid[topleft + (h-1)][topleft + j] for j in reversed(range(w))][1:]\n L += [grid[topleft + i][topleft] for i in reversed(range(h))][1:-1]\n return L\n def reassign(topleft, h, w, L):\n ## top side\n for j in range(w):\n #grid[topleft][topleft + j] = L[j - topleft]\n grid[topleft][topleft + j] = L[j]\n ## right side\n for i in range(h):\n grid[topleft + i][topleft + (w-1)] = L[w-1+i]\n ## bottom side\n for j in reversed(range(w)):\n grid[topleft + (h-1)][topleft + j] = L[-(h-1)-j]\n ## left side\n for i in reversed(range(h)):\n grid[topleft + i][topleft] = L[-i]\n m = len(grid)\n n = len(grid[0])\n h = m\n w = n\n topleft = 0\n while h > 0 and w > 0:\n kmod = k % ((h+w)*2 - 4)\n L = extract_layer(topleft, h, w)\n if kmod != 0:\n L = L[kmod:] + L[:kmod]\n reassign(topleft, h, w, L)\n h -= 2\n w -= 2\n topleft += 1\n return grid", "title": "" }, { "docid": "07b4a7a221bd5327edf5a9fa3836226f", "score": "0.5181892", "text": "def make_basemap_xy(ur_row,ll_row,ll_col,ur_col,bmap,transform):\n xvals,yvals=make_xy(ur_row,ll_row,ll_col,ur_col,transform)\n xvals = xvals + bmap.projparams['x_0']\n yvals=yvals + bmap.projparams['y_0']\n return xvals,yvals", "title": "" }, { "docid": "8549f55ec970e8ec008544b226728193", "score": "0.5173286", "text": "def coordinate_grid(latmin, latmax, lonmin, lonmax, nlat, nlon):\n\n\t# Grouping boundaries\n\tmins, maxs = np.array([latmin,lonmin]),np.array([latmax,lonmax])\n\t# Creating meash grid\n\tx = np.linspace(latmin, latmax, nlat)\n\ty = np.linspace(lonmin, lonmax, nlon)\n\txv, yv = np.meshgrid(x, y)\n\n\tdelta = np.array([x[1]-x[0],y[1]-y[0]])\n\tposition = np.dstack((xv, yv))\n\treturn delta, position, mins, maxs", "title": "" }, { "docid": "3cf9b8398dde796dc4a8a2c60b8b254b", "score": "0.51634824", "text": "def 
__init__(self, ll_corner, ur_corner, res, geo_type=\"latlon\"):\n super().__init__(ll_corner=ll_corner, ur_corner=ur_corner, res=res, type=geo_type)\n if self.lats == [] or self.lons == []:\n raise GeoGridError(\"GeoGrid has not been rightly defined (empty lats/lons field)\")\n\n # Define geo transform used in raster computing (GDAL syntax)\n self._geo_transform = (self.ll_corner[1] - self.res/2, self.res, 0, self.ur_corner[0] + self.res/2, 0,\n -self.res)\n\n # Set lat and lon numpy arrays\n self._lat = np.asarray(self.lats)[::-1]\n self._lon = np.asarray(self.lons)\n\n # WARNING: sorry guys, but Geogrid implementation of num_x and num_y does not seem robust to me\n # Better to define num_x and num_y as length of lon and lat rather than doing some calculation that fails due\n # to float precision... (for really small resolutions though)\n self.num_x = len(self._lon)\n self.num_y = len(self._lat)\n\n # Compute lat/lon meshgrid of pixel centres\n self._longitude, self._latitude = np.meshgrid(self._lon, self._lat)", "title": "" }, { "docid": "10874cfbad459e2fdcac579adacaa9ca", "score": "0.5158219", "text": "def linear_scaling(self):\r\n # Generate grid\r\n x_step = (math.sqrt(3) / 2) * GRID_SIDE_LENGTH\r\n shortest_distance = self.r_b\r\n expand_ratio = 2 * GRID_SIDE_LENGTH / (2 + self.epsilon)\r\n self.grid_points = list()\r\n self.grid_x_size = int((2 * self.r_b * expand_ratio) / x_step)\r\n self.grid_y_size = int((2 * self.r_b * expand_ratio) / (3 * GRID_SIDE_LENGTH) * 2) + 1\r\n if self.grid_x_size < 5:\r\n self.grid_x_size = 5\r\n if self.grid_y_size < 4:\r\n self.grid_y_size = 4\r\n if self.grid_x_size % 2 != 1:\r\n self.grid_x_size += 1\r\n if self.grid_y_size % 2 != 0:\r\n self.grid_y_size += 1\r\n for i in range(self.grid_y_size):\r\n odd_even_1 = i % 2\r\n if i == 0:\r\n self.grid_points.append((- self.r_b * expand_ratio, -self.r_b * expand_ratio))\r\n else:\r\n self.grid_points.append((self.grid_points[-self.grid_x_size][0] + (\r\n 2 - odd_even_1) * GRID_SIDE_LENGTH, -self.r_b * expand_ratio))\r\n for j in range(1, self.grid_x_size):\r\n self.grid_points.append((self.grid_points[-j][0] + (j % 2) * (\r\n odd_even_1 - 0.5) * GRID_SIDE_LENGTH, self.grid_points[-j][1] + j * x_step))\r\n # add points\r\n for i in range(3):\r\n self.path_start.append(deepcopy(self.starts))\r\n self.path_goal.append(deepcopy(self.goals))\r\n for the_list in [self.path_start[1], self.path_goal[1]]:\r\n for i, _garbage in enumerate(the_list):\r\n the_list[i] = (the_list[i][0], (\r\n the_list[i][1][0] * expand_ratio, the_list[i][1][1] * expand_ratio))\r\n # Match vertices with robots\r\n self.start_match = dict()\r\n self.goal_match = dict()\r\n for robot in self.path_start[1]:\r\n shortest_distance = self.r_b\r\n for index2, point in enumerate(self.grid_points):\r\n if get_euclidean_dist(robot[1], point) < shortest_distance:\r\n self.start_match[robot[0]] = index2\r\n shortest_distance = get_euclidean_dist(robot[1], point)\r\n for index, _garbage in self.path_start[2]:\r\n self.path_start[2][index] = (self.path_start[2][index][0], self.grid_points[\r\n self.start_match[self.path_start[2][index][0]]])\r\n for robot in self.path_goal[1]:\r\n shortest_distance = self.r_b\r\n for index2, point in enumerate(self.grid_points):\r\n if get_euclidean_dist(robot[1], point) < shortest_distance:\r\n self.goal_match[robot[0]] = index2\r\n shortest_distance = get_euclidean_dist(robot[1], point)\r\n for index, _garbage in self.path_goal[0]:\r\n self.path_goal[0][index] = (self.path_goal[0][index][0], 
self.grid_points[\r\n self.goal_match[self.path_goal[0][index][0]]])", "title": "" }, { "docid": "c3bc9e1e47e9316249e53f467e24b781", "score": "0.5150925", "text": "def latlon_ticks(ax, lat_in=5, lon_in=5, in_crs={'init':'epsg:3857'}, fmt='%0.0f', grid=False):\n \n from pyproj import Proj, transform\n #ax.autoscale(enable=False)\n \n #Get input axes limits\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n \n #Define input and output projections\n #Assume in_proj is CRS dictionary\n in_proj = Proj(in_crs)\n out_proj = Proj(init='epsg:4326')\n\n #Get lat/lon coord for lower left and upper right mapped coords\n ll = transform(in_proj, out_proj, xlim[0], ylim[0])\n lr = transform(in_proj, out_proj, xlim[1], ylim[0])\n ul = transform(in_proj, out_proj, xlim[0], ylim[1])\n ur = transform(in_proj, out_proj, xlim[1], ylim[1])\n \n clat = np.mean([ll[1],lr[1],ul[1],ur[1]])\n clon = np.mean([ll[0],lr[0],ul[0],ur[0]])\n \n bottom_clat = np.mean([ll[1],lr[1]])\n bottom_clon = np.mean([ll[0],lr[0]])\n top_clat = np.mean([ul[1],ur[1]])\n top_clon = np.mean([ul[0],ur[0]])\n left_clon = np.mean([ll[0],ul[0]])\n right_clon = np.mean([lr[0],ur[0]])\n \n #Get number of expected lat or lon intervals\n l_nx = np.floor((lr[0] - ll[0])/lon_in)\n u_nx = np.floor((ur[0] - ul[0])/lon_in)\n l_ny = np.floor((ul[1] - ll[1])/lat_in)\n r_ny = np.floor((ur[1] - lr[1])/lat_in)\n \n #Determine rounded lower left\n ll_r = np.zeros(2)\n ll_r[0] = np.ceil(ll[0]/lon_in) * lon_in \n ll_r[1] = np.ceil(ll[1]/lat_in) * lat_in \n ul_r = np.zeros(2)\n ul_r[0] = np.ceil(ul[0]/lon_in) * lon_in \n ul_r[1] = np.floor(ul[1]/lat_in) * lat_in \n lr_r = np.zeros(2)\n lr_r[0] = np.floor(lr[0]/lon_in) * lon_in \n lr_r[1] = np.ceil(lr[1]/lat_in) * lat_in \n ur_r = np.zeros(2)\n ur_r[0] = np.floor(ur[0]/lon_in) * lon_in \n ur_r[1] = np.floor(ur[1]/lat_in) * lat_in\n \n #Prepare lists of rounded coordinates at given intervals\n bottom_list = np.arange(ll_r[0], lr_r[0]+lon_in, lon_in)\n top_list = np.arange(ul_r[0], ur_r[0]+lon_in, lon_in)\n left_list = np.arange(ll_r[1], ul_r[1]+lat_in, lat_in)\n right_list = np.arange(lr_r[1], ur_r[1]+lat_in, lat_in)\n \n bottom_tick_loc_out = list(zip(bottom_list, np.repeat(bottom_clat, bottom_list.size)))\n top_tick_loc_out = list(zip(top_list, np.repeat(top_clat, top_list.size)))\n left_tick_loc_out = list(zip(np.repeat(left_clon, left_list.size), left_list))\n right_tick_loc_out = list(zip(np.repeat(right_clon, right_list.size), right_list))\n \n #Determine tick locations (in input crs) for the desired lat/lon coords\n bottom_tick_loc_init = np.array([transform(out_proj, in_proj, xy[0], xy[1])[0] for xy in bottom_tick_loc_out])\n top_tick_loc_init = np.array([transform(out_proj, in_proj, xy[0], xy[1])[0] for xy in top_tick_loc_out])\n left_tick_loc_init = np.array([transform(out_proj, in_proj, xy[0], xy[1])[1] for xy in left_tick_loc_out])\n right_tick_loc_init = np.array([transform(out_proj, in_proj, xy[0], xy[1])[1] for xy in right_tick_loc_out])\n \n verbose = False\n if verbose:\n print(bottom_list)\n print(bottom_tick_loc_out)\n print(left_list)\n print(left_tick_loc_out)\n \n #Set formatter\n #ax.xaxis.set_major_formatter(FormatStrFormatter(fmt))\n #ax.yaxis.set_major_formatter(FormatStrFormatter(fmt))\n \n #Prepare tick labels with desired format\n if True:\n #bottom_tick_labels = [fmt % x +'$^\\circ$E' for x in bottom_list]\n #top_tick_labels = [fmt % x +'$^\\circ$E' for x in top_list]\n #left_tick_labels = [fmt % y +'$^\\circ$N' for y in left_list]\n #right_tick_labels = [fmt % y +'$^\\circ$N' for y 
in right_list]\n bottom_tick_labels = [fmt % x for x in bottom_list]\n top_tick_labels = [fmt % x for x in top_list]\n left_tick_labels = [fmt % y for y in left_list]\n right_tick_labels = [fmt % y for y in right_list]\n else:\n bottom_tick_labels = bottom_list\n top_tick_labels = top_list\n left_tick_labels = left_list\n right_tick_labels = right_list\n \n #print(bottom_tick_labels)\n \n ax.set_xticks(bottom_tick_loc_init)\n ax.set_xticklabels(bottom_tick_labels, minor=False)\n ax.set_yticks(left_tick_loc_init)\n ax.set_yticklabels(left_tick_labels, minor=False)\n \n #m = lambda x: x * (top_list[-1] - top_list[0])/(top_tick_loc_init[-1] - top_tick_loc_init[0])\n #im = lambda x: x * (top_tick_loc_init[-1] - top_tick_loc_init[0])/(top_list[-1] - top_list[0])\n \n #ref_clon = bottom_clon\n #ref_clon = top_clon\n ref_clon = in_crs['lon_0']\n im = lambda lon: (-ref_clon + lon) * ((xlim[1] - xlim[0])/(ur[0] - ul[0]))\n m = lambda x: ref_clon + (x * ((ur[0] - ul[0])/(xlim[1] - xlim[0])))\n\n top=True\n right=False\n \n #This doesn't work, as it rescales data\n if True:\n #topax = ax.twiny()\n topax = ax.secondary_xaxis('top', functions=(m,im))\n #topax.set_aspect('equal')\n #topax.set_xlim(ax.get_xlim())\n #topax.set_ylim(ax.get_ylim())\n\n #topax.set_xticks(top_tick_loc_init)\n #print(topax.get_xticklabels())\n #print(top_tick_labels)\n #topax.set_xticklabels(top_tick_labels, minor=False)\n #topax.xaxis.set_major_formatter(FormatStrFormatter(fmt))\n #topax.xaxis.set_major_formatter(FormatStrFormatter(fmt +'$^\\circ$E'))\n #ax.set_xlim(*xlim)\n #ax.set_ylim(*ylim)\n \n if False:\n fig = ax.get_figure()\n topax = fig.add_axes(ax.get_position())\n topax.set_aspect('equal')\n topax.patch.set_visible(False)\n \n topax.xaxis.tick_top()\n topax.set_xticks(top_tick_loc_init)\n topax.set_xticklabels(top_tick_labels, minor=False)\n if right:\n topax.yaxis.tick_right()\n topax.set_yticks(right_tick_loc_init)\n topax.set_yticklabels(right_tick_labels, minor=False)\n else:\n topax.yaxis.set_visible(False)\n topax.set_xlim(ax.get_xlim())\n topax.set_ylim(ax.get_ylim())\n topax.set_title(ax.get_title())\n ax.set_title(None)\n \n #ax.set_xlabel('Longitude')\n #ax.set_ylabel('Latitude')\n ax.set_xlabel('Longitude'+'$^\\circ$E')\n ax.set_ylabel('Latitude'+'$^\\circ$N')\n\n if grid:\n ax.grid(ls=':')", "title": "" }, { "docid": "a2cf80a9d97bbeda4bd419a87b8556d9", "score": "0.51453215", "text": "def create_grid(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n #print(north_min, north_max)\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n #print(east_min, east_max)\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil((north_max - north_min)))\n east_size = int(np.ceil((east_max - east_min)))\n #print(north_size, east_size)\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n # Center offset for grid\n north_min_center = np.min(data[:, 0])\n east_min_center = np.min(data[:, 1])\n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(north - d_north - safety_distance - north_min_center),\n int(north + d_north + safety_distance - 
north_min_center),\n int(east - d_east - safety_distance - east_min_center),\n int(east + d_east + safety_distance - east_min_center),\n ]\n grid[obstacle[0]:obstacle[1], obstacle[2]:obstacle[3]] = 1\n\n return grid", "title": "" }, { "docid": "62f4c1fe37a2408854d26a666ea205cc", "score": "0.51408994", "text": "def _check_and_flip_ylat(self, ylat):\n # Check if ylat is in ascending order and include the equator\n if np.diff(ylat)[0] < 0:\n raise TypeError(\"ylat must be in ascending order\")\n if (ylat.size % 2 == 0) & (sum(ylat == 0.0) == 0):\n # Even grid\n self.need_latitude_interpolation = True\n self.ylat_no_equator = ylat\n self.ylat = np.linspace(-90., 90., ylat.size+1, endpoint=True)\n self.equator_idx = \\\n np.argwhere(self.ylat == 0)[0][0] + 1\n # Fortran indexing starts from 1\n elif sum(ylat == 0) == 1:\n # Odd grid\n self.need_latitude_interpolation = False\n self.ylat_no_equator = None\n self.ylat = ylat\n self.equator_idx = np.argwhere(ylat == 0)[0][0] + 1 # Fortran indexing starts from 1\n else:\n raise TypeError(\n \"There are more than 1 grid point with latitude 0.\"\n )\n self.clat = np.abs(np.cos(np.deg2rad(self.ylat)))", "title": "" }, { "docid": "a40301e3ff684203a2c823cb3a503db8", "score": "0.51378256", "text": "def transformed_bounds(self):", "title": "" }, { "docid": "c3345c7d2057cab8ffb5e51a83530968", "score": "0.51356477", "text": "def test_correct_num_row_col():\n geogr = \"\"\"\\\n WWWW\n WLLW\n WHLW\n WHHW\n WWWW\"\"\"\n geogr = textwrap.dedent(geogr)\n new_island = Island(geogr)\n new_island.make_map()\n assert (new_island.map_rows, new_island.map_columns) == (5, 4)", "title": "" }, { "docid": "f90f5905c243860a1b1a8660152dfa34", "score": "0.51271033", "text": "def bresenham(x_0, y_0, x_end, y_end):\n # Bresenham returns all the x-y coordinates of grids between (x_0, y_0) and (x_end, y_end)\n # The end points will post likely go outside of our grid map (keep in mind)\n dx = x_end - x_0\n dy = y_end - y_0\n\n # if the thetha is over 45 degrees we rotate the line (mirror the line)\n is_steep = abs(dx) < abs(dy)\n\n if is_steep:\n x_0, y_0 = y_0, x_0\n x_end, y_end = y_end, x_end\n\n # checks if dx is negative, if so rotates the line again => we always look at a line going in positive x direction\n # if dx is negative we change the direction of the \"arrow\"\n swapped = False\n if x_0 > x_end:\n x_0, x_end = x_end, x_0\n y_0, y_end = y_end, y_0\n swapped = True\n\n dx = x_end - x_0\n dy = y_end - y_0\n\n # initial value of error\n error = dx / 2\n\n if y_0 < y_end:\n y_step = 1\n else:\n y_step = -1\n\n # initial y-value to start point\n y = y_0\n # empty array of grid coordinates\n grids = []\n\n # iterates over x-coordinates (may be y-coordinates, if is_steep = true)\n # iterates over each x between x_0 and x_end\n # The error first get subtracted\n for x in range(x_0, x_end + 1):\n if is_steep:\n coord = (y, x)\n else:\n coord = (x, y)\n grids.append(coord)\n error -= abs(dy)\n if error < 0:\n y += y_step\n error += dx\n\n # reverse back list if they were reversed.\n if swapped:\n grids.reverse()\n\n return grids", "title": "" }, { "docid": "c47341a227ae3cc59323870ac5436ede", "score": "0.5122831", "text": "def grid_size_lon(self):\n return 1440", "title": "" }, { "docid": "aec341e4f7262430d76eaf87ecf6fd62", "score": "0.51166433", "text": "def grid_size_lat(self):\n return 4320", "title": "" }, { "docid": "975e7160e06ba5545b7988e40c98a3ea", "score": "0.5115936", "text": "def projection_of_3d_vertical_line(rpc, lon, lat, h_min=-200, h_max=3000, h_step=10):\n 
return [rpc.projection(lon, lat, h) for h in np.arange(h_min, h_max, h_step)]", "title": "" }, { "docid": "74e6401db76054884f82d87226910c95", "score": "0.51156735", "text": "def projection(lon, lat, use='hammer'):\n # TODO: Figure out why Aitoff is failing\n\n # Note that np.sinc is normalized (hence the division by pi)\n if use.lower() == 'hammer': # Hammer\n x = 2.0 ** 1.5 * np.cos(lat) * np.sin(lon / 2.0) / np.sqrt(1.0 + np.cos(lat) * np.cos(lon / 2.0))\n y = np.sqrt(2.0) * np.sin(lat) / np.sqrt(1.0 + np.cos(lat) * np.cos(lon / 2.0))\n else: # Aitoff, not yet working\n alpha_c = np.arccos(np.cos(lat) * np.cos(lon / 2.0))\n x = 2.0 * np.cos(lat) * np.sin(lon) / np.sinc(alpha_c / np.pi)\n y = np.sin(lat) / np.sinc(alpha_c / np.pi)\n return x, y", "title": "" }, { "docid": "0201e3d862dda9b23e81bcfff9d5ac88", "score": "0.5111539", "text": "def grid_map(self):\n #remember spat spec row col\n spatial_offset_400 = np.zeros_like(self.arrays['a'])\n spatial_offset_400[:,0] = 10\n\n spectral_offset_200 = np.zeros_like(self.arrays['b'])\n spectral_offset_200[:,1] = 20\n\n amap = self.arrays['a'] + spatial_offset_400\n bmap = self.arrays['b'] + spectral_offset_200\n cmap = self.arrays['c']\n full_map = np.concatenate((amap,bmap,cmap))\n mce_grid = ma.zeros((33,24,2),dtype=int) \n mce_grid[:] = ma.masked\n # print(mce_grid)\n mce_grid[full_map[:,2],full_map[:,3]] = full_map[:,0:2]\n\n return mce_grid", "title": "" }, { "docid": "b3f7f8c8cff4c848f593e9737bd7e8b6", "score": "0.51081353", "text": "def latlon_to_rowcol(lat, lon, rsc_data):\n start_lon = rsc_data[\"x_first\"]\n start_lat = rsc_data[\"y_first\"]\n lon_step, lat_step = rsc_data[\"x_step\"], rsc_data[\"y_step\"]\n row = (lat - start_lat) / lat_step\n col = (lon - start_lon) / lon_step\n return int(round(row)), int(round(col))", "title": "" }, { "docid": "5a0187f77caa6959dce0c181ad31367f", "score": "0.5106306", "text": "def calc_grid(pyom):\n aloc = np.zeros((pyom.nx,pyom.ny))\n dxt_gl = np.zeros(pyom.nx+4)\n dxu_gl = np.zeros(pyom.nx+4)\n xt_gl = np.zeros(pyom.nx+4)\n xu_gl = np.zeros(pyom.nx+4)\n dyt_gl = np.zeros(pyom.ny+4)\n dyu_gl = np.zeros(pyom.ny+4)\n yt_gl = np.zeros(pyom.ny+4)\n yu_gl = np.zeros(pyom.ny+4)\n\n \"\"\"\n transfer from locally defined variables to global ones\n \"\"\"\n aloc[:,0] = pyom.dxt[2:-2]\n\n dxt_gl[2:-2] = aloc[:,0]\n\n if pyom.enable_cyclic_x:\n dxt_gl[pyom.nx+2:pyom.nx+4] = dxt_gl[2:4]\n dxt_gl[:2] = dxt_gl[pyom.nx:-2]\n else:\n dxt_gl[pyom.nx+2:pyom.nx+4] = dxt_gl[pyom.nx+1]\n dxt_gl[:2] = dxt_gl[2]\n\n aloc[0,:] = pyom.dyt[2:-2]\n dyt_gl[2:-2] = aloc[0, :]\n\n dyt_gl[pyom.ny+2:pyom.ny+4] = dyt_gl[pyom.ny+1]\n dyt_gl[:2] = dyt_gl[2]\n\n \"\"\"\n grid in east/west direction\n \"\"\"\n u_centered_grid(pyom, dxt_gl, dxu_gl, xt_gl, xu_gl)\n xt_gl += pyom.x_origin - xu_gl[2]\n xu_gl += pyom.x_origin - xu_gl[2]\n\n if pyom.enable_cyclic_x:\n xt_gl[pyom.nx+2:pyom.nx+4] = xt_gl[2:4]\n xt_gl[:2] = xt_gl[pyom.nx:-2]\n xu_gl[pyom.nx+2:pyom.nx+4] = xt_gl[2:4]\n xu_gl[:2] = xu_gl[pyom.nx:-2]\n dxu_gl[pyom.nx+2:pyom.nx+4] = dxu_gl[2:4]\n dxu_gl[:2] = dxu_gl[pyom.nx:-2]\n\n \"\"\"\n grid in north/south direction\n \"\"\"\n u_centered_grid(pyom, dyt_gl, dyu_gl, yt_gl, yu_gl)\n yt_gl += pyom.y_origin - yu_gl[2]\n yu_gl += pyom.y_origin - yu_gl[2]\n\n if pyom.coord_degree:\n \"\"\"\n convert from degrees to pseudo cartesian grid\n \"\"\"\n dxt_gl *= pyom.degtom\n dxu_gl *= pyom.degtom\n dyt_gl *= pyom.degtom\n dyu_gl *= pyom.degtom\n\n \"\"\"\n transfer to locally defined variables\n \"\"\"\n pyom.xt[:] = xt_gl[:]\n 
pyom.xu[:] = xu_gl[:]\n pyom.dxu[:] = dxu_gl[:]\n pyom.dxt[:] = dxt_gl[:]\n\n pyom.yt[:] = yt_gl[:]\n pyom.yu[:] = yu_gl[:]\n pyom.dyu[:] = dyu_gl[:]\n pyom.dyt[:] = dyt_gl[:]\n\n \"\"\"\n grid in vertical direction\n \"\"\"\n u_centered_grid(pyom, pyom.dzt, pyom.dzw, pyom.zt, pyom.zw)\n pyom.zt -= pyom.zw[-1]\n pyom.zw -= pyom.zw[-1] # zero at zw(nz)\n\n \"\"\"\n metric factors\n \"\"\"\n if pyom.coord_degree:\n pyom.cost[...] = np.cos(pyom.yt * pyom.pi / 180.)\n pyom.cosu[...] = np.cos(pyom.yu * pyom.pi / 180.)\n pyom.tantr[...] = np.tan(pyom.yt * pyom.pi / 180.) / pyom.radius\n else:\n pyom.cost[...] = 1.0\n pyom.cosu[...] = 1.0\n pyom.tantr[...] = 0.0\n\n \"\"\"\n precalculate area of boxes\n \"\"\"\n pyom.area_t[...] = pyom.cost * pyom.dyt * pyom.dxt[:, np.newaxis]\n pyom.area_u[...] = pyom.cost * pyom.dyt * pyom.dxu[:, np.newaxis]\n pyom.area_v[...] = pyom.cosu * pyom.dyu * pyom.dxt[:, np.newaxis]", "title": "" }, { "docid": "a400ec0eba81a365e795a8f1e56dce72", "score": "0.51057416", "text": "def build_cell_width_lat_lon(self):\n km = 1000.0\n\n params = ct.default_params\n\n # QU 120 background mesh and enhanced Atlantic (30km)\n params[\"mesh_type\"] = \"QU\"\n params[\"dx_max_global\"] = 120.0 * km\n params[\"region_box\"] = ct.Atlantic\n params[\"restrict_box\"] = ct.Atlantic_restrict\n params[\"plot_box\"] = ct.Western_Atlantic\n params[\"dx_min_coastal\"] = 30.0 * km\n params[\"trans_width\"] = 5000.0 * km\n params[\"trans_start\"] = 500.0 * km\n\n cell_width, lon, lat = ct.coastal_refined_mesh(params)\n\n # Northeast refinement (10km)\n params[\"region_box\"] = ct.Delaware_Bay\n params[\"plot_box\"] = ct.Western_Atlantic\n params[\"dx_min_coastal\"] = 10.0 * km\n params[\"trans_width\"] = 600.0 * km\n params[\"trans_start\"] = 400.0 * km\n\n cell_width, lon, lat = ct.coastal_refined_mesh(\n params, cell_width, lon, lat)\n\n # Delaware regional refinement (6km)\n params[\"region_box\"] = ct.Delaware_Region\n params[\"plot_box\"] = ct.Delaware\n params[\"dx_min_coastal\"] = 5.0 * km\n params[\"trans_width\"] = 175.0 * km\n params[\"trans_start\"] = 75.0 * km\n\n cell_width, lon, lat = ct.coastal_refined_mesh(\n params, cell_width, lon, lat)\n\n # Delaware Bay high-resolution (2km)\n params[\"region_box\"] = ct.Delaware_Bay\n params[\"plot_box\"] = ct.Delaware\n params[\"restrict_box\"] = ct.Delaware_restrict\n params[\"dx_min_coastal\"] = 2.0 * km\n params[\"trans_width\"] = 100.0 * km\n params[\"trans_start\"] = 17.0 * km\n\n cell_width, lon, lat = ct.coastal_refined_mesh(\n params, cell_width, lon, lat)\n\n return cell_width / 1000, lon, lat", "title": "" }, { "docid": "0e19d461b52d9f6fa9e3d65045f96c57", "score": "0.51046884", "text": "def test_ij2xy(self):\n latlon_grid = LatLonGrid(0.0001)\n lon_should = -71.9667\n lat_should = -1.0444\n tile = latlon_grid.GL.tilesys.create_tile(lon=-71.3456, lat=-1.5432)\n lon, lat = tile.ij2xy(333, 444)\n nptest.assert_allclose(lon_should, lon)\n nptest.assert_allclose(lat_should, lat)", "title": "" }, { "docid": "b5c45c09a0cab70e09065f0bb9f16850", "score": "0.510244", "text": "def set_roi(reg_dict, col_start, col_end, row_start, row_end, mlx75027):\n\n if mlx75027:\n col_max = 640\n row_max = 480\n else:\n col_max = 320\n row_max = 240\n\n # Check input data is correct\n if col_start < 1 or col_start > col_max:\n raise RuntimeError(\"Column start must be between 0 and 640\")\n if col_end < 0 or col_end > col_max:\n raise RuntimeError(\"Column end must be between 0 and 640\")\n if col_start >= col_end:\n raise RuntimeError(\"The column 
start must less than the column end\")\n if row_start < 1 or row_start > row_max:\n raise RuntimeError(\"The row start must be between 0 and 482\")\n if row_end < 0 or row_end > row_max:\n raise RuntimeError(\"The row end must be between 0 and 482\")\n if row_start >= row_end:\n raise RuntimeError(\"The row start must be less than the row end\")\n\n # As per section 7.19. Y1 should be uneven while Y2 is even, where y is the rows\n if not (row_start & 0x01):\n warnings.warn(\"Row start is even, it should be odd!\", RuntimeWarning)\n if row_end & 0x01:\n warnings.warn(\"Row end is odd, it should be even!\", RuntimeWarning)\n\n reg_dict[\"ROI_COL_START_HI\"][2] = int(col_start) >> 8\n reg_dict[\"ROI_COL_START_LOW\"][2] = int(col_start) & 0xFF\n\n reg_dict[\"ROI_COL_WIDTH_HI\"][2] = int(col_end-col_start+1) >> 8\n reg_dict[\"ROI_COL_WIDTH_LOW\"][2] = int(col_end-col_start+1) & 0xFF\n\n reg_dict[\"ROI_ROW_START_LOW\"][2] = (int(row_start-1) >> 1) & 0xFF\n reg_dict[\"ROI_ROW_START_HI\"][2] = (int(row_start-1) >> 1) >> 8\n\n reg_dict[\"ROI_ROW_END_LOW\"][2] = ((int(row_end) >> 1)+1) & 0xFF\n reg_dict[\"ROI_ROW_END_HI\"][2] = ((int(row_end) >> 1)+1) >> 8\n return", "title": "" }, { "docid": "a0b9a5ca916a62134f54f584450812fc", "score": "0.51008576", "text": "def make_grid(input_val):\n\n coord = [0, 0]\n grid = {tuple(coord): 1}\n level = 1\n val = 1\n while val <= input_val:\n # move to new grid\n coord[0] += 1\n val = calc_val(grid, tuple(coord))\n grid[tuple(coord)] = val\n if val > input_val:\n return grid\n\n # southeast to northeast\n while coord[1] < level:\n coord[1] += 1\n val = calc_val(grid, tuple(coord))\n grid[tuple(coord)] = val\n if val > input_val:\n return grid\n\n # northeast to northwest\n while coord[0] > -level:\n coord[0] -= 1\n val = calc_val(grid, tuple(coord))\n grid[tuple(coord)] = val\n if val > input_val:\n return grid\n\n # northwest to southwest\n while coord[1] > -level:\n coord[1] -= 1\n val = calc_val(grid, tuple(coord))\n grid[tuple(coord)] = val\n if val > input_val:\n return grid\n\n # southwest to southeast\n while coord[0] < level:\n coord[0] += 1\n val = calc_val(grid, tuple(coord))\n grid[tuple(coord)] = val\n if val > input_val:\n return grid\n\n level += 1\n\n return grid", "title": "" }, { "docid": "2e0186bfe3c7bdc3c89365fba5138fb7", "score": "0.50995994", "text": "def GenLat(Input):\n\trad= ((Input['nypix'][1]*Input['binsz'][1])/2.0) + (6./2.0)\n\tbu = (Input['glat'][1] + rad)\n\tbd = (Input['glat'][1] - rad)\n\n#\treturn np.linspace(bd,bu,int(Input['nxpix'][1]+ (10./Input['binsz'][1])))\n\treturn np.linspace(bd,bu,int(2*rad/Input['binsz'][1]))", "title": "" }, { "docid": "c7b3093d0a6fac2eb928a213ed2b542f", "score": "0.5090077", "text": "def cursor_to_grid(grid, y, x):\n def translate_pos(y, x):\n return (y+1, (2*x) + 1)\n\n min_y, min_x = translate_pos(0, 0)\n max_y, max_x = translate_pos(*grid.shape)\n max_y -= 1\n max_x -= 2\n new_y, new_x = translate_pos(y, x)\n\n # Adjusted for minimum possible values\n new_y, new_x = (max(new_y, min_y), max(new_x, min_x))\n # Adjusted for maximum possible values\n new_y, new_x = (min(new_y, max_y), min(new_x, max_x))\n return (new_y, new_x)", "title": "" }, { "docid": "61cbf063b09cefd48e8c59defa3da423", "score": "0.5089609", "text": "def _lng_to_gridsquare(lng):\n lng = lng + 180\n field, lng = divmod(lng, 20)\n square, lng = divmod(lng, 2)\n subsq = (lng * 12)\n return (string.ascii_uppercase[int(field)], int(\n square), string.ascii_lowercase[int(subsq)])", "title": "" }, { "docid": 
"9502d246692b7c0d87d3dc7eaae3efa3", "score": "0.5085438", "text": "def rescale(self):\n self.rate_lat = 1.5 # self.difference_lat#/self.height_map\n self.rate_lng = 1.5 # self.difference_lng#/self.height_map", "title": "" }, { "docid": "bc74f289853fe432232e00a9edd1003c", "score": "0.50809866", "text": "def test_gpi2rowcol_custom_gpis(self):\n self.custom_gpi_grid = grids.BasicGrid(self.lon.flatten(),\n self.lat.flatten(),\n shape=(len(self.latdim),\n len(self.londim)),\n gpis=np.arange(len(self.lat.flatten()))[::-1])\n gpi = [200, 255]\n row_should = [70, 70]\n column_should = [87, 32]\n row, column = self.custom_gpi_grid.gpi2rowcol(gpi)\n assert np.all(row == row_should)\n assert np.all(column == column_should)", "title": "" }, { "docid": "8175b82982c02c5fb97129dc7b1607dd", "score": "0.50709283", "text": "def reconfig(self):\n h = int(self.cget('height'))\n w = int(self.cget('width'))\n #print 'h,w are', h, w\n self.width = w\n self.height = h\n self.tfm.setPixelRotationCenter(self.width/2,self.height/2) \n self.redraw()", "title": "" }, { "docid": "7e8d44e7999985d4afa01c1364108e21", "score": "0.5068085", "text": "def lon_lat_to_xy(self, (longitude, latitude)):\n x = longitude / 180.0\n y = latitude / -90.0\n\n x = ((x + 1.0) / 2.0) * self.map_x_limit\n y = ((y + 1.0) / 2.0) * self.map_y_limit\n\n x = max(min(x, self.map_x_limit), 0)\n y = max(min(y, self.map_y_limit), 0)\n\n return x, y", "title": "" }, { "docid": "f90274b58623ccb6b741544df9da1605", "score": "0.506774", "text": "def to_gridsquare(latitude, longitude):\n if not (-180 <= latitude <= 180):\n raise ValueError(\"Invalid latitude specified.\")\n if not (-180 <= longitude <= 180):\n raise ValueError(\"Invalid longitude specified.\")\n lat = _lat_to_gridsquare(latitude)\n lng = _lng_to_gridsquare(longitude)\n return \"\".join([str(x) + str(y) for x, y in zip(lng, lat)])", "title": "" }, { "docid": "8547ed469a4a1b783bbedf0843571747", "score": "0.5062534", "text": "def px2lonlat_rearr(img, lon_px, lat_px):\n\n w, h = img.size\n mw = w / 360.0\n mh = h / 180.0\n\n lon_new = np.zeros(len(lon_px))\n lat_new = np.zeros(len(lat_px))\n\n for i in range(0, len(lon_px)): # lon [-180, 179.999]\n if lon_px[i] >= w / 2: # west\n lon_new[i] = -(w - lon_px[i]) / mw\n elif lon_px[i] < w / 2: # east\n lon_new[i] = lon_px[i] / mw\n\n for i in range(0, len(lat_px)):\n lat_new[i] = -(lat_px[i] - h / 2) / mh\n\n return lon_new, lat_new", "title": "" }, { "docid": "6eb84fde7a204ddaac3cd6ffcdfd7cab", "score": "0.5061665", "text": "def xy_subsamp_grid():\n\n x = np.arange(104)*32\n y = np.arange(104)*24\n\n xgrid, ygrid = np.meshgrid(x, y)\n\n return xgrid, ygrid", "title": "" }, { "docid": "1538a9d654a711f2a4a7dd2eaf728fea", "score": "0.50542074", "text": "def test_latlng_to_rowcol(self):\n\n these_row_indices, these_column_indices = radar_utils.latlng_to_rowcol(\n GRID_POINT_LATITUDES_DEG, GRID_POINT_LONGITUDES_DEG,\n nw_grid_point_lat_deg=NW_GRID_POINT_LAT_DEG,\n nw_grid_point_lng_deg=NW_GRID_POINT_LNG_DEG,\n lat_spacing_deg=LAT_SPACING_DEG, lng_spacing_deg=LNG_SPACING_DEG)\n\n self.assertTrue(numpy.allclose(\n these_row_indices, GRID_ROW_INDICES, atol=TOLERANCE\n ))\n self.assertTrue(numpy.allclose(\n these_column_indices, GRID_COLUMN_INDICES, atol=TOLERANCE\n ))", "title": "" }, { "docid": "4c4a350b1499253dcb9cbd41e8fbbc79", "score": "0.5054102", "text": "def make_coordinate_grid(spatial_size, type): # 256,256 -> 256, 256, 2 (-1, 1)\n h, w = spatial_size\n x = torch.arange(w).type(type)\n y = torch.arange(h).type(type)\n\n x = (2 * (x / (w - 1)) - 
1)\n y = (2 * (y / (h - 1)) - 1)\n\n yy = y.view(-1, 1).repeat(1, w)\n xx = x.view(1, -1).repeat(h, 1)\n\n meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)\n\n return meshed", "title": "" }, { "docid": "fb7c7672d1d20a6d823c9e9e1b6616a5", "score": "0.50384736", "text": "def toroidal_step(self):\n next_grid = self.grid.copy()\n for y in range(self.size):\n for x in range(self.size):\n if (y-1<0 and x-1<0):\n miniarreglo = np.vstack([\n np.hstack([self.grid[self.size-1:self.size+1,self.size-1:self.size+1],self.grid[self.size-1:self.size+1,x:x+2]]),\n np.hstack([self.grid[y:y+2,self.size-1:self.size+1],self.grid[y:y+2,x:x+2]])\n ])\n elif (y+2>self.size and x+2>self.size):\n miniarreglo = np.vstack([\n np.hstack([self.grid[y-1:y+1,x-1:x+1],self.grid[y-1:y+1,0:1]]),\n np.hstack([self.grid[0:1,x-1:x+1],self.grid[0:1,0:1]])\n ])\n elif (y-1<0 and x+2>self.size):\n miniarreglo = np.vstack([\n np.hstack([self.grid[self.size-1:self.size+1,x-1:x+1],self.grid[self.size-1:self.size+1,0:1]]),\n np.hstack([self.grid[y:y+2,x-1:x+1],self.grid[y:y+2,0:1]])\n ])\n elif (x-1<0 and y+2>self.size):\n miniarreglo = np.vstack([\n np.hstack([self.grid[y-1:y+1,self.size-1:self.size+1],self.grid[y-1:y+1,x:x+2]]),\n np.hstack([self.grid[0:1,self.size-1:self.size+1],self.grid[0:1,x:x+2]])\n ])\n elif y-1<0:\n miniarreglo = np.vstack([\n self.grid[self.size-1:self.size+1,x-1:x+2],\n self.grid[y:y+2,x-1:x+2]\n ])\n elif x-1<0:\n miniarreglo = np.hstack([\n self.grid[y-1:y+2,self.size-1:self.size+1],\n self.grid[y-1:y+2,x:x+2]\n ])\n elif y+2>self.size:\n miniarreglo = np.vstack([\n self.grid[y-1:y+1,x-1:x+2],\n self.grid[0:1,x-1:x+2]\n ])\n elif x+2>self.size:\n miniarreglo = np.hstack([\n self.grid[y-1:y+2,x-1:x+1],\n self.grid[y-1:y+2,0:1]\n ])\n else:\n miniarreglo = self.grid[y-1:y+2,x-1:x+2]\n celulas_vivas = contador(miniarreglo)\n if self.grid[y, x] == 1:\n celulas_vivas += -1\n if (celulas_vivas==2 or celulas_vivas==3):\n pass\n else:\n next_grid[y, x] = 0\n elif self.grid[y, x] == 0:\n if celulas_vivas==3:\n next_grid[y, x] = 1\n else:\n pass\n # print(f'({y},{x}): {celulas_vivas}')\n # print(miniarreglo,end='\\n\\n')\n self.grid = next_grid.copy()\n self.iterations += 1\n self.live_cells = contador(self.grid)", "title": "" }, { "docid": "206d6ef14f2cd07985eeed76b7b04692", "score": "0.50376445", "text": "def gcarToLcar(X, Y, Z, lat, lon, rho , inverse=False):\n # First get global cartesian coordinates of local origin\n (goX, goY, goZ) = gspToGcar(lat, lon, rho)\n \n if not inverse:\n # Translate global position to local origin\n tx = X - goX\n ty = Y - goY\n tz = Z - goZ\n # Then, rotate about global-Z to get local-X pointing eastward\n rot = -np.radians(lon + 90.0)\n sx = tx * np.cos(rot) - ty * np.sin(rot)\n sy = tx * np.sin(rot) + ty * np.cos(rot)\n sz = tz\n # Finally, rotate about X axis to align Z with upward direction\n rot = -np.radians(90.0 - lat)\n xOut = sx\n yOut = sy * np.cos(rot) - sz * np.sin(rot)\n zOut = sy * np.sin(rot) + sz * np.cos(rot)\n else:\n # First rotate about X axis to align Z with Earth rotational axis\n # direction\n rot = np.radians(90.0 - lat)\n sx = X\n sy = Y * np.cos(rot) - Z * np.sin(rot)\n sz = Y * np.sin(rot) + Z * np.cos(rot)\n # Rotate about global-Z to get global-X pointing to the prime meridian\n rot = np.radians(lon + 90.)\n xOut = sx * np.cos(rot) - sy * np.sin(rot)\n yOut = sx * np.sin(rot) + sy * np.cos(rot)\n zOut = sz\n # Finally, translate local position to global origin\n xOut = xOut + goX\n yOut = yOut + goY\n zOut = zOut + goZ\n \n return xOut, 
yOut, zOut", "title": "" }, { "docid": "07be32c3c61d0c28e34c797c8402a086", "score": "0.5035815", "text": "def GenLon(Input):\n\trad= ((Input['nxpix'][1]*Input['binsz'][1])/2.0) + (6./2.0)\n\tlu = (Input['glon'][1] + rad)%360\n\tld = (Input['glon'][1] - rad)%360\n\tdf = int(lu/Input['binsz'][1])\n\n\tsz = int(2*rad/Input['binsz'][1])\n\tif ( (lu - ld) < 0 ):\n\t\tdf = (lu - 0.)/Input['binsz'][1]\n\t\tl = np.append( np.linspace(lu,0.,df),np.linspace((360-Input['binsz'][1]),ld,int(sz-df)))\n\telse:\n\t\tl = np.linspace(lu,ld,int(2*rad/Input['binsz'][1]))\n\n\treturn l", "title": "" }, { "docid": "39c6d87d2789778d50052f9c1cc9687f", "score": "0.50357103", "text": "def fix_global_grid(\n lon: np.ndarray, fld_time: np.ndarray, lon_axis: int = -1\n ) -> None:\n # Check longitude dimension index\n if (\n (lon_axis < 0 and -lon_axis > len(fld_time.shape))\n or (lon_axis >= 0 and lon_axis >= len(fld_time.shape))\n or fld_time.shape[lon_axis] != lon.size\n ):\n raise ValueError(\n f\"invalid idx_lon {lon_axis}\"\n f\" (fld_time.shape={fld_time.shape}, lon.size={lon.size})\"\n )\n\n # Check longitudinal range\n if lon.max() - lon.min() > 360.0:\n raise ValueError(\n f\"longitutinal range too large: {lon.max() - lon.min()} > 360\"\n )\n\n # Check that longitude is evenly spaced and seamless across date line\n dlons_raw = np.r_[lon, lon[0]] - np.r_[lon[-1], lon]\n dlons = np.abs(np.stack([dlons_raw, 360 - np.abs(dlons_raw)])).min(axis=0)\n if np.unique(dlons).size > 1:\n raise ValueError(\n f\"longitude not evenly spaced/seamless: {np.unique(dlons).tolist()}\"\n )\n dlon = next(iter(dlons))\n\n # Shift the grid\n if lon[-1] > 180.0:\n # Eastward shift\n n_shift = 0\n while lon[-1] > 180.0:\n n_shift += 1\n lon[:] = np.r_[lon[0] - dlon, lon[:-1]]\n if lon[0] < -180.0 or n_shift >= lon.size:\n raise Exception(\n f\"unexpected error while shifting lon eastward by {n_shift}\"\n )\n idcs = np.arange(fld_time.shape[lon_axis] - 1)\n fld_time[:] = np.concatenate(\n [\n np.take(fld_time, [-1], lon_axis),\n np.take(fld_time, idcs, lon_axis),\n ],\n axis=lon_axis,\n )\n log(wrn=f\"fix global data: shift eastward by {n_shift} * {dlon} deg\")\n return\n\n elif lon[0] < -180.0:\n # Westward shift\n n_shift = 0\n while lon[0] < -180.0:\n n_shift += 1\n lon[:] = np.r_[lon[1:], lon[-1] + dlon]\n if lon[-1] < -180.0 or n_shift >= lon.size:\n raise Exception(\n f\"unexpected error while shifting lon eastward by {n_shift}\"\n )\n idcs = np.arange(1, fld_time.shape[lon_axis])\n fld_time[:] = np.concatenate(\n [\n np.take(fld_time, idcs, lon_axis),\n np.take(fld_time, [0], lon_axis),\n ],\n axis=lon_axis,\n )\n log(wrn=f\"fix global data: shift westward by {n_shift} * {dlon} deg\")\n return", "title": "" }, { "docid": "92bb8263c33ed70aa9f6d755e814f52b", "score": "0.5029375", "text": "def to_screen_rel((x, y), calib):\n\t(ax, bx, cx), (ay, by, cy) = calib\n\treturn max(0., min(1., x*ax + y*bx + cx)), max(0., min(1., x*ay + y*by + cy))", "title": "" }, { "docid": "19c257b387b6ee819d5e98e4c0e07660", "score": "0.5026273", "text": "def wrap_roi_360(cds, minlon, maxlon, topind, height):\n elon, minlon, maxlon = ch.lon360(np.array([cds.elon, minlon, maxlon]))\n\n # The # of pixels roi to the right of elon\n rightwidth = ch.deg2pix(maxlon - elon, cds.xres)\n rightind = 0\n # The # of pixels left of the elon\n leftwidth = ch.deg2pix(elon - minlon, cds.xres)\n leftind = cds.width - leftwidth\n # Read left and right ROIs and concatenate around elon\n w_left = rio.windows.Window(leftind, topind, leftwidth, height)\n w_right = 
rio.windows.Window(rightind, topind, rightwidth, height)\n left_roi = cds.read(1, window=w_left)\n right_roi = cds.read(1, window=w_right)\n return np.concatenate((left_roi, right_roi), axis=1)", "title": "" }, { "docid": "710a98e614ebf4d805aeb6ede37935de", "score": "0.5020517", "text": "def coordinate_axes(isl):\n x = [0, 0]\n y = [0, 0]\n dist1 = gps_dist(isl[0][0], isl[0][1], isl[1][0], isl[1][1])\n dist2 = gps_dist(isl[0][0], isl[0][1], isl[2][0], isl[2][1])\n\n origin = isl[0]\n\n x[0] = (isl[1][0] - isl[0][0]) / dist1\n x[1] = (isl[1][1] - isl[0][1]) / dist1\n\n y[0] = (isl[2][0] - isl[0][0]) / dist2\n y[1] = (isl[2][1] - isl[0][1]) / dist2\n\n return(origin, x, y, dist1)", "title": "" }, { "docid": "5a0c51dead1e3765fdd41703ec681e96", "score": "0.5015644", "text": "def baseline_renormalize(self, params):\n if self.units != 'fAm':\n print(\"Warning, no dipole renormalization done because units\"\n \" were in %s\" % (self.units))\n return\n\n N_pyr_x = params['N_pyr_x']\n N_pyr_y = params['N_pyr_y']\n # N_pyr cells in grid. This is PER LAYER\n N_pyr = N_pyr_x * N_pyr_y\n # dipole offset calculation: increasing number of pyr\n # cells (L2 and L5, simultaneously)\n # with no inputs resulted in an aggregate dipole over the\n # interval [50., 1000.] ms that\n # eventually plateaus at -48 fAm. The range over this interval\n # is something like 3 fAm\n # so the resultant correction is here, per dipole\n # dpl_offset = N_pyr * 50.207\n dpl_offset = {\n # these values will be subtracted\n 'L2': N_pyr * 0.0443,\n 'L5': N_pyr * -49.0502\n # 'L5': N_pyr * -48.3642,\n # will be calculated next, this is a placeholder\n # 'agg': None,\n }\n # L2 dipole offset can be roughly baseline shifted over\n # the entire range of t\n self.dpl['L2'] -= dpl_offset['L2']\n # L5 dipole offset should be different for interval [50., 500.]\n # and then it can be offset\n # slope (m) and intercept (b) params for L5 dipole offset\n # uncorrected for N_cells\n # these values were fit over the range [37., 750.)\n m = 3.4770508e-3\n b = -51.231085\n # these values were fit over the range [750., 5000]\n t1 = 750.\n m1 = 1.01e-4\n b1 = -48.412078\n # piecewise normalization\n self.dpl['L5'][self.t <= 37.] -= dpl_offset['L5']\n self.dpl['L5'][(self.t > 37.) & (self.t < t1)] -= N_pyr * \\\n (m * self.t[(self.t > 37.) 
& (self.t < t1)] + b)\n self.dpl['L5'][self.t >= t1] -= N_pyr * \\\n (m1 * self.t[self.t >= t1] + b1)\n # recalculate the aggregate dipole based on the baseline\n # normalized ones\n self.dpl['agg'] = self.dpl['L2'] + self.dpl['L5']", "title": "" }, { "docid": "273c8b3fe4de9d2e8b9c9ba00fccfcec", "score": "0.5008003", "text": "def compress_data(self):\n print 'Arranging data to map latitude/longitude boundaries...'\n dummy, mask_lat, value_range = \\\n eumeltools.mask_values(self.lat,\n self.lat_range)\n if self.lat_range is None:\n self.lat_range = value_range\n\n dummy, mask_lon, value_range = \\\n eumeltools.mask_values(self.lon,\n self.lon_range)\n if self.lon_range is None:\n self.lon_range = value_range\n\n mask = numpy.logical_and(mask_lon,mask_lat)\n\n self.lat = numpy.ravel(self.lat)\n self.lat = numpy.compress(mask==1,self.lat)\n self.lon = numpy.ravel(self.lon)\n self.lon = numpy.compress(mask==1,self.lon)\n self.data = numpy.ravel(self.data)\n self.data = numpy.compress(mask==1,self.data)\n\n # If map grid resolution has not been computed yet, do it.\n if self.lat_map_resolution is None or self.lat_map_resolution is None:\n self.compute_mapgrid_resolution()\n\n # Map input data on regular grid.\n print 'Mapping data to regular grid...'\n lat_reg = numpy.linspace(self.lat_range[0], self.lat_range[1],\n self.lat_map_resolution)\n lon_reg = numpy.linspace(self.lon_range[0], self.lon_range[1],\n self.lon_map_resolution)\n self.data = griddata(self.lon, self.lat, self.data, \n lon_reg,lat_reg)\n self.lon, self.lat = numpy.meshgrid(lon_reg, lat_reg)", "title": "" }, { "docid": "52d7a16d0b21b5b7e4a68495e4d168c4", "score": "0.50059986", "text": "def pix_indeg(self):\n [cd1_1,cd1_2],[cd2_1,cd2_2] = self.wcs.cd if hasattr(self.wcs,\"cd\") else self.pixel_scale_matrix\n pxl = np.sqrt(cd1_1**2+cd2_1**2),np.sqrt(cd1_2**2+cd2_2**2)\n \n if (pxl[0]-pxl[1])/pxl[0] < 1e-2 : # equal to 1%\n return pxl[0] * units.Unit(self.wcs.cunit[0]).in_units(\"degree\") * units.degree\n \n return pxl[0] * units.Unit(self.wcs.cunit[0]).in_units(\"degree\")* units.degree,\\\n pxl[1] * units.Unit(self.wcs.cunit[1]).in_units(\"degree\")* units.degree", "title": "" }, { "docid": "5157e9fba4325bde33ddd67a44fb6bd4", "score": "0.50022644", "text": "def to_grid(self, mapping):\n return list(reversed([[mapping.get((x,y), None)\n for x in range(self.cols)]\n for y in range(self.rows)]))", "title": "" }, { "docid": "5157e9fba4325bde33ddd67a44fb6bd4", "score": "0.50022644", "text": "def to_grid(self, mapping):\n return list(reversed([[mapping.get((x,y), None)\n for x in range(self.cols)]\n for y in range(self.rows)]))", "title": "" }, { "docid": "7d82b5ca97ceb94fe1dd6b525eaf42a8", "score": "0.50022566", "text": "def grid_expand(cls, im_gi, out, per, shape, x, y):\n gxapi_cy.WrapIMU._grid_expand(GXContext._get_tls_geo(), im_gi, out.encode(), per, shape, x, y)", "title": "" }, { "docid": "35bd5dd4effcf7c7131c30143c14be52", "score": "0.50004935", "text": "def _fraunhofer_coord_scale(x, y, z, wavelength):\n\n f_x, f_y = map(lambda item: fftshift(fftfreq(len(item), dx=(item[1] - item[0]))), (x, y))\n x_new, y_new = wavelength*z*f_x, wavelength*z*f_y\n\n return x_new, y_new", "title": "" }, { "docid": "4a94f5346563f3bf1fe5023caa50067f", "score": "0.49953708", "text": "def update_map(self, l_scans, r_pose):\n sx, sy, _ = self.world_to_grid(r_pose[0], r_pose[1], r_pose[2])\n\n # extract all the x and y coordinates of scan end-points\n lxs = l_scans[0]\n lys = l_scans[1]\n lts = np.zeros(l_scans.shape)\n\n # convert all the 
coordinates into grid coordinates\n # exs, eys, _ = self._v_world_to_grid(lxs, lys, lts)\n ans = self._v_world_to_grid(lxs, lys, lts)\n exs = ans[0]\n eys = ans[1]\n ezs = ans[2]\n # print(\"Printing values now!!\")\n # # print(l_scans.shape, lxs.shape, lys.shape)\n # # print(len(ans))\n # # print(exs.shape)\n # # print(eys.shape)\n # # print(ezs.shape)\n # # print(exs[0][:11])\n # # print(exs[0][500:510])\n # # print(exs[0][:-10])\n # print(eys[0][:11])\n # print(eys[0][500:510])\n # print(eys[0][:-10])\n # print(eys[1][:11])\n # print(eys[1][500:510])\n # print(eys[1][:-10])\n # print(ezs[0][:11])\n # print(ezs[0][500:510])\n # print(ezs[0][:-10])\n # print(\"printed values!!\")\n # n = l_scans.shape[1]\n # print(l_scans.shape)\n # chk_arr_x = []\n # chk_arr_y = []\n\n # for i in range(n):\n # x, y = l_scans[:, i]\n # ex, ey, _ = self.world_to_grid(x, y, 0)\n # chk_arr_x.append(ex)\n # chk_arr_y.append(ey)\n # chk_arr_x = np.array(chk_arr_x)\n # chk_arr_y = np.array(chk_arr_y)\n # assert(abs(np.sum(exs - chk_arr_x)) <= 1e-5)\n # assert(abs(np.sum(eys - chk_arr_y)) <= 1e-5)\n\n # call vectorized bresenham function to update the corresponding cell log odds\n self._v_bres(sx, sy, exs[0], eys[0])", "title": "" }, { "docid": "f5574593a49c688864cd467a15ccf10c", "score": "0.49952668", "text": "def cutout_range(self, rac, decc, xw, yw, units='pixels', coordsys='galactic'):\n head = self.header\n wcs = pywcs.WCS(head)\n if units == 'wcs':\n if coordsys == 'celestial' and wcs.wcs.lngtyp == 'GLON':\n rac, decc = coords.Position((rac, decc), system=coordsys).galactic()\n elif coordsys == 'galactic' and wcs.wcs.lngtyp == 'RA':\n rac, decc = coords.Position((rac, decc), system=coordsys).j2000()\n else:\n raise ValueError(\"problem with wcs instance.\")\n xx, yy = wcs.all_world2pix(rac, decc, 0)\n xx = int(xx)\n yy = int(yy)\n print('the center of the image is at pixel coordinates %f, %f.' 
% (xx, yy))\n if units == 'pixels':\n xmin, xmax = np.max([0, xx - xw]), np.min([self.naxis1, xx + xw])\n ymin, ymax = np.max([0, yy - yw]), np.min([self.naxis2, yy + yw])\n elif units == 'arcseconds':\n cd1, cd2 = self.pixel_size\n xmin, xmax = np.max([0, xx - xw / np.abs(cd1)]), np.min([self.naxis1, xx + xw / np.abs(cd1)])\n ymin, ymax = np.max([0, yy - yw / np.abs(cd2)]), np.min([self.naxis2, yy + yw / np.abs(cd2)])\n else:\n raise Exception(\"Can't use units %s.\" % units)\n if xmax < 0 or ymax < 0:\n raise ValueError(\"Max Coordinate is outside of map: %f,%f.\" % (xmax, ymax))\n if ymin >= head.get('NAXIS2') or xmin >= head.get('NAXIS1'):\n raise ValueError(\"Min Coordinate is outside of map: %f,%f.\" % (xmin, ymin))\n return xmin, xmax, ymin, ymax", "title": "" }, { "docid": "528b7c60d6d663f8ba6daf91319ed817", "score": "0.49882075", "text": "def main():\n\n gridgap = 50\n \n with open(\"ROI_corners.dat\",'r') as f:\n ROI_corners = f.read().splitlines()\n \n grid_TLX = float(ROI_corners[0].split()[1])\n grid_TLY = float(ROI_corners[1].split()[1])\n grid_BRX = float(ROI_corners[2].split()[1])\n grid_BRY = float(ROI_corners[3].split()[1])\n \n nof_gridcols = math.ceil((grid_BRX - grid_TLX)/gridgap) - 1.0\n nof_gridrows = math.ceil((grid_BRY - grid_TLY)/gridgap) - 1.0\n \n grid_TLX_new = (grid_TLX+grid_BRX)/2.0 - nof_gridcols*gridgap/2.0\n grid_TLY_new = (grid_TLY+grid_BRY)/2.0 - nof_gridrows*gridgap/2.0\n grid_BRX_new = (grid_TLX+grid_BRX)/2.0 + nof_gridcols*gridgap/2.0\n grid_BRY_new = (grid_TLY+grid_BRY)/2.0 + nof_gridrows*gridgap/2.0\n \n gridx_vec = np.linspace(grid_TLX_new,grid_BRX_new,nof_gridcols+1)\n gridy_vec = np.linspace(grid_TLY_new,grid_BRY_new,nof_gridrows+1)\n \n gridx,gridy = np.meshgrid(gridx_vec,gridy_vec)\n \n np.savetxt('gridx.dat',gridx,delimiter='\\t')\n np.savetxt('gridy.dat',gridy,delimiter='\\t')", "title": "" }, { "docid": "579954a69ca5e5aa159926d7d12caf0e", "score": "0.4987001", "text": "def wrap_to_180(lon):\n lon[lon > 180] -= 360.0\n\n return lon", "title": "" }, { "docid": "f4cbbc95c613540101bab1b5ff175426", "score": "0.4984583", "text": "def set_longitude_grid_ends(self, degrees):\n self._longitude_cap = np.deg2rad(degrees)\n self._xaxis_pretransform \\\n .clear() \\\n .scale(1.0, self._longitude_cap * 2.0) \\\n .translate(0.0, -self._longitude_cap)", "title": "" }, { "docid": "9b4f64257412cbf0be11f6dcd970d8b3", "score": "0.49842414", "text": "def test_lonlat2d(self):\n assert np.all(self.lon == self.grid.lon2d)\n assert np.all(self.lat == self.grid.lat2d)", "title": "" } ]
f72c141c9093906b6ba328d63eff93cc
Gets all liked YouTube videos and saves artist and track info for any songs
[ { "docid": "68f3cd4e4e039c39a125a5c8ac98ea53", "score": "0.7771531", "text": "def get_liked_videos(self):\n max_results = 50\n request = self.youtube_client.videos().list(\n part=\"snippet,contentDetails,statistics\",\n maxResults=max_results,\n myRating=\"like\"\n )\n response = request.execute()\n self.store_video_tracks_and_artists(response)\n num_pages_processed = 1\n print(str(max_results) + \" YouTube likes processed\")\n next_page_token = response[\"nextPageToken\"]\n while next_page_token is not None:\n request = self.youtube_client.videos().list(\n part=\"snippet,contentDetails,statistics\",\n maxResults=max_results,\n myRating=\"like\",\n pageToken=next_page_token\n )\n response = request.execute()\n self.store_video_tracks_and_artists(response)\n num_pages_processed += 1\n videos_processed = num_pages_processed * max_results\n print(str(videos_processed) + \" YouTube likes processed\")\n if \"nextPageToken\" in response:\n next_page_token = response[\"nextPageToken\"]\n else:\n break", "title": "" } ]
[ { "docid": "0630591a3e4269d584fd7dc8e64b81a9", "score": "0.82091135", "text": "def get_liked_videos(self):\n #Requisita o token atualizado para recuperar os likeds videos\n youtube_token=self.get_token_youtube()\n query = \"https://www.googleapis.com/youtube/v3/videos?part=snippet%2CcontentDetails%2Cstatistics&maxResults=10&myRating=like&key={}\".format(\n api_key_youtube)\n response = requests.get(\n url=query,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(youtube_token)\n }\n )\n response_json = response.json()\n if response.status_code != 200 and response.status_code != 201:\n raise ResponseException(response.status_code)\n else:\n # collect each video and get important information\n for item in response_json[\"items\"]:\n video_title = item[\"snippet\"][\"title\"]\n youtube_url = \"https://www.youtube.com/watch?v={}\".format(\n item[\"id\"])\n\n # use youtube_dl to collect the song name & artist name\n video = youtube_dl.YoutubeDL({}).extract_info(\n youtube_url, download=False)\n song_name = video[\"track\"]\n artist = video[\"artist\"]\n print(song_name)\n if song_name is not None and artist is not None:\n # save all important info and skip any missing song and artist\n self.all_song_info[video_title] = {\n \"youtube_url\": youtube_url,\n \"song_name\": song_name,\n \"artist\": artist,\n\n # add the uri, easy to get song to put into playlist\n \"spotify_uri\": self.get_spotify_uri(song_name, artist)\n\n }", "title": "" }, { "docid": "823075f148e28cd67c6b5012d12e441d", "score": "0.6375012", "text": "def songs_youtube_get(self):\n retval = list(self.ps.songs.youtube_ids)\n return(jsonify(retval), status.OK)", "title": "" }, { "docid": "224ae5f80d06b7d9726619be2f0648ec", "score": "0.62498575", "text": "def get_more_youtube_data():\n engine = get_storage_engine()\n\n youtube_keys = app.YOUTUBE_TO_MYSQL\n\n youtube_handler = YoutubeDataHandler(\n engine=engine,\n token=app.YOUTUBE_TOKEN,\n youtube_keys=youtube_keys\n )\n\n youtube_handler.get_more_data(\n output_dir=app.YOUTUBE_OUTPUT_FOLDER,\n limit=app.YOUTUBE_VIDEO_LIMIT,\n itunes_keys=app.ITUNES_TO_MYSQL,\n update=app.YOUTUBE_UPDATE_ENTRIES\n )", "title": "" }, { "docid": "7390686deb5fa13a6f28af02b734d3f5", "score": "0.6196759", "text": "def get_recommendations(self, video_id):\n self._logger.display()\n print('Getting recommendations for video ' + video_id)\n if video_id in self._scrapped_videos:\n print('Video id ' + video_id + ' is already in the database, reusing it.')\n # This video was seen, returning recommendations that we stored\n return self._scrapped_videos[video_id]['recommendations']\n\n\n # Else, we scrap the video:\n\n url = \"https://www.youtube.com/watch?v=\" + video_id\n\n # Until we succeed, try to access the video page:\n while True:\n try:\n html = urlopen(url)\n break\n except Exception as e:\n print(repr(e))\n self._logger.info('We had to wait because an error in scrapping from youtube' + repr(e))\n time.sleep(1)\n self.soup = BeautifulSoup(html, \"lxml\")\n\n # Getting views\n views = -1\n likes = -1\n dislikes = -1\n duration = -1\n pubdate = ''\n channel = ''\n channel_id = ''\n recos = []\n title = ''\n keywords = []\n\n # UPDATED SCRAPPER\n for title_elem in self.soup.findAll('meta', {'name': 'title'}):\n title = title_elem['content']\n\n for desc_elem in self.soup.findAll('meta', {'name': 'description'}):\n description = desc_elem['content']\n\n for upload_elem in self.soup.findAll('meta', {'itemprop': 'uploadDate'}):\n pubdate = upload_elem['content']\n 
now = datetime.datetime.now()\n month_ago = now - dateutil.relativedelta.relativedelta(months=1)\n month_ago_string = month_ago.strftime('%Y-%m-%d')\n if pubdate < month_ago_string and self._skip_older_videos:\n print('*******')\n print('WARNING THE VIDEO ' + video_id + ' WAS PUBLISHED MORE THAN A MONTH AGO, WE ARE SKIPPING IT ' + pubdate)\n print('******* ')\n print('')\n self._logger.info('Channel skipped because it did not publish in a month')\n return []\n\n for keywords_elem in self.soup.findAll('meta', {'name': 'keywords'}):\n keywords = keywords_elem['content'].split(', ')\n\n try:\n duration_pattern = re.compile(r'approxDurationMs.....(\\d+)')\n duration_text = duration_pattern.findall(repr(self.soup))\n duration = int(int(duration_text[0])/1000)\n except:\n self._logger.info('Scrapping duration not found')\n print('WARNING: scrapping: duration not found')\n\n pattern = re.compile(r'ytInitialData(.*?\\});')\n try:\n v = json.loads(pattern.findall(self.soup.text)[0][5:])\n except:\n try:\n v = json.loads('{\"' + pattern.findall(self.soup.text)[0][5:])\n except:\n print('ERROR WITH JSON:')\n print('SKIPPING VIDEO')\n self._logger.warning('Video skipped because badly formated json')\n print(pattern.findall(self.soup.text))\n # print(pattern.findall(self.soup.text)[0][5:])\n print('END ERROR')\n return []\n\n # print('PATTERN FOUND')\n # for key in v.keys():\n # print(key + ' :')\n # if type(v[key]) is str or type(v[key]) is list:\n # print(' ' + repr(v[key]))\n # else:\n # for subkey in v[key].keys():\n # print(' ' + subkey + ' :')\n # print(' ' + repr(v[key][subkey]))\n # print('\\n\\n\\n')\n\n try:\n recos.append(v['contents']['twoColumnWatchNextResults']['secondaryResults']['secondaryResults']['results'][0]['compactAutoplayRenderer']['contents'][0]['compactVideoRenderer']['videoId'])\n except:\n self._logger.warning('COULD NOT scrap the first recommendation')\n print('WARNING COULD NOT scrap the first recommendation')\n\n for i in range(1, 20):\n try:\n recos.append(v['contents']['twoColumnWatchNextResults']['secondaryResults']['secondaryResults']['results'][i]['compactVideoRenderer']['videoId'])\n except:\n self._logger.info('One reco could not be found')\n print('DEBUG: one reco could not be found')\n\n try:\n primary_renderer = self.find_primary_renderer(v)\n try:\n view_text = primary_renderer['viewCount']['videoViewCountRenderer']['viewCount']['simpleText']\n views = self.extract_number_or_default(view_text)\n except:\n self._logger.info('Viewcount not found')\n print('WARNING: viewcount not found in ' + repr(primary_renderer))\n\n try:\n likes_text = primary_renderer['videoActions']['menuRenderer']['topLevelButtons'][0]['toggleButtonRenderer']['defaultText']['accessibility']['accessibilityData']['label']\n dislikes_text = primary_renderer['videoActions']['menuRenderer']['topLevelButtons'][1]['toggleButtonRenderer']['defaultText']['accessibility']['accessibilityData']['label']\n likes = self.extract_number_or_default(likes_text)\n dislikes = self.extract_number_or_default(dislikes_text)\n except:\n self._logger.info('could not get likes and/or dislikes')\n print('WARNING: could not get likes and/or dislikes')\n except:\n print('ERROR: Primary renderer not found!!')\n self._logger.info('Primary renderer not found, so no likes/dislikes info')\n\n try:\n channel = v['contents']['twoColumnWatchNextResults']['results']['results']['contents'][1]['videoSecondaryInfoRenderer']['owner']['videoOwnerRenderer']['title']['runs'][0]['text']\n except:\n channel = ''\n print('WARNING channel 
not found in scrapper')\n self._logger.warning('Channel not found in scrapper')\n\n try:\n channel_id = v['contents']['twoColumnWatchNextResults']['results']['results']['contents'][1]['videoSecondaryInfoRenderer']['owner']['videoOwnerRenderer']['title']['runs'][0]['navigationEndpoint']['browseEndpoint']['browseId']\n except:\n channel_id = ''\n print('WARNING channel ID not found in scrapper')\n self._logger.warning('Channel ID not found in scrapper')\n\n if video_id not in self._scrapped_videos:\n self._scrapped_videos[video_id] = {\n 'views': views,\n 'likes': likes,\n 'dislikes': dislikes,\n 'recommendations': recos,\n 'title': title,\n 'id': video_id,\n 'channel': channel,\n 'pubdate': pubdate,\n 'duration': duration,\n 'scrapDate': time.strftime('%Y%m%d-%H%M%S'),\n 'channel_id': channel_id,\n 'description': description,\n 'keywords': keywords}\n print('Video scrapped: ' + repr(self._scrapped_videos[video_id]))\n\n video = self._scrapped_videos[video_id]\n try:\n print(video_id + ': ' + video['title'] + ' [' + channel + ']' + str(video['views']) + ' views and ' + repr(len(video['recommendations'])) + ' recommendations')\n except:\n print('Scrapped vide with special chars ' + video_id)\n return recos", "title": "" }, { "docid": "3edd8adc5f00b6733d6f5be538763b64", "score": "0.6166248", "text": "def youtube(self, irc, msg, args, url):\n\n self._ytinfo(irc, url, True)", "title": "" }, { "docid": "601424cc5567aa52627782e3af720453", "score": "0.6053161", "text": "def start_video_id_scrape(self):\n\n print('Scraping %d youtube songs' % len(self.video_ids))\n\n for idx, video_id in enumerate(self.video_ids):\n song = YouTubeSong(video_id=video_id)\n\n try:\n song.print_header(idx)\n song.obtain_comments(self.min_num_comments, self.min_num_comments, self.filter_list)\n self.add(song)\n\n except:\n # TODO: catch 403 error because it probably means i ran out of YouTube requests quota\n print(' EXCEPTION occurred with this piece, ignoring')\n self.add_exception()", "title": "" }, { "docid": "3aadc1ec2dee01a191506c7d2298ee04", "score": "0.5995638", "text": "def extract_playlistdata(self):\n url_prepend = \"https://www.youtube.com/watch?v=\"\n url_base = \"https://www.youtube.com\"\n if not self._is_connection_possible():\n logger.warning(\"Cannot play playlist. No connection detected!\")\n return \"N/A\", []\n r = requests.get(self.URL)\n soup = BeautifulSoup(r.text, \"html.parser\")\n name = soup.findAll(\"h1\", attrs={\"class\": \"pl-header-title\"})\n self.extract_name(name)\n # soup = soup.findAll('tr', attrs={'class': 'pl-video',\n # 'class': 'yt-uix-tile'})\n logger.debug(len(soup))\n\n # use regex to get video url\n # this seems rigid against <div> changes\n # so, far this works\n links = soup.find_all(\n \"a\", href=re.compile(r\".*watch.*\") # this regex can be improved in future\n )\n for link in links:\n href = link[\"href\"]\n title = link.contents[0]\n # If the link is not a video from playlist, there will be no\n # 'index' substring. Hence, we can skip this\n if \"index\" not in href:\n continue\n # Just to make sure the title is not empty. This is done because\n # there is always a first link that contains 'index', yet does not\n # have a title. This represents the meta-link: a link to playlist\n # itself.\n title = title.strip()\n if not title:\n continue\n # Get video url using simple algorithm. 
This 3 index search is done\n # just to make sure when youtube playlist url has these query\n # params in shuffled order.\n slicer = self._get_url_slicer(href)\n url = url_base + href[:slicer]\n # Check if the video is deleted. Some videos in playlist turn out\n # to be deleted videos. We can put a check for that by checking\n # if the title is [Deleted video]\n # We have a simpler way to check for deleted videos\n if title.lower()[1:-1] in self._DELETED:\n logger.debug(title.lower()[1:-1])\n logger.info(\"Skipping {}: DELETED/BLOCKED/PRIVATE video.\".format(url))\n continue\n\n if not self._check_valid(url):\n continue\n\n self.list_content_tuple.append(YoutubeMetadata(url, title))\n\n if len(self.list_content_tuple) == 0:\n logger.warning(\n \"Are you sure you have videos in your playlist? Try changing\\\n privacy to public.\"\n )\n\n self.strip_to_start_end()", "title": "" }, { "docid": "507f68a46c58c0d5903378ce8d5c4e56", "score": "0.5944153", "text": "def start_echonest_scrape(self):\n songs = hh.get_all_files(self.basedir)\n if self.limit:\n songs = songs[:self.limit]\n self.limit = len(songs)\n print('Scraping %d youtube songs' % len(songs))\n\n for idx, song_loc in enumerate(songs):\n h5 = hh.open_h5_file_read(song_loc)\n artist = hh.get_artist_name(h5).decode('utf-8')\n title = hh.get_title(h5).decode('utf-8')\n song = YouTubeSong(artist, title)\n\n try:\n song.print_header(idx)\n song.obtain_comments(self.min_num_comments, self.min_num_comments, self.filter_list)\n self.add(song)\n\n except:\n # TODO: catch 403 error because it probably means i ran out of YouTube requests quota\n print(' EXCEPTION occurred with this piece, ignoring')\n self.add_exception()\n\n h5.close()", "title": "" }, { "docid": "a975de50b91bb39b6780063839982d42", "score": "0.5832586", "text": "def generate_youtube_url(raw_song, meta_tags, tries_remaining=5):\n # prevents an infinite loop but allows for a few retries\n if tries_remaining == 0:\n log.debug('No tries left. I quit.')\n return\n\n query = { 'part' : 'snippet',\n 'maxResults' : 50,\n 'type' : 'video' }\n\n if const.args.music_videos_only:\n query['videoCategoryId'] = '10'\n\n if not meta_tags:\n song = raw_song\n query['q'] = song\n else:\n song = '{0} - {1}'.format(meta_tags['artists'][0]['name'],\n meta_tags['name'])\n query['q'] = song\n log.debug('query: {0}'.format(query))\n\n data = pafy.call_gdata('search', query)\n query_results = {'part': 'contentDetails,snippet,statistics',\n 'maxResults': 50,\n 'id': ','.join(i['id']['videoId'] for i in data['items'])}\n log.debug('query_results: {0}'.format(query_results))\n\n vdata = pafy.call_gdata('videos', query_results)\n\n videos = []\n for x in vdata['items']:\n duration_s = pafy.playlist.parseISO8591(x['contentDetails']['duration'])\n youtubedetails = {'link': x['id'], 'title': x['snippet']['title'],\n 'videotime':internals.videotime_from_seconds(duration_s),\n 'seconds': duration_s}\n videos.append(youtubedetails)\n if not meta_tags:\n break\n\n if not videos:\n return None\n\n if const.args.manual:\n log.info(song)\n log.info('0. Skip downloading this song.\\n')\n # fetch all video links on first page on YouTube\n for i, v in enumerate(videos):\n log.info(u'{0}. 
{1} {2} {3}'.format(i+1, v['title'], v['videotime'],\n \"http://youtube.com/watch?v=\"+v['link']))\n # let user select the song to download\n result = internals.input_link(videos)\n if not result:\n return None\n else:\n if not meta_tags:\n # if the metadata could not be acquired, take the first result\n # from Youtube because the proper song length is unknown\n result = videos[0]\n log.debug('Since no metadata found on Spotify, going with the first result')\n else:\n # filter out videos that do not have a similar length to the Spotify song\n duration_tolerance = 10\n max_duration_tolerance = 20\n possible_videos_by_duration = list()\n\n '''\n start with a reasonable duration_tolerance, and increment duration_tolerance\n until one of the Youtube results falls within the correct duration or\n the duration_tolerance has reached the max_duration_tolerance\n '''\n while len(possible_videos_by_duration) == 0:\n possible_videos_by_duration = list(filter(lambda x: abs(x['seconds'] - meta_tags['duration']) <= duration_tolerance, videos))\n duration_tolerance += 1\n if duration_tolerance > max_duration_tolerance:\n log.error(\"{0} by {1} was not found.\\n\".format(meta_tags['name'], meta_tags['artists'][0]['name']))\n return None\n\n result = possible_videos_by_duration[0]\n\n if result:\n url = \"http://youtube.com/watch?v=\" + result['link']\n else:\n url = None\n\n return url", "title": "" }, { "docid": "5230e0d0471671c98a99e59212a5715a", "score": "0.57287616", "text": "async def youtube(self, ctx: commands.Context, *, query: str):\n await ctx.invoke(self.connect)\n await self.play_track(ctx, query, \"yt\")", "title": "" }, { "docid": "9d8e53e9f83d26a1d884639c4bc243d2", "score": "0.57256216", "text": "def lookup_youtube(search): \n apiResult = None\n url = \"https://youtube-search1.p.rapidapi.com/\" + search\n headers = {\n 'x-rapidapi-key': \"3d2fbbd54bmshbbccc15fd8196c7p11c1aejsn36237f19f62b\",\n 'x-rapidapi-host': \"youtube-search1.p.rapidapi.com\"\n }\n\n response = requests.request(\"GET\", url, headers=headers)\n jsonResponse = response.json()\n for key, value in jsonResponse.items():\n apiResult = jsonResponse[\"items\"][0][\"title\"]\n api_likes = int(jsonResponse[\"items\"][0][\"likeCount\"]) \n api_dislikes = int(jsonResponse[\"items\"][0][\"dislikeCount\"]) \n api_image = jsonResponse[\"items\"][0][\"thumbDefault\"]\n api_ratio = round((api_likes/(api_dislikes + api_likes)) * 100, 2)\n return apiResult, api_ratio, api_image #returns 3 pieces of data (title, ratio of likes and dislikes and image). access the data (0, 1, 2)", "title": "" }, { "docid": "580d97f1f1da623d2357cab6dc39a0a2", "score": "0.57199347", "text": "def extract_youtube(raw_log):\n # Get all links from the log\n log_tracks = []\n for link in re.findall(URL_REGEX, raw_log): # filter all hyperlinks\n if \"youtu\" in link: # only youtube, works with youtu.be and youtube.com ;)\n log_tracks.append(link) # add it to our list\n\n print(f\"Got {len(log_tracks)} track IDs from the log file\")\n pprint.pprint(log_tracks)\n\n # Get the \"track_id\" part of each link without regex!\n cut_links = []\n for link in log_tracks:\n # This particular one is not available and was killing us for some reason...\n if \"O8sWbzGwOv0\" in link:\n continue\n if \"v=\" in link: # e.g. https://www.youtube.com/watch?v=aBcDeFGH\n link = link.split(\"v=\")[1]\n if \"be/\" in link: # e.g. 
https://youtu.be/aBcDeFGH\n link = link.split(\"be/\")[1]\n if \"&list\" in link: # Some (one) seemed to have this list thing on the end\n link = link.split(\"&list\")[0]\n cut_links.append(link)\n\n print(f\"Got {len(cut_links)} cleaned links from the extracted links\")\n\n # Now we chunk that list into lists of 50 items\n for group in chunks(cut_links, 50):\n\n # Make a \"watch_videos?\"-style list of links\n video_list = \"http://www.youtube.com/watch_videos?video_ids=\" + \",\".join(group)\n\n # Connect to youtube and get the short URL link for the list\n response = requests.get(video_list)\n playlist_link = response.url.split(\"list=\")[1]\n\n # Turn it into a \"playlist\" by appending the \"list\" to this style link\n playlist_url = (\n \"https://www.youtube.com/playlist?list=\"\n + playlist_link\n # gives us old youtube which has the \"add list to\" button we need\n + \"&disable_polymer=true\"\n )\n\n # Pop it open in your web browser\n webbrowser.open(playlist_url)\n print(\"Finished!\")\n return", "title": "" }, { "docid": "1e37a79b6a591ef2a81a1d51ab8a09e1", "score": "0.5712709", "text": "def store_video_tracks_and_artists(self, videos):\n for video in videos[\"items\"]:\n title = video[\"snippet\"][\"title\"]\n if \"-\" in title:\n items = title.split(\"-\")\n if len(items) == 2:\n artist = items[0].strip()\n track = items[1].strip()\n # Remove some parts of artist and track names to help with searching\n if \"(\" in track:\n track = track.split(\"(\", 1)[0].strip()\n if \"[\" in track:\n track = track.split(\"[\", 1)[0].strip()\n if \" ft\" in artist:\n artist = artist.split(\" ft\", 1)[0].strip()\n if \" feat\" in artist:\n artist = artist.split(\" feat\", 1)[0].strip()\n if \" &\" in artist:\n artist = artist.split(\" &\", 1)[0].strip()\n uri = self.find_song_uri(track, artist)\n if uri is not None:\n self.song_info[title] = {\n \"artist\": artist,\n \"track\": track,\n \"spotify_uri\": uri\n }", "title": "" }, { "docid": "059e3c7bc9bf89ad8a51cc0cabf46951", "score": "0.57073355", "text": "def search():\n\n max_results = 3\n result = []\n\n debug((\"Incoming POST request: {}\").format(request.json[\"search\"]))\n\n yt_search_request = (\n \"{}/search?q={}&type=playlist&part=id,snippet\"\n + \"&fields=items(id/playlistId,snippet(thumbnails/medium/url,title))\"\n + \"&maxResults={}&key={}\").format(\n read_config(\"YOUTUBE_API_URL\"), quote(request.json[\"search\"]),\n max_results, read_config(\"YOUTUBE_API_KEY\"))\n try:\n yt_search_response = urllib_request.urlopen(yt_search_request)\n youtube = loads(yt_search_response.read().decode())\n\n for playlist in youtube[\"items\"]:\n\n req = (\n \"{}/playlistItems?playlistId={}\"\n + \"&part=id&fields=pageInfo/totalResults\"\n + \"&maxresults=1&key={}\").format(\n read_config(\"YOUTUBE_API_URL\"), playlist[\"id\"][\"playlistId\"], read_config(\"YOUTUBE_API_KEY\"))\n request_send = urllib_request.urlopen(req)\n videos_in_playlist = loads(request_send.read().decode())\n\n #TODO: decide what to return in case of missing thumbnail\n thumbnail_url = \"\"\n\n if \"thumbnails\" in playlist[\"snippet\"]:\n # api call needed as playlist thumbnail != thumbnail of first video (or not inevitable)\n thumbnail_url = playlist[\"snippet\"][\"thumbnails\"][\"medium\"][\"url\"]\n\n result.append({\n \"source\": \"youtube\",\n \"id\": playlist[\"id\"][\"playlistId\"],\n \"title\": playlist[\"snippet\"][\"title\"],\n \"thumb\": thumbnail_url,\n \"amount\": videos_in_playlist[\"pageInfo\"][\"totalResults\"]})\n except urllib_request.URLError as error:\n 
debug(('YouTube API request failed: {} {}').format(error.code, error.reason))\n except:\n debug('YouTube API request failed')\n\n VIMEO = VimeoClient(\n token=read_config(\"VIMEO_TOKEN\"),\n key=read_config(\"VIMEO_KEY\"),\n secret=read_config(\"VIMEO_SECRET\"))\n\n vim_search_request = VIMEO.get((\"/channels?query={}&per_page={}\").format(quote(request.json[\"search\"]), max_results), params={\"fields\": \"name, uri, pictures.uri, metadata.connections.videos.total\"})\n vimeo = vim_search_request.json()\n for video in vimeo[\"data\"]:\n result.append({\n \"source\": \"vimeo\",\n \"id\": video[\"uri\"].split(\"/\")[2],\n \"title\": video[\"name\"],\n #TODO: check if thumbnail of first video is always thumbnail of channel (or customizable as on YouTube)\n \"thumb\": (\"https://i.vimeocdn.com/video/{}_100x75.jpg\").format(video[\"pictures\"][\"uri\"].split(\"/\")[4]),\n \"amount\": video[\"metadata\"][\"connections\"][\"videos\"][\"total\"]\n })\n\n headers = Headers()\n headers.add('Access-Control-Allow-Origin', 'https://newsic.io')\n #headers.add('Access-Control-Allow-Origin', '*')\n headers.add('Access-Control-Allow-Headers', 'Content-Type')\n return Response(dumps(result), 200, headers)", "title": "" }, { "docid": "ef1e61e119a3934b943a844cedcaed9f", "score": "0.5590825", "text": "def add_song_to_playlist(self):\n print(\"Verificando Liked Videos\")\n global spotify_token\n global spotify_user_id\n global data\n\n spotify_token,spotify_user_id=self.spotify_authenticate()\n delete=False\n # create a new playlist\n playlist_id = self.create_playlist()\n uris_playlist_tracks=self.verify_playlist_track(playlist_id)\n # populate dictionary with our liked songs\n self.get_liked_videos()\n\n # collect all of uri\n uris = [info[\"spotify_uri\"]\n for song, info in self.all_song_info.items()]\n quantidade1=len(uris)\n quantidade2=len(uris_playlist_tracks[0])\n pos=np.zeros((1,1),dtype='i4')\n n=0\n #Algoritmo para marcar as posições de Musicas repetidas na lista, para excluí-las depois\n if quantidade2>0:\n for x in range(0,quantidade1):\n for y in range(0,quantidade2):\n if uris[x]==uris_playlist_tracks[0][y]:\n pos[0][n]=x\n pos= np.resize(pos, (1, len(pos[0]) + 1))\n n=n+1\n delete=True\n break\n #Deleta as musicas repetidas\n if delete==True:\n pos=np.delete(pos,len(pos[0])-1)\n uris=np.delete(uris,pos)\n #Converte o vetor para lista\n uris=uris.tolist()\n\n #Verifica se a lista é diferente de vazio\n if uris!=[]:\n # add all songs into new playlist\n request_data = json.dumps(uris)\n\n query = \"https://api.spotify.com/v1/playlists/{}/tracks\".format(\n playlist_id)\n\n response = requests.post(\n url=query,\n data=request_data,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(spotify_token)\n }\n )\n\n # check for valid response status\n if response.status_code != 200 and response.status_code !=201:\n raise ResponseException(response.status_code)\n else:\n print(\"Musicas adicionadas com sucesso à sua playlist {}\".format(nome_playlist))\n response_json = response.json()\n return response_json\n #Se lista vazia, não há novas músicas a serem acrescentadas\n else:\n print(\"Não há músicas novas à serem acrescentadas.\")", "title": "" }, { "docid": "7a667d369a80483620da3e3c70c8716d", "score": "0.5492363", "text": "def songs_youtube_download_post(self):\n # POST params\n youtube_ids = request.json.get('youtube_ids')\n path = request.json.get('path', '')\n if not youtube_ids:\n return(\n jsonify({'error': 'youtube_id param required.'}),\n 
status.BAD_REQUEST\n            )\n        for youtube_id in youtube_ids.split():\n            self.ps.songs.download_from_youtube(\n                youtube_id=youtube_id,\n                path=path\n            )\n        return(jsonify('OK'), status.OK)", "title": "" }, { "docid": "56ec8b12435d90eab4139c35b336a2c5", "score": "0.5489842", "text": "def download_song_from_youtube(song):\n    url = song.link\n    l = song.link\n    response = None\n    try:\n        response = urllib.request.urlopen(url)\n    except:\n        text_to_search = song.song_name + ' - ' + song.artist\n        query = urllib.parse.quote(text_to_search)\n        url = \"https://www.youtube.com/results?search_query=\" + query\n    html = response.read()\n    soup = BeautifulSoup(html, 'html.parser')\n    vid = soup.find_all(attrs={'class': 'yt-uix-tile-link'})[0]\n    l = 'https://www.youtube.com' + vid['href']\n\n    ydl_opts = {\n        'format': 'bestaudio/best',\n        'playlist_items': '0',\n        'postprocessors': [{\n            'key': 'FFmpegExtractAudio',\n            'preferredcodec': 'mp3',\n            'preferredquality': '192',\n        }],\n        'outtmpl': MP3FILES_DIR + \"%(title)s.%(ext)s\"\n    }\n    with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n        try:\n            ydl.download([l])\n            try:\n                info_dict = ydl.extract_info(l, download=False)\n            except:\n                pass\n\n            song.link_on_disc = info_dict.get('title', None) + \".mp3\"\n            song.audio = True\n            song.save()\n        except Exception as e:\n            song.link_on_disc = ''\n            song.audio = False\n            song.save()\n            print('not saved ok', e)", "title": "" }, { "docid": "bddaac342b70e897ee5106f44937889b", "score": "0.5466575", "text": "def handle_youtube(self, message, chat_id):\n\n        # TODO: Better link detection, better exception handling\n        try:\n            video = pafy.new(message)\n            self.bot.sendMessage(chat_id, \"Downloading video from Youtube: \" + video.title)\n            saveloc = self.muckedir + \"/unsortiert/youtube/\"\n            self.dlm.query_youtube((video, saveloc, chat_id))\n        except OSError:\n            logging.error(\"OSError during YT-DL: \" + message)\n        except ValueError:\n            self.bot.sendMessage(chat_id, \"The technician has been informed.\")\n            logging.error(\"ValueError during YT-DL: \" + message)", "title": "" }, { "docid": "2f6cb7835545602cafdea1b11055397b", "score": "0.54599214", "text": "def get_playlists():\n\n    res = spotify.get_playlists()\n\n    return res", "title": "" }, { "docid": "7ee43dddb1e4ab590ec8effcdc97a4e3", "score": "0.5456811", "text": "def getYoutube(self, uri):\n        try:\n            outputPath = '-o' + self._youtube.strip() + '/%(title)s.%(ext)s'\n\n            p = Popen(['youtube-dl', uri, outputPath, '--keep-video','--restrict-filenames', '--write-info-json', '--add-metadata'],stdout=PIPE)\n            stdout, stderr = p.communicate()\n            print('\\t%s - %s' % (uri, outputPath))\n            \n        except:\n            print('ERROR')\n            print(stderr)", "title": "" }, { "docid": "df66a7f8d4dd7294bcc998a28a3e980a", "score": "0.5437836", "text": "def youtube(request):\n    channels = models.YoutubeChannel.objects\\\n        .exclude(priority=models.YoutubeChannel.PRIORITY_NONE)\\\n        .order_by(\"slug\")\n    playlists = models.Playlist.objects.all()\n    return render(request, \"notifpy/youtube.html\", {\n        \"channels\": channels,\n        \"playlists\": playlists,\n    })", "title": "" }, { "docid": "794b21ff32633ec08efaf5a6a9a936d3", "score": "0.54364103", "text": "def run(self):\n        # Download\n        mp4_path = os.path.join(self.download_path, \"mp4\")\n        try:\n            os.mkdir(mp4_path)\n        except FileExistsError:\n            pass\n\n        time0 = time.time()\n        video_properties = (\n            (\n                key_value,\n                (self.download_path, mp4_path),\n                self.playlist_properties[index],\n                self.save_as_mp4,\n            )\n            for index, key_value in enumerate(\n                self.videos_dict.items()\n            )  # dict is naturally sorted in iteration\n        )\n
utils.map_threads(utils.thread_query_youtube, video_properties)\n shutil.rmtree(mp4_path) # remove mp4 dir\n time1 = time.time()\n\n delta_t = time1 - time0\n self.downloadCount.emit(delta_t)", "title": "" }, { "docid": "53356f8eeadd43b2c269ba523483d92d", "score": "0.54327816", "text": "def search_and_add_youtube_videos_service():\n resent_publish_datetime = get_time_of_most_recent_uploaded_video()\n\n while True:\n search_results = youtube_search_keyword()\n for result in search_results:\n video_publish_datetime = get_datetime_object(result['snippet']['publishedAt'])\n if resent_publish_datetime < video_publish_datetime:\n save_video_and_thumbnail_in_models(result)\n resent_publish_datetime = video_publish_datetime\n time.sleep(10*60)", "title": "" }, { "docid": "2c2b60bddf473b8f81b9430bbb5bbe8e", "score": "0.5429095", "text": "def manage_download(self):\n\n if self.is_playlist:\n playlist = self.client.get('/playlists/%s' % (self.track.id))\n count = 1\n self.label_dl.show()\n for song_url in playlist.tracks:\n self.label_dl.setText(\"%d / %d\" % (count, len(playlist.tracks)))\n count += 1\n self.url_str = song_url[\"permalink_url\"]\n self.get_track()\n self.image = requests.get(self.modifiy_image_size()).content\n self.download()\n if len(playlist.tracks) == 0:\n self.fail_box()\n else:\n self.success_box() # Success box for playlist\n self.label_dl.hide()\n self.enable_input()\n elif self.is_likes:\n likes = self.client.get('/users/%s/favorites/' % (self.track.id),\n linked_partitioning=1, limit=200)\n set_likes = set()\n while True:\n try:\n link = likes.next_href\n except:\n break\n for like in likes.collection:\n set_likes.add(like)\n likes = self.client.get(link, linked_partitioning=1,\n limit=200)\n for like in likes.collection:\n set_likes.add(like)\n count = 1\n self.label_dl.show()\n for like in set_likes:\n self.url_str = like.user['permalink_url']\n self.track = like\n self.label_dl.setText(\"%d / %d\" % (count, len(set_likes)))\n count += 1\n self.image = requests.get(self.modifiy_image_size()).content\n self.download()\n sys.exit(0)\n else:\n self.success_box() # Success box for playlist\n self.label_dl.hide()\n self.enable_input()\n else:\n if self.download():\n self.success_box() # Succes box for single song\n\n self.reset()", "title": "" }, { "docid": "b6efa5aa47d184bb3634f142d684a249", "score": "0.5395927", "text": "def video_urls(self) -> List[str]:\n return [\n \"https://www.youtube.com\" + watch_path for watch_path in self.parse_links()\n ]", "title": "" }, { "docid": "b3331f2baac9bfe5aba9b21c945fb9c6", "score": "0.53844553", "text": "def get_videos(url) -> list:\r\n result = []\r\n if 'list' in url:\r\n print(\"Adding a Playlist\")\r\n playlist = Playlist(url)\r\n # this fixes the empty playlist.videos list\r\n playlist._video_regex = re.compile(r\"\\\"url\\\":\\\"(/watch\\?v=[\\w-]*)\")\r\n try:\r\n for i, v in enumerate(playlist.videos):\r\n # print(f\"{i+1}: '{v.video_id}'\", end=\", \")\r\n result.append(v)\r\n except:\r\n print(\"Failed creating a list of video urls\")\r\n else:\r\n result.append(YouTube(url))\r\n \r\n return result", "title": "" }, { "docid": "285860e10b32b69c9567dea995b8ac1f", "score": "0.5353658", "text": "def get_youtube_info(youtube_url):\n resource = YouTubeResource(youtube_url)\n return resource.get_resource_info()", "title": "" }, { "docid": "4b7e11da0edffd1a69b31643c4719e4c", "score": "0.53492427", "text": "def download_likes(self, username, count=10):\n\t\tuserid = self.get_user_id(username)\n\t\tlikes = self._get_likes(userid, 
count)\n\t\tif likes is None:\n\t\t\tprint(\"Could not retrieve data.\")\n\t\t\treturn False\n\t\tdownloaded = 0\n\t\tnum_likes = len(likes)\n\t\tif count > 0 and count <= len(likes):\n\t\t\tnum_likes = count\n\t\tprint(\"Preparing to download %s likes...\" % str(num_likes))\n\t\tfor i in range(0, num_likes):\n\t\t\tif likes[i][\"track\"] is None:\n\t\t\t\tcontinue\n\t\t\ttrack_id = likes[i][\"track\"][\"id\"]\n\t\t\tfname = self._get_trackname(likes[i][\"track\"])\n\t\t\tret = self._download_id(track_id, fname, likes[i][\"track\"])\n\t\t\tif ret:\n\t\t\t\tdownloaded += 1\n\t\tprint(\"Successfully downloaded %s likes!\" % str(downloaded))\n\t\treturn True", "title": "" }, { "docid": "ea332bc0cbc10299ab62cddc3d524d6a", "score": "0.5349195", "text": "def complete_existing_song_data():\n engine = get_storage_engine()\n\n youtube_keys = app.YOUTUBE_TO_MYSQL\n\n youtube_handler = YoutubeDataHandler(\n engine=engine,\n token=app.YOUTUBE_TOKEN,\n youtube_keys=youtube_keys,\n search_keys=app.YOUTUBE_ITUNES_SEARCH_KEYWORDS\n )\n\n youtube_handler.complete_all_storage_data(\n output_dir = app.YOUTUBE_OUTPUT_FOLDER,\n limit = app.YOUTUBE_VIDEO_LIMIT,\n itunes_keys=app.ITUNES_TO_MYSQL,\n update = app.YOUTUBE_UPDATE_ENTRIES\n )", "title": "" }, { "docid": "2709b9c32f409eeb248c32a87e737f5e", "score": "0.53437376", "text": "def get_all_api_data(self):\n \n # First we try all the API data present in channel-stats\n video_to_get_by_api = ''\n video_to_get_by_api_nb = 0\n total_videos_got = 0\n\n for video in self._scrapped_videos:\n if video not in self._api_videos:\n # YouTube API takes 50 videos max.\n if video_to_get_by_api_nb == 50:\n print('Calling YouTube API to collect info about 50 videos...')\n self.getVideosFromYouTubeAPI(video_to_get_by_api)\n video_to_get_by_api = ''\n video_to_get_by_api_nb = 0\n if video_to_get_by_api != '':\n video_to_get_by_api += ','\n video_to_get_by_api += video\n video_to_get_by_api_nb += 1\n total_videos_got += 1\n\n for reco in self._scrapped_videos[video]['recommendations']:\n if total_videos_got % 1000 == 0 and total_videos_got > 0:\n self.saveToFile(self._video_to_chan_map, self._video_to_chan_file)\n print('Video to chan made with length ' + repr(len(self._video_to_chan_map)))\n total_videos_got += 1\n\n if reco not in self._api_videos:\n if video_to_get_by_api_nb == 50:\n self.getVideosFromYouTubeAPI(video_to_get_by_api)\n video_to_get_by_api = ''\n video_to_get_by_api_nb = 0\n\n if video_to_get_by_api != '':\n video_to_get_by_api += ','\n video_to_get_by_api += reco\n video_to_get_by_api_nb += 1\n total_videos_got += 1\n \n if video_to_get_by_api != '':\n self.getVideosFromYouTubeAPI(video_to_get_by_api)\n\n for video in self._api_videos:\n self._video_to_chan_map[video] = self._api_videos[video]['snippet']['channelId']\n self.saveToFile(self._video_to_chan_map, self._video_to_chan_file)\n print('New video to chan made with length ' + repr(len(self._video_to_chan_map)))", "title": "" }, { "docid": "fa86bbe1f21053532cd5fce9f9d756a9", "score": "0.5340564", "text": "async def async_process_youtube_playlist(self, playlist_url, **meta):\n\n try:\n info = await self.downloader.safe_extract_info(self.loop, playlist_url, download=False, process=False)\n except Exception as e:\n raise ExtractionError('Could not extract information from {}\\n\\n{}'.format(playlist_url, e))\n\n if not info:\n raise ExtractionError('Could not extract information from %s' % playlist_url)\n\n gooditems = []\n baditems = 0\n for entry_data in info['entries']:\n if entry_data:\n baseurl = 
info['webpage_url'].split('playlist?list=')[0]\n song_url = baseurl + 'watch?v=%s' % entry_data['id']\n\n try:\n entry, elen = await self.add_entry(song_url, **meta)\n gooditems.append(entry)\n except ExtractionError:\n baditems += 1\n except Exception as e:\n baditems += 1\n print(\"There was an error adding the song {}: {}: {}\\n\".format(\n entry_data['id'], e.__class__.__name__, e))\n else:\n baditems += 1\n\n if baditems:\n print(\"Skipped %s bad entries\" % baditems)\n\n return gooditems", "title": "" }, { "docid": "1bad88fed408fda00c99dfd13c8f9f1d", "score": "0.5318123", "text": "def init_youtube_data():\n engine = get_storage_engine()\n\n youtube_keys = app.YOUTUBE_TO_MYSQL\n\n youtube_handler = YoutubeDataHandler(\n engine = engine,\n token = app.YOUTUBE_TOKEN,\n youtube_keys = youtube_keys\n )\n\n youtube_handler.add_from_local_folder(\n folder = app.YOUTUBE_OUTPUT_FOLDER,\n update = app.YOUTUBE_UPDATE_ENTRIES\n )", "title": "" }, { "docid": "070e96d1b52e91c7c1718594949471e4", "score": "0.5316265", "text": "async def get_all_tweet_likes(tweetId: Optional[int] = None, db: Session = Depends(get_db)):\n tweet_likes = []\n if tweetId:\n tweet_likes = crud.get_all_tweet_likes_for_tweet(db, tweetId)\n\n else:\n tweet_likes = crud.get_all_tweet_likes(db)\n\n return [\n schemas.TweetLikeResponseBody(\n tweetId=like.tweet_id,\n userId=like.user.id,\n username=like.user.username\n ) for like in tweet_likes\n ]", "title": "" }, { "docid": "d621ff1d1bf83900e6caff6b9eeaa53e", "score": "0.530755", "text": "def fetch_data(video_ids, brand_name):\n final = []\n\n is_fetched = check_already_fetched(brand_name)\n end_date = get_end_date()\n if is_fetched:\n with open(\"social_handles_data/\" + brand_name + \"/latest.json\", \"r\") as f:\n final = json.load(f)\n return json.dumps(final)\n else:\n all_fb = get_facebook_stories(video_ids, start_date, end_date, brand_name)\n yb = get_youtube_stories(video_ids, start_date, end_date, brand_name)\n all_yb = yb[0]\n title_info = yb[1]\n\n sum_total_views = []\n sum_total_likes = []\n sum_total_comments = []\n i = 0\n\n for i in xrange(0, len(all_fb)):\n each_video = {}\n each_video['facebook'] = {'video_id': all_fb[i][0], 'views':all_fb[i][1], 'likes':all_fb[i][2], 'comments': all_fb[i][3], 'shares': all_fb[i][4], 'video_title': all_fb[i][5], 'video_impressions_unique': all_fb[i][6], \"video_engaged_users\": all_fb[i][7]}\n\n each_video['youtube'] = {\n 'video_id': all_yb[i][0],\n 'views':all_yb[i][1],\n 'likes': all_yb[i][2],\n 'comments':all_yb[i][3],\n 'video_title': get_youtube_title(title_info, all_yb[i][0])\n }\n each_video['fb_yt'] = {\n 'total_views': all_fb[i][1] + all_yb[i][1],\n 'total_likes': all_fb[i][2] + all_yb[i][2],\n 'total_comments': all_fb[i][3] + all_yb[i][3]\n }\n final.append(each_video)\n\n sum_total_views.append(all_fb[i][1] + all_yb[i][1])\n sum_total_likes.append(all_fb[i][2] + all_yb[i][2])\n sum_total_comments.append(all_fb[i][3] + all_yb[i][3])\n\n # i = i + 1\n facebook_grand = get_grand_facebook_stories(all_fb)\n youtube_grand = get_grand_youtube_stories(all_yb)\n\n final.append(facebook_grand['facebook_grand_views'])\n final.append(youtube_grand['youtube_grand_views'])\n final.append(sum(sum_total_views))\n final.append(facebook_grand['facebook_grand_likes'])\n final.append(youtube_grand['youtube_grand_likes'])\n final.append(sum(sum_total_likes))\n final.append(facebook_grand['facebook_grand_comments'])\n final.append(youtube_grand['youtube_grand_comments'])\n final.append(sum(sum_total_comments))\n 
final.append(facebook_grand['facebook_grand_shares'])\n final.append(facebook_grand['facebook_grand_unique_impressions'])\n final.append(facebook_grand['facebook_grand_engaged_users'])\n\n with open(\"social_handles_data/\" + brand_name +'/' + end_date + \"_fb_yt_final.json\", \"w\") as f:\n json.dump(final, f, indent=4)\n\n with open(\"social_handles_data/\" + brand_name +\"/latest.json\", \"w\") as f:\n json.dump(final, f, indent=4)\n\n return json.dumps(final)", "title": "" }, { "docid": "5c1681831f1974dc19fbf11c9c1de747", "score": "0.5286492", "text": "def handler_youtube(self, url, domain, channel):\n youtube_handler_enabled = self.registryValue(\"youtubeHandlerEnabled\", channel=channel)\n developer_key = self.registryValue(\"youtubeDeveloperKey\")\n\n if not youtube_handler_enabled:\n return None\n\n if not developer_key:\n log.info(\"SpiffyTitles: no Youtube developer key set! Check the documentation \\\n for instructions.\")\n return None\n\n log.debug(\"SpiffyTitles: calling Youtube handler for %s\" % (url))\n video_id = self.get_video_id_from_url(url, domain)\n yt_template = self.get_template(\"youtubeTitleTemplate\", channel)\n title = \"\"\n\n if video_id:\n options = {\n \"part\": \"snippet,statistics,contentDetails\",\n \"maxResults\": 1,\n \"key\": developer_key,\n \"id\": video_id\n }\n encoded_options = urlencode(options)\n api_url = \"https://www.googleapis.com/youtube/v3/videos?%s\" % (encoded_options)\n agent = self.get_user_agent()\n headers = {\n \"User-Agent\": agent\n }\n\n log.debug(\"SpiffyTitles: requesting %s\" % (api_url))\n\n request = requests.get(api_url, headers=headers)\n ok = request.status_code == requests.codes.ok\n\n if ok:\n response = json.loads(request.text)\n\n if response:\n try:\n if response[\"pageInfo\"][\"totalResults\"] > 0:\n items = response[\"items\"]\n video = items[0]\n snippet = video[\"snippet\"]\n title = snippet[\"title\"]\n statistics = video[\"statistics\"]\n view_count = 0\n like_count = 0\n dislike_count = 0\n comment_count = 0\n favorite_count = 0\n\n if \"viewCount\" in statistics:\n view_count = \"{:,}\".format(int(statistics[\"viewCount\"]))\n\n if \"likeCount\" in statistics:\n like_count = \"{:,}\".format(int(statistics[\"likeCount\"]))\n\n if \"dislikeCount\" in statistics:\n dislike_count = \"{:,}\".format(int(statistics[\"dislikeCount\"]))\n\n if \"favoriteCount\" in statistics:\n favorite_count = \"{:,}\".format(int(statistics[\"favoriteCount\"]))\n\n if \"commentCount\" in statistics:\n comment_count = \"{:,}\".format(int(statistics[\"commentCount\"]))\n\n channel_title = snippet[\"channelTitle\"]\n video_duration = video[\"contentDetails\"][\"duration\"]\n duration_seconds = self.get_total_seconds_from_duration(video_duration)\n\n \"\"\"\n #23 - If duration is zero, then it\"s a LIVE video\n \"\"\"\n if duration_seconds > 0:\n duration = self.get_duration_from_seconds(duration_seconds)\n else:\n duration = \"LIVE\"\n\n timestamp = self.get_timestamp_from_youtube_url(url)\n yt_logo = self.get_youtube_logo()\n\n compiled_template = yt_template.render({\n \"title\": title,\n \"duration\": duration,\n \"timestamp\": timestamp,\n \"view_count\": view_count,\n \"like_count\": like_count,\n \"dislike_count\": dislike_count,\n \"comment_count\": comment_count,\n \"favorite_count\": favorite_count,\n \"channel_title\": channel_title,\n \"yt_logo\": yt_logo\n })\n\n title = compiled_template\n else:\n log.debug(\"SpiffyTitles: video appears to be private; no results!\")\n\n except IndexError as e:\n log.error(\"SpiffyTitles: 
IndexError parsing Youtube API JSON response: %s\" %\n (str(e)))\n else:\n log.error(\"SpiffyTitles: Error parsing Youtube API JSON response\")\n else:\n log.error(\"SpiffyTitles: Youtube API HTTP %s: %s\" %\n (request.status_code, request.text))\n\n # If we found a title, return that. otherwise, use default handler\n if title:\n return title\n else:\n log.debug(\"SpiffyTitles: falling back to default handler\")\n return self.handler_default(url, channel)", "title": "" }, { "docid": "2685876de10f59d0f0e357d6e84c7c0b", "score": "0.5282614", "text": "def play_starbuck_songs():\n\n import time\n \n url = 'https://www.youtube.com/watch?v=z-sWrPBgiF0'\n\n player = MusicPlayer()\n player.play_url(url)\n\n cur_url = None\n\n count = 0\n while True:\n try:\n #count += 1\n if( count < 10 ) :\n player.skip_if_exists_ad()\n url = player.current_url()\n if( cur_url != url ):\n cur_url = url\n print(cur_url)\n\n #if( count == 20 ):\n #player.stop()\n\n #if( count == 30 ):\n #player.play()\n \n time.sleep(1)\n\n if player.is_unplable():\n print('unplable')\n except:\n break", "title": "" }, { "docid": "aaee440b7aedac0ff4f3f7e72fdb0291", "score": "0.527745", "text": "def filter_yt(info: interceptor.Request):\n url = info.request_url\n if (\n url.host() == \"www.youtube.com\"\n and url.path() == \"/get_video_info\"\n and \"&adformat=\" in url.query()\n ):\n info.block()", "title": "" }, { "docid": "bc06eb5c7e0c32b4567471a01bd64874", "score": "0.5274297", "text": "def update(self):\n\n self.url = 'https://www.youtube.com/embed/' + \\\n self.youtube_hash + '?start=0' + '&rel=0' + '&autoplay=1'\n\n # OMM formatting\n self.omm = 'b.' + self.bitly_hash\n\n if self.title:\n self.omm += '.' + self.title.replace(\" \", \"_\")\n if self.tags and self.tags[0]:\n #if self.tags:\n for tag in self.tags:\n self.omm += '.' 
+ tag\n\n self.html = '<a href=\"' + self.link + '\">' + self.title + '</a>'", "title": "" }, { "docid": "31fc859feff209ebba99a22734af6af9", "score": "0.52716595", "text": "def update_existing_data():\n engine = get_storage_engine()\n\n youtube_keys = app.YOUTUBE_TO_MYSQL\n\n youtube_handler = YoutubeDataHandler(\n engine=engine,\n token=app.YOUTUBE_TOKEN,\n youtube_keys=youtube_keys\n )\n\n youtube_handler.update_all()", "title": "" }, { "docid": "a4ebc3f14232ab5a1badabb017bee0c8", "score": "0.52700746", "text": "def recommend_playlist(moody_user):\n # The service can return results for the following tone IDs:\n # `anger`, `fear`, `joy`, and `sadness` (emotional tones); `analytical`, `confident`,\n # and `tentative` (language tones).\n tone_json = moody(moody_user)\n tones_list = tone_json[\"document_tone\"][\"tones\"]\n\n mood = \"\"\n max_score = -1\n playlist_url = \"\"\n \n for tone in enumerate(tones_list):\n if isemotional(tone[1][\"tone_id\"]) and tone[1][\"score\"] > max_score:\n max_score = tone[1][\"score\"]\n mood = tone[1][\"tone_id\"]\n\n if max_score <= 0:\n return \"Sorry, could not find an emotional tone.\"\n\n if mood == \"anger\":\n playlist_url = \"https://www.youtube.com/watch?v=Q1jE25zn8RU\"\n if mood == \"fear\":\n playlist_url = \"https://www.youtube.com/watch?v=xo1VInw-SKc&list=\"\\\n \t\"PLIeiyBOIivZNqYgeJTdamFTXdQwNzDIiD\"\n if mood == \"joy\":\n playlist_url = \"https://www.youtube.com/watch?v=LjhCEhWiKXk&list=\"\\\n \t\"PL1VuYyZcPYIJTP3W_x0jq9olXviPQlOe1\"\n if mood == \"sadness\":\n playlist_url = \"https://www.youtube.com/watch?v=aJOTlE1K90k&list=\"\\\n \t\"PL4QNnZJr8sRPeLgoOL9t4V-18xRAuqe_f\"\n\n return playlist_url", "title": "" }, { "docid": "68b06508c3b8b013d10b198ee2554ffc", "score": "0.5269722", "text": "async def youtube(self, ctx, *, query: str):\n async with ctx.channel.typing():\n src = Search(query, limit=1).result()\n result = src[\"result\"][0]\n link = result[\"link\"] # noqa\n res = random.choice(\n [\n \"let's try this one\",\n \"give this one a try\",\n \"this might be a good starting point\",\n \"here u go\",\n \"here you go\",\n \"alright this one looks good\",\n \"let's see... 
try this one\",\n \"hope this is what you were looking for\",\n \"i tried :)\\n\",\n \"this is the best one i could find\",\n \"hope this is the right one\",\n ]\n )\n await ctx.send(f\"{res} {link}\")", "title": "" }, { "docid": "aeb48b1f764b4011a92fd99abd06e755", "score": "0.524468", "text": "async def video(self, ctx, *, term):\n video_list = []\n page_token = ''\n\n while True:\n url = 'https://www.googleapis.com/youtube/v3/search' +\\\n '?key=' + self.client.config['yt_key'] +\\\n '&channelId=UCrUL8K81R4VBzm-KOYwrcxQ' +\\\n '&part=snippet,id' +\\\n '&order=date' +\\\n '&maxResults=50'\n\n if page_token:\n url += '&pageToken=' + page_token\n\n async with self.client.session.get(url) as response:\n videos = await response.json()\n\n for video in videos['items']:\n if 'youtube#video' not in video['id']['kind']:\n continue\n video_list.append({\n 'id': video['id']['videoId'],\n 'title': video['snippet']['title']\n })\n\n if 'nextPageToken' not in videos:\n break\n\n page_token = videos['nextPageToken']\n\n to_send = [v for v in video_list if all(\n keyword in v['title'].lower() for keyword in term.lower().split())]\n\n if not to_send:\n response = 'Sorry, no videos found for: ' + term\n await ctx.send(response)\n else:\n to_send = to_send[:5]\n description = [\n f'[{v[\"title\"]}](https://www.youtube.com/watch?v={v[\"id\"]})' for v in to_send]\n description = '\\n'.join(description)\n e = Embed(\n title='Search Results',\n description=description\n )\n await ctx.send(embed=e)", "title": "" }, { "docid": "67741303e9b5913534b95d0d017ea911", "score": "0.52253944", "text": "def scrape_likes(fb_user, session_id):\n\n url = '%slikes?access_token=%s' % (FB_ME, fb_user.access_token)\n r = requests.get(url)\n if r.status_code == 200:\n # while data has next page follow next pages\n # need to use has_next flag because of no do..while loop\n # in python\n has_next = True\n while has_next:\n likes_json = r.json()\n for like_json in likes_json.get('data'):\n # NOTE: using pymongo to query an existing collections\n # unable to use MongoDBManager's filter.\n # Check if object has been previously saved\n mongo_client = MongoClient(settings.MONGODB_HOST)\n db = mongo_client.loans\n collection = db.facebook_likes\n existing = collection.find_one({'object_id': like_json.get('id')})\n # if not existing, get and save current object\n if existing is None:\n like = Like()\n like.user = fb_user.user.id\n like.category = like_json.get('category')\n like.created_time = like_json.get('created_time')\n like.object_name = like_json.get('name')\n like.object_id = like_json.get('id')\n like.raw = like_json\n like.save()\n # check if current request has next page\n # if none mark has_next flag as false\n paging = likes_json.get('paging')\n if paging is not None:\n paging_next = paging.get('next')\n if paging_next is not None:\n r = requests.get(paging_next)\n if r.status_code != 200:\n has_next = False\n else:\n has_next = False\n else:\n has_next = False\n # task completed flag graph task\n try:\n graph_task = GraphTask.objects.get(user=fb_user.user, \n session_id=session_id)\n graph_task.task_likes = True \n graph_task.save() \n except GraphTask.DoesNotExist, e:\n print e", "title": "" }, { "docid": "9dc8b3e359c1ea63ac8d931890aee8da", "score": "0.52141696", "text": "def add_song_to_playlist(self):\n # populate dictionary with our liked songs\n self.get_playlist_videos()\n\n # collect all of uri\n uris = [info[\"spotify_uri\"]\n for song, info in self.all_song_info.items()]\n\n \n # create a new playlist\n playlist_id 
= self.create_playlist()\n\n print(\"[spotify] Adding songs to Spotify playlist ...\")\n\n # add all songs into new playlist\n request_data = json.dumps(uris)\n\n query = \"https://api.spotify.com/v1/playlists/{}/tracks\".format(\n playlist_id)\n\n response = requests.post(\n query,\n data=request_data,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(self.spotify_token)\n }\n )\n\n # check for valid response status\n if response.status_code != 201:\n raise ResponseException(response.status_code)\n\n response_json = response.json()\n print(\"[spotify] Songs addded\")", "title": "" }, { "docid": "4b45b6e83fe408877566b683480fff85", "score": "0.5202603", "text": "def saved_videos(request):\n context = dict()\n context['meta_keyword'] = context['head_title'] = context['page_head_title'] = _('My videos links')\n context['meta_description'] = _(\n 'List of all saved videos'\n )\n queryset = VideoLink.objects.all()\n context['videos_links'] = queryset.filter(poster=request.user.member)\n context['unread_count']: request.user.notifications.unread().count()\n context['notifications']: request.user.notifications.all()\n return render(\n request,\n template_name='vtcuser/saved_videos_list.html',\n context=context,\n )", "title": "" }, { "docid": "da0cc33961bb21fbcccde20abdb24abe", "score": "0.52018374", "text": "def get_music_info(self):\n\n self.url_str = self.text_url.text()\n if not self.get_track():\n return\n\n if not self.is_playlist and not self.is_likes:\n self.artist.setText(self.track.user['username'])\n self.name.setText(self.track.title)\n url = self.modifiy_image_size()\n if url:\n self.image = requests.get(url).content\n self.cover.loadFromData(self.image)\n self.cover = self.cover.scaledToWidth(280)\n self.label_image.setPixmap(self.cover)\n else:\n # Get the last part of URL ( == to playlist name)\n self.album.setText(self.text_url.text().rsplit('/', 1)[-1])\n if self.album.text() != \"\":\n self.text_file.setText(\"%s/%s\" % (self.text_file.text(), self.album.text()))\n if not self.is_likes:\n self.genre.setText(self.track.genre)\n else:\n self.album.setText(self.track.username + \"'s favorites\")", "title": "" }, { "docid": "4058dd799f081603c25176c32b436947", "score": "0.5191355", "text": "def start_import(self):\n print(\"Starting song import...\")\n print(\"Searching Spotify for liked YouTube videos...\")\n self.get_liked_videos()\n if len(self.song_info.items()) == 0:\n print(\"No songs found in liked videos!\")\n return\n playlist_id = self.find_playlist(playlist_name)\n if playlist_id is None:\n playlist_id = self.create_playlist(playlist_name, playlist_description)\n playlist_track_uris = []\n else:\n playlist_track_uris = self.get_playlist_track_uris(playlist_id)\n print(\"Found Spotify playlist titled \" + playlist_name)\n uris_to_add = self.determine_track_uris_to_add(playlist_track_uris)\n if uris_to_add:\n print(\"Adding \" + str(len(uris_to_add)) + \" songs to playlist\")\n playlist_snapshot_id = self.add_tracks_to_playlist(playlist_id, uris_to_add)\n print(\"Current playlist snapshot id: \" + playlist_snapshot_id)\n else:\n print(\"No new songs from YouTube likes to add to playlist\")\n print(\"Import complete!\")", "title": "" }, { "docid": "2eba318a4a6c8d9cc5ec3b6ad0831459", "score": "0.5178755", "text": "def download_songs(self, song_names: list):\n self.logger.add_song_titles(song_names)\n goto_music()\n with youtube_dl.YoutubeDL(self.options) as ydl:\n ydl.download(get_urls(song_names))", "title": "" }, { "docid": 
"e446ef1cec3d4a61d4d78f2bef228ccd", "score": "0.51756895", "text": "def rip_video(url):\n\n class MyLogger(object):\n def debug(self, msg):\n pass\n\n def warning(self, msg):\n pass\n\n def error(self, msg):\n print(msg)\n\n def my_hook(d):\n if d['status'] == 'finished':\n print('Done downloading, now converting ...')\n\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n 'logger': MyLogger(),\n 'progress_hooks': [my_hook],\n 'writeautomaticsub': True,\n 'noplaylist': True\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([url])\n\n try:\n file_paths = (glob('*.mp3')[0], glob('*.vtt')[0])\n except IndexError:\n file_paths = (glob('*.mp3')[0], 'None')\n\n return file_paths", "title": "" }, { "docid": "66199c3333f00fc099f256d0cdb17966", "score": "0.5171898", "text": "def generate_links(self):\n return ['https://www.youtube.com/watch?v='+yt_id\n for yt_id in self.yt_ids]", "title": "" }, { "docid": "d7a533b47731fe3d5b049d6b755f4c60", "score": "0.5166869", "text": "def fetch_extra_trackinfo(self, username: str, info: Dict[str, Any]) -> None:\n try:\n self.log.debug(\"Asking via track\")\n api_result = self.app.track.get_info(\n track=info[\"title\"], artist=info[\"artist\"], username=username\n )\n except lastfm.exceptions.InvalidParameters:\n self.log.warning(\"Last.fm returned InvalidParameters for trackinfo\")\n return\n except KeyError:\n return\n except Exception:\n self.log.exception(\"Got a random exception for trackinfo\")\n return\n\n if \"userplaycount\" in api_result:\n info[\"playcount\"] = int(api_result[\"userplaycount\"])\n\n # getting tags from musicbrainz(if they have id)\n if \"art_mbid\" in info and info[\"art_mbid\"] != \"\":\n mb = musicbrainzngs.get_artist_by_id(info[\"art_mbid\"], includes=\"tags\")\n\n if \"tag-list\" in mb[\"artist\"]:\n sorted_tags = sorted(\n mb[\"artist\"][\"tag-list\"], key=itemgetter(\"count\"), reverse=True\n )\n\n tags = []\n for tag in sorted_tags:\n tags.append(tag[\"name\"])\n info[\"tags\"] = tags\n\n if \"userloved\" in api_result and not info[\"loved\"]:\n info[\"loved\"] = bool(int(api_result[\"userloved\"]))", "title": "" }, { "docid": "2491be5598db7371149c5950151d6b3f", "score": "0.51581836", "text": "def add_youtube_handlers(self):\n self.handlers[\"youtube.com\"] = self.handler_youtube\n self.handlers[\"youtu.be\"] = self.handler_youtube", "title": "" }, { "docid": "3b7bffac9e8fde867a4a65502f71c3b4", "score": "0.51565975", "text": "def download(url_list: list, options=_OPTIONS, \r\n\t\t\ttracklist_in_description=False,\r\n\t\t\tconvert_timestamps=None):\r\n\tdic = {}\r\n\twith youtube_dl.YoutubeDL(options) as ydl:\r\n\t\tresult = ydl.extract_info(url_list[0], download=False)\r\n\t\tif tracklist_in_description:\r\n\t\t\ttracklist = Tracklist.make_tracklist(result['description'], int(result['duration']))\r\n\t\t\tdic['tracklist'] = tracklist\r\n\t\t\tdic['tracklist_duration'] = result['duration']\r\n\t\tif convert_timestamps:\r\n\t\t\ttracklist = Tracklist.make_tracklist_from_timestamps(convert_timestamps)\r\n\t\t\tdic['tracklist'] = tracklist\r\n\t\t\tdic['tracklist_duration'] = result['duration']\r\n\r\n\t\t#dic['title'] = result['title']\r\n\t\t#dic['ext'] = result['ext']\r\n\t\tprint(result['description'])\r\n\t\t# with open('desc.txt','w') as f:\r\n\t\t# f.write(result['description'])\r\n\t\tfilename = ydl.prepare_filename(result)\r\n\t\tdic['title'], dic['ext'] = os.path.splitext(filename)\r\n\t\t# j = 
ydl._pps\r\n\t\tydl.download(url_list)\r\n\t\treturn dic", "title": "" }, { "docid": "7f0b6d1e5bc2acb6ee6aac1216ef4f0e", "score": "0.5155897", "text": "def search_list_related_videos(self, **kwargs):\n return self.try_to_do(self.search_list_related_videos_try, **kwargs)", "title": "" }, { "docid": "65c2374810ad61e32a9cdf9bc59db518", "score": "0.5137426", "text": "def fetch_all_urls(channel_id):\n\n base_video_url = 'https://www.youtube.com/watch?v='\n base_search_url = 'https://www.googleapis.com/youtube/v3/search?'\n\n first_url = base_search_url+'key={}&channelId={}&part=snippet,id&order=date&maxResults=25'.format(api_key, channel_id)\n\n video_links = []\n url = first_url\n while True:\n inp = requests.get(url)\n resp = json.loads(inp.text)\n for i in resp['items']:\n if i['id']['kind'] == \"youtube#video\":\n video_links.append([i['snippet']['title'], base_video_url + i['id']['videoId']])\n\n try:\n next_page_token = resp['nextPageToken']\n url = first_url + '&pageToken={}'.format(next_page_token)\n except:\n break\n return video_links", "title": "" }, { "docid": "b0e75b88e236b3534612206f0b0f3337", "score": "0.51365286", "text": "async def yt(self, ctx):\n url = Youtube.build_url_from_view(ctx.view)\n page = await web.download_page(url)\n\n if not page:\n await ctx.send(\"Couldn't grab the requested page\")\n return\n\n link = Youtube.get_youtube_link(page)\n\n if not link:\n await ctx.send(\"Page returned wasn't Youtube\")\n\n await ctx.send(link)", "title": "" }, { "docid": "117ab64a38d066270e1ea49db63f95ae", "score": "0.51278913", "text": "def get_rec_playlist():\n\n #get spotify creds from the session object\n sess_access_token, sess_refresh_token, sess_token_create_time = session.get(\"access_token\", None) , session.get('refresh_token', None), session.get('token_create', None)\n\n if not sess_access_token or not sess_refresh_token or not sess_token_create_time:\n return \"error, missing token\", 403 # on the frontend, errors from the api call will trigger a page reload \n\n if (datetime.utcnow() - sess_token_create_time).total_seconds() > 3500:\n return \"error, expired token\", 400\n\n #get json data from request \n data = request.get_json()\n\n #fetch recommended tracks and return to user \n tracks = gen_recommendations(data, sess_access_token)\n uris = [i['uri'] for i in tracks] \n resp_dict = {\n \"songs\":tracks,\n \"uris\": json.dumps(uris)\n }\n return jsonify(resp_dict)", "title": "" }, { "docid": "ca0bb3ad36f5d9451757cd3048e613b4", "score": "0.510983", "text": "def save_videos(self):\n\n # First we print the top views videos, for information.\n sorted_videos = sorted(self._api_videos, key=lambda k: int(self._api_videos[k].get('statistics', {}).get('viewCount', -1)), reverse=True)\n print('\\n\\n\\n')\n print('Stats: ')\n # for video in sorted_videos[0:100]:\n # try:\n # print(repr(self._api_videos[video].get('statistics', {})['viewCount']) + ' - ' + self._api_videos[video]['snippet']['title'])\n # except:\n # print('WARNING, A VIDEO IN THE TOP 100 HAS NO VIEWCOUNT')\n self.printGeneralStats()\n\n # Now we save the videos\n print('saving...')\n self.saveToFile(self._channel_stats, self._channel_file)\n self.saveToFile(self._api_videos, self._api_video_file)\n self.saveToFile(self._scrapped_videos, self._scrapped_videos_file)\n self.saveToFile(self._video_to_chan_map, self._video_to_chan_file)\n print('Saved! 
')\n print('')", "title": "" }, { "docid": "54ad0efc4c75453a1cd899f1e50e237a", "score": "0.50990194", "text": "def get_music_url_and_image(emotion):\n ret = []\n ret_count = 4\n username, client_id, client_secret = spotify_credentials\n manager = SpotifyClientCredentials(client_id=client_id, client_secret=client_secret)\n spotify = spotipy.Spotify(client_credentials_manager=manager)\n response = spotify.search(q=emotion, limit=ret_count, type='playlist')\n for i in range(ret_count):\n url = response['playlists']['items'][i]['external_urls']['spotify']\n image = response['playlists']['items'][i]['images'][0]['url']\n title = response['playlists']['items'][i]['name']\n ret.append((url, image, title))\n return ret", "title": "" }, { "docid": "20a12bab97219eed0462dbcbfe146695", "score": "0.50976896", "text": "def my_tracks(self, **kwargs):\n return self._get('me/tracks', **kwargs)", "title": "" }, { "docid": "d06d98ce3e2ed8ff8b07bd96b248ea60", "score": "0.509614", "text": "def convert_youtube_to_mp3():\n if 'url' not in request.args:\n return json.dumps({\"error\": \"Missing 'url' query param\"}), 400\n\n try:\n youtube_url = request.args['url']\n video_id = VideoIDExtractor.extract(youtube_url)\n scraped_filename = app_config.scraper.scrape(video_id, app_config.SCRAPING_OUTPUT)\n scraped_video = ScrapedYoutubeVideo(video_id, app_config.HOST + \"/downloads/\" + scraped_filename)\n except ScraperException:\n return json.dumps({\"error\": \"Failed to scrape the video.\"}), 500\n except VideoIDExtractorError:\n return json.dumps({\"error\": \"Can't fetch a video ID from the provided URL.\"}), 400\n\n return json.dumps(scraped_video.__dict__())", "title": "" }, { "docid": "bc666c37fed38b53c53ebc711828ca4c", "score": "0.5083266", "text": "async def play(self, ctx, url):\n\n yt = YouTube(url)\n GLOBALPATH = os.getcwd() #where vids/audio are at\n\n if GLOBALPATH[0] == '/':\n #MAC PATH\n MUSICPATH = GLOBALPATH + '/Bot Music'\n else:\n #WINDOWS PATH \n MUSICPATH = GLOBALPATH + '\\Bot Music'\n\n vidTitle = yt.views #number of views acts as videoID/title\n yt.streams.first().download(MUSICPATH, vidTitle) #downloads vid to specified path\n vidFileName = vidTitle + \".mp4\"\n audioFileName = yt.title + \".mp3\"\n video = VideoFileClip(os.path.join(MUSICPATH,vidFileName)) #creates video clip\n video.audio.write_audiofile(os.path.join(MUSICPATH, audioFileName)) #creates audio clip from video clip\n os.remove(os.path.join(MUSICPATH,vidFileName)) #deletes video file\n\n try:\n self.member = ctx.message.author\n self.server = ctx.message.guild\n self.user_vc = self.member.voice.channel\n\n if self.bot_vc == self.user_vc:\n await ctx.send(\"But I'm already here!\")\n\n else:\n if self.user_vc == None:\n await ctx.send(\"Please join a Voice Channel first so I know where to go\")\n\n else:\n await self.user_vc.connect()\n self.bot_vc = self.user_vc\n \n except discord.errors.ClientException:\n await ctx.send(\"Sorry, I am currently in the voice channel \\\"\" + self.bot_vc.name + \"\\\"\")\n\n except AttributeError:\n await ctx.send(\"An error has occured in joining! 
Please try again\")", "title": "" }, { "docid": "230f69338837d08c873a2ab68e16e9c1", "score": "0.5080656", "text": "def getBrowserUploadInfo(story=None):\n\n keywords = [\"OurStories\"]\n\n\n if story:\n vid_title = story.title\n vid_description = story.summary\n try:\n vid_lat = float(story.latitude)\n vid_long = float(story.longitude)\n except (ValueError, TypeError):\n vid_lat = None\n vid_long = None\n else:\n vid_title = \"OurStories uploaded video\"\n vid_description = \"This video has been uploaded to OurStories, a UNICEF project.\"\n vid_lat = None\n vid_long = None\n\n # Login to youtube\n client = gdata.youtube.service.YouTubeService(email=YOUTUBE_EMAIL,\n password=YOUTUBE_PASSWORD,\n source=YOUTUBE_SOURCE,\n client_id=YOUTUBE_CLIENT_ID,\n developer_key=YOUTUBE_DEVELOPER_KEY)\n client.ProgrammaticLogin()\n \n # Prepare a media group object to hold our video's meta-data\n mediaGroup = gdata.media.Group(title=gdata.media.Title(text=vid_title),\n description=gdata.media.Description(description_type='plain', text=vid_description),\n keywords=gdata.media.Keywords(text=','.join(keywords)),\n # See the scheme URL below for information on the allowed categories (\"Nonprofit\" may also be a good fit, but I felt \"People\" is a little bit more descriptive) \n category=gdata.media.Category(text='People',\n scheme='http://gdata.youtube.com/schemas/2007/categories.cat',\n label='People'),\n player=None)\n \n # Prepare a geo.where object to hold the geographical location of where the video was recorded\n if vid_lat and vid_long:\n where = gdata.geo.Where()\n where.set_location((vid_lat, vid_long))\n # create the gdata.youtube.YouTubeVideoEntry to be uploaded\n videoEntry = gdata.youtube.YouTubeVideoEntry(media=mediaGroup, geo=where)\n else:\n videoEntry = gdata.youtube.YouTubeVideoEntry(media=mediaGroup)\n\n (url, token) = client.GetFormUploadToken(videoEntry)\n\n return (url, token)", "title": "" }, { "docid": "67aaa7f0bf8db08202f8fa0f6eff0e4d", "score": "0.50774324", "text": "def scrap_the_video(self, video_id, channel_to_counts):\n print('------> Scrapping video '+ video_id)\n\n # Get recommendations for video id either from scrapping or memory.\n recos = self.get_recommendations(video_id)\n\n # Now we get all the recommendations. 
If we don't have info on the video, we need to get some.\n video_to_get_by_api = ''\n\n for reco in recos:\n if reco not in self._api_videos and reco not in self._video_to_chan_map:\n if video_to_get_by_api != '':\n video_to_get_by_api += ','\n video_to_get_by_api += reco\n if video_to_get_by_api != '':\n self.getVideosFromYouTubeAPI(video_to_get_by_api)\n\n for reco in recos:\n # Sometimes we are skipping videos that we can't get access to.\n try:\n reco_channel = self.getChannelForVideo(reco)\n except KeyError:\n continue\n channel_to_counts[reco_channel] = channel_to_counts.get(reco_channel, 0) + 1", "title": "" }, { "docid": "80a4d0503f28afdfa08ad646e1bbab75", "score": "0.5077117", "text": "def get_hot_songs(self, artist_id):\n url = get_artist_url(artist_id)\n result = self.get_request(url)\n return result['hotSongs']", "title": "" }, { "docid": "df886b31cfb9fe72ee997db5ea8a03a0", "score": "0.5073125", "text": "def get_videos(self, id):\n video_list = []\n try: \n movie = tmdb.Movies(id)\n response = movie.videos()\n for link in response['results']:\n video_list.append(self.youtube_url + link['key'])\n except:\n tv = tmdb.TV(id)\n response = tv.videos()\n for link in response['results']:\n video_list.append(self.youtube_url + link['key'])\n\n return video_list", "title": "" }, { "docid": "5c751ba46fe9ba83d2c82c4c787a22ce", "score": "0.5035267", "text": "def __init__(self, num_of_gifts, favorite_song = 'https://www.youtube.com/watch?v=76WFkKp8Tjs'):\n self.num_of_gifts = num_of_gifts\n self.song = favorite_song", "title": "" }, { "docid": "4c98776947811477b69688cda6fed6d7", "score": "0.5028843", "text": "def getVideosFromYouTubeAPI(self, video_to_get_by_api):\n \n # API call to YouTube.\n video_infos = self._youtube_client.videos_list_multiple_ids(\n part='snippet,contentDetails,statistics',\n id=video_to_get_by_api)\n\n # Storing the date of scrapping up to the second\n scrapDate = time.strftime('%Y%m%d-%H%M%S')\n\n # Converting format and updating the video to channel map\n for video in video_infos['items']:\n video['scrapDate'] = scrapDate\n self._api_videos[video['id']] = video\n self._video_to_chan_map[video['id']] = video['snippet']['channelId']\n if 'snippet' not in video:\n video['snippet'] = {}\n if 'channelTitle' not in video['snippet']:\n video['snippet']['channelTitle'] = ''\n\n try:\n name = video['snippet']['channelTitle']\n self._channel_id_to_name[video['snippet']['channelId']] = name\n except:\n print('UNKNOWN CHANNEL FOUND FROM API CALL, CHANNEL WAS PROBABLY DELETED')\n self._channel_id_to_name[video['snippet']['channelId']] = 'unknown channel'\n\n try:\n id_ = video['snippet']['channelId']\n self._channel_name_to_id[video['snippet']['channelTitle']] = id_\n except:\n print('UNKNOWN CHANNEL FOUND FROM API CALL, CHANNEL WAS PROBABLY DELETED')\n self._channel_name_to_id[video['snippet']['channelTitle']] = 'unknown channel'", "title": "" }, { "docid": "ad157cd60017aaf5029783a76b981bfa", "score": "0.5027284", "text": "def list(self, request):\n liked_songs = LikedSong.objects.all() # This is my query to the database\n serializer = LikedSongSerializer(\n liked_songs, many=True, context={'request': request})\n return Response(serializer.data)", "title": "" }, { "docid": "57fc13393ec7194ba26a501923ba7962", "score": "0.50203586", "text": "def sort_playlists_by_likes(qst):\n qst = qst.order_by('-' + 'likes')\n return qst", "title": "" }, { "docid": "9f527365d1fc524086f58a58b581a71e", "score": "0.5019205", "text": "def my_playlists(self, **kwargs):\n return 
self._get('me/playlists', **kwargs)", "title": "" }, { "docid": "50c50c5cfbf301e5245c8ac7c8a67336", "score": "0.5012378", "text": "def request_upload_id_playlist(youtube, channel_id):\r\n request = youtube.channels().list(\r\n part=\"contentDetails\",\r\n id=channel_id,\r\n maxResults=50\r\n )\r\n response = request.execute()\r\n print(type(response))\r\n items_tag = response.get('items')\r\n print(type(items_tag)) # Type list\r\n\r\n for data_dic in items_tag:\r\n print('... Extract dict in list ') # Type dict\r\n \r\n for key, value in data_dic.items():\r\n if key == 'contentDetails':\r\n print(f\"{key} and {value}\")\r\n \r\n for key_contentDetails, value_contentDetails in value.items():\r\n if key_contentDetails== 'relatedPlaylists':\r\n print(f\"{key_contentDetails} and {value_contentDetails}\")\r\n\r\n for key_relatedPlaylists, value_relatedPlaylists in value_contentDetails.items():\r\n if key_relatedPlaylists == 'uploads':\r\n print(f'{key_relatedPlaylists} and {value_relatedPlaylists}')\r\n result_upload_playlist_id = value_relatedPlaylists\r\n print(result_upload_playlist_id)\r\n\r\n return result_upload_playlist_id", "title": "" }, { "docid": "e46d1dddb593c193d59051569b9b1f0b", "score": "0.50106364", "text": "def fetch_all_channel_videos(channel_url: str):\n\n api_url = f\"https://www.googleapis.com/youtube/v3/search?key={api_key}\"\n\n params = {\n \"part\": \"snippet\",\n \"order\": \"date\",\n \"maxResults\": 50\n }\n\n channel_id = channel_url.split(\"/\")[-1]\n channel_url = api_url + f\"&channelId={channel_id}\"\n\n index = 1\n page_token = None\n while True:\n next_page_token = f\"&pageToken={page_token}\" if page_token else ''\n channel_url = channel_url + next_page_token\n\n res = requests.get(channel_url, params=params)\n data = res.json()\n\n save_to_json_file(f\"json/videos_{index}.json\", data)\n print(f\"Файл: json/videos_{index}.json записан!\")\n\n index += 1\n sleep(2)\n\n page_token = data.get(\"nextPageToken\")\n if not page_token:\n return True", "title": "" }, { "docid": "ca086aafcf401e9cdece03393de5cee1", "score": "0.5004164", "text": "def make_video_to_chan_map(self):\n\n print('Making video to chan map, current has length '+ repr(len(self._video_to_chan_map)))\n\n video_to_get_by_api = ''\n video_to_get_by_api_nb = 0\n total_videos_got = 0\n\n # First looking at all scrapped videos, and calling YouTube API to get more info about them\n for video in self._scrapped_videos:\n # Looking for video if not in the api_videos\n if video not in self._api_videos and video not in self._video_to_chan_map:\n # The API calls allow to have information about 50 videos, so we call it when\n # we reach that number\n if video_to_get_by_api_nb == 50:\n self.getVideosFromYouTubeAPI(video_to_get_by_api)\n video_to_get_by_api = ''\n video_to_get_by_api_nb = 0\n\n if video_to_get_by_api != '':\n video_to_get_by_api += ','\n video_to_get_by_api += video\n video_to_get_by_api_nb += 1\n total_videos_got += 1\n\n # Getting api information about all recommendations\n for reco in self._scrapped_videos[video]['recommendations']:\n if total_videos_got % 1000 == 0 and total_videos_got > 0:\n self.saveToFile(self._video_to_chan_map, self._video_to_chan_file)\n print('Video to chan saved with length ' + repr(len(self._video_to_chan_map)))\n total_videos_got += 1\n\n if reco not in self._api_videos and reco not in self._video_to_chan_map:\n # The API calls allow to have information about 50 videos, so we call it when\n # we reach that number\n if video_to_get_by_api_nb == 50:\n 
self.getVideosFromYouTubeAPI(video_to_get_by_api)\n video_to_get_by_api = ''\n video_to_get_by_api_nb = 0\n\n if video_to_get_by_api != '':\n video_to_get_by_api += ','\n video_to_get_by_api += reco\n video_to_get_by_api_nb += 1\n total_videos_got += 1\n \n # Get the remaining videos if there are some.\n if video_to_get_by_api != '':\n self.getVideosFromYouTubeAPI(video_to_get_by_api)\n\n # Update the video to channel map.\n for video in self._api_videos:\n self._video_to_chan_map[video] = self._api_videos[video]['snippet']['channelId']\n self.saveToFile(self._video_to_chan_map, self._video_to_chan_file)\n print('Video to chan made with length ' + repr(len(self._video_to_chan_map)))", "title": "" }, { "docid": "b943ff66f0a6c21f6c3b68787b970b68", "score": "0.498144", "text": "def spotify(request):\n\n import sync\n from roary.models import Song,User\n access_token = request.GET.get('code')\n uni = request.session.get('uni')\n if access_token and uni:\n logged_in = sum(1 for result in User.objects.filter(uni=uni)) > 0\n if logged_in:\n x = User.objects.filter(uni=uni)[0]\n stamp = x.spotify_music_time if x.spotify_music_time and x.spotify_music_time != '' else 0\n songs = sync.spotify_sync(access_token,float(stamp))\n for song in songs:\n exists = sum(1 for result in Song.objects.filter(url=song['url'])) > 0\n\n a= Song(name=song['name'],url=song['url'],year=song['year'],genre=song['genre'],artist=song['artist']) if not exists else Song.objects.filter(url=song['url'])[0]\n a.duration= 0\n a.plays= song.get('plays',0) if not exists else a.plays+song['plays']\n a.users = 0 if not exists else a.users+1\n a.art = song.get('art') if song.get('art') else a.art\n a.save()\n x.spotify_music_time = str(time.time())\n return redirect('/index')\n else:\n return redirect(sync.spotify_link())", "title": "" }, { "docid": "963b7df8cf9297e2d23d5e59e6127ba1", "score": "0.49787518", "text": "def test_youtube_urls(self):\n for url in VALID_YOUTUBE_URLS:\n self.assertEqual(Entry.youtube_url_to_id(url), \"asdf123456\",\n msg=\"failing URL: %s\" % url)", "title": "" }, { "docid": "2e75fb0fe4b12e95ab67509bedc37507", "score": "0.49782583", "text": "def youtube_search(self, lookup_string):\n\n search_params = {\n 'part' : 'snippet',\n 'q' : lookup_string,\n 'maxResults' : 10,\n 'type' : 'video',\n 'order' : 'relevance',\n 'key' : settings.YOUTUBE_DATA_API_KEY\n }\n\n r = requests.get(self.SEARCH_URL, params = search_params)\n results = r.json()['items']\n video_id_list = []\n\n for result in results: \n video_id_list.append(result['id']['videoId'])\n\n main_watch_url = \"http://www.youtube.com/watch?v=\"\n\n video_links = []\n for id in video_id_list:\n video_links.append(main_watch_url +id)\n\n self.generate_video_link_file(video_links)\n\n return video_links", "title": "" }, { "docid": "c7df1bf166b984ea25a04c45a550917a", "score": "0.4970643", "text": "def get_related_artists(): \n\n artist_id = request.json['currentArtistID']\n\n res = spotify.get_related_artists(artist_id)\n\n return res", "title": "" }, { "docid": "71b2eedfbd0c197da6520c6a723e4fcc", "score": "0.49670404", "text": "def getChannelVideosForUser(self, username=\"enockglidden\", fname=None):\n self.query = \"channelSearch: username=%s\" % username\n if fname == None:\n fname = \"%s_data.json\" % username\n videoIds = []\n channels_response = self.youtube.channels().list(\n forUsername=username,\n part=\"contentDetails\"\n ).execute()\n\n print((json.dumps(channels_response, indent=True)))\n\n for channel in channels_response[\"items\"]:\n # From the 
API response, extract the playlist ID that identifies the list\n # of videos uploaded to the authenticated user's channel.\n uploads_list_id = channel[\"contentDetails\"][\"relatedPlaylists\"][\"uploads\"]\n\n print((\"Videos in list %s\" % uploads_list_id))\n\n # Retrieve the list of videos uploaded to the authenticated user's channel.\n playlistitems_list_request = self.youtube.playlistItems().list(\n playlistId=uploads_list_id,\n #part=\"snippet,recordingDetails\",\n part=\"snippet\",\n maxResults=50\n )\n\n while playlistitems_list_request:\n playlistitems_list_response = playlistitems_list_request.execute()\n\n # Print information about each video.\n for playlist_item in playlistitems_list_response[\"items\"]:\n title = playlist_item[\"snippet\"][\"title\"]\n video_id = playlist_item[\"snippet\"][\"resourceId\"][\"videoId\"]\n print((\"%s (%s)\" % (title, video_id)))\n if 0:\n print((json.dumps(playlist_item, indent=4, sort_keys=True)))\n print()\n videoIds.append(video_id)\n playlistitems_list_request = self.youtube.playlistItems().list_next(\n playlistitems_list_request, playlistitems_list_response)\n video_ids = \",\".join(videoIds)\n print((\"video_ids:\", video_ids))\n self.processIds(video_ids)\n self.saveRecs(fname)", "title": "" }, { "docid": "18655d967bb9e38fbe9933f24c65fcfc", "score": "0.49658564", "text": "def get_playlist_info(\n self, use_proxy=True, use_cache=True, youtube_skip_download=True, options=None\n ):\n youtube_extract_options = dict(\n skip_download=youtube_skip_download, extract_flat=True\n )\n if options:\n youtube_extract_options.update(options)\n return self._get_youtube_info(\n use_proxy=use_proxy, use_cache=use_cache, options=youtube_extract_options\n )", "title": "" }, { "docid": "7ef3857dcb535403ffaa8df8504f6fe3", "score": "0.49616146", "text": "def __init__(self,\n video_id: Union[str, list] = '',\n dev_key: str = '',\n json: Optional[dict] = None,\n client: Optional[googleapiclient.discovery.Resource] = None):\n # The videos requested to lookup\n self.requested_yt_ids: List[str] = []\n self.current_item = None\n\n\n if isinstance(video_id, str):\n self.requested_yt_ids = [video_id]\n elif isinstance(video_id, List):\n self.requested_yt_ids = video_id\n else:\n raise ValueError(\"video_id is not a List or List\")\n\n # The youtube data api client\n if client:\n self.client = client\n elif dev_key:\n self.client = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=dev_key)\n else:\n raise ValueError(\"`video_id` and `dev_key` must be specified together\")\n\n if video_id:\n self._get_video_metadata(video_id)\n else:\n self._multi_metadata = []\n\n if json:\n # Removes a double list when slicing results\n # The `items` value holds a list of video metadata results.\n # The index of that list tells the current result..\n self._multi_metadata = [x for x in json[\"items\"]]\n\n self.iter_enumerate: Iterator[Tuple[int, dict]] = iter(enumerate(self._multi_metadata, start=0))\n self.current_index = None", "title": "" }, { "docid": "bc0426225726488662c042b52a9c69ba", "score": "0.49603936", "text": "def setChannelNames(pages):\n #videoIds = [item[\"snippet\"][\"resourceId\"][\"videoId\"] for playlistItem in pages for item in playlistItem[\"items\"]]\n videoIds = []\n for playlistItems in pages:\n for items in playlistItems[\"items\"]:\n videoIds.append(items[\"snippet\"][\"resourceId\"][\"videoId\"])\n \n for i in range(1, playlistItems[\"pageInfo\"][\"totalResults\"]):\n videoIdsString = \"\"\n if i % RESULTS_PER_PAGE == 0 or i == 
playlistItems[\"pageInfo\"][\"totalResults\"] - 1: #Every 50 or on the last iteration.\n for id in videoIds[:RESULTS_PER_PAGE]: #Generate the string of ids to put into the API request.\n videoIdsString += \"{},\".format(id)\n \n videoIdsString = videoIdsString[:-1] #Remove last ','\n videoIds = videoIds[RESULTS_PER_PAGE:]\n \n videos = youtube.videos().list(\n part=\"snippet\",\n id=videoIdsString,\n fields=\"items(snippet(channelTitle))\",\n maxResults=RESULTS_PER_PAGE\n ).execute()\n\n #Associate the channelTitles with their respective videos.\n j = 0\n for items in pages[int((i - 1) / 50)][\"items\"]:\n if j > len(videos[\"items\"]) - 1:\n #print(j)\n break\n \n items[\"snippet\"][\"resourceId\"][\"uploader\"] = videos[\"items\"][j][\"snippet\"][\"channelTitle\"]\n j+=1", "title": "" }, { "docid": "5abb2a19828f9112131254fa7d2eefeb", "score": "0.49569875", "text": "def go_pafy(raw_song, meta_tags=None):\n if internals.is_youtube(raw_song):\n track_info = pafy.new(raw_song)\n else:\n track_url = generate_youtube_url(raw_song, meta_tags)\n\n if track_url:\n track_info = pafy.new(track_url)\n else:\n track_info = None\n\n return track_info", "title": "" }, { "docid": "310bc0626c9b123307d575f6225247a6", "score": "0.49563643", "text": "async def fetch_playlist(spotify, username, playlist_id, previous_date):\n\tresults = spotify.user_playlist(\n\t\tusername, playlist_id, fields='tracks,next,name')\n\ttracks = results['tracks']\n\tnew_songs = []\n\twhile True:\n\t\tfor item in tracks['items']:\n\t\t\ttrack = item['track'] if 'track' in item else item\n\t\t\ttry:\n\t\t\t\ttrack_url = track['external_urls']['spotify']\n\t\t\t\tif convert_time(item['added_at']) > previous_date:\n\t\t\t\t\tnew_songs.append(track_url)\n\t\t\texcept (KeyError, UnicodeEncodeError) as e:\n\t\t\t\tlogging.warn(u'Skipping track {0} by {1} (local only?)'.format(\n\t\t\t\t\ttrack['name'], track['artists'][0]['name']))\n\t\t# 1 page = 50 results\n\t\t# check if there are more pages\n\t\tif tracks['next']:\n\t\t\ttracks = spotify.next(tracks)\n\t\telse:\n\t\t\tbreak\n\treturn new_songs", "title": "" }, { "docid": "7adca18d5036609f8afa3dd5d3f8edea", "score": "0.49515924", "text": "def download_media(update: Update, context: CallbackContext):\n query = update.callback_query\n query.edit_message_text(text=\"Parsing...\")\n assert isinstance(context.user_data, dict)\n url = context.user_data[\"url\"]\n logger.info(f\"Video URL to download: '{url}'\")\n media_type = query.data.split(\"_\")[1]\n name, thumbnail = extractYt(url)\n unique_id = str(uuid4().int)\n ydl_opts = {\"outtmpl\": f\"{unique_id}.%(ext)s\", 'noplaylist': True}\n if media_type == \"mp3\":\n ydl_opts[\"format\"] = \"bestaudio/best\"\n ydl_opts[\"postprocessors\"] = [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192'\n }]\n else:\n ydl_opts[\"format\"] = \"best\"\n ydl_opts['postprocessors'] = [{\n 'key': 'FFmpegVideoConvertor',\n 'preferedformat': 'mp4'\n }]\n query.edit_message_text(text=\"Downloading...\")\n with yt_dlp.YoutubeDL(ydl_opts) as ydl:\n ydl.download([url])\n media_name = unique_id + \".\" + media_type\n\n # upload the media file\n query = update.callback_query\n query.answer()\n query.edit_message_text(text=\"Uploading...\")\n update.callback_query.answer()\n logger.info(\"Uploading the file..\")\n with open(media_name, mode='rb') as video_file:\n assert isinstance(update.effective_message, Message)\n update.effective_message.reply_document(document=video_file,\n filename=name + \".\" +\n media_type,\n 
caption=name,\n thumb=thumbnail,\n quote=True)\n logger.info(\"Upload finished.\")\n if os.path.exists(media_name):\n os.remove(media_name)", "title": "" }, { "docid": "b6cea44daf0b80801d673dd78cb47a07", "score": "0.49453843", "text": "def like_retweet(self, hashtag, total_tweets, commentsList, like_probability, comment_probability, retweet_probability, follow_probability ):\n comments = 0\n bot = self.bot \n print('==>Moving to results of hashtag: ', hashtag)\n time.sleep(randfloor(2,4)) \n\n # fetches the latest tweets with the provided hashtag \n bot.get( \n 'https://twitter.com/search?q=%23' + hashtag+'&src=typed_query&f=live'\n ) \n \n time.sleep(randfloor(1,3)) \n \n # using set so that only unique links \n # are present and to avoid unnecessary repetition \n links = set() \n \n\n iteration = 0\n\n while len(links) < total_tweets:\n # executing javascript code \n # to scroll the webpage \n bot.execute_script( \n 'window.scrollTo(0, document.body.scrollHeight)'\n ) \n \n time.sleep(randfloor(1,2)) \n\n # using list comprehension \n # for adding all the tweets link to the set \n # this particular piece of code might \n # look very complicated but the only reason \n # I opted for list comprehension because is \n # lot faster than traditional loops \n \n [ \n links.add(elem.get_attribute('href')) for elem in bot.find_elements_by_xpath(\"//a[@dir ='auto']\") \n ] \n print('===>looped at iteration: ', iteration, ' Total links=', len(links), '\\n', links,'\\n')\n iteration += 1\n\n\n print('==>Reducing the extra links if more than total_tweets')\n print('==>total links before: ', len(links))\n if len(links) > total_tweets:\n \n for i in range(total_tweets,len(links)):\n links.pop()\n else:\n print('==>links length is smaller so skipping')\n print('==>total links after: ', len(links))\n\n print('==>Starting now visiting all the tweet links')\n\n tweetIter = 1\n for link in links: \n \n print('===>Moving to tweet: ', tweetIter, '/', len(links))\n time.sleep(randfloor(2, 4)) \n\n bot.get(link) \n \n try: \n\n print('===>Liking the tweet if random probability is <= like_probability: ', like_probability)\n time.sleep(randfloor(3, 5)) \n like_prob = randint(1,10)\n print ('probability found to be: ',like_prob)\n \n if (like_prob <= like_probability):\n print('===>So liking the tweet')\n\n bot.find_element_by_css_selector( \n '.css-18t94o4[data-testid =\"like\"]'\n ).click() \n\n time.sleep(randfloor(3,6)) \n\n print('===>Commenting the tweet if random probability is <= comment_probability: ', comment_probability)\n time.sleep(randfloor(3, 5)) \n comment_prob = randint(1,10)\n print('===>Comment probability found to be: ', comment_prob)\n\n\n if comment_prob <= comment_probability:\n print('===>So commenting')\n\n bot.find_element_by_xpath(\"//div[@aria-label='Reply']\").click()\n\n\n commentIndex = randint(0,len(commentsList)-1)\n print('===>printing comment number {0}, which is {1}'.format(commentIndex+1, commentsList[commentIndex]))\n\n actions = ActionChains(bot) \n\n actions.send_keys(commentsList[commentIndex]).perform()\n time.sleep(randfloor(1,3))\n\n bot.find_element_by_xpath(\"//div[@data-testid='tweetButton']\").click()\n\n comments += 1\n print('===>Commented! 
now waiting few seconds')\n time.sleep(randint(3,6))\n else: \n print('So skipping commenting')\n\n print('===>Retweeting the tweet if random probability is <= retweet_probability: ', retweet_probability)\n time.sleep(randfloor(2, 3)) \n\n retweet_prob = randint(1,10)\n if (retweet_prob <= retweet_probability):\n print('===>probability found to be: ',retweet_prob,' so retweeting the tweet')\n\n # retweet button selector \n bot.find_element_by_css_selector( \n '.css-18t94o4[data-testid =\"retweet\"]'\n ).click() \n \n time.sleep(randfloor(1, 3)) \n print('===>Confirming retweet without comment')\n # initializes action chain \n actions = ActionChains(bot) \n # sends RETURN key to retweet without comment \n actions.send_keys(Keys.RETURN).perform() \n time.sleep(randfloor(1, 3)) \n else: \n print('===>probability found to be: ',retweet_prob,' so skipping retweeting the tweet')\n\n else:\n print('===>So skipping liking the tweet (as well as any other action on this tweet)')\n\n except Exception as ex:\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n print (message)\n pass\n tweetIter+=1\n print('===>Going back')\n time.sleep(randfloor(2,4))\n bot.execute_script(\"window.history.go(-1)\")\n print('===>Finished liking and retweeting function, now moving to home')\n # fetches the main homepage \n bot.get('https://twitter.com/')", "title": "" }, { "docid": "02bb58013adc354afb20d89d36d53088", "score": "0.4934004", "text": "def add_from_lastfm(self, title, artist, *, commit=False):\n created, sid = self.insert_or_ignore_title_artist(title, artist)\n # Have to let this non-commit go by first\n similars = self.c.execute(\"\"\"\n SELECT title, artist, similarity\n FROM simple_similars\n LEFT JOIN simple_song_id\n ON simple_id_other = simple_id\n WHERE simple_id_orig = ?\n \"\"\", (sid,)).fetchall()\n if similars:\n return similars\n # if self.c.execute(\"SELECT simple_id_orig FROM simple_similars WHERE simple_id_orig = ?\", (sid,)).fetchone():\n # # print(\"Already added song {0} by {1}\".format(title, artist))\n # return list()\n print(\"Adding song {0} by {1}\".format(title, artist))\n j = lastfm_api.track_similars(title, artist, last_call=True)\n # fixed_attributes = j['similartracks'].get('@attr', dict())\n # title = fixed_attributes.get(title, title)\n # artist = fixed_attributes.get(artist, artist)\n all_similar_details = j['similartracks'].get('track', list())\n if commit:\n print(\"Recieved {0} similar songs\".format(len(all_similar_details)))\n if len(all_similar_details) == 0:\n self.c.execute(\"INSERT INTO simple_similars(simple_id_orig) VALUES(?)\", (sid,))\n all_similar_details = [lastfm_api.read_sim_track_json(track_dict) for track_dict in all_similar_details]\n for other_title, other_artist, other_sim in all_similar_details:\n # Check to see if song exists\n other_created, other_sid = self.insert_or_ignore_title_artist(other_title, other_artist)\n self.c.execute(\"INSERT INTO simple_similars(simple_id_orig, simple_id_other, similarity) VALUES(?, ?, ?)\", (sid, other_sid, other_sim))\n if commit:\n self.conn.commit()\n return all_similar_details", "title": "" }, { "docid": "04d58c9d2e1352a4f2886f6a56290840", "score": "0.49274677", "text": "def fetch_playlists(self):\n playlists_offset = 0\n\n while True:\n playlists = self.sp.current_user_playlists(limit=50, offset=playlists_offset)\n for playlist in playlists['items']:\n current_list = {\n 'name': playlist['name'],\n 'collaborative': 
playlist['collaborative'],\n 'public': playlist['public'],\n 'uri': playlist['uri'],\n 'owner_id': playlist['owner']['id'],\n 'tracks': []\n }\n\n tracks_offset = 0\n while True:\n playlist_tracks = self.sp.user_playlist_tracks(\n user=current_list['owner_id'], playlist_id=playlist['id'],\n fields='items(is_local,track(uri,id,name,artists.name,album.name)),next',\n limit=100,\n offset=tracks_offset\n )\n for playlist_item in playlist_tracks['items']:\n track = playlist_item['track']\n if track: # Fixed bug; sometimes the Spotify API returns just {'is_local': False, 'track': None}\n track_id = track['id']\n self._log.debug('Doing track {}'.format(track_id))\n\n # Local tracks doesn't have a Spotify ID, so we'll use its Spotify URI as an ID instead\n if playlist_item['is_local']:\n track_id = track['uri']\n\n current_list['tracks'].append(track_id)\n if track_id not in self.all_tracks:\n self._log.debug('Adding new track {} to track index'.format(track['name']))\n self.all_tracks[track_id] = {\n 'name': track['name'],\n 'artists': [artist['name'] for artist in track['artists']],\n 'album': track['album']['name']\n }\n else:\n self._log.warning('API returned None for a track in list {}'.format(current_list['name']))\n\n if playlist_tracks['next'] is None:\n break\n tracks_offset += 100\n\n self._log.debug('List {} has {} tracks'.format(current_list['name'], len(current_list['tracks'])))\n self.all_playlists.append(current_list)\n\n if playlists['next'] is None:\n break\n playlists_offset += 50\n\n self._log.debug('Playlists fetched')\n self._log.debug('Total num lists: {}'.format(len(self.all_playlists)))\n self._log.debug('Total num unique tracks: {}'.format(len(self.all_tracks)))", "title": "" }, { "docid": "d976bbfc52bf485ac4c183a76de5839c", "score": "0.4925109", "text": "async def ryt(self, ctx):\n url = Youtube.build_url_from_view(ctx.view)\n page = await web.download_page(url)\n\n if not page:\n await ctx.send(\"Couldn't grab the requested page\")\n return\n\n link = Youtube.get_youtube_link(page, random=True)\n\n if not link:\n await ctx.send(\"Page returned wasn't Youtube\")\n\n await ctx.send(link)", "title": "" }, { "docid": "3fdf2057f765318a87433cc398a6dc60", "score": "0.4920599", "text": "def get_from_html(html):\n soup = BeautifulSoup(html, \"lxml\")\n return {\"https://youtube.com\" + x.get(\"href\") for x in soup.find_all(id=\"video-title\")}", "title": "" }, { "docid": "0a6adc156c778b40665f5eea43b3ab2e", "score": "0.49102834", "text": "def get_youtube_video(video_name, save_folder):\n url = get_video_title_url(video_name)\n\n return download_mp3(url, save_folder)", "title": "" }, { "docid": "21464150d0b188821c51777ac5873e30", "score": "0.49100143", "text": "def update_tweet_output(\n n_clicks, video_id, types\n ):\n if n_clicks is not None:\n return get_youtube_predictions(\n video_id, types, pipeline\n )", "title": "" }, { "docid": "e2999d719c8bd527295e4a399ee7bbe9", "score": "0.49033", "text": "def get_youtube_data(topic, max_videos, region_code, key):\n youtube = build('youtube', 'v3', developerKey=key)\n\n # Search for videos on YouTube\n video_search_response = youtube.search().list(\n part='id, snippet',\n type='video',\n topicId=topic,\n maxResults=max_videos,\n regionCode=region_code,\n order='viewCount'\n ).execute()\n\n videos = {}\n\n for video in video_search_response.get('items', []):\n # Search for the video's channel info\n channel_search_response = youtube.channels().list(\n id=video['snippet']['channelId'],\n part='snippet'\n ).execute()\n channel = 
channel_search_response.get('items', [])[0]\n\n # Channel's not always have a specified country. In these cases, use the region code.\n country = channel['snippet']['country'] if 'country' in channel['snippet'] else region_code\n\n videos[video['id']['videoId']] = {'title': video['snippet']['title'],\n 'description': video['snippet']['description'],\n 'thumbnail': video['snippet']['thumbnails']['high']['url'],\n 'channel': channel['snippet']['title'],\n 'country': country,\n }\n\n return videos", "title": "" }, { "docid": "4a333d7c87d3cda773ae36d2ea71ce6e", "score": "0.4899078", "text": "def run(self):\n # allow 5 reattempts if error in fetching YouTube videos\n # else just get loaded videos by overriding error handling\n if self.reattempt_count > 5:\n self.override_error = True\n\n try:\n videos_dict = utils.get_youtube_content(\n self.playlist_link, self.override_error\n )\n if not videos_dict:\n # if empty videos_dict returns, throw invalid url warning.\n self.loadStatus.emit(\"invalid url\")\n else:\n self.loadStatus.emit(\"success\")\n self.countChanged.emit(videos_dict, True)\n\n except RuntimeError as error: # handle error from video load fail\n error_message = str(error)\n if any(\n message in error_message\n for message in [\"not a valid URL\", \"Unsupported URL\", \"list\"]\n ):\n self.loadStatus.emit(\"invalid url\")\n elif \"nodename nor servname provided\" in error_message:\n self.loadStatus.emit(\"server error\")\n else:\n self.loadStatus.emit(\"reattempt\")\n self.reattempt_count += 1\n self.run()", "title": "" }, { "docid": "9e8c8205ff5b12b0fec267c16dc5ca74", "score": "0.48973277", "text": "def list_tubes_watched(self):\n request = Bunch(cmd=b'list-tubes-watched', ok=['OK'], read_body=True,\n parse_yaml=True)\n resp = yield Task(self._interact, request)\n raise Return(resp)", "title": "" }, { "docid": "fa3955212a9855a9c354cb76316b6e98", "score": "0.4886508", "text": "def _ExtractYouTubeSearchQuery(self, url):\n return self._ExtractSearchQueryFromURL(url)", "title": "" }, { "docid": "d2fc5c5754ac9a2050ebecd5f774ec91", "score": "0.48811084", "text": "def get_tracks(self):\n return self.get_relations('/music/artist/track', ['/music/recording/artist'])", "title": "" }, { "docid": "765f5eb64d1669ab6717bb761393696e", "score": "0.4876252", "text": "def status(verbose=0, raw=False, _override={}, _return_parsed=False):\n res = Spotify.request('me/player', method='GET')\n if not res:\n raise NoPlaybackError\n\n if raw:\n if verbose >= 0:\n import json\n click.echo(json.dumps(res))\n\n return res\n\n # raw\n data = {}\n data['is_podcast'] = res['currently_playing_type'] == 'episode'\n if data['is_podcast']:\n raise PodcastNotSupported\n\n data['is_shuffle'] = res['shuffle_state']\n data['repeat_state'] = res['repeat_state']\n data['is_playing'] = res['is_playing']\n data['device'] = {\n 'name': res['device']['name'],\n 'type': res['device']['type'],\n 'volume': res['device']['volume_percent'],\n }\n item = res['item']\n context = parse_context(res['context'])\n\n data['music'] = {\n 'context': context,\n 'track': parse_track(item),\n 'album': parse_album(item['album']),\n 'artist': parse_artists(item['artists']),\n }\n music = data['music']\n music['track']['progress'] = format_duration_ms(res['progress_ms'])\n\n # parsed\n if _override:\n data.update(_override)\n\n # artist: name, id, url return first entry\n for key in ['name', 'id', 'url']:\n music['artist'][key] = music['artist'][key + 's'][0]\n music['artist']['long_' + key] = ', '.join(music['artist'][key + 's'])\n if key != 
'name':\n music['artist']['long_' + key] = (\n music['artist']['long_' + key].replace(' ', '')\n )\n\n playback_status = 'Playing' if data['is_playing'] else 'Paused'\n playback_options = []\n if data['repeat_state'] == 'track':\n playback_options.append('repeat [track]')\n elif data['repeat_state'] == 'context':\n playback_options.append('repeat')\n\n if data['is_shuffle']:\n playback_options.append('shuffle')\n playback_str = ''\n if data['is_playing']:\n playback_options_str = '{}'.format(\n 'on {}'.format(' and '.join(playback_options) + ', ')\n if playback_options else ''\n )\n playback_str = \"({}{}% volume)\".format(\n playback_options_str, data['device']['volume']\n )\n\n if _return_parsed:\n return data\n\n # output\n if not verbose:\n click.echo(\n '{}: {}{}\\n'\n ' {} - {}'\n .format(\n playback_status,\n ' ' if not data['is_playing'] else '',\n music['track']['name'],\n music['artist']['long_name'],\n music['album']['name']\n )\n )\n\n if verbose >= 1:\n click.echo(\n 'Track {} ({} / {})\\n'\n 'Artist {}\\n'\n 'Album {}\\n'\n 'Status {} {}'\n .format(\n music['track']['name'],\n music['track']['progress'],\n music['track']['duration'],\n music['artist']['long_name'],\n music['album']['name'],\n playback_status,\n playback_str\n )\n )\n\n if verbose >= 2:\n click.echo(\n '\\n'\n 'Device {} ({})\\n'\n 'URL {}'\n .format(\n data['device']['name'],\n data['device']['type'],\n music['track']['url']\n )\n )\n\n return", "title": "" }, { "docid": "48844455e3a17fbdb0313ff210062b78", "score": "0.4874771", "text": "def video_get_track_description(self):\n return track_description_list(libvlc_video_get_track_description(self))", "title": "" }, { "docid": "fbce9f939d394837a9d79263d0f595fe", "score": "0.48732117", "text": "async def splay(self, ctx, playlist_url):\n\n results = spotify.user_playlist_tracks(user=\"\",playlist_id=playlist_url)\n track_list = []\n\n song_number = len(results[\"items\"])\n await ctx.channel.send(f\"Adding {song_number} songs to queue, it can take about {song_number} seconds.\")\n\n for i in results[\"items\"]:\n\n if (i[\"track\"][\"artists\"].__len__() == 1):\n\n track_list.append(i[\"track\"][\"name\"] + \" - \" + i[\"track\"][\"artists\"][0][\"name\"])\n\n else:\n name_string = \"\"\n\n for index, b in enumerate(i[\"track\"][\"artists\"]):\n name_string += (b[\"name\"])\n\n if (i[\"track\"][\"artists\"].__len__() - 1 != index):\n name_string += \", \"\n\n track_list.append(i[\"track\"][\"name\"] + \" - \" + name_string) \n\n for track in track_list:\n await self.play(ctx=ctx, query=track)", "title": "" } ]
f70b95f2a81e1ba3b0a84c67134f2d8b
Retrieves the exam schedule link from McGill's Exam website.
[ { "docid": "19386bba2c459029b87044113e1f7109", "score": "0.641286", "text": "async def exam(self, ctx):\n await ctx.trigger_typing()\n\n r = requests.get(MCGILL_EXAM_URL)\n soup = BeautifulSoup(r.content, \"html.parser\")\n r.close()\n link = soup.find(\"a\", href=re.compile(\"exams/files/exams\"))[\"href\"]\n\n if link[:2] == \"//\":\n link = \"https:\" + link\n\n exam_schedule = discord.Embed(\n title=\"Latest Exam Schedule\", description=\"{}\".format(link))\n\n await ctx.send(embed=exam_schedule)", "title": "" } ]
[ { "docid": "78e21614f6c76a4272bedf073cc94e46", "score": "0.5595076", "text": "def scrape_schedules():", "title": "" }, { "docid": "f8d0628902171ac3ac65d518d92c14ad", "score": "0.53754807", "text": "def get_schedule_url (user_id, year=2011, semester=Semester.Winter):\n\n semester_id = \"%d%s\" % (year, semester)\n return \"http://ug.technion.ac.il/rishum/weekplan.php?RGS=%s&SEM=%s\" % (user_id, semester_id)", "title": "" }, { "docid": "07f28de7ecb4e7d9af0f11271aa9ee98", "score": "0.5246155", "text": "def get_schedule():\n print 'main:get_schedule - called.'\n calendar = gcal.fetch_calendar()\n return calendar", "title": "" }, { "docid": "0050386c358a61b5664668e0d0846ac5", "score": "0.512166", "text": "async def exam(\n self,\n ctx: commands.Context,\n subj: str = \"\",\n course_num: str = \"\",\n sec_numb: str = \"\",\n crn: str = \"\",\n ) -> None:\n sched_heading, table_heading, exams_parsed = TodayAtMun.get_exams(\n subj, course_num, sec_numb, crn\n )\n embed = self.today_embed_template()\n embed.title = sched_heading\n embed.add_field(name=table_heading, value=\"\\u200b\", inline=False)\n for exam in exams_parsed:\n embed.add_field(name=\" | \".join(exam), value=\"\\u200b\", inline=False)\n await ctx.send(embed=embed)", "title": "" }, { "docid": "b7ca62ef9f46c5ac911966556c7ff850", "score": "0.50629956", "text": "def get_schedule(sched_id):\n pass", "title": "" }, { "docid": "c0182d62ba6ce1c7ac6131703530342b", "score": "0.50054234", "text": "def get_schedule_html (tz, password):\n # OP and NEXTOP seem to be the original page and the destination page\n login_url = \"http://techmvs.technion.ac.il:100/cics/wmn/wmnnut02?OP=LI&NEXTOP=WK\"\n\n # Post data - I'm not sure what Login.x and Login.y represent\n post_data = { \"UID\":tz, \"PWD\":password, \"Login.x\":\"22\",\"Login.y\":\"20\"}\n\n # Request the page\n r = requests.post(login_url, post_data)\n\n # Convert Hebrew to utf8\n html = r.content.decode('iso-8859-8').encode('utf-16')\n #html = get_display(r.content, encoding='iso-8859-8')\n #f = open(os.path.expanduser(\"~/out.txt\"), \"w\")\n #f.write(html + \"\\n\\n\\n\" + html2)\n #f.close()\n #print html2\n\n # Get the right <table> and clean the html\n table = str(BeautifulSoup(html).findAll(\"table\", width=\"100%\")[1])\n table = table.replace(\"&nbsp;\", \" \")\n table = table.replace(\"<br />\", \"\\n\")\n table = table.replace(\"'\", \"\")\n #table = table.replace(\"תרגול \", \"תרגול \")\n #table = table.replace(\"הרצאה \", \"הרצאה \")\n return table", "title": "" }, { "docid": "9336d073538749a148c43186f35059d1", "score": "0.4961524", "text": "def workout_url(self, workout_id):\n return \"http://connect.garmin.com/activity/\" % (int(workout_id))", "title": "" }, { "docid": "4acf575534979b0bfe53808a320beb4b", "score": "0.4956269", "text": "def getshowboyurl(show):\n if not show.showboyid: raise AttributeError(\"Show does not have a Showboy ID\")\n return CALENDARURL.format(showboyid=show.showboyid)", "title": "" }, { "docid": "87c131ca048fd2eaee50b1937b7dafa1", "score": "0.48275137", "text": "def schedule(console_url: str):\n return f\"{console_url}ias/v1/schedules\"", "title": "" }, { "docid": "c9023f112cd49db06e19c8fe5272cf17", "score": "0.48191413", "text": "def url(self):\n return \"http://upcoming.yahoo.com/event/\" + str(self.upcoming_event_id) + \"/\"", "title": "" }, { "docid": "33fde17b9e9e1d6a103feb7c367d7767", "score": "0.47667196", "text": "def get_episode_quality_link(self, url: str, sess: requests.sessions.Session) -> Optional[str]:\n logger.debug(\"Getting episode's 
quality download link\")\n\n with sess.get(url) as resp:\n logger.debug(\"Getting url content\")\n data = resp.content\n\n logger.debug(\"Creating beautiful soup parser\")\n soup = BeautifulSoup(data, \"html.parser\")\n\n dl_pattern = re.compile(\n r'^Click to Download Episode \\d{1,6}(.+)? in HD Mp4 Format$',\n re.IGNORECASE\n )\n dl_pattern2 = re.compile(\n r'^Click to Download Episode \\d{1,6}(.+)? in Mp4 Format$',\n re.IGNORECASE\n )\n\n logger.debug(\"Using beautiful soup object to find elements matching dl_pattern regex\")\n element = soup.find(\"a\", text=dl_pattern)\n if not element:\n element = soup.find(\"a\", text=dl_pattern2)\n\n if element:\n logger.debug(\"Getting href from beautiful soup element\")\n return element.get('href')\n\n logger.warning(\"No episode quality links found\")", "title": "" }, { "docid": "af023a2f0d5028b94cb9ca65e008e301", "score": "0.47447157", "text": "def get_link(extension=None):\n\tif extension is None:\n\t\tr = requests.get(imdb_url)\n\telse:\n\t\tr = requests.get(imdb_url+'/'+extension)\n\treturn r.text", "title": "" }, { "docid": "4c5adecf04f188354f840c64a5a6c866", "score": "0.47411972", "text": "def display_schedule(self, schedule):", "title": "" }, { "docid": "bc4b1d599531dd739fe300d1c8595cb5", "score": "0.47183472", "text": "def get_wmv_link(clip_url):\n clip_id = clip_url.split('/')[-1]\n \n url = '%s/silverlight/getmediaxml.ashx?id=%s&hastighet=2000&vissuper=True' % (BASE_URL_WEBTV, clip_id)\n \n page = HTML.ElementFromURL(url, cacheTime=CACHE_HTML_INTERVAL)\n \n if page is None:\n Log('Error fetching URL from %s' % url)\n return None\n \n # Find the video URL\n try:\n mms_link = page.xpath('//mediadefinition/mediaitems/mediaitem/mediaurl')[0].text\n except:\n mms_link = None\n \n Log('%s -> %s' % (clip_url, mms_link))\n return mms_link", "title": "" }, { "docid": "2cb69244fda0e69550291fddd6da19d7", "score": "0.47149023", "text": "def schedule(self) -> str:\n return pulumi.get(self, \"schedule\")", "title": "" }, { "docid": "293e01a2c19827ddd8f01832118b1752", "score": "0.47073242", "text": "def rtm_url(self):\n return self._get(urls['rtm'])['url']", "title": "" }, { "docid": "d1d5683838bbd109405954b4b524f10f", "score": "0.47028285", "text": "def pythondotorg_individual_job_scraper(link):\n\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:73.0) Gecko/20100101 Firefox/73.0\"\n }\n page = requests.get(link, headers=headers)\n soup = BeautifulSoup(page.content, \"html.parser\")\n\n full_text = soup.find(\"div\", class_=\"job-description\").get_text()\n\n sleep(randint(1, 10))\n\n return full_text", "title": "" }, { "docid": "35092136ff595ae67ef34cf395d69d8f", "score": "0.4694904", "text": "def get_absolute_url(self):\n return ('eventtime_detail', None, {\n 'year': self.start_time.strftime(\"%Y\").lower(),\n 'month': self.start_time.strftime(\"%b\").lower(),\n 'day': self.start_time.strftime(\"%d\").lower(),\n 'slug': self.event.slug, \n 'eventtime_id': self.id,\n })", "title": "" }, { "docid": "d1a110aa8b8567226ecfeb5470f79e46", "score": "0.46806994", "text": "def get_schedule(self):\n url = self.get_schedule_url_for_season(self.season)\n soup = get_soup_from_content(get_html_from_url(url))\n\n month = \"\"\n\n games = []\n game_entries = self.get_game_entries(soup)\n for game in game_entries:\n if game['class'][0] == \"month-title\":\n if re.search(r'[a-zA-Z]{3}', game.text):\n month = game.text\n continue\n # Skip the separator elements\n if game['class'][0] == \"month-sep\":\n continue\n\n # See if there is 
actually a game here.\n if not game.find('td', class_='e_date'):\n continue\n\n # They do not provide a common location field, so\n # we have to assume it is not there. Sometimes it's given in the\n # notes column but that is not standard.\n location = None\n # Site\n site = self.get_game_site(game)\n # Opponent\n opponent = self.get_game_opponent(game)\n # Links\n links = self.get_game_media_urls(game)\n # Timestamp\n game_date = self.get_game_date(game, month, self.season.years())\n game_time = self.get_game_time(game)\n # Game ID\n game_id = self.get_gameid_from_date_time(game_date, game_time)\n # Conference\n conference = self.get_game_conference(game)\n\n game = ScheduleEntry(game_id, game_date, game_time, opponent, site,\n location, links, conference,\n self.season.league, self.season.id,\n self.team_id, self.is_women)\n games.append(game)\n\n return games", "title": "" }, { "docid": "434aac88d3e22e3a95db8be10ddc5918", "score": "0.4678309", "text": "def fetch_schedule(email: str, api: object=api, is_student: bool=True) -> list:\n if is_student:\n return api.schedule(email)\n return api.schedule(email, receiverType=1)", "title": "" }, { "docid": "5e795287157a6a4afcff0d3a7af9193c", "score": "0.46719748", "text": "def schedule(self):\n from models import storage\n try:\n students = self.students\n schedule = []\n for student in students:\n lessons = student.lesson_logs\n if student.first_name and student.last_name:\n full_name = student.first_name + ' ' + student.last_name\n else:\n full_name = student.first_name\n for lesson in lessons:\n if lesson.lesson_time:\n schedule.append([lesson.lesson_time,\n full_name, student.id])\n #my_calendar = self.calendar()\n #lessons = my_calendar.to_dict()[\"lessons\"]\n return schedule\n except:\n return None", "title": "" }, { "docid": "14b3962d4dc834c30310ea4dbadd521f", "score": "0.46540886", "text": "def get_announce_url(self):\n return self.announce_url", "title": "" }, { "docid": "55d5a75f9722a9eeeca5a85a8a0d821e", "score": "0.46383503", "text": "def scrape_schedules_list(alias: str):\n host = _host()\n token = _token(alias=alias)\n\n url = urljoin(host, \"v1/scrape-schedules\")\n headers = {\"Authorization\": token}\n response = requests.get(url, headers=headers)\n if response.status_code >= 400:\n try:\n msg = f\"{Color.LIGHT_RED}{response.json()['message']}{Color.END}\\n\"\n click.echo(msg)\n sys.exit(1)\n except (json.JSONDecodeError, KeyError):\n msg = (\n f\"{Color.LIGHT_RED}Something went wrong.{Color.END}\\n\"\n f\"Got status code {response.status_code} and reponse {response.text}.\"\n )\n click.echo(msg)\n sys.exit(1)\n try:\n data = json.dumps(response.json()[\"data\"], indent=2)\n except (json.JSONDecodeError, KeyError):\n data = response.text\n click.echo(data)", "title": "" }, { "docid": "fe9b26fc72c242665e0c53ba0c6fdc6c", "score": "0.46263227", "text": "def test_03_ExtractSched(self):\n l_yaml = config_tools.Yaml(self.m_pyhouse_obj).read_config_file(CONFIG_NAME)\n l_sched = self.m_config._extract_light_schedule(l_yaml['Schedules'][0]['Light'])\n print(PrettyFormatAny.form(l_sched, 'F1-03-A - Sched', 190))", "title": "" }, { "docid": "19faec2856dc11ec134befa4e839ebd4", "score": "0.46253872", "text": "def findBeamlineSchedule(beamlineName, runName):\n\n runScheduleServiceClient, beamlineScheduleServiceClient, beamline = setup_connection()\n try:\n result = beamlineScheduleServiceClient.service.findBeamlineSchedule(beamlineName, runName)\n except SAXParseException as ex:\n print \"ERROR in findBeamlineSchedule\\n\"\n 
traceback.print_exc()\n sys.exit(2)\n\n return result", "title": "" }, { "docid": "b8055e59db709f54821459413d1b520c", "score": "0.46142575", "text": "def schedule():\n posts = [\n Post(\n name=\"episode_{0}\".format(index + 1),\n submit_at=datetime(2018, 1, 6 + index * 7, 17, 0, 0),\n subreddit=\"anime\",\n title=\"Slow Start - Episode {0} Discussion\".format(index + 1),\n body_template=\"*Slow Start*, Episode {0}\".format(index + 1),\n ) for index in range(0, 3)\n ]\n return Schedule(subreddit=\"anime\", posts=posts)", "title": "" }, { "docid": "6a4cbb821129aeb0608834d16edb2c08", "score": "0.46130717", "text": "def fetch_schedule_htmls():\n new_or_modified = 0\n\n mainpage_html = fetch_url(THEATER_BASEURL)\n\n soup = BeautifulSoup(mainpage_html, \"html5lib\")\n\n links = soup.find_all(\"a\")\n links = [x.get(\"href\") for x in links]\n cal_links = [x for x in links if x.startswith(\"calendars\")]\n cal_links = list(set(cal_links))\n if \"calendars/index.html\" in cal_links:\n cal_links.remove(\"calendars/index.html\")\n cal_links = [urllib.parse.quote(x) for x in cal_links]\n\n new_files = []\n old_files = []\n for cal_link in cal_links:\n\n cache_date = find_last_cachefile_date(Path(cal_link).name)\n\n this_html = fetch_url(THEATER_BASEURL + cal_link, newer_than_date=cache_date)\n\n if this_html:\n cache_filename = make_cache_filename(Path(cal_link).name)\n\n with open(cache_filename, \"wb\") as cache_fh:\n cache_fh.write(this_html)\n\n new_files.append(cache_filename)\n new_or_modified += 1\n else:\n cache_filename = find_last_cachefile(Path(cal_link).name)\n if cache_filename:\n old_files.append(cache_filename)\n\n # inform user on links and new/modified calendars\n print(\n \"%d calendar link%s found on %s\"\n % (len(cal_links), \"s\" if len(cal_links) > 1 else \"\", THEATER_BASEURL)\n )\n print(\n \"%d calendar%s that %s new or modified\"\n % (\n new_or_modified,\n \"s\" if new_or_modified != 1 else \"\",\n \"are\" if new_or_modified != 1 else \"is\",\n )\n )\n\n return (new_files, old_files)", "title": "" }, { "docid": "7c963cca439a6078e8a445200fb58a40", "score": "0.46122026", "text": "def schedule_url(year, stype, week):\n xmlurl = 'http://www.nfl.com/ajax/scorestrip?'\n if stype == 'POST':\n week += 17\n if week == 21: # NFL.com you so silly\n week += 1\n url = '%sseason=%d&seasonType=%s&week=%d' % (xmlurl, year, stype, week)\n parselog(url)", "title": "" }, { "docid": "604f693cbc46cdcd133be4df2855fd12", "score": "0.45809627", "text": "def schedule_expression(self) -> str:\n return pulumi.get(self, \"schedule_expression\")", "title": "" }, { "docid": "05a8777a092433e3602d6daaab92a0dd", "score": "0.4579413", "text": "def get_run_url(self, action_id):\n return 'https://app.globus.org/flows/%s/runs/%s' % (self.flow_id,action_id)", "title": "" }, { "docid": "95fba51fccecfd9a5ec0b25ec0d6629c", "score": "0.45739025", "text": "def imsdbScraper(link):\n page = requests.get(link)\n soup = BeautifulSoup(page.content, \"html.parser\")\n script = soup.find(\"td\", {\"class\": \"scrtext\"})\n return script.get_text()", "title": "" }, { "docid": "8d1f6ceef7e9d3358f368146cdb3195a", "score": "0.45705658", "text": "def today_schedule():\n return pass", "title": "" }, { "docid": "0ee78d7e41727d9dd5e11ed85f06737d", "score": "0.4539566", "text": "def getChanSchedule(self, srcDate, linkTmpl):\n\n lastWeekDate = getLastWeekDay(srcDate)\n srcDates = [srcDate, lastWeekDate]\n thisWeekDateRange = getThisWeekDateRange(srcDate)\n scheduleItems = []\n\n for date in srcDates:\n monDate = getThisMonday(date)\n 
year, mon, day = splitDate(monDate)\n link = linkTmpl.format(year, mon, day)\n\n self._params[\"date\"] = monDate\n self._params[\"move\"] = link\n\n html = getHtml(self._url, self._params, self._sleep,\n self._maxRetryCnt)\n soup = bs4.BeautifulSoup(html)\n scheduleItems.extend(self._extSchedule(soup, date))\n\n scheduleItems = [s for s in scheduleItems\n if s[0] in thisWeekDateRange]\n scheduleItems = sorted(scheduleItems, key=getFirstElem)\n schedule = []\n\n for dstDate, scheduleGroup in itertools.groupby(scheduleItems,\n key=getFirstElem):\n scheduleElem = {\n \"date\": dstDate,\n \"daySchedule\": []\n }\n for _, time, title, episodeNum in scheduleGroup:\n scheduleElem[\"daySchedule\"].append({\"time\": time,\n \"title\": title,\n \"episodeNum\": episodeNum})\n\n schedule.append(scheduleElem)\n\n return schedule", "title": "" }, { "docid": "b44547248df9a5149a0e9e39386fb797", "score": "0.45355082", "text": "def schedule(request):\n response = http.HttpResponse(content_type='text/javascript')\n\n schedule = cache.get('schedule')\n if not schedule:\n schedule = urllib2.urlopen('https://us.pycon.org/2012/schedule/json').read()\n cache.set('schedule', schedule, 120)\n response.write(schedule)\n return response", "title": "" }, { "docid": "91de91c3de23eb4c250a454bd1b09ab0", "score": "0.4526568", "text": "def schedule_url(year, stype, week):\r\n xmlurl = 'http://www.nfl.com/ajax/scorestrip?'\r\n if stype == 'POST':\r\n week += 17\r\n if week == 21: # NFL.com you so silly\r\n week += 1\r\n return '%sseason=%d&seasonType=%s&week=%d' % (xmlurl, year, stype, week)", "title": "" }, { "docid": "691e555dd5f7a4689715496303cb33a0", "score": "0.4526254", "text": "def get_absolute_url(self):\n return ('view-reminder', (), {'slug': self.slug})", "title": "" }, { "docid": "43487eabf3a3a211497f30c58a237bc2", "score": "0.4519099", "text": "def submit_url():\n return \"https://class.coursera.org/\" + URL + \"/assignment/submit\"", "title": "" }, { "docid": "74fc6279657f82b5d578d72138a89141", "score": "0.45091185", "text": "def agent_url(self):\n try:\n agent = self._data.find('ul', {'class': 'links'})\n links = agent.find_all('a')\n return links[1]['href']\n except Exception as e:\n if self._verbose:\n logger.error(e.message)\n return", "title": "" }, { "docid": "2ab14d206e67e6d382495691dd08f90d", "score": "0.44949904", "text": "def get_games_for_day(date=datetime.date.today() - datetime.timedelta(days=1)):\n\n url = 'http://gd2.mlb.com/components/game/mlb/year_' + str(date.year) + '/month_'\n if date.month < 10:\n url += '0'\n url += str(date.month) + '/day_'\n if date.day < 10:\n url += '0'\n url += str(date.day)\n r = requests.get(url)\n games = re.findall(r'href=\"(gid_\\d{4}_\\d{2}_\\d{2}_[^_]{6}_[^_]{6}_\\d)', r.text)\n game_links = [url + '/' + x for x in games]\n return game_links", "title": "" }, { "docid": "4b235e6bc3670c8959cca08ff32d2bb4", "score": "0.44827047", "text": "def challenge_url():\n return \"https://class.coursera.org/\" + URL + \"/assignment/challenge\"", "title": "" }, { "docid": "74a9b8031bb587202cc1bce3b0dc631c", "score": "0.44758108", "text": "def href(self):\n return self.entry.self_link", "title": "" }, { "docid": "c19bb08ecfcadbf2df72ca9edb2e67a9", "score": "0.44690502", "text": "def get_time_links(self):\r\n\r\n try:\r\n # Opens page at given url\r\n html = urlopen(self.url)\r\n\r\n except HTTPError as err:\r\n # Prints error message if unable to connect to Internet\r\n print(err)\r\n\r\n else:\r\n # Instantiates BeautifulSoup object\r\n bsObj = BeautifulSoup(html, 
\"lxml\")\r\n\r\n # Identifies anchor tags on page as time links and adds them to list as strings\r\n time_list = []\r\n time_links = bsObj.findAll(\"a\", {\"href\": \"#\"})\r\n \r\n for link in time_links:\r\n time_list.append(link.get_text())\r\n return time_list", "title": "" }, { "docid": "ed1abcdc31106416b6d8373a5f88021c", "score": "0.44682243", "text": "def schedule(self) -> Optional[str]:\n return pulumi.get(self, \"schedule\")", "title": "" }, { "docid": "5fc0fa45b675bded7f99b961a1868ddc", "score": "0.44590497", "text": "def RunWebscraper():\n driver = GetDriver()\n driver.get(\"https://pubmed.ncbi.nlm.nih.gov/\")\n print(driver.current_url)\n time.sleep(5)\n driver.quit()", "title": "" }, { "docid": "5ec8aa0bce8d1814cfd1672809b7f4d0", "score": "0.44520435", "text": "def XtractHref(self):\n return \"https://www.jesuismort.com\"+self.a['href']", "title": "" }, { "docid": "e03649412867f102a3531963d406219c", "score": "0.44371444", "text": "def get_event_calendar(URL):\n event_calendar = {'events': {}}\n event_calendar['what events do you have'] = ''\n response = requests.get(URL)\n\n content = response.content\n\n soup = BeautifulSoup(content, 'html.parser')\n\n result = soup.find_all(class_= 'details')\n for el in result:\n h1 = el.find('h1').get_text()\n h2 = el.find('h2').get_text()\n event_calendar['events'][f'{h1}'] = {}\n event_calendar['events'][f'{h1}']['when is'] = el.find('h2').get_text() + ' ' + el.find('h2').find_next_sibling('h2').get_text()\n event_calendar['events'][f'{h1}']['where is'] = el.find('h2').find_next_sibling().find_next_sibling().get_text()\n event_calendar['events'][f'{h1}']['what description'] = el.find('p').get_text().split('.')[0]\n event_calendar['events'][f'{h1}']['Tell me about'] = el.find('p').get_text().split('.')[0]\n event_calendar['what events do you have'] += h1 + ', '\n\n return event_calendar", "title": "" }, { "docid": "ac6f8223d51320906908850ddd099592", "score": "0.44318485", "text": "def href(self) -> str:\n return f\"https://coppermind.net/wiki?curid={self._pageid}\"", "title": "" }, { "docid": "73b3aea49459394228c533b2e42141b2", "score": "0.44172135", "text": "def get_schedule(course_code):\n # get tutoring schedule from a local .json file.\n with open(f'json_files/tutoring_hours/{course_code}.json', encoding='UTF8') as file:\n return json.load(file)", "title": "" }, { "docid": "3b072ec1bd4a18cdf735e9228c8d3cc3", "score": "0.44017625", "text": "def _getlink(self):\n return self._link", "title": "" }, { "docid": "1f7a8946827526c9db57b2dad7558e14", "score": "0.43966123", "text": "def get(self, now=None):\n if not now:\n now = datetime.datetime.now()\n values = {'request': self.request}\n cron_info = _ParseCronYaml()\n values['cronjobs'] = []\n values['now'] = str(now)\n if cron_info and cron_info.cron:\n for entry in cron_info.cron:\n job = {}\n values['cronjobs'].append(job)\n if entry.description:\n job['description'] = entry.description\n else:\n job['description'] = '(no description)'\n if entry.timezone:\n job['timezone'] = entry.timezone\n job['url'] = entry.url\n job['schedule'] = entry.schedule\n schedule = groctimespecification.GrocTimeSpecification(entry.schedule)\n matches = schedule.GetMatches(now, 3)\n job['times'] = []\n for match in matches:\n job['times'].append({'runtime': match.strftime(\"%Y-%m-%d %H:%M:%SZ\"),\n 'difference': str(match - now)})\n self.generate('cron.html', values)", "title": "" }, { "docid": "821f33ba136c641a9380c18b217b1238", "score": "0.43883485", "text": "def week_schedule(year, stype, week):\r\n url = 
schedule_url(year, stype, week)\r\n try:\r\n dom = xml.parse(urllib2.urlopen(url))\r\n except urllib2.HTTPError:\r\n print >> sys.stderr, 'Could not load %s' % url\r\n return []\r\n\r\n games = []\r\n for g in dom.getElementsByTagName(\"g\"):\r\n gsis_id = g.getAttribute('eid')\r\n games.append({\r\n 'eid': gsis_id,\r\n 'wday': g.getAttribute('d'),\r\n 'year': year,\r\n 'month': int(gsis_id[4:6]),\r\n 'day': int(gsis_id[6:8]),\r\n 'time': g.getAttribute('t'),\r\n 'meridiem': None,\r\n 'season_type': stype,\r\n 'week': week,\r\n 'home': g.getAttribute('h'),\r\n 'away': g.getAttribute('v'),\r\n 'gamekey': g.getAttribute('gsis'),\r\n })\r\n\r\n for game in games:\r\n h = int(game['time'].split(':')[0])\r\n m = int(game['time'].split(':')[1])\r\n if 0 < h <= 5: # All games before \"6:00\" are PM until proven otherwise\r\n game['meridiem'] = 'PM'\r\n\r\n if game['meridiem'] is None:\r\n\r\n days_games = [g for g in games if g['wday'] == game['wday']]\r\n preceeding = [g for g in days_games if g['eid'] < game['eid']]\r\n proceeding = [g for g in days_games if g['eid'] > game['eid']]\r\n\r\n # If any games *after* this one are AM then so is this\r\n if any(g['meridiem'] == 'AM' for g in proceeding):\r\n game['meridiem'] = 'AM'\r\n # If any games *before* this one are PM then so is this one\r\n elif any(g['meridiem'] == 'PM' for g in preceeding):\r\n game['meridiem'] = 'PM'\r\n # If any games *after* this one have an \"earlier\" start it's AM\r\n elif any(h > t for t in [int(g['time'].split(':')[0]) for g in proceeding]):\r\n game['meridiem'] = 'AM'\r\n # If any games *before* this one have a \"later\" start time it's PM\r\n elif any(h < t for t in [int(g['time'].split(':')[0]) for g in preceeding]):\r\n game['meridiem'] = 'PM'\r\n\r\n if game['meridiem'] is None:\r\n if game['wday'] not in ['Sat', 'Sun']:\r\n game['meridiem'] = 'PM'\r\n if game['season_type'] == 'POST':\r\n game['meridiem'] = 'PM'\r\n\r\n return games", "title": "" }, { "docid": "005c9b0abcf43d350ce84c033117cf8c", "score": "0.43842012", "text": "def get_first_comic_link(cls):\n return {\n \"href\": \"http://www.spaceavalanche.com/2009/02/02/irish-sea/\",\n \"title\": \"Irish Sea\",\n }", "title": "" }, { "docid": "05ffdd485d7b5dcceb532833a4efe7f0", "score": "0.43826875", "text": "def regex_obj_schedule(self, final_link):\n\n raw_schedule_text = (bs_obj_return(final_link)).text # converts to a string\n regex_name = re.search(\"\\w+\\sCUP\\s\\d{4}\",raw_schedule_text) # Creates regex match object finding the season name & year\n self.get_season_name(regex_name.group()) # Gets season name and names this instance\n regex_division = re.search(\"[A-Z]{3,5}\\sDIVISION\\s\\d[A-Z]?\",raw_schedule_text) # finds the league and division\n regex_games = re.findall(\"([A-Z]{3})\\s([A-Z]{3})\\s*(\\d*)\\s*(\\d*:\\d\\d)\\s*(AM|PM)\\s*(.*)\",raw_schedule_text) # a list of games in this division\n self.organize_raw_games(regex_games, regex_division.group()) #cleans and adds the proper information to self.all_games\n regex_rereg_geadline = re.search(\"([A-Z]{3})\\s([A-Z]{3})\\s*(\\d*)\\s*DEADLINE TO RE-REGISTER FOR THE NEXT SEASON!\",raw_schedule_text)\n self.set_rereg_deadline(regex_rereg_geadline.groups())", "title": "" }, { "docid": "1ef23d537c268918b52f6ec6fa120379", "score": "0.43817335", "text": "def get_absolute_url(self):\n return reverse('anime-detail', args=[str(self.id)])", "title": "" }, { "docid": "d406d750c94870bd8cfdff0cb0f346cc", "score": "0.43802473", "text": "def generate_replay_url(self):\n if self.already_have_replay:\n return 
None # Not going to be downloading. so dont need. not worth the api call\n else:\n result = json.loads(str(request_(\"https://api.opendota.com/api/replays?match_id=%d\" % self.id)))[0]\n return \"http://replay{0}.valve.net/570/{1}_{2}.dem.bz2\".format(\n result[\"cluster\"], result[\"match_id\"], result[\"replay_salt\"]\n )", "title": "" }, { "docid": "0d6f1cdb33f34472a3145baf63df14d5", "score": "0.43767464", "text": "def get_url(url, **params):\n # Application ID. Allows up to 10K API hits/day per IP.\n params |= {'key': 'FgLEU6zgwYULvStDmrgqxg((', 'client_id': '16105'}\n headers = {\n 'User-Agent': USER_AGENT.format(version=wf.version, url=wf.help_url)\n }\n r = web.get(url, params, headers=headers)\n log.debug(u'[%d] %s', r.status_code, r.url)\n r.raise_for_status()\n return r", "title": "" }, { "docid": "ee5ef679c2a70eae626ba49ab56c1a49", "score": "0.43675497", "text": "def _get_lyric_url(self, artist, title):\n\n url_params = {'artist': artist, 'song': title, 'fmt': 'html'}\n\n try:\n r = requests.get(\"http://lyrics.wikia.com/api.php\", params=url_params)\n except Exception, e:\n raise e\n\n # Return empty string if lyric request failed\n if r.status_code is not 200:\n return None\n\n # Read and parse the response\n html = r.text\n soup = BeautifulSoup(html)\n\n # The first anchor in the response directs to the lyrics page\n # TODO: make this more robost. if lyric wikia changes their api response, this service might break. -Cal\n link = soup.find('a')\n\n if link:\n return link['href']\n else:\n return None", "title": "" }, { "docid": "2c8a5a033581053669818ec6767c9c69", "score": "0.43592733", "text": "def cron_schedule(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cron_schedule\")", "title": "" }, { "docid": "602db3d973d5e87b75a8b2fcc9262a8b", "score": "0.43562597", "text": "def get_meeting_hrefs(tracks_list):\n construction_tuples = construct_historical_search_tup(tracks_list)\n for tup in construction_tuples:\n url = build_calendar_search_url(tup)\n file_path, file_name = construct_calendar_file_path(tup)\n full_path = create_file_path(file_path, file_name)\n soup_page = open_file(full_path)\n tbody = find_all_tbody(soup_page)\n hrefs = find_href(tbody)\n for ref in hrefs:\n yield ref['href'], tup[0]", "title": "" }, { "docid": "285bd26c892ee74f1bec054b8a584790", "score": "0.43478677", "text": "def getURL (self):\n \n return self.path+self.symbol+\"?interval=\"+self.interval", "title": "" }, { "docid": "a9448121c452c516b5c9363e7a85efd9", "score": "0.43461245", "text": "def get_assignment_url(self):\n return reverse('basecrowd:get_assignment', args=[self.crowd_name])", "title": "" }, { "docid": "e2025387fb0240b6c3bcea4ec4794ef5", "score": "0.43408486", "text": "def query_webpage(date):\n logging.info('Starting query.')\n\n opts = Options()\n opts.headless = True # Uncomment to run headless\n #opts.headless = False\n with webdriver.Firefox(options=opts, executable_path='/usr/bin/geckodriver') as driver:\n driver.get('https://camping.ehawaii.gov/camping/all,details,1692.html')\n\n for element in driver.find_elements_by_tag_name('li'):\n if element.get_attribute('aria-labelledby') == 'ui-id-5':\n element.click()\n\n all_rows = []\n\n # 25-30 days from today\n #\n t_25 = date + dt.timedelta(days=25)\n t_25_str = t_25.strftime(\"%m/%d/%Y\")\n all_rows.extend(get_availability(driver, t_25_str))\n\n # 30-35 days from today\n #\n t_30 = date + dt.timedelta(days=30)\n t_30_str = t_30.strftime(\"%m/%d/%Y\")\n all_rows.extend(get_availability(driver, t_30_str))\n\n #display.stop()\n 
return all_rows\n\n return None", "title": "" }, { "docid": "2ddcbf5dfd61ea3888355303cc499ac9", "score": "0.43388033", "text": "def schedule(self):\n return self.theIrrigationEngine.getSchedule()", "title": "" }, { "docid": "08def38db74232463dbc44a0d1c48993", "score": "0.43258986", "text": "def gather_schedules(self):\n\n soup = bs_obj_return(self.season_link)\n all_season = soup.findAll(\"ul\", id=\"listyofiles\")\n for season_links in all_season:\n league = season_links.findAll(\"a\")\n for division in league:\n self.regex_obj_schedule(division.attrs['href'])", "title": "" }, { "docid": "3bd9758cee9eff2fd7a6a80f1fcfccc7", "score": "0.43250588", "text": "def get_ex21_url(ticker):\r\n\r\n if company_url(ticker) == \"No matching Ticker Symbol.\":\r\n return \"No matching Ticker Symbol.\" # catch un-matching ticker\r\n\r\n elif get_filings_url(ticker) == \"No records.\":\r\n return \"No records.\" # if there is no filings\r\n\r\n else:\r\n # the content of a filing is stored in a table format on the page\r\n latest_filing_table = pd.read_html(latest_filing_url(ticker), header=0)[0] # extract the HTML table\r\n latest_filing_table.columns = ['Seq', 'Description', 'Document', 'Type', 'Size'] # rename the column indexes\r\n\r\n ex21_row = latest_filing_table.loc[latest_filing_table[\"Type\"].str.contains(\"EX-21\") == True].reset_index(\r\n drop=True) # extract the row that stores Exhibit 21\r\n ex21_doc = ex21_row.loc[0, \"Document\"] # keep only the \"Document\" cell (string)\r\n\r\n # the \"Document\" cell contains the link to the details of Exhibit 21 (list of subsidiaries).\r\n soup = parse_html(latest_filing_url(ticker)) # parse the HTML using BeautifulSoup\r\n link = soup.body.find(text=ex21_doc).parent[\"href\"] # find the link embedded in the designated location\r\n complete_link = \"http://www.sec.gov\" + link # get the complete URL\r\n\r\n return complete_link", "title": "" }, { "docid": "b4d96db206a362f6e9e3277ae6767ceb", "score": "0.4321686", "text": "def theLinky(self):\n theLink = self.absolute_url()\n return theLink", "title": "" }, { "docid": "e8568fb4a7e62eba6649415d434564f8", "score": "0.43106005", "text": "def get_schedule_url_for_season(self, season):\n return \"%s/sports/%s/%s/schedule\" % (self.server, self.sport,\n season.short_id)", "title": "" }, { "docid": "886025f983d2e44a28d65543e9fefeac", "score": "0.43074024", "text": "def test_detail_view_with_a_future_schedule(self):\n future_schedule = create_schedule(question='Future schedule.', days=5)\n response = self.client.get(reverse('schedules:detail', args=(future_schedule.id,)))\n self.assertEqual(response.status_code, 404)", "title": "" }, { "docid": "4da6f2f4b09194300b43eaa2da18dac2", "score": "0.43069398", "text": "def test_schedule(self):\n\n smsMessageRequest = SMSMessageRequest(to=test.sch_to, message=test.sch_message)\n smsMessageRequest.set_schedule(datetime=test.sch_datetime, format=test.sch_format)\n smsMessageResponse = smsMessageRequest.schedule()\n resp_message = smsMessageResponse.get_message()\n self.assertEqual(resp_message, 'Campaign of 1 numbers Scheduled successfully.')\n resp_status = smsMessageResponse.get_dlr_status()\n self.assertEqual(resp_status, 'AWAITED-DLR')", "title": "" }, { "docid": "34ddb21c8cc154775d8e0c7018ca3054", "score": "0.4301776", "text": "def fetch_dns_mx_entry():\n dprint(\"Querying \", lookup_domain, \" MX record\")\n answers = dns.resolver.Resolver().query(lookup_domain, 'MX')\n\n if len(answers) == 1:\n rdata = answers[0]\n cprint(f\"Selected MX entry to 
[{rdata.exchange}].\")\n\n return str(rdata.exchange)\n else:\n wprint(\"Multiple MX records were found. Selecting the first one.\")\n return str(answers[0].exchange)", "title": "" }, { "docid": "43f8920a8d393325e6d3f1e17a2d1e3b", "score": "0.4297215", "text": "def get_manual_url():\n if VERSION[2] == 0 and VERSION[4] != 'final':\n manual_ver = 'dev'\n else:\n manual_ver = '%s.%s' % (VERSION[0], VERSION[1])\n\n return 'https://www.reviewboard.org/docs/manual/%s/' % manual_ver", "title": "" }, { "docid": "58d57104855e0a9e6ef9ff41ad29ac59", "score": "0.42928463", "text": "def get_studio_url(course, page):\n studio_link = None\n if course.course_edit_method == \"Studio\":\n studio_link = get_cms_course_link(course, page)\n return studio_link", "title": "" }, { "docid": "6c6b968a5bf1ab70d4f7bc78e417181b", "score": "0.4291374", "text": "def get(self):\n header = self.request.headers.get('X-AppEngine-Cron', None)\n if not header:\n raise ValueError('attempt to access cron handler directly, '\n 'missing custom App Engine header')\n # TODO 1\n # use _cacheAnnouncement() to set announcement in Memcache", "title": "" }, { "docid": "ef09d80d179470a0d28fe8b0f3653d48", "score": "0.4277038", "text": "def refresh_schedule(self):\n self.schedule=self.lv.getSchedule()\n if self.schedule:\n print \"Got a schedule containing: %d item(s)\" % len(self.schedule['schedule'])\n print self.schedule['schedule']\n self.urls=self.lv.getDlUrls(self.schedule)\n\n #print \"List of audio URLs to download \\n %s \\n\" % u\n if self.lv.dlAllFiles(self.urls):\n self.lv.confirmScheduleRetrieval()\n\n self.audio_vibration = self.lv.get_adverts(self.schedule, u'1')\n self.audio_nfc = self.lv.get_adverts(self.schedule, u'2')\n self.audio_ir = self.lv.get_adverts(self.schedule, u'3')\n self.audio_magnetic = self.lv.get_adverts(self.schedule, u'4')\n self.audio_pushtocross = self.lv.get_adverts(self.schedule, u'5')\n self.audio_internal = self.lv.get_adverts(self.schedule, u'6')\n self.audio_broadcast = self.lv.get_adverts(self.schedule, u'7')\n self.audio_emergency = self.lv.get_adverts(self.schedule, u'8')", "title": "" }, { "docid": "6e50cf8d8d64701c6852d463cbe1783e", "score": "0.42744115", "text": "def parse_calendar(cal_page_html=None):\n # Make sure we actually got something\n assert cal_page_html is not None\n\n # -----------------------------------\n # Parse Out Event IDs\n # -----------------------------------\n # List for eventIDs parsed from calendar page html\n event_id_list = []\n # Parse with BeautifulSoup\n cal_page_soup = BS(cal_page_html)\n # list of tags with listeventtitle class -- eventIDs are embedded in some of these\n eventtitle_tag_list = cal_page_soup.find_all('div', class_='listeventtitle')\n\n # Loop Through listeventtitle tags\n for eventtitle_ind,eventtitle_tag in enumerate(eventtitle_tag_list):\n\n # This gets the eventtitle <a> tag which has the eventID embedded in the HREF\n event_link_tag = eventtitle_tag.find('a')\n\n # Skip over dummy eventtitle tags\n if event_link_tag is None: \n continue\n \n # Get the href string\n event_link_string = event_link_tag['href']\n # Parse out the eventID \n raw_event_id_string = re.findall('(&id=\\d{6}&)', event_link_string)[0]\n event_id_string = raw_event_id_string.replace('&','').split('=')[-1]\n\n # Add the eventID to the list\n event_id_list.append(event_id_string)\n\n return event_id_list", "title": "" }, { "docid": "be66c1188a725a4a3513c45936a187a7", "score": "0.42737585", "text": "def create_url(date): \n base_url = 
'http://www.newyorker.com/magazine/toc/'\n (y, m, d) = str(date).split('-')\n url = base_url + y + '/' + m + '/' + d + '/toc'\n return url", "title": "" }, { "docid": "600dbb87ece65973103cc877d647f0ee", "score": "0.42732593", "text": "def get_page(link):\n r = requests.get(link)\n r.raise_for_status()\n return r.content", "title": "" }, { "docid": "a7c050366a65e95ca9ce40d423109131", "score": "0.42701277", "text": "def make_schedule_list():\n calendar = gcal.fetch_calendar()\n calendar_dict = json.loads(calendar)\n summary_list = calendar_dict[\"events\"]\n schedule = []\n for single_schedule in summary_list:\n start_datetime = dt.strptime(single_schedule[\"start\"], '%Y-%m-%dT%H:%M:%S+09:00')\n text = str('%02d' % start_datetime.hour)\n text = text + u':'\n text = text + str('%02d' % start_datetime.minute)\n text = text + u' - '\n end_datetime = dt.strptime(single_schedule[\"end\"], '%Y-%m-%dT%H:%M:%S+09:00')\n text = text + str('%02d' % end_datetime.hour)\n text = text + u':'\n text = text + str('%02d' % end_datetime.minute)\n text = text + u' '\n text = text + single_schedule[\"summary\"]\n schedule.append(text)\n print schedule\n return schedule", "title": "" }, { "docid": "58e02d8eb944c8413a070e5d65f2f3ab", "score": "0.42692548", "text": "def get_draft_review_url(self, body):\n # The \"\\s*\" denotes arbitrary whitespace; sometimes, this tag is split\n # across multiple lines in the HTML.\n # pylint: disable-msg=anomalous-backslash-in-string\n reg = re.compile(\n '<a href=\"([^\"]*)\">Assignment [0-9]+</a>\\s*\\(Draft\\)')\n # pylint: enable-msg=anomalous-backslash-in-string\n result = reg.search(body)\n if result is None:\n return None\n return result.group(1)", "title": "" }, { "docid": "b9a2d65ea9dbcdf3b1cef14a00739c0b", "score": "0.4269149", "text": "def _get_cactus(smiles):\n site = \"cactus.nci.nih.gov\"\n try:\n page = urllib.urlopen(\"http://%s/cgi-bin/translate.tcl?smiles=%s&format=sdf&astyle=kekule&dim=3D&file=\"%(site, smiles))\n except:\n raise RetrieveSmilesException(\"Could not connect with server\")\n\n for line in page:\n if \"Click here\" in line and 'a href=\"' in line :\n dummy1, url, dummy2 = line.split('\"')\n try:\n path, header = urllib.urlretrieve(\"http://%s%s\"%(site,url))\n except:\n raise RetrieveSmilesException(\"Could not retrieve file\")\n return path", "title": "" }, { "docid": "423e602d0f056d1286808129c1aa57da", "score": "0.42659637", "text": "def getAmPm(self):\r\n return self.driver.execute_script('return TIME.ampm')", "title": "" }, { "docid": "c7f357adb0d6f9f80064e4c6bb15651b", "score": "0.42652133", "text": "def get_first_comic_link(cls):\n return get_soup_at_url(cls.url).find(\"a\", title=\"Oldest comic\")", "title": "" }, { "docid": "457c97d4952947009c9d2d1b41f0102a", "score": "0.4265089", "text": "def get_page_url(page_num):\n page_url = 'https://www.dmmsee.icu/genre/hd/' + str(page_num)\n\n return page_url", "title": "" }, { "docid": "dade509f7cac8bc1559ef724e2a9f12d", "score": "0.42583692", "text": "def handler_get_schedule():\n \n return jsonify(allocation.sched.to_dict())", "title": "" }, { "docid": "f00ef337b14a4c064bbd42ad86aa64d2", "score": "0.42535496", "text": "def view_schedule():\n\n\n cur = db.get_db().cursor()\n # Getting all needed info for schedule from database\n cur.execute(\"\"\"\n SELECT sessions.session_name,\n sessions.course_id,\n sessions.id,\n sessions.location,\n sessions.room_number,\n sessions.times,\n courses.description,\n courses.course_num,\n users.name,\n rosters.user_id\n FROM sessions\n INNER JOIN courses ON 
courses.course_num = sessions.course_id\n INNER JOIN users ON courses.teacher_id = users.id\n INNER JOIN rosters ON sessions.id = rosters.session_id\n WHERE rosters.user_id = %s\"\"\",\n (g.user['id'],)\n )\n\n infos = cur.fetchall()\n\n cur.close()\n\n return render_template(\"student_views/schedule.html\", infos=infos)", "title": "" }, { "docid": "a72ce5ad3e6d42b15a224a246d300208", "score": "0.42448965", "text": "def getLinkToTheInquiries(self):\n context = aq_inner(self.context)\n return context.aq_inner.aq_parent.absolute_url() + '/#fieldsetlegend-urban_inquiry'", "title": "" }, { "docid": "8f9277223c0bc9c860be7c8776da1357", "score": "0.42416188", "text": "def test_detail_view_with_a_past_schedule(self):\n past_schedule = create_schedule(question='Past schedule.', days=-5)\n response = self.client.get(reverse('schedules:detail', args=(past_schedule.id,)))\n self.assertContains(response, past_schedule.question, status_code=200)", "title": "" }, { "docid": "b3bc56df2a7cc8107a3a92f6c8fd7c83", "score": "0.42390826", "text": "def schedule2() -> list:\n def fetch(url: str, team: str) -> list:\n holding_list = []\n toreturn = []\n\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95'}\n text = requests.get(url, headers=headers).text\n soup = BeautifulSoup(text, features=\"html.parser\")\n\n table = soup.find('div', class_=\"css-177ackh-FixturesCardCSS e13iyrxf0\")\n for index, thing in enumerate(table):\n if index >= 2:\n temp_list = []\n for ind, a in enumerate(thing):\n if ind == 0:\n if a.text:\n temp_list.append(a.text)\n\n else:\n temp_list.append(a.text)\n\n if len(temp_list) == 2:\n # length = 3 is a result, not an upcoming game\n holding_list.append(temp_list)\n\n for match in holding_list:\n time = match[0]\n everything_else = match[1]\n\n # fix date first\n try:\n time_dtime = datetime.datetime.strptime(time[0:11], '%a, %d %b').date()\n except ValueError:\n try:\n time_dtime = datetime.datetime.strptime(time, '%d %B %Y').date()\n except ValueError:\n time_dtime = None\n\n # get time of match\n time = re.compile(r'(\\d)+:(\\d)+').search(everything_else).group()\n hour = int(time.split(':')[0])\n if hour > 12:\n hour -= 12\n flag = 'PM'\n else:\n flag = 'AM'\n fixed_time = '{}:{} {}'.format(hour, time.split(':')[1], flag)\n\n # get team and opponent\n splitter = '/-|'\n everything_else = everything_else.replace(time, splitter)\n team, opponent = everything_else.split(splitter)\n toreturn.append([time_dtime, fixed_time, team, opponent])\n\n return toreturn\n\n return fetch('https://www.fotmob.com/teams/6713/overview/usa', 'MNT')", "title": "" }, { "docid": "f11b8b077376949c50c577efdcf36612", "score": "0.42266884", "text": "def print_schedule():\n print 'main:print_schedule - called.'\n printgcal.print_calendar()\n return 'Done'", "title": "" }, { "docid": "5bbd5c51f2d31d2fc9b69ea49327b98b", "score": "0.4222713", "text": "def getChanSchedule(self, date, chanTypeId, chanId, genre=\"\", page=1):\n\n url = self._chanUrls[chanTypeId]\n self._chanParams[\"channelType\"] = chanTypeId\n self._chanParams[\"date\"] = date\n self._chanParams[\"genre\"] = genre\n self._chanParams[\"channelId\"] = chanId\n self._chanParams[\"page\"] = page\n\n html = getHtml(url, self._chanParams, self._sleep,\n self._maxRetryCnt)\n soup = bs4.BeautifulSoup(html)\n schedule = self._extSchedule(soup, date)\n\n return schedule", "title": "" }, { "docid": "bb8bded8a2ea71a362e34911b04db605", "score": "0.421997", "text": "def 
get_double_team_schedule(self, team_id):\n path = \"tennis-t2/en/double_teams/{team_id}/schedule\".format(team_id=team_id)\n print(path)\n return self._make_request(path)", "title": "" }, { "docid": "330d8e44e1d1da31aa528e6c6e19b1e9", "score": "0.42156845", "text": "def _get_single_report_url(report):\n href_ptrn = re.compile(r'<a href=\"([\\W\\w]+?)\">')\n url = re.search(href_ptrn, report)\n url = url.group(1)\n url = url.replace('&amp;', '&')\n return url", "title": "" }, { "docid": "7eee2d1b50c49c0c424168a3fb39fa3d", "score": "0.4215103", "text": "def download_link(song_id: str) -> str:\n return check_media_url(get_song_details(song_id).media_url)", "title": "" }, { "docid": "885645365cd79cfa1ed9ec104450a290", "score": "0.42145142", "text": "def get_daily_url(reporting_url):\n disaster_covid_soup = url_to_soup(reporting_url)\n find_txt = 'COVID-19 Data - Daily Report'\n daily_url = disaster_covid_soup.find(\n lambda tag: tag.has_attr('href') and re.search(find_txt, tag.text)\n ).get('href')\n\n if not daily_url:\n raise ValueError('Unable to find Daily Report Archive link')\n # daily report URL is often relative. urljoin fixes this.\n return urljoin(reporting_url, daily_url)", "title": "" }, { "docid": "fea78aea3a9c32969fb81bbb0a8d3003", "score": "0.42143697", "text": "def fetch2(station: str) -> str:\n core.valid_station(station)\n url = 'http://www.aviationweather.gov/metar/data?ids='+station+'&format=raw&date=0&hours=0'\n html = get(url).text\n if station+'<' in html:\n raise InvalidRequest('Station does not exist/Database lookup error')\n #Report begins with station iden\n start = html.find('<code>'+station+' ')+6\n #Report ends with html bracket\n end = html[start:].find('<')\n return html[start:start+end].replace('\\n ', '')", "title": "" }, { "docid": "d47a57114c2c082ce09da34731425ca9", "score": "0.421373", "text": "def fetch_schedule_ids(logger, stack_server, api_user, api_password):\n schedules = []\n\n # get date of the day 60 days old from now and convert to epoch.\n date_passed = datetime.datetime.now().replace(microsecond=0) - datetime.timedelta(\n days=60\n )\n pattern = '%Y-%m-%d %H:%M:%S'\n epoch_time = int(time.mktime(time.strptime(str(date_passed), pattern)))\n connection_instance = None\n\n connection_instance = APIConnection(\n user=api_user,\n password=api_password,\n portal=stack_server,\n retry500=10,\n logger=logger,\n )\n\n # construct url of schedules with start date older than 60 days,\n # CHG in description and with No reccurance.\n connection_instance.uri = (\n '/api/schedule?limit=!&filter.0.dtstart.max={0}\\\n &filter.0.recur_expr.eq=0\\\n &filter.0.description.begins_with=CHG'\n ).format(epoch_time)\n connection_instance.requestType = 'GET'\n connection_instance.httpRequest()\n\n if (\n connection_instance.responseCode > 199\n and connection_instance.responseCode < 300\n and connection_instance.responseJSON['result_set']\n ):\n for result in connection_instance.responseJSON['result_set']:\n schedule_event = re.search(r'schedule[^\\d]+([\\d]+)', result['URI'])\n schedules.append(schedule_event.group(1))\n\n return schedules", "title": "" }, { "docid": "ced791aa801b4e6fe3c95b5dbd7573af", "score": "0.42119783", "text": "def get_exams(\n subj: str = \"\", course_num: str = \"\", sec_numb: str = \"\", crn: str = \"\"\n ) -> tuple[str, str, list[str]]:\n page = TodayAtMun.submit_form(subj, course_num, sec_numb, crn)\n sched_heading = TodayAtMun.parse_sched_heading(page)\n headings = TodayAtMun.parse_headings(page)\n exams = TodayAtMun.parse_form(page)\n return 
sched_heading, headings, exams", "title": "" }, { "docid": "20f337e02fb63d8e48799e51ce6697f2", "score": "0.42119774", "text": "def url(self):\n\n return url_for('api.get_event', id=self.id, legacy_id=self.legacy_id,\n _external=True)", "title": "" }, { "docid": "c5f3cc573369fd4f69467c5828089728", "score": "0.4196545", "text": "def get_scoreboard_link(self):\n title = _(\"Participants\") if self.is_course else _(\"Scoreboard\")\n return mark_safe(\n u'<a href=\"{}/\">{}</a>'.format(self.get_scoreboard_url(), xss.escape(title))\n )", "title": "" }, { "docid": "b27d44abaed2d74dd304b3acfce901be", "score": "0.41937375", "text": "def issue(self):\r\n return self._url('issue')", "title": "" } ]
16b874686501b17be9428203cbca25c6
String should be returned in reversed order
[ { "docid": "9764c1c5237dd6493396e36ee89a3b24", "score": "0.0", "text": "def test_reverse_three_chars(self):\n diversifier = Diversifier('abc')\n self.assertEqual('cba', diversifier.reverse())", "title": "" } ]
[ { "docid": "b354bd93c532860e00bdc75a57a7cd7d", "score": "0.74614257", "text": "def task_10_get_reverse_string(text: str):\n return text[::-1]", "title": "" }, { "docid": "f0b5b1a3f3eaba1b8d9634b792123c3a", "score": "0.7371981", "text": "def reverse(self): \n letters = list(self.seq) \n letters.reverse() \n return ''.join(letters)", "title": "" }, { "docid": "c215376a9b2ea35a8521ee66dffbb378", "score": "0.73564476", "text": "def rev(string):\r\n return string[::-1]", "title": "" }, { "docid": "221c32947832138842eed90c9dfca855", "score": "0.73407483", "text": "def reverse(s):\n return reverse_to(s, empty_rlist)", "title": "" }, { "docid": "2058ceeea392b12dae35456705c240a6", "score": "0.73218286", "text": "def _reverse(self, word):\n reversed_word = \"\"\n for position in range(len(word)-1, -1, -1):\n reversed_word = reversed_word + word[position]\n return reversed_word", "title": "" }, { "docid": "8663fbacce4d50fe9d1dba3cfd19868e", "score": "0.73169994", "text": "def reverse(s):\n pass", "title": "" }, { "docid": "c69e9b91c1a9c9ba9d2653e50a82268e", "score": "0.73125076", "text": "def question1(string):\n return string[::-1]", "title": "" }, { "docid": "2c41f4f862a82e5475367c69dcacff44", "score": "0.72630453", "text": "def reverse(s):\n return s[::-1]", "title": "" }, { "docid": "2f039bbb889ca9f1b991757e1a8bf0fd", "score": "0.7245307", "text": "def backward_string(val: str) -> str:\n return val[::-1]", "title": "" }, { "docid": "a984cd389e84e92f63a4874da14a41ab", "score": "0.7236288", "text": "def reverse(strng):\n result = ''\n for i in range(len(strng)-1, -1, -1):\n result += strng[i]\n return result", "title": "" }, { "docid": "bae212d4dc3cabc4d77e3ba3cb296a7e", "score": "0.7218215", "text": "def reverse_str_builtin(string):\n return \"\".join(reversed(string))", "title": "" }, { "docid": "88228099226c32e4f6a87f7c8e800872", "score": "0.7171714", "text": "def returnBackwardsString(random_string):\n return \"\".join(reversed(random_string))", "title": "" }, { "docid": "8333706e236b47c9e386aed1fd867971", "score": "0.7158958", "text": "def reverse(line: str) -> str: \r\n\r\n return line[::-1]", "title": "" }, { "docid": "5f74ff4e46f6ff93c0af64d047eb8a96", "score": "0.70825094", "text": "def reverseString(self, s: List[str]) -> None:\n \n return (s.reverse()) #can do it this way\n\n #another way\n #src: https://developers.google.com/edu/python/strings\n #copy the string and then reverse it\n s[:] = s[::-1]\n return s[:]", "title": "" }, { "docid": "3be7d203b5ac06c212ab21f67da8e171", "score": "0.708069", "text": "def loop_reverse(s: str) -> str:\n return s[::-1]", "title": "" }, { "docid": "71362ff3b4d2c52c98c982956c38bceb", "score": "0.70720226", "text": "def string_reverse(string): #0(n + 2)\r\n words = string.split() #0(1)\r\n reverse_string = \" \".join(reversed(words)) #0(n)\r\n print(reverse_string) #0(1)\r", "title": "" }, { "docid": "640b0a9df922e8c90a61d0ea136181ac", "score": "0.70616895", "text": "def reverse_string(self, s):\n left = 0\n right = len(s)-1\n while left < right:\n s[left], s[right] = s[right], s[left]\n left += 1\n right -= 1\n return s", "title": "" }, { "docid": "aa6f24533e7cf35d49b3b23d45f9d009", "score": "0.7061512", "text": "def reverseString(self, s):\n return s[::-1]", "title": "" }, { "docid": "310ee5b2cd2e8f2a9593787f952d2677", "score": "0.70547795", "text": "def reverse_str(s):\n print(s[-1])\n if len(s) > 1:\n return reverse_str(s[:-1])", "title": "" }, { "docid": "5f3cd95841305376592428ead5933e4f", "score": "0.70416117", "text": "def Reverse(self, *args):\n 
return _snap.TStrV_Reverse(self, *args)", "title": "" }, { "docid": "b6282af457fac7d64688dae23f9211be", "score": "0.70320827", "text": "def reverse(a):\n b = ''\n for i in range(len(a)-1,-1,-1):\n b+=a[i]\n\n return b", "title": "" }, { "docid": "7c3f60ca38445a433ebd7eae6e2d3dd4", "score": "0.7029446", "text": "def reverse():", "title": "" }, { "docid": "e4cad0f4c528f52b2725fcda967abfb2", "score": "0.7023599", "text": "def reverse_string_python(string):\n print(string[::-1])", "title": "" }, { "docid": "c3c5301c869340f7afa4dc09bd802aeb", "score": "0.7014934", "text": "def reverseString(self, s) :\n left,right = 0, len(s)-1\n while left < right:\n s[left],s[right] = s[right],s[left]\n left += 1\n right -=1\n return s", "title": "" }, { "docid": "f14dbc62494aa9130ad8d2b54f619edc", "score": "0.7012892", "text": "def reverseString(self, s: List[str]) -> None:\n n = len(s)\n i = 0\n l = n-1\n while l>i:\n s[i],s[l] = s[l],s[i]\n i = i+1\n l = l-1\n return s", "title": "" }, { "docid": "f063b923b4cee23d231f67b0a32e5513", "score": "0.7011787", "text": "def reverse(strs):\n return ''.join([strs[i] for i in xrange(len(strs)-1, -1, -1)])", "title": "" }, { "docid": "b8222df5657a9aa40d54627b991788e8", "score": "0.7003707", "text": "def activity04(input_string):\n return input_string [::-1]", "title": "" }, { "docid": "4a9b71858aef68d0a7bfe4c30e74fbdc", "score": "0.6995744", "text": "def reverse(input=''):\n return input[:: -1]", "title": "" }, { "docid": "8eaedd0364a1ceb600e42ba9bc8e0a8e", "score": "0.6991887", "text": "def reverseString(self, s):\r\n left = 0\r\n right = len(s) - 1\r\n while (left < right):\r\n s[left], s[right] = s[right], s[left]\r\n left += 1\r\n right -= 1\r\n return s", "title": "" }, { "docid": "104a6583cb34cf8375696c147a6d6f1e", "score": "0.6981512", "text": "def reversed(seq):\n return seq[::-1]", "title": "" }, { "docid": "b5ea98276c057a9a17a050c1eea72a38", "score": "0.6975022", "text": "def reverseString(self, s):\r\n i = 0\r\n j = len(s)-1\r\n while i <= j:\r\n s[i],s[j]=s[j],s[i]\r\n i += 1\r\n j -= 1\r\n return s", "title": "" }, { "docid": "cf3b32e0d242997f26b46e9456054cf8", "score": "0.6972569", "text": "def reverseString(self, s: List[str]) -> None:\n i = 0\n n = len(s) - 1\n while i < n - i:\n s[i], s[n - i] = s[n - i], s[i]\n i += 1\n return s", "title": "" }, { "docid": "7d8ba28c3043c651e4f2de974f97fed0", "score": "0.6971828", "text": "def reverseString(self, s: list) -> None:\n s.reverse()", "title": "" }, { "docid": "7d8ba28c3043c651e4f2de974f97fed0", "score": "0.6971828", "text": "def reverseString(self, s: list) -> None:\n s.reverse()", "title": "" }, { "docid": "b122637f2f48bb41fa6a755e18da7236", "score": "0.6967133", "text": "def reverseString(self, s: List[str]) -> None:\n left, right = 0, len(s) - 1\n while left <= right:\n s[left], s[right] = s[right], s[left]\n left, right = left+1, right-1\n \n return s", "title": "" }, { "docid": "5c5d62aacf8817b8daa5a1dc61c4d702", "score": "0.69542396", "text": "def reverse_compliment(sequence: str) -> str:\n matches = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N'}\n reverse = ''\n for num in range(1, len(sequence)+1):\n reverse = reverse + matches[sequence[-num]]\n return reverse", "title": "" }, { "docid": "2f56d8571292c09261fc23ad6653b4f8", "score": "0.6950309", "text": "def reverseString(self, s: list):\n if len(s) == 0:\n return s\n else:\n return self.reverseString(s[1:]) + [s[0]]", "title": "" }, { "docid": "9d0e99901ba1770989044f360dc269b4", "score": "0.6938782", "text": "def transform_reverse(str):\n 
return [str, \"\".join(reversed(str))]", "title": "" }, { "docid": "08f3e256dd18483326a04e2671e24ec3", "score": "0.6909693", "text": "def reverseString(self, s: 'List[str]') -> 'None':\r\n min = 0\r\n max = len(s)-1\r\n print(''.join(reversed(s)))\r\n while min != max and min < max:\r\n s[min], s[max] = s[max], s[min]\r\n min += 1\r\n max -= 1\r\n return", "title": "" }, { "docid": "fcca724ad3b2de80c74d7f6d2f4d69a4", "score": "0.68923515", "text": "def reverse_compliment(seq):\n return ''.join([dna_reverse_compliment[nuc] for nuc in seq])[::-1]", "title": "" }, { "docid": "29e4916ce537309f126ab693ed319982", "score": "0.6888685", "text": "def reverseString(self, s: List[str]) -> None:\n # 使用双指针\n i = 0\n j = len(s)-1\n while i < j:\n s[i],s[j]=s[j],s[i]\n i += 1\n j -= 1\n return s # 这行不用提交", "title": "" }, { "docid": "471fcbf6b36af82609586ed5eeea7817", "score": "0.6877857", "text": "def reverseString(self, s) :\n s.reverse()", "title": "" }, { "docid": "f7bc3ac0bc6a5df9212b01bd0bd55098", "score": "0.6876524", "text": "def reverse(seq):\n return seq[::-1]", "title": "" }, { "docid": "f7bc3ac0bc6a5df9212b01bd0bd55098", "score": "0.6876524", "text": "def reverse(seq):\n return seq[::-1]", "title": "" }, { "docid": "f7bc3ac0bc6a5df9212b01bd0bd55098", "score": "0.6876524", "text": "def reverse(seq):\n return seq[::-1]", "title": "" }, { "docid": "396165a29d0fc33a519abbc4e4376cc3", "score": "0.6876283", "text": "def reverseString(self, s) :\n left,right = 0, len(s)-1\n while left < int(len(s)/2):\n s[left],s[right] = s[right],s[left]\n left += 1\n right -=1\n return s", "title": "" }, { "docid": "0b61336f43a103a17e2ac5d749cea059", "score": "0.6874841", "text": "def reverse(s): \n\tletters = list(s) \n\tletters.reverse() \n\treturn ''.join(letters)", "title": "" }, { "docid": "2dff49b75fabad4c061f0ca5de9fbdf1", "score": "0.686868", "text": "def dir_reverse(self, val:str)->str:\n return val[::-1]", "title": "" }, { "docid": "21a721717bdb9851cdce77d155ebcfcb", "score": "0.6867771", "text": "def reverseString(self, s: List[str]) -> None:\n s[:] = s[::-1]\n return s", "title": "" }, { "docid": "41d7414fe99058be7f34cb48566a9b33", "score": "0.68603283", "text": "def reverse_string(word):\n return \"\".join(reversed(word))", "title": "" }, { "docid": "28d584b9556a34c798a47e4b75e728aa", "score": "0.68576866", "text": "def reverseString1(self, s: List[str]) -> None:\n s.reverse()", "title": "" }, { "docid": "0c5ef1ff254d022f993ecae41c2754cf", "score": "0.6851875", "text": "def reverse_string(my_string):\n stack = Stack()\n for i in my_string:\n stack.push(i)\n reversed_string = ''\n while(not stack.is_empty()):\n reversed_string += stack.pop()\n return reversed_string", "title": "" }, { "docid": "5aae977912c2ae8573d4943b6b717eaa", "score": "0.68492573", "text": "def Reverse(self, *args):\n return _SnapTime.TStrV_Reverse(self, *args)", "title": "" }, { "docid": "ab8af62ab63bae8761d9649fb54462b4", "score": "0.6837797", "text": "def reverseString(self, s: List[str]) -> None:\n for i in range(len(s)//2):\n tmp = s[i]\n s[i] = s[len(s)-i-1]\n s[len(s)-i-1] = tmp \n\n return s", "title": "" }, { "docid": "a93721cd0a083efa81fd9868745f1071", "score": "0.6831436", "text": "def reverseString(aStr):\n #can't do this way according to directions:\n #return aStr[::-1]\n '''also cannot do it with a helper function according to directions\n def helpReverseString(inStr, outStr):\n if inStr == '':\n print outStr\n return outStr\n else:\n lenStr = len(inStr)\n outStr += inStr[-1]\n helpReverseString(inStr[0:(lenStr-1)], 
outStr )\n return helpReverseString(aStr, '')\n '''\n if aStr == \"\":\n return aStr\n else:\n return reverseString(aStr[1:]) + aStr[0]", "title": "" }, { "docid": "9d7e29b34c39816ec9ebdb5e34a8752f", "score": "0.6821908", "text": "def reverseString(self, s: List[str]) -> None:\n # s.reverse()\n\n N = len(s)\n for i in range(N//2):\n s[i], s[N-i-1] = s[N-i-1], s[i]\n\n return", "title": "" }, { "docid": "1c4a20fd368ae9e6c02acc834e7f2157", "score": "0.6812077", "text": "def decode_reverse(string):\n return str(string)[::-1] # reverse string", "title": "" }, { "docid": "16ede4ba39cbe75006f3a59d7f69b4aa", "score": "0.6811857", "text": "def reverse_pair(string_a):\n list_a = string_a.split()\n return \" \".join(list_a[::-1])", "title": "" }, { "docid": "a53e73e459847cbce1588597dda226e0", "score": "0.6805483", "text": "def reverse(N):\r\n string=str(N)\r\n if string==\"\": return \"\"\r\n elif len(string)==1:\r\n return string[0]\r\n else:\r\n return reverse(string[1:]) + string[0]", "title": "" }, { "docid": "779bd3f0a991523849622244b59969e3", "score": "0.67994744", "text": "def Reverse(self):\n return _SnapTime.TStr_Reverse(self)", "title": "" }, { "docid": "bc3db3c86a4cbfdc43c0c742d674539b", "score": "0.6795398", "text": "def reverseComplement(string):\n rMap = { \"A\":\"T\", \"T\":\"A\", \"C\":\"G\", \"G\":\"C\", \"N\":\"N\"}\n return \"\".join(rMap[i] for i in string[::-1])", "title": "" }, { "docid": "8244c379028da86e75005242eba46bba", "score": "0.6789336", "text": "def reverse(seq):", "title": "" }, { "docid": "54b126b0b83aec10fb210b8fdf45f2dd", "score": "0.6773255", "text": "def reverseString(self, s: List[str]) -> None:\n s = s.reverse()", "title": "" }, { "docid": "764ee741d7edaa4ab366d2f997689211", "score": "0.6756203", "text": "def reverseString(self, s) -> None:\n n = len(s)\n\n start = 0\n end = n - 1\n\n while start < end:\n # Swap elements\n s[start], s[end] = s[end], s[start]\n # Increment start pointer and decrement end pointer\n start += 1\n end -= 1\n\n return s", "title": "" }, { "docid": "1e51e666c7b0c68466198d9d02d51244", "score": "0.6749362", "text": "def reverse_string(input):\n\n n = len(input)\n\n if n <= 1:\n return input\n else:\n return input[-1] + reverse_string(input[:(n-1)])", "title": "" }, { "docid": "a155cbf8a82d02a93b7d7ab7bfcd08d8", "score": "0.67389286", "text": "def reverse_string(input):\n \n if len(input) == 0:\n return input\n output = input[-1:] + reverse_string(input[:-1])\n return output", "title": "" }, { "docid": "a1037b7b78a3768b82052b25785f0af5", "score": "0.6737536", "text": "def reverse_complement(self):\n return \"\".join(type(self).reverse[x] for x in self.sequence)[::-1]", "title": "" }, { "docid": "997e97f45bfee9396981dc7439b279c3", "score": "0.6731747", "text": "def reversecomp(sequence):\n\n result = list(map(watsoncrick, sequence))\n result.reverse()\n return \"\".join(result)", "title": "" }, { "docid": "35514068ed1bf2d681355273cb220970", "score": "0.6728677", "text": "def reverseString(self, s: List[str]) -> None:\n ##先用双指针左,交换左右的值,L++,r--,直到l>= r\n left = 0 \n right = len(s) -1\n while left < right:\n s[left],s[right] = s[right],s[left]\n left = left + 1\n right = right - 1\n return s", "title": "" }, { "docid": "11edcdcd5e40a7b3e41ed9fec020c531", "score": "0.6725448", "text": "def reverse_str_rec(string):\n if len(string) == 0:\n return string\n return Reverser.reverse_str_rec(string[1:]) + string[0]", "title": "" }, { "docid": "fa2dd0f66be4efcabc094b59548ba9ec", "score": "0.6721728", "text": "def reversecomplement(self): \n letters 
= list(self.seq) \n letters.reverse() \n letters = [self.basecomplement[base] for base in letters] \n return ''.join(letters)", "title": "" }, { "docid": "b0a001ca666412d8f722ef464ecc6472", "score": "0.67186105", "text": "def reverseString(self, s) -> None:\n n = len(s) - 1\n for i in range(n // 2 + 1):\n s[i] ,s[n-i] = s[n-i] , s[i]\n return s", "title": "" }, { "docid": "0421bc4c67a6e13e52c7f0b9c65c25d7", "score": "0.6718562", "text": "def reverseString(self, s: List[str]) -> None:\n if not s or len(s) == 1:\n return s\n \n left, right = 0, len(s) - 1\n \n while left < right:\n s[left], s[right] = s[right], s[left]\n left, right = left + 1, right - 1", "title": "" }, { "docid": "4b31d5fb5a4446a02a3b058588c6ed5d", "score": "0.6708068", "text": "def reverse(s):\n assert type(s) == str, repr(s) + ' is not a string' # get in the habit\n\n # Work on small data (BASE CASE)\n if s == '':\n return s\n\n # Break up the data (RECUSIVE CASE)\n left = s[0] # Reverse of one letter is itself\n right = reverse(s[1:])\n\n # Combine the results\n return right+left", "title": "" }, { "docid": "670572b2fe61a9c0b228453f35c9c44c", "score": "0.67057973", "text": "def reverseString(self, s):\n s.reverse()\n print(s)\n s = s[::-1]\n print(s)", "title": "" }, { "docid": "a45de2e23eb8911eb8819dad5e67be13", "score": "0.6691118", "text": "def reverse(word):\n\n if len(word) <= 1:\n return word\n else:\n return word[-1] + reverse(word[:-1])", "title": "" }, { "docid": "65d1def7f6dcfcbbe517569359418793", "score": "0.667502", "text": "def reverseString(string):\n #Split the string into a list\n a=string.split()\n #Reverse the order of the list\n a.reverse()\n #Place the elements of the list back into a string separated\n #by spaces\n result=\" \".join(a)\n return result", "title": "" }, { "docid": "6df652a629c044500de8384254c7acd8", "score": "0.6658689", "text": "def reverseString(self, s: List[str]) -> None:\n for i in range(len(s)//2):\n s[i], s[-i-1] = s[-i-1], s[i]", "title": "" }, { "docid": "d4b163cd585a52d3be4961be1fb82f97", "score": "0.66547614", "text": "def reverseString(self, s: List[str]) -> None:\n lens = len(s)-1\n i = 0 \n while i < lens:\n c = s[i]\n s[i] = s[lens]\n s[lens] = c\n i = i + 1\n lens = lens-1\n return", "title": "" }, { "docid": "d4f68cecc8fa9697608037f1c26a02d3", "score": "0.66500777", "text": "def reverseString(self, s: List[str]) -> None:\n l, r = 0 , len(s) - 1\n while l < r:\n s[l], s[r] = s[r], s[l]\n l += 1\n r -= 1", "title": "" }, { "docid": "0bd73cf84cedd5d388106a5a60358ced", "score": "0.66454816", "text": "def backward(my_string):\n pass", "title": "" }, { "docid": "d1278d7ef0e7f5bde030fcaeb2d90cd9", "score": "0.6639894", "text": "def reverseString(self, s: List[str]) -> None:\n\n s.reverse()", "title": "" }, { "docid": "969da01ca2f3f46478ab872e308c3741", "score": "0.663876", "text": "def reverse_string(input):\n if len(input) <= 1:\n return input\n temp = len(input) - 1\n return input[temp] + reverse_string(input[:temp])", "title": "" }, { "docid": "ae258e924cfc3b3f123b0d6da98bbbe2", "score": "0.66368055", "text": "def encode_reverse(string):\n return str(string)[::-1] # reverse string", "title": "" }, { "docid": "fb6bbbe720ee039066d69717a1490dea", "score": "0.66306436", "text": "def reverseString(self, s: 'List[str]') -> 'None':\n i,j = 0, len(s)-1\n while j>i:\n s[i] = s[i] + s[j]\n s[j] = s[i][0]\n s[i] = s[i][1]\n i = i+1\n j = j-1", "title": "" }, { "docid": "8ab7c37e0fef3c268f4ab7c5c682f588", "score": "0.6621195", "text": "def reverseString2(self, s: List[str]) -> 
None:\n # 要求使用递归,提交后内存溢出\n def recur(ss: List[str]) -> List:\n if len(ss) <= 1:\n return ss\n else:\n return recur(ss[1:])+[ss[0]]\n s[:] = recur(s)\n return s #这行不用提交", "title": "" }, { "docid": "2d278a1c100f81debdcfef7e726299fd", "score": "0.66211534", "text": "def Reverse(self, *args):\n return _SnapTime.TStr64V_Reverse(self, *args)", "title": "" }, { "docid": "38f641e9ed1fdadecbf239402aad3433", "score": "0.66154206", "text": "def reverseString(self, s: List[str]) -> None:\n cur_l = 0\n cur_r = len(s) -1\n while cur_l <= cur_r:\n s[cur_r], s[cur_l] = s[cur_l], s[cur_r]\n cur_l += 1\n cur_r -= 1", "title": "" }, { "docid": "62ead968db4a50341e7738d5aaad4c9b", "score": "0.66098386", "text": "def reverseString(self, s: List[str]) -> None:\n if not s:\n return []\n\n n = len(s)\n for i in range(int(n/2)):\n temp = s[i]\n s[i] = s[n-1-i]\n s[n-1-i] = temp", "title": "" }, { "docid": "30ed615862ca2fca70014df2d3a5ab6d", "score": "0.66056806", "text": "def reverseString3(self, s: List[str]) -> None:\n # 使用递归+双指针\n def recur(ss:List[str],i:int,j:int)->List:\n if i>j:\n return\n else:\n ss[i],ss[j]=ss[j],ss[i]\n return recur(ss,i+1,j-1)\n recur(s,0,len(s)-1)\n return s #这行不用提交", "title": "" }, { "docid": "88950e241a321cffc3e704321b680fed", "score": "0.6602957", "text": "def reverseString(self, s: List[str]) -> None:\n l,h = 0,len(s)-1\n while l < h:\n s[l],s[h] = s[h],s[l]\n l += 1\n h -= 1", "title": "" }, { "docid": "7fa42991499f38fee9a366a5beb052b9", "score": "0.65953445", "text": "def _reversed(self):\n return self.elements[::-1]", "title": "" }, { "docid": "a3ee58ca15353c5a8977c0e64bc390b2", "score": "0.65930974", "text": "def reverseStringNonBuiltin(self, s: List[str]) -> None:\n \tt = s.copy()\n\n \tfor x in range(len(s)):\n \t\tt[x] = s[-(x+1)]\n\n \ts[:] = t", "title": "" }, { "docid": "ee6bd8b6a03147f36974c8aa735805af", "score": "0.6587697", "text": "def reverseString(self, s):\n l = len(s)\n i = 0\n while i < l//2:\n s[i], s[l-i-1] = s[l-i-1],s[i]\n i += 1", "title": "" }, { "docid": "8b23fb55506956df4641a71f05b7da07", "score": "0.65782183", "text": "def reverseString(self, s: List[str]) -> None:\n i = 0\n j = -1\n \n while i < len(s)/2:\n s[i], s[j] = s[j], s[i]\n i = i + 1\n j = j - 1", "title": "" }, { "docid": "7d04f65c82f5ef6fc76977b2d7d54775", "score": "0.6575837", "text": "def reverse_rec(tempstr): #must use recursion\r\n if((len(tempstr)) <= 1):\r\n return tempstr\r\n else:\r\n return reverse_rec(tempstr[1:]) + tempstr[0]", "title": "" }, { "docid": "327a650267114627bbc37782a692173c", "score": "0.6572573", "text": "def rev(s):\n\n t = s.split(' ')\n\n return ' '.join(t[::-1])", "title": "" }, { "docid": "8d9dfceb127b1de30ba4b8436f68db56", "score": "0.6571234", "text": "def reverse(a):\r\n\r\n a = re.sub(\"[^A-Za-z]\", \"\", a)\r\n a = a.lower()\r\n return a[::-1]", "title": "" }, { "docid": "6f5d871209ece362fb8727cd8b592420", "score": "0.65704614", "text": "def reverseString(self, s: List[str]) -> None:\n num_to_swap = len(s) // 2\n y = len(s) - 1\n\n for i in range(num_to_swap):\n temp = s[i]\n s[i] = s[y - i]\n s[y - i] = temp\n return s", "title": "" }, { "docid": "224d17be239b139a5e303911b5211868", "score": "0.65604895", "text": "def reverse(x):\n return x[::-1]", "title": "" }, { "docid": "ef8458f35580e5ca758c3d93a9840341", "score": "0.6559089", "text": "def rev_string(my_str):\n s = Stack()\n result = ''\n\n # add each char to the stack\n for char in my_str:\n s.push(char)\n \n # pop each char from the stack to reverse the order, and then append the\n # string to resuly\n 
while not s.is_empty():\n result += s.pop()\n \n return result", "title": "" } ]
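The entry above pairs the query "String should be returned in reversed order" with passages implementing string reversal. As an illustrative aside only (not one of the dataset records), a minimal Python sketch of the behaviour the positive passage tests — slice-based reversal with the 'abc' -> 'cba' expectation — could look like the following; the function name `reverse` is assumed for illustration rather than taken from any single passage:

    def reverse(s: str) -> str:
        # Slice with a step of -1 to return the characters in reverse order.
        return s[::-1]

    # Mirrors the positive passage's assertion: reversing 'abc' yields 'cba'.
    assert reverse("abc") == "cba"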
82258dd61b1d65083fa88155a6715429
builder = self.get_builder() builder.right_join( "report_groups as rg",
[ { "docid": "0a76de558e94f35a9b0bb1e7f4901da3", "score": "0.6038224", "text": "def can_compile_right_join_clause_with_lambda(self):\n return \"\"\"SELECT * FROM \"users\" RIGHT JOIN \"report_groups\" AS \"rg\" ON \"bgt\".\"fund\" = \"rg\".\"fund\" OR \"bgt\" IS NULL\"\"\"", "title": "" } ]
[ { "docid": "ccc08f56946948335d1efe68dd64a809", "score": "0.6065083", "text": "def can_compile_join_clause(self):\n return \"\"\"SELECT * FROM \"users\" INNER JOIN \"report_groups\" AS \"rg\" ON \"bgt\".\"fund\" = \"rg\".\"fund\" AND \"bgt\".\"dept\" = \"rg\".\"dept\" AND \"bgt\".\"acct\" = \"rg\".\"acct\" AND \"bgt\".\"sub\" = \"rg\".\"sub\\\"\"\"\"", "title": "" }, { "docid": "9f684adb5e83fafde6981adbd91bc911", "score": "0.59313715", "text": "def can_compile_join_clause_with_value(self):\n return \"\"\"SELECT * FROM \"users\" INNER JOIN \"report_groups\" AS \"rg\" ON \"bgt\".\"active\" = '1' OR \"bgt\".\"acct\" = '1234'\"\"\"", "title": "" }, { "docid": "5f9d4c95236e137899dd74f8fece5aee", "score": "0.5576503", "text": "def can_compile_join_clause_with_lambda(self):\n return \"\"\"SELECT * FROM \"users\" INNER JOIN \"report_groups\" AS \"rg\" ON \"bgt\".\"fund\" = \"rg\".\"fund\" AND \"bgt\" IS NULL\"\"\"", "title": "" }, { "docid": "d6e56377f2a858d5ddc8f925e284eb60", "score": "0.5450178", "text": "def can_compile_left_join_clause_with_lambda(self):\n return \"\"\"SELECT * FROM \"users\" LEFT JOIN \"report_groups\" AS \"rg\" ON \"bgt\".\"fund\" = \"rg\".\"fund\" OR \"bgt\" IS NULL\"\"\"", "title": "" }, { "docid": "57f5de36a644db07334be839889b027a", "score": "0.5374605", "text": "def can_compile_multiple_join(self):\n return \"\"\"SELECT * FROM \"users\" INNER JOIN \"contacts\" ON \"users\".\"id\" = \"contacts\".\"user_id\" INNER JOIN \"posts\" ON \"comments\".\"post_id\" = \"posts\".\"id\\\"\"\"\"", "title": "" }, { "docid": "8a4385930556d940c60c04e59deb9051", "score": "0.5342531", "text": "def right_join(self, other):\n return self.join(other, \"right\")", "title": "" }, { "docid": "23d2d340b84675d7579fcee82ccac70e", "score": "0.53385895", "text": "def join(dataframe_1, dataframe_2, how, left_on, right_on, select):", "title": "" }, { "docid": "16d9ae42149ec0516d69abc3664747d0", "score": "0.52765995", "text": "def join_by(\n self,\n r2,\n key,\n jointype=\"inner\",\n r1postfix=\"1\",\n r2postfix=\"2\",\n defaults=None,\n asrecarray=False,\n asTable=True,\n ):\n # TODO: return a Table by default\n if asTable:\n asrecarray = True\n arr = recfunctions.join_by(\n key,\n self,\n r2,\n jointype=jointype,\n r1postfix=r1postfix,\n r2postfix=r2postfix,\n defaults=defaults,\n usemask=False,\n asrecarray=asrecarray,\n )\n\n return arr", "title": "" }, { "docid": "2efe120040b3e3ba6483cf32038142a3", "score": "0.52677643", "text": "def can_compile_join_clause_with_null(self):\n return \"\"\"SELECT * FROM \"users\" INNER JOIN \"report_groups\" AS \"rg\" ON \"acct\" IS NULL OR \"dept\" IS NOT NULL\"\"\"", "title": "" }, { "docid": "4cd01a0bbc91d703cb80efd2c27b9bee", "score": "0.5223127", "text": "def join(self, group1, group2):\n # verify group1 is the lower group index\n if group1 > group2:\n group1, group2 = group2, group1\n retval = copy.deepcopy(self.data)\n retval[group1] += retval[group2]\n del retval[group2]\n return GeneGroups(retval,'join')", "title": "" }, { "docid": "e315c13944c601b92b850fa4e8b4d033", "score": "0.521349", "text": "def test_query_rule_order_group_left_join(client, recwarn):\n recwarn.clear()\n query = Query(client, \"Rule\", order=['grouping', 'what', 'id'],\n join_specs={\"grouping\": \"LEFT OUTER JOIN\"})\n assert len(recwarn.list) == 0\n print(str(query))\n assert \"Rule\" in query.select_clause\n assert \"LEFT OUTER JOIN\" in query.join_clause\n assert query.where_clause is None\n assert \"what\" in query.order_clause\n res = client.search(query)\n assert len(res) == 
all_rules", "title": "" }, { "docid": "ed95114516d931e613c1286c58d9566c", "score": "0.52106273", "text": "def join(self, right_r, on_fields, where_template=None, project_fields=None, optimize=True):\n # If no optimizations are possible, do a simple nested loop join and then apply where_clause and\n # project clause to result.\n #\n # At least two vastly different optimizations are be possible. You should figure out two different optimizations\n # and implement them.\n #\n # if either table has indexes or there is a 'where' condition, do optimized join\n _, s_max = self.__get_access_path__(on_fields)\n _, r_max = right_r.__get_access_path__(on_fields)\n if optimize and ((s_max > 0 or rr_max > 0) or where_template is not None):\n result = self.execute_smart_join(right_r, on_fields, where_template=where_template, \\\n project_fields=project_fields, idx_selectivities=(s_max,r_max))\n else:\n result = self.execute_slow_join(right_r, on_fields, where_template=where_template, project_fields=project_fields)\n return self.table_from_rows(\"JOIN(\" + self.name() + \",\" + right_r.name() + \")\", result)", "title": "" }, { "docid": "5b09b710e5afcd0e97840c86778d6aca", "score": "0.5196911", "text": "def doJoin(self, dropextra = False):\n return DataFrame(self.mj.doJoin(dropextra), self.sqlContext)", "title": "" }, { "docid": "04c1d25dd481345699edd2834effbe47", "score": "0.5187923", "text": "def can_compile_join(self):\n return \"\"\"SELECT * FROM \"users\" INNER JOIN \"contacts\" ON \"users\".\"id\" = \"contacts\".\"user_id\\\"\"\"\"", "title": "" }, { "docid": "761effdfe4b1f530dc93751a995a91ce", "score": "0.51204413", "text": "def test_query_rule_order_group_suppress_warn_join(client, recwarn):\n recwarn.clear()\n query = Query(client, \"Rule\", order=['grouping', 'what', 'id'],\n join_specs={\"grouping\": \"INNER JOIN\"})\n assert len(recwarn.list) == 0\n print(str(query))\n assert \"Rule\" in query.select_clause\n assert \"INNER JOIN\" in query.join_clause\n assert query.where_clause is None\n assert \"what\" in query.order_clause\n res = client.search(query)\n assert len(res) == grp_rules", "title": "" }, { "docid": "c90e8194cf641ab060e269144980fe00", "score": "0.51099116", "text": "def get_joined_nodes_and_subsystems():\n return pd.read_sql_query(f\"\"\"\nselect * from {Tables.subsystems}\nleft join {Tables.nodes} \nON {Tables.subsystems}.node_id = {Tables.nodes}.node_id;\n\"\"\", db_connection())", "title": "" }, { "docid": "8844f4b5e257a0022a967c602f9f6d72", "score": "0.50947064", "text": "def _join_operation(self, left, right, op):\n if isinstance(left, SciDBArray):\n left_name = left.name\n left_fmt = '{left.a0f}'\n left_is_sdb = True\n else:\n left_name = None\n left_fmt = '{left}'\n left_is_sdb = False\n\n if isinstance(right, SciDBArray):\n right_name = right.name\n right_fmt = '{right.a0f}'\n right_is_sdb = True\n else:\n right_name = None\n right_fmt = '{right}'\n right_is_sdb = False\n\n # some common names needed below\n op = op.format(left=left_fmt, right=right_fmt)\n aL = aR = None\n permutation = None\n\n # Neither entry is a SciDBArray\n if not (left_is_sdb or right_is_sdb):\n raise ValueError(\"One of left/right needs to be a SciDBArray\")\n\n # Both entries are SciDBArrays\n elif (left_is_sdb and right_is_sdb):\n # array shapes match: use a join\n if left.shape == right.shape:\n if (left.chunk_size != right.chunk_size or\n left.chunk_overlap != right.chunk_overlap):\n raise ValueError(\"join operations require chunk_size/\"\n \"chunk_overlap to match.\")\n attr = 
_new_attribute_label('x', left, right)\n if left_name == right_name:\n # same array: we can do this without a join\n query = (\"store(project(apply({left}, {attr}, \"\n + op + \"), {attr}), {arr})\")\n else:\n query = (\"store(project(apply(join({left},{right}),{attr},\"\n + op + \"), {attr}), {arr})\")\n\n # array shapes are broadcastable: use a cross_join\n elif broadcastable(left.shape, right.shape):\n join_indices = []\n left_slices = []\n right_slices = []\n\n for tup in zip(reversed(list(enumerate(left.shape))),\n reversed(list(enumerate(right.shape)))):\n (i1, s1), (i2, s2) = tup\n if (s1 == s2):\n join_indices.append((i1, i2))\n if (left.chunk_size[i1] != right.chunk_size[i2] or\n left.chunk_overlap[i1] != right.chunk_overlap[i2]):\n raise ValueError(\"join operations require chunk_\"\n \"size/chunk_overlap to match.\")\n elif s1 == 1:\n left_slices.append(i1)\n elif s2 == 1:\n right_slices.append(i2)\n else:\n # should never get here, but just in case...\n raise ValueError(\"shapes cannot be broadcast\")\n\n # build the left slice query if needed\n if left_slices:\n dims = ','.join(\"{{left.d{0}f}},0\".format(sl)\n for sl in left_slices)\n left_query = \"slice({left},\" + dims + \") as {aL}\"\n aL = ArrayAlias(left, \"alias_left\")\n else:\n left_query = \"{aL}\"\n aL = left\n\n # build the right slice query if needed\n if right_slices:\n dims = ','.join(\"{{right.d{0}f}},0\".format(sl)\n for sl in right_slices)\n right_query = \"slice({right},\" + dims + \") as {aR}\"\n aR = ArrayAlias(right, \"alias_right\")\n else:\n right_query = \"{aR}\"\n aR = right\n\n # build the cross_join query\n dims = ','.join(\"{{aL.d{0}f}}, {{aR.d{1}f}}\".format(i, j)\n for i, j in join_indices)\n if dims:\n dims = ',' + dims\n\n query = (\"store(project(apply(cross_join(\" +\n left_query + \",\" + right_query + dims + \"),{attr},\" +\n op + \"), {attr}), {arr})\")\n attr = _new_attribute_label('x', left, right)\n\n # determine the dimension permutation\n # Here's the problem: cross_join puts all the left array dims\n # first, and the right array dims second. This is different\n # than numpy's broadcast behavior. 
It's also difficult to do\n # in a single operation because conflicting dimension names\n # have a rename scheme that might be difficult to duplicate.\n # So we compromise, and perform a dimension permutation on\n # the result if needed.\n left_shape = list(left.shape)\n right_shape = list(right.shape)\n i_left = 0\n i_right = len(join_indices) + len(right_slices)\n permutation = [-1] * max(left.ndim, right.ndim)\n\n # first pad the shapes so they're the same length\n if left.ndim > right.ndim:\n i_right += left.ndim - right.ndim\n right_shape = [-1] * (left.ndim - right.ndim) + right_shape\n else:\n left_shape = [-1] * (right.ndim - left.ndim) + left_shape\n\n # now loop through dimensions and build permutation\n for i, (L, R) in enumerate(zip(left_shape, right_shape)):\n if L == R or R == -1 or (R == 1 and L >= 0):\n permutation[i] = i_left\n i_left += 1\n elif L == -1 or (L == 1 and R >= 0):\n permutation[i] = i_right\n i_right += 1\n else:\n # This should never happen, but just to be sure...\n raise ValueError(\"shapes are not compatible\")\n\n if permutation == range(len(permutation)):\n permutation = None\n\n else:\n raise ValueError(\"Array of shape {0} can not be \"\n \"broadcasted with array of shape \"\n \"{1}\".format(left.shape, right.shape))\n\n # only left entry is a SciDBArray\n elif left_is_sdb:\n try:\n _ = float(right)\n except:\n raise ValueError(\"rhs must be a scalar or SciDBArray\")\n attr = _new_attribute_label('x', left)\n query = (\"store(project(apply({left}, {attr}, \"\n + op + \"), {attr}), {arr})\")\n\n # only right entry is a SciDBArray\n elif right_is_sdb:\n try:\n _ = float(left)\n except:\n raise ValueError(\"lhs must be a scalar or SciDBArray\")\n attr = _new_attribute_label('x', right)\n query = (\"store(project(apply({right}, {attr}, \"\n + op + \"), {attr}), {arr})\")\n\n arr = self.new_array()\n self.query(query, left=left, right=right,\n aL=aL, aR=aR,\n attr=attr, arr=arr)\n\n # reorder the dimensions if needed (for cross_join)\n if permutation is not None:\n arr = arr.transpose(permutation)\n return arr", "title": "" }, { "docid": "703d67ff6b5f2f92dedf3048356c8a80", "score": "0.5048455", "text": "def join_reports(dict_rb: dict, dict_ml: dict):\n all_types = set(dict_rb.keys()).union(dict_ml.keys())\n full_report = {}\n for t in all_types:\n rb_list, ml_list = [], []\n if t in dict_rb:\n rb_list = dict_rb[t]\n if t in dict_ml:\n ml_list = dict_ml[t]\n if rb_list and ml_list:\n\t # some black magic to merge both tables\n rb_df = pd.DataFrame(rb_list).replace(False, 0.0)\n original_names = list(rb_df[\"colonne\"].values)\n rb_df[\"colonne\"] = rb_df[\"colonne\"].str.lower()\n ml_df = pd.DataFrame(ml_list).replace(False, 0.0)\n merged_df = pd.merge(rb_df, ml_df, on=\"colonne\", how=\"left\").fillna(\"0.0\")\n merged_df.iloc[:len(original_names), 0] = original_names\n merged_df = merged_df.replace(False, 0.0)\n full_report[t] = merged_df.to_dict(\"records\")\n else:\n full_report[t] = rb_list or ml_list\n # add empty values to score_ml/score_rb\n completed_list = []\n for d in full_report[t]:\n if \"score_ml\" in d:\n d[\"score_rb\"] = 0.0\n else:\n d[\"score_ml\"] = 0.0\n completed_list.append(dict(d))\n full_report[t] = completed_list\n return full_report", "title": "" }, { "docid": "a812129dfaff00a8ce8d49aa97f10f17", "score": "0.503459", "text": "def test_get_report_group_list(self):\n pass", "title": "" }, { "docid": "8b7951da4b50d9e5957c77316afaf9be", "score": "0.50343204", "text": "def test_add_report_group(self):\n pass", "title": "" }, { "docid": 
"f0f69cde31ad74a4d79948ccb6feed0a", "score": "0.49680036", "text": "def transform(self, **kwargs):\n agg_results = self.agg(**kwargs)\n return self.left_join(agg_results, mapping={k: k for k in self.groups})", "title": "" }, { "docid": "48ba65425f07c2c61e90905f8bda39da", "score": "0.4966855", "text": "def test_join_advanced(self):\n a = models.Article\n\n # === Test: join x2, project, limit, sort\n for sorting, desc in (('theme', ''), ('theme-', ' DESC'), ('theme+', '')):\n mq = a.mongoquery().query(project=['title'],\n join={'comments': dict(project=['aid'],\n join={'user': dict(project=['name'])})},\n limit=2,\n sort=[sorting])\n\n qs = self.assertQuery(mq.end(),\n # A subquery\n \"FROM (SELECT a.\",\n # Ordering within, LIMIT within\n \"FROM a ORDER BY a.theme{} \\n LIMIT 2) AS anon_1 \"\n .format(desc),\n # Joins outside of the subquery\n \") AS anon_1 LEFT OUTER JOIN c AS c_1 ON anon_1.a_id = c_1.aid \"\n \"LEFT OUTER JOIN u AS u_1 ON u_1.id = c_1.uid\",\n # Another ORDER BY on the outside query\n \"ORDER BY anon_1.a_theme{}\"\n .format(desc)\n )\n self.assertSelectedColumns(qs,\n 'anon_1.a_id', 'anon_1.a_title',\n 'u_1.id', 'u_1.name',\n 'c_1.id', 'c_1.aid',\n # side-effect: columns mentioned in ORDER BY are now included into the results\n 'anon_1.a_theme',\n )\n\n # === Test: join x3, project, limit\n mq = a.mongoquery().query(project=['title'],\n join={'comments': dict(project=['aid'],\n join={'user': dict(project=['name'],\n join={'roles': dict(project=['title'])})})},\n limit=2)\n qs = self.assertQuery(mq.end(),\n # Subquery, LIMIT\n \"FROM (SELECT a.\",\n \"FROM a \\nLIMIT 2) AS anon_1\",\n # Joins\n \"LEFT OUTER JOIN c AS c_1 ON anon_1.a_id = c_1.aid \"\n \"LEFT OUTER JOIN u AS u_1 ON u_1.id = c_1.uid \"\n \"LEFT OUTER JOIN r AS r_1 ON u_1.id = r_1.uid\",\n )\n self.assertSelectedColumns(qs,\n 'anon_1.a_id', 'anon_1.a_title',\n 'c_1.id', 'c_1.aid',\n 'u_1.id', 'u_1.name',\n 'r_1.id', 'r_1.title'\n )\n\n # More tests\n u = models.User\n\n # === Test: two joins to the same model\n # Okay\n mq = u.mongoquery().query(join={'articles': dict(project=('title',)), 'comments': dict()})\n self.assertQuery(mq.end(),\n 'FROM u',\n 'LEFT OUTER JOIN a',\n #'LEFT OUTER JOIN c' # selectinload() used here, no join\n )\n\n # Unknown relation\n mq = u.mongoquery()\n with self.assertRaises(InvalidRelationError):\n mq.query(join=['???'])", "title": "" }, { "docid": "3c2d90b747d71f00c930213cb3c9e05b", "score": "0.4965774", "text": "def join(self, ab=None, pq=None, rs=None): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4c4b8596992491d9ce7ec69133db349f", "score": "0.49367872", "text": "def execute_slow_join(self, right_r, on_fields, where_template=None, project_fields=None):\n left_r = self\n left_rows = self.__rows__\n right_rows = right_r.get_row_list()\n result_rows = []\n\n left_rows_processed = 0\n for lr in left_rows:\n on_template = self.get_on_template(lr, on_fields)\n for rr in right_rows:\n if self.matches_template(rr, on_template):\n new_r = {**lr, **rr}\n result_rows.append(new_r)\n left_rows_processed += 1\n if left_rows_processed % 10 == 0 and left_rows_processed > 0:\n print (\"Processed {}/{} left rows...\".format(left_rows_processed, len(left_rows)))\n\n join_result = self.table_from_rows(\"JOIN(\" + left_r.name() + \",\" + right_r.name() + \")\", result_rows)\n result = join_result.find_by_template(where_template, fields=project_fields)\n return result", "title": "" }, { "docid": "96d91716c710b580e64c73aa865a86e0", "score": "0.49351513", "text": 
"def _generate_join(collection, local_id, foreign_id):\n text = '{{!join from={fid} to={lid} fromIndex={collection}}}'\n text = text.format(\n collection=collection,\n lid=local_id,\n fid=foreign_id)\n return text", "title": "" }, { "docid": "7892a689ed65796da1a71a28eef247cf", "score": "0.4910881", "text": "def can_compile_left_join(self):\n return \"\"\"SELECT * FROM \"users\" LEFT JOIN \"contacts\" ON \"users\".\"id\" = \"contacts\".\"user_id\\\"\"\"\"", "title": "" }, { "docid": "7aead0ddfa3426c9de3dd3f2a1c12aa6", "score": "0.48939103", "text": "def join_2_query1():\n\n desired_year = 2017\n desired_leaguename = \"UEFA Champions League\"\n desired_goals = 4\n\n sql = text('''SELECT distinct t.teamID, t.location, t.dateCreated FROM Team t, Game g, Competition c, Season s WHERE\n g.seasonID = s.seasonID AND g.competitionID = c.competitionID AND\n (g.winningTeamID = t.teamID OR g.losingTeamID = t.teamID) AND t.goals > ''' +\n str(desired_goals) + ''' AND c.name = \"''' + desired_leaguename + '''\"''' +\n ''' AND s.seasonID = ''' + str(desired_year))\n\n data = db.engine.execute(sql)\n a_data = [list(row) for row in data]\n print(a_data)", "title": "" }, { "docid": "4fa7e9e4d2df32b6b0297cacae683084", "score": "0.48874822", "text": "def left_join(self, other):\n return self.join(other, \"left\")", "title": "" }, { "docid": "f59143715d3555724c59826ef1472411", "score": "0.48618075", "text": "def join(a: DataFrame, b: DataFrame, on: list, how=\"left\") -> DataFrame:\n return a.join(b, on=on, how=how)", "title": "" }, { "docid": "f16952ac0033060f6191bfc7cc0580e7", "score": "0.48456067", "text": "def join(left_result, right_result, conditions, left_name, right_name):\n\n final_result = dict()\n if any(\"@iot.navigationLink\" in c for c in conditions):\n if \"@iot.navigationLink\" in conditions[0]:\n index = 0\n else:\n index = 2\n\n if left_name in conditions[index]:\n result = left_result\n opposite = right_result\n else:\n result = right_result\n opposite = left_result\n right_name = left_name\n\n for l in result:\n address = l[conditions[index]]\n left = get(sending_address=address)\n if type(left) != list:\n return left\n i = 0\n for l_result in left:\n for r in opposite:\n partial_result = dict()\n if l_result[\"@iot.selfLink\"] == r[\"[\" + right_name + \"]\" + \"@iot.selfLink\"]:\n partial_result.update(l)\n partial_result.update(r)\n final_result[i] = partial_result\n i += 1\n return final_result", "title": "" }, { "docid": "638c1170459619b588449bb6f1024785", "score": "0.48229092", "text": "def test_get_report_group_by_id(self):\n pass", "title": "" }, { "docid": "7b92522874c2d294787b383c36f1aca0", "score": "0.48073816", "text": "def record_join(records):\n return RECORD_SEPARATOR.join(records)", "title": "" }, { "docid": "3387d23d29ef0c2d1193d7448e7f1aeb", "score": "0.47836003", "text": "def compile_join_field(expr, join_type):\n if join_type is tq_ast.JoinType.CROSS:\n assert expr is None, (\n \"Cross joins do not allow join conditions.\")\n return [None]\n if isinstance(expr, tq_ast.BinaryOperator):\n if expr.operator == 'and':\n return list(itertools.chain(\n compile_join_field(expr.left, join_type),\n compile_join_field(expr.right, join_type)))\n elif (expr.operator in ('=', '==') and\n isinstance(expr.left, tq_ast.ColumnId) and\n isinstance(expr.right, tq_ast.ColumnId)):\n # For evaluation, we want the ordering of the columns in\n # the JoinField to match the ordering of the join, left to\n # right, but bigquery allows either order. 
Thus we need to\n # reorder them if they're reversed.\n # TODO(colin): better error message if we don't find an\n # alias?\n lhs_alias_idx = next(\n idx\n for idx, alias in enumerate(aliases)\n if expr.left.name.startswith(alias + \".\")\n )\n rhs_alias_idx = next(\n idx\n for idx, alias in enumerate(aliases)\n if expr.right.name.startswith(alias + \".\")\n )\n left_column_id = self.compile_ColumnId(\n expr.left,\n type_contexts[lhs_alias_idx])\n right_column_id = self.compile_ColumnId(\n expr.right,\n type_contexts[rhs_alias_idx])\n\n if lhs_alias_idx < rhs_alias_idx:\n return [typed_ast.JoinFields(left_column_id,\n right_column_id)]\n elif rhs_alias_idx < lhs_alias_idx:\n return [typed_ast.JoinFields(right_column_id,\n left_column_id)]\n # Fall through to the error case if the aliases are the\n # same for both sides.\n raise exceptions.CompileError(\n 'JOIN conditions must consist of an AND of = '\n 'comparisons between two field on distinct '\n 'tables. Got expression %s' % expr)", "title": "" }, { "docid": "ed732dad3b86e4cf6c0d14a015bbbe6c", "score": "0.47645697", "text": "def join(cls, cond, a, b):\n\n if cls._options is None or cond._options is None:\n raise DBMapperException('Static database options have not been set.')\n\n dba = cls._options\n obja = cls(dba['database'])\n dbb = cond._options\n objb = cond(dbb['database'])\n cur = dba['database'].cursor()\n primaryKeyA = dba['keys'][dba['options'][DBMapper.INDEX_PRIMARY]]\n primaryKeyB = dbb['keys'][dba['options'][DBMapper.INDEX_PRIMARY]]\n\n query = \"select A.%s, B.%s from %s as A join %s as B on A.%s=B.%s\" % (\n primaryKeyA, primaryKeyB, obja._table, objb._table, a, b)\n result = obja.__execute(cur, query, fetch = DBMapper.FETCH_ALL)\n\n load_pair_a = []\n load_pair_b = []\n for row in result:\n load_pair_a.append({primaryKeyA : row[0]})\n load_pair_b.append({primaryKeyB : row[1]})\n\n return (DBResultList([cls.load(**pair) for pair in load_pair_a]), DBResultList([cond.load(**pair) for pair in load_pair_b]),)", "title": "" }, { "docid": "e60600513d2783b2e157ab9abc3f861e", "score": "0.47458452", "text": "def execute_smart_join(self, right_r, on_fields, where_template=None, \\\n project_fields=None, idx_selectivities=(-1,-1)):\n # set table with maximally selective index to probe table\n # check left table for relevant indexes\n s_max, r_max = idx_selectivities\n if s_max > r_max:\n left_r = right_r\n right_r = self\n selected_l = left_r.find_by_template(where_template) # optimization\n selected_l = left_r.table_from_rows(\"LEFTSELECTED\", selected_l)\n left_rows = selected_l.get_row_list()\n right_rows = self.get_row_list()\n else:\n left_r = self\n selected_l = self.find_by_template(where_template) # optimization = select before join\n selected_l = self.table_from_rows(\"LEFTSELECTED\", selected_l)\n left_rows = selected_l.get_row_list()\n right_rows = right_r.get_row_list()\n result_rows = []\n\n left_rows_processed = 0\n for lr in left_rows:\n on_template = self.get_on_template(lr, on_fields)\n # use index to find matches\n matching_rows = right_r.find_by_template(on_template)\n result_rows += [{**lr, **rr} for rr in matching_rows]\n left_rows_processed += 1\n if left_rows_processed % 10000 == 0 and left_rows_processed > 0:\n print (\"Processed {}/{} left rows...\".format(left_rows_processed, len(left_rows)))\n\n join_result = self.table_from_rows(\"JOIN(\" + left_r.name() + \",\" + \\\n right_r.name() + \")\", result_rows)\n result = join_result.find_by_template(where_template, fields=project_fields)\n return result", 
"title": "" }, { "docid": "b61217b86bd3d9e637767d1d62f50272", "score": "0.47379592", "text": "def build_select_query(self, grid_obj):\n\n select_query = 'SELECT '\n select_count_query = 'SELECT COUNT(*) '\n from_query = ''\n\n if self.view_type == reportManagerConstants.REPORT_BOOKING:\n self.default_sort_col = \" b.updated_at \"\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_BOOKING_MASTER_SQL_WHERE_COLS_MAP\n self.group_by_query = ' group by b.id '\n\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_BOOKING_MASTER_SQL_COLS)\n from_query = \"\"\"\n from\n core_booking b\n inner join core_bumperuser bu on b.user_id=bu.id\n inner join core_bookingstatus bs on b.status_id=bs.id\n inner join core_usercar uc on uc.id = b.usercar_id\n left outer join core_bumperuser bu1 on b.assigned_to_id=bu1.id\n left outer join core_bookingopsstatus bss on b.ops_status_id=bss.id\n left outer join core_carmodel cm on uc.car_model_id=cm.id\n left outer join core_carbrand cb on cm.brand_id=cb.id\n left outer join core_workshop w on w.id = b.workshop_id\n \"\"\"\n\n from_query += \" WHERE bu.phone is not null \"\n if grid_obj['user'].groups.filter(name__in=['DriverManager', 'Driver']).exists() and not grid_obj['user'].groups.filter(name__in=['OpsManager', 'OpsAdmin']).exists():\n from_query += \" and ((bs.flow_order_num >= 3 and bs.flow_order_num <= 10 and (b.ops_status_id is null or b.ops_status_id !=8)) \" \\\n \" or (bs.flow_order_num >= 19 and bs.flow_order_num < 22)) \"\n\n elif self.view_type == reportManagerConstants.REPORT_BOOKING_HISTORY:\n self.default_sort_col = \" b.updated_at \"\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_BOOKING_HISTORY_MASTER_SQL_WHERE_COLS_MAP\n self.group_by_query = ' '\n\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_BOOKING_HISTORY_MASTER_SQL_COLS)\n from_query = \"\"\"\n from\n core_historicalbooking b\n inner join core_bumperuser bu on b.user_id=bu.id\n inner join core_bookingstatus bs on b.status_id=bs.id\n inner join core_usercar uc on uc.id = b.usercar_id\n left outer join core_bumperuser bu1 on b.assigned_to_id=bu1.id\n left outer join core_bumperuser bu2 on b.updated_by_id=bu2.id\n left outer join core_bookingopsstatus bss on b.ops_status_id=bss.id\n left outer join core_carmodel cm on uc.car_model_id=cm.id\n left outer join core_carbrand cb on cm.brand_id=cb.id\n left outer join core_workshop w on w.id = b.workshop_id\n \"\"\"\n\n from_query += \" WHERE b.id > 0 \"\n\n elif self.view_type == reportManagerConstants.REPORT_BOOKINGS_BY_USERS:\n self.default_sort_col = \" bu.id \"\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_BOOKINGS_BY_USERS_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_BOOKINGS_BY_USERS_MASTER_SQL_COLS)\n from_query = \"\"\"\n from core_bumperuser bu\n left outer join core_booking b on b.user_id=bu.id\n left outer join core_bookingstatus bs on bs.id=b.status_id\n \"\"\"\n\n from_query += \"\"\"\n WHERE not exists (select 1 from core_internalaccounts ia where ia.phone=bu.phone)\n AND exists(select 1 from core_bumperuser_groups bug\n inner join auth_group ag on ag.id=bug.group_id\n where bug.bumperuser_id=bu.id and ag.name='BumperUser')\n 
\"\"\"\n\n elif self.view_type == reportManagerConstants.REPORT_USER:\n self.default_sort_col = \" bu.id \"\n self.group_by_query = ' group by bu.id '\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_USER_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_USER_MASTER_SQL_COLS)\n from_query = \"\"\"\n from core_bumperuser bu\n left outer join core_city c on bu.city_id = c.id\n left outer join core_usercar uc on uc.user_id = bu.id\n left outer join core_carmodel cm on uc.car_model_id = cm.id\n left outer join core_booking b on b.user_id=bu.id\n \"\"\"\n\n from_query += \" WHERE bu.id>0 \"\n\n elif self.view_type == reportManagerConstants.REPORT_NOTIFY_USER:\n self.default_sort_col = \" bu.id \"\n self.group_by_query = ' group by bu.id '\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_NOTIFY_USER_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_NOTIFY_USER_MASTER_SQL_COLS)\n from_query = \"\"\"\n from core_bumperuser bu\n left outer join core_city c on bu.city_id = c.id\n left outer join core_usercar uc on uc.user_id = bu.id\n left outer join core_carmodel cm on uc.car_model_id = cm.id\n left outer join core_booking b on b.user_id=bu.id\n \"\"\"\n\n from_query += \" WHERE exists(select 1 from core_userdevices uds where uds.user_id=bu.id) \"\n\n elif self.view_type == reportManagerConstants.REPORT_BOOKING_NOTIFICATIONS_SENT:\n self.default_sort_col = \" m.id \"\n self.group_by_query = ' group by m.id '\n\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_BOOKING_NOTIFICATIONS_SENT_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_BOOKING_NOTIFICATIONS_SENT_MASTER_SQL_COLS)\n from_query = \"\"\"\n from core_messages m\n left outer join core_messageuser mu on mu.message_id=m.id\n left outer join core_bumperuser bu1 on m.sent_by_id=bu1.id\n left outer join core_notifications n on m.notification_id=n.id\n \"\"\"\n\n from_query += \" WHERE m.id>1 \"\n\n elif self.view_type == reportManagerConstants.REPORT_BOOKING_FOLLOWUPS:\n self.default_sort_col = \" f.id \"\n\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_BOOKING_FOLLOWUPS_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_BOOKING_FOLLOWUPS_MASTER_SQL_COLS)\n from_query = \"\"\"\n from core_followup f\n inner join core_booking_followup bf on bf.followup_id = f.id\n inner join core_booking b on b.id = bf.booking_id\n inner join core_bumperuser bu on bu.id=f.updated_by_id\n inner join core_bumperuser bu1 on b.user_id=bu1.id\n inner join core_bookingstatus bs on bs.id=b.status_id\n \"\"\"\n\n from_query += \" WHERE f.note is not null \"\n\n elif self.view_type == reportManagerConstants.REPORT_INQUIRY_FOLLOWUPS:\n self.default_sort_col = \" f.id \"\n\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_INQUIRY_FOLLOWUPS_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_INQUIRY_FOLLOWUPS_MASTER_SQL_COLS)\n from_query = 
\"\"\"\n from core_followup f\n inner join core_userinquiry_followup uif on uif.followup_id = f.id\n inner join core_userinquiry ui on ui.id = uif.userinquiry_id\n inner join core_bumperuser bu on bu.id=f.updated_by_id\n inner join core_bumperuser bu1 on ui.user_id=bu1.id\n \"\"\"\n\n from_query += \" WHERE f.note is not null \"\n\n elif self.view_type == reportManagerConstants.REPORT_USER_INQUIRY:\n self.default_sort_col = \" ui.id \"\n\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_USER_INQUIRY_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_USER_INQUIRY_MASTER_SQL_COLS)\n from_query = \"\"\"\n from core_userinquiry ui\n inner join core_bumperuser bu on ui.user_id = bu.id\n left outer join core_bumperuser bu1 on ui.assigned_to_id = bu1.id\n left outer join core_carmodel cm on cm.id = ui.car_model_id\n left outer join core_carbrand cb on cb.id = cm.brand_id\n \"\"\"\n\n from_query += \" WHERE ui.id > 0 \"\n\n elif self.view_type == reportManagerConstants.REPORT_BOOKING_IN_STATUS:\n self.default_sort_col = \" bs.flow_order_num \"\n self.default_sort_ord = \" \"\n self.group_by_query = ' group by bs.id '\n\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_BOOKING_IN_STATUS_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_BOOKING_IN_STATUS_MASTER_SQL_COLS)\n from_query = \"\"\"\n from core_booking b\n inner join core_bookingstatus bs on bs.id = b.status_id\n \"\"\"\n\n from_query += \" WHERE b.id > 0 \"\n\n elif self.view_type == reportManagerConstants.REPORT_USER_INQUIRY_IN_STATUS:\n self.default_sort_col = \" ui.status \"\n self.default_sort_ord = \" \"\n self.group_by_query = ' group by ui.status '\n\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_USER_INQUIRY_IN_STATUS_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_USER_INQUIRY_IN_STATUS_MASTER_SQL_COLS)\n from_query = \"\"\"\n from core_userinquiry ui\n \"\"\"\n\n from_query += \" WHERE ui.id > 0 \"\n\n elif self.view_type == reportManagerConstants.REPORT_WORKSHOP_LIVE:\n self.default_sort_col = \" b.id \"\n self.default_sort_ord = \" \"\n self.group_by_query = ' '\n\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_WORKSHOP_LIVE_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_WORKSHOP_LIVE_MASTER_SQL_COLS)\n from_query = \"\"\"\n FROM core_booking b\n inner join core_bumperuser bu ON b.user_id = bu.id\n inner join core_bookingstatus bs ON bs.id = b.status_id\n inner join core_workshop w ON w.id = b.workshop_id\n inner join core_usercar AS uc ON uc.id = b.usercar_id\n inner join core_carmodel cm ON cm.id=uc.car_model_id\n inner join core_city c on c.id=b.city_id\n left outer join core_bookingopsstatus bos ON bos.id = b.ops_status_id\n left outer join core_bumperuser buw ON b.workshop_asst_mgr_id = buw.id\n \"\"\"\n\n from_query += \"\"\"\n WHERE not exists(SELECT 1 FROM core_internalaccounts ia WHERE ia.phone=bu.phone)\n AND exists (SELECT 1 FROM core_bookingpackage bp\n inner join core_packageprice pp ON pp.id=bp.package_id\n inner join 
core_package p ON pp.package_id=p.id\n WHERE bp.booking_id=b.id AND p.category in (2,3) limit 1)\n AND bs.flow_order_num >= 9\n AND bs.flow_order_num < 20\n \"\"\"\n\n elif self.view_type == reportManagerConstants.REPORT_SUMMARY_PICKED:\n self.default_sort_col = \" b.id \"\n self.default_sort_ord = \" \"\n self.group_by_query = \" \"\n\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_SUMMARY_PICKED_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_SUMMARY_PICKED_MASTER_SQL_COLS)\n from_query = \"\"\"\n FROM core_booking b\n inner join core_bumperuser bu ON b.user_id = bu.id\n inner join core_bookingstatus bs on bs.id = b.status_id\n \"\"\"\n\n from_query += \"\"\"\n WHERE not exists(SELECT 1 FROM core_internalaccounts ia WHERE ia.phone=bu.phone)\n AND bu.phone is not null\n AND bs.flow_order_num >= 9\n AND b.status_id !=24\n AND b.return_reason_id is null\n AND b.rework_booking_id is null\n AND exists(select 1 from core_bookingpackage bp inner join core_packageprice pp ON pp.id=bp.package_id\n inner join core_package p ON pp.package_id=p.id WHERE bp.booking_id=b.id and p.category in (2,3) limit 1)\n \"\"\"\n\n elif self.view_type == reportManagerConstants.REPORT_SUMMARY_TO_BE_PICKED:\n self.default_sort_col = \" b.id \"\n self.default_sort_ord = \" \"\n self.group_by_query = \" \"\n\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_SUMMARY_TO_BE_PICKED_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_SUMMARY_TO_BE_PICKED_MASTER_SQL_COLS)\n from_query = \"\"\"\n FROM core_booking b\n inner join core_bumperuser bu ON b.user_id = bu.id\n inner join core_bookingstatus bs on bs.id = b.status_id\n \"\"\"\n\n from_query += \"\"\"\n WHERE not exists(SELECT 1 FROM core_internalaccounts ia WHERE ia.phone=bu.phone)\n AND bu.phone is not null\n AND b.return_reason_id is null\n AND b.rework_booking_id is null\n AND bs.flow_order_num = 3 and (b.ops_status_id is null or b.ops_status_id !=8)\n AND exists(select 1 from core_bookingpackage bp inner join core_packageprice pp ON pp.id=bp.package_id\n inner join core_package p ON pp.package_id=p.id WHERE bp.booking_id=b.id and p.category in (2,3) limit 1)\n \"\"\"\n\n elif self.view_type == reportManagerConstants.REPORT_ALERTS_RAISED:\n\n self.default_sort_col = \" ta.resolved,ta.id desc\"\n self.default_sort_ord = \" \"\n\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_ALERTS_RAISED_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_ALERTS_RAISED_MASTER_SQL_COLS)\n from_query = \"\"\"\n FROM core_teamalert ta\n inner join core_teamalertreason tar ON ta.alert_reason_id = tar.id\n inner join core_bumperuser bu on ta.updated_by_id = bu.id\n left outer join core_workshop w on ta.workshop_id = w.id\n \"\"\"\n\n elif self.view_type == reportManagerConstants.REPORT_SCRATCH_FINDER_USERS:\n\n self.default_sort_col = \" sfu.date_joined\"\n self.default_sort_ord = \" desc \"\n\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_SCRATCH_FINDER_USERS_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += 
','.join(reportManagerConstants.REPORT_SCRATCH_FINDER_USERS_MASTER_SQL_COLS)\n from_query = \"\"\"\n FROM core_bumperuser sfu\n \"\"\"\n\n from_query += \"\"\"\n WHERE exists(select 1 from core_bumperuser_groups bug\n inner join auth_group ag on ag.id=bug.group_id\n where bug.bumperuser_id=sfu.id and ag.name='ScratchFinder')\n \"\"\"\n\n elif self.view_type == reportManagerConstants.REPORT_SCRATCH_FINDER_LEADS:\n\n self.default_sort_col = \" sfl.created_at\"\n self.default_sort_ord = \" desc \"\n\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_SCRATCH_FINDER_LEADS_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_SCRATCH_FINDER_LEADS_MASTER_SQL_COLS)\n from_query = \"\"\"\n FROM core_scratchfinderlead sfl\n inner join core_bumperuser bu on bu.id = sfl.user_id\n left outer join core_carmodel cm on sfl.car_model_id=cm.id\n left outer join core_carbrand cb on cm.brand_id=cb.id\n left outer join core_bumperuser bu1 on bu1.id = sfl.updated_by_id\n left outer join core_media m on m.id= sfl.media_id\n \"\"\"\n\n elif self.view_type == reportManagerConstants.REPORT_FEEDBACK_BY_CUSTOMER:\n\n self.default_sort_col = \" bcf.id\"\n self.default_sort_ord = \" desc \"\n\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_FEEDBACK_BY_CUSTOMER_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_FEEDBACK_BY_CUSTOMER_MASTER_SQL_COLS)\n from_query = \"\"\"\n FROM core_bookingcustfeedback bcf\n inner join core_booking b on b.id = bcf.booking_id\n inner join core_bumperuser bu on bu.id = b.user_id\n \"\"\"\n\n elif self.view_type == reportManagerConstants.REPORT_FEEDBACK_BY_OPS:\n\n self.default_sort_col = \" bf.id\"\n self.default_sort_ord = \" desc \"\n\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_FEEDBACK_BY_OPS_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_FEEDBACK_BY_OPS_MASTER_SQL_COLS)\n from_query = \"\"\"\n FROM core_bookingfeedback bf\n inner join core_booking b on b.id = bf.booking_id\n inner join core_bumperuser bu on bu.id = b.user_id\n \"\"\"\n\n elif self.view_type == reportManagerConstants.REPORT_CREW_DASHBOARD_PICKUP:\n\n self.default_sort_col = \" b.id\"\n self.default_sort_ord = \" \"\n\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_CREW_DASHBOARD_PICKUP_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_CREW_DASHBOARD_PICKUP_MASTER_SQL_COLS)\n from_query = \"\"\"\n FROM core_booking b\n inner join core_bumperuser bu ON bu.id = b.user_id\n inner join bumper2.core_bookingstatus bs ON bs.id = b.status_id\n left outer join bumper2.core_bookingopsstatus bos ON bos.id = b.ops_status_id\n left outer join bumper2.core_workshop w ON w.id = b.workshop_id\n left outer join bumper2.core_bookingaddress bap on bap.booking_id=b.id and bap.type=1\n left outer join bumper2.core_address ap on ap.id=bap.address_id\n left outer join bumper2.core_bookingaddress bad on bad.booking_id=b.id and bad.type=2\n left outer join bumper2.core_address ad on ad.id=bad.address_id\n left outer join bumper2.core_bumperuser bup on 
bup.id=b.pickup_driver_id\n left outer join bumper2.core_bumperuser bud on bud.id=b.drop_driver_id\n \"\"\"\n\n from_query += \"\"\"\n WHERE not exists(SELECT 1 FROM bumper2.core_internalaccounts ia WHERE ia.phone=bu.phone)\n AND bs.flow_order_num >= 3\n AND b.status_id !=24\n AND (b.ops_status_id is null or b.ops_status_id !=8)\n \"\"\"\n\n elif self.view_type == reportManagerConstants.REPORT_PART_DOCS:\n\n self.default_sort_col = \" bpd.id\"\n self.default_sort_ord = \" desc \"\n\n # set the where columns that will be used if server side filtering or sorting is used.\n self.master_sql_col_map = reportManagerConstants.REPORT_PART_DOCS_MASTER_SQL_WHERE_COLS_MAP\n # select columns\n select_query += ','.join(reportManagerConstants.REPORT_PART_DOCS_MASTER_SQL_COLS)\n from_query = \"\"\"\n from core_bookingpartdoc bpd\n inner join core_partdocstatus pds on pds.id=bpd.status_id\n inner join core_bookingpackagepanel bpp on bpp.id=bpd.booking_part_id\n inner join core_bookingpackage bp on bp.id=bpp.booking_package_id\n inner join core_booking b on b.id=bp.booking_id\n inner join core_bumperuser bu on bu.id=b.user_id\n inner join core_city c on c.id=b.city_id\n inner join core_carpanelprice cpp on cpp.id=bpp.panel_id\n inner join core_carpanel cp on cp.id=cpp.car_panel_id\n inner join core_usercar uc on uc.id=b.usercar_id\n inner join core_carmodel cm on cm.id=uc.car_model_id\n inner join core_carbrand cb on cb.id=cm.brand_id\n left outer join core_carmodelvariant cmv on cmv.id = uc.variant_id\n \"\"\"\n\n from_query += \"\"\"\n WHERE not exists(SELECT 1 FROM bumper2.core_internalaccounts ia WHERE ia.phone=bu.phone)\n \"\"\"\n\n select_query += from_query\n select_count_query += from_query\n return select_query, select_count_query", "title": "" }, { "docid": "3d15007f775d42f700ec4073bb92ee20", "score": "0.47366187", "text": "def ReferenceEntities(entities, group_results):", "title": "" }, { "docid": "ccb6ee7662e22fab7b813ec1bc5ad7db", "score": "0.47183698", "text": "def join_condition_expression(self):\n s = \"\"\n\n if hasattr(self.fk_field, \"related_fields\"):\n for related_fields in self.fk_field.related_fields:\n\n if s:\n s += \" AND \"\n fk = related_fields[0]\n f = related_fields[1]\n s += f'\"{fk.model._meta.db_table}\".\"{fk.column}\" = \"{f.model._meta.db_table}\".\"{f.column}\"'\n elif isinstance(self.fk_field, ManyToOneRel):\n fk = self.fk_field\n for f_from, f_to in fk.get_joining_columns():\n if s:\n s += \" AND \"\n s += f'\"{self.alias or fk.model._meta.db_table}\".\"{f_from}\" = \"{fk.related_model._meta.db_table}\".\"{f_to}\"'\n elif isinstance(self.fk_field, ManyToManyField):\n\n pass\n else:\n m = f\"\"\"\n op: {self.join_operator}\n model: {self.model_table}\n fk_field: {self.fk_field}\n fk_relation: {self.fk_relation}\n \"\"\"\n raise Exception(f\"Could not find 'related_fields' in 'self.fk_field': {m}\")\n\n return f\"({s})\"", "title": "" }, { "docid": "4f47e544c6f752a79b35ebf3fc8c12c9", "score": "0.47172454", "text": "def join_2_query2():\n\n desired_goals = 2\n desired_city = \"Europe\"\n\n sql = text('''SELECT distinct a.name, a.teamID, a.status, a.salary FROM Athlete a, Competition c, Game g, Season s WHERE c.winner = a.teamID AND\n g.competitionID = c.competitionID AND g.seasonID = s.seasonID AND a.goals > ''' + str(desired_goals) + ''' AND\n s.location = \"''' + desired_city + '''\"''')\n\n data = db.engine.execute(sql)\n a_data = [list(row) for row in data]\n print(a_data)", "title": "" }, { "docid": "e62a901511df47fee1dadea2fc544e0b", "score": "0.47075334", "text": 
"def compile_join_fields(self, type_contexts, aliases, conditions,\n join_types):\n def compile_join_field(expr, join_type):\n \"\"\"Compile a single part of the join.\n\n This results in a list of one or more join fields, depending on\n whether or not multiple are ANDed together.\n \"\"\"\n if join_type is tq_ast.JoinType.CROSS:\n assert expr is None, (\n \"Cross joins do not allow join conditions.\")\n return [None]\n if isinstance(expr, tq_ast.BinaryOperator):\n if expr.operator == 'and':\n return list(itertools.chain(\n compile_join_field(expr.left, join_type),\n compile_join_field(expr.right, join_type)))\n elif (expr.operator in ('=', '==') and\n isinstance(expr.left, tq_ast.ColumnId) and\n isinstance(expr.right, tq_ast.ColumnId)):\n # For evaluation, we want the ordering of the columns in\n # the JoinField to match the ordering of the join, left to\n # right, but bigquery allows either order. Thus we need to\n # reorder them if they're reversed.\n # TODO(colin): better error message if we don't find an\n # alias?\n lhs_alias_idx = next(\n idx\n for idx, alias in enumerate(aliases)\n if expr.left.name.startswith(alias + \".\")\n )\n rhs_alias_idx = next(\n idx\n for idx, alias in enumerate(aliases)\n if expr.right.name.startswith(alias + \".\")\n )\n left_column_id = self.compile_ColumnId(\n expr.left,\n type_contexts[lhs_alias_idx])\n right_column_id = self.compile_ColumnId(\n expr.right,\n type_contexts[rhs_alias_idx])\n\n if lhs_alias_idx < rhs_alias_idx:\n return [typed_ast.JoinFields(left_column_id,\n right_column_id)]\n elif rhs_alias_idx < lhs_alias_idx:\n return [typed_ast.JoinFields(right_column_id,\n left_column_id)]\n # Fall through to the error case if the aliases are the\n # same for both sides.\n raise exceptions.CompileError(\n 'JOIN conditions must consist of an AND of = '\n 'comparisons between two field on distinct '\n 'tables. 
Got expression %s' % expr)\n return [compile_join_field(expr, join_type)\n for expr, join_type in zip(conditions, join_types)]", "title": "" }, { "docid": "ad0dd2a3723f59f273ca9de4148cf7bb", "score": "0.4668281", "text": "def join(self, _):\n return None", "title": "" }, { "docid": "58492afe044357696a70a1ffaeddb7ee", "score": "0.4657782", "text": "def inner_join(self, other):\n return self.join(other, \"inner\")", "title": "" }, { "docid": "b2881d479eea3bea53dd9af83aa7096d", "score": "0.4633822", "text": "def test_query_order_one_to_many_warning_suppressed(client, recwarn):\n recwarn.clear()\n query = Query(client, \"Investigation\",\n order=['investigationInstruments.instrument.fullName'],\n join_specs={\"investigationInstruments\": \"INNER JOIN\"})\n assert len(recwarn.list) == 0\n print(str(query))\n assert \"Investigation\" in query.select_clause\n assert \"INNER JOIN\" in query.join_clause\n assert \"instrument\" in query.join_clause\n assert query.where_clause is None\n assert \"fullName\" in query.order_clause\n res = client.search(query)\n assert len(res) == 3", "title": "" }, { "docid": "e479dc209de12756cf5f7400499a206b", "score": "0.46297687", "text": "def CreateJoinedSummary(df_chicago_collision_data, df_flight_call, df_light_levels):\n # Join between chicago_collision_data and light_level\n df_summary = pd.merge(df_chicago_collision_data, df_light_levels,\n how='inner', left_on=['Date'], right_on=['Date'])\n # Join between df_new and flight_call using 'Genus','Species' tables\n df_summary = pd.merge(df_summary, df_flight_call, how='inner', left_on=[\n 'Genus', 'Species'], right_on=['Genus', 'Species'])\n return df_summary", "title": "" }, { "docid": "3316616e1afab2a2b709e1500d45871b", "score": "0.4623603", "text": "def join(self, other, join_type=\"inner\"):\n return self._transform(transformations.join_t(other, join_type))", "title": "" }, { "docid": "80c71453954e1b6c0ba60a2065897cc4", "score": "0.46222085", "text": "def test_patch_report_group_by_id(self):\n pass", "title": "" }, { "docid": "814f588a9d2d9d6cf2faaf92fb624d4a", "score": "0.45802537", "text": "def _build_all_groups(self):", "title": "" }, { "docid": "661114ecb72964ff7f460c162f47d368", "score": "0.4576408", "text": "def join(self):\n pass", "title": "" }, { "docid": "661114ecb72964ff7f460c162f47d368", "score": "0.4576408", "text": "def join(self):\n pass", "title": "" }, { "docid": "b3d900cd60565095aaed32bef03ca208", "score": "0.45669937", "text": "def find_sql_join(logger, table_names,filters, on_conditions, type='INNER',columns=''):\n try:\n data = None\n db = get_db_connect(logger)\n cursor = db.cursor(pymysql.cursors.DictCursor)\n\n if columns:\n columns = ','.join(columns)\n else:\n columns = '*'\n\n on = query_from_filter_join(on_conditions)\n params = query_from_filter(filters)\n\n join = ''\n for table in table_names:\n join += '%s %s %s JOIN '%(table,table,type)\n join = join[:-(len(type)+7)] # Removing Join String\n\n\n query = 'SELECT %s FROM %s ON %s WHERE %s'%(columns,join,on,params)\n cursor.execute(query)\n data = cursor.fetchall()\n logger.msg_logger('>>>>>>>> MYSQL Find Success : %s' % (query))\n except Exception as e:\n logger.error_logger('find_sql_join : %s || %s'%(str(e),query))\n finally:\n if db: db.close()\n return data", "title": "" }, { "docid": "c3c7346c16869dc059aecc23d66439ef", "score": "0.45664436", "text": "def test_update_report_group_by_id(self):\n pass", "title": "" }, { "docid": "7df8d980bb625b168084393cee64fda6", "score": "0.45659903", "text": "def test_join(data_fmodel, 
check_isomorphous, sg):\n other = data_fmodel.copy(deep=True)\n other.spacegroup = sg\n if check_isomorphous and sg.number == 19:\n with pytest.raises(ValueError):\n result = data_fmodel.join(other, lsuffix=\"x\", rsuffix=\"y\", \n check_isomorphous=check_isomorphous)\n else:\n result = data_fmodel.join(other, lsuffix=\"x\", rsuffix=\"y\",\n check_isomorphous=check_isomorphous)\n assert isinstance(result, rs.DataSet)\n assert len(result) == len(data_fmodel)\n assert len(result.columns) == len(data_fmodel.columns)*2\n for attr in data_fmodel._metadata:\n assert result.__getattr__(attr) == data_fmodel.__getattr__(attr)", "title": "" }, { "docid": "e556c367645ccd215ef5533dea4966ee", "score": "0.45641404", "text": "def join(self):\n pass", "title": "" }, { "docid": "6f49c0a3541303b007c20a23ef316540", "score": "0.45622268", "text": "def autoJoin(self):\n return self.__autoJoin", "title": "" }, { "docid": "e7dc1290f4d5c418835a93764b42c5af", "score": "0.45518154", "text": "def left_join(table_1, table_2):\n output = []\n for elem in table_1.table:\n if elem:\n current = elem.head_val\n while current:\n value_1 = current.value\n value_2 = table_2.get(value_1[0])\n value_1.append(value_2)\n output.append(value_1)\n current = current.next_val\n\n return output", "title": "" }, { "docid": "ea2ccc04c94f045dd7a209ee52234762", "score": "0.4540083", "text": "def build_report(report_data, request_data, report_property, sections):\n # 'report_name' - report's headline\n rt = breeze.models.ReportType.objects.get(type=report_data['report_type'])\n report_name = report_data['report_type'] + ' Report' + ' :: ' + report_data['instance_name'] + ' <br> ' + str(rt.description)\n\n # This trick is to extract users's names from the form\n # buddies = list()\n # for e in list(report_property.cleaned_data['share']):\n # buddies.append( str(e) )\n\n # shared_users = breeze.models.User.objects.filter(username__in=buddies)\n\n shared_users = aux.extract_users(request_data.POST.get('Groups'), request_data.POST.get('Individuals'))\n insti = breeze.models.UserProfile.objects.get(user=request_data.user).institute_info\n # create initial instance so that we can use its db id\n dbitem = breeze.models.Report(\n type=breeze.models.ReportType.objects.get(type=report_data['report_type']),\n name=str(report_data['instance_name']),\n author=request_data.user,\n progress=0,\n project=breeze.models.Project.objects.get(id=request_data.POST.get('project')),\n institute=insti\n # project=breeze.models.Project.objects.get(name=report_property.cleaned_data['Project'])\n )\n dbitem.save()\n\n if shared_users:\n dbitem.shared = shared_users\n\n # define location: that is report's folder name\n path = slugify(str(dbitem.id) + '_' + dbitem.name + '_' + dbitem.author.username)\n loc = str(settings.MEDIA_ROOT) + str(\"reports/\") + path\n dochtml = loc + '/report'\n dbitem.home = str(\"reports/\") + path\n dbitem.save()\n\n # BUILD R-File\n script_string = 'setwd(\\\"%s\\\")\\n' % loc\n script_string += 'require( Nozzle.R1 )\\n\\n'\n script_string += 'path <- \\\"%s\\\"\\n' % loc\n script_string += 'report_name <- \\\"%s\\\"\\n' % report_name\n # define a function for exception handler\n script_string += 'failed_fun_print <- function(section_name, error_report){\\n'\n script_string += ' Error_report_par <- newParagraph(\"<br>\", asStrong( \"Error Log Details: \" ),\"<br><br>\",asCode(paste(error_report,collapse=\"\"))); \\n'\n script_string += ' section_name <- addTo( section_name, newParagraph( \"This section FAILED! 
Contact the development team... \" ), Error_report_par )\\n'\n script_string += ' return (section_name)\\n}\\n\\n'\n\n script_string += dump_project_parameters(dbitem.project, dbitem)\n script_string += dump_pipeline_config(rt, report_data['instance_id'])\n\n script_string += 'REPORT <- newCustomReport(report_name)\\n'\n\n dummy_flag = False\n for tag in sections:\n secID = 'Section_dbID_' + str(tag.id)\n if secID in request_data.POST and request_data.POST[secID] == '1':\n tree = xml.parse(str(settings.MEDIA_ROOT) + str(tag.docxml))\n script_string += '##### TAG: %s #####\\n' % tag.name\n if tag.name == \"Import to FileMaker\":\n dummy_flag = True\n\n # source main code segment\n code_path = str(settings.MEDIA_ROOT) + str(tag.code)\n script_string += '# <---------- body ----------> \\n' + open(code_path, 'r').read() + '\\n'\n script_string += '# <------- end of body --------> \\n'\n # input parameters definition\n script_string += '# <---------- parameters ----------> \\n'\n script_string += gen_params_string(tree, request_data.POST, str(settings.MEDIA_ROOT) + dbitem.home, request_data.FILES)\n script_string += '# <------- end of parameters --------> \\n'\n # final step - fire header\n header_path = str(settings.MEDIA_ROOT) + str(tag.header)\n script_string += '# <---------- header ----------> \\n' + open(header_path, 'r').read() + '\\n\\n'\n script_string += 'new_section <- newSection( section_name )\\n'\n script_string += 'tag_section <- tryCatch({section_body(new_section)}, error = function(e){ failed_fun_print(new_section,e) })\\n'\n script_string += 'REPORT <- addTo( REPORT, tag_section )\\n'\n script_string += '# <------- end of header --------> \\n'\n script_string += '##### END OF TAG #####\\n\\n\\n'\n script_string += 'setwd(\\\"%s\\\")\\n' % loc\n\n else: # if tag disabled - do nothing\n pass\n # render report to file\n script_string += '# Render the report to a file\\n' + 'writeReport( REPORT, filename=toString(\\\"%s\\\"))\\n' % dochtml\n script_string += 'system(\"chmod -R 770 .\")'\n\n # save r-file\n dbitem.rexec.save('script.r', base.ContentFile(script_string))\n dbitem.save()\n\n # configure shell-file\n config_path = loc + '/sgeconfig.sh'\n config = open(config_path, 'w')\n\n # config should be executble\n st = os.stat(config_path)\n os.chmod(config_path, st.st_mode | stat.S_IEXEC)\n\n command = '#!/bin/bash \\n' + str(settings.R_ENGINE_PATH) + 'CMD BATCH --no-save ' + str(settings.MEDIA_ROOT) + str(dbitem.rexec)\n config.write(command)\n config.close()\n\n # open report's folder for others\n st = os.stat(loc)\n os.chmod(loc, st.st_mode | stat.S_IRWXG)\n\n # submit r-code\n p = Process(target=run_report, args=(dbitem,dummy_flag))\n #print(dbitem)\n #run_report(dbitem,dummy_flag)\n p.start()\n\n return True", "title": "" }, { "docid": "5813611c7eef5b50589b34a9edf944c0", "score": "0.45375982", "text": "def join_3_query():\n desired_country = \"Brazil\"\n desired_gameDest = \"Europe\"\n desired_gameYear = 2017\n\n sql = text('''SELECT distinct a.name, a.teamID, a.status, a.salary FROM Athlete a, GameGoal gg, Game g, Season s WHERE\n a.id = gg.athleteID AND gg.gameID = g.gameID AND s.seasonID = g.seasonID AND a.countryID LIKE \"''' + desired_country + '''\"'''\n ''' AND s.seasonID = ''' + str(desired_gameYear) + ''' AND s.location LIKE \"''' + desired_gameDest + '''\"''')\n\n data = db.engine.execute(sql)\n a_data = [list(row) for row in data]\n print(a_data)", "title": "" }, { "docid": "033b2d3f9d8efea5016685e585bdac19", "score": "0.45303717", "text": "def 
create_join_string(table1, table2, schema, join_type):\n\n assert join_type in {'JOIN_FROM', 'JOIN_WHERE'}, f'join function {join_type} is unknown'\n\n if table1 == table2:\n logging.warning('attempted aggregation over one table')\n if join_type == 'JOIN_FROM':\n return table1\n else:\n return ''\n\n # joins = []\n # paths = bfs_paths(schema.links, table1, table2)\n #\n # best_p = next(paths, None)\n # if not best_p:\n # return None\n #\n # if join_type == 'JOIN_FROM':\n # return ' JOIN '.join(best_p)\n #\n # for i in range(0, len(best_p) - 1):\n # joins.append(best_p[i] + '.' + schema.links[best_p[i]][best_p[i + 1]] + ' = ' + best_p[i + 1] + '.' +\n # schema.links[best_p[i + 1]][best_p[i]])\n #\n # return ' AND '.join(joins)\n if table2 in schema.links[table1]:\n if join_type == 'JOIN_FROM':\n return f'{table1} JOIN {table2}'\n else:\n return f'{table1}.{schema.links[table1][table2]} = {table2}.{schema.links[table2][table1]}'\n else:\n logging.info('No link found for JOIN with directly linked tables. Recursive JOIN not yet implemented')\n return None", "title": "" }, { "docid": "7d0c294d943e996eb8db130940b15a48", "score": "0.4526899", "text": "def test_plural_methods(self):\n new_relation = Relation(self.Test).where('foo', 'bar').select('poop')\n relation = self.relation.group('baz').where('baz')\n\n merged = relation.merge(new_relation)\n\n self.assertEqual(relation.params['group'], ['baz'])\n self.assertEqual(relation.params['where'], ['baz'])\n\n self.assertEqual(merged.params['where'], ['baz', 'foo', 'bar'])\n self.assertEqual(merged.params['group'], ['baz'])\n self.assertEqual(merged.params['select'], ['poop'])", "title": "" }, { "docid": "6faefa2d8941c95a238d627d40979f16", "score": "0.45122653", "text": "def field_join(record):\n return FIELD_SEPARATOR.join(str(field) for field in record)", "title": "" }, { "docid": "534ecdc7ebca0363f632276f73ad7744", "score": "0.44828525", "text": "def build_query(self, query):\n root_join = self.root_join(query)\n if query != root_join:\n query.append_from(root_join)\n return query", "title": "" }, { "docid": "c3f7e2569be79114951d32c338f8c639", "score": "0.44684303", "text": "def serialize(self, joiner=\" \"):\n stringList = []\n stringList.append(\"select %s\" % self._all_selects())\n stringList.append(\"from <#ri>\")\n stringList.append(\"where\")\n if self._and_clauses:\n stringList.append(self._all_ands())\n if self._or_clauses:\n stringList.append(self._all_ors())\n if self._get_order_by() and self._get_order_by() in self._all_selects().split(\" \"):\n stringList.append(\"order by %s\" % self._get_order_by())\n return joiner.join(stringList)", "title": "" }, { "docid": "5f179fe11fc28079afa4415ac5e29e96", "score": "0.44585225", "text": "def join_function():\n definition_id = foreign(CustomAttributeDefinition.definition_id)\n definition_type = foreign(CustomAttributeDefinition.definition_type)\n return and_(definition_id == self.id,\n definition_type == self._inflector.table_singular)", "title": "" }, { "docid": "e8ba179dc14e3554f1ba0124c0f44404", "score": "0.4455694", "text": "def combine(self, agg1, agg2):", "title": "" }, { "docid": "30e3cd3a1c2da282c0c4cda901e69170", "score": "0.44513357", "text": "def mergeTable(self,base_table_id,second_table_id,base_col,second_col,merge_table_name):\n\n query = []\n query.append(\"CREATE VIEW '{0}' AS (\".format(merge_table_name))\n query.append(\"SELECT * \")\n # Use the two lines below instead if you want to specify cols to include\n # query.append(\"SELECT MyBaseTable.{0} AS myFirstColumn, 
\".format(base_col))\n # query.append(\"MySecondBaseTable.{0} AS mySecondColumn \".format(second_col))\n query.append(\"FROM {0} AS MyBaseTable \".format(base_table_id))\n query.append(\"LEFT OUTER JOIN {0} AS MySecondBaseTable \".format(second_table_id))\n # if use alias, can use those alias1 = alias2\n query.append(\"ON MyBaseTable.{0} = MySecondBaseTable.{1})\".format(base_col,second_col))\n\n return ''.join(query)", "title": "" }, { "docid": "e488fe92603488aaf4460577b7903ac5", "score": "0.4444383", "text": "def join(self, table: Union[str, sa.Table], left_where: Union[str, sa.Column, BinaryExpression], right_where: Union[str, sa.Column] = None, alias: str = None, method: str = 'join') -> B[B, E]:\n # Get table and tablename\n conn = self._connection()\n if type(table) == str:\n if '.' in table: conn, table = tuple(table.split('.'))\n table = uvicore.db.table(table, conn)\n tablename = str(table.name)\n\n # Get left, right and onclause expressions\n left = None\n right = None\n if type(left_where) == BinaryExpression:\n onclause = left_where\n else:\n left = self._column(left_where)\n right = self._column(right_where)\n onclause = left.sacol == right.sacol\n\n # Set alias to tablename if not defined\n if not alias: alias = tablename\n\n # Add new Join() expression\n self.query.joins.append(Join(table, tablename, left, right, onclause, alias, method))\n return self", "title": "" }, { "docid": "183c1510c50e1c4402911218277aba36", "score": "0.44429955", "text": "def test_raw_join_defaults(self):\n source_list = [1, 2, 3, 4]\n self.assertEqual(\n sequences.raw_join(source_list),\n '1,2,3,4')", "title": "" }, { "docid": "9358783162dddcfbcff74da5f65344ad", "score": "0.44167536", "text": "def gbandjoin(cls, bands):\n image = cls.from_vimage(\n super(VImage, cls).gbandjoin(bands)\n )\n # Hold on to the other band to prevent garbage collection\n image._buf = bands\n return image", "title": "" }, { "docid": "3e26edb6ab3446c088aa0fa05a7da556", "score": "0.44106263", "text": "def join(left, right):\n return map(merge, it.product(left, right))", "title": "" }, { "docid": "ba3ad898a04bd6e1a29db5196bb1b491", "score": "0.44092992", "text": "def joinComponents(self, results):\n return results", "title": "" }, { "docid": "c117a2790c2486cf095acc9ba45bafea", "score": "0.440664", "text": "def get_report_data(self):\n\n query = '''SELECT AML.DATE AS DATE,\n \tCASE\n \t\t\t\t\tWHEN INV.SUPPLIER_INVOICE_NUMBER IS NOT NULL THEN AJ.NAME || ' ' || INV.SUPPLIER_INVOICE_NUMBER\n \t\t\t\t\tWHEN INV.NUMBER IS NOT NULL THEN AJ.NAME || ' ' || INV.NUMBER\n \t\t\t\t\tELSE AJ.NAME\n \tEND AS DESCRIPTION,\n \tCASE\n \t\t\t\t\tWHEN (SUM(AML.DEBIT) - SUM(AML.CREDIT)) > 0 THEN ROUND((SUM(AML.DEBIT) - SUM(AML.CREDIT)),2)\n \t\t\t\t\tELSE 0.00\n \tEND AS DEBIT,\n \tCASE\n \t\t\t\t\tWHEN SUM(AML.DEBIT) - SUM(AML.CREDIT) < 0 THEN -1 * ROUND((SUM(AML.DEBIT) - SUM(AML.CREDIT)),2)\n \t\t\t\t\tELSE 0.00\n \tEND AS CREDIT,\n \tCASE\n\t\t\t\t\tWHEN ABS(SUM (AML.AMOUNT_CURRENCY)) > 0 THEN ROUND(ABS(SUM(AML.DEBIT) - SUM(AML.CREDIT)) / ABS(SUM (AML.AMOUNT_CURRENCY)),5)\n\t\t\t\t\tELSE 0.00\n \tEND AS CURRENCY_RATE,\n \tCASE\n\t\t\t\t\tWHEN ROUND(SUM (AML.AMOUNT_CURRENCY),4) > 0 THEN ROUND(SUM (AML.AMOUNT_CURRENCY),4)\n\t\t\t\t\tELSE 0.00\n \tEND AS DEBIT_CURRENCY,\n \tCASE\n\t\t\t\t\tWHEN ROUND(SUM (AML.AMOUNT_CURRENCY),4) < 0 THEN -1 * ROUND(SUM (AML.AMOUNT_CURRENCY),4)\n\t\t\t\t\tELSE 0.00\n \tEND AS CREDIT_CURRENCY,\n \tROUND(SUM (AML.AMOUNT_CURRENCY),4) AS AMOUNT_CURRENCY,\n \tRC.SYMBOL AS SYMBOL\n FROM ACCOUNT_MOVE_LINE AML\n LEFT 
JOIN ACCOUNT_JOURNAL AJ ON AJ.ID = AML.JOURNAL_ID\n LEFT JOIN ACCOUNT_INVOICE INV ON INV.ID = AML.INVOICE_ID\n LEFT JOIN RES_CURRENCY RC ON RC.ID = AML.CURRENCY_ID\n WHERE AML.FULL_RECONCILE_ID = {0}\n GROUP BY AML.DATE,\n AJ.NAME,\n \tINV.NUMBER,\n \tINV.SUPPLIER_INVOICE_NUMBER,\n \tRC.SYMBOL\n ORDER BY AML.DATE,RC.SYMBOL'''.format(str(self.id))\n\n self.env.cr.execute(query)\n res = self.env.cr.dictfetchall()\n\n return res", "title": "" }, { "docid": "450f8b50e3b53458163df88e3877a46a", "score": "0.4402124", "text": "def get_join_instruction(self, fields, n_tables=1, join=None):\n if not join:\n join = []\n __ini = \"SELECT :fields FROM :table0\"\n if n_tables > 1:\n for index in range(n_tables - 1):\n to_join = join[index]\n str_table = \":table\" + str(index + 1)\n str_join = \"\"\n if to_join.startswith(\"l__\"):\n __ini += \" LEFT JOIN \" \n elif to_join.startswith(\"r__\"):\n __ini += \" RIGHT JOIN \"\n else:\n __ini += \" INNER JOIN \"\n __ini += str_table\n print(\"to_join\", to_join)\n for field in to_join:\n print(\"field\", field)\n if str_join:\n str_join += \" AND \"\n str_join += str_table + \".\" + field\n str_join += \"= :table0.\" + field\n __ini += \" ON \" + str_join\n\n __inst = \"\"\n\n for field in fields:\n if __inst:\n __inst += \",\"\n\n __inst += field\n\n if not fields:\n __inst = \"*\"\n\n response = __ini.replace(\":fields\", __inst)\n return response", "title": "" }, { "docid": "d6317877ac0a721856be751a610dd468", "score": "0.43911442", "text": "def generate_join_statments(self, table_names: List[str], exclude_from_statment: List[str]=[]) -> str:\n self.logger.debug(\"Generating join statement for tables: %r, excluding: %r\", table_names, exclude_from_statment)\n join_req = \"\"\n for table_name in table_names:\n fks = self._schema_utils.fetch_foreign_keys(table_name=table_name)\n\n for fk in fks.values():\n join_rules = []\n fk_referenced_table = None\n as_nullable_field = False\n for col_constraint in fk.matching_columns:\n join_rules.append(\n \"{foreign.table_name}.{foreign.column_name} = {referenced.table_name}.{referenced.column_name}\"\n .format(foreign=col_constraint.foreign_col, referenced=col_constraint.referenced_col)\n )\n fk_referenced_table = col_constraint.referenced_col.table_name\n\n # if it's a composite key all fields will be nullable\n as_nullable_field = self._schema_utils.is_nullable(column=col_constraint.foreign_col)\n\n if fk_referenced_table in exclude_from_statment:\n continue\n\n fk_req_rules = \" AND \".join(join_rules)\n\n # Using LEFT JOIN when the foreign key is nullable, to prevent unecessary restriction if the key is null\n fk_join_type = \"LEFT\" if as_nullable_field else \"INNER\"\n\n fk_join_req = \"{join_type} JOIN {referenced_table} ON {rules} \".format(\n join_type=fk_join_type,\n referenced_table=fk_referenced_table,\n rules=fk_req_rules\n )\n join_req += \"\\n\" + fk_join_req\n\n return join_req", "title": "" }, { "docid": "af1c22d5b19af94504b5d4d8370b5703", "score": "0.43839923", "text": "def test_join__one_to_one__twice(self):\n c = models.Comment\n\n # === Test: Make two LEFT JOINs\n query_obj = dict(\n project=['id'],\n join={\n # 1-1, MongoJoin will choose RELSTRATEGY_LEFT_JOIN\n 'article': dict(project=['id']),\n # 1-1, MongoJoin will choose RELSTRATEGY_LEFT_JOIN\n 'user': dict(project=['id']),\n },\n # two LEFT JOINs here\n )\n mq = c.mongoquery().query(**query_obj)\n\n qs = self.assertQuery(mq.end(),\n 'FROM c',\n 'LEFT OUTER JOIN a AS a_1 ON a_1.id = c.aid',\n # 'LEFT OUTER JOIN u AS u_1 ON u_1.id = c.uid' # 
not here because selectinload() would handle it\n )\n self.assertNotIn('JOIN u', qs) # not here because selectinload() would handle it\n\n self.assertSelectedColumns(qs,\n 'c.id',\n 'a_1.id',\n # 'u_1.id', # not here because selectinload() would handle it\n )\n\n # === Test: same, with LIMIT\n # When MongoSQL used the RELSTRATEGY_LEFT_JOIN with LIMIT, it used to corrupt the query beyond recognition,\n # and the second LEFT JOIN was unable to attach to that mutilated query at all.\n # This test sees what happens if we join two relations by LEFT JOIN\n mq = c.mongoquery().query(\n **query_obj,\n # two LEFT JOINs here\n limit=1\n )\n\n # This is the sort of query you'd expect if I fixed it properly\n qs = self.assertQuery(mq.end(),\n 'FROM (SELECT c.id',\n 'FROM c',\n 'LIMIT 1) AS anon_1',\n 'LEFT OUTER JOIN a AS a_1 ON a_1.id = anon_1.c_aid',\n # This second line used to contain a wrong, unaliased ON clause: \"ON u_1.id = c.uid\"\n # In fact, I didn't fix it at all ; I moved it into a selectinquery() handler\n # 'LEFT OUTER JOIN u AS u_1 ON u_1.id = anon_1.c_aid'\n )\n self.assertNotIn('JOIN u', qs) # not here because selectinload() would handle it\n\n self.assertSelectedColumns(qs,\n 'anon_1.c_id', 'anon_1.c_aid',\n 'a_1.id',\n # 'u_1.id', # not here because selectinload() would handle it\n )", "title": "" }, { "docid": "4a99fb1374abcff19affbdf1a3d92171", "score": "0.43748668", "text": "def JoinSeries(*args):\n return _SimpleITK.JoinSeries(*args)", "title": "" }, { "docid": "e7aef42e30cf9fa95e49fa3c49991b75", "score": "0.43728262", "text": "def join_and(tables, needed_data):\n\tfinal_data = []\n\ttable1 = (re.sub(' +', ' ', str(tables[0]))).strip()\n\ttable2 = (re.sub(' +', ' ', str(tables[1]))).strip()\n\tfor item1 in needed_data[table1]:\n\t for item2 in needed_data[table2]:\n \tfinal_data.append(item1 + item2)\n\treturn final_data", "title": "" }, { "docid": "ba0aea36e76fd00c7cc0f13afe6584d3", "score": "0.43723947", "text": "def build_query_with_sel(self, sel, query, add_join = None):\n root_join = self.root_join(query)\n if add_join:\n for item in add_join:\n table, l_col, r_col = item\n root_join = root_join.join(table, l_col == r_col)\n sel.append_from(root_join)\n return sel", "title": "" }, { "docid": "8166536fc5a71e764669e5f1e86faa26", "score": "0.43681005", "text": "def test_raw_join(self):\n source_list = [1, 2, 3, 4]\n self.assertEqual(\n sequences.raw_join(source_list,\n prefix='(',\n separator=' :: ',\n final_separator=' & ',\n suffix=')'),\n '(1 :: 2 :: 3 & 4)')", "title": "" }, { "docid": "5cc68d4cc7a7c7afb863f117674c4ecd", "score": "0.4367432", "text": "def test_join_self_referential_model(self):\n u = models.User\n\n # === Test: load a self-referential relationship\n mq = u.mongoquery().query(\n project=['id', 'master_id'],\n join={'master': dict(project=['id'])}\n )\n qs = self.assertQuery(mq.end(),\n # Properly aliased\n 'FROM u',\n 'LEFT OUTER JOIN u AS u_1 ON u_1.id = u.master_id'\n )\n self.assertSelectedColumns(qs,\n 'u.id', 'u.master_id', 'u_1.id',\n )", "title": "" }, { "docid": "1e10e729bc7ea68aaee015c91d1cf3ea", "score": "0.43460572", "text": "def join_columns_with_divider(table, decorator):\n return [decorator.join(row) for row in table]", "title": "" }, { "docid": "37cd5ca5bcca39250592474782558440", "score": "0.43454066", "text": "def test_join__one_to_one__twice__same_model(self):\n e = models.Edit\n\n # === Test: join to multiple relationships\n mq = e.mongoquery().query(project=['description'],\n join={'user': dict(project=['name']),\n 'creator': 
dict(project=['tags'],\n filter={'id': {'$lt': 1}})})\n qs = self.assertQuery(mq.end(),\n \"FROM e \",\n \"LEFT OUTER JOIN u AS u_1 ON u_1.id = e.uid \",\n # \"LEFT OUTER JOIN u AS u_2 ON u_2.id = e.cuid AND u_2.id < 1\" # not here because selectinload() would handle it\n )\n self.assertNotIn('JOIN u AS u_2', qs) # not here because selectinload() would handle it\n self.assertSelectedColumns(qs,\n 'u_1.id', 'u_1.name',\n # 'u_2.id', 'u_2.tags', # not here because selectinload() would handle it\n 'e.id', 'e.description'\n )", "title": "" }, { "docid": "15a55f5cc70339244cf608e2994feeb5", "score": "0.43355215", "text": "def build_sql_statement(self, outer_scope=None):\n\n # self.relations.reverse()\n # master_relation = self.relations.pop()\n # self.relations.reverse()\n master_relation = self.relations[0]\n\n ## SELECT EXPRESSIONS\n select = master_relation.select\n\n if len(self.relations) > 1:\n for r in self.relations[1:]:\n if r.select:\n select = f\"{select}, {r.select}\"\n\n if self.distinct:\n s = f\"SELECT DISTINCT {select} FROM {master_relation.model_table}\"\n else:\n s = f\"SELECT {select} FROM {master_relation.model_table}\"\n\n ## FROM JOINS\n if not outer_scope:\n for relation in self.relations[1:]:\n s += f\" {relation.join_operator} {relation.model_table} {relation.alias or ''} ON {relation.join_condition_expression} \"\n\n ## WHERE\n where = \"\\n\"\n if master_relation.where:\n where += f\" WHERE {master_relation.where}\"\n for relation in self.relations[1:]:\n if relation.where:\n if where:\n where += \" AND \"\n else:\n where += \"WHERE \"\n where += relation.where\n s += where + \"\\n\"\n\n ## GROUP BY\n if master_relation.group_by:\n gb = master_relation.group_by_columns()\n if gb:\n s += f\" GROUP BY {gb}\"\n\n ## ORDER BY\n order = \"\"\n if master_relation.order_by:\n order += f\" ORDER BY {master_relation.order_by}\"\n for relation in self.relations[1:]:\n if relation.order_by:\n if not order:\n order = \" ORDER BY \"\n order += relation.order_by\n if relation.order_by_direction:\n order += \" DESC \" if relation.order_by_direction == \"-\" else \" ASC \"\n s += order\n\n if self._limit:\n s += f\" LIMIT {int(self._limit)}\"\n\n if self._offset:\n s += f\" OFFSET {int(self._offset)}\"\n\n # replace variables placeholders to be valid dict placeholders\n s = re.sub(PLACEHOLDER_PATTERN, lambda x: f\"%({x.group(1)})s\", s)\n\n self.sql = s\n self.master_relation = master_relation\n\n return self.sql", "title": "" }, { "docid": "4025c4b069701f995b740dbb90d86cca", "score": "0.43273258", "text": "def after_onJoin(self, details):\n pass", "title": "" }, { "docid": "27ce8d9aa0741dccbb157727f8097163", "score": "0.43241796", "text": "def join(self):\n yield", "title": "" }, { "docid": "5c97eabf7c83477060d4c7bb0ba54463", "score": "0.4315517", "text": "def outer_join(self, other):\n return self.join(other, \"outer\")", "title": "" }, { "docid": "82bf14cb19ff6f5afeac0923b50d135e", "score": "0.42969674", "text": "def group_join(self, other, key_f, value_f, select_f):\n linq = Linq()\n for item in self:\n key_value = key_f(item)\n value_linq = Linq()\n for item2 in other:\n value_value = value_f(item2)\n if key_value == value_value:\n value_linq.append(item2)\n linq.append(select_f(item, value_linq))\n res = linq\n return res", "title": "" }, { "docid": "3df43208c3e423c90c6f767ae20cea1a", "score": "0.42855364", "text": "def __add__(self, other):\n return MyriaFluentQuery(self, UnionAll([self.query, other.query]))", "title": "" }, { "docid": "2fea15dc9b406472458eccfcbfbbb911", 
"score": "0.4283562", "text": "def join(self, right_join_table, left_on, right_on, join_type):\n assert left_on == '_auto_key' or isinstance(left_on, Attr)\n assert right_on is None or isinstance(right_on, Attr)\n if right_on is None:\n if join_type is None or join_type == 'cross':\n join_type = 'cross'\n else:\n raise ValueError('The join column must be specified for inner, left, and right join.')\n if join_type is None:\n join_type = 'inner' # default is inner join\n assert (join_type.lower() == 'inner'\n or join_type.lower() == 'left'\n or join_type.lower() == 'right'\n or join_type.lower() == 'cross')\n return DerivedTable(self, 'join', [right_join_table, left_on, right_on, join_type])", "title": "" }, { "docid": "9afc6a23e479ddd80392fe9b6bcf8a6f", "score": "0.4275489", "text": "def test_join_list_dataseries(data_fmodel, check_isomorphous, sg):\n other = data_fmodel.copy(deep=True)\n other.spacegroup = sg\n other = [ other[c] for c in other.columns ]\n other = [ c.rename(c.name + str(i)) for i, c in enumerate(other) ]\n result = data_fmodel.join(other, lsuffix=\"x\", rsuffix=\"y\",\n check_isomorphous=check_isomorphous)\n assert isinstance(result, rs.DataSet)\n assert len(result) == len(data_fmodel)\n assert len(result.columns) == len(data_fmodel.columns)*2\n for attr in data_fmodel._metadata:\n assert result.__getattr__(attr) == data_fmodel.__getattr__(attr)", "title": "" }, { "docid": "e05e5eda4c06138aa20463d26aea02c7", "score": "0.4261199", "text": "def merge_join_inputs(self):\n def _input_to_merge():\n merges = [\n y for (x, y) in self.modelled_methods()\n if x == 'CallMerge'\n ][0]()\n\n lefts = self.exec(\n call(from_set(merges, 'gid'))\n | where | the_function()\n | isa | attribute()\n | where | the_object()\n | is_ | anything() % 'left'\n )\n\n rights = self.exec(\n call(from_set(merges, 'gid'))\n | where | the_first_arg()\n | is_ | anything() % 'right'\n )\n\n lefts['gid'] = lefts.gid_left\n lefts['pretty'] = 'LeftSideOfMerge[]'\n\n rights['gid'] = rights.gid_right\n rights['pretty'] = 'RightSideOfMerge[]'\n\n return pd.concat([\n lefts, rights\n ])\n\n def _input_to_join():\n joins = [\n y for (x, y) in self.modelled_methods()\n if x == 'CallJoin'\n ][0]()\n\n lefts = self.exec(\n call(from_set(joins, 'gid'))\n | where | the_function()\n | isa | attribute()\n | where | the_object()\n | is_ | anything() % 'left'\n )\n\n rights = self.exec(\n call(from_set(joins, 'gid'))\n | where | the_first_arg()\n | is_ | anything() % 'right'\n )\n\n lefts['gid'] = lefts.gid_left\n lefts['pretty'] = 'LeftSideOfJoin[]'\n\n rights['gid'] = rights.gid_right\n rights['pretty'] = 'RightSideOfJoin[]'\n\n return pd.concat([\n lefts, rights\n ])\n\n return [\n ('MergeInput', _input_to_merge),\n ('JoinInput', _input_to_join)\n ]", "title": "" }, { "docid": "a85ac9ed20cfd1ead91177cb8d66d491", "score": "0.42527324", "text": "def append_group_col(edges_df, subset_groups):\n subset_groups = subset_groups[['label', 'group']]\n\n # Tag rows with a unique ID, which we'll use in the final inner-merge below\n # to avoid double-duplications in the case of duplicated label pairs in which\n # one or more of the labels is involved in multiple groups.\n edges_df['row'] = np.arange(len(edges_df), dtype=int)\n\n # Assign label_a groups, (duplicating edges as necessary if label_a belongs to multiple groups)\n edges_a_df = edges_df.merge(subset_groups.rename(columns={'label': 'label_a'}, copy=False), 'left', on='label_a')\n\n # Assign label_b groups, (duplicating edges as necessary if label_b belongs to multiple 
groups)\n edges_b_df = edges_df[['label_a', 'label_b', 'row']].merge(subset_groups.rename(columns={'label': 'label_b'}, copy=False), 'left', on='label_b')\n\n # Keep rows that have matching groups on both sides\n edges_df = edges_a_df.merge(edges_b_df, 'inner', ['label_a', 'label_b', 'row', 'group'])\n\n # Put group first, and sort by group\n cols = edges_df.columns.tolist()\n cols.remove('group')\n cols.insert(0, 'group')\n edges_df = edges_df[cols]\n del edges_df['row']\n\n return edges_df", "title": "" }, { "docid": "5fd4f678efdffe1935ccb3f6963742f4", "score": "0.42494535", "text": "def get_df_relation_journal(df):\n # drug -> date -> id ou journal name\n df_relation_journal = df.select(\"drug\", \"journal\", \"date\").distinct().withColumn(\"type\", lit(\"relation\"))\n return df_relation_journal", "title": "" }, { "docid": "183daca45c000d48e1908bf4e03d1ee0", "score": "0.42492682", "text": "def join(self, other, by=None, by_left=None, by_right=None, join_type='inner'):\n join_types = ['inner', 'left', 'right', 'full', 'leftsemi']\n join_type = join_type.lower()\n\n if join_type not in join_types:\n raise ValueError('Invalid join type: {}, valid types are: {}'.format(join_type, ', '.join(join_types)))\n\n if isinstance(by, basestring):\n by = [by]\n if isinstance(by_left, basestring):\n by_left = [by_left]\n if isinstance(by_right, basestring):\n by_right = [by_right]\n\n if by is None and by_left is None and by_right is None:\n by = list(set(self.colnames) & set(other.colnames))\n if len(by) == 0:\n raise ValueError('Unable to get the intersection of the column names for joining. Please specify '\n 'the columns using by, by_x or by_y')\n\n if join_type == 'inner':\n jjtype = self._gateway_client.jvm.io.ddf.etl.Types.JoinType.INNER\n elif join_type == 'left':\n jjtype = self._gateway_client.jvm.io.ddf.etl.Types.JoinType.LEFT\n elif join_type == 'right':\n jjtype = self._gateway_client.jvm.io.ddf.etl.Types.JoinType.RIGHT\n elif join_type == 'full':\n jjtype = self._gateway_client.jvm.io.ddf.etl.Types.JoinType.FULL\n else:\n jjtype = self._gateway_client.jvm.io.ddf.etl.Types.JoinType.LEFTSEMI\n\n return DistributedDataFrame(self._jddf.join(other._jddf, jjtype,\n util.to_java_list(by, self._gateway_client),\n util.to_java_list(by_left, self._gateway_client),\n util.to_java_list(by_right, self._gateway_client)),\n self._gateway_client)", "title": "" }, { "docid": "833671d9a9230402496c9e212578d2be", "score": "0.42489615", "text": "def _handle_merge(self, assign, lhs, rhs, label):\n if len(rhs.args) < 2:\n raise ValueError(\"left and right arguments required for merge\")\n left_df = rhs.args[0]\n right_df = rhs.args[1]\n kws = dict(rhs.kws)\n if 'on' in kws:\n left_on = get_constant(self.func_ir, kws['on'], None)\n right_on = left_on\n else: # pragma: no cover\n if 'left_on' not in kws or 'right_on' not in kws:\n raise ValueError(\"merge 'on' or 'left_on'/'right_on'\"\n \"arguments required\")\n left_on = get_constant(self.func_ir, kws['left_on'], None)\n right_on = get_constant(self.func_ir, kws['right_on'], None)\n if left_on is None or right_on is None:\n raise ValueError(\"merge key values should be constant strings\")\n scope = lhs.scope\n loc = lhs.loc\n # add columns from left to output\n left_colnames = self._get_df_col_names(left_df)\n df_col_map = {col: ir.Var(scope, mk_unique_var(col), loc)\n for col in left_colnames}\n # add columns from right to output\n right_colnames = self._get_df_col_names(right_df)\n df_col_map.update({col: ir.Var(scope, mk_unique_var(col), loc)\n for col in 
right_colnames})\n self._create_df(lhs.name, df_col_map, label)\n return [hiframes_join.Join(lhs.name, self._get_renamed_df(left_df).name,\n self._get_renamed_df(right_df).name,\n left_on, right_on, self.df_vars, lhs.loc)]", "title": "" }, { "docid": "97cf30b6e41784e12def38b9a3cb7cdd", "score": "0.42403954", "text": "def smvJoinByKey(self, other, keys, joinType):\n jdf = self._jPythonHelper.smvJoinByKey(self._jdf, other._jdf, _to_seq(keys), joinType)\n return DataFrame(jdf, self._sql_ctx)", "title": "" }, { "docid": "55409bdf5a35257ddf2b89d9256c95ed", "score": "0.42303714", "text": "def _AddReportingEntitiesFromLinks(\n link: Link, entity_instance: EntityInstance, fields: List[FieldTranslation]\n) -> None:\n for source_field, target_field in link.field_map.items():\n for field in fields:\n if (\n link.source == field.reporting_entity_guid\n and field.std_field_name == target_field\n ):\n field.entity_guid = entity_instance.guid\n field.reporting_entity_field_name = target_field\n field.std_field_name = source_field", "title": "" }, { "docid": "c188848e68e0540c834aca8f652a3d1f", "score": "0.4221561", "text": "def test_join_list_datasets(data_fmodel, check_isomorphous, sg):\n other = data_fmodel.copy(deep=True)\n other.spacegroup = sg\n other = [other]*3\n other = [ o.rename(lambda x: x + str(i), axis=1) for i, o in enumerate(other) ]\n if check_isomorphous and sg.number == 19:\n with pytest.raises(ValueError):\n result = data_fmodel.join(other, lsuffix=\"x\", rsuffix=\"y\", \n check_isomorphous=check_isomorphous)\n else:\n result = data_fmodel.join(other, lsuffix=\"x\", rsuffix=\"y\",\n check_isomorphous=check_isomorphous)\n assert isinstance(result, rs.DataSet)\n assert len(result) == len(data_fmodel)\n assert len(result.columns) == len(data_fmodel.columns)*4\n for attr in data_fmodel._metadata:\n assert result.__getattr__(attr) == data_fmodel.__getattr__(attr)", "title": "" }, { "docid": "73179f9092388621681423c54b491d84", "score": "0.42209285", "text": "def test_query_order_one_to_many(client, recwarn):\n recwarn.clear()\n query = Query(client, \"Investigation\",\n order=['investigationInstruments.instrument.fullName'])\n w = recwarn.pop(icat.QueryOneToManyOrderWarning)\n assert issubclass(w.category, icat.QueryOneToManyOrderWarning)\n assert \"investigationInstruments\" in str(w.message)\n print(str(query))\n assert \"Investigation\" in query.select_clause\n assert \"instrument\" in query.join_clause\n assert query.where_clause is None\n assert \"fullName\" in query.order_clause\n res = client.search(query)\n assert len(res) == 3", "title": "" }, { "docid": "a3822d54cd860a8e04f09fd60a554ec0", "score": "0.42166367", "text": "def join_fields(self, dict):\n try:\n return self._join_string.join([dict[field_name] for field_name in self._field_names])\n except KeyError:\n return ''", "title": "" }, { "docid": "a746af7889e867e320857553913c1cdd", "score": "0.41948634", "text": "def build_relational_field(self, field_name, relation_info):\n ...", "title": "" } ]
88ea6a4368326b09e886361e32fdef8d
Utility function to normalize a tensor. Arguments
[ { "docid": "bb0fecb9a020b414f099d2bdb96e8270", "score": "0.63248867", "text": "def normalize(x):\n return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())", "title": "" } ]
[ { "docid": "6464ad47b7d44ed5d05c06acd8bd6fae", "score": "0.77866507", "text": "def tensor_normalize(tensor):\n _tensor = tensor.detach().clone()\n _tensor_each_sum = _tensor.sum(dim=1)\n _tensor /= _tensor_each_sum.unsqueeze(1)\n\n _tensor[torch.isnan(_tensor)] = 0.0\n _tensor = 2*_tensor - 1\n return _tensor", "title": "" }, { "docid": "f49babe0b21c3142a22049170cea3e68", "score": "0.7770623", "text": "def normalize_tensor(tensor: torch.tensor):\n\n tensor -= tensor.min(1, keepdim=True)[0]\n tensor /= tensor.max(1, keepdim=True)[0]\n return tensor", "title": "" }, { "docid": "5759e48ac55bd8dcf99423f0e9e0d53c", "score": "0.77434206", "text": "def tensor_normalize(tensor, mean, std):\n if tensor.dtype == torch.uint8:\n tensor = tensor.float()\n tensor = tensor / 255.0\n if type(mean) == list:\n mean = torch.tensor(mean)\n if type(std) == list:\n std = torch.tensor(std)\n tensor = tensor - mean\n tensor = tensor / std\n return tensor", "title": "" }, { "docid": "d07366e1ac3e596f984b0df6cfe4999b", "score": "0.74967617", "text": "def normalize(input_tensor):\r\n\r\n max_val = tf.math.reduce_max(input_tensor)\r\n min_val = tf.math.reduce_min(input_tensor)\r\n\r\n normalized_tensor = tf.math.divide(\r\n input_tensor - min_val,\r\n max_val - min_val\r\n )\r\n\r\n return normalized_tensor", "title": "" }, { "docid": "363a9b5c7c4344420bfa98050106e4fb", "score": "0.74430805", "text": "def normalize(tensor, stats):\n if stats is None:\n return tensor\n return (tensor - stats.mean) / stats.std", "title": "" }, { "docid": "5598f49ddaeaacda3b51cff12c416ac6", "score": "0.7165644", "text": "def normalize_tensor(data_tensor, glob_mean, glob_std):\n \n data_tensor = np.divide(np.subtract(data_tensor, glob_mean), glob_std)\n \n return data_tensor", "title": "" }, { "docid": "ddab401618029667057ef8eafb3b6008", "score": "0.70338386", "text": "def normalize(x, axis=None, keepdims=None, name='normalization'):\n with tf.variable_scope(name):\n m = tf.reduce_mean(x, axis=axis, keepdims=keepdims)\n std = reduce_std(x, axis=axis, keepdims=keepdims)\n norm_x = (x - m) / std\n return norm_x", "title": "" }, { "docid": "e192e5f524d4419c016961fc75483d57", "score": "0.6967897", "text": "def UnNormalize(tensor, mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]):\n temp = copy.deepcopy(tensor)\n for i in range(tensor.shape[0]):\n temp[i] = temp[i]*std[i]+mean[i]\n return torch.clip(temp, 0, 1)", "title": "" }, { "docid": "42da61e6a09edc364e97d04c5f5a3421", "score": "0.6945369", "text": "def normalize(x: np.ndarray):\n return x/np.linalg.norm(x, ord=2, axis=0, keepdims=True)", "title": "" }, { "docid": "4151150e857a5363f8b31aaf947e9416", "score": "0.6927568", "text": "def normalize_fn(tensor, mean, std):\n # here we assume the color channel is in at dim=1\n mean = mean[None, :, None, None]\n std = std[None, :, None, None]\n return tensor.sub(mean).div(std)", "title": "" }, { "docid": "4151150e857a5363f8b31aaf947e9416", "score": "0.6927568", "text": "def normalize_fn(tensor, mean, std):\n # here we assume the color channel is in at dim=1\n mean = mean[None, :, None, None]\n std = std[None, :, None, None]\n return tensor.sub(mean).div(std)", "title": "" }, { "docid": "4151150e857a5363f8b31aaf947e9416", "score": "0.6927568", "text": "def normalize_fn(tensor, mean, std):\n # here we assume the color channel is in at dim=1\n mean = mean[None, :, None, None]\n std = std[None, :, None, None]\n return tensor.sub(mean).div(std)", "title": "" }, { "docid": "0922c73360a32119d9a0598211d49a7b", "score": "0.6910181", "text": 
"def normalize(tensor, mean, std):\n if _is_tensor_image(tensor):\n for t, m, s in zip(tensor, mean, std):\n t.sub_(m).div_(s)\n return tensor\n elif _is_numpy_image(tensor):\n return (tensor.astype(np.float32) - 255.0 * np.array(mean))/np.array(std)\n else:\n raise RuntimeError('Undefined type')", "title": "" }, { "docid": "0922c73360a32119d9a0598211d49a7b", "score": "0.6910181", "text": "def normalize(tensor, mean, std):\n if _is_tensor_image(tensor):\n for t, m, s in zip(tensor, mean, std):\n t.sub_(m).div_(s)\n return tensor\n elif _is_numpy_image(tensor):\n return (tensor.astype(np.float32) - 255.0 * np.array(mean))/np.array(std)\n else:\n raise RuntimeError('Undefined type')", "title": "" }, { "docid": "885c1c44003a80cd62ca7248562155ce", "score": "0.6835364", "text": "def normalize(x, axis=-1):\r\n x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)\r\n return x", "title": "" }, { "docid": "2378d3110b53c82b2aad462ded2466ab", "score": "0.67744726", "text": "def normalize(x, axis=-1):\n x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)\n return x", "title": "" }, { "docid": "2378d3110b53c82b2aad462ded2466ab", "score": "0.67744726", "text": "def normalize(x, axis=-1):\n x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)\n return x", "title": "" }, { "docid": "2378d3110b53c82b2aad462ded2466ab", "score": "0.67744726", "text": "def normalize(x, axis=-1):\n x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)\n return x", "title": "" }, { "docid": "321e0b42e6fffb4f6a895bb6ef836070", "score": "0.6733177", "text": "def normalize(tensors, mean, std):\n if not torch.is_tensor(tensors):\n raise TypeError('tensor is not a torch image.')\n for tensor in tensors:\n for t, m, s in zip(tensor, mean, std):\n t.sub_(m).div_(s)\n return tensors", "title": "" }, { "docid": "68ac94455c267b831afcad91cd990cd3", "score": "0.66329974", "text": "def normalize(data):\n return data/np.linalg.norm(data,axis=1,keepdims=True)", "title": "" }, { "docid": "95cfd3d75894960ed231a2539a3c9e88", "score": "0.6631212", "text": "def tensor_norm_01(data):\n data = np.array(data)\n if np.sum(np.isnan(data))>0:\n print('NaN detected before Normalization')\n return 'variable has NaN values'\n if len(data.shape)>3:\n n_imgs = data.shape[0]\n data = np.float32(data)\n if data.shape[-1]==3:\n for i in range(n_imgs):\n img = data[i,:,:,:]\n data[i,:,:,:] = image_normalization(img,img_min=0,img_max=1)\n\n elif data.shape[-1]==4:\n print('it is a little naive, check it in line 64 seg utils.py')\n for i in range(n_imgs):\n nir = data[i, :, :, -1]\n nir = image_normalization(nir,img_min=0,img_max=1)\n img = data[i, :, :, 0:3]\n img = image_normalization(img,img_min=0,img_max=1)\n data[i, :, :, 0:3] = img\n data[i, :, :, -1] = nir\n elif data.shape[-1]==2:\n #normalization according to channels\n print('check line 70 utils_seg.py')\n for i in range(n_imgs):\n im = data[i,:,:,0]\n N = data[i,:,:,-1]\n data[i,:,:,0]= image_normalization(im,img_min=0,img_max=1)\n data[i, :, :, -1] = image_normalization(N,img_min=0,img_max=1)\n\n elif data.shape[-1]==1:\n x=[]\n for i in range(n_imgs):\n img = data[i, :, :, 0]\n img= image_normalization(img,img_min=0,img_max=1)\n x.append(img)\n data=x\n else:\n print(\"error normalizing line 83\")\n if np.sum(np.isnan(data)) > 0:\n print('NaN detected after normalization')\n return 'variable has NaN values'\n return data\n else:\n print('Please use image_normalization() function')", "title": "" }, { "docid": 
"a6676c29b97979e4c118a208249ebd68", "score": "0.65911555", "text": "def normalize_image(x):\r\n ma = tf.reduce_max(x)\r\n mi = tf.reduce_min(x)\r\n d = ma - mi if ma != mi else 1e5\r\n return (x - mi) / d", "title": "" }, { "docid": "ec514b2c6d004cd5b0f70a2e40dca25b", "score": "0.65735257", "text": "def batch_normalization(input_tensor, variance_epsilon, name):\n return tf.keras.layers.BatchNormalization(epsilon=variance_epsilon, name=name, trainable=False)(input_tensor)", "title": "" }, { "docid": "25e5268b898633a3f8c9afd21b9a8c25", "score": "0.6555836", "text": "def layer_normalization(self, x):\n filter_size = x.get_shape()[-1] # last dimension of x, e.g. 512\n with tf.variable_scope('layer_normalization_{}_{}'.format(self.layer_i, self.model_type)):\n # 1. Normalize input by using mean and variance according to last dimension\n mean = tf.reduce_mean(x, axis=-1, keep_dims=True) # [batch_size, seq_len, 1]\n variance = tf.reduce_mean(tf.square(x - mean), axis=-1, keep_dims=True) # [batch_size, seq_len, 1]\n norm_x = (x - mean) * tf.rsqrt(variance + 1e-6) # [batch_size, seq_len, d_model]\n\n # 2. Rescale normalized input\n scale = tf.get_variable('layer_norm_scale', [filter_size], initializer=tf.ones_initializer) # [filter_size]\n bias = tf.get_variable('layer_norm_bias', [filter_size], initializer=tf.ones_initializer) # [filter_size]\n return norm_x * scale + bias # [batch_size, seq_len, d_model]", "title": "" }, { "docid": "256fbe52495b74530bd06f519f665b98", "score": "0.6475017", "text": "def normalize(xs, axis=None):\n if axis is None:\n return xs / xs.sum()\n elif axis == 0:\n return xs / xs.sum(0)\n else:\n return xs / xs.sum(1)[:, None]", "title": "" }, { "docid": "7d446f31899870bfca382967191cea2b", "score": "0.6471471", "text": "def __batch_norm(_input, name, reuse=False):\n return tf.layers.batch_normalization(_input, name=name, reuse=reuse)", "title": "" }, { "docid": "e2b21922f740d07ba035a38930f73462", "score": "0.6404205", "text": "def norm255_tensor(arr): #转float 0-1\r\n return arr / 255.", "title": "" }, { "docid": "d696fad784128a052cd8fae6f621a0c7", "score": "0.6399288", "text": "def normalise(tx):\n mean = np.mean(tx, axis=0)\n tx = tx - mean\n std = np.std(tx, axis=0)\n tx = tx / std\n \n min_ = np.min(tx, axis=0)\n max_ = np.max(tx, axis=0)\n tx = (tx-min_)/(max_-min_)\n return tx", "title": "" }, { "docid": "0e746c8fe96cf6274d5e058925514b57", "score": "0.63980657", "text": "def batchnorm(input):\n with tf.variable_scope(\"batchnorm\"):\n # this block looks like it has 3 inputs on the graph unless we do this\n input = tf.identity(input)\n channels = input.get_shape()[3]\n offset = tf.get_variable(\"offset\", [channels],\n dtype=tf.float32,\n initializer=init_zeros)\n scale = tf.get_variable(\"scale\", [channels],\n dtype=tf.float32,\n initializer=tf.random_normal_initializer(1.0, 0.02))\n mean, variance = tf.nn.moments(input, axes=[0, 1, 2], keep_dims=False)\n epsilon = 1e-5\n normalized = tf.nn.batch_normalization(input, mean, variance, offset,\n scale,\n variance_epsilon=epsilon)\n return normalized", "title": "" }, { "docid": "385ad76e224e4bea0a6b2089e46696ed", "score": "0.6377975", "text": "def normalizeAudio(samples):\n # normalisation - zero mean & jednotkova variance (unit variation)\n numpy = np.array(samples)\n #normalizace\n numpy = numpy / 2**15\n tensor = torch.as_tensor(numpy)\n tensor_float32 = torch.tensor(tensor, dtype=torch.float32)\n return tensor_float32", "title": "" }, { "docid": "4bf33d5fcc7a69a9420e2d0d3fe70bb0", "score": "0.63266057", "text": "def 
normalize(a, axis=None):\r\n a_sum = a.sum(axis)\r\n if axis and a.ndim > 1:\r\n # Make sure we don't divide by zero.\r\n a_sum[a_sum == 0] = 1\r\n shape = list(a.shape)\r\n shape[axis] = 1\r\n a_sum.shape = shape\r\n\r\n a /= a_sum", "title": "" }, { "docid": "9c6ad7847ca74b9ef3aebbe15ed7433c", "score": "0.6325423", "text": "def _normalize(datapoint, h, w, num_of_classes):\n image = datapoint['image']\n image = tf.cast(image, tf.float32)\n image = tf.image.resize_with_pad(image, target_width=h,\n target_height=w) # Final Output Shape\n image = image / 255 # Normalize\n if \"objects\" in datapoint:\n return image, datapoint['objects']\n else:\n return image, tf.one_hot(datapoint['label'], num_of_classes)", "title": "" }, { "docid": "de897f0ad23cb4609f12e3bbcb3e1ae9", "score": "0.6322167", "text": "def _l2_normalize(tensor, axis=None, eps=1e-12):\n return tensor * jax.lax.rsqrt((tensor * tensor).sum(\n axis=axis, keepdims=True) + eps)", "title": "" }, { "docid": "b77cc720992f0f7336dc6065ebb3a304", "score": "0.6309007", "text": "def normalize_vector(u):\n return u / length_vector(u)", "title": "" }, { "docid": "eb4f2ced2493463fd56e4509f8a0f131", "score": "0.6303926", "text": "def normalization(inputs, name=\"norm\", reuse=False, type=\"batch\",\n training=True):\n if type == \"instance\":\n with tf.variable_scope(name, reuse=reuse):\n depth = inputs.get_shape()[3]\n gamma = tf.get_variable(\n \"scale\",\n [depth],\n initializer=tf.random_normal_initializer(1.0, 0.01))\n beta = tf.get_variable(\n \"offset\",\n [depth],\n initializer=tf.constant_initializer(1.0))\n mean, variance = tf.nn.moments(inputs, axes=[1, 2], keep_dims=True)\n std_inverse = tf.rsqrt(variance + 0.00001)\n outputs = gamma * (inputs - mean) * std_inverse + beta\n else:\n with tf.variable_scope(name, reuse=reuse):\n outputs = tf.layers.batch_normalization(inputs,\n training=training,\n reuse=reuse)\n return outputs", "title": "" }, { "docid": "8e9096b1b7c5ecb04066feacafa4353a", "score": "0.6275353", "text": "def normalize_input(x_flatten):\n m = x_flatten.shape[1]\n\n # Normalizing the data into the range between 0 and 1.\n x_norm = np.divide(x_flatten, 255.)\n\n assert (x_norm.shape == (784, m))\n\n return x_norm", "title": "" }, { "docid": "750c8da9bf946881cf051977c8d708ed", "score": "0.626458", "text": "def custom_replace_normalise(tensor, on_zero, on_non_zero):\n res = tensor.clone()\n # res = res * 0.5 + 0.5\n res[tensor==0] = on_zero\n res[tensor!=0] = on_non_zero\n return res", "title": "" }, { "docid": "1c763f4f1c60d6e013333dbbb1f8c078", "score": "0.6259873", "text": "def normalize(A,axis=-1):\n A = asarray(A)\n shape = list(A.shape)\n shape[axis] = 1\n return A / length(A,axis).reshape(shape)", "title": "" }, { "docid": "a601220e4a0036947d12f88c616389fa", "score": "0.62587106", "text": "def standard_normalize(values, axes=(0,)):\n values_mean, values_var = tf.nn.moments(x=values, axes=axes, keepdims=True)\n epsilon = np.finfo(values.dtype.as_numpy_dtype).eps\n normalized_values = ((values - values_mean) / (tf.sqrt(values_var) + epsilon))\n return normalized_values", "title": "" }, { "docid": "20cc7ac818f319f61f8004e9600ca9d3", "score": "0.6252619", "text": "def normalize( x ):\n x /= np.max(x) \n return x", "title": "" }, { "docid": "de2f2309904a6e5e0017b8355b8720c6", "score": "0.625227", "text": "def normalize(array):\n quat = np.array(array)\n if quat.ndim == 1:\n return quat / np.sqrt(np.dot(quat, quat))\n elif quat.ndim == 2:\n return quat / np.sqrt(np.sum(quat * quat, axis=-1)[:, np.newaxis])\n else:\n raise 
TypeError(\"Input must be 1 or 2d\")", "title": "" }, { "docid": "d2091012090548503b73dbdbee2f2f1d", "score": "0.6249753", "text": "def normalize(self):\n self /= self[...,3:]", "title": "" }, { "docid": "b30f8e4b73deb38c0e4958548035a266", "score": "0.624922", "text": "def normalize(image, mean=None, std=None):\n if image.max() > 1:\n image = image / 255\n if mean and std:\n image = (image - mean) / std\n # in addition, roll the axis so that they suit pytorch\n return image.swapaxes(2, 0).swapaxes(1, 2)", "title": "" }, { "docid": "52da07b2b9e7127d50a885655f8d4579", "score": "0.6248463", "text": "def normalization(image, label):\n image = tf.cast(image, tf.float32) * (1. / 255) - 0.5\n return image, label", "title": "" }, { "docid": "faa271a4d033d6b77808d3109bf30953", "score": "0.6241075", "text": "def normalize(input_image):\n image = tf.cast(input_image, tf.float32) / LCD.TRAIN_PIXELS_MAX\n return image", "title": "" }, { "docid": "f615077a1027fb392450231c92d4067f", "score": "0.6240774", "text": "def normalize(x):\n return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)", "title": "" }, { "docid": "a78d4db4354530db0fc6ffb463e7c0ce", "score": "0.6237472", "text": "def _normalize_features(self, features, axis = -1):\n return safe_divide(features,\n jnp.linalg.norm(features, axis=axis, keepdims=True))", "title": "" }, { "docid": "1740aaf14940e587a8a4c7791cab5256", "score": "0.6235301", "text": "def normalize(image, label):\n x = tf.cast(image, tf.float32) / 255.0\n image = (x - 0.1307) / 0.3081 # image = (x - mean) / std\n return image, label", "title": "" }, { "docid": "d1555693c6a324c6d1e3ac4b530c5e1b", "score": "0.62261313", "text": "def normalize_img(\n input: torch.Tensor, \n mean: Tuple[float,float,float], \n std: Optional[Tuple[float,float,float]] = None\n) -> None:\n if std is None:\n for t, m in zip(input, mean):\n t.sub_(m)\n else:\n for t, m, s in zip(input, mean, std):\n t.sub_(m).div_(s)", "title": "" }, { "docid": "3f10756ee1fd6363542f2fe3ebe4adbc", "score": "0.621658", "text": "def normalize(arr):\n norm = np.linalg.norm(arr)\n if norm == 0:\n return arr\n return arr / norm", "title": "" }, { "docid": "42eaa934ad434a793e7a6ffd37ef4901", "score": "0.6208467", "text": "def normalize_rows(x: np.ndarray):\n return x/np.linalg.norm(x, ord=2, axis=1, keepdims=True)", "title": "" }, { "docid": "d4e00072ddedd2b9492f1dedb5e44eae", "score": "0.618817", "text": "def normalize(x_train, x_test, x_validate):\n return (x_train - 256 / 2) / 256, (x_test - 256 / 2) / 256, (x_validate - 256 / 2) / 256", "title": "" }, { "docid": "4b600f28cc5bae6dc5fa276296f7aac1", "score": "0.6187655", "text": "def normalize_points(points):\n imDims = 512.0 #each image in our dataset is 512x512\n points = list(points)\n for i in range(len(points)):\n points[i] /= imDims\n return np.array(points).astype(np.float32)", "title": "" }, { "docid": "2b27e0601993b53920a2817d8302790f", "score": "0.6182727", "text": "def _normalize_fn(example):\n # Type cast from uint8 to float32 and saturate\n example['fundus'] = tf.image.convert_image_dtype(example['fundus'],\n dtype=tf.float32, saturate=True)\n\n # Rescale image to range from -1 to 1\n example['fundus'] = 2*(example['fundus']) - 1\n return example", "title": "" }, { "docid": "fc7fef880014b4e1dabb66552aa8fd87", "score": "0.61797595", "text": "def normalize(self):\n self.u = self.u / np.sqrt(np.square(self.u) + np.square(self.v))\n self.v = self.v / np.sqrt(np.square(self.u) + np.square(self.v))\n self.vectors = self._get_vector_table()", "title": "" }, { "docid": 
"bc415b7fba46503c1f668714368cad82", "score": "0.6174537", "text": "def unnormalize(image, mean, std, out_type='array'):\n\n if type(image) == torch.Tensor:\n image = np.transpose(image.clone().numpy(), (1, 2, 0))\n \n\n normal_image = image * std + mean\n if out_type == 'tensor':\n return torch.Tensor(np.transpose(normal_image, (2, 0, 1)))\n elif out_type == 'array':\n return normal_image\n return None # No valid value given", "title": "" }, { "docid": "783e6c0400373af8670837a7adc2d8ac", "score": "0.6172038", "text": "def normalize(x):\n return (x + 1e-10) / (K.sqrt(K.mean(K.square(x))) + 1e-10)", "title": "" }, { "docid": "8fdeefd59954c845502fc72e5e8c1d6c", "score": "0.615891", "text": "def normalize(array, norm = norm):\n return array / norm(array)", "title": "" }, { "docid": "0972bcc0f29ddd1135edfb0762f13b2a", "score": "0.6148062", "text": "def _normalise(self, vector):\n #assert chktype(1, vector, Numeric.array([]), SparseArray)\n return vector / math.sqrt(_dot(vector, vector))", "title": "" }, { "docid": "52cd17cba34a842f452c28245967b425", "score": "0.61333936", "text": "def test_norm_tensor_single_vector():\n test_tensor = fwd.Tensor([2, 2, 1]) * fwd.Variable('x', 4)\n norm = test_tensor.norm()\n\n assert norm.value == 12\n assert norm.d['x'] == 3", "title": "" }, { "docid": "0d00fbd6b2637f1f2d6bf592e2240b0c", "score": "0.6122825", "text": "def instance_normalization(inputs, name=\"inorm\", reuse=False):\n with tf.variable_scope(name, reuse=reuse):\n depth = inputs.get_shape()[3]\n gamma = tf.get_variable(\"scale\", [depth], initializer=tf.random_normal_initializer(1.0, 0.01))\n beta = tf.get_variable(\"offset\", [depth], initializer=tf.constant_initializer(1.0))\n mean, variance = tf.nn.moments(inputs, axes=[1, 2], keep_dims=True)\n std_inverse = tf.rsqrt(variance + 0.00001)\n outputs = gamma * (inputs - mean) * std_inverse + beta\n return outputs", "title": "" }, { "docid": "fa172bde1792bf83292768ed5a4ef9f8", "score": "0.6121599", "text": "def normalise(x):\n if isinstance(x, list):\n x = np.array(x)\n return x / x.sum()", "title": "" }, { "docid": "446cb5171a536fb634691d7e9f7d528b", "score": "0.61169696", "text": "def normalize(data):\n return (data - np.min(data, axis=0)[None,:,:]) / (np.max(data, axis=0)[None,:,:] - np.min(data, axis=0)[None,:,:])", "title": "" }, { "docid": "10e995e6bc90522cb9451fd6b127530d", "score": "0.6104279", "text": "def normalization(data, mu=None, std=None):\n data = torch.tensor(data)\n if mu is None and std is None:\n mu = torch.mean(data, dim=0)\n std = torch.std(data, dim=0)\n std[torch.where(std == 0)] = 1\n data = (data - mu) / std\n return data, mu, std", "title": "" }, { "docid": "042f8ce10deeec3b3d50313d56f77da4", "score": "0.60882396", "text": "def __normalize(input, type, a, b):\n return cv2.normalize(input, None, a, b, type)", "title": "" }, { "docid": "c9d56026f256007946cc6fedd03aa282", "score": "0.6074887", "text": "def normalize(self):\n sum = self.get_sum()\n self.w /= sum\n self.x /= sum\n self.y /= sum\n self.z /= sum", "title": "" }, { "docid": "ef6ef36f3c328b04344da8d07d7a30b9", "score": "0.60714924", "text": "def norm_data_in_batch(self, tensor, inplace=True, data_norm_type=None):\n\n if not inplace:\n tensor = tensor.clone()\n\n if data_norm_type is None:\n data_norm_type = self.opt.data_norm_type\n mean, std = self.mean, self.std\n else:\n mean, std = self.norm_mean_std(data_norm_type)\n\n if data_norm_type == 'original':\n return tensor\n\n if data_norm_type in ['imagenet', 'gray']:\n tensor.div_(255.0)\n \n dtype = 
tensor.dtype\n mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)\n std = torch.as_tensor(std, dtype=dtype, device=tensor.device)\n if tensor.ndim == 4:\n tensor.sub_(mean[None, :, None, None]).div_(std[None, :, None, None])\n elif tensor.ndim == 5:\n tensor.sub_(mean[None, :, None, None, None]).div_(std[None, :, None, None, None])\n else:\n raise AssertionError('invalid number of tensor dims')\n return tensor", "title": "" }, { "docid": "6d4d72313a67aa1dee7cd06297378a70", "score": "0.6067312", "text": "def normalize(data):\n return (data - np.mean(data, axis=0, keepdims=True)) / np.sqrt(np.var(data, axis=0, dtype=np.float64, keepdims=True))", "title": "" }, { "docid": "92d24cde092bbb4ffa5af695695b832f", "score": "0.6065348", "text": "def normalizeRows(x):\n \n ### START CODE HERE ### (≈ 2 lines of code)\n # Compute x_norm as the norm 2 of x. Use np.linalg.norm(..., ord = 2, axis = ..., keepdims = True)\n x_norm = np.linalg.norm(x, ord =2, axis = 1, keepdims = True )\n \n # Divide x by its norm.\n x = x/x_norm\n ### END CODE HERE ###\n\n return x", "title": "" }, { "docid": "ca03e6c2193748d4a24aee473b1d4582", "score": "0.6057347", "text": "def Normalize(self):\n return self.Times(1 / self.Norm2())", "title": "" }, { "docid": "4fe00b7096c241ef915c668e5e64a446", "score": "0.6051712", "text": "def normalize(self):\n scaler = Normalizer()\n self.X_train = scaler.fit_transform(self.X_train)\n self.X_test = scaler.fit_transform(self.X_test)", "title": "" }, { "docid": "4fe00b7096c241ef915c668e5e64a446", "score": "0.6051712", "text": "def normalize(self):\n scaler = Normalizer()\n self.X_train = scaler.fit_transform(self.X_train)\n self.X_test = scaler.fit_transform(self.X_test)", "title": "" }, { "docid": "4fe00b7096c241ef915c668e5e64a446", "score": "0.6051712", "text": "def normalize(self):\n scaler = Normalizer()\n self.X_train = scaler.fit_transform(self.X_train)\n self.X_test = scaler.fit_transform(self.X_test)", "title": "" }, { "docid": "ce8aec476b180d3a26a93032cf66163d", "score": "0.6046455", "text": "def __normalize(x):\n acc = np.sum(x)\n if acc == 0:\n return x\n return x / acc", "title": "" }, { "docid": "4baac0ff5e3f4ccbbbb7a2f8db6373b9", "score": "0.6039049", "text": "def norm_layer( bottom, training, name):\n top = tf.layers.batch_normalization( bottom, axis=-1, # channels last,\n training=training,\n name=name )\n return top", "title": "" }, { "docid": "ad49cce5209f52fb78d8abfd76237bb5", "score": "0.6035908", "text": "def normalize(self, embeddings):\n\n # Calculation is different for matrices vs vectors\n if len(embeddings.shape) > 1:\n embeddings /= np.linalg.norm(embeddings, axis=1)[:, np.newaxis]\n else:\n embeddings /= np.linalg.norm(embeddings)", "title": "" }, { "docid": "ad69cbaef056cf9b0edb018531026b2d", "score": "0.6029327", "text": "def _transform_input(input: Tensor) -> Tensor:\n if not torch.is_tensor(input):\n raise TypeError(f\"Input type is not a Tensor. Got {type(input)}\")\n\n if len(input.shape) not in [2, 3, 4]:\n raise ValueError(f\"Input size must have a shape of either (H, W), (C, H, W) or (*, C, H, W). 
Got {input.shape}\")\n\n if len(input.shape) == 2:\n input = input.unsqueeze(0)\n\n if len(input.shape) == 3:\n input = input.unsqueeze(0)\n\n return input", "title": "" }, { "docid": "7568156e1ac2676bbf02ff5b92419afb", "score": "0.6023648", "text": "def product_normalize(x: torch.FloatTensor, dim: int = -1) -> torch.FloatTensor:\n return x / at_least_eps(at_least_eps(x.abs()).log().mean(dim=dim, keepdim=True).exp())", "title": "" }, { "docid": "d8d421971b82bf8848de6d1faa67f81a", "score": "0.60231686", "text": "def normalizeSignalTensor(m):\n rv = np.zeros(np.shape(m))\n rv[:,0,:] = allBasesStandardize(m[:,0,:])\n rv[:,1,:] = allBasesStandardize(m[:,1,:])\n rv[:,2,:] = allBasesStandardize(m[:,2,:])\n rv[:,3,:] = minMaxMedianInserts(m[:,3,:])\n rv[:,4,:] = minMaxMedianInserts(m[:,4,:])\n return rv", "title": "" }, { "docid": "4dabe9658064273d6ff1e127a7d6c71f", "score": "0.6020257", "text": "def test_norm_tensor_single_value():\n test_tensor = fwd.Variable('x', 2) + fwd.Variable('y', 3)\n norm = test_tensor.norm()\n\n assert norm.value == 5\n assert norm.d['x'] == 1\n assert norm.d['y'] == 1", "title": "" }, { "docid": "7920ae13322ed10eaa488995f997058c", "score": "0.60199755", "text": "def normalize(X, axis = -1, order = 2):\n l2 = np.atleast_1d(np.linalg.norm(X, order, axis))\n l2[l2 == 0] = 1\n return X / np.expand_dims(l2, axis)", "title": "" }, { "docid": "3a8a7e1de70dbee36b33b00db230bf98", "score": "0.60160947", "text": "def BatchNormalization(input_tensor, phase, use_relu=False, name=None):\n\tnormed = tf.contrib.layers.batch_norm(input_tensor, center=True, scale=True, is_training=phase, scope=name)\n\t\n\tif use_relu:\n\t\tnormed = tf.nn.relu(normed)\n\t\t\n\treturn normed", "title": "" }, { "docid": "5165e2735f11ffd5ef6c0f15dc59d417", "score": "0.6013386", "text": "def batch_norm(self, inputs, training, activation):\n # We set fused=True for a significant performance boost. 
See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n epsilon = _BATCH_NORM_EPSILON\n fused = True\n inputs = tf.layers.batch_normalization(\n inputs=inputs, axis=1 if self.data_format == 'channels_first' else -1,\n momentum=self.batch_norm_decay , epsilon=epsilon, center=True,\n scale=True, training=training, fused=fused)\n\n if activation!=None:\n inputs = activation(inputs)\n if self.IsShowModel: self.log('%30s'%('BN RELU'))\n else:\n if self.IsShowModel: self.log('%30s'%('BN'))\n\n return inputs", "title": "" }, { "docid": "eb523bc941d2291c357c76fde89b0ded", "score": "0.6003981", "text": "def normalize(x):\n return (x + 1e-10) / (K.sqrt(K.mean(K.square(x))) + 1e-10)", "title": "" }, { "docid": "0a9317932cad86809e2842005b0122b0", "score": "0.60023504", "text": "def _normalize(self, X_l, X_u):\n \n pass", "title": "" }, { "docid": "9521cf03c6471a15a77908b62cb27c37", "score": "0.5996868", "text": "def normalize(nparray, order=2, axis=0):\n norm = np.linalg.norm(nparray, ord=order, axis=axis, keepdims=True)\n return nparray / (norm + np.finfo(np.float32).eps)", "title": "" }, { "docid": "287c2098857db574a1169129672e699c", "score": "0.5993196", "text": "def standardize_batch(audio, mean, var):\n standardized_audio = tf.nn.batch_normalization(audio, mean, var, None, None, 1e-6)\n return standardized_audio", "title": "" }, { "docid": "5082ff45cba708ceb83f17bbe52cb3b6", "score": "0.5992371", "text": "def _add_and_norm(x):\n return tf.keras.layers.LayerNormalization()(tf.keras.layers.Add()(x))", "title": "" }, { "docid": "c222e921b62ada5ca53d07da2ec9da44", "score": "0.59896743", "text": "def get_normalised(self):\n lth = self.get_length()\n if lth != 0:\n return Vector([c / lth for c in self._vec])\n else:\n return self.null()", "title": "" }, { "docid": "b93ee390286af11f3a7e45aa77d188b6", "score": "0.598801", "text": "def normalize(vec: np.ndarray, axis=None) -> np.ndarray:\n vec = np.array(vec, dtype=np.float64)\n if axis is None:\n return vec/norm(vec)\n else:\n return np.divide(vec,\n np.tile(norm(vec, axis=axis),\n vec.shape[axis],\n ).reshape(vec.shape,\n order='F' if axis == 1 else 'C',\n )\n )", "title": "" }, { "docid": "c9d791e04d778068709d6a3488238c6b", "score": "0.5987778", "text": "def Xnormalize(x):\n return x / np.linalg.norm(x)", "title": "" }, { "docid": "ddbe740cc5303d03fb0be10f2602e532", "score": "0.598118", "text": "def normalizer(x, mi , ma, eps = 1e-20, dtype = np.float32):\n\n\n if dtype is not None:\n x = x.astype(dtype, copy = False)\n mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype, copy = False)\n ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype, copy = False)\n eps = dtype(eps)\n\n try:\n import numexpr\n x = numexpr.evaluate(\"(x - mi ) / (ma - mi + eps)\")\n except ImportError:\n x = (x - mi) / (ma - mi + eps)\n\n x = normalizeZeroOne(x)\n return x", "title": "" }, { "docid": "786babfe18fca1e0966aa7188c352cc7", "score": "0.597908", "text": "def normalize_target(true_x, true_y, feature_x, feature_y):\n masked_true_x = tf.boolean_mask(true_x, tf.logical_not(tf.is_nan(true_x)))\n\n norm_mean = tf.reduce_mean(masked_true_x)\n norm_std = tf.sqrt(tf.reduce_mean(tf.squared_difference(masked_true_x, norm_mean)))\n norm_x = (true_x - norm_mean) / norm_std\n return true_x, true_y, feature_x, feature_y, norm_x, norm_mean, norm_std", "title": "" }, { "docid": "79583c8286b28cc0a12bb460d543fffc", "score": "0.59618384", "text": "def normalize(X,s):\n return (X-s[1])*s[0]", "title": "" }, { "docid": "11a9d08e95ab9c0186f320669c40dbfd", 
"score": "0.5961315", "text": "def unfold_tensor(self, tensor: np.ndarray) -> np.ndarray:\n return tensor.T.reshape(self.x.shape, order=\"F\")", "title": "" }, { "docid": "b3b1d25148296c0aac7f9fc5f7f433f6", "score": "0.59596527", "text": "def denormalize(tensor):\n\n means = [0.485, 0.456, 0.406]\n stds = [0.229, 0.224, 0.225]\n\n denormalized = tensor.clone()\n\n for channel, mean, std in zip(denormalized[0], means, stds):\n channel.mul_(std).add_(mean)\n\n return denormalized", "title": "" }, { "docid": "609c3cefc0f200504051154cfcf95b8a", "score": "0.59586984", "text": "def norm_data(self, tensor, inplace=True, data_norm_type=None):\n\n if not inplace:\n tensor = tensor.clone()\n\n if data_norm_type is None:\n data_norm_type = self.opt.data_norm_type\n mean, std = self.mean, self.std\n else:\n mean, std = self.norm_mean_std(data_norm_type)\n\n if data_norm_type == 'original':\n return tensor\n\n # if data_norm_type in ['imagenet', 'gray', 'normal', 'activitynet', 'kinetics', 'competition']:\n # tensor.div_(255.0)\n \n dtype = tensor.dtype\n mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)\n std = torch.as_tensor(std, dtype=dtype, device=tensor.device)\n if tensor.ndim == 3:\n tensor.sub_(mean[:, None, None]).div_(std[:, None, None])\n elif tensor.ndim == 4:\n tensor.sub_(mean[:, None, None, None]).div_(std[:, None, None, None])\n else:\n raise AssertionError('invalid number of tensor dims')\n return tensor", "title": "" }, { "docid": "b9dc771cacfd3bf5bedffa1734130d77", "score": "0.59577113", "text": "def normalize(self):\n if 0:\n Y, attn_total = self.Y, self.attn_total\n #print Y.shape, attn_total.shape\n eps = 1.0e-10\n Z = Y / (attn_total.unsqueeze(1) + eps)\n self.Y = Z\n elif 0:\n Z = hardtanh(self.Y, -1.0, 1.0)\n Z = torch.tanh(3.0 * self.Y)\n self.Y = Z\n else: # no normalization\n Z = self.Y\n return Z", "title": "" }, { "docid": "aae0ab039fe4bdcab019217d458f995c", "score": "0.5955882", "text": "def normalize(data):\n normalized_data = data/np.linalg.norm(data,axis=1).reshape((data.shape[0], 1))\n return normalized_data", "title": "" }, { "docid": "54d15e5b9922b8cda6cc94dc7def6ef0", "score": "0.5951801", "text": "def normalize(self) -> Vector:\n return Vector.from_array(normalized_vector(self._a), copy=False)", "title": "" } ]
5504b1209d1a26576a35bdcbaafc8d6e
Main text parsing function
[ { "docid": "cc3440f37b4917c9629a8c5e29edb25b", "score": "0.0", "text": "def parse(\n data: str,\n raw: bool = False,\n quiet: bool = False\n) -> List[Dict]:\n jc.utils.compatibility(__name__, info.compatible, quiet)\n jc.utils.input_type_check(data)\n\n raw_output: List = []\n output_line: Dict = {}\n\n if jc.utils.has_data(data):\n\n for line in filter(None, data.splitlines()):\n area, size, details = line.split(maxsplit=2)\n start, end = area.split('-', maxsplit=1)\n detail_split = details.split()\n caller = ''\n options: List = []\n\n if details == 'unpurged vm_area':\n caller = 'unpurged vm_area'\n\n else:\n caller = detail_split[0]\n for item in detail_split[1:]:\n if '=' in item:\n key, val = item.split('=')\n output_line.update({key: val})\n else:\n options.append(item)\n\n output_line = {\n 'start': start,\n 'end': end,\n 'size': size,\n 'caller': caller or None,\n 'options': options\n }\n\n if output_line:\n raw_output.append(output_line)\n\n return raw_output if raw else _process(raw_output)", "title": "" } ]
[ { "docid": "cdaedad6e486837c12364bf2b5dfa2fe", "score": "0.8498604", "text": "def parse(self, text):", "title": "" }, { "docid": "e2d89aedac93ac6c6e41cbc1a364b65d", "score": "0.74919677", "text": "def parse(self, text) -> Generator:", "title": "" }, { "docid": "e2d70c25ed7ca7086ea10af06cdd9443", "score": "0.7287194", "text": "def parse():", "title": "" }, { "docid": "45ccb65fc12c282b20a9457d38d64fa0", "score": "0.71522886", "text": "def parse(text):\n if text.startswith('[conv:'):\n text = ext.conversion.convertparse(text.rstrip())\n elif text.startswith('[srch:'):\n text = ext.search.searchparse(text.rstrip())\n return text", "title": "" }, { "docid": "810d470dd3206c2558d1ca43e72aa79f", "score": "0.7062957", "text": "def parsefile():", "title": "" }, { "docid": "ed559fd90695b25a4165891ffe744a12", "score": "0.6984111", "text": "def parse_text(self, offset, start, stop):\n \n text = self.read_bytes(offset, start, stop)\n #TODO: add support for suplement text segment\n return parse_pairs(text)", "title": "" }, { "docid": "b8c000461a033dba6fe7181fea05502e", "score": "0.69024384", "text": "def simpleparsefile():", "title": "" }, { "docid": "16d2f9f93144c40f3364c7eb48b08dcc", "score": "0.68339634", "text": "def parse(self, text):\n for c in text:\n self._next = self._next(c)", "title": "" }, { "docid": "6ccde397c520efc2e4e413ea7a650de1", "score": "0.68305683", "text": "def fancyparsefile():", "title": "" }, { "docid": "2953b8fef060cc07c30417a8cc57f939", "score": "0.6796957", "text": "def _parse_(self, text):\n comment_position = text.find('#')\n line = text[:comment_position].strip()\n self.description = text[comment_position + 1:].strip()\n parts = line.split()\n if len(parts) != 48:\n sys.stdout.write(\"expect 48 space split parts, get %d\" %\n (len(parts)))\n return None\n # format : 0 qid:10 1:0.000272 2:0.000000 ....\n self.relevance_score = int(parts[0])\n self.query_id = int(parts[1].split(':')[1])\n for p in parts[2:]:\n pair = p.split(':')\n self.feature_vector.append(float(pair[1]))\n return self", "title": "" }, { "docid": "a068774e38ccf96fca656dd2082104df", "score": "0.676638", "text": "def parse(self, wikitext):\n # obviously still some work to do here ;)\n return wikitext", "title": "" }, { "docid": "a83081df8488808fa3d984508fda214f", "score": "0.6764544", "text": "def parse(self, text):\n for _ in self._grammar.scanString(text):\n pass", "title": "" }, { "docid": "46b15e43103285ee89f666bb19d6db40", "score": "0.67559797", "text": "def i__parseText(self, TextObject):\n numbersParse = False # True si on est en train de parser les datas\n for line in TextObject.split('\\n'):\n self.lineNumber = self.lineNumber+1\n # On debarasse la ligne des espaces & tabulations redondants et des caracs de fin de ligne\n line = self.cleanUpLine(line)\n if 0 <> len( line):\n if numbersParse:\n # Ligne de datas\n self.processDataLine(line)\n else:\n resultList = []\n tokens = line.split()\n for token in tokens:\n myTupple = self.processDeclarationName(token)\n resultList.append(myTupple)\n self.variablesList = resultList\n \n # On va maintenant calculer le nombre de datas attendues par ligne\n finalSize = 0\n for elt in resultList:\n finalSize = finalSize + elt[2]\n self.datasPerLine = finalSize\n # On est en train de commencer le parsing des datas\n numbersParse = True", "title": "" }, { "docid": "55a55a4e9fad1d94b210253223fa1cd2", "score": "0.67547846", "text": "def parse(self):\n\n with open(self.INPUT_FILE_NAME) as input_file:\n for _line in input_file:\n line = _line.strip()\n # print 
len(line)\n if len(line) == 0:\n if self.entry_valid():\n if self.CLEANUP_TEXT:\n self.cleanup_text()\n self.ENTRY['text_only'] = self.textonly_string(self.ENTRY['text'])\n self.ENTRY['words_count'] = len(self.ENTRY['text'].split())\n self.append_entry()\n # print \" #### Empty: reset\"\n self.reset_entry()\n continue\n elif self.RE_ID.match(line) and not self.reading_text():\n self.ENTRY['id'] = int(float(line))\n # print \" #### ID: {0}\".format(self.ENTRY['id'])\n elif '-->' in line and not self.reading_text():\n lst_str_time = [str_time.strip() for str_time in line.split('-->')]\n if len(lst_str_time) != 2:\n continue\n self.ENTRY['start'] = self.ms_from_string(lst_str_time[0])\n self.ENTRY['end'] = self.ms_from_string(lst_str_time[1])\n # print \" #### Start/end: {0:d}/{1:d}\".format(self.ENTRY['start'], self.ENTRY['end'])\n else:\n if self.ENTRY['text'] == None:\n self.ENTRY['text'] = ''\n if self.ENTRY['text_line_count'] > 0:\n self.ENTRY['text'] += ' '\n self.ENTRY['text'] += line.strip()\n self.ENTRY['text_line_count'] += 1\n # print \" #### Text: {0}\".format(self.ENTRY['text'])\n\n return self", "title": "" }, { "docid": "b415f1f01fc4effdf80787874c7f38ae", "score": "0.6696598", "text": "def analyseText(self, txt):\n # bourrin:\n for i in range(4):\n txt = txt.replace(\" \", \" \")\n txt = txt.replace(\" \\n\", \"\\n\")\n txt = txt.strip()\n \n print(\"-\"*40)\n print(\"DBG: analyseText: received:\\n%s\" % txt )\n print(\"-\"*40)\n sentences = []\n i = 0\n start = 0\n while i < len(txt):\n if i == len(txt)-1 or \\\n ( txt[i] == '.' and (txt[i+1].isupper() or txt[i+1] == '\\n' or (txt[i+1] == ' ' and txt[i+2].isupper() ) ) ) \\\n :\n if i == len(txt)-1:\n i += 1\n s = txt[start:i]\n start = i+1\n print(\"DBG: analyseText: sentence cut:\\n%s\" % s )\n sentences.append(s)\n i += 1\n \n listSentenceTrees = []\n for s in sentences:\n print(\"DBG: analyseText: sentence analysed:\\n%s\" % s )\n listPos = self._preprocessText(s)\n print(\"DBG: analyseSentence: listPos: \" + str(listPos) )\n result = self.parser.parse(listPos)\n print(\"#\"*20)\n print(\"DBG: analyseText: res:\\n\" + str(result) )\n print()\n listSentenceTrees.append(result)\n return listSentenceTrees", "title": "" }, { "docid": "7b72d4923d69e06e940fae5ecb06b1b8", "score": "0.6675486", "text": "def _parse_plain_text(self, text):\n lines = text.split(\"\\n\")\n\n progress = QtGui.QProgressDialog(self.tr(\"Parsing text...\"), self.tr(\"Abort\"), 0, len(lines), self.parent())\n progress.setWindowModality(QtCore.Qt.WindowModal)\n\n for i, line in enumerate(lines):\n progress.setValue(i)\n\n line = line.strip()\n utterance = line\n clause_unit = re.sub(\"[.,;:]\", \"\", line)\n words = clause_unit.split()\n\n il_elements = list()\n for w in words:\n if self.annotation_tree.data_structure_type == \\\n poioapi.data.GRAID:\n il_elements.append([\n { 'id' : self.annotation_tree.next_annotation_id,\n 'annotation' : w },\n { 'id' : self.annotation_tree.next_annotation_id,\n 'annotation' : '' },\n { 'id' : self.annotation_tree.next_annotation_id,\n 'annotation' : '' }])\n elif self.annotation_tree.data_structure_type ==\\\n poioapi.data.GRAIDDIANA:\n il_elements.append([\n { 'id' : self.annotation_tree.next_annotation_id,\n 'annotation' : w },\n # morphemes\n [\n [\n { 'id' : self.annotation_tree.next_annotation_id,\n 'annotation' : '' },\n { 'id' : self.annotation_tree.next_annotation_id,\n 'annotation' : '' }\n ]\n ],\n # graid1, graid 3\n { 'id' : self.annotation_tree.next_annotation_id,\n 'annotation' : '' },\n { 'id' : 
self.annotation_tree.next_annotation_id,\n 'annotation' : '' }])\n\n elements = [ [\n { 'id' : self.annotation_tree.next_annotation_id,\n 'annotation' : clause_unit },\n il_elements,\n { 'id' : self.annotation_tree.next_annotation_id,\n 'annotation' : '' }] ]\n\n utterance = [ { 'id' : self.annotation_tree.next_annotation_id,\n 'annotation' : utterance },\n elements,\n { 'id' : self.annotation_tree.next_annotation_id,\n 'annotation' : '' },\n { 'id' : self.annotation_tree.next_annotation_id,\n 'annotation' : '' } ]\n if self.annotation_tree.data_structure_type ==\\\n poioapi.data.GRAIDDIANA:\n utterance.append(\n { 'id' : self.annotation_tree.next_annotation_id,\n 'annotation' : '' })\n\n self.annotation_tree.append_element(utterance)\n if (progress.wasCanceled()):\n initCorpusReader()\n break\n\n progress.setValue(len(lines))", "title": "" }, { "docid": "ccc3252ad322f4778039fbb59504ee3b", "score": "0.6670464", "text": "def parse(parser):", "title": "" }, { "docid": "624cf51b760733430f40b92a90222f5e", "score": "0.66411865", "text": "def parse(self, text):\n #text = re.sub(r'</?event>', '', text) # remove pointless tag\n status = 'first' \n for line in text.split('\\n'):\n line = line.strip()\n if not line: \n continue\n elif line[0] == '#':\n self.comment += '%s\\n' % line\n continue\n elif line.startswith('<event'):\n if '=' in line:\n found = re.findall(r\"\"\"(\\w*)=(?:(?:['\"])([^'\"]*)(?=['\"])|(\\S*))\"\"\",line)\n #for '<event line=4 value=\\'3\\' error=\"5\" test=\" 1 and 2\">\\n'\n #return [('line', '', '4'), ('value', '3', ''), ('error', '5', ''), ('test', ' 1 and 2', '')]\n self.eventflag = dict((n, a1) if a1 else (n,a2) for n,a1,a2 in found)\n # return {'test': ' 1 and 2', 'line': '4', 'value': '3', 'error': '5'}\n continue\n \n elif 'first' == status:\n if '<rwgt>' in line:\n status = 'tag'\n else:\n self.assign_scale_line(line)\n status = 'part' \n continue\n if '<' in line:\n status = 'tag'\n \n if 'part' == status:\n part = Particle(line, event=self)\n if part.E != 0:\n self.append(part)\n elif self.nexternal:\n self.nexternal-=1\n else:\n if '</event>' in line:\n line = line.replace('</event>','',1)\n self.tag += '%s\\n' % line\n \n self.assign_mother()", "title": "" }, { "docid": "8317923ed568ef8e75a7dad140dded80", "score": "0.663979", "text": "def parse_text(self, text, orig, warn_func, get_text):\n result = \"\"\n current_color = \"0\"\n # default speed depends on the context\n current_speed = \"-1\"\n\n for type_, value in self.lex_that_text(text, warn_func):\n if type_ is self.TEXT:\n yield RenderedText.text(value)\n elif type_ is self.DELAY:\n pass\n elif type_ is self.ESCAPE:\n warn_func(\"notice\", \"\\\\ present in text, is this intended ?\")\n elif type_ is self.COLOR:\n actual_color = self.gamecolor.get(value)\n if actual_color is None:\n warn_func(\"error\", r\"bad \\c[] command\")\n continue\n if value == current_color:\n warn_func(\"warn\", \"same color assigned twice\")\n current_color = value\n ansi_color = self.colors.get(actual_color)\n if ansi_color:\n yield RenderedText.color(ansi_color)\n elif type_ is self.SPEED:\n if len(value) != 1 or value not in \"01234567\":\n warn_func(\"error\", r\"bad \\s[] command\")\n continue\n if result:\n warn_func(\"notice\", \"speed not at start of text, unusal\")\n if current_speed == value:\n warn_func(\"warn\", \"same speed specified twice\")\n current_speed = value\n elif type_ is self.ICON:\n if value not in self.find_stuff_in_orig(orig, self.ICON):\n warn_func(\"notice\", \"icon not present in original text\")\n 
yield RenderedText.icon(value)\n elif type_ is self.VARREF:\n value = self.lookup_var_checked(value, warn_func, orig,\n get_text)\n yield RenderedText.text(value)\n else:\n assert False\n if current_color != \"0\":\n warn_func(\"notice\", \"color does not end with 0\")", "title": "" }, { "docid": "2a20cfdddbd8a84df94d458a9e50d949", "score": "0.66364473", "text": "def process_text(self):\n\n self.get_words()\n self.get_freq_dist()\n self.get_num_words()\n\n # FILL IN THE CALLS TO THE REMAINING FUNCTIONS\n\n # After all of the functions have been called, all of the\n # attributes defined in `__init__` will have values.\n # After finishing this part, go back to the `main` method where\n # we left off.", "title": "" }, { "docid": "0b4db856be394a015c4dafa3cd9cc134", "score": "0.66181666", "text": "def _callLarkParse(self,bodytext):\n return self._frontend.parse(bodytext)", "title": "" }, { "docid": "86c7f7f311466931017ed4d7416f29dc", "score": "0.65970933", "text": "def main():\n parse_file(\"testfile\")", "title": "" }, { "docid": "38dc88e76fbe58a1f6d777e9eb79b9c7", "score": "0.6576457", "text": "def parse_text(text_value):\n\n if get_tess_mode() == 'sentence':\n return _parse_tess_sentence(text_value)\n else:\n return _parse_tess_line(text_value)", "title": "" }, { "docid": "0f07e4151bf5c3e8f960acae42db196d", "score": "0.6575264", "text": "def parse_text(html):\r\n parser = MainTextParser()\r\n parser.feed(html)\r\n return parser.data", "title": "" }, { "docid": "b2b4380b23f721160f09c94f0306718d", "score": "0.6568617", "text": "def parse_text(self):\n while True:\n try:\n index = self.input.index('#')\n if index > 0:\n self.output.append({'text':self.input[0:index]})\n self.input = self.input[index:]\n else:\n self.parse_tag()\n except ValueError:\n #No more hashes, take the whole thing.\n self.output.append({'text':self.input})\n return", "title": "" }, { "docid": "1a651172506f6615c8c90a62d68a0956", "score": "0.65683365", "text": "def process(self, text_input: str):", "title": "" }, { "docid": "34e4edff7d87867165438da86bc8dbc9", "score": "0.65407467", "text": "def text_parse(text):\n text = reduce(lambda a, kv: a.replace(*kv), REPLS, text) # Tokenize punctuation symbols\n shell_cmd = 'echo ' + text + ' | ./parse.sh ../lang_models/Spanish'\n connl_bin_output = subprocess.check_output([shell_cmd], shell=True)\n connl_txt = connl_bin_output.decode('utf-8') # From Binary to String\n result = ConnlTree()\n return result.parse(connl_txt)", "title": "" }, { "docid": "63b4d2c7028ad9f40119b6c560d80b44", "score": "0.6529718", "text": "def parserText(self):\n\n domObj = self.parser()\n objList = []\n for dom_stuff in domObj:\n text = dom_stuff.text_content()\n objList.append(text)\n objList = [x.strip() for x in objList]\n if len(objList) > 1:\n return {self.data: objList}\n else:\n return {self.data: objList[0]}", "title": "" }, { "docid": "618ccc20c6742f57328b654bee580bff", "score": "0.6506071", "text": "def parseOutText(f):\n print('\\nBegin parse_out_email_text.py parseOutText function\\n')\n\n\n myReturnString = ''\n f.seek(0) ### go back to beginning of file (annoying)\n all_text = f.read()\n # print(\"all_text - begin .............................\")\n # print(all_text)\n # print(\"all_text - end .............................\\n\")\n # print(\"type(all_text) - {}\\n\".format(type(all_text)))\n # type(all_text) - <class 'str'>\n\n ### split off metadata\n \n content = all_text.split(\"X-FileName:\") # split on text in email\n # example from email - X-FileName: Stokley, Chris 
(Non-Privileged).pst\n\n #print(\"len(content) - {}\\n\".format(len(content)))\n\n # print(\"content[0] - {}\".format(content[0]))\n # print(\"type(content[0]) - {}\".format(type(content[0])))\n # type(content[0]) - <class 'str'>\n\n # content[1] - With original punctuation from email \n # print(\"content[1] - begin .....\")\n # print(content[1])\n # print(\"content[1] - end .....\\n\")\n # print(\"type(content[1]) - {}\".format(type(content[1])))\n # type(content[1]) - <class 'str'>\n \n words = \"\"\n if len(content) > 1:\n ### remove punctuation\n # text_string = content[1].translate(string.maketrans(\"\", \"\"), string.punctuation) # no older Python\n \n # print(\"string.punctuation - {}\\n\".format(string.punctuation))\n # string.punctuation - !\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ # all of these - None \n # print(\"type(string.punctuation) - {}\\n\".format(type(string.punctuation)))\n # type(string.punctuation) - <class 'str'>\n \n # print('str.maketrans(\"\", \"\", string.punctuation) - ')\n # print(str.maketrans(\"\", \"\", string.punctuation))\n # {64: None, 124: None, 125: None, 91: None, 92: ....\n # Python documentation - dictionary mapping Unicode ordinals (integers) or characters (strings of length 1) to Unicode ordinals, strings (of arbitrary lengths) or None.\n # print(\"type(str.maketrans(\"\", \"\", string.punctuation)) - {}\\n\".format(type(str.maketrans(\"\", \"\", string.punctuation))))\n # type(str.maketrans(, , string.punctuation)) - <class 'dict'>\n\n text_string = content[1].translate(str.maketrans(\"\", \"\", string.punctuation))\n \n # Without original punctuation from email\n # print(\"text_string (punctuation stripped out) - \")\n # print(text_string)\n # print()\n # print(\"type(text_string) - {}\\n\".format(type(text_string)))\n # type(text_string) - <class 'str'>\n\n ### project part 2: comment out the line below\n words = text_string\n # print(\"words - \")\n # print(words)\n # print()\n # print(\"type(words) - {}\\n\".format(type(words)))\n # type(words) - <class 'str'>\n\n ### split the text string into individual words, stem each word,\n ### and append the stemmed word to words (make sure there's a single\n ### space between each stemmed word)\n \n mySplitOutput = text_string.split()\n # print(\"mySplitOutput - {}\\n\".format(mySplitOutput))\n # mySplitOutput - ['Hi', 'Everyone', 'If', 'you', 'can', 'read', 'this', 'message', 'youre', 'properly', 'using', 'parseOutText', 'Please', 'proceed', 'to', 'the', 'next', 'part', 'of', 'the', 'project']\n # print(\"type(mySplitOutput) - {}\\n\".format(type(mySplitOutput)))\n # type(mySplitOutput) - <class 'list'>\n \n # done AFTER stemmimg \n # vectorizer = CountVectorizer()\n myStemmer = SnowballStemmer('english')\n # print(\"myStemmer - {}\".format(myStemmer))\n # myStemmer - <nltk.stem.snowball.SnowballStemmer object at 0x10b4b57f0>\n # print(\"type(myStemmer) - {}\\n\".format(type(myStemmer)))\n # type(myStemmer) - <class 'nltk.stem.snowball.SnowballStemmer'>\n for myWord in mySplitOutput:\n # print(\"myWord - {}\".format(myWord))\n # print(\"type(myWord) - {}\\n\".format(type(myWord)))\n # type(myWord) - <class 'str'>\n myStemmedWord = myStemmer.stem(myWord)\n # print(\"myStemmedWord - {}\\n\".format(myStemmedWord))\n # print(\"type(myStemmedWord) - {}\\n\".format(type(myStemmedWord)))\n # type(myStemmedWord) - <class 'str'>\n # print(\"{} - {}\".format(myWord, myStemmedWord))\n myReturnString = myReturnString + myStemmedWord + ' '\n # print()\n \n print('\\nEnd parse_out_email_text.py parseOutText 
function\\n')\n\n return myReturnString\n # return words", "title": "" }, { "docid": "9319637ea6a0d609981e20df34edc942", "score": "0.65057105", "text": "def processFullText(self):\n pass", "title": "" }, { "docid": "732cfb872b0d46e1a68498573e8772b2", "score": "0.64789474", "text": "def parse(self):", "title": "" }, { "docid": "50e5157853ea03fe6bc415f6a6feef1e", "score": "0.6473375", "text": "def tgr_parser(contents):\n id_pat = re.compile(r\"^<text id=(\\w+)>$\")\n contains = \"normal\"\n formatted = []\n\n for line in contents.split('\\n'):\n line = line.rstrip()\n m = id_pat.match(line)\n if m:\n text_id = m.group(1)\n extracted = Extracted(text_id)\n\n if line == \"<contents>\":\n contains = \"contents\"\n text = \"\"\n elif line == \"</contents>\":\n contains = \"normal\"\n extracted.set_contents(text)\n elif line == \"<tags>\":\n contains = \"tags\"\n tags = \"\"\n elif line == \"</tags>\":\n contains = \"normal\"\n extracted.set_tags(tags)\n elif line == \"</text>\":\n formatted.append(extracted)\n else:\n\n if contains == \"contents\":\n text += line + '\\n'\n elif contains == \"tags\":\n tags += line + '\\n'\n\n return formatted", "title": "" }, { "docid": "d17a3fce9d4c27328a3af9d606ef4222", "score": "0.6472761", "text": "def scan(self, text):", "title": "" }, { "docid": "0b9ffec71669aefb0144db63cfd71e30", "score": "0.6399041", "text": "def __call__(self, text):\n output = []\n self.parse_text(text, 0, len(text), output)\n return \"\".join(output)", "title": "" }, { "docid": "0b87af0418c65b94c5ad66902ea38fa2", "score": "0.63920647", "text": "def ParseTextToDict(text):\n template = open(sys.argv[2])\n #print template.read()\n re_table = textfsm.TextFSM(template)\n fsm_results = re_table.ParseText(text)\n return fsm_results", "title": "" }, { "docid": "2d26c46941479b1776bda1fe94d1eaa2", "score": "0.6378467", "text": "def parseLine():", "title": "" }, { "docid": "254de7835b37ca679920582a51617021", "score": "0.63761586", "text": "def parse(self, text):\n text, result = self.parse_item(text, self.method())\n text = self.skip(text)\n if len(text):\n raise SyntaxError('unparsed text: %s' % repr(text))\n return result", "title": "" }, { "docid": "512387dd563bc38a11fda1c022d35442", "score": "0.6371685", "text": "def process(self, text):\n pass", "title": "" }, { "docid": "64aab503c1d224e3e2ebeb7c78d993f9", "score": "0.63701653", "text": "def parse(self, argv):", "title": "" }, { "docid": "e2fe1eebca89f7663cc96be7da0bcaac", "score": "0.6331882", "text": "def parse(raw_text, file_name):\n\n\t# set regex\n\tANCHOR = \"\\*\\*\\*[^\\*]+(Project Gutenberg|PROJECT GUTENBERG)[^\\*]+\\*\\*\\*\"\n\n\t# parse document body\n\ttext = re.search(\"%s.+%s\" % (ANCHOR, ANCHOR), raw_text, re.DOTALL).group(0)\n\ttext = re.sub(ANCHOR, \"\", text)\n\n\t# generate list of characters\n\tchar_list = [c for c in re.sub(\"\\n\", \"\", text.lower())]\n\t\n\t# generate list of unique characters\n\tunique_char = list(set(char_list))\n\n\t# generate list of words\n\tword_list = [w for w in text.split()]\n\n\t# generate list of word/pos tuples\n\tword_pos = [\"-\".join(w) for w in nltk.pos_tag(word_list)]\n\n\treturn {\n\t\t'source_path': file_name,\n\t\t'text': text,\n\t\t'char_list': char_list,\n\t\t'word_list': word_list,\n\t\t'word_pos': word_pos\n\t}", "title": "" }, { "docid": "0d2b125aaac56fb2167a7b8a36966159", "score": "0.63285714", "text": "def parse_text_tag(self):\n self.tag['text'] = \"\"\n self.input = self.input[1:] #Strip the (\n while True:\n index_close = self.input.index(')')\n if 
self.input[index_close - 1] == \"\\\\\":\n self.tag['text'] = self.tag['text']+self.input[0:index_close - 1] + \")\"\n self.input = self.input[index_close + 1:]\n else:\n self.tag['text'] = self.tag['text']+self.input[0:index_close]\n self.input = self.input[index_close + 1:]\n self.output.append(self.tag)\n return", "title": "" }, { "docid": "efa1bfffcc60634c536869d52751db42", "score": "0.6325796", "text": "def parse (self, text):\n \n doc = self.nlp (text)\n tokens = [\n {\n \"token\" : token.text,\n \"lemma\" : token.lemma_,\n \"pos\" : token.pos_,\n \"tag\" : token.tag_,\n \"dep\" : token.dep_,\n \"shape\" : token.shape_,\n \"alpha\" : token.is_alpha,\n \"stop\" : token.is_stop,\n \"sri\" : Semantic.lookup (token.text) if token.pos_ == 'NOUN' else None\n } for token in doc ]\n entities = [\n {\n \"text\" : ent.text,\n \"start_char\" : ent.start_char,\n \"end_char\" : ent.end_char,\n \"label\" : ent.label\n } for ent in doc.ents ]\n return {\n \"text\" : text,\n \"bl_parse\" : Semantic.biolink_lookup (text),\n \"parse\" : {\n \"tok\" : tokens,\n \"ent\" : entities \n }\n }", "title": "" }, { "docid": "c81a3988007ba2ce8212af33ec74353a", "score": "0.6322226", "text": "def parseText(text1, nlp):\n doc = nlp(text1)\n return doc", "title": "" }, { "docid": "e7d25826087740069b16cabf0b715617", "score": "0.6320478", "text": "def _parse(self):\n pass", "title": "" }, { "docid": "56617327271742135ea44f9b819786d7", "score": "0.63075376", "text": "def parse_txt(self, fnameIn, fnameOut='', encoding='utf-8-sig',\n glossing=False):\n self.REMEMBER_PARSES = True\n if len(fnameOut) <= 0:\n fnameOut = fnameIn + '-processed.xml'\n try:\n fIn = open(fnameIn, 'r', encoding=encoding)\n text = fIn.read()\n processedText = '<text>\\n'\n fIn.close()\n except IOError:\n self.raise_error('The text file ' + fnameIn + ' could not be opened.')\n return 0, 0\n rawTokens = text.split()\n wordsAnalyzed = totalWords = 0\n for token in rawTokens:\n if len(token) <= 0:\n continue\n m = self.rxTokenSearch.search(token)\n processedText += ' '\n if m is None:\n processedText += token\n continue\n puncl = m.group(1)\n wf = m.group(2)\n puncr = m.group(3)\n processedText += puncl\n if len(wf) > 0:\n anas = self.parse(wf.lower())\n if len(anas) > 0:\n wordsAnalyzed += 1\n processedText += Parser.ana2xml(wf, anas, glossing=glossing)\n totalWords += 1\n processedText += puncr + '\\n'\n processedText += '</text>'\n fOut = open(fnameOut, 'w', encoding='utf-8')\n fOut.write(processedText)\n fOut.close()\n return totalWords, wordsAnalyzed", "title": "" }, { "docid": "40641bf283e453615a69223f5d497a9a", "score": "0.63048756", "text": "def parseContent(content, wiki):", "title": "" }, { "docid": "d34c2d0a8e5b8b1b0baeb1efdc048e65", "score": "0.6302042", "text": "def parse(cls,txt):\n return yacc.parse(txt)", "title": "" }, { "docid": "4f475291ca5a68d9b5124f7a4fec423c", "score": "0.6299622", "text": "def do_parse(self, args):\r\n self.core.parse()", "title": "" }, { "docid": "049124021c5875e538f3f2f29fbec62d", "score": "0.6299399", "text": "def text_preprocess(self):\n tfp = TmFileParser(self.input_type)\n tfp.parse(self.input_file)\n\n srcTexts = self.humanize_texts(tfp.srcTexts, self.srcLang)\n tgtTexts = self.humanize_texts(tfp.tgtTexts, self.tgtLang)\n\n srcTexts = self.tokenize_texts(srcTexts, self.srcLang)\n tgtTexts = self.tokenize_texts(tgtTexts, self.tgtLang)\n\n return srcTexts, tgtTexts", "title": "" }, { "docid": "5e46b4fe2a3df80795280b2b54ed6ddc", "score": "0.6291491", "text": "def __init__(self, text):\n 
self._extract_from_text(text)", "title": "" }, { "docid": "237c8f4bf02ecbf4d9b3e7a5d3d05f68", "score": "0.6290856", "text": "def scriptReadText():\n pass", "title": "" }, { "docid": "0aa59d95db82490cc83abc2edb1717f7", "score": "0.6269318", "text": "def parse_func(self, text):\n raise NotImplementedError()", "title": "" }, { "docid": "9653b466b41afa28e4f643df055b0e89", "score": "0.62530625", "text": "def parse_text(text, cont):\n i = 0\n in_typedef = False\n LEN_STRUCT, LEN_UNION, LEN_ENUM, LEN_TYPEDEF = len(\"struct\"), len(\"union\"), len(\"enum\"), len(\"typedef\")\n max_chunk_len = max(LEN_STRUCT, LEN_UNION, LEN_ENUM, LEN_TYPEDEF)\n while i < len(text):\n \n # skip spaces\n if text[i].isspace():\n i += 1\n continue\n\n slice_max = min(i + max_chunk_len, len(text))\n next_chunk = text[i:slice_max]\n might_be_def = True\n\n if next_chunk.startswith(\"struct\"):\n might_be_def = False\n if is_def(text, i + LEN_STRUCT):\n i += LEN_STRUCT\n i = parse_struct(text, i, cont, in_typedef)\n in_typedef = False\n continue\n \n if might_be_def and next_chunk.startswith(\"union\"):\n might_be_def = False\n if is_def(text, i + LEN_UNION):\n i += LEN_UNION\n i = parse_union(text, i, cont, in_typedef)\n in_typedef = False\n continue\n \n if might_be_def and next_chunk.startswith(\"enum\"):\n might_be_def = False\n if is_def(text, i + LEN_ENUM):\n next_i = ignore_until(text, i, \"{\")\n enum_name = text[i:next_i-1].strip(\" {\")\n cont.add_enum(enum_name)\n\n i = ignore_until_matched(text, i, [\"{\", \"(\"], [\"}\", \")\"])\n next_i = ignore_until(text, i, [\";\"])\n var_name = parse_names(text[i:next_i-1])\n if var_name and not in_typedef:\n cont.add_names(var_name)\n \n i = next_i\n in_typedef = False\n continue\n\n # assume it's a global function pointer\n if next_chunk.startswith(\"extern\"):\n i = ignore_until_matched(text, i, [\"{\", \"(\"], [\"}\", \")\"], [\";\"])\n continue\n\n if next_chunk.startswith(\"typedef\"):\n in_typedef = True\n i += LEN_TYPEDEF\n continue\n\n # in a typedef and we weren't defining a struct or something useful\n if in_typedef:\n in_typedef = False\n next_i = ignore_until_matched(text, i, [\"{\", \"(\"], [\"}\", \")\"], [\";\"])\n continue\n\n # we're looking at a function/field declaration\n next_i = ignore_until_matched(text, i, [\"{\", \"(\"], [\"}\", \")\"], [\";\"])\n cont.add_names(parse_names(text[i:next_i-1]))\n in_typedef = False\n i = next_i", "title": "" }, { "docid": "fec8ad6d741eeb8d5c62bf81a70dde60", "score": "0.6252303", "text": "def parsehtml():", "title": "" }, { "docid": "1ac8af2658b3d9e9aae73744a4a75ec0", "score": "0.6245271", "text": "def parse_text(text):\n tokenized = tokenize(text)\n print(\"tokenized:\")\n print(tokenized)\n atomized = atomize(tokenized)\n print(\"atomized:\")\n print(atomized)\n return atomized", "title": "" }, { "docid": "0fe6378b1681ef34e3ef28258e8ead89", "score": "0.62400174", "text": "def __parsesingle(self, text):\n node = parser.st2tuple(parser.suite(text), line_info = 1)\n n = node[0]\n if n == symbol.encoding_decl:\n self.encoding = node[2]\n node = node[1]\n n = node[0]\n return self.file_input(node[1:], False)", "title": "" }, { "docid": "8ce84604849867cda0928391cd28c623", "score": "0.62103754", "text": "def parse(self,arbiter='Line',init=None,namestr=[]):\n\t\t\n\t\tprint \"parsing!\"\n\t\t\n\t\timport time\n\t\tif entity.time==0:\n\t\t\tentity.time=time.clock()\n\t\t\n\t\tif self.classname().lower()==\"corpus\":\n\t\t\tfor child in self.children:\n\t\t\t\tchild.parse()\n\t\t\treturn None\n\t\t\n\t\tif not 
init:\n\t\t\tinit=self\n\t\t\tif not hasattr(init,'meter_stats'):\n\t\t\t\tinit.meter_stats={'lines':{},'positions':{},'texts':{}, '_ot':{},'_constraints':{}}\n\t\t\tif not hasattr(init,'bestparses'):\n\t\t\t\tinit.bestparses=[]\n\t\t\tfrom Meter import Meter\n\t\t\tinit.meter=Meter(config['constraints'].split(),(config['maxS'],config['maxW']),config['splitheavies'])\n\t\t\tinit.meter_stats['_constraints']=sorted(init.meter.constraints)\n\t\t\t\n\t\t\tinit.ckeys=\"\\t\".join(sorted([str(x) for x in init.meter.constraints]))\n\t\t\t#self.om(\"\\t\".join([makeminlength(str(\"text\"),being.linelen),\t\t\t\tmakeminlength(str(\"parse\"),being.linelen),\t\"meter\",init.ckeys]))\n\t\t\t\n\t\t\tif being.omms:\n\t\t\t\tself.scansion_prepare()\n\t\t\t\n\t\tif (hasattr(self,'name')):\n\t\t\tprint \"## parsing: \"+str(self.name)\n\t\t\t\n\t\t\t\n\t\tif arbiter != self.classname():\n\t\t\traise Exception('spam', 'eggs')\n\n\t\t\tfor child in self.children:\n\t\t\t\tchild.parse(arbiter,init,namestr)\n\t\telse:\n\t\t\tif self.isBroken(): return []\n\t\t\tif hasattr(self,'ignoreMe') and self.ignoreMe: return []\n\t\t\twords = self.words()\n\t\t\tnumSyll=0\n\t\t\tfor word in words:\n\t\t\t\tif type(word)==type([]):\n\t\t\t\t\tfor wrd in word:\n\t\t\t\t\t\tif wrd.isBroken():\n\t\t\t\t\t\t\t#print wrd\n\t\t\t\t\t\t\treturn []\n\t\t\t\t\tnumSyll+=word[0].getNumSyll()\n\t\t\t\telse:\n\t\t\t\t\tif word.isBroken():\n\t\t\t\t\t\treturn []\n\t\t\t\t\tnumSyll+=word.getNumSyll()\n\t\t\tif not words: return []\n\t\t\t##\n\t\t\t\n\t\t\tif (numSyll < config['line_minsylls']):\n\t\t\t\t#print \"\\t>skipping (\"+str(numSyll)+\" is fewer than minimum of \"+str(config['parse_line_numsyll_min'])+\" sylls)\"\n\t\t\t\treturn []\n\t\t\telif(numSyll > config['line_maxsylls']):\n\t\t\t\t#print \"\\t>skipping (\"+str(numSyll)+\" is more than maximum of \"+str(config['parse_line_numsyll_max'])+\" sylls)\"\n\t\t\t\treturn []\n\t\t\t\n\t\t\t#print \"\\n\\t>parsing:\\t\"+str(self)+\"\\t(\"+str(numSyll)+\" sylls)\"\n\t\t\t\n\t\t\t\n\t\t\tself.parses=init.meter.parse(words,numSyll)\n\t\t\tself.numparses=len(self.parses)\n\t\t\tself.__bestparse=self.parses[0]\n\t\t\t\n\t\t\traise Exception('spam', 'eggs')\n\t\t\tprint \"headedness!\"\n\t\t\tif hasattr(being,'line_headedness'):\n\t\t\t\tfor parse in self.parses:\n\t\t\t\t\tprint parse.str_meter()\n\t\t\t\t\tprint parse.str_meter().startswith(str(being.line_headedness))\n\t\t\t\t\tif parse.str_meter().startswith(str(being.line_headedness)):\n\t\t\t\t\t\tself.__bestparse=parse\n\t\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint \"nolh\"\n\t\t\tinit.bestparses.append(self.__bestparse)\n\n\n\t\t\tif being.omms:\n\t\t\t\tself.scansion()\n\n\t\t\ttextname=self.findattr('name')\n\t\t\tif not textname:\n\t\t\t\ttextname=str(self).replace(\" \",\"_\")\n\t\t\t\n\t\t\t## store stats\n\t\t\tif (not textname in init.meter_stats['lines']):\n\t\t\t\tinit.meter_stats['lines'][textname]={}\n\t\t\tif (not textname in init.meter_stats['positions']):\n\t\t\t\tinit.meter_stats['positions'][textname]={}\n\t\t\tif (not textname in init.meter_stats['texts']):\n\t\t\t\tinit.meter_stats['texts'][textname]={}\n\t\t\tif (not textname in init.meter_stats['_ot']):\n\t\t\t\tinit.meter_stats['_ot'][textname]=makeminlength(\"line\",being.linelen)+\"\\tmeter\\t\"+init.ckeys+\"\\n\"\n\t\t\t\n\t\t\tparsedat=[]\n\t\t\tfor k,v in sorted(self.__bestparse.constraintScores.items()):\n\t\t\t\tif (not k in 
init.meter_stats['texts'][textname]):\n\t\t\t\t\tinit.meter_stats['texts'][textname][k]=[]\n\t\t\t\tinit.meter_stats['texts'][textname][k].append(v)\n\t\t\t\t\n\t\t\t\t#parsedat.append(v/len(self.__bestparse.positions))\t#???\n\t\t\t\tparsedat.append(v)\n\t\t\t\t\n\t\t\tlinekey=str(len(init.meter_stats['lines'][textname])+1).zfill(6)+\"_\"+str(self.__bestparse.posString())\n\t\t\tinit.meter_stats['lines'][textname][linekey]=parsedat\n\t\t\t\n\t\t\t## OT stats\n\t\t\tinit.meter_stats['_ot'][textname]+=makeminlength(str(self),being.linelen)+\"\\t\"+self.parses[0].str_ot()+\"\\n\"\n\t\t\tif len(self.parses)>1:\n\t\t\t\tfor parse in self.parses[1:]:\n\t\t\t\t\tinit.meter_stats['_ot'][textname]+=makeminlength(\"\",being.linelen)+\"\\t\"+parse.str_ot()+\"\\n\"\n\t\t\t\t\n\t\t\t\n\t\t\t\n\t\t\tfor posn in range(len(self.__bestparse.positions)):\n\t\t\t\tpos=self.__bestparse.positions[posn]\n\t\t\t\t(posdat,ckeys)=pos.formatConstraints(normalize=True,getKeys=True)\n\t\t\t\t\n\t\t\t\tfor cnum in range(len(ckeys)):\n\t\t\t\t\tif (not posn in init.meter_stats['positions'][textname]):\n\t\t\t\t\t\tinit.meter_stats['positions'][textname][posn]={}\n\t\t\t\t\tif (not ckeys[cnum] in init.meter_stats['positions'][textname][posn]):\n\t\t\t\t\t\tinit.meter_stats['positions'][textname][posn][ckeys[cnum]]=[]\n\t\t\t\t\tinit.meter_stats['positions'][textname][posn][ckeys[cnum]].append(posdat[cnum])\n\n\t\t\treturn self.parses\n\t\t\n\t\tif self==init:\n\t\t\tinit.maxparselen=0\n\t\t\tinit.minparselen=None\n\t\t\tfor parse in self.__bestparses:\n\t\t\t\tif not init.maxparselen:\n\t\t\t\t\tinit.maxparselen=len(parse.positions)\n\t\t\t\t\tinit.minparselen=len(parse.positions)\n\t\t\t\t\tcontinue\n\n\t\t\t\tif len(parse.positions)>init.maxparselen:\n\t\t\t\t\tinit.maxparselen=len(parse.positions)\n\t\t\t\tif len(parse.positions)<init.minparselen:\n\t\t\t\t\tinit.minparselen=len(parse.positions)", "title": "" }, { "docid": "f13860747dc03b8874f2ec54fdeefd66", "score": "0.62061787", "text": "def main():\n initParser()\n common()", "title": "" }, { "docid": "e0a3772577a0941c7e00c2f8fde7c840", "score": "0.61964303", "text": "def parse(self, text):\n return self.dict.txt2vec(text)", "title": "" }, { "docid": "e0a3772577a0941c7e00c2f8fde7c840", "score": "0.61964303", "text": "def parse(self, text):\n return self.dict.txt2vec(text)", "title": "" }, { "docid": "e5e918b3ecd4c98c6ba5d21ee9fae402", "score": "0.618036", "text": "def tokenize(text):", "title": "" }, { "docid": "6027092d8ece88d04f4c8ec43e862757", "score": "0.61746526", "text": "def parse(self, text):\n if isinstance(text, bytes):\n text = text.decode(\"ascii\")\n text = re.sub(\"\\s+\", \" \", unidecode(text))\n return self.communicate(text + \"\\n\")", "title": "" }, { "docid": "61feb561bccd61583849fe683f31f4d9", "score": "0.61650217", "text": "def parse(self, s):\n # TODO: Implement this", "title": "" }, { "docid": "41e7c3201d42756433bb0ee6e2663d88", "score": "0.6163392", "text": "def Parse(self, stat, knowledge_base):", "title": "" }, { "docid": "41e7c3201d42756433bb0ee6e2663d88", "score": "0.6163392", "text": "def Parse(self, stat, knowledge_base):", "title": "" }, { "docid": "03770a88d8db5edd0515ecb37b0307b7", "score": "0.61507773", "text": "def test(cls):\n parse(\"\"\"Let's try plain text first.\n \n \"\"\", cls)", "title": "" }, { "docid": "a1e95a9a45d559cf906ffd36d41c298b", "score": "0.61322004", "text": "def data_analysis(text):\r\n # print('cleaned_text_pre_data_analysis',text)\r\n #TODO: check if the entered text is a string or not. 
If its integer, output directly. Check the same in date_parser method\r\n monthsList = ['january', 'jan', 'february', 'march', 'april', 'feb', 'mar', 'apr', 'may', 'june', 'july', 'august', 'september', 'past', 'october', 'november', 'dec', 'december', 'nov', 'oct', 'sept', 'aug']\r\n weekPattern1 = r'^\\d+(wks|wk)$'\r\n weekPattern2 = r'^(wks|wk)\\d+$'\r\n dayPattern1 = r'^\\d+(day|days|d)$'\r\n dayPattern2 = r'^(day|days|d)\\d+$'\r\n monthPattern3 = r'^\\d+(mths|mth)$'\r\n monthPattern4 = r'^(mths|mth)\\d+$'\r\n splitData = text.split()\r\n # print('splitData:',splitData)\r\n if (len(splitData) == 2):\r\n if ((splitData[0].startswith('wk') or splitData[0].startswith('wks') ) and (type(eval(splitData[1])) == int)):\r\n splitData[0] = 'weeks'\r\n splitData[0], splitData[1] = splitData[1], splitData[0]\r\n parsedValue = date_parser(' '.join(splitData))\r\n elif ((splitData[1].startswith('wk') or splitData[1].startswith('wks')) and (type(eval(splitData[0])) == int)):\r\n splitData[1] = 'weeks'\r\n parsedValue = date_parser(' '.join(splitData))\r\n elif ((splitData[0].startswith('mth') or splitData[0].startswith('mths') ) and (type(eval(splitData[1])) == int)):\r\n splitData[0] = 'months'\r\n splitData[0], splitData[1] = splitData[1], splitData[0]\r\n parsedValue = date_parser(' '.join(splitData))\r\n elif ((splitData[1].startswith('mth') or splitData[1].startswith('mths')) and (type(eval(splitData[0])) == int)):\r\n splitData[1] = 'months'\r\n parsedValue = date_parser(' '.join(splitData))\r\n elif ((splitData[0] in monthsList) or (splitData[1] in monthsList)):\r\n parsedValue = date_parser(' '.join(splitData))\r\n elif not (splitData[0].startswith('week') or splitData[0].startswith('weeks') or splitData[1].endswith('week') or splitData[1].endswith('weeks') or splitData[0].startswith('day') or splitData[0].startswith('days') or splitData[1].endswith('day') or splitData[1].endswith('days') or splitData[1].endswith('month') or splitData[1].endswith('months') or splitData[0].startswith('month') or splitData[0].startswith('months')):\r\n date_parser(' '.join(splitData))\r\n try:\r\n if ((splitData[0].startswith('month') or splitData[0].startswith('months') or splitData[0].startswith('day') or splitData[0].startswith('days') or splitData[0].startswith('week') or splitData[0].startswith('weeks')) and (type(eval(splitData[1])) == int)):\r\n splitData[0], splitData[1] = splitData[1], splitData[0]\r\n parsedValue = date_parser(' '.join(splitData))\r\n elif ((splitData[1].endswith('month') or splitData[1].endswith('months') or splitData[1].endswith('day') or splitData[1].endswith('days') or splitData[1].endswith('week') or splitData[1].endswith('weeks')) and (type(eval(splitData[0])) == int)):\r\n parsedValue = date_parser(' '.join(splitData))\r\n except:\r\n pass\r\n elif (len(splitData) == 1) and ('week' in splitData[0] or 'weeks' in splitData[0]):\r\n if re.findall(r'^[0-9][week].*', splitData[0]):\r\n parsedValue = date_parser(''.join(splitData))\r\n elif re.findall(r'^[week|weeks].*', splitData[0]):\r\n splitData[0] = 'one week'\r\n parsedValue = date_parser(''.join(splitData))\r\n elif len(splitData) == 1:\r\n if splitData[0] in monthsList:\r\n parsedValue = date_parser(''.join(splitData))\r\n elif splitData[0] == 'month' or splitData[0] == 'months':\r\n splitData[0] = 'one month'\r\n parsedValue = date_parser(''.join(splitData))\r\n elif re.findall(weekPattern1, splitData[0]):\r\n newstring = re.sub(r'wks|wk',' weeks',splitData[0])\r\n data_analysis(newstring)\r\n elif re.findall(weekPattern2, 
splitData[0]):\r\n newstring1 = re.sub(r'wks|wk','weeks ',splitData[0])\r\n data_analysis(newstring1)\r\n elif re.findall(dayPattern1, splitData[0]):\r\n newDayString = re.sub(r'days|day|d',' days',splitData[0])\r\n data_analysis(newDayString)\r\n elif re.findall(dayPattern2, splitData[0]):\r\n newDayString1 = re.sub(r'days|day|d','days ',splitData[0])\r\n data_analysis(newDayString1)\r\n elif re.findall(monthPattern3, splitData[0]):\r\n newDMonthString1 = re.sub(r'mths|mth',' months',splitData[0])\r\n data_analysis(newDMonthString1)\r\n elif re.findall(monthPattern4, splitData[0]):\r\n newDMonthString2 = re.sub(r'mths|mth','months ',splitData[0])\r\n data_analysis(newDMonthString2)\r\n else:\r\n parsedValue = date_parser(' '.join(splitData))\r\n# TODO: If the number associated with weeks is greater than 52, revert output to 9999\r\n # print('output_value',parsedValue)\r\n return parsedValue", "title": "" }, { "docid": "d506d054802697ccd1c1708708d1ea38", "score": "0.6113445", "text": "def _parse_data(self):", "title": "" }, { "docid": "5291c740511b3dbbf164ae508a6777c4", "score": "0.6113256", "text": "def on_parsing(options, args):", "title": "" }, { "docid": "f4524912b20c7936a639b3ec7e7e6089", "score": "0.6110519", "text": "def parse(text, path=None, log=None):\n root = get_root(text)\n return parse_report(root, path, log)", "title": "" }, { "docid": "da76c6918995304543f55e8a2a3622b8", "score": "0.6107022", "text": "def test_parse():", "title": "" }, { "docid": "2b96d1f9cc5645f54a283711f855c9fa", "score": "0.61055934", "text": "def interpret(text):\n\n interpreter = Interpreter(text)\n try:\n result = interpreter.parse()\n print(result)\n except ParserError as error:\n print(error)", "title": "" }, { "docid": "525001b441c175316f218e2e7dda8222", "score": "0.6102652", "text": "def parse_text(self, text):\n text = text.replace(\" \", \"\").upper()\n index = 0\n shunting_yard_ready = Queue()\n\n while index < len(text):\n text = text[index:]\n\n # Check for number\n match = re.search(\"^[-0123456789.]+\", text)\n if match is not None:\n shunting_yard_ready.push(float(match.group(0)))\n index = match.end(0)\n continue\n\n # Check for function\n match = re.search(\"|\".join([\"^\" + func for func in self.functions.keys()]), text)\n if match is not None:\n shunting_yard_ready.push(self.functions[match.group(0)])\n index = match.end(0)\n continue\n\n # Check for operator\n match = re.search(\"|\".join([\"^\" + op for op in self.operators.keys()]), text)\n if match is not None:\n shunting_yard_ready.push(self.operators[match.group(0)])\n index = match.end(0)\n continue\n\n # Check for paranthases\n match = re.search(\"^[()]\", text)\n if match is not None:\n shunting_yard_ready.push(match.group(0))\n index = match.end(0)\n continue\n\n return shunting_yard_ready", "title": "" }, { "docid": "bc036425684e4d60e47e12f52aa42e68", "score": "0.60963887", "text": "def text_to_parse(self):\n return self.__text_to_parse", "title": "" }, { "docid": "fe47f3f2a6f7902884610309db304b5d", "score": "0.60837525", "text": "def _parse_file(f):", "title": "" }, { "docid": "1d5cf8fece060a442f5acc778a470c83", "score": "0.6081058", "text": "def __init__(self, text):\n i1 = None\n i2 = None\n\n lines = text.splitlines()\n\n tokens = ['***', 'PROJECT', 'GUTENBERG']\n s_tokens = tokens + ['START']\n e_tokens = tokens + ['END']\n\n for i, line in enumerate(lines):\n\n # Match \"start\" line.\n if False not in [token in line for token in s_tokens]:\n i1 = i+1\n\n # Match \"end\" line.\n if False not in [token in line for token 
in e_tokens]:\n i2 = i\n\n self.text = '\\n'.join(lines[i1:i2])", "title": "" }, { "docid": "a8b6c8a09045d320caa798fa660ef28b", "score": "0.6079232", "text": "def __text_init(filename):\n \n return_dict = {}\n \n predicates = set()\n \n with open(filename, \"r\") as file: \n text = file.read() \n \n # Find the language and keep the part of the text with the relevent\n # information.\n pattern=\"\\n\\s*language\\(\\'(\\w*)\\'\\)(.*)$\"\n language,text = re.findall(pattern, text, flags=re.DOTALL)[0]\n return_dict['language']=language\n text = re.sub(\"(?:^\\s*%.*?$)|(?:%+.*?%+)\", \"\", text, flags=re.MULTILINE)\n \n # Store the causal clauses.\n pattern=\"^\\s*(-?\\w*)\\s*<===\\s*(.*)\\s*$\"\n causal_links_text = re.findall(pattern, text, flags=re.MULTILINE)\n \n causal_links = []\n incompatibility_links = []\n dictionary = {}\n\n for conseq_text,causes_text in causal_links_text:\n \n conseq = list(re.findall(\"([\\w-]+)\", conseq_text))[0]\n causes = list(re.findall(\"([\\w-]+)\", causes_text))\n causal_links.append((causes,conseq))\n \n for cause in causes:\n predicates.add(cause)\n predicates.add(conseq)\n \n return_dict['causal_links']=causal_links\n \n \n #Store the incompatibilities\n pattern=\"^\\s*incompatible\\(\\[(.*)\\]\"\n incompatibility_list_texts = re.findall(pattern, text, flags=re.MULTILINE)\n for incompatibility_text in incompatibility_list_texts:\n \n incompatibility = list(re.findall(\"([\\w-]+)\", incompatibility_text))\n incompatibility_links.append(incompatibility)\n for pred in incompatibility:\n predicates.add(pred)\n \n return_dict['incompatibility_links']=incompatibility_links\n \n \n \n # Store the preferences.\n pattern=\"preference\\(\\s*?(-?\\w*)\\s*?,\\s*?(-?\\d*)\\s*?\\)\"\n preferences_text = re.findall(pattern, text, flags=re.MULTILINE)\n preferences = {}\n for predicat,value in preferences_text:\n preferences[predicat]=int(value)\n \n return_dict['preferences'] = preferences\n \n # Store the actions.\n pattern=\"action\\(\\s*?(\\w*)\\s*?\\)\"\n return_dict['actions'] = re.findall(pattern, text, flags=re.MULTILINE)\n \n for action in return_dict['actions']:\n predicates.add(action)\n \n # Store the defaults. 
\n pattern=\"default\\(\\s*?(-?\\w*)\\s*?\\)\"\n return_dict['defaults'] = re.findall(pattern, text, flags=re.MULTILINE)\n \n # Store the initial situations.\n pattern=\"initial_situation\\(\\s*?(-?\\w*)\\s*?\\)\"\n return_dict['initial_situations'] = list(re.findall(pattern, text, flags=re.MULTILINE))\n \n # Store the predicate negations.\n for predicate in set(predicates):\n if predicate[0]==\"-\":\n predicates.add(predicate[1:])\n else:\n predicates.add(\"-\"+predicate)\n \n \n return_dict['predicates']=predicates\n \n\n \n # Store the dictionary\n\n pattern=\"dictionary\\((.*)\\)\"\n dictionary_list_texts = re.findall(pattern, text, flags=re.MULTILINE)\n for dictionary_text in dictionary_list_texts:\n \n word = list(re.findall(\"([-?\\w\\s*?]+)\", dictionary_text))\n dictionary[word[0]] = word[1]\n\n \n return_dict['dictionary']=dictionary\n \n return return_dict", "title": "" }, { "docid": "d70bb2dd4585e6c78020d6b8687580e7", "score": "0.60660183", "text": "def main():\n\tpadding = \"---------------------------------------------\"\n\n\t\"\"\"\n>>>>-------------------------------------------------------------------------------------Start--MAIN\n\t\"\"\"\n\n\tcurrentDirectory = os.path.dirname(os.path.realpath(__file__)) + \"/Texts/\"\n\n\tlistOfFolders, listOfFiles = [], []\n\t\n\tfor root, dirs, files in os.walk(currentDirectory):\n\t\tfor dir in dirs:\n\t\t\tlistOfFolders.append(dir)\n\n\tdictOfFolders = {}\n\n\tfor each in range(len(listOfFolders)):\n\t\tdictOfFolders[each] = listOfFolders[each]\n\n\tprint \"Which directory would you like to search?\\n\"\n\n\tfor each in dictOfFolders:\n\t\tprint str(each+1) + \": \" + dictOfFolders[each]\n\n\tfolderPicked = raw_input(\">>\")\n\tfolderPicked = dictOfFolders[int(folderPicked)-1]\n\n\ttraverseDirectory = currentDirectory + '/' + folderPicked + '/'\n\n\tfor root, dirs, files in os.walk(traverseDirectory):\n\t\t\tfor file in files:\n\t\t\t\tlistOfFiles.append(file)\n\n\tdictOfFiles = {}\n\n\tfor each in range(len(listOfFiles)):\n\t\tdictOfFiles[each] = listOfFiles[each]\n\n\tprint \"\"\"Which file would you like to read into the program?\n\t\t\"\"\" + padding\n\n\tfor each in dictOfFiles:\n\t\tprint str(each+1) + \": \" + dictOfFiles[each]\n\n\tfilePicked = raw_input(\">>\")\n\tfilePicked = dictOfFiles[int(filePicked)-1]\n\n\tfileToBeParsed = traverseDirectory + filePicked\n\n\t\"\"\"\n>>>>-------------------------------------------------------------------------------------Start--Options\n\t\"\"\"\n\n\tquoteDelim = raw_input(\"\"\"\n\tWhat is the quote delimiter for this text?\n\n\t\\\" or \\' \n\n\t>>\"\"\")\n\n\tstripApos = raw_input(\"\"\"\n\tStrip beginning and trailing apostrophes from words? \n\t\n\tY or N\n\n\t>>\"\"\")\n\n\tif stripApos.lower() != 'y' and stripApos.lower() != 'n':\n\t\tprint \"Defaulting to Y\"\n\n\tstopWords = raw_input(\"\"\"\n\tRun the file against a list to remove common words? \n\n\t(If you choose yes the program will use a stock stop words list \n\tin the 'src' folder. \n\n\tYou can replace this file with your own.) 
\n\n\tY or N\n\n\t>>\"\"\")\n\n\tif stopWords.lower() == 'y': \n\t\tstopWordsFile = open('src/stopwords.txt', 'r')\n\t\tstopWordsList = stopWordsFile.read().split()\n\telse:\n\t\tstopWordsList = False\n\t\n\t\"\"\"\n>>>>-------------------------------------------------------------------------------------Tokenize\n\t\"\"\"\n\n\twords = parseFileIntoWords(readFile(fileToBeParsed))\n\n\tpool = Pool()\n\n\t#markers = findMarkers(words)\n\n\tt2 = time.time()\n\n\tif quoteDelim == \"'\" or stripApos.lower() == 'y':\n\t\twords = pool.map(aposStrip, words)\n\n\twordCounter = frequencyDistribution(words, \"word\", stopWordsList)\n\n\tprint \"\\n\\n\" + padding + \"Completed in \" + str(round((time.time() - t2),2)) + \" sec\" + padding\n\t\n\t\"\"\"\n>>>>-------------------------------------------------------------------------------------Write--Tokenizer--Output\n\t\"\"\"\n\n\tprint \"\\n\\nPrinting tokenizer output to tokenizer_ouput_\" + filePicked[0:-4] + \".txt in\\n\\n\" + os.path.dirname(os.path.realpath(__file__)) + \"\\n\\n\"\n\n\tfileWrite = open(\"tokenizer_output_\" + filePicked[0:-4] + \".txt\", \"w\")\n\tfor tup in sorted(wordCounter.most_common(), key = lambda word: word[0]):\n\t\tfileWrite.write(str(tup[0]) + \", \" + str(tup[1]) +\"\\n\")\n\tfileWrite.close()\n\n\t#wordLengthCounter = frequencyDistribution(words, \"length\", quoteDelim, stripApos)\n\n\tprint \"\\n\"\n\tinput = raw_input(\"\"\"\n\tShow the top _____ number of words in the text:\n\n\tEnter value here\n\n\t>>\"\"\")\n\n\tprint \"Generating top \" + input + \" words in \" + filePicked\n\n\tprint padding\n\tfor tup in wordCounter.most_common(int(input)):\n\t\tprint \"\\nWord: \" + tup[0] + \", Count: \" + str(tup[1])\n\tprint padding\n\n\t\"\"\"\n>>>>-------------------------------------------------------------------------------------Call--Visualizer\n\t\"\"\"\n\n\tprint \"\"\"\n\tVisualize the tokenizer's output using the options below:\n\n\t1: Frequency Plot - display the frequency with which a word \n\tapears over the life of the text.\n\n\tYou may want to select one of the words in the top \"\"\" + input + \"\"\"\n\tdisplayed above. \n\n\tType '1' to select this option.\n\n\t2. Histogram - chart the most often used words in the text \n\tin a standard bar chart. \n\n\tType '2' to select this option.\n\n\tType 'Q' to exit.\"\"\"\n\t\n\n\tvisualizationType = raw_input(\"\"\"\n\n\t>>\"\"\")\n\n\twhile visualizationType.lower() != 'q':\n\t\tif visualizationType == '1':\n\t\t\twordToTest = raw_input(\"\"\"\n\tWhich word(s) would you like to plot? \n\n\tEnter a list of up to five words seperated by spaces:\n\n\t>>\"\"\"\n\t\t\t)\n\t\t\twordToTest = wordToTest.split()\n\t\t\tfrequencyPlot(wordToTest, words, filePicked)\n\t\telif visualizationType == '2':\n\t\t\thistogram(filePicked, wordCounter.most_common(25), len(words))\n\t\telse:\n\t\t\tprint \"\"\"\n\tVisualize the tokenizer's output using the options below:\n\n\t1: Frequency Plot - display the frequency with which a word \n\tapears over the life of the text.\n\n\tYou may want to select one of the words in the top \"\"\" + input + \"\"\"\n\tdisplayed above. \n\n\tType '1' to select this option.\n\n\t2. Histogram - chart the most often used words in the text \n\tin a standard bar chart. 
\n\n\tType '2' to select this option.\n\n\tType 'Q' to exit.\n\t\t\t\"\"\"\n\t\n\n\t\tvisualizationType = raw_input(\"\"\"\n\n\t>>\"\"\"\n\t\t)\n\n\tsys.exit()", "title": "" }, { "docid": "8335c6d427629cda64fcaea21218a043", "score": "0.6063464", "text": "def parse(self, text):\n if type(text) != str:\n corenlp_text = text.encode('utf8')\n if corenlp_text[0].isspace(): # dont strip white spaces\n corenlp_text = '.' + corenlp_text[1:]\n\n # Quote (with percent-encoding) reserved characters in URL for CoreNLP\n corenlp_text = urllib.quote(corenlp_text)\n output = self.corenlp.annotate(corenlp_text, properties=self.props)\n\n # flatten sentences and tokens\n tokenlists = [s['tokens'] for s in output['sentences']]\n tokens = itertools.chain.from_iterable(tokenlists)\n names = []\n for token in tokens:\n if token['ner'] != 'O':\n name = {\n 'label': token['ner'],\n 'begin': token['characterOffsetBegin'],\n 'end': token['characterOffsetEnd'],\n 'text': token['originalText'],\n 'source': 'corenlp'\n }\n names.append(name)\n\n # Handle multi-word tokens:\n # Merge any adjacent Target tokens, if of the same type and\n # separated by a space, into one span.\n names.sort(key=lambda x: int(x['begin']))\n new_names = []\n skip_names = []\n for n in names:\n if n in skip_names:\n continue\n next_name = [n2 for n2 in names if\n n['label'] == 'Target' and\n n2['label'] == 'Target' and\n int(n2['begin']) == int(n['end']) + 1]\n if len(next_name) > 0:\n n['text'] += ' ' + next_name[0]['text']\n n['end'] = next_name[0]['end']\n skip_names.append(next_name[0])\n\n # Either way, save this one\n new_names.append(n)\n\n if self.gazette_targets:\n # Get all matching targets\n matching_targets = self.gazette_target_match(text,\n self.gazette_targets)\n\n # Remove duplicates\n for target_dict in matching_targets:\n for name_dict in new_names:\n if target_dict['label'] == name_dict['label'] and \\\n target_dict['begin'] == name_dict['begin'] and \\\n target_dict['end'] == name_dict['end'] and \\\n target_dict['text'] == name_dict['text']:\n matching_targets.remove(target_dict)\n break\n\n if len(matching_targets) > 0:\n # Update the token 'ner' fields too\n tokenlists = [s['tokens'] for s in output['sentences']]\n for target_dict in matching_targets:\n tokens = itertools.chain.from_iterable(tokenlists)\n # Targets can be multi-word, but we need to annotate tokens.\n # We will make an assumption that any token in the valid range\n # with a matching term should be updated.\n match_tokens = [t for t in tokens \n if (t['characterOffsetBegin'] >= target_dict['begin'] and \\\n t['characterOffsetEnd'] <= target_dict['end'] and \\\n t['originalText'] in target_dict['text'])]\n for t in match_tokens:\n t['ner'] = target_dict['label']\n #print('Updated %s to %s' % (t['originalText'], target_dict['label']))\n\n # Combine NER items and gazette targets\n new_names += matching_targets\n\n return {\n 'ner': new_names,\n 'X-Parsed-By': CoreNLPParser.CORENLP_PARSER,\n 'sentences': output['sentences']\n }", "title": "" }, { "docid": "b49fa38f4b3f2eca543172d3165f235d", "score": "0.60630786", "text": "def analyse_text(self):\n result = ''\n \n if self.text_name == 'None chosen':\n tkinter.messagebox.showinfo(title='Error: no text file chosen. 
',\n message=\"Please choose a text to analyse.\", icon='error')\n \n else:\n result = analyse_this(self.text_path_and_name,\n self.dict_path_and_name,\n SortByAlpha=self.SortByAlpha.get(),\n StartTrigger=self.StartTrigger.get(),\n StopTrigger=self.StopTrigger.get(),\n CheckingSpelling=self.spellcheck.get(),\n SplitAndListCompounds=self.SplitCompoundsAndList.get(),\n IgnoreNListProperNouns=self.IgnoreAndListPropers.get(),\n ListingContractions=self.ListContractions.get(),\n ListingAdverbs=False,\n ListingGerunds=False)\n \n self.save_as_file(result)\n return", "title": "" }, { "docid": "acb50b4ccdf6608fbc59e20760250ea9", "score": "0.60626537", "text": "def _Parse(self, line):\n line_stripped = line.strip()\n \n # Completed\n if line_stripped.startswith('x '):\n completed = True\n line_stripped = line_stripped[2:]\n else:\n completed = False\n\n # Convenience string splitting function without the traceback mess\n def head_tail(s, split_on=None):\n if s:\n try:\n h,t = s.split(split_on, 1)\n except ValueError:\n h = s\n t = ''\n return h,t\n else:\n return '', ''\n\n # Completion date\n completion_date = None\n if completed:\n word, tail = head_tail(line_stripped)\n try:\n time_struct = time.strptime(word, '%Y-%m-%d')\n except ValueError:\n pass\n else:\n completion_date = datetime.date(*time_struct[:3])\n line_stripped = tail\n\n # Priority\n if line_stripped.startswith('('):\n end_pri = line_stripped.find(') ')\n if end_pri != -1:\n pri = line_stripped[1:end_pri].strip()\n if len(pri) == 1 and pri in string.uppercase:\n priority = pri\n else:\n priority = None\n line_stripped = line_stripped[end_pri+1:].strip()\n else:\n priority = None\n else:\n priority = None\n\n # Creation date\n creation_date = None\n word, tail = head_tail(line_stripped)\n try:\n time_struct = time.strptime(word, '%Y-%m-%d')\n except ValueError:\n pass\n else:\n creation_date = datetime.date(*time_struct[:3])\n line_stripped = tail\n\n # Body - main part of text after priority/dates but with contexts/projects in-tact\n body = line_stripped\n\n # Contexts and projects\n contexts = []\n projects = []\n for word in line_stripped.split():\n if word.startswith('+'):\n prj = word[1:]\n if prj:\n projects.append(prj)\n elif word.startswith('@'):\n ctx = word[1:]\n if ctx:\n contexts.append(ctx)\n\n return {'text': line,\n 'body': body,\n 'priority': priority,\n 'creation_date': creation_date,\n 'completion_date': completion_date,\n 'completed': completed,\n 'contexts': contexts,\n 'projects': projects,\n }", "title": "" }, { "docid": "a4b5972a15ac01f1012972f764d2b790", "score": "0.6060676", "text": "def parseText(self, text):\n text = HSHelper.removeQuotes(text)\n cards = self.__getCards(text)\n answer = ''\n\n if cards:\n log.debug(\"found cards: %s\", cards)\n cards = self.constants.replaceSpecial(cards) #expand\n cards = [card for card in cards if card in self.cardDB]\n cards = cards[:self.constants.CARD_LIMIT]\n answer = formatter.createAnswer(self.cardDB, cards)\n\n return cards, answer", "title": "" }, { "docid": "8500015cde7be54679c26d1c4c797c57", "score": "0.60532886", "text": "def _parse_tb_style_text(self, text):\n block = list()\n\n lines = text.split(\"\\n\")\n line = lines.pop(0)\n title = []\n while not line.startswith(\"\\\\\"):\n if line:\n title.append(line)\n line = lines.pop(0)\n\n self.title = \" \".join(title)\n\n for line in lines:\n if line and line.startswith(\"\\\\id\") and len(block):\n utterance = self._parse_element_from_tb_style(block)\n self.annotation_tree.append_element(utterance)\n block = 
list()\n elif line:\n if line.startswith(\"\\\\\"):\n block.append(line.strip())\n\n utterance = self._parse_element_from_tb_style(block)\n self.annotation_tree.append_element(utterance)\n\n #print self.annotation_tree.tree", "title": "" }, { "docid": "7b07119ce2caa968dd52da9c69b821bf", "score": "0.60359377", "text": "def main():\r\n\r\n print \"Parsing text file.\"\r\n (number_emails, graph) = graphs.process_file(\"data/graph.txt\")\r\n\r\n _dump_stats(number_emails, graph)\r\n\r\n _run_pagerank(graph)\r\n\r\n _run_hits(graph)\r\n\r\n print \"Finished!\"", "title": "" }, { "docid": "ce7e7d50228dcde2f24691284fdac97e", "score": "0.60344046", "text": "def process(self, rawText, fileAttributes):\n\n newRaw = re.sub(r'[\\s+\\.\\?!,\\\"\\%@#\\^\\(\\)\\n\\\\]',' ', rawText)\n newnewRaw = re.sub(r'\\'','*', newRaw)\n tokens = newnewRaw.split(None)\n \n\n# File attributes passed in from SLICEngine\n\n \n #self.numSentences = fileAttributes['numSentences'] #total number of sentences in file\n self.numWords = fileAttributes['NumWords'] #total number of words in file\n #self.numChars = fileAttributes['numChars'] #total number of chars in file\n #self.numCharsMinusSpacesAndPunctuation = fileAttributes['numCharsMinusSpacesAndPunctuation'] #total number of chars from words only\n #self.avgSentenceLength = fileAttributes['avgSentenceLength'] #average sentence length\n #self.avgWordLength = fileAttributes['avgWordLength'] #average word length\n #self.numSyllables = fileAttributes['numSyllables'] #total number of syllables in file\n #self.avgSyllablesPerWord = fileAttributes['avgSyllablesPerWord'] #average syllables per word\n #self.numWordsWith3OrMoreSyllables = fileAttributes['numWordsWith3OrMoreSyllables'] #number of words with three or more syllables\n\n #read in hedging adverbs\n lAdverbs = open(r'dictionary_hedging_adverbs.txt').readlines()\n lAdverbs = [words.rstrip('\\n') for words in lAdverbs] #remove the end of line character \\n from each line\n\n \n #read in hedging adjectives\n lAdj = open(r'dictionary_hedging_adj.txt').readlines()\n lAdj = [words.rstrip('\\n') for words in lAdj] #remove the end of line character \\n from each line\n\n \n #read in hedging conjunctions\n lConj = open(r'dictionary_hedging_conjunctions.txt').readlines()\n lConj = [words.rstrip('\\n') for words in lConj] #remove the end of line character \\n from each line\n\n \n #read in hedging verbs\n lVerb = open(r'dictionary_hedging_verbs.txt').readlines()\n lVerb = [words.rstrip('\\n') for words in lVerb] #remove the end of line character \\n from each line\n\n\n #Declare variables\n intHedgingModal = 0\n intNonhedgingModal = 0\n intHedgingAdverbs = 0\n intHedgingAdj = 0\n intHedgingConj = 0\n intHedgingVerbs = 0\n\n #populate Nonhedging modal verbs, rawText is not tokenized and in one long string, run this line only once, \"have got to\" is counted in the slang \"got to\"\n intNonhedgingModal += (rawText.lower().count('have to') + rawText.lower().count('had to') + rawText.lower().count('got to') + rawText.lower().count('need to'))\n\n \n #populate values by finding modal verbs, repeat for each word in the token list\n for word in tokens:\n if word.lower() in ['may', 'might', 'ought', 'should', 'would', 'wouldn*t', 'could', 'couldn*t']:\n intHedgingModal +=1\n \n #populate additional Nonhedging modal verbs\n if word.lower() in ['must', 'mustn*t', 'shall', 'can', 'can*t', 'cannot', 'will', 'won*t']: # The astrict is becuase SpliceEngine removes the apostrophy and replaces with *\n intNonhedgingModal +=1\n\n #populate hedging 
adverbs\n if word.lower() in lAdverbs:\n intHedgingAdverbs +=1 \n\n #populate hedging Adjectives\n if word.lower() in lAdj:\n intHedgingAdj +=1\n\n\n #populate hedging Conjunctions\n if word.lower() in lConj:\n intHedgingConj +=1\n \n #populate hedging Verb\n if word.lower() in lVerb:\n intHedgingVerbs +=1\n \n \n# Fill pluginDict with plugin results for new linguistic cue \n pluginDict = {}\n try:\n pluginDict['NonhedgeModal'] = self.formatAnswer(intNonhedgingModal / self.numWords) #convert raw count to ratio\n pluginDict['HedgeModal'] = self.formatAnswer(intHedgingModal / self.numWords ) \n pluginDict['HedgeAdv'] = self.formatAnswer(intHedgingAdverbs / self.numWords)\n pluginDict['HedgeAdj'] = self.formatAnswer(intHedgingAdj / self.numWords)\n pluginDict['HedgeConj'] = self.formatAnswer(intHedgingConj / self.numWords)\n pluginDict['HedgeVerb'] = self.formatAnswer(intHedgingVerbs / self.numWords)\n pluginDict['HedgeAll'] = self.formatAnswer((intHedgingModal + intHedgingAdverbs + intHedgingAdj + intHedgingConj + intHedgingVerbs ) / self.numWords)\n except ZeroDivisionError:\n pluginDict['NonhedgeModal'] = 0\n pluginDict['HedgeModal'] = 0\n pluginDict['HedgeAdv'] = 0\n pluginDict['HedgeAdj'] = 0\n pluginDict['HedgeConj'] = 0\n pluginDict['HedgeVerb'] = 0\n pluginDict['HedgeAll'] = 0\n \n \n#Return the pluginDict. The Dictionary keys will be the column headers.\n \n return pluginDict", "title": "" }, { "docid": "d04f6c64be90d5407eaedfb68183a4f3", "score": "0.60317725", "text": "def parse(text):\n p = ttp.Parser()\n result = p.parse(text)\n users = result.users\n tags = result.tags\n urls = result.urls\n html = result.html\n return [users, tags, urls, html]", "title": "" }, { "docid": "e959e0e0f20961522308ca35dfd29191", "score": "0.6030227", "text": "def __call__(self, text):\n if isinstance((text), (str)):\n return self.process(text)\n elif isinstance((text), (list)):\n tokens = []\n for i in range(len(text)):\n tokens.append( self.process(text[i]) )\n return(tokens)\n else:\n print('tokenizer: cannot process a non-string-like parsed arg')\n return None", "title": "" }, { "docid": "a2145d42ba022917e4a5d22d0346ed2a", "score": "0.6029321", "text": "def parse( self, text, ttlfile=None, debuglevel=0 ):\n # Parser Initialize\n self._initialize( ttlfile=ttlfile )\n # parse and get the Translation Unit\n self.ast = self.parser.parse(text, lexer=self.ttllex, debug=debuglevel)\n return self.ast", "title": "" }, { "docid": "d001b5981a90f5a004e9afcc68e8fb54", "score": "0.6028987", "text": "def _parse_input(text):\n text = \"name mockname\\nversion 1.0\\n\" + text\n lexer = blackbirdLexer(antlr4.InputStream(text))\n stream = antlr4.CommonTokenStream(lexer)\n parser = blackbirdParser(stream)\n\n tree = parser.start()\n\n bb = BlackbirdListener()\n walker = antlr4.ParseTreeWalker()\n walker.walk(bb, tree)\n\n return bb.program", "title": "" }, { "docid": "dac2778de5ae4729facbc3321ca44674", "score": "0.6016612", "text": "def parse_data(text):\n\t# first counter holds `token : number of occurrence in the topic` tuples\n\t# the list holds Article objects extracted\n\t# second one holds `token : number of articles that it occurrences in the topic` tuples within the set of course\n\tdata_collection = {\n\t\t\t\t\t\t\t'train':{\n\t\t\t\t\t\t\t\t'earn': c_t(Counter(), [], Counter()),\n\t\t\t\t\t\t\t\t'acq': c_t(Counter(), [], Counter()),\n\t\t\t\t\t\t\t\t'money-fx': c_t(Counter(), [], Counter()),\n\t\t\t\t\t\t\t\t'grain': c_t(Counter(), [], Counter()),\n\t\t\t\t\t\t\t\t'crude': c_t(Counter(), [], 
Counter())\n\t\t\t\t\t\t\t},\n\t\t\n\t\t\t\t\t\t\t'test':{\n\t\t\t\t\t\t\t\t'earn': c_t(Counter(), [], Counter()),\n\t\t\t\t\t\t\t\t'acq': c_t(Counter(), [], Counter()),\n\t\t\t\t\t\t\t\t'money-fx': c_t(Counter(), [], Counter()),\n\t\t\t\t\t\t\t\t'grain': c_t(Counter(), [], Counter()),\n\t\t\t\t\t\t\t\t'crude': c_t(Counter(), [], Counter())\n\t\t\t\t\t\t\t},\n\t\t\n\t\t\t\t\t\t\t'not-used':{\n\t\t\t\t\t\t\t\t'earn': c_t(Counter(), [], Counter()),\n\t\t\t\t\t\t\t\t'acq': c_t(Counter(), [], Counter()),\n\t\t\t\t\t\t\t\t'money-fx': c_t(Counter(), [], Counter()),\n\t\t\t\t\t\t\t\t'grain': c_t(Counter(), [], Counter()),\n\t\t\t\t\t\t\t\t'crude': c_t(Counter(), [], Counter())\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\n\t# get article texts as a list\n\tarticle_texts_list = re.findall(pattern=article_pattern, string=text)\n\t\n\t# traverse all article texts\n\tfor article in article_texts_list:\n\t\t\n\t\t# get topic\n\t\ttopic = the_topic_of(article)\n\t\tif topic is None:\n\t\t\tcontinue\n\t\t\n\t\t# get text : title + body\n\t\ttext = text_of(article)\n\t\tif text.isspace():\n\t\t\tcontinue\n\t\t\n\t\t# get id\n\t\tid = id_of(article)\n\t\t\n\t\t# get type\n\t\ttype = type_of(article)\n\t\t\n\t\t# instabtiate Article object\n\t\tarticle = Article(id,topic,text,type)\n\t\t\n\t\t# make tokenization of text internally\n\t\tarticle.tokenize(stopwords=utils.stopwords,re_pattern=ascii_word_pattern)\n\t\t\n\t\t# update data_collection appropriately\n\t\ttemp_counter, temp_lst ,unqs_counter = data_collection[type][topic] # tuples are immutable so we cheat\n\t\ttemp_counter += article.token_counter # += wouldn't work on a tuples index | adding new words to this topic counter of words\n\t\ttemp_lst.append(article) # add the Article object to Articles of this topic\n\t\tunqs_counter.update(set(article.token_counter)) # For each token counting number of articles in this topic having it\n\t\n\treturn data_collection", "title": "" }, { "docid": "b16ec13c5f98fab34632dee128dcab3d", "score": "0.6015309", "text": "def _parse_freetext(self, typ, value):\n evidences = self._extract_evidences(value)\n # There might not be any tags, we provide a token one\n if not evidences:\n evidences.append(Evidence())\n # handling body of annotations\n body = value.split(\" {\")[0]\n statements = re.split(\"\\. \", body)\n from_pubmed = []\n from_sim = []\n from_unclear = []\n for statement in statements:\n if \"PubMed:\" in statement:\n from_pubmed.append(statement)\n elif \"(By similarity)\" in statement:\n from_sim.append(statement)\n elif \"(Probable)\" in statement:\n from_pubmed.append(statement)\n else:\n from_unclear.append(statement)\n for s in from_pubmed:\n text = s.split(\" (PubMed:\")[0]\n for ev in evidences:\n if ev.source in s:\n yield Annotation(\n self.entry.primary_accession, Statement(text, typ), evidence=ev\n )\n for s in from_sim:\n text = s.split(\" (By sim\")[0]\n sim_tags = [tag for tag in evidences if tag.is_sim()]\n if len(sim_tags) == 1:\n yield Annotation(\n self.entry.primary_accession,\n Statement(text, typ),\n evidence=sim_tags[0],\n )\n else:\n yield Annotation(\n self.entry.primary_accession,\n Statement(text, typ),\n evidence=Evidence(code=\"ECO:0000250\"),\n )\n for s in from_unclear:\n text = s.rstrip(\". 
\")\n if (\n not from_sim and not from_pubmed and len(evidences) == 1\n ): # 1 tag applicable to all\n yield Annotation(\n self.entry.primary_accession,\n Statement(text, typ),\n evidence=evidences[0],\n )\n elif (\n len(evidences) > 1 and len(set([e.code for e in evidences])) == 1\n ): # 1 type of tag applicable to all but unclear source\n this_code = evidences[0].code\n yield Annotation(\n self.entry.primary_accession,\n Statement(text, typ),\n evidence=Evidence(code=this_code),\n )\n elif (\n len(evidences) > 1\n and len(set([e.code for e in evidences])) > 1\n and from_sim\n ):\n code = list(\n set([e.code for e in evidences if not e.code == \"ECO:0000250\"])\n )[\n 0\n ] # a hack\n yield Annotation(\n self.entry.primary_accession,\n Statement(text, typ),\n evidence=Evidence(code=code),\n )\n else:\n yield Annotation(self.entry.primary_accession, Statement(text, typ))", "title": "" }, { "docid": "b4fe0083b64a252b10d3cc2843d73fc9", "score": "0.6009529", "text": "def _parse_text(path):\n split_path = path.split(\".\")\n\n if split_path[-1] == \"gz\":\n lang = split_path[-2]\n with epath.Path(path).open(\"rb\") as f, gzip.GzipFile(fileobj=f) as g:\n return g.read().decode(\"utf-8\").split(\"\\n\"), lang\n\n if split_path[-1] == \"txt\":\n # CWMT\n lang = split_path[-2].split(\"_\")[-1]\n lang = \"zh\" if lang in (\"ch\", \"cn\") else lang\n else:\n lang = split_path[-1]\n sentences = read_sentences(path)\n return sentences, lang", "title": "" }, { "docid": "984b914e611f4e894d0fd997d929a6ba", "score": "0.60093224", "text": "def Parse(self, stat, file_object, knowledge_base):", "title": "" }, { "docid": "8c7443e8097288edab6e961e8aeb9d04", "score": "0.6008104", "text": "def parse_for_text_file(name):\n return \"{} {}\".format(remove_extension(name), parse_name(name))", "title": "" }, { "docid": "ce2f37a9f02dcc4fcba79510df5946ae", "score": "0.59999275", "text": "def parse( self, str ): \n\n # Find the topic\n topic = re.compile( r\"Topic: .*\" ).search( str ).group()\n # Chop out fragments at beginning and end \n self.threadName = topic[7:][:-11] \n\n # Find the author\n # Rather brute-force match; the re.M second argument means \"Multiline\"\n author = re.compile( \\\n r\"<FONT SIZE=\\\"2\\\" .*><B>\\n.*\\n</B></font>\", re.M )\\\n\t .search( str ).group()\n # Chop out fragments at beginning/end\n self.threadAuthor = author[64:][:-12] \n\n self.lastPoster = \"Not implemented yet\"\n self.lastPostTime = \"Not implemented yet\"", "title": "" }, { "docid": "72dc646a48fd9ebdf75076421e11f84e", "score": "0.59991586", "text": "def process_text(self):\n\n \n self.all_text = ''.join(self.text_list)\n self.doc_list = []\n self.doc_psum = [0]\n self.sentences = []\n self.sent_text_idx = []\n\n for i, doc in enumerate(self.corpus.nlp.pipe(self.text_list, batch_size=self.batch_size,\n n_threads=self.n_threads)):\n self.doc_list.append(Doc(doc, i))\n self.doc_psum.append(self.doc_psum[-1] + len(self.text_list[i]))\n for doc, idx in zip(self.doc_list, self.doc_psum):\n for span in doc.sents:\n self.sentences.append(span)\n sent_range = (idx+doc[span.start].idx, idx+doc[span.end-1].idx)\n self.sent_text_idx.append(sent_range)\n for i,sent_span in enumerate(self.sentences):\n for token in sent_span:\n token.sent_idx = i\n token.text_unit_idx = i", "title": "" }, { "docid": "6bb0c938dd413883204ee73f21adf8ab", "score": "0.5998122", "text": "def prepare_text(Text, Mode, Pos, Forms, Stoplist):\n if Mode == \"plain\": \n Prepared = Text.lower()\n Prepared = re.split(\"\\W\", Prepared)\n Prepared = [Token for Token 
in Prepared if len(Token) > 1] \n if Mode == \"tag\": \n Tagger = treetaggerwrapper.TreeTagger(TAGLANG=\"fr\")\n print(\"---tagging\")\n Tagged = Tagger.tag_text(Text)\n print(\"---done tagging\")\n Prepared = []\n for Line in Tagged:\n Line = re.split(\"\\t\", Line)\n if len(Line) == 3: \n #print(len(Line), Line)\n if Forms == \"lemmas\":\n Prepared.append(Line[2])\n elif Forms == \"words\": \n Prepared.append(Line[0])\n elif Forms == \"pos\": \n Prepared.append(Line[1])\n Prepared = [Token for Token in Prepared if len(Token) > 1] \n if Mode == \"sel\": \n Tagger = treetaggerwrapper.TreeTagger(TAGLANG=\"fr\")\n print(\"---tagging\")\n Tagged = Tagger.tag_text(Text)\n print(\"---done tagging\")\n Prepared = []\n for Line in Tagged:\n Line = re.split(\"\\t\", Line)\n if len(Line) == 3: \n #print(len(Line), Line)\n if Line[1][0:2] in Pos:\n if Forms == \"lemmas\":\n Prepared.append(Line[2])\n elif Forms == \"words\": \n Prepared.append(Line[0])\n elif Forms == \"pos\": \n Prepared.append(Line[1])\n if Mode == \"posbigrams\": \n Tagger = treetaggerwrapper.TreeTagger(TAGLANG=\"fr\")\n print(\"---tagging\")\n Tagged = Tagger.tag_text(Text)\n print(\"---done tagging\")\n Prepared = []\n for i in range(0,len(Tagged)-1): \n Line = re.split(\"\\t\", Tagged[i])\n NextLine = re.split(\"\\t\", Tagged[i+1])\n Prepared.append(Line[1]+\"-\"+NextLine[1])\n if Mode == \"wordbigrams\": \n Text = Text.lower()\n Text = re.split(\"\\W\", Text)\n Text = [Token for Token in Text if len(Token) > 1] \n Prepared = []\n for i in range(0,len(Text)-1): \n Prepared.append(Text[i]+\"-\"+Text[i+1])\n Prepared = [Item.lower() for Item in Prepared if Item not in Stoplist]\n print(Prepared[0:50])\n return Prepared", "title": "" }, { "docid": "a6a1653a4597ac1a87cb66933f8752b2", "score": "0.59948045", "text": "def _parse_input(text):\n lexer = blackbirdLexer(antlr4.InputStream(text))\n stream = antlr4.CommonTokenStream(lexer)\n parser = blackbirdParser(stream)\n\n tree = parser.start()\n\n bb = BlackbirdListener()\n walker = antlr4.ParseTreeWalker()\n walker.walk(bb, tree)\n return bb.program", "title": "" }, { "docid": "a81520c524860991e8df2e825a7bb291", "score": "0.5974746", "text": "def parse_text(text):\n text = text.lower()\n text = re.sub(\"[^a-z]\", \" \", text)\n # d = d.replace(\"\\t\", \" \")\n # d = d.replace(\"\\n\", \"\")\n wordlist = text.split(\" \")\n stopWords = ['a', 'able', 'about', 'across', 'after', 'all', 'almost', 'also',\n 'am', 'among', 'an', 'and', 'any', 'are', 'as', 'at', 'be',\n 'because', 'been', 'but', 'by', 'can', 'cannot', 'could', 'dear',\n 'did', 'do', 'does', 'either', 'else', 'ever', 'every', 'for',\n 'from', 'get', 'got', 'had', 'has', 'have', 'he', 'her', 'hers',\n 'him', 'his', 'how', 'however', 'i', 'if', 'in', 'into', 'is',\n 'it', 'its', 'just', 'least', 'let', 'like', 'likely', 'may',\n 'me', 'might', 'most', 'must', 'my', 'neither', 'no', 'nor',\n 'not', 'of', 'off', 'often', 'on', 'only', 'or', 'other', 'our',\n 'own', 'rather', 'said', 'say', 'says', 'she', 'should', 'since',\n 'so', 'some', 'than', 'that', 'the', 'their', 'them', 'then',\n 'there', 'these', 'they', 'this', 'tis', 'to', 'too', 'twas', 'us',\n 've', 'wants', 'was', 'we', 'were', 'what', 'when', 'where', 'which',\n 'while', 'who', 'whom', 'why', 'will', 'with', 'would', 'yet',\n 'you', 'your', 'http', 'rt', 'https', 'co']\n wordlist = [w for w in wordlist if (len(w) >= 2 and w not in stopWords)]\n text = ' '.join(wordlist)\n text = re.sub(\" +\", \" \", text)\n return text.strip()", "title": "" }, { "docid": 
"c6b42a290f51e78c1b366e005836899d", "score": "0.59691584", "text": "def analyze_text(text):\n # Your code here\n len_of_string = len(text)\n data = str(text)", "title": "" }, { "docid": "0aef1f78f1242e52cdddacfebbd94e4a", "score": "0.5968248", "text": "def from_text(self, text):\n pass", "title": "" } ]
9f3c663242af5a3bcfda75eb6d31d72f
Get next pos according to input position.
[ { "docid": "b126f20ee600d1229fde2642ed1ebe60", "score": "0.0", "text": "def _parse_pos(self, pos):\n elements = pos.split(':')\n try:\n idx = int(elements[-1])\n except ValueError:\n log.error(\"Invalid index. The index in pos should be digit but get pos:%s\", pos)\n raise DebuggerParamValueError(\"Invalid pos.\")\n\n if idx < 0 or idx >= self.max_limit:\n log.error(\"Invalid index. The index in pos should between [0, %d)\", self.max_limit)\n raise DebuggerParamValueError(f\"Invalid pos. {idx}\")\n flag = elements[0] if len(elements) == 2 else ''\n\n return flag, idx", "title": "" } ]
[ { "docid": "398dfb63dd007099fb0067135088b9b6", "score": "0.76904935", "text": "def getNextPos(self):\n\t\ti=0\n\t\tfor p in self.positions:\n\t\t\tif p[1]>self.pos[1]: return p\n\t\t\tif len(self.positions)<i+2: return self.pos\n\t\t\ti+=1\n\t\traise Exception('getNextPos is not expected to come this far.')", "title": "" }, { "docid": "6d96238331c0cd53873320e5bec1894e", "score": "0.7665374", "text": "def next_position(self, position):\n if len(self) - 1 <= position:\n raise IndexError\n return position + 1", "title": "" }, { "docid": "44fd6a28f0002e9284ba77723f5a52d4", "score": "0.7644706", "text": "def next(self):\r\n if self.posIndex < len(self.positions):\r\n pos = self.positions[self.posIndex]+self.posOffsets[self.posIndex]\r\n self.posIndex = self.posIndex+1\r\n return pos\r\n elif self.repeatfirstpos and self.posIndex == len(self.positions):\r\n pos = self.positions[0]+self.posOffsets[0]\r\n self.posIndex = self.posIndex+1\r\n return pos\r\n else:\r\n self.posIndex = 0\r\n raise StopIteration()", "title": "" }, { "docid": "0bf9e081a999942c3818270de6a05a93", "score": "0.71912307", "text": "def get_position(self, position):\n current = self.head\n try:\n for i in range(1, position):\n current = current.next\n except:\n current = None\n\n return current", "title": "" }, { "docid": "cf9749a827f4c974fc7e1aa808c0223a", "score": "0.71245813", "text": "def get_position(self, position):\n pos = 1\n current = self.head\n while current and pos <= position:\n if pos == position:\n return current\n current = current.next\n pos += 1\n return None", "title": "" }, { "docid": "5342aafe6e17d2219013055d10c812cf", "score": "0.7075163", "text": "def getNext(self, position):\n if self.moveStep > 0:\n destination = self.moveForward(position)\n elif self.moveStep < 0:\n destination = self.moveBackward(position)\n else:\n destination = {'type':'wrong', 'side':side, 'index':index}\n\n return destination", "title": "" }, { "docid": "bd9453862c0f22ddfcb2b40f64d6e703", "score": "0.6917675", "text": "def next(self):\n res = self.nums[self.pos]\n self.pos += 1\n return res", "title": "" }, { "docid": "22961acde6d257a30ee0d0c87e3083a2", "score": "0.6896091", "text": "def get_position(self, position):\n element = self.head\n counter = 1\n \n if position < 1:\n return None\n \n while element and counter <= position:\n if counter == position:\n return element\n element = element.next\n counter += 1\n return None", "title": "" }, { "docid": "317a18a955482e872855b7c0730e45b3", "score": "0.6855648", "text": "def _next_pos(self):\n # circular\n while True:\n # for each position in the content\n for i in range(self.widget.length):\n # yield the position\n yield i", "title": "" }, { "docid": "1774689f59d1c5b030924d161997fc00", "score": "0.67382616", "text": "def get_next_position(self):\n if self._positions is None:\n center, offset = np.zeros((self.n_part_per_shot,3)), np.zeros((1,3))\n if self.n_part_per_shot > 1:\n aggregate = build_bpca(num_pcles=self.n_part_per_shot, radius=self.particle_radius)\n center = aggregate.pos\n if self.jet_radius is not None:\n offset = random_positions_in_beam(1, self.beam.get_focus()[0]/2, self.jet_radius)\n return center + offset\n\n if self._i_positions >= len(self._positions):\n raise StopIteration(\"No more position available.\")\n\n position = self._positions[self._i_positions:\n self._i_positions+self.n_part_per_shot]\n\n self._i_positions += self.n_part_per_shot\n return position", "title": "" }, { "docid": "07467abeb9061bdf1274d145e6915c74", "score": "0.668062", "text": "def 
_next_step(position, direction):\n # Access the values stored in the pairs\n (row, column), (row_step, column_step) = position, direction\n # Calculate the new position\n new_position = (new_row, new_column) = (row + row_step, column + column_step)\n # Check whether the new position is still on the board.\n if 0 <= new_column < 8 and 0 <= new_row < 8:\n return new_position\n else:\n return None", "title": "" }, { "docid": "dabfa0ae7292bd744529943c3d1659f8", "score": "0.65535116", "text": "def __find_next_pos(self, row, col):\n base_row = self.__get_base_row()\n part_0 = [(base_row, i) for i in range(4, -1, -1)]\n part_1 = [(1, i) for i in range(8)]\n part_2 = [(base_row, i) for i in range(7, 4, -1)]\n layout = part_0 + part_1 + part_2\n curr_position_index = layout.index((row, col))\n next_position_index = curr_position_index + sum(self.__current_dice_roll)\n return None if next_position_index >= len(layout) else layout[next_position_index]", "title": "" }, { "docid": "97019325a33aa9c9bcf8f7cfa891f075", "score": "0.65085435", "text": "def getNext(self):\n return self.findForwards(lambda x: 1)", "title": "" }, { "docid": "20478fc90cbcbf29acfbbe3a7fe202c7", "score": "0.65019333", "text": "def find_next_block(self, pos):\n\n while pos < len(self.chunks):\n\n pos, chunk = self.find_next_chunk(pos, [0x100, 0x102])\n\n if pos == None:\n\n return None\n else:\n if len(chunk[1]) > 1:\n\n # Found a block, return this position\n return pos\n\n # Otherwise continue looking\n pos = pos + 1\n\n return None", "title": "" }, { "docid": "82cda7d83cb65b45a6117405c03cf362", "score": "0.6498824", "text": "def next(self):\n t = self.peek()\n if t:\n self.pos += 1\n return t", "title": "" }, { "docid": "506396b62e8229706beb0984d6e5337b", "score": "0.64889663", "text": "def next_free_pos(self, **kwargs):\n args = dict(self.defaults)\n args.update(**kwargs)\n original_place_point = args['place_point']\n found = False\n while not found:\n page = self.page(args['page'], **args)\n bounds = page.bounds\n #print \"Page %s (%s)\" % (args['page'], page.side)\n for colrect in page.columns:\n rect = colrect\n if rect[1]>bounds[1]: # right side\n rect[1] -= bounds[1]\n rect[3] -= bounds[1]\n if 'place_point' in kwargs:\n size = size_of(rect)\n rect[0] = args['place_point'][1] # move y1\n if rect[1] < args['place_point'][0]:\n rect[1] = args['place_point'][0] # move x1\n rect[3] = rect[1] + size[0] # calculate x2 from x1 and width\n \n #args['rect'] = rect\n #pprint(args)\n point = (rect[1], rect[0])\n #print \"Is %s within %s? 
%s\" % (point, rect, in_bounds(point, rect))\n obj = self.object_at(point, **args)\n if not obj:\n args['rect'] = rect\n return args\n args['page']+=1\n args['place_point'] = original_place_point", "title": "" }, { "docid": "78e760132717c28f6afc1df273cc574c", "score": "0.64694566", "text": "def next_left(self):\n if self.positions == 1:\n return 0\n elif self.position == 0:\n return self.positions - 1\n else:\n return self.position - 1", "title": "" }, { "docid": "3baa04303db0780aa6e911b578d8a82b", "score": "0.64240175", "text": "def get_next_move(self):\n pass", "title": "" }, { "docid": "02dd47ded5273f133eaeb29e6a3d6ce1", "score": "0.6414938", "text": "def next_right(self):\n if self.positions > self.position + 1:\n return self.position + 1\n else:\n return 0", "title": "" }, { "docid": "fb321e47ee115c746ab2eb34a9377005", "score": "0.6408498", "text": "def get_next_position(self):\n if self.orientation == NORTH:\n return self.x, self.y + 1\n elif self.orientation == SOUTH:\n return self.x, self.y - 1\n elif self.orientation == WEST:\n return self.x - 1, self.y\n elif self.orientation == EAST:\n return self.x + 1, self.y\n else:\n raise ValueError(\"Inconsistent orientation\")", "title": "" }, { "docid": "4a0667dee6681e54d772829358713b1b", "score": "0.6401335", "text": "def next(self):\n self.idx += 1\n\n return self.nums[self.idx - 1]", "title": "" }, { "docid": "7a8321a0410272f000e5a28618d0f624", "score": "0.63919", "text": "def next_position(self):\n dt = self.simulation.time_increment\n x = self.x + self.vx * dt\n y = self.y + self.vy * dt\n\n return x, y", "title": "" }, { "docid": "6e61c07fdfc4ee5b6dc83a1c10d5d25d", "score": "0.6357579", "text": "def perform_get_next_valid_offset(self, addr):\n\t\tif addr < self.perform_get_start():\n\t\t\treturn self.perform_get_start()\n\t\treturn addr", "title": "" }, { "docid": "73e16eba649134813f7d3772447ca524", "score": "0.6347709", "text": "def next_pos(self):\n return ':'.join([self._cur_flag, str(self._next_idx)])", "title": "" }, { "docid": "fdc4a464e6a4c02c63caaa1a1f9fc6d2", "score": "0.6324426", "text": "def getNextPoint(point, direction):\r\n if direction == UP:\r\n return (point[0], point[1] - 1)\r\n elif direction == DOWN:\r\n return (point[0], point[1] + 1)\r\n elif direction == LEFT:\r\n return (point[0] - 1, point[1])\r\n else:\r\n return (point[0] + 1, point[1])", "title": "" }, { "docid": "718d77341e50309c6163d1322fe89b86", "score": "0.62942445", "text": "def perform_get_next_valid_offset(self, addr: int) -> int:\n\t\tif addr < self.perform_get_start():\n\t\t\treturn self.perform_get_start()\n\t\treturn addr", "title": "" }, { "docid": "d242aea7dbe0141e5301c9043f3f8d55", "score": "0.6266027", "text": "def step(self):\n self.pos = (self.pos + 1) % self.num_positions\n return self.pos", "title": "" }, { "docid": "b6fe862fb7d7cfd3a00205c978db1a1e", "score": "0.6230997", "text": "def get_position(pn, sl):", "title": "" }, { "docid": "d94bd49d07617908ccfe4df04b7d8562", "score": "0.621892", "text": "def find_next_slide(pos, elements):\n if pos+1 < len(elements):\n next = elements[pos+1]\n if isSlide(next):\n return next\n elif next.isa(\"Transition\"):\n return None\n else:\n return find_next_slide(pos+1, elements)\n else:\n return None", "title": "" }, { "docid": "a1625dacd47c828a0afeaafa1621877f", "score": "0.6213485", "text": "def get_pos_step(self, pos):\n for step, layer in enumerate(self.layers):\n if pos in layer:\n return step\n return None", "title": "" }, { "docid": "8302ecf312ae0a2100d30f74352c9ae5", "score": "0.6189288", 
"text": "def next(self):\n val = self._nums[self._i]\n self._i += 1\n return val", "title": "" }, { "docid": "35b620063abc708fe3d843d85ed9131e", "score": "0.61826116", "text": "def next(self):\n if self.num < self.n:\n current, self.num = self.num, self.num + 1\n return current\n else:\n raise StopIteration", "title": "" }, { "docid": "e054e80e7db3229a28b424e43ab66e33", "score": "0.61797506", "text": "def _get_next_non_static_char_pos(self, pos, direction=Direction.LEFT,\n skip=0):\n text = self._entry.get_text()\n validators = self._mask_validators\n i = pos+direction+skip\n while 0 <= i < len(text):\n if isinstance(validators[i], int):\n return i\n i += direction\n\n return None", "title": "" }, { "docid": "8d6a1c94f89af74b33471710888eb03b", "score": "0.61434305", "text": "def next(self):\n\t\tif (self.current is None):\n\t\t\treturn None\n\t\t\n\t\tif (self.current == []):\n\t\t\tself.current = range(1, self.min + 1)\n\t\telse:\n\t\t\tself.current = Pattern_maker.increment(self.current)\n\t\t\n\t\twhile (not Pattern_maker.contains_relative(self.current, self.pat, self.rotate)):\n\t\t\tself.current = Pattern_maker.increment(self.current)\n\t\t\tif (self.current == None):\n\t\t\t\tif (self.min < self.max):\n\t\t\t\t\tself.min += 1\n\t\t\t\t\tself.current = range(1, self.min + 1)\n\t\t\t\telse:\n\t\t\t\t\treturn None\n\t\t\n\t\treturn self.current", "title": "" }, { "docid": "f7a6a5cb606c90fe06a9663081894070", "score": "0.61257267", "text": "def get_next(self):\n ret_current = copy.copy(self._current)\n self._current += self._jump\n\n if self._limit is None:\n return ret_current\n\n if self._jump > 0:\n if ret_current > self._limit:\n raise LimitError(\"Next ID is over the limit.\")\n return ret_current\n\n if self._jump < 0:\n if ret_current < self._limit:\n raise LimitError(\"Next ID is under the limit.\")\n return ret_current\n\n else: # Should not be able to get here, means self._jump = 0.\n raise ValueError(\"'jump' is equal to 0.\")", "title": "" }, { "docid": "4d74441af7af48dc71797f6c93fbaaed", "score": "0.61213833", "text": "def scanner_position(r, ix):\r\n mod_r = (2 * r) - 2\r\n pos = ix % mod_r\r\n #print \"r = %d, ix = %d, pos = %d\" % (r, ix, pos)\r\n if pos >= r:\r\n #print \"bounce back, %d, %d\" % (mod_r, ix)\r\n # ix: 0 1 2 3 4 5 6 7 8 9 A\r\n # r=3: 0 1 2 3|0 1 2 3|0 1 2 - mod_r = 4\r\n # 0 1 2 1|0 1 2 1|0 1 2\r\n # r=5: 0 1 2 3 4 5 6 7|0 1 2 - mod_r = 8\r\n # 0 1 2 3 4 3 2 1|0 1 2\r\n pos = (mod_r - pos)\r\n return pos", "title": "" }, { "docid": "26df48ae13f138d8723fd05092791fb8", "score": "0.61071956", "text": "def next(self):\n num = self.nums[self.top]\n self.top += 1\n return num", "title": "" }, { "docid": "880f0182ee13f8d015942b59f7eb1ad8", "score": "0.61067003", "text": "def get_next_positions(self, position):\n return [self.board[position.x + x][position.y + y] for x in range(-2, 3) for y in range(-2, 3)\n if self.validate_position(position, x, y)]", "title": "" }, { "docid": "8d52b38dc2cbfd6a2b63be826b80cf28", "score": "0.6096722", "text": "def __next__(self):\n if self._index < self._track.num:\n spot = self._track.at(self._index)\n self._index += 1\n return spot\n raise StopIteration", "title": "" }, { "docid": "9cd02919e77a35fd101d687ec65edef9", "score": "0.60748214", "text": "def __next_direction(xy):\n p, q = tuple(xy)\n nbh = [[-q, p], [p, q], [q, -p]]\n return nbh[npr.randint(3)]", "title": "" }, { "docid": "5b860e0345560c7152768416f0bfe54d", "score": "0.6070597", "text": "def get_next(self, state, action):\n cur_x, cur_y = state\n dx, dy = 
VideoGameActions.get_vector(action)\n return (cur_x+dx, cur_y+dy)", "title": "" }, { "docid": "3756009009ead454ff2fb9192ac4c71b", "score": "0.6065201", "text": "def _get_next_position(context):\n event_id = context.current_parameters['event_id']\n res = (db.session.query(db.func.max(AbstractEmailTemplate.position))\n .filter(AbstractEmailTemplate.event_id == event_id)\n .one())\n return (res[0] or 0) + 1", "title": "" }, { "docid": "5a444f9df40124c30a952749cd00bc87", "score": "0.6044375", "text": "def get_loop_offset(pos, size):\n\n return [pos[0] % size[0], pos[1] % size[1]]", "title": "" }, { "docid": "93b2836edc4afe54e1ad10436919c535", "score": "0.6040025", "text": "def trouveNext(self,tag):\n pos=self.position\n position=None\n for p in self.bookmark[tag] :\n if p>pos :\n position=p\n break\n return position", "title": "" }, { "docid": "b81ec625d6b7f52dd01df7ed43e849a3", "score": "0.6020705", "text": "def next(self) -> int:\n res = self.sorted[self.min_index]\n self.min_index += 1\n return res", "title": "" }, { "docid": "0f01e4a925cd422ed2c9c09ffb04cb34", "score": "0.6017851", "text": "def __next__(self):\n # Si on est arrive au bout de la liste de paires\n if self.index2 == len(self.nums):\n raise StopIteration\n\n # Cree une paire\n\n p = (self.nums[self.index1], self.nums[self.index2])\n # Avance la position pour la prochaine fois\n\n if self.index2 < len(self.nums) - 1:\n self.index2 += 1\n else:\n self.index1 += 1\n self.index2 = self.index1 + 1\n return p", "title": "" }, { "docid": "c62d3b5e8bbbab31ec6a03bb3a0658db", "score": "0.599853", "text": "def getPosition(self, n):\n return self._get_at(n).obj", "title": "" }, { "docid": "a2cd3d4bceaf862e374dac798c1c4cac", "score": "0.5997963", "text": "def next(self, node):\n return self.shift(node, +1)", "title": "" }, { "docid": "dcd84f5fdd735366d1a67fd755ba12ce", "score": "0.5963988", "text": "def __next__(self):\n current = self.value\n if self.iterator >= self.count and self.count != 0:\n self.value = self.start_value\n self.iterator = 1\n else:\n self.value = self.value + self.increment\n if self.value > 65535:\n diff = self.value - 65535\n self.value = diff - 1\n if self.value < 0:\n diff = self.value\n self.value = 65536 + diff\n self.iterator += 1\n return current", "title": "" }, { "docid": "1256c5079a5a1311cc49b7bc51cf7dfa", "score": "0.5960003", "text": "def find_next_chunk(self, pos, IDs):\n\n while pos < len(self.chunks):\n\n if self.chunks[pos][0] in IDs:\n\n # Found a chunk with ID in the list\n return pos, self.chunks[pos]\n\n # Otherwise continue looking\n pos = pos + 1\n\n return None, None", "title": "" }, { "docid": "5582b3d44ae71307f823c5402835f6a6", "score": "0.59590805", "text": "def find_next_transition(pos, elements):\n if pos+1 < len(elements):\n next = elements[pos+1]\n if next.isa(\"Transition\"):\n return next\n elif isSlide(next):\n return None\n else:\n return find_next_transition(pos+1, elements)\n else:\n return None", "title": "" }, { "docid": "1b71e25f2744533f3ca87220ef4dd0fd", "score": "0.595189", "text": "def next_move(self, board, max_level=4):\n value, position = self.minimax(board, max_level=max_level)\n return position", "title": "" }, { "docid": "7dac5be9f52380a091ebce752d8c9272", "score": "0.5946751", "text": "def getIndexOfPosition(position):\n # This is not a good approach \n # but it gets the job done\n x = 0\n y = 0\n for i in Movement.A_copy:\n if i == position:\n return (x,y)\n y += 1\n if y > 7:\n y = 0\n x+=1", "title": "" }, { "docid": "0f03866511dad44e4aff44e526ba88ac", "score": 
"0.59178275", "text": "def getNext(self, carPos):\n if carPos.lane != self:\n print \"car is on other lane\"\n return None\n\n nextLanePos = None\n shortestDist = sys.maxint\n for cp in self.carsPosition.itervalues():\n if cp.isGoalFlag:\n nextLanePos.append(cp)\n continue\n if cp.position is None:\n print \"the car has no position\"\n continue\n if cp.car.id == carPos.car.id:\n continue\n distance = cp.position - carPos.position\n if not cp.free and (0 < distance < shortestDist): # only pick the cars in front of current car\n shortestDist = distance\n nextLanePos = cp\n return nextLanePos", "title": "" }, { "docid": "0bb0427e87a89ae8af26dc6b3cc27545", "score": "0.5917788", "text": "def prev_position(self, position):\n if position <= 0:\n raise IndexError\n return position - 1", "title": "" }, { "docid": "302abd6425249e75220705df134b87da", "score": "0.5905974", "text": "def __next__(self):\n current = self.value\n if self.iterator >= self.count and self.count != 0:\n self.value = self.start_value\n self.iterator = 1\n else:\n self.value = self.value + self.increment\n if self.value > 65535:\n diff = self.value - 65535\n self.value = 1 + diff\n if self.value < 1:\n diff = self.value - 1\n self.value = 65535 + diff\n self.iterator += 1\n return current", "title": "" }, { "docid": "fa96b0b8c69084c58aaa1309a498886a", "score": "0.5905653", "text": "def next(self) -> int:\n result = self.tree[self.current]\n self.current += 1\n return result", "title": "" }, { "docid": "d42b4e866307dd0d62e502e717e2b238", "score": "0.590088", "text": "def get_position(self, current_step, step_position, distance_traveled):\n if current_step > self.get_step_count():\n return (current_step, step_position + 100)\n elif current_step < 1:\n return (0, 0)\n else:\n step_length = self.get_step_length(current_step)\n remaining = step_length - step_position\n if distance_traveled < remaining:\n step_position += distance_traveled\n return (current_step, step_position)\n else:\n distance_traveled -= remaining\n current_step += 1\n step_length = self.get_step_length(current_step)\n step_position = 0\n while step_position + distance_traveled > step_length and current_step <= self.get_step_count():\n current_step += 1\n distance_traveled -= step_length\n step_position = distance_traveled\n return (current_step, step_position)", "title": "" }, { "docid": "c73668e934554c96d555049864905a00", "score": "0.58825445", "text": "def GetNextPlayer(self, p):\n next = p + 1\n if next == self.numberOfPlayers:\n next = 0\n return next", "title": "" }, { "docid": "9f3905f1866588cf13274802263000ff", "score": "0.5875449", "text": "def infer_next_position(raw: str, line_no: int, line_pos: int) -> Tuple[int, int]:\n # No content?\n if not raw:\n return line_no, line_pos\n split = raw.split(\"\\n\")\n return (\n line_no + len(split) - 1,\n line_pos + len(raw) if len(split) == 1 else len(split[-1]) + 1,\n )", "title": "" }, { "docid": "83ceeba8148c6f0c80b414b51b759f9f", "score": "0.5872775", "text": "def __next__(self):\n if self.has_more():\n self._current_position += 1\n return self._cache[self._current_position]", "title": "" }, { "docid": "53f99b3ffb6fdbc51e9979196f3a58a4", "score": "0.5871915", "text": "def __next__(self) -> Point:\n\n self.cur_point = self.cur_point.shift(next(self.shifts_iterator))\n return self.cur_point", "title": "" }, { "docid": "607d4541f0dd46c9ebe7a75e49de10aa", "score": "0.5870603", "text": "def current_position(self) -> Tuple[int, int]:\n return self.focus + 1, len(self.lines)", "title": "" }, { "docid": 
"372e4aecb7b1d4ba9ac56c9728420000", "score": "0.58410764", "text": "def fakeGetNext(self, position, step):\n if step > 0:\n destination = self.moveForward(position)\n elif step < 0:\n destination = self.moveBackward(position)\n else:\n destination = {'type':'wrong', 'side':side, 'index':index}\n\n return destination", "title": "" }, { "docid": "a58cb7b104e5c304b1b8fad1d4d49370", "score": "0.58388627", "text": "def next(self):\n self.currentIndex += 1\n if self.currentIndex < len(values):\n result = 0\n else:\n result = 1\n self.currentIndex = 0\n return result", "title": "" }, { "docid": "bd34b58d803be6c68595674061d89b58", "score": "0.58167374", "text": "def next(cls, curr):\n\n return curr + cls.step", "title": "" }, { "docid": "82a163ba9080e66126fd3dd7c0c3b949", "score": "0.58106625", "text": "def player_next_to(self,player):\n return (player+1)%self.no_of_players", "title": "" }, { "docid": "ebb8639ccc81e2f25bbb0b09b7aa3c16", "score": "0.5808661", "text": "def next_move(self, board):\n #do something with board\n #return the [row, col] you want to move in.\n return [0, 0]", "title": "" }, { "docid": "373109fe2564e2a1cac69f0535c67ed7", "score": "0.5798598", "text": "def _loc(self, pos, idx):", "title": "" }, { "docid": "cd5b8159898327fef75792b5675c2eb5", "score": "0.57933944", "text": "def _get_next_loc(self, loc, direction, action):\n # Update Direction\n if action == 0:\n next_direction = (direction - 1)%4\n elif action == 2:\n next_direction = (direction + 1)%4\n else:\n next_direction = direction \n # Update Last Location\n if next_direction == 0:\n next_loc = (loc[0]-1, loc[1])\n elif next_direction == 1:\n next_loc = (loc[0], loc[1]+1)\n elif next_direction == 2:\n next_loc = (loc[0]+1, loc[1])\n else:\n next_loc = (loc[0], loc[1]-1)\n \n return next_loc, next_direction", "title": "" }, { "docid": "4372d27b6b7bb3247989f4b971919c79", "score": "0.5775859", "text": "def getNextElement(d, idx):\n\tt = np.where(d[:,2]>0)[0]\n\tt = t[t > idx]\n\tif len(t):\n\t\treturn d[t[0],0], d[t[0],1], t[0]\n\treturn None, None, None", "title": "" }, { "docid": "4b68be1f67e0b129e11680ced3eb2279", "score": "0.576655", "text": "def get_next_token(self) -> Token:\n token = self.peek()\n self.position += 1\n return token", "title": "" }, { "docid": "5a4cafc1b7c03d144464dfc12ddce80c", "score": "0.57665104", "text": "def next(self):\n try:\n value = self.frames[self.current_item]\n except IndexError:\n raise StopIteration\n self.current_item += 1\n return value[0:-1]", "title": "" }, { "docid": "44433158a093f163d911e692c67945b8", "score": "0.57633215", "text": "def next_waypoint(self, position, theta): # theta is yaw\n #rospy.logdebug('WaypointUpdater::next_waypoint')\n # find closest waypoint first\n self.index = self.closest_waypoint(position)\n map_coords = get_position(self.base_waypoints[self.index].pose)\n # get coordination of closest waypoints\n map_x = map_coords[0]\n map_y = map_coords[1]\n # check that closest waypoint is really ahead of me\n heading = math.atan2(map_y - position[1], map_x - position[0])\n angle = math.fabs(theta - heading)\n # if not, the next surely will be\n if math.pi * 1.5 > angle > math.pi / 2:\n self.index += 1\n # if waypoint index exceeds length of list, wrap around to the start of list\n self.index = self.norm_index(self.index)\n return self.index", "title": "" }, { "docid": "ae624a8913c19a087cb60629acf668d5", "score": "0.57577455", "text": "def _read_position(self, input, index):\n if (len(self.position) != 0): self.position = [] #resets position to an empty list\n for 
i in range(3):\n self.position.append(float(input[index]))\n index += 1\n return index", "title": "" }, { "docid": "521c911b0abf45d6766bf17cb250ae66", "score": "0.5755216", "text": "def eval_next_loc(self, p, i, j):\n # If parition number is even, traverse column\n if p % 2 == 0:\n # If modulus 0 or 1, traverse column downward\n if (p % 4 < 2):\n i += 1\n # If modulus 2 or 3, traverse column upward\n if (p % 4 > 1):\n i -= 1\n # If odd, traverse row\n else:\n # If modulus 0 or 1, traverse row backward\n if (p % 4 < 2):\n j -= 1\n # If modulus 2 or 3, traverse row forward\n if (p % 4 > 1):\n j += 1\n return i, j", "title": "" }, { "docid": "68949198a31e11d45ae26cb766bdd287", "score": "0.5751591", "text": "def position(self, node):\n node = int(node)\n return self.node_to_position[node]", "title": "" }, { "docid": "9a3d6642645ef62e30c00b707a4fb76c", "score": "0.574727", "text": "def predict_position(self):\r\n #print(\"Current position: %d, Velocity: %d -> Next position: %d\" % (self._pos_cur, self._velocity, self._pos_cur + self._velocity))\r\n return self._pos_cur + self._velocity", "title": "" }, { "docid": "fab726343dc0782d9af18b6e7bea1d64", "score": "0.5745759", "text": "def get_point(self):\n if self._current_segment is -1:\n return None\n l = len(self._waypoints[self._current_segment])\n if(self._current_idx >= l):\n self.next_segment()\n\n if self._current_segment is -1:\n return None\n\n if len(self._waypoints[self._current_segment]) < 1:\n self.next_segment()\n\n if self._current_segment is -1:\n return None\n\n desired_point = (self._waypoints[self._current_segment])[\n self._current_idx]\n self._current_idx += 1\n\n return desired_point", "title": "" }, { "docid": "39a2ff8fb8db2e8047fa099904218f87", "score": "0.5738875", "text": "def advance(self):\n if self.pos < len(self.tokens):\n value = self.tokens[self.pos][1]\n else:\n value = EOF\n self.pos += 1\n if self.pos < len(self.tokens):\n self.t = self.tokens[self.pos][0]\n else:\n self.t = EOF\n return value", "title": "" }, { "docid": "5233a45ba04b4ed58646b2925571289f", "score": "0.57301384", "text": "def find_next_position(self, player, scene_grid):\n\t\tif not (self.attack_position and Util.vector3_equal(self.attack_position, player.position)):\n\t\t\tself.attack_position = [x for x in player.position]\n\t\t\tnew_path = PathFinding.run(self.position, self.attack_position, scene_grid)\n\t\t\tif new_path: # assign when we find a path\n\t\t\t\tself.path = new_path\n\t\t\telse:\n\t\t\t\t# should log here\n\t\t\t\tprint \"Find path error\"\n\t\t# update position\n\t\tself.update_position_from_path()", "title": "" }, { "docid": "f835ada0a1309d2ec329051981d09168", "score": "0.57191986", "text": "def peek(self, pos):\n [index_i, index_j] = [pos[1] // ROW_HEIGHT, pos[0] // COLUMN_WIDTH]\n peek_idx = index_i + index_j * GRID_SIZE[0]\n if peek_idx < len(self.game.get_strings()):\n self.game.peek(peek_idx)", "title": "" }, { "docid": "b6f6476a92e29a0c3f50ff3cbf0a1655", "score": "0.5719126", "text": "def getRelativePosition(selector):\n\n if selector[\"__name\"] in [\"end\", \"beginning\", \"token\"]:\n return 0\n\n if selector[\"__name\"] == \"previous\":\n return int(-selector[\"__position\"])\n else:\n return int(selector[\"__position\"])", "title": "" }, { "docid": "97f7f74cea80e1367351dbbf557defec", "score": "0.5717137", "text": "def next_item(self) -> (int, int, int):\n from_start, from_prev, id_ = super().next_item()\n while from_start > self.__next_shuffle:\n self.__next_shuffle += self.__shuffle_window\n 
np.random.shuffle(self.__shuffle_map)\n\n index = id_ - self._id_shift - 1\n return from_start, from_prev, self.__shuffle_map[index]", "title": "" }, { "docid": "b858718b85ad40bee0664a6b6890aea1", "score": "0.5711791", "text": "def __next__(self):\n current = self.value\n if self.value == self.end_value:\n self.value = self.start_value\n else:\n self.value += self.increment\n return current", "title": "" }, { "docid": "061c808c7b9192fc9bbc4993c9dc6906", "score": "0.57088757", "text": "def position(self):\n if self.p:\n if self._finished:\n return None\n try:\n p = int(self._nano_pos/1000000000.0 + 0.5)\n t = time()\n if p == self.last_position:\n if self.last_position_ts + 2 < t:\n if not self.paused:\n return None\n else:\n self.last_position = p\n self.last_position_ts = t\n return p\n except:\n return None", "title": "" }, { "docid": "0588fb4db45c0a055d0d1348dbd8662b", "score": "0.5699857", "text": "def __next__(self):\n self._k += 1 #advance to next index\n if self._k < len(self._seq):\n return (self._seq[self._k]) #return the data element\n else:\n raise StopIteration() #there are no more elements", "title": "" }, { "docid": "c676a4a9d8ab4effaf7e93041e9b22b5", "score": "0.56930643", "text": "def get_pos(self, key=None):\n self.set_pos()\n if key:\n return self._pos[key]\n return self._pos", "title": "" }, { "docid": "c3a5e968505093406ad8bf2d1a9aa171", "score": "0.5681042", "text": "def inputnextmove(self):\n possibles = self._possiblemoves()\n next = None\n if len(possibles):\n while not next in possibles: # ensure valid moves are taken by the human\n print (\"Current board position is: \")\n print (self)\n if self.mustslide():\n movetext = input(\"Slide a piece using > char - enter from > to: eg. 0,2 > 1,1: \")\n movecoords = re.search(\"(\\d+).*,.*(\\d+).*>.*(\\d+).*,.*(\\d+)\", movetext)\n if movecoords:\n frmove = [int(x) for x in movecoords.groups()[:2]]\n tomove = [int(x) for x in movecoords.groups()[2:4]]\n next = self.setslide(frmove, tomove)\n else:\n movetext = input(\"Your move - enter location coordinate pair: e.g. 
0,2: \")\n coords = re.search(\"(\\d+).*,.*(\\d+)\", movetext)\n if coords:\n move = [int(x) for x in coords.groups()]\n next = self.setmove(move)\n return next\n else:\n return None # no more moves possible", "title": "" }, { "docid": "97c9ce2310d3cf89ffb3dd241fb77f9b", "score": "0.5679372", "text": "def get_next_move(self):\n return None if self.plan.empty() else self.plan.get()", "title": "" }, { "docid": "dd4bcbffa3182159999f9e4511ed2e3b", "score": "0.56782824", "text": "def next_token(self):\r\n try:\r\n token = self.tokens[self.index]\r\n self.index += 1\r\n return token\r\n except IndexError:\r\n return None", "title": "" }, { "docid": "a6c4051460d67cc60cc4555d3382eb25", "score": "0.56751686", "text": "def __next__(self):\n current = self.value\n if self.iterator >= self.count and self.count != 0:\n self.value = self.start_value\n self.iterator = 1\n else:\n self.value = self.value + self.increment\n if self.value > 1048575:\n diff = self.value - 1048575\n self.value = diff - 1\n if self.value < 0:\n diff = self.value\n self.value = 1048576 + diff\n self.iterator += 1\n return current", "title": "" }, { "docid": "c30aa7e13543f1b72b57188f9511ea16", "score": "0.5674438", "text": "def position(x: int, y: int, n: int) -> Tuple[int, int]:\n\n pos = [\n (y, x), # 0\n (y + 1, x + 1), # 1\n (y, x + 1), # 2\n (y - 1, x + 1), # 3\n (y - 1, x), # 4\n (y - 1, x - 1), # 5\n (y, x - 1), # 6\n (y + 1, x - 1), # 7\n (y + 1, x), # 8\n ]\n pos_n = pos[n]\n return pos_n", "title": "" }, { "docid": "d445cb808042b78774e204876f9abbd6", "score": "0.56564355", "text": "def get_position(self):", "title": "" }, { "docid": "fe24ae87da7781957b261c884a1d68f9", "score": "0.56455976", "text": "def find_next_np(self, tc):\n while True:\n tc += 1\n try:\n token = self.tokens[tc]\n except IndexError: # EAFP\n return (None, tc)\n if token.pos == Pos.NP:\n return (token, tc)", "title": "" }, { "docid": "6d3de5bb3643711c343f295d539295b2", "score": "0.5641161", "text": "def next(self):\n if self.index < len(self.layout.sequence) - 1:\n return self.layout.rows[self.layout.sequence[self.index + 1]]\n else:\n return None", "title": "" }, { "docid": "03205c55f4c227c2b1c2ea105376687c", "score": "0.5628272", "text": "def next_item(self) -> Any:\n self.current_item = self._cycle_dict[self.current_item]\n return self.current_item", "title": "" }, { "docid": "90fc62dd9c30490beff21ff823c970f2", "score": "0.56267387", "text": "def get_position(self, position):\n return self.highscores[position - 1]", "title": "" }, { "docid": "047bf4cc75b01b014286c78c99dc28ce", "score": "0.5625568", "text": "def advance_start_pos(self, start_pos, advance, t):\n # Current position\n this_pos = np.full((1, self.dh), t)\n # New start position assuming we go to the next state\n new_start_pos = np.concatenate([this_pos, start_pos[:-1]])\n return np.where(advance == 1, new_start_pos, start_pos)", "title": "" }, { "docid": "40b9fee876ccd8c4aadce979c41c11a5", "score": "0.5624949", "text": "def get_next_valid_offset(self, addr: int) -> int:\n\t\treturn core.BNGetNextValidOffset(self.handle, addr)", "title": "" }, { "docid": "8e3bda6526f94ca87448139c0b8afd4e", "score": "0.5612048", "text": "def next_move(self):\n raise NotImplementedError('Implement next_move in GameState subclass')", "title": "" } ]
64d5b66935cc2b6d65ce1d359380ed1d
use lists to check if two strings are anagrams
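# Illustrative sketch only, not part of the dataset: the list-based anagram check
# this query describes. The function name are_anagrams and the space/case handling
# are assumptions for the example.
def are_anagrams(s1: str, s2: str) -> bool:
    # Build character lists, ignoring spaces and case differences.
    chars1 = [c for c in s1.lower() if c != ' ']
    chars2 = [c for c in s2.lower() if c != ' ']
    # Two strings are anagrams when their sorted character lists are equal.
    chars1.sort()
    chars2.sort()
    return chars1 == chars2

# Example (hypothetical): are_anagrams("listen", "silent") returns True.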
[ { "docid": "5be43e10ece0a00d0d8d210511210b03", "score": "0.74138683", "text": "def anagram_list(s1, s2):\n # largely copied from exercise\n c1 = [0] * 26\n c2 = [0] * 26\n\n for i in range(len(s1)):\n pos = ord(s1[i]) - ord('a') # gets index\n c1[pos] = c1[pos] + 1\n\n # just in case diff lengths..\n for i in range(len(s2)):\n pos = ord(s2[i]) - ord('a')\n c2[pos] = c2[pos] + 1\n\n for i, ch in enumerate(c1): # not as clear but works\n if c2[i] != ch: # two arrays with same char set\n return False\n\n return True", "title": "" } ]
[ { "docid": "6cf901e881211c3bcf9daf5f308acaef", "score": "0.8629491", "text": "def anagrams_lst(str1: str, str2: str) -> bool:\n return sorted(list(str1)) == sorted(list(str2))", "title": "" }, { "docid": "eed012ad0a439017fbd2fde094cdb8f1", "score": "0.8194693", "text": "def anagrams(string1, string2):\r\n\r\n hash1 = HashTable()\r\n hash2 = HashTable()\r\n string1 = string1.replace(\" \", \"\")\r\n string2 = string2.replace(\" \", \"\")\r\n string1 = string1.lower()\r\n string2 = string2.lower()\r\n for char in string1:\r\n searchnode = hash1.search(char)\r\n if searchnode is not None and searchnode.key == char:\r\n hash1.insert(char, searchnode.value + 1)\r\n else:\r\n hash1.insert(char, 1)\r\n\r\n for char2 in string2:\r\n searchnode2 = hash2.search(char2)\r\n if searchnode2 is not None:\r\n hash2.insert(char2, searchnode2.value + 1)\r\n else:\r\n hash2.insert(char2, 1)\r\n\r\n for character in string1:\r\n if hash2.search(character) is None:\r\n return False\r\n if hash1.search(character).value != hash2.search(character).value:\r\n return False\r\n return True", "title": "" }, { "docid": "aca78ebb9b589c8336a834b5ff8bf8f1", "score": "0.81507266", "text": "def is_anagram(stringone, stringtwo):#This function will takes two strings and returns True if they are anagrams.\n one=list(stringone.lower())\n two=list(stringtwo.lower())\n if' 'in two :\n two.remove(' ')\n one.sort()\n two.sort()\n if one != two:\n return False\n else:\n return True", "title": "" }, { "docid": "548c45eb4e903c5fc000bd4540e2fcfb", "score": "0.80204785", "text": "def is_anagram(str1,str2):\n l1 = []\n l2 = []\n s1 = str1.lower()\n s2 = str2.lower()\n for c in s1:\n if c != ' ':\n l1.append(c)\n for c in s2:\n if c != ' ':\n l2.append(c)\n l1.sort()\n l2.sort()\n return l1 == l2", "title": "" }, { "docid": "0d00f07aae888e68102d77dc764ed5e2", "score": "0.80107343", "text": "def is_anagram(text1,text2):\n t1 = list(text1)\n t2 = list(text2)\n t1.sort()\n t2.sort()\n if ''.join(t1) == ''.join(t2):\n return True\n else:\n return False", "title": "" }, { "docid": "7b11008814136170ba5e3745a112ab5b", "score": "0.7977517", "text": "def are_anagrams(word1, word2):\n if word1 == word2:\n return False\n elif word1 != word2:\n wordone = list(word1)\n wordtwo = list(word2)\n wordone.sort()\n wordtwo.sort()\n if wordone == wordtwo:\n return True\n else:\n return False", "title": "" }, { "docid": "31ec797c57964ef50f7c7e96259958f1", "score": "0.7926698", "text": "def anagram(s1, s2):\n l1 = []\n l2 = []\n for s1_letter in s1:\n if s1_letter != \" \":\n l1.append(s1_letter)\n for s2_letter in s2:\n if s2_letter != \" \":\n l2.append(s2_letter)\n l1.sort()\n l2.sort()\n return l1 == l2", "title": "" }, { "docid": "7f51501a97ede0ad700692c7bb646d48", "score": "0.7889058", "text": "def is_anagram(word1, word2):\n \n word1_list = [i for i in word1.lower() if i != \" \"]\n word2_list = [j for j in word2.lower() if j != \" \"]\n \n word1_list.sort()\n word2_list.sort()\n \n return word1_list == word2_list\n pass", "title": "" }, { "docid": "ba90066ce2335a55691aebcc055cb28e", "score": "0.7801172", "text": "def anagrams_dd(str1: str, str2: str) -> bool:\n anag_dd: DefaultDict = defaultdict(int)\n for c in str1:\n anag_dd[c] += 1\n for c in str2:\n anag_dd[c] -= 1\n return not any(anag_dd.values())", "title": "" }, { "docid": "2f54e8cd098034f8a87521552ef79b17", "score": "0.77820987", "text": "def test_4_two_string_are_anagrams(word_1, word_2):\n error = list()\n if len(word_1) != len(word_2):\n error.append(\"Length of {0}-{1} != {2}-{3} 
length\".format(word_1, len(word_1), len(word_2), word_2))\n else:\n alphabets = list(word_1)\n for i in range(0, len(word_1)):\n if str(alphabets[i]).lower() in word_2.lower():\n pass\n else:\n error.append(\"The alphabet '{0}' is not in {1}\\n\".format(alphabets[i], word_2))\n\n if not error:\n print(\"Problems with the two words are: \\n{}\".format(error))\n assert error == []", "title": "" }, { "docid": "8023cdeea2dc3809c70efe23077c1d1a", "score": "0.775162", "text": "def is_anagram(s1,s2):\n for s in s1:\n if(s1.lower().count(s)!=s2.lower().count(s)):\n return False\n else:\n return True", "title": "" }, { "docid": "923abf0dab295677124b68c3e02fc104", "score": "0.77464455", "text": "def anagrams_cntr(str1: str, str2: str) -> bool:\n return Counter(str1) == Counter(str2)", "title": "" }, { "docid": "bbf99cb430141145ac83e7c91d767de2", "score": "0.76937556", "text": "def anagrams_with_sort(self, string1: str, string2: str) -> bool:\n if(len(string1) == len(string2)): \n # Sort both words\n string1 = sorted(string1)\n string2 = sorted(string2)\n \n return(string1 == string2)\n else:\n return(False)", "title": "" }, { "docid": "6222a1a774e06fbb9be8c9d8874aa723", "score": "0.76389", "text": "def valid_anagrams(self, string1: str, string2: str) -> bool:\n string1 = string1.replace(' ', '')\n string2 = string2.replace(' ', '')\n # if the lengths of s and t differ, they are not anagrams\n if(len(string1) != len(string2)):\n return(False)\n else:\n lettersDict: Dict[str, str] = {}\n\n # count how many times a letter occurs in s\n for letter in string1:\n if(letter in lettersDict):\n lettersDict[letter] += 1\n else:\n lettersDict[letter] = 1\n\n # see if all the letters in t match in s\n for t_letter in string2:\n if(t_letter in lettersDict):\n lettersDict[t_letter] -= 1\n if(lettersDict[t_letter] == 0):\n popped = lettersDict.pop(t_letter)\n else:\n lettersDict[t_letter] = 1\n\n return(lettersDict == {})", "title": "" }, { "docid": "b25edf1a56f311c8a9acd88b631380bf", "score": "0.75928456", "text": "def is_anagram(str1, str2):\n # anagrams if they have the same number of characters\n # initial check for string length\n if len(str1) != len(str2):\n return False\n\n map1 = {}\n map2 = {}\n\n for ch in str1:\n if ch in map1:\n map1[ch] += 1\n else:\n map1[ch] = 0\n\n for ch in str2:\n if ch in map2:\n map2[ch] += 1\n else:\n map2[ch] = 0\n\n if map1 != map2:\n return False\n else:\n return True", "title": "" }, { "docid": "5551b50882ecce8e6a330a50fe92c22f", "score": "0.75842124", "text": "def is_anagram(word1, word2):\n word1 = word1.replace(\" \", \"\").lower()\n word2 = word2.replace(\" \", \"\").lower()\n if sorted(word1) == sorted(word2):\n return True\n else:\n return False", "title": "" }, { "docid": "44ec26bbc632877cc3c352e7d0ae0700", "score": "0.757299", "text": "def are_anagrams(word, original):\n return sorted(word) == sorted(original)", "title": "" }, { "docid": "dc7269529464e07e2c7005e72dcae8bb", "score": "0.75402504", "text": "def isAnagram(self, s1, s2):\n s1 = \"\".join([c for c in sorted(s1)])\n s2 = \"\".join([c for c in sorted(s2)])\n return s1 == s2", "title": "" }, { "docid": "7d44d03608a04626ad492189e02e8e38", "score": "0.7483904", "text": "def anagram_checker(str1, str2):\n\n # Clean strings and convert to lower case\n str1 = str1.replace(\" \", \"\").lower()\n str2 = str2.replace(\" \", \"\").lower()\n\n # Compare the length of both strings\n if len(str1) == len(str2):\n # Sort each string and compare\n if sorted(str1) == sorted(str2):\n return True\n\n return False", "title": 
"" }, { "docid": "01387765f53c1b0603b563ee47dbf03b", "score": "0.7478102", "text": "def anagram(string1, string2):\n dict_1 = {}\n dict_2 = {}\n for i in string1:\n if i not in dict_1:\n dict_1[i] = 1\n else:\n dict_1[i] += 1\n for j in string2:\n if j not in dict_2:\n dict_2[j] = 1\n else:\n dict_2[j] += 1\n if dict_1 != dict_2:\n return False\n else:\n return True", "title": "" }, { "docid": "013f2cc7c1496a64cb17f14e67648b70", "score": "0.74741286", "text": "def words_with_anagrams(list1,list2):\n\n # Sort in anagram order\n for i in range(len(list1)):\n list1[i] = anagram_counting_sort(list1[i],True)\n \n for j in range(len(list2)):\n list2[j] = anagram_counting_sort(list2[j],False)\n\n # Run radix sort\n list1 = optimized_radix_sort_task2(list1)\n list2 = optimized_radix_sort_task2(list2)\n\n # Remove duplicates in list2\n list2 = remove_duplicates(list2)\n\n pointer_right = 0\n pointer_left = 0\n res = []\n\n # Compares elements of list1 with elements of list2\n while pointer_left < len(list1) and pointer_right < len(list2):\n left_item = list1[pointer_left]\n right_item = list2[pointer_right]\n \n # Perform length comparison\n if len(right_item[0]) > len(left_item[0]):\n pointer_left += 1\n elif len(left_item[0]) > len(right_item[0]):\n pointer_right += 1\n else: \n if left_item[0] == right_item[0]:\n res.append(left_item[1])\n pointer_left += 1\n else: \n # Perform character comparison\n for i in range(len(left_item[0])): \n if left_item[0][i] > right_item[0][i]:\n pointer_right += 1\n break\n elif left_item[0][i] < right_item[0][i]:\n pointer_left += 1\n break\n \n return res", "title": "" }, { "docid": "35222fab02bec994b8e2893148c57dac", "score": "0.7404496", "text": "def anagram(main_str, str_list):\n return [_str for _str in str_list if str_list and Counter(_str) == Counter(main_str)]", "title": "" }, { "docid": "957b7d9c252f88be6d0621bfbb0087d4", "score": "0.7359042", "text": "def is_anagram(w1, w2):\n\n if len(w1) != len(w2):\n return False\n else:\n w1_dict = create_word_dict(w1)\n w2_dict = create_word_dict(w2)\n return sorted(w1_dict.keys()) == sorted(w2_dict.keys()) and sorted(w1_dict.values()) == sorted(w2_dict.values())", "title": "" }, { "docid": "16c7af0c9cb186c8892e5fe068ca8dbf", "score": "0.7251008", "text": "def anagram(first, second):\n return Counter(first) == Counter(second)", "title": "" }, { "docid": "38dfc13205d3b39e00e1904a5f34e538", "score": "0.72006154", "text": "def is_it_an_anangram(input1, input2):\n sorted_word1 = sorted(input1)\n sorted_word2 = sorted(input2)\n\n if sorted_word1 == sorted_word2:\n print(\"Yipee, these words are anagrams!\")\n else:\n print(\"Hate to break it to ya, no anagram here\")\n # return (do I need return here?)", "title": "" }, { "docid": "cdc88af708fb6cf7320968d84fb987ca", "score": "0.71521044", "text": "def group_anagrams_ver1(strs: list) -> list:\n anagrams = []\n for i, word in enumerate(strs):\n if i == 0:\n anagrams.append([word])\n else:\n exist = False\n # Compare a word and the first element in anagram.\n for anagram in anagrams:\n chars = [char for char in word if char in anagram[0]]\n # Add a word to a anagram when have same letters and length\n if len(chars) == len(anagram[0]):\n anagram.append(word)\n exist = True\n else:\n continue\n\n # Add new anagram when a word can not belong to any anagram.\n if exist == False:\n anagrams.append([word])\n\n return anagrams", "title": "" }, { "docid": "d9e74e5f98eae103c06b0aed8b5f9cdb", "score": "0.70998657", "text": "def isAnagram(s, p):\n return sorted(s) == sorted(p)", 
"title": "" }, { "docid": "c824f44f6bc07754412800a922ae4ac9", "score": "0.7062874", "text": "def _word_combo_to_anagrams(self, word_list):\n\n # generate all permutations of word combo\n perms = permutations(word_list)\n # for each permutation, create md5 hash string, compare against provided hashes and print if there is a match\n for perm in perms:\n gram = \" \".join(perm).encode('utf-8')\n hash_str = hashlib.md5(gram).hexdigest()\n if hash_str in self.hashes:\n idx = self.hashes.index(hash_str)\n print(f\"Correct match found for hash {self.hashes[idx]}!: {gram}\")", "title": "" }, { "docid": "855c3b620da58b8f5c7fdf1c16e897c5", "score": "0.7036624", "text": "def group_anagrams(strs):\n str_dict = {}\n for string in strs:\n str_sorted = ''.join(sorted(string))\n if str_sorted not in str_dict:\n str_dict[str_sorted] = [string]\n else:\n str_dict[str_sorted].append(string)\n result = []\n for v in str_dict.values():\n if len(v) > 1:\n result.extend(v)\n return result", "title": "" }, { "docid": "70985463bb8293e5e826c4378e9e2b0f", "score": "0.70132715", "text": "def is_anagram(word1, word2):\n\n word1 = word1.lower()\n word2 = word2.lower()\n\n word_2_counts = defaultdict(int)\n\n\n for character in word2:\n if character != ' ':\n ? ? += 1\n\n\n for character in word1:\n if character == ' ':\n continue\n word_2_counts[character] -= 1\n if word_2_counts[character] == 0:\n del word_2_counts[character]\n \n\n\n return not word_2_counts", "title": "" }, { "docid": "208add9487edf76da476675f04464921", "score": "0.69505113", "text": "def anagrams(self, strings):\n str_dict = dict() # sorted string: its anagrams indices\n order = [] # order in which we added keys to the dictionary\n for i, word in enumerate(strings, start=1):\n curr = \"\".join(sorted(word)) # current key\n if curr in str_dict:\n str_dict[curr].append(i)\n else: # we don't have this anagram in the dictionary yet\n order.append(curr)\n str_dict[curr] = [i]\n\n result = [] # build the resulting array based on str_dict and order\n for key_word in order:\n result.append(str_dict[key_word])\n return result", "title": "" }, { "docid": "87ee66bc1fd886c483cd6257f7edc3df", "score": "0.6910758", "text": "def group_anagram(strs):\n ana = {}\n for string in strs:\n s = ''.join(sorted(string))\n if s in ana:\n ana[s].append(string)\n else:\n ana[s] = [string]\n return [ana[x] for x in ana]", "title": "" }, { "docid": "01d24127e5824aeec151b5aca3131947", "score": "0.68959045", "text": "def anagramSolution2(s1, s2):\n alist1 = list(s1)\n alist2 = list(s2)\n\n alist1.sort() #Has a cost\n alist2.sort() #has a cost\n\n pos = 0\n matches = True\n\n while pos < len(s1) and matches: #loop through n times\n if alist1[pos] == alist2[pos]:\n pos = pos + 1\n else: \n matches = False\n return matches", "title": "" }, { "docid": "3d61e9affa462dd9e7426f9addcb7d37", "score": "0.6887974", "text": "def isAnagram(self, s: str, t: str) -> bool:\n return sorted(s)==sorted(t)", "title": "" }, { "docid": "250eb215822f99e986cad586e78fbc0f", "score": "0.68783396", "text": "def anagrams(*, key_word: str, words_to_check: list) -> list:\n # result = []\n # for word in words_to_check:\n # if sorted(key_word) == sorted(word):\n # result.append(word)\n result = [word for word in words_to_check if sorted(key_word) == sorted(word)]\n return result", "title": "" }, { "docid": "4a617007fd44ff9387b6b5382c8fe393", "score": "0.68088514", "text": "def anagrams(S): # S is a set of strings\n d = {} # maps s to list of words with signature s\n for word in S: # group words according to 
the signature\n s = ''.join(sorted(word)) # calculate the signature\n if s in d:\n d[s].append(word) # append a word to an existing signature\n else:\n d[s] = [word] # add a new signature and its first word\n # -- extract anagrams, ingoring anagram groups of size 1\n return [d[s] for s in d if len(d[s]) > 1]", "title": "" }, { "docid": "44a3d5b9314b4fb825a369aa65b97368", "score": "0.67924196", "text": "def anagram_func(arg_1, arg_2):\n arg_1.lower()\n arg_2.lower()\n str_compute = {}\n char_count = 0\n for lp_1 in arg_1:\n if lp_1 not in str_compute:\n str_compute[lp_1] = 1\n else:\n str_compute[lp_1] += 1\n print(str_compute)\n for lp_2 in arg_2:\n if lp_2 not in str_compute:\n char_count += 1\n else:\n if str_compute[lp_2] == 0:\n char_count += 1\n else:\n str_compute[lp_2] -= 1\n print(str_compute)\n for lp_3 in str_compute.values():\n char_count += lp_3\n\n print(char_count)", "title": "" }, { "docid": "9f48204ddbdf21f0945f33bbda9d4fed", "score": "0.67430246", "text": "def scrabble_helper(word_list, char_set_list):\r\n anagrams = [[] for _ in range(len(char_set_list))]\r\n word_sort = []\r\n\r\n # Sort each character in word alphabetically\r\n for i in range(len(word_list)):\r\n word_in_char = [x for x in word_list[i]]\r\n word_sort.append([word_list[i], \"\".join(word_counting_sort(word_in_char))])\r\n\r\n # Sort each alphabetically word in the second element alphabetically\r\n word_sort = word_radix_sort(word_sort, True)\r\n\r\n # Sort each character in word alphabetically\r\n for i in range(len(char_set_list)):\r\n char_list = [c for c in char_set_list[i]]\r\n char_sort = \"\".join(word_counting_sort(char_list))\r\n\r\n # Find the first and last occurrences\r\n first = first_occurrence_bs(word_sort, char_sort)\r\n last = last_occurrence_bs(word_sort, char_sort)\r\n\r\n # Append all word_sort[first: last+1] to anagrams\r\n if first is not None and last is not None:\r\n for j in range(first, last + 1):\r\n anagrams[i].append(word_sort[j][0])\r\n elif first is None and last is not None:\r\n anagrams[i].append(word_sort[last][0])\r\n\r\n # Sort the each anagram alphabetically\r\n for i in range(len(anagrams)):\r\n anagrams[i] = word_radix_sort(anagrams[i])\r\n\r\n return anagrams", "title": "" }, { "docid": "74f373dbd65071ea2630818c70145277", "score": "0.67405146", "text": "def check_permutation(a: str, b: str):\n if len(a) != len(b):\n return False\n map_a = [0] * 256 # map_a[i] = number of occurrences of character i\n map_b = [0] * 256\n for c in a:\n map_a[ord(c)] += 1 # Increment occurrence\n for c in b:\n map_b[ord(c)] += 1\n\n return map_a == map_b # Equality compare our \"buckets\"", "title": "" }, { "docid": "bf75d6d46e8c8a11ebab130c8db2cb3d", "score": "0.6729018", "text": "def isomorphic(word1, word2):\n letter_map = {}\n for index, letter in enumerate(word1):\n if letter in letter_map and letter_map[letter] != word2[index]:\n return False\n letter_map[letter] = word2[index]\n return True", "title": "" }, { "docid": "e831a71193e686cc294c5ef91d69c6ef", "score": "0.6724029", "text": "def find_anagrams(key, words):\n anagrams = []\n key = sorted(key.lower())\n for word in words:\n if key == sorted(word.lower()):\n anagrams.append(word)\n\n return anagrams", "title": "" }, { "docid": "d8a7e95143cda9e062c10dd54900994e", "score": "0.6722318", "text": "def detect_anagrams(word, possibilities):\n # alphabetize the incoming word for matching\n reordered_word = ''.join(sorted(word.lower()))\n matches = []\n for match in possibilities:\n # check against case differences\n if word.lower() 
== match.lower():\n pass\n # try to match alphabetized word against alphabetized matches in list\n elif reordered_word == ''.join(sorted(match.lower())):\n matches.append(match)\n return matches", "title": "" }, { "docid": "6bf2597fc8c6689ad5e5ddff12cb69cc", "score": "0.6705135", "text": "def find_anagrams_helper(word_list, anagrams_list, ans_lst, index_list):\n global count\n\n if len(word_list) == len(ans_lst):\n word = string_manipulation(ans_lst)\n if word in words:\n if word not in anagrams_list:\n print(f'Found: {word}')\n print('Searching...')\n anagrams_list.append(word)\n count += 1\n else:\n for i in range(len(word_list)):\n if i not in index_list:\n # Choose\n index_list.append(i)\n ans_lst.append(word_list[i])\n word = string_manipulation(ans_lst)\n if has_prefix(word):\n # Explore\n find_anagrams_helper(word_list, anagrams_list, ans_lst, index_list)\n # Un-choose\n index_list.pop()\n ans_lst.pop()", "title": "" }, { "docid": "55b6100d6c52da3966c57884e664330c", "score": "0.6682671", "text": "def isAnagram(self, s: str, t: str) -> bool:\n if len(s) != len(t):\n return False\n letter_counter = Counter(s)\n for ch in t:\n if ch not in letter_counter:\n return False\n else:\n if letter_counter[ch] <= 0:\n return False\n else:\n letter_counter[ch] -= 1\n return True", "title": "" }, { "docid": "f880aae50196b92df4e966eba093ee6b", "score": "0.6637756", "text": "def find_anagrams(name, word_list):\n name_letter_map = Counter(name)\n anagrams = []\n for word in word_list:\n test = ''\n word_letter_map = Counter(word.lower())\n for letter in word:\n if word_letter_map[letter] <= name_letter_map[letter]:\n test += letter\n if Counter(test) == word_letter_map:\n anagrams.append(word)\n print(*anagrams, sep='\\n')\n print()\n print(f'Remaining letters = {name}')\n print(f'Number of remaining letters = {len(name)}')\n print(f'Number of remaining (real word) anagrams = {len(anagrams)}')", "title": "" }, { "docid": "7e1891450f04b48bafa00d4c10d52067", "score": "0.66264087", "text": "def anagram_dict(s1, s2):\n\n d1 = {}\n for ch in s1:\n d1.setdefault(ch, 0) # check current value or set to 0\n d1[ch] += 1\n\n d2 = {}\n for ch in s2:\n d2.setdefault(ch, 0)\n d2[ch] += 1\n\n if d1 == d2: # python is smart enough to do a shallow compare free\n return True\n else:\n return False", "title": "" }, { "docid": "645d444a88aa89fb2fd1ce7ae2b592d7", "score": "0.66217303", "text": "def anagram(charArray):\n anagrams = []\n charCount = len(charArray)\n for Dictword in words:\n if len(Dictword) == charCount:\n testArray = []\n for chars in Dictword:\n testArray.append(chars)\n if sorted(testArray) == sorted(charArray) and testArray != charArray:\n anagrams.append(testArray)\n else:\n if len(anagrams)>0:\n return(printWord(random.choice(anagrams)))\n else:\n return \"No anagram...\"", "title": "" }, { "docid": "ae2cceff3d3af8b537c2c2f78e0323c8", "score": "0.6610625", "text": "def find_anagrams(s):\n word_list = sorted(s)\n anagrams_list = []\n find_anagrams_helper(word_list, anagrams_list, [], [])\n print(f'{count} anagrams: {anagrams_list}')", "title": "" }, { "docid": "4a6b213c12fe22a6933d16f66c309a1f", "score": "0.66044205", "text": "def check_permutation_naive(a: str, b: str):\n return sorted(a) == sorted(b) # Sort & compare", "title": "" }, { "docid": "d06f463e68cd53c818896916381e4721", "score": "0.66005576", "text": "def find_anagrams(word: str, candidates: list[str]) -> list[str]:\n return [\n candidate for candidate in candidates if is_anagram(word, candidate)\n ]", "title": "" }, { "docid": 
"48bd25f77e8f2c1555b2800335d6f351", "score": "0.6583791", "text": "def scramble(s1, s2):\n for l in set(s2):\n if not s1.count(l) >= s2.count(l):\n return False\n return True", "title": "" }, { "docid": "186ab2aea7018d4f9deb858b1387f413", "score": "0.65830684", "text": "def find_anagrams_from_phrase(phrase_dict, words):\n sub_anagrams = []\n\n for word in words:\n word_dict = Counter(word.lower())\n if phrase_dict & word_dict == word_dict:\n sub_anagrams.append(word)\n return sub_anagrams", "title": "" }, { "docid": "7b1150a948d9e436224ef6762d5a7ccb", "score": "0.65726775", "text": "def is_anagram(source, candidate):\n\n # make sure we are comparing two strings here\n # TODO: find out of 'silent' is an anagram of ['l','i','s','t','e','n']\n if not isinstance(candidate, str):\n return False\n\n if not isinstance(source, str):\n return False\n\n candidate = clean(candidate)\n source = clean(source)\n\n if len(source) != len(candidate):\n return False\n\n # assuming a word cannot be an anagram of itself\n if source == candidate:\n return False\n\n return sorted(source) == sorted(candidate)", "title": "" }, { "docid": "7502c5df8c478f4928f0609e042fd002", "score": "0.656633", "text": "def is_anagram_sort(s, t):\n # basic case\n if len(s) != len(t):\n return False\n\n # sort strings\n s_sorted = sorted(s)\n t_sorted = sorted(t)\n\n # check equality\n for i in range(len(s_sorted)):\n if s_sorted[i] != t_sorted[i]:\n return False\n\n return True", "title": "" }, { "docid": "c6684dd326f139bdcde66dd37f036a30", "score": "0.65545905", "text": "def anagram_counting_sort(string,islist1):\n base = 26\n\n # Initialize count array \n count_array = [0] * (base+1)\n\n # Update count array\n for item in string:\n val = item[0] \n count_array[ord(val)-97] = count_array[ord(val)-97] + 1\n \n # Update string\n old_str = string\n new_str = \"\"\n for i in range(len(count_array)): \n item = i\n frequency = count_array[i]\n for j in range(frequency):\n new_str += chr(item+97)\n\n if islist1: # Only store list1 original strings\n return [new_str,old_str]\n else:\n return [new_str]", "title": "" }, { "docid": "95ce86e895a0f191f2991992e2b962a9", "score": "0.65490264", "text": "def check_permutation(a: str, b: str) -> bool:\n chars = defaultdict(int)\n for ch in a:\n chars[ch] += 1\n for ch in b:\n if ch not in chars:\n return False\n elif chars[ch] > 1:\n chars[ch] -= 1\n else:\n del chars[ch]\n return len(chars) == 0", "title": "" }, { "docid": "53d1cb4948f0aa57074f39b3ae53d8c1", "score": "0.65451443", "text": "def anagram(letter_list):\n results = []\n pat = '.*'.join(sorted(letter_list))\n for word in WORDS:\n sorted_word = ''.join(sorted(word))\n if re.search(pat, sorted_word):\n for letter in letter_list:\n pattern = re.compile('('+letter+')')\n word = pattern.sub(lambda pat: pat.group(1).upper(), word, 1)\n results.append(word)\n return map_results(sorted(results, key=len, reverse=False))", "title": "" }, { "docid": "22560b579d053852ff8090a7e0b814c5", "score": "0.65129626", "text": "def find_palingrams(words):\n palingrams = []\n for word in words:\n if len(word) > 1:\n for i in range(1, len(word)):\n if word[i:] == word[i:][::-1] and word[:i][::-1] in words:\n palingrams.append(\" \".join([word, word[:i][::-1]]))\n if word[:i] == word[:i][::-1] and word[i:][::-1] in words:\n palingrams.append(\" \".join([word[i:][::-1], word]))\n return palingrams", "title": "" }, { "docid": "41cf9e576562ae2ff2a8317f9b97c63f", "score": "0.65116256", "text": "def is_permutation(text1, text2):\n return sorted(list(text1)) == 
sorted(list(text2))", "title": "" }, { "docid": "0da750bc8038cf8a488f860c9a04a500", "score": "0.6490802", "text": "def find_anagrams(word, dictionary):\n permutations = itertools.permutations(word, len(word))\n possible = (''.join(x) for x in permutations)\n found = {word for word in possible if word in dictionary}\n return list(found)", "title": "" }, { "docid": "673f5585b61416925a5bc2bfdc982e98", "score": "0.6469875", "text": "def is_anagram_of_pal(word):\n\n d = {}\n for char in word:\n if char not in d:\n d[char] = 1\n else:\n d[char] += 1\n\n check = 0\n for amt in d.values():\n if amt % 2 != 0:\n check += 1\n if check > 1:\n return False\n\n return True", "title": "" }, { "docid": "e5d9ce763ba52f0b30029880c494d4dc", "score": "0.6450239", "text": "def is_anagram_prime(s1, s2):\n # check the lengths\n if len(s1) != len(s2):\n return False\n\n s1_hash = hash_string(s1)\n s2_hash = hash_string(s2)\n\n if s1_hash == s2_hash:\n return True\n else:\n return False", "title": "" }, { "docid": "dcd541bb37d23fe3e6c6e991114d8a7b", "score": "0.6449905", "text": "def anagram(s1, s2):\n map1 = buildMap(s1)\n map2 = buildMap(s2)\n\n diff_cnt = 0\n for key in map2.keys():\n if key not in map1:\n diff_cnt += map2[key]\n else:\n diff_cnt += max(0, map2[key]-map1[key])\n\n for key in map1.keys():\n if key not in map2:\n diff_cnt += map1[key]\n else:\n diff_cnt += max(0, map1[key]-map2[key])\n\n return diff_cnt", "title": "" }, { "docid": "2b332c2b0ea7f9f64c945d0a17715ec4", "score": "0.644906", "text": "def character_mapping(string_a, string_b):\n\n\tdico_a = {}\n\tdico_b = {}\n\n\tfor char in string_a:\n\t\tdico_a[char] = dico_a[char]+1 if char in dico_a else 1\n\tfor char in string_b:\n\t\tdico_b[char] = dico_b[char]+1 if char in dico_b else 1\n\n\tcount_a = sorted(dico_a.values(), reverse=True)\n\tcount_b = sorted(dico_b.values(), reverse=True)\n\n\treturn count_a == count_b and len(string_a) == len(string_b)", "title": "" }, { "docid": "fcc30f443c4e3c415a91b17b0c869055", "score": "0.64419085", "text": "def isScramble(self, s1, s2):\n memo = {}\n \n def helper(s1, s2):\n if not s1 or not s2: return False\n if s1 == s2: return True\n if sorted(s1) != sorted(s2): return False\n \n if (s1, s2) in memo: return memo[s1,s2]\n \n ans = False\n for i in range(len(s1)):\n if ((helper(s1[:i], s2[:i]) and helper(s1[i:], s2[i:])) or\n (helper(s1[:i], s2[-i:]) and helper(s1[i:], s2[:-i]))):\n ans = True\n break\n\n memo[(s1, s2)] = ans\n return ans\n \n return helper(s1, s2)", "title": "" }, { "docid": "710e7caa025bc4300a5fa6ea1147cd72", "score": "0.64121205", "text": "def isAnagram(s, p):\n counter = {}\n for c in s:\n counter[c] = counter.get(c, 0) + 1\n for c in p:\n if c not in counter:\n return False\n counter[c] -= 1\n if counter[c] < 0:\n return False\n return all(x == 0 for x in counter.values())", "title": "" }, { "docid": "5a66679212748537fd4c8a62c79700b5", "score": "0.63986754", "text": "def findAnagrams(self, s: str, p: str) -> List[int]:\n \n # SOLUTION 1 - HASH MAP - Time Complexity - O(n), Space Complexity - \n \"\"\"\n s_map = [0]*26\n p_map = [0]*26\n \n if len(p) > len(s):\n return []\n \n for i in p:\n p_map[ord(i) - 97] += 1\n \n output = []\n \n for i in range (0,len(s)):\n s_map[ord(s[i]) - 97]+= 1\n \n if i >= len(p):\n s_map[ (ord(s[i - len(p)])) - 97]-=1\n \n if (s_map == p_map):\n output.append(i - len(p) + 1)\n \n return output\n \"\"\"", "title": "" }, { "docid": "56be166d55717b4b30b36ebce2defabf", "score": "0.63590467", "text": "def is_anagram_of_palindrome(word):\n\n # Check to see if 
this is NOT a anogram first\n \n if len(word) == 1:\n return True\n\n\n elif word == word[::-1]:\n return False\n\n else:\n word_length = len(word)\n letter_dict = {letter:word.count(letter) for letter in set(word)}\n num_of_letter_set = set(letter_dict.values())\n\n # if even number word jas all letter in pairs return True\n if word_length % 2 == 0:\n for number in num_of_letter_set:\n if number % 2 == 0:\n return True\n else:\n return False\n # if odd number word has all letters in pairs except for one return True\n else:\n count=[number for number in num_of_letter_set if number % 2 != 0]\n if len(count)==1:\n return True\n else:\n return False", "title": "" }, { "docid": "d94bc89a7e084a0cc0cc9ac563bb2861", "score": "0.6342409", "text": "def pal_anagram(string):\n letter_count = {}\n\n for char in string:\n count = letter_count.get(char, 0)\n letter_count[char] = count + 1\n odd = False\n # iterate over the values\n # if a value is 1, set is_pal to True\n for v in letter_count.values():\n if count % 2 != 0:\n if odd:\n return False\n odd = True\n return True", "title": "" }, { "docid": "79a99ca5fe76e44c47f360e0b97cd875", "score": "0.62968653", "text": "def isomorphic(astring1, astring2):\n\n if len(astring1) != len(astring2):\n return \"'{0}' and '{1}' are not isomorphic\".format(astring1, astring2)\n else:\n d = {}\n for (idx, char) in enumerate(astring1):\n if char not in d:\n if astring2[idx] not in d.values():\n d[char] = astring2[idx]\n else:\n return \"'{0}' and '{1}' are not isomorphic\".format(astring1, astring2)\n elif astring2[idx] != d[char]:\n return \"'{0}' and '{1}' are not isomorphic\".format(astring1, astring2)\n \n return \"'{0}' and '{1}' are isomorphic because we can map: {2}\".format(astring1, astring2, list(d.items()))", "title": "" }, { "docid": "967177702a36d96be1c780a80089cfb6", "score": "0.6283468", "text": "def check_permutation(str1, str2):\n if len(str1) != len(str2):\n return False\n\n d = defaultdict(int)\n\n for k in str1:\n d[k] += 1\n\n for k in str2:\n if k not in d or d[k] == 0:\n return False\n else:\n d[k] -= 1\n\n return True", "title": "" }, { "docid": "119389bb93e1841b895360df29727d2d", "score": "0.62673914", "text": "def solution2(string1, string2):\n if len(string1)!=len(string2):\n return False\n # Add all characters from string one to \n string1_chars = {}\n for c in string1:\n if c in string1_chars:\n string1_chars[c]+=1\n else:\n string1_chars[c]=1\n # check whether exact characetrs appear for exact number of times\n for c in string2:\n if c not in string1_chars:\n return False\n else:\n string1_chars[c]-=1\n if string1_chars[c]<0:\n # If count goes below zero, c is appearing in string2 more times.\n return False\n return True", "title": "" }, { "docid": "d0b9051957323994c5817be7359e714d", "score": "0.6261738", "text": "def animal_cracker(str1, str2):\n if str1[0] == str2[0]:\n print(f'{str1} and {str2} both have the same beginning letter')\n else:\n print(f'{str1} and {str2} does not have the same beginning letter')", "title": "" }, { "docid": "3c5d27016e01a2dc8f3d178029469e1c", "score": "0.6255924", "text": "def is_permutation(a, b):\r\n \r\n return sorted(list(str(a))) == sorted(list(str(b)))", "title": "" }, { "docid": "bf8e191be119150af68878c42a40622f", "score": "0.62215877", "text": "def find_anagrams(dictionary):\n t = []\n for anagram in dictionary.values():\n if len(anagram) > 1:\n t.append([len(anagram), anagram])\n return t", "title": "" }, { "docid": "a85f75d8c69919fd5b8201b0764641f2", "score": "0.62161195", "text": "def 
is_perm(s1: str, s2: str) -> bool:\n if len(s1) != len(s2):\n return False\n counts = defaultdict(int)\n for c in s1:\n counts[c] += 1\n for c in s2:\n if counts[c] == 0:\n return False\n counts[c] -= 1\n return True", "title": "" }, { "docid": "d63a64892e85a554b942bb5463b37a52", "score": "0.6215825", "text": "def isPermutation(str1, str2):\n\n if sorted(str1) == sorted(str2):\n\n return True\n\n return False", "title": "" }, { "docid": "1eb0d731129bae2453132e2221705598", "score": "0.6203033", "text": "def is_anagram_counter(s, t):\n return len(s) == len(t) and Counter(s) == Counter(t)", "title": "" }, { "docid": "fb24bd6e8fea0acbb0ea15e8d143fc8e", "score": "0.61958146", "text": "def anagram():\n word = sorted(input(\"Enter first word: \").lower())\n word2 = sorted(input(\"Enter second word: \").lower())\n\n if word == word2:\n print(\"Match\")\n return 0\n print(\"No Match\")", "title": "" }, { "docid": "839ead8a9cc8f3bd4bf015a6f178dfd2", "score": "0.61946136", "text": "def handle_same_string(str1, alist):\n\n if str1 in alist:\n for i in range(1, 1000):\n str1_ = str1 + ' (%i)' % i\n if str1_ not in alist:\n return str1_\n else:\n return str1", "title": "" }, { "docid": "d6930b2ff73bba19e1bde4f60984923f", "score": "0.6187312", "text": "def is_perm2(str1, str2):\n if len(str1) != len(str2):\n return False\n str1_count = Counter(str1)\n for char in str2:\n if not str1_count[char]:\n return False\n str1_count[char] -= 1\n return True", "title": "" }, { "docid": "879f5df851eca179388f917b35a03c6b", "score": "0.61787534", "text": "def is_perm1(str1, str2):\n if len(str1) != len(str2):\n return False\n return Counter(str1) == Counter(str2)", "title": "" }, { "docid": "0ce7b82325c0eb63b49f9306e6d7af16", "score": "0.61758435", "text": "def anagrams(self) -> None:\n # Generate an instance of StdIn.\n reader: StdIn = StdIn()\n\n # Read stdin and create set of words for anagrams.\n words: list[str] = reader.string()\n words_set = set(\" \".join(words).split())\n\n # If the input is empty, just pass in the empty set.\n if len(words_set) == 0:\n words_set = {\"\"}\n\n # Call the function.\n result: list[list[str]] = anagrams(words_set)\n\n # Print results to stdout.\n print(result)", "title": "" }, { "docid": "1b16b307ecdf3d3cea8603043a3651ef", "score": "0.6128955", "text": "def is_anagram(word: str, candidate: str) -> bool:\n # Make the strings lowercase\n word = word.lower()\n candidate = candidate.lower()\n\n # If the length of the word and the candidate is not the same, the candidate\n # cannot be an anagram if word. Same yields if word and candidate is the\n # same string.\n if len(word) != len(candidate) or word == candidate:\n return False\n\n # Check that the count of each character is the same in both word and\n # candidate. 
If this is the case, the candidate is an anagram of word.\n for char in candidate:\n if word.count(char) != candidate.count(char):\n return False\n\n return True", "title": "" }, { "docid": "dc7dd5107ac01f51ebee63fc6263c577", "score": "0.6126914", "text": "def check_perm(string_one, string_two): \n if string_one and string_two:\n if len(string_one) != len(string_two): return False\n \n for character in string_one:\n if character not in string_two:\n return False\n return True \n raise ValueError('empty string input given')", "title": "" }, { "docid": "46441cc777b06cc3ec4201c2eee89fa6", "score": "0.61115026", "text": "def all_anagrams_dict(words_list):\r\n anagrams_dict = collections.defaultdict(lambda :[])\r\n\r\n for word in words_list:\r\n if len(word) == 1:\r\n continue\r\n anagram_key = ''.join(sorted(word))\r\n anagrams_dict[anagram_key].append(word)\r\n\r\n true_anagrams_dict = \\\r\n {key : value for key, value in anagrams_dict.items() if len(value) > 1}\r\n\r\n return true_anagrams_dict", "title": "" }, { "docid": "d220b776f6ae8d76041d12e32170e54a", "score": "0.61102974", "text": "def is_anagram_hashmap(s, t):\n # basic case\n if len(s) != len(t):\n return False\n\n # construct counter\n counter = [0 for _ in range(256)]\n for i in range(len(s)):\n counter[ord(s[i])] += 1\n counter[ord(t[i])] -= 1\n\n # cost more time (maybe ord())\n # if len(s) >= 256:\n # for i in range(len(counter)):\n # if counter[i] != 0:\n # return False\n # else:\n # for i in range(len(s)):\n # if counter[ord(s[i])] != 0:\n # return False\n\n # compare counter\n for i in range(len(counter)):\n if counter[i] != 0:\n return False\n\n return True", "title": "" }, { "docid": "36627fa26aa7c0d48fe9f54b5cbd3f54", "score": "0.6027305", "text": "def hamming(str1, str2):\n return sum(a!=b and not( a=='N' or b=='N' ) for a,b in zip(str1, str2))", "title": "" }, { "docid": "6c4abc7b12dacb2e389974940c4f7237", "score": "0.6025042", "text": "def isPairing(lst1, lst2):\n\n # PUT YOUR IMPLEMENTATION HERE\n bool_return=False\n if lst1.size==lst2.size and (lst1.size==0 and lst2.size==0):\n bool_return=True\n elif lst1.size==lst2.size and (lst1.size>0 and lst2.size>0):\n cursor1=lst1.head\n cursor2=lst2.head\n number_of_matches=1\n while cursor1.next != None:\n if is_nucleotide(cursor1.data) and is_nucleotide(cursor2.data):\n if is_match(cursor1.data, cursor2.data):\n number_of_matches+=1\n cursor1=cursor1.next\n cursor2=cursor2.next\n if number_of_matches==(lst1.size):\n bool_return=True\n return bool_return", "title": "" }, { "docid": "a9f89ecd91a280afbee1fc6774f65b15", "score": "0.60222733", "text": "def gen_anagram(anagram, possible_words):\n for word in possible_words:\n x = check_word(anagram, word)\n if x == True:\n yield [word]\n elif x:\n for y in gen_anagram(x, possible_words):\n yield [word] + y", "title": "" }, { "docid": "784fee70fd7e11049b7714a4fee64384", "score": "0.6014355", "text": "def anagrams_of(string: str):\n if len(string) == 1:\n # if the string is a single character, return a list containing that\n # single character.\n return [string[0]]\n # instantiate an empty placeholder\n collection = []\n # Get all the anagrams of the rest of the string, besides the first\n # character, until the end of the string.\n substring_anagrams = anagrams_of(string[1:])\n\n for substring_anagram in substring_anagrams:\n # now, while looping through this new list of sub-anagrams,\n for index in range(len(substring_anagram)+1):\n # loop through each character in this anagram, insert the first\n # character into a place 
dictated by the index of the sub-anagrams\n anagram = substring_anagram[:index] + \\\n string[0] + substring_anagram[index:]\n # append to our collection\n collection.append(anagram)\n # return the result\n return collection", "title": "" }, { "docid": "3509306e8828c294e84469f521064918", "score": "0.6012629", "text": "def test_sort_by_anagram(self):\n unsorted = [\"a\", \"b\"]\n expected = [\"a\", \"b\"]\n self.assertListEqual(sort_by_anagram(unsorted), expected)\n\n unsorted = [\"c\", \"b\", \"a\"]\n expected = [\"c\", \"b\", \"a\"]\n self.assertListEqual(sort_by_anagram(unsorted), expected)\n\n unsorted = [\"ab\", \"c\", \"ba\"]\n expected = [\"ab\", \"ba\", \"c\"]\n self.assertListEqual(sort_by_anagram(unsorted), expected)\n\n unsorted = [\"cabde\", \"c\", \"ebadc\"]\n expected = [\"cabde\", \"ebadc\", \"c\"]\n self.assertListEqual(sort_by_anagram(unsorted), expected)\n\n unsorted = [\"xyz\", \"xyw\", \"yzx\"]\n expected = [\"xyz\", \"yzx\", \"xyw\"]\n self.assertListEqual(sort_by_anagram(unsorted), expected)\n\n unsorted = [\"xyz\", \"yyy\", \"xzx\", \"yxz\", \"jef\", \"xyz\"]\n expected = [\"xyz\", \"yxz\", \"xyz\", \"yyy\", \"jef\", \"xzx\"]\n self.assertListEqual(sort_by_anagram(unsorted), expected)\n\n unsorted = [\"xyz\", \"yyy\", \"xzx\", \"yxz\", \"yyy\", \"xyz\", \"yyy\"]\n expected = [\"xyz\", \"yxz\", \"xyz\", \"yyy\", \"yyy\", \"yyy\", \"xzx\"]\n self.assertListEqual(sort_by_anagram(unsorted), expected)", "title": "" }, { "docid": "daf0842d41002f7a46178a9e9e3245d9", "score": "0.6002062", "text": "def one_away(str_1, str_2):\n if str_1 == str_2:\n return True\n \n if abs(len(str_1)-len(str_2)) >= 2:\n return False\n\n my_dictionary = {}\n\n for c in str_1:\n if c in my_dictionary:\n my_dictionary[c] += 1\n else:\n my_dictionary[c] = 1\n \n for c in str_2:\n if c in my_dictionary:\n my_dictionary[c] -= 1\n else:\n my_dictionary[c] = 1\n\n if 0 <= sum(my_dictionary.values()) <= 2:\n return True\n else:\n return False", "title": "" }, { "docid": "b4d4548753d6de50180f54380d1a500d", "score": "0.5985047", "text": "def find_anagrams(word_input, file_input):\n\n print \"Finding anagrams for the word '%s' in file '%s'\" % (word_input, file_input.name)\n found = []\n start = time.time()\n\n for word in file_input.readlines():\n if is_anagram(word_input, word):\n found.append(word.strip())\n\n found = set(found)\n time_taken = time.time() - start\n print \"found %s anagrams in %s seconds\" % (len(found), time_taken)\n print \", \".join(found)\n return found", "title": "" }, { "docid": "3a4251bd09ebe973f336aa1e51b88777", "score": "0.5973171", "text": "def find_anagrams(s: str, p: str):\n result_list = []\n p_counter = Counter(p)\n s_counter = Counter(s[:len(p) - 1])\n for i in range(len(p) - 1, len(s)):\n # Include a new character in the window.\n s_counter[s[i]] += 1\n\n # This step is O(1), since there are at most 26 English letters.\n if s_counter == p_counter:\n # Append the starting index.\n result_list.append(i - len(p) + 1)\n\n # Decrease the count of oldest character in the window.\n s_counter[s[i - len(p) + 1]] -= 1\n if s_counter[s[i - len(p) + 1]] == 0:\n # Remove the count if it is zero.\n del s_counter[s[i - len(p) + 1]]\n return result_list", "title": "" }, { "docid": "bebf19ca329c926fd92bab99bb5c16e8", "score": "0.5969723", "text": "def scrabble_helper(word_list, char_set_list):\n alpha_word_list = []\n max_word_len = 0\n # sorts words into tuple: (word with letters in alphabetical order, original word, original position)\n # e.g. 
apple becomes aelpp\n for i in range(len(word_list)):\n if len(word_list[i]) > max_word_len:\n max_word_len = len(word_list[i])\n alpha_word_list.append((singleword_radix_sort(word_list[i]), word_list[i], i))\n\n # Sorts char set list into tuples: (chars in alphabetical order, original char_set_list tileset, original position)\n # Does not include tilesets that are longer than the longest word in word_list\n alpha_char_set_list = []\n for i in range(len(char_set_list)):\n if len(char_set_list[i]) <= max_word_len:\n alpha_char_set_list.append((singleword_radix_sort(char_set_list[i]), char_set_list[i], i))\n\n # Sorts all words into alphabetical order (Each word is currently all the letters in alphabetical order)\n sorted_alpha_word_list = multiword_radix_sort(alpha_word_list, max_word_len)\n\n # The output from multiword_radix_sort is a list of numbers (ascii values of each letter) so this part turns\n # the numbers back into letters\n for i in range(len(sorted_alpha_word_list)):\n word = []\n for j in range(max_word_len):\n if sorted_alpha_word_list[i][0][j] != 0:\n word.append(chr(sorted_alpha_word_list[i][0][j]+96))\n sorted_alpha_word_list[i][0] = ''.join(word)\n\n # Combine all of the same anagrams into one list. E.g. 'apple' and 'aeppl' would combine into one list as they are\n # anagrams of each other\n grouped_anagrams = [[sorted_alpha_word_list[0]]]\n group_counter = 0\n for i in range(1, len(sorted_alpha_word_list)):\n if sorted_alpha_word_list[i][0] == sorted_alpha_word_list[i-1][0]:\n grouped_anagrams[group_counter].append(sorted_alpha_word_list[i])\n else:\n grouped_anagrams.append([sorted_alpha_word_list[i]])\n group_counter += 1\n\n # Prepares lists in a format for output. Just the word without the alphabetical order letters or original position\n output_grouped_anagrams = []\n for i in range(len(grouped_anagrams)):\n group = []\n for j in range(len(grouped_anagrams[i])):\n group.append(grouped_anagrams[i][j][1])\n output_grouped_anagrams.append(group)\n\n # This chunk sorts each list of anagrams into alphabetical order so the final output is in alphabetical order\n for i in range(len(output_grouped_anagrams)):\n if len(output_grouped_anagrams[i]) > 1:\n max_len = len(output_grouped_anagrams[i][0])\n output_grouped_anagrams[i] = multiword_radix_sort2(output_grouped_anagrams[i], max_len)\n\n # This turns the words back into their string representation rather than list of ascii values\n for k in range(len(output_grouped_anagrams[i])):\n word = []\n for j in range(max_len):\n if output_grouped_anagrams[i][k][0][j] != 0:\n word.append(chr(output_grouped_anagrams[i][k][0][j] + 96))\n output_grouped_anagrams[i][k] = ''.join(word)\n\n # Finds the words that can be made using each tileset in char_set_list using binary search\n # Only compares to the groups of anagrams rather than the whole list\n final_output_list = []\n for i in range(len(char_set_list)):\n final_output_list.append([])\n for i in range(len(alpha_char_set_list)):\n index = binary_search_anagram_groups(grouped_anagrams, alpha_char_set_list[i][0])\n if index is not None:\n final_output_list[alpha_char_set_list[i][2]] = output_grouped_anagrams[index]\n return final_output_list", "title": "" }, { "docid": "5b812310b35f8d125873330e53132712", "score": "0.5955871", "text": "def dictAnagrams():\n\n\t# Variables\n\td1 = dict()\n\td2 = dict()\n\n\t# Procesos\n\tfor word in open('words.txt'):\n\t\tw = tuple(sorted(word.strip()))\n\t\tif not w in d1:\n\t\t\td1[w] = 
[word.strip()]\n\t\telse:\n\t\t\td1[w].append(word.strip())\n\n\tfor k, v in d1.iteritems():\n\t\tif len(v) > 1:\n\t\t\td2[k] = v\n\t\n\treturn d2", "title": "" }, { "docid": "30952874439b82ee259cd0192c11c036", "score": "0.59472626", "text": "def exist_matching(s1, s2):\n if (len(s1) != len(s2)):\n return False\n\n mapping = {}\n\n #Check character of s1 and s2 one by one\n for i in range(len(s1)):\n if s1[i] not in mapping.keys():\n mapping[s1[i]] = s2[i]\n else:\n if mapping[s1[i]] != s2[i]:\n return False\n\n return True", "title": "" }, { "docid": "3d3de278452e12d7fa694d94d626ee51", "score": "0.59465146", "text": "def word_in_both(word0, word1):\n mention1 = word0.split(\"_\")\n mention2 = word1.split(\"_\")\n return not set(mention1).isdisjoint(mention2)", "title": "" }, { "docid": "cb5ea1058a2e61994b46b2d412457cce", "score": "0.5941161", "text": "def isPermutation(a, b):\n l1 = [i for i in str(a)]\n l2 = [i for i in str(b)]\n list.sort(l1)\n list.sort(l2)\n return l1 == l2", "title": "" } ]
855b796ec640af7d550f4c3b672d38b6
Checks whether RapidMiner moved a potential label to the right of the attribute list
[ { "docid": "ede05908fcdb8de37184d37b3047135d", "score": "0.63274306", "text": "def check_label(attr, rem_labels):\n attr_len = attr.__len__()\n for item in rem_labels:\n idx_position = attr.index(item)\n if (idx_position / attr_len) > 0.5:\n # valid label present\n return True\n return False", "title": "" } ]
[ { "docid": "ca5b20e23f3723ffef1524b16fed64f4", "score": "0.55617946", "text": "def find_ignore_label(self):\n for key, val in self.index2rel.items():\n if val == self.ign_label:\n self.label2ignore = key\n assert self.label2ignore != -1", "title": "" }, { "docid": "2705f28ba63a93217a547bfb88e466a9", "score": "0.5500105", "text": "def checkLabel( operand, reservedWords):\n\tif operand is None:\n\t\treturn None\n\tfor op in operand:\n\t\tif op not in reservedWords: \n\t\t\treturn op\n\treturn None", "title": "" }, { "docid": "cce4d3d3dcc68c9905a662da61a29034", "score": "0.5454336", "text": "def isLabelValid(label,lc):\n\tif(label in symbolTable and symbolTable[label]==\"label\"):\n\t\tsymbolTable[label] = [\"-\",decimalToBinary(lc)]\n\t\treturn\n\n\tif (label in opcodes.keys() or label == \"END\" or label == \"START\" or label in symbolTable):\n\t\tsys.exit(\"ERROR at line \"+str(lc)+\": Invalid label \"+label+\" .Check documentation and try again.\")", "title": "" }, { "docid": "d9fbb80268b9e8d703997807083f1691", "score": "0.53601736", "text": "def test_label(self, node):\n try:\n label = node.getAttribute('label')\n except AttributeError:\n return\n\n if label is None:\n return\n\n if ':' not in label:\n return\n\n log.warning('No colons in labels, please: %s.', label)\n\n label = clean_label(label)\n node.setAttribute('label', label)\n node.argSource = label\n\n log.info('Replacement label: %s.', label)", "title": "" }, { "docid": "054a1e1c7e9d1da16c7933a418cbdeae", "score": "0.53494775", "text": "def bad_label(node):\n\n if node.node_type == struct.Node.REGTEXT:\n for i, l in enumerate(node.label):\n if i == 0 and not l.isdigit():\n return True\n elif i == 1 and not l.isdigit():\n return True\n elif i > 1 and l not in p_levels[i - 2]:\n return True\n return False", "title": "" }, { "docid": "24d4074fa668e11d09f5ca541b648eb3", "score": "0.53302944", "text": "def test_clean_user_labels_4(self):\n label1 = len(TEST_ARTICLE['labels'][2]) * [0]\n label1[-1] = 1\n label2 = len(TEST_ARTICLE['labels'][3]) * [0]\n label2[0] = 1\n labels = label1 + label2\n relative_authors = [1, 2, 3]\n absolute_authors = [25, 26, 27]\n \" As an extra sentence is loaded above it\"\n data = clean_user_labels(TEST_ARTICLE['sentence_ends'], [3], 2, 3, labels, relative_authors)\n self.assertEquals(len(data), 2)\n added_1 = data[0]\n added_2 = data[1]\n self.check_clean_keys(added_1)\n self.assertEquals(added_1['index'], 3)\n self.assertEquals(added_1['labels'], label2)\n self.assertEquals(added_1['authors'], absolute_authors)\n self.check_clean_keys(added_2)\n self.assertEquals(added_2['index'], 2)\n self.assertEquals(added_2['labels'], label1)\n self.assertEquals(added_2['authors'], absolute_authors)", "title": "" }, { "docid": "19e03b9fbae6f56675ee695c4dbf90e1", "score": "0.5259638", "text": "def test_clean_user_labels_7(self):\n label1 = len(TEST_ARTICLE['labels'][2]) * [0]\n label1[-1] = 1\n label2 = len(TEST_ARTICLE['labels'][3]) * [0]\n label2[0] = 1\n labels = label1 + label2\n relative_authors = [1, 2, 3]\n absolute_authors = [25, 26, 27]\n \" As an extra sentence is loaded above it\"\n data = clean_user_labels(TEST_ARTICLE['sentence_ends'], [2], 2, 3, labels, relative_authors)\n self.assertEquals(len(data), 2)\n added_1 = data[0]\n added_2 = data[1]\n self.check_clean_keys(added_1)\n self.assertEquals(added_1['index'], 2)\n self.assertEquals(added_1['labels'], label1)\n self.assertEquals(added_1['authors'], absolute_authors)\n self.check_clean_keys(added_2)\n self.assertEquals(added_2['index'], 3)\n 
self.assertEquals(added_2['labels'], label2)\n self.assertEquals(added_2['authors'], absolute_authors)", "title": "" }, { "docid": "06d3c343d1902afdba6857236634878f", "score": "0.52559984", "text": "def test_ldr_label(self):\n self.feed('ldr r5, lab1')\n self.feed('ldr r11, lab1')\n self.feed('ldr r10, lab1')\n self.feed('lab1:')\n self.feed('dd 0x12345566')\n self.check('04509fe5 00b09fe5 04a01fe5 66553412')", "title": "" }, { "docid": "0a224fda47bca4ed25409d83df4deaa4", "score": "0.521274", "text": "def _post_process_label_attributes(self):\n name_int = 'classlabels_int64s' if hasattr(\n self, 'classlabels_int64s') else 'classlabels_ints'\n if (hasattr(self, 'classlabels_strings') and\n len(self.classlabels_strings) > 0): # pylint: disable=E0203\n if hasattr(self, name_int) and len(getattr(self, name_int)) != 0:\n raise RuntimeError( # pragma: no cover\n f\"'{name_int}' must be empty if 'classlabels_strings' is not.\")\n setattr(self, name_int, numpy.arange(len(self.classlabels_strings), # pylint: disable=E0203\n dtype=numpy.int64))\n self._classlabels_int64s_string = self.classlabels_strings # pylint: disable=E0203\n self.classlabels_strings = numpy.empty(\n shape=(0, ), dtype=numpy.str_)\n else:\n self._classlabels_int64s_string = None", "title": "" }, { "docid": "972325e7170db3be47fff53f92056382", "score": "0.5205399", "text": "def test_clean_user_labels_5(self):\n label1 = TEST_ARTICLE['labels'][1]\n label2 = TEST_ARTICLE['labels'][2]\n label3 = TEST_ARTICLE['labels'][3]\n labels = label1 + label2 + label3\n absolute_authors = TEST_ARTICLE['authors'][3]\n relative_authors = TEST_ARTICLE['relative_authors'][3]\n \" As an extra sentence is loaded above it\"\n data = clean_user_labels(TEST_ARTICLE['sentence_ends'], [3], 1, 3, labels, relative_authors)\n self.assertEquals(len(data), 1)\n added_1 = data[0]\n self.check_clean_keys(added_1)\n self.assertEquals(added_1['index'], 3)\n self.assertEquals(added_1['labels'], label3)\n self.assertEquals(added_1['authors'], absolute_authors)", "title": "" }, { "docid": "80af8647130087bf28d33143fab7d84a", "score": "0.5159413", "text": "def relabelstwo(self,another,selflabel,anotherlabel,reserved=[]):\n newlabel = 0\n exist = 1\n while (exist):\n newlabel = newlabel + 1\n exist = 0\n if (self.usesindexlabel(newlabel)):\n exist = 1\n if (another.usesindexlabel(newlabel)):\n exist = 1\n if (newlabel in reserved):\n exist = 1\n self.relabelsone(selflabel,newlabel)\n another.relabelsone(anotherlabel,newlabel)", "title": "" }, { "docid": "48db480b7fe5e21ed6846414065e9b69", "score": "0.515095", "text": "def _adjust_labels(labels, new_minimum):\n labels[labels != -1] = labels[labels != -1] + new_minimum", "title": "" }, { "docid": "bb4d12f6876371360d96b29693988927", "score": "0.51014555", "text": "def testsymlabels(self):\r\n # A calculation without symmetry, meaning it belongs to the C1 point\r\n # group, only has the `A` irreducible representation.\r\n sumwronglabels = sum(x not in {'A'} for x in self.data.mosyms[0])\r\n assert sumwronglabels == 0", "title": "" }, { "docid": "6bbff8a3b836ba0f27353286067835a2", "score": "0.5094844", "text": "def AssertNotDeprecatedAttribute(name, value, filename):\n msg = None\n if name in ATTRIBUTES_TO_MAP_REVERSED:\n msg = '{0} should use {1} instead of {2}'.format(filename,\n ATTRIBUTES_TO_MAP_REVERSED[name], name)\n elif name in GRAVITY_ATTRIBUTES and ('left' in value or 'right' in value):\n msg = '{0} should use start/end instead of left/right for {1}'.format(\n filename, name)\n\n if msg:\n msg += ('\\nFor 
background, see: http://android-developers.blogspot.com/'\n '2013/03/native-rtl-support-in-android-42.html\\n'\n 'If you have a legitimate need for this attribute, discuss with '\n '[email protected] or [email protected]')\n raise Exception(msg)", "title": "" }, { "docid": "97143640b10840f900e73d99121e56ed", "score": "0.50832254", "text": "def isLabelCorrect(self, token, label):\n if label != 'other':\n return token.hasAnnotation(label) \n else:\n for mType in self.entityTypes:\n if token.hasAnnotation(mType):\n return False # false negative. token should have this label\n return True", "title": "" }, { "docid": "8b93b84f6ae097c000f066d2135f65ec", "score": "0.5081603", "text": "def impossible_label(n, amended_labels):\n test = n.label_id().startswith\n return not any(filter(test, amended_labels))", "title": "" }, { "docid": "220efe17cba5cb6510012d8324c9ca38", "score": "0.5081378", "text": "def _label_names_correct(self, labels: LabelsType) -> bool:\n for k, v in labels.items():\n # Check reserved labels\n if k in RESTRICTED_LABELS_NAMES:\n raise ValueError(\"Invalid label name: {}\".format(k))\n\n if self.kind == MetricsTypes.histogram:\n if k in (\"le\",):\n raise ValueError(\"Invalid label name: {}\".format(k))\n\n # Check prefixes\n if any(k.startswith(i) for i in RESTRICTED_LABELS_PREFIXES):\n raise ValueError(\"Invalid label prefix: {}\".format(k))\n\n return True", "title": "" }, { "docid": "32c6faadbf0e26589d2b718aa67bdc25", "score": "0.50737655", "text": "def check_attribute(targattr):\n # This is used if the attribute is optional (or if error checking is handled by the calling\n # routine); if an error needs to be raised, use get_attribute()\n if (targattr in PETRglobals.AttributeList):\n return (\n PETRglobals.AttributeList[\n PETRglobals.AttributeList.index(targattr) + 1]\n )\n else:\n return \"\"", "title": "" }, { "docid": "350297ee4a405cd91214dcdc3aa940a5", "score": "0.50167406", "text": "def test_remove_names_wo_target(self):\n\n self.testInst['ICON_L27_Blurp'] = self.testInst['dummy1']\n gen.remove_leading_text(self.testInst)\n\n # Check variables unchanged\n assert len(self.testInst['ICON_L27_Blurp']) == self.npts\n\n # Check other names untouched\n assert len(self.testInst['dummy1']) == self.npts\n return", "title": "" }, { "docid": "b7add215253961d056b74f6071142e68", "score": "0.49919924", "text": "def test_clean_user_labels_6(self):\n label1 = TEST_ARTICLE['labels'][0]\n label2 = TEST_ARTICLE['labels'][1]\n label3 = TEST_ARTICLE['labels'][2]\n labels = label1 + label2 + label3\n absolute_authors = TEST_ARTICLE['authors'][0]\n relative_authors = TEST_ARTICLE['relative_authors'][0]\n data = clean_user_labels(TEST_ARTICLE['sentence_ends'], [0], 0, 2, labels, relative_authors)\n self.assertEquals(len(data), 1)\n data = data[0]\n self.check_clean_keys(data)\n self.assertEquals(data['index'], 0)\n self.assertEquals(data['labels'], label1)\n self.assertEquals(data['authors'], absolute_authors)", "title": "" }, { "docid": "e16c6b3dbd4dce36f3188aefbf1fa788", "score": "0.49873027", "text": "def clean_labels(self):\r\n self.label_df = self.label_df[['x','y','z',self.atlas_label]]", "title": "" }, { "docid": "0a4a252f1d3a56741f5474a14367d7dc", "score": "0.4969435", "text": "def test_clean_user_labels_3(self):\n label1 = TEST_ARTICLE['labels'][2]\n label2 = TEST_ARTICLE['labels'][3]\n labels = label1 + label2\n absolute_authors = [24, 25]\n relative_authors = [0, 1]\n data = clean_user_labels(TEST_ARTICLE['sentence_ends'], [3], 2, 3, labels, relative_authors)\n 
self.assertEquals(len(data), 1)\n data = data[0]\n self.check_clean_keys(data)\n self.assertEquals(data['index'], 3)\n self.assertEquals(data['labels'], label2)\n self.assertEquals(data['authors'], absolute_authors)", "title": "" }, { "docid": "b9c15bd5d514deb6ad436c9a506dca7c", "score": "0.49666777", "text": "def clean_attrib(self, current):\n if \"eos\" in current.attrib:\n current.attrib.pop(\"eos\")\n if \"rule\" in current.attrib:\n current.attrib.pop(\"rule\")", "title": "" }, { "docid": "4fa475596f602dab6a24e78342d0c34a", "score": "0.4955316", "text": "def parse_as_label(self, line):\n label = line[1:-1]\n ASMTools.check_if_valid_symbol(label)\n if label in self.symbol_table:\n print(\"Label '{}' occurs in 2 places!\".format(label))\n raise Exception(\"You can't have the same label in more than one place!\")\n self.symbol_table[label] = ASMTools.as_bin(self.line_count)", "title": "" }, { "docid": "c52fafb213ffb817ec6b11a3c932b586", "score": "0.49288017", "text": "def assignLabel(self, token, label): \n if label != 'other' and self.safeToLabelNumber(token, label):\n # only one label\n token.addLabel(label)", "title": "" }, { "docid": "02b49c1858c7f38e5fd2bd5bbd7a64d3", "score": "0.49187744", "text": "def verify_attribute(cls, line):\n if len(line) < 3: # checks if input has an attr name\n print(\"** attribute name missing **\")\n return False\n elif len(line) < 4: # checks if attr name has a value\n print(\"** value missing **\")\n return False\n return True", "title": "" }, { "docid": "40c0854a320efc7b8aea3e2d67fe1d21", "score": "0.4917328", "text": "def check_for_title_attr(attrs):", "title": "" }, { "docid": "25996a3c0563978f0c31feb84a40d578", "score": "0.49022847", "text": "def _check_model_attributes(self):\n check_model_attributes(self.attr)", "title": "" }, { "docid": "7661ca02a40dc09e113a4ea41912e3af", "score": "0.48906183", "text": "def test_check_label_validity_1(self):\n labels = [2]\n self.assertFalse(check_label_validity(labels))\n labels = [-1]\n self.assertFalse(check_label_validity(labels))\n labels = 10 * [0] + [2]\n self.assertFalse(check_label_validity(labels))\n labels = 10 * [0] + [-1]\n self.assertFalse(check_label_validity(labels))\n labels = [2] + 10 * [0]\n self.assertFalse(check_label_validity(labels))\n labels = [-1] + 10 * [0]\n self.assertFalse(check_label_validity(labels))\n labels = 10 * [1] + [2]\n self.assertFalse(check_label_validity(labels))\n labels = 10 * [1] + [-1]\n self.assertFalse(check_label_validity(labels))\n labels = [1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0]\n self.assertFalse(check_label_validity(labels))\n labels = [0, 0, 0, 0, 2, 1, 1, 1, 1, 1, 1]\n self.assertFalse(check_label_validity(labels))\n labels = [1, 1, 1, 1, 1, -1, 0, 0, 0, 0, 0]\n self.assertFalse(check_label_validity(labels))\n labels = [0, 0, 0, 0, -1, 1, 1, 1, 1, 1, 1]\n self.assertFalse(check_label_validity(labels))\n labels = [0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1]\n self.assertFalse(check_label_validity(labels))\n labels = [1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0]\n self.assertFalse(check_label_validity(labels))\n labels = [1, 0, 1, 0, 0, 0]\n self.assertFalse(check_label_validity(labels))\n labels = [0, 0, 1, 0, 0, 1]\n self.assertFalse(check_label_validity(labels))\n labels = [1, 0, 0, 0, 0, 1]\n self.assertFalse(check_label_validity(labels))", "title": "" }, { "docid": "efc1ae8fe787bde3178cc5aeb70c4dd8", "score": "0.4872168", "text": "def validate_attributes(self):\n return True", "title": "" }, { "docid": "931ae7ebaf3f8be5bddde8180574d868", "score": "0.48660785", "text": "def 
normalisesym(self, label):\r\n pass", "title": "" }, { "docid": "091441479c7ba5b4752c53d5faad049c", "score": "0.4863897", "text": "def test_remove_prefix_w_bad_target(self):\n\n self.testInst['ICON_L27_Blurp'] = self.testInst['dummy1']\n\n testing.eval_bad_input(gen.remove_leading_text, ValueError,\n 'target must be a string or list of strings',\n input_args=[self.testInst],\n input_kwargs={'target': 17.5})\n return", "title": "" }, { "docid": "209c8b1683ee04ac860343259c542214", "score": "0.4827385", "text": "def fix_abundance_labels(output_consensus_fp, filtered_consensus_fp):\n \n consensus_f = open(output_consensus_fp, \"U\")\n \n filtered_f = open(filtered_consensus_fp, \"w\")\n \n for label, seq in MinimalFastaParser(consensus_f):\n fasta_label = label.split()[0]\n size = \"size=\" + label.split('size=')[1].replace(';', '')\n final_label = \"%s;%s\" % (fasta_label, size)\n filtered_f.write(\">%s\\n%s\\n\" % (final_label, seq))\n \n consensus_f.close()\n filtered_f.close()", "title": "" }, { "docid": "7ce1c33274978481fd384898ad7799a5", "score": "0.48120967", "text": "def _populate_label2idx(self):\n self._label_to_idx = {PAD_token: 2, EOS_token: 1, SOS_token: 0,\n UNK_token: 3}\n self._idx_to_label = {2: PAD_token, 1: EOS_token, 0: SOS_token,\n 3: UNK_token}\n\n current = 4\n for line in self.__tags:\n for tag in line:\n if tag not in self._label_to_idx:\n self._label_to_idx[tag] = current\n self._idx_to_label[current] = tag\n current += 1\n print(\"There are totally {} unique labels in this dataset\".\n format(len(self._label_to_idx) - 4))", "title": "" }, { "docid": "d762d7ee4976f3ccd3e4c1727e0a5f87", "score": "0.48078048", "text": "def test_remove_names_w_target(self):\n\n self.testInst['ICON_L27_Blurp'] = self.testInst['dummy1']\n gen.remove_leading_text(self.testInst, target='ICON_L27')\n\n # Check prepended text removed\n assert len(self.testInst['_Blurp']) == self.npts\n\n # Check other names untouched\n assert len(self.testInst['dummy1']) == self.npts\n\n # Check prepended text removed from metadata\n assert '_Blurp' in self.testInst.meta.keys()\n return", "title": "" }, { "docid": "b1d705ae408c1dee47fe0ffb280d9ee8", "score": "0.48038578", "text": "def relabels(self,oldlabel,newlabel):\n for nindex in range(len(self.indexes)):\n index = self.indexes[nindex]\n if (index.label == oldlabel):\n self.indexes[nindex].label = newlabel\n if ((self.type == \"i\") or (self.type == \"j\")):\n parity = self.sortindexes()", "title": "" }, { "docid": "8b6b27a38f57464358c20b39f9ed6466", "score": "0.4793339", "text": "def test_additional_shipping_label(self):\n self.check_attribute(\n \"additional_shipping_label\", self.PRODUCT_DATA[\"AdditionalShippingLabel\"]\n )", "title": "" }, { "docid": "dfc52ebd2eda3f1ade3cd5ac8f556e90", "score": "0.4785717", "text": "def _clean_labels(\n self, ends: Tuple[int, ...], rule: AbstractRule\n ) -> Tuple[int, ...]:\n cleaned_ends = []\n for comb_class, child_label in zip(rule.children, ends):\n if rule.possibly_empty and self.classdb.is_empty(comb_class, child_label):\n logger.debug(\"Label %s is empty.\", child_label)\n self.searcher.classqueue.set_stop_yielding(child_label)\n continue\n cleaned_ends.append(child_label)\n return tuple(sorted(cleaned_ends))", "title": "" }, { "docid": "e6d76561468f67dcf4b65008553c1519", "score": "0.4782487", "text": "def check(node):\n if node.label[-1] == label_last:\n return node", "title": "" }, { "docid": "960d2821493635e0441b7dd87f924ffc", "score": "0.47747582", "text": "def cAddLostAttr(attrType, state, *args):\n \n 
attrList = []\n attrList = cmds.textField('tfAttr', tx=1, q=1).split()\n mySel = cmds.ls(os=1, type='transform')\n\n for attrName in attrList:\n for sel in mySel:\n print attrName\n cAddAttrib(sel, attrType, attrName, 'x', 'x', 'x', state)", "title": "" }, { "docid": "1379cf309197ab37ae516070667b8e58", "score": "0.4769852", "text": "def test_remove_names_w_target_list(self):\n\n self.testInst['ICON_L27_Blurp'] = self.testInst['dummy1']\n self.testInst['ICON_L23_Bloop'] = self.testInst['dummy1']\n gen.remove_leading_text(self.testInst,\n target=['ICON_L27', 'ICON_L23_B'])\n\n # Check prepended text removed\n assert len(self.testInst['_Blurp']) == self.npts\n assert len(self.testInst['loop']) == self.npts\n\n # Check other names untouched\n assert len(self.testInst['dummy1']) == self.npts\n\n # Check prepended text removed from metadata\n assert '_Blurp' in self.testInst.meta.keys()\n assert 'loop' in self.testInst.meta.keys()\n return", "title": "" }, { "docid": "3e77156eba77ee49e011c2599c3a7d56", "score": "0.4762835", "text": "def attribute_text_label(node, current_word):\n node.text = normalize_string(current_word)\n node.text = node.text.strip(\" \")\n node.udepth = 1\n if len(node.text) > 0 and node.text[0].isdigit():\n split_sent = node.text.split(\" \", 1)\n label = split_sent[0]\n if len(split_sent) > 1:\n text = split_sent[1]\n node.text = text\n\n if all(c.isdigit() for c in label):\n node.label = int(label)\n else:\n text = label + \" \" + text\n node.text = text\n\n if len(node.text) == 0:\n node.text = None", "title": "" }, { "docid": "987241fc19d1cc33d8e9f40f94c5d68f", "score": "0.4759794", "text": "def match(self, label):\n x = self.init\n x_new = self.acpt\n change = []\n\n for r in range(self.robot):\n flag = True\n # the line connecting two points crosses an obstacle\n for (obs, boundary) in iter(self.ts['obs'].items()):\n if LineString([Point(x[0][r]), Point(x_new[0][r])]).intersects(boundary):\n change.append(r)\n flag = False\n break\n\n if not flag:\n continue\n\n for (region, boundary) in iter(self.ts['region'].items()):\n if LineString([Point(x[0][r]), Point(x_new[0][r])]).intersects(boundary) \\\n and region + '_' + str(r + 1) != label[r] \\\n and region + '_' + str(r + 1) != self.tree.nodes[x]['label'][r]:\n\n change.append(r)\n return change", "title": "" }, { "docid": "7ccb5933932bcbf5927da31005a41cb2", "score": "0.47525012", "text": "def repair_labels(labels):\n ret = np.copy(labels)\n ret[:, 0] = 10 # overwrite length to be stop seq\n ret = np.roll(ret, -1, axis=1) # move first to last\n return ret", "title": "" }, { "docid": "0b3e1a203446bde063755f01e3c68a22", "score": "0.47483096", "text": "def add_attributes(self, new_attributes):\n self.attributes = self.attributes[:-1]\n self.attributes += new_attributes\n self.attributes.append(\"loss_scores\")", "title": "" }, { "docid": "7bc897f9a87905eae416634e96b7df4b", "score": "0.4745186", "text": "def match_except_info(self,fpentry):\n # quick and easy checks first\n if self.route.root != fpentry[0] or self.code != fpentry[4]:\n return False\n # now label matches\n if len(self.labels) > 0 and self.labels[0] != fpentry[1]:\n return False\n if len(self.labels) > 1 and self.labels[1] != fpentry[2]:\n return False\n if len(self.labels) > 2 and self.labels[2] != fpentry[3]:\n return False\n return True", "title": "" }, { "docid": "f124da2fbb49fcb50d44fbfc025767cb", "score": "0.47448897", "text": "def attribute_line_checker(input_xml_term, input_line):\n xml_term = input_xml_term\n line = input_line\n\n line_validation = 
False\n term_validation = False\n validation = False\n\n # Validate if the line is the\n if all(x in line for x in ['@XmlAttribute(name', '=']) or all(x in line for x in ['@XmlElement(name', '=']):\n line_validation = True\n\n # Splitting the line, find the last element and dropping ) and \" chars which are the results of JAXB.\n # We obtain the pure attribute name from the xml file\n line_elements = line.split()\n line_last_element_cleaned = line_elements[2].replace(')', '').replace('\"', '').replace(',', '')\n\n if xml_term == line_last_element_cleaned:\n term_validation = True\n else:\n term_validation = False\n else:\n line_validation = False\n\n validation = term_validation and line_validation\n\n return validation", "title": "" }, { "docid": "6e358ff402e06742eb767a6a856bae65", "score": "0.4740897", "text": "def _process_labels_train(self, data_obj):\n data_obj['num_labels'] = self.num_labels\n valid_labels = self.labels.remove_invalid()\n data_obj['valid_labels'] = valid_labels", "title": "" }, { "docid": "6e358ff402e06742eb767a6a856bae65", "score": "0.4740897", "text": "def _process_labels_train(self, data_obj):\n data_obj['num_labels'] = self.num_labels\n valid_labels = self.labels.remove_invalid()\n data_obj['valid_labels'] = valid_labels", "title": "" }, { "docid": "b09da2f70e5d2c8ca7fcffc2351483e9", "score": "0.4740467", "text": "def test_clean_user_labels_1(self):\n\n def check_clean_wrong(task_id, labels, authors):\n data = clean_user_labels(TEST_ARTICLE['sentence_ends'], task_id, task_id[0], task_id[0], labels, authors)\n self.assertEquals(len(data), 0)\n\n # Check for wrong length of labels\n for i in range(10):\n check_clean_wrong([i], [1] + len(TEST_ARTICLE['labels'][i]) * [0], [0, 1])\n check_clean_wrong([i], (len(TEST_ARTICLE['labels'][i]) * [0])[:-1], [0, 1])\n check_clean_wrong([i], [0], [0, 1])\n\n # Check for labels with wrong values\n for i in range(10):\n check_clean_wrong([i], len(TEST_ARTICLE['labels'][i]) * [2], [0, 1])\n check_clean_wrong([i], len(TEST_ARTICLE['labels'][i]) * [-1], [0, 1])\n real_labels = TEST_ARTICLE['labels'][i].copy()\n real_labels[2] = 2\n check_clean_wrong([i], real_labels, [0, 1])\n real_labels = TEST_ARTICLE['labels'][i].copy()\n real_labels[2] = -1\n check_clean_wrong([i], real_labels, [0, 1])\n\n # Check for labels with wrong or no author values\n for i in range(10):\n loaded_length = len(TEST_ARTICLE['labels'][i])\n check_clean_wrong([i], TEST_ARTICLE['labels'][i], [loaded_length, loaded_length + 1])\n check_clean_wrong([i], TEST_ARTICLE['labels'][i], [-1, 0])\n check_clean_wrong([i], len(TEST_ARTICLE['labels'][i]) * [1], [])", "title": "" }, { "docid": "edb7888068195c5b57fc4058dd2911de", "score": "0.47404048", "text": "def changing_labels(self): \r\n keys = self.score_tracking_dict.keys()\r\n seq_set = set()\r\n for item in keys:\r\n value = self.score_tracking_dict[item]\r\n for ite in value:\r\n seq_set.add(ite[0])\r\n number_of_unique_sequences = len(seq_set) \r\n \r\n for key in keys: ## Slow! 
Look at possible rework\r\n value = self.score_tracking_dict[key]\r\n value_len = len(value) \r\n value_count = 0 \r\n for item in seq_set:\r\n pass_count = 0\r\n entry = 0\r\n while entry < value_len:\r\n if item == value[entry][0]:\r\n if value[entry][1] == \"pass\":\r\n pass_count += 1\r\n else:\r\n pass\r\n else:\r\n pass\r\n entry += 1\r\n if pass_count > 0:\r\n value_count += 1\r\n string = \"{0} out of {1}\".format(value_count, number_of_unique_sequences)\r\n if key == 'MinInt':\r\n self.ScrOvw_MinimumIntenstity.config(text = string)\r\n elif key == 'MinProd':\r\n self.ScrOvw_ScrMinimumProducts.config(text = string)\r\n elif key == 'MinMinProdPAA':\r\n self.ScrOvw_ScrMinimumProductsPerAminoAcid.config(text = string)\r\n elif key == 'MinConProd':\r\n self.ScrOvw_ScrMinimumConsecutiveProducts.config(text = string)\r\n elif key == 'MinProdSum':\r\n self.ScrOvw_ScrMinimumProdIntSum.config(text = string)\r\n elif key == 'MinScr':\r\n self.ScrOvw_ScrMinimumScore.config(text = string)\r\n elif key == 'MinNoOffRep' :\r\n self.ScrOvw_ScrNoOffReplicates.config(text = string)\r\n else:\r\n print(\"You done fuck'ed up buddy\")", "title": "" }, { "docid": "a8ae2f993be43b21dbcb82880823fe49", "score": "0.47375253", "text": "def _check_required_attributes(self) -> None:\n missing = [x for x in self.required_attrs\n if not hasattr(self.ccdata, x)]\n if missing:\n raise MissingAttributeError(\n f\"Could not parse required attributes to write file: {missing}\")", "title": "" }, { "docid": "316d793567f561a287f233198fde1b6c", "score": "0.47342885", "text": "def rrdlabel(self):\n\n return self.clean_label[0:19]", "title": "" }, { "docid": "33d8d80d83fcdfd43fe0a561685c71fe", "score": "0.47341272", "text": "def is_complete_label(self, label):\n return self.actual_label_counts.get(label, 0) >= self.expected_label_counts[label]", "title": "" }, { "docid": "0e055e91d3cacdec0b8fe9da2f8cfe7b", "score": "0.4724913", "text": "def check_label_dict(self):\n if self._case != \"regression\":\n return check_label_dict(self.label_dict, self._case, self._classes)", "title": "" }, { "docid": "40c8bae67f9c44d99313f64ba4aad94e", "score": "0.47195175", "text": "def __len__(self): \n return len(self.positive_label_path) + self.negetive_label_path", "title": "" }, { "docid": "0a2f3d1a1b866046375ccc5940c3c3b0", "score": "0.47179112", "text": "def checkExample(self, example):\r\n if self.values:\r\n for a in self.attributes:\r\n if example[a] not in self.values[a]:\r\n raise ValueError('Unvalid value {} for attribute {} in {}'\r\n .format(example[a], self.attrnames[a], example))", "title": "" }, { "docid": "ef2947284eac093da55ee23b322ea208", "score": "0.4710816", "text": "def test_str(self):\n self.assertEqual(str(self.a2), '>x\\nABC\\n>y\\nDEF\\n')\n #should work if labels diff length\n self.a2.Names[-1] = 'yyy'\n self.assertEqual(str(self.a2), '>x\\nABC\\n>yyy\\nDEF\\n')", "title": "" }, { "docid": "80bd32250ec43d699956ca20cff267e8", "score": "0.47104162", "text": "def hasLabel(line):\n return line[:1].isalpha()", "title": "" }, { "docid": "39d6e8d3235eca8689e4a5d85faf7c4e", "score": "0.47082475", "text": "def updateLabel(self,label):\n # birdSelectedMenu flips the state of each label\n # so need to pass all labels for deletion, or clean before updating\n \n # Need to keep track of self.multipleBirds\n multipleTemp = self.multipleBirds\n self.multipleBirds = True\n self.segments[self.box1id][4] = []\n for l in label:\n self.birdSelectedMenu(l)\n if label==[]:\n self.birdSelectedMenu(\"Don't Know\")\n self.multipleBirds = 
multipleTemp\n\n if self.listRectanglesa2[self.box1id] is not None:\n self.listRectanglesa1[self.box1id].setBrush(self.prevBoxCol)\n self.listRectanglesa1[self.box1id].update()\n if self.config['transparentBoxes'] and type(self.listRectanglesa2[self.box1id]) == self.ROItype:\n col = self.prevBoxCol.rgb()\n col = QtGui.QColor(col)\n col.setAlpha(255)\n self.listRectanglesa2[self.box1id].setPen(col, width=1)\n else:\n self.listRectanglesa2[self.box1id].setBrush(self.prevBoxCol)\n\n self.listRectanglesa2[self.box1id].update()\n self.segmentsToSave = True", "title": "" }, { "docid": "e042e26eba681ba451b54177420d3fb0", "score": "0.47070616", "text": "def load_attribute(self):\n\n attributes = scipy.io.loadmat(self.attribute_file)\n #just get the attriutes\n attributes = attributes['GT']\n #labels contain NAN I just converted it to 0\n self.attributes = np.nan_to_num(attributes)", "title": "" }, { "docid": "543ed6a4e1a7a7ecc88ace814f0a8324", "score": "0.4705611", "text": "def _on_update_attribute(self, attr_name, flag):\n\n if not self._current_asset:\n return\n\n if attr_name not in self._current_asset.attrs.keys():\n logger.warning('Impossible to udpate attribute {} because node {} has no that attribute!'.format(attr_name, self._current_asset.asset_item))\n return\n\n self._current_asset.attrs[attr_name] = flag", "title": "" }, { "docid": "0b894b67a143e30c0dd4d5a29b547aee", "score": "0.47044963", "text": "def test_unmatched_msg(self):\n self.throws(\n 'raise AttributeError(\"unmatched ATTRIBUTEERROR\")',\n UNKNOWN_ATTRIBUTEERROR)", "title": "" }, { "docid": "7e131d2f943941e3a64421d098a438aa", "score": "0.47007394", "text": "def test_label_nominal(self):\n element = ElementDao()\n self.assertEquals(element.label, '')\n element.label = \"my label\"\n self.assertEquals(element.label, \"my label\")\n self.assertEquals(str(element), element.label)", "title": "" }, { "docid": "82ca1d1804862545c5bc2ac68294c8aa", "score": "0.47002918", "text": "def mismatch_training(names, train_df):\n count = 0\n candidate_name = train_df.label\n for i in range(len(names)):\n c = candidate_name[i].lower().strip()\n #print(names[i], c)\n if(names[i][0] == 'O'):\n names[i][0] = names[i][0] + names[i][1]\n if (names[i][0].lower().replace(\"'\", \"\") != c):\n print(names[i][0], c)\n count += 1\n print(\"There are\", count, \"mismatch in training files\")\n return(count)", "title": "" }, { "docid": "d32154b60bf0d1bbeff83e88581d3623", "score": "0.46870676", "text": "def check_attributes(item_attributes, errors):\n for attribute, message in errors:\n if item_attributes.get(attribute) not in message[0]:\n raise InvalidDataError(f\"{message[1]}\")", "title": "" }, { "docid": "81d4f1427884a632c195f5178bb37844", "score": "0.46814704", "text": "def check_backwardsTip(self, frame):\n try:\n fingertips = ['R_T', 'R_I', 'R_M', 'R_R', 'R_L', 'L_T', 'L_I', 'L_M', 'L_R', 'L_I']\n for f in fingertips:\n if f=='R_T':\n if not 'Hands_R_T3' in [m.name for m in self.markers]:\n return\n \n M2 = self.getMarkerByName('Hands_'+f+'2')\n M3 = self.getMarkerByName('Hands_'+f+'3')\n M4 = self.getMarkerByName('Hands_'+f+'4')\n \n b23 = np.array(M3.getdata(frame)) - np.array(M2.getdata(frame))\n b34 = np.array(M4.getdata(frame)) - np.array(M3.getdata(frame))\n \n #swapped\n b24 = np.array(M4.getdata(frame)) - np.array(M2.getdata(frame))\n b43 = np.array(M3.getdata(frame)) - np.array(M4.getdata(frame))\n \n if helper.angle_between(b23, b34) >= self.BACKWARDS_TIP_THRESH:\n #switch 3 and 4 data, ONLUY if it would help\n if helper.angle_between(b24, 
b43) < self.BACKWARDS_TIP_THRESH:\n self._swapMarkerData(frame, M3, M4)\n \n except Exception:\n print (\"WARNING:backwards tip check not possible, marker names not recognized.\")", "title": "" }, { "docid": "126d383735176da6ae296cf0b12ce65c", "score": "0.46743867", "text": "def check_tags(self):\n tags = set(Lists.TAGS)\n if self.type != 'labeled':\n tags = tags.difference(['entry','label','term'])\n missing = tags.difference(self.tag.keys())\n if missing:\n self.error('missing tag(s): %s' % ','.join(missing), halt=True)", "title": "" }, { "docid": "f1ffc9d6feaa43fdc551b00212b3ef95", "score": "0.46691847", "text": "def testsymlabels(self):", "title": "" }, { "docid": "f1ffc9d6feaa43fdc551b00212b3ef95", "score": "0.46691847", "text": "def testsymlabels(self):", "title": "" }, { "docid": "f1ffc9d6feaa43fdc551b00212b3ef95", "score": "0.46691847", "text": "def testsymlabels(self):", "title": "" }, { "docid": "02c42f144978c8f71375478307bfd9b2", "score": "0.466665", "text": "def normalize_labels(full_label):\n try:\n label = full_label.split('<>')[1] # split and get second item\n except IndexError:\n label = full_label.split('<>')[0] # split and get first item\n\n return label", "title": "" }, { "docid": "dae99e4d61a61f52dce35b01a48fc81a", "score": "0.46636534", "text": "def _update_label(self):\n \n if None in [self._atlas, self._image] :\n return\n else :\n position_physical = self._image.cursor_physical_position\n position_index = tuple(\n self._label_image.physical_to_index(\n position_physical).round().astype(int))\n if self._label_image.is_inside(position_index) :\n label = self._label_image[position_index]\n if self.atlas.type == Atlas.Type.probabilistic and label != 0 :\n name = self._atlas.labels[label-1]\n elif self.atlas.type == Atlas.Type.label and label != 0 :\n name = self._atlas.labels[label]\n else :\n name = \"(no label)\"\n else :\n name = \"(out of atlas)\"\n \n self._label.ChangeValue(name)", "title": "" }, { "docid": "f156a20a28e6aeb6a66263f640174bf2", "score": "0.46589553", "text": "def relabel(self,l):\n if l == 'left_hand': return 0\n elif l == 'right_hand': return 1\n else: return 2", "title": "" }, { "docid": "755e7ae93ec23174b50db24286c863f9", "score": "0.46490127", "text": "def checkLabelsForDuplicates(labels='/u/ghezgroup/data/gc/source_list/label.dat'):\n if type(labels) is 'gcwork.starTables.Labels':\n lab = labels\n else:\n lab = Labels(labelFile=labels)\n\n rdx = lab.r.argsort()\n lab.take(rdx)\n\n duplicateCnt = 0\n\n dummy = np.arange(len(lab.name))\n\n for ii in dummy:\n dx = lab.x - lab.x[ii]\n dy = lab.y - lab.y[ii]\n dm = np.abs(lab.mag - lab.mag[ii])\n\n dr = np.hypot(dx, dy)\n\n # Search for stars within 50 mas\n rdx = np.where((dr < 0.05) & (dm < 1) & (dummy >= ii))[0]\n\n if len(rdx) > 1:\n duplicateCnt += 1\n\n print('')\n print('Found stars close to %s' % lab.name[ii])\n print(' %-13s %5s %7s %7s %7s %7s' % \\\n ('Name', 'mag', 'x', 'y', 'vx', 'vy'))\n\n for rr in rdx:\n print(' %-13s %5.2f %7.3f %7.3f %7.3f %7.3f' % \\\n (lab.name[rr], lab.mag[rr], lab.x[rr], lab.y[rr],\n lab.vx[rr], lab.vy[rr]))\n\n print('')\n print('Found %d duplicates' % duplicateCnt)", "title": "" }, { "docid": "ef6dd5f3fb884a527dce363825fe8110", "score": "0.46482667", "text": "def __change_label(self, label, up_lbl, left_lbl, i, j):\n for x in range(i):\n for y in range(j):\n if label[x][y] == left_lbl:\n label[x][y] = up_lbl\n\n for n in range(j):\n if label[i][n] == left_lbl:\n label[i][n] = up_lbl", "title": "" }, { "docid": "4b3957adb4c428be4303393ef8b1f080", "score": 
"0.4636886", "text": "def is_label(line):\n return line and line.startswith(\"(\") and line.endswith(\")\")", "title": "" }, { "docid": "e1a18233bc8125ff96a48da761e28ebc", "score": "0.4626394", "text": "def test_mnemonic_as_label(self):\n self.feed('cmp:')\n self.feed('b cmp')\n self.check('feffffea')", "title": "" }, { "docid": "18e5596d0f8db3aef278d3b2fd28a240", "score": "0.46227416", "text": "def check_flag(self, flag, name, namelist):\n allowname = self.attrs.get(flag, self.inherit_value(flag))\n if not allowname and name not in self.attrs[namelist]:\n self.attrs[namelist].append(name)\n elif name in self.attrs[namelist]:\n self.attrs[flag] = False", "title": "" }, { "docid": "a85f43df1c442ce8388b15a35200da8f", "score": "0.46184897", "text": "def is_labels(line):\n labelstr = (\" center cont flux eqw core gfwhm\"\n \" lfwhm\\n\")\n if line == labelstr:\n return True\n return False", "title": "" }, { "docid": "746a226e2c9606a53252b267e96b0694", "score": "0.46182683", "text": "def __len__(self):\n return len(self.label_list)", "title": "" }, { "docid": "6d473c9b8ec41acdacf9e4c7328c1363", "score": "0.4616349", "text": "def check_out_of_bounds(labels):\n vehicle_counter = 0\n image_counter = 0\n for image_label in labels.values():\n for i, vehicle in enumerate(image_label['vehicles']):\n # Not using min/max to be able to count occurances\n oob = False\n\n # # AABB\n if vehicle['AABB']['x1'] < 0:\n oob = True\n if vehicle['AABB']['y1'] < 0:\n oob = True\n if vehicle['AABB']['x2'] > constants.WIDTH:\n oob = True\n if vehicle['AABB']['y2'] > constants.HEIGHT:\n oob = True\n\n # # rear\n if vehicle['rear'] is not None:\n if vehicle['rear']['x1'] < 0:\n oob = True\n if vehicle['rear']['y1'] < 0:\n oob = True\n if vehicle['rear']['x2'] > constants.WIDTH:\n oob = True\n if vehicle['rear']['y2'] > constants.HEIGHT:\n oob = True\n\n # # side\n if vehicle['side'] is not None:\n for point in vehicle['side'].keys():\n if vehicle['side'][point]['x'] < 0:\n oob = True\n if vehicle['side'][point]['y'] < 0:\n oob = True\n if vehicle['side'][point]['x'] > constants.WIDTH:\n oob = True\n if vehicle['side'][point]['y'] > constants.HEIGHT:\n oob = True\n if oob:\n vehicle_counter += 1\n image_counter += 1\n print('Image number', image_counter, 'of', len(labels.values()))\n print('Found', vehicle_counter, 'annotations that are out of image bounds')", "title": "" }, { "docid": "f7d970e3d4d2866a5c6f2f3f40e906d8", "score": "0.46134958", "text": "def get_label_not_equals(self):\n return []", "title": "" }, { "docid": "7229b539c8f0b98e7bf12f425ad08512", "score": "0.46092325", "text": "def find_optimal_label(dataset, target_attr):\n best_value = target_attr.values.pop()\n best_cmp = len(split(dataset, target_attr, best_value))\n target_attr.values.add(best_value)\n\n for value in target_attr.values:\n split_set = split(dataset, target_attr, value)\n if len(split_set) > best_cmp:\n best_value = value\n best_cmp = len(split_set)\n return best_value", "title": "" }, { "docid": "5c19a99e92dd4c3a18951004cc3c8f67", "score": "0.46051943", "text": "def setLabel(self, label):\n if label not in (\"UNEXPLORED\", \"VISITED\"): # validation check\n print(\"The label is not valid...\")\n return\n \n self._label = label", "title": "" }, { "docid": "71004cb351fa1d0889eed22b73ec57e0", "score": "0.4599067", "text": "def allows_duplicate_labels(self) -> bool:\n return self._allows_duplicate_labels", "title": "" }, { "docid": "dded9ca103625788a360135f7fa08e95", "score": "0.45918864", "text": "def add_forbidden_label(self, label):\n 
self._forbidden_border_labels.append(label)", "title": "" }, { "docid": "380e61757fd551916ec0b36225290f74", "score": "0.45896766", "text": "def sort_regtext_label(label):\n sortable = [make_label_sortable(l)[0] for l in label]\n if len(sortable) > 4:\n sortable[4] = make_label_sortable(sortable[4], roman=True)[0]\n return sortable", "title": "" }, { "docid": "69a5c530215d7f140a7651429d10f5ed", "score": "0.45881474", "text": "def label_map(y):\n if y==\"functional\":\n return 2\n elif y==\"functional needs repair\":\n return 1\n else:\n return 0", "title": "" }, { "docid": "090a7716d4176184b0be75d980b041af", "score": "0.45840907", "text": "def scrub_to_marked_frame(self, label):\n return False", "title": "" }, { "docid": "61fb169cad85c9b95e7fda8d2328af16", "score": "0.45806554", "text": "def test_same_length_attribute(self):\n arg = {'src': '.',\n 'classBy': '.',\n 'classes': ['.', '.'],\n 'rules': ['.'],\n 'keepOthers': '.',\n 'multiMatch': '.',\n 'target': '.'}\n \n with self.assertRaises(ValidationError):\n self.classing.validate(arg)\n\n arg = {'src': '.',\n 'classBy': '.',\n 'classes': ['.'],\n 'rules': ['.', '.'],\n 'keepOthers': '.',\n 'multiMatch': '.',\n 'target': '.'}\n \n with self.assertRaises(ValidationError):\n self.classing.validate(arg)", "title": "" }, { "docid": "1419c527e2eb2f1cca8e6edf0e950708", "score": "0.4580567", "text": "def checkAndCorrectBIOEncoding(predictions): \n errors = 0\n labels = 0\n \n for sentenceIdx in range(len(predictions)):\n labelStarted = False\n labelClass = None\n \n\n for labelIdx in range(len(predictions[sentenceIdx])): \n label = predictions[sentenceIdx][labelIdx]\n labelNext = predictions[sentenceIdx][labelIdx+1] if labelIdx < len(predictions[sentenceIdx]) - 1 else 'O'\n labelPrev = predictions[sentenceIdx][labelIdx-1] if labelIdx > 0 else 'O'\n labelClass = label[2:] if len(label) > 1 else label\n labelClassNext = labelNext[2:] if len(labelNext) > 1 else labelNext\n labelClassPrev = labelPrev[2:] if len(labelPrev) > 1 else labelPrev\n if label != 'O':\n if label.startswith('I-') and labelClassPrev != labelClass:\n errors += 1\n predictions[sentenceIdx][labelIdx] = 'B'+predictions[sentenceIdx][labelIdx][1:] \n if errors > 0:\n labels += errors\n logging.info(\"Wrong BIO-Encoding %d/%d labels when setting incorrect labels\" % (errors, labels))", "title": "" }, { "docid": "a6448f04718e0d5da68cbe8aae8de1f0", "score": "0.45780087", "text": "def check_and_nicefy_attribute(self, name, params):\n # check if the attribute should exist, i.e.,\n # 'required', 'lenlim' for an 'str' restriction\n # are valid but 'someInvalidAttribute' isn't\n attr_style = self.attributes.get(name, None)\n self._check_attribute_existance(attr_style, name)\n\n # check if the attribute should have parameters,\n # eg: 'required' should not have parameters,\n # but 'lenlim' should\n param_styles = attr_style.get(\"parameters\", [])\n self._check_parameter_presence(param_styles, name, params)\n\n to_ret_params = []\n for i, param in enumerate(params):\n param_style = param_styles[i]\n para_type = param_style[\"type\"]\n param = self._check_parameter_type(para_type, name, param)\n to_ret_params.append(param)\n\n return name, to_ret_params", "title": "" }, { "docid": "b97fcb3fea7ad626009db2479b5ed160", "score": "0.45679286", "text": "def check_label(data_inst):\n\n LOGGER.debug('checking label')\n label_checker = ClassifyLabelChecker()\n num_class, class_set = label_checker.validate_label(data_inst)\n if num_class != 2:\n raise ValueError(\n 'ftl only support binary 
classification, however {} labels are provided.'.format(num_class))\n\n if 1 in class_set and -1 in class_set:\n return data_inst\n else:\n soreted_class_set = sorted(list(class_set))\n new_label_mapping = {soreted_class_set[1]: 1, soreted_class_set[0]: -1}\n reset_label = functools.partial(FTL.reset_label, mapping=new_label_mapping)\n new_table = data_inst.mapValues(reset_label)\n new_table.schema = copy.deepcopy(data_inst.schema)\n return new_table", "title": "" }, { "docid": "176087c8b0b028acb80193260e9f12ea", "score": "0.4564874", "text": "def test_attributes_updated_warnings(self, warning_list=None):\n attribute_name = 'attribute_to_update'\n changes = 'second_value'\n warning_msg = \"Adding or updating attribute\"\n result = update_attribute(self.cube, attribute_name, changes,\n warnings_on=True)\n self.assertTrue(any(item.category == UserWarning\n for item in warning_list))\n self.assertTrue(any(warning_msg in str(item)\n for item in warning_list))\n self.assertEqual(result.attributes['attribute_to_update'],\n 'second_value')", "title": "" }, { "docid": "139b5196cce9e355a2609cee35fc0e1d", "score": "0.45628616", "text": "def mark_label(self, label):\n\t\tcore.BNLowLevelILMarkLabel(self.handle, label.handle)", "title": "" }, { "docid": "c56227eb3549ba04836a61613d6cbc06", "score": "0.45628038", "text": "def min_attribute_difference(self, difference, vocabulary, attribute_pair):\n new_list = []\n for word in vocabulary:\n if abs(vocabulary[word][attribute_pair[0]] - vocabulary[word][attribute_pair[1]])/vocabulary[word]['total'] < difference:\n if word not in self.stoplist:\n new_list.append(word)\n return new_list", "title": "" }, { "docid": "9158c84d97fd9870035128ed6d0d3f6b", "score": "0.45590928", "text": "def check_example(self, example):\n if self.values:\n for a in self.attrs:\n if example[a] not in self.values[a]:\n raise ValueError('Bad value {} for attribute {} in {}'\n .format(example[a], self.attr_names[a], example))", "title": "" }, { "docid": "344368179e2c2d6a7706b82b7b927357", "score": "0.45546058", "text": "def __parse_attributes(arff_file, labels: Set[str]) -> (str, List[Attribute]):\n\n label_location = 'end'\n attributes: List[Attribute] = []\n\n with open(arff_file) as file:\n for line in file:\n if line.startswith('@attribute') or line.startswith('@ATTRIBUTE'):\n attribute_definition = line[len('@attribute'):].strip()\n\n if attribute_definition.endswith('numeric') or attribute_definition.endswith('NUMERIC'):\n # Numerical attribute\n attribute_name = attribute_definition[:(len(attribute_definition) - len('numeric'))]\n numeric = True\n elif attribute_definition.endswith('real') or attribute_definition.endswith('REAL'):\n # Numerical attribute\n attribute_name = attribute_definition[:(len(attribute_definition) - len('real'))]\n numeric = True\n else:\n # Nominal attribute\n attribute_name = attribute_definition[:attribute_definition.find(' {')]\n numeric = False\n\n attribute_name = __parse_attribute_or_label_name(attribute_name)\n\n if attribute_name not in labels:\n attribute = Attribute(attribute_name) if numeric else NominalAttribute(attribute_name)\n attributes.append(attribute)\n elif len(attributes) == 0:\n label_location = 'start'\n\n return label_location, attributes", "title": "" }, { "docid": "24351a0d1aa38dcb03f93fdfaefcf219", "score": "0.45510423", "text": "def test_set_attributes_missing_index(self):\r\n\r\n _values = (0, 1, 'Prescribed Action', 'Action Taken',\r\n 1, 0, 3, 0, False, 2, 0, False)\r\n\r\n (_error_code,\r\n _error_msg) = 
self.DUT.set_attributes(_values)\r\n self.assertEqual(_error_code, 40)", "title": "" } ]
188861d9c40850d37e5c9ef4dd7d3ad7
Adds vmcheckerupdatedb specific options to an already populated family of options.
[ { "docid": "2f1fb51907ca0215f88a861da4623f2e", "score": "0.0", "text": "def add_update_db_optparse(cmdline):\n group = optparse.OptionGroup(cmdline, 'update_db.py')\n group.add_option('-f', '--force', action='store_true', dest='force',\n default=False, help='Force updating all marks ignoring '\n 'modification times')\n cmdline.add_option_group(group)", "title": "" } ]
[ { "docid": "fc76c15d97e63233f5d826ce76ad1e94", "score": "0.62515515", "text": "def _update_options(self):\n # set verbosity level (also of self.logger, in case of a custom one)\n if self.options[\"verbose\"] <= 0:\n logging.getLogger(\"pyffi\").setLevel(logging.WARNING)\n self.logger.setLevel(logging.WARNING)\n elif self.options[\"verbose\"] == 1:\n logging.getLogger(\"pyffi\").setLevel(logging.INFO)\n self.logger.setLevel(logging.INFO)\n else:\n logging.getLogger(\"pyffi\").setLevel(logging.DEBUG)\n self.logger.setLevel(logging.DEBUG)\n # check errors\n if self.options[\"createpatch\"] and self.options[\"applypatch\"]:\n raise ValueError(\n \"options --diff and --patch are mutually exclusive\")\n if self.options[\"diffcmd\"] and not(self.options[\"createpatch\"]):\n raise ValueError(\n \"option --diff-cmd can only be used with --diff\")\n if self.options[\"patchcmd\"] and not(self.options[\"applypatch\"]):\n raise ValueError(\n \"option --patch-cmd can only be used with --patch\")\n # multiprocessing available?\n if (multiprocessing is None) and self.options[\"jobs\"] > 1:\n self.logger.warn(\n \"multiprocessing not supported on this platform\")\n self.options[\"jobs\"] = 1\n # update include and exclude types\n self.include_types = tuple(\n getattr(self.FILEFORMAT, block_type)\n for block_type in self.options[\"include\"])\n self.exclude_types = tuple(\n getattr(self.FILEFORMAT, block_type)\n for block_type in self.options[\"exclude\"])\n # update skip and only regular expressions\n self.skip_regexs = tuple(\n re.compile(regex) for regex in self.options[\"skip\"])\n self.only_regexs = tuple(\n re.compile(regex) for regex in self.options[\"only\"])", "title": "" }, { "docid": "99ed80d5124eef11656d2e04d5a5a2d5", "score": "0.62049663", "text": "def update_options(cls, options, items):", "title": "" }, { "docid": "7e1108e2dbe64f0054572a3ac71ec3bc", "score": "0.60818106", "text": "def update_options(self, options):\n\n for key in self.options:\n if key in options and not options[key]:\n self.options[key] = b''\n elif (key in self.options and self.options[key]) or\\\n (key in options and options[key]):\n if key == 'debug_verbose':\n self.options['debug_verbose'] = tuple([b'-ddd'])\n elif key == 'key_data':\n self.options[key] = tuple([b'-K'])\n elif key == 'timestamp':\n self.options[key] = tuple([b'-t'])\n elif key == 'version':\n self.options[key] = tuple([b'-v'])\n elif key == 'mute':\n self.options[key] = tuple([b'-s'])\n elif key == 'eloop_term_disable':\n self.options[key] = tuple([b'-E'])\n elif key in self.options and not self.options[key]:\n self.options[key] = b''", "title": "" }, { "docid": "881a1e5a27d2d842281da379542bd2b4", "score": "0.59011185", "text": "def addAdditionalOptions(self):\n methodsDict = {}\n reflect.accumulateMethods(self.options, methodsDict, 'opt_')\n methodToShort = {}\n for name in methodsDict.copy():\n if len(name) == 1:\n methodToShort[methodsDict[name]] = name\n del methodsDict[name]\n\n for methodName, methodObj in methodsDict.items():\n long = methodName.replace('_', '-') # t.p.usage does this\n # if this option is already defined by the optFlags or\n # optParameters then we don't want to override that data\n if long in self.optAll_d:\n continue\n\n descr = self.getDescription(long)\n\n short = None\n if methodObj in methodToShort:\n short = methodToShort[methodObj]\n\n reqArgs = methodObj.im_func.func_code.co_argcount\n if reqArgs == 2:\n self.optParams.append([long, short, None, descr])\n self.optParams_d[long] = [short, None, descr]\n 
self.optAll_d[long] = [short, None, descr]\n elif reqArgs == 1:\n self.optFlags.append([long, short, descr])\n self.optFlags_d[long] = [short, descr]\n self.optAll_d[long] = [short, None, descr]\n else:\n raise TypeError, '%r has wrong number ' \\\n 'of arguments' % (methodObj,)", "title": "" }, { "docid": "64ebea00d8402d1b2a463450adf20b75", "score": "0.5726903", "text": "def add_options(self, option_dict):\n option_dict.update(self._options)", "title": "" }, { "docid": "4849d1dc71f338f5c4a0b7dff32ec73d", "score": "0.56465876", "text": "def set_options(new_option_dict):\n if Options() is not new_option_dict:\n for k, v in new_option_dict.__dict__.items():\n Options().__setattr__(k, v)", "title": "" }, { "docid": "7ea4734f603530c5133ba7cd8a9eb26c", "score": "0.56399053", "text": "def initialize_options(self):", "title": "" }, { "docid": "7ea4734f603530c5133ba7cd8a9eb26c", "score": "0.56399053", "text": "def initialize_options(self):", "title": "" }, { "docid": "6053ec511ad0df15fd0aff0a8b6d8951", "score": "0.5607641", "text": "def updateOptions(self):\n\t\treturn {\n\t\t\t\"threshold\": self.thresholdEdit.text(),\n\t\t\t\"filt\": self.filtCheckBox.isChecked()\n\t\t}", "title": "" }, { "docid": "57555d47acc52e727889bdb448211897", "score": "0.55903774", "text": "def _freebayes_options_from_config(items, config, out_file, region=None):\n opts = []\n opts += [\"--ploidy\", str(ploidy.get_ploidy(items, region))]\n\n variant_regions = bedutils.merge_overlaps(utils.get_in(config, (\"algorithm\", \"variant_regions\")),\n items[0])\n target = subset_variant_regions(variant_regions, region, out_file, items)\n if target:\n if isinstance(target, basestring) and os.path.isfile(target):\n opts += [\"--targets\", target]\n else:\n opts += [\"--region\", region_to_freebayes(target)]\n resources = config_utils.get_resources(\"freebayes\", config)\n if resources.get(\"options\"):\n opts += resources[\"options\"]\n return opts", "title": "" }, { "docid": "ed4f154fc9c9f0c74f6dea4bde910934", "score": "0.55820084", "text": "def initialize_options(self) -> None:", "title": "" }, { "docid": "ed4f154fc9c9f0c74f6dea4bde910934", "score": "0.55820084", "text": "def initialize_options(self) -> None:", "title": "" }, { "docid": "ed4f154fc9c9f0c74f6dea4bde910934", "score": "0.55820084", "text": "def initialize_options(self) -> None:", "title": "" }, { "docid": "954b04269a27a9f4b8f59ca6ebf07404", "score": "0.55660045", "text": "def vcsSetOptions(self, options):\n if self.vcsSupportCommandOptions():\n for key in options:\n try:\n self.options[key] = options[key]\n except KeyError:\n pass", "title": "" }, { "docid": "39ca37f99e430874f2fabe709293ec8a", "score": "0.555064", "text": "def modify_options(parser):\r\n return parser", "title": "" }, { "docid": "6e8a1971fef9340d5b9c6ad8f4e10a77", "score": "0.55504555", "text": "def _processTbOptions(self, opts):\n if not opts.has_key(\"clusteredIndex\"):\n self._logger.info(\n \"param 'clusteredIndex' not found, will use default: ''\")\n opts[\"clusteredIndex\"] = \"NULL\"\n if not opts.has_key(\"isRefMatch\"):\n self._logger.info(\"param 'isRefMatch' not found, will use default: No\")\n opts[\"isRefMatch\"] = \"No\"\n # these are required options for createTable\n _crTbOpts = {\n \"table_info\":(\"tableName\",\n \"partitioning\",\n \"schemaFile\",\n \"clusteredIndex\",\n \"isRefMatch\",\n \"isView\")}\n _crTbPSOpts = {\n \"sphBox\":(\"overlap\",\n \"lonColName\", \n \"latColName\")}\n return opts", "title": "" }, { "docid": "dcdd5d0ba10a7dc7cbfcf62475b3e680", "score": "0.5545255", 
"text": "def add_options(self, *_options):\n for _opt in _options:\n if _opt.__class__ in self.supported_options and _opt.value is not None:\n self._options.add_option(_opt)\n else:\n log.warning('Option %s was rejected because unsupported by %s', str(_opt),\n self.__class__.__name__)", "title": "" }, { "docid": "2ca172044fe723bc83d2351d0f77c39d", "score": "0.5472952", "text": "def AddUpdateInstanceFlags(parser):\n accelerator_choices = [\n 'NVIDIA_TESLA_K80', 'NVIDIA_TESLA_P100',\n 'NVIDIA_TESLA_V100', 'NVIDIA_TESLA_P4', 'NVIDIA_TESLA_T4',\n 'NVIDIA_TESLA_A100', 'NVIDIA_TESLA_A100_80GB',\n 'NVIDIA_TESLA_T4_VWS', 'NVIDIA_TESLA_P100_VWS', 'NVIDIA_TESLA_P4_VWS'\n ]\n AddInstanceResource(parser)\n gce_setup_group = parser.add_group(\n help=(\n 'Gce Setup for the instance'))\n accelerator_group = gce_setup_group.add_group(\n help='Accelerator configurations.'\n )\n accelerator_group.add_argument(\n '--accelerator-type',\n help='Type of this accelerator.',\n choices=accelerator_choices,\n default=None)\n accelerator_group.add_argument(\n '--accelerator-core-count',\n help='Count of cores of this accelerator.',\n type=int)\n gpu_group = gce_setup_group.add_group(help='GPU driver configurations.')\n gpu_group.add_argument(\n '--install-gpu-driver',\n help='Install gpu driver',\n type=bool)\n gpu_group.add_argument(\n '--custom-gpu-driver-path',\n help='custom gpu driver path',\n type=str)\n shielded_vm_group = gce_setup_group.add_group(\n help='Shielded VM configurations.'\n )\n shielded_vm_group.add_argument(\n '--shielded-secure-boot',\n help='Boot instance with secure boot enabled',\n type=bool)\n shielded_vm_group.add_argument(\n '--shielded-vtpm',\n help='Boot instance with TPM (Trusted Platform Module) enabled',\n type=bool)\n shielded_vm_group.add_argument(\n '--shielded-integrity-monitoring',\n help='Enable monitoring of the boot integrity of the instance',\n type=bool)\n parser.add_argument(\n '--labels',\n help=('Labels to apply to this instance. These can be later modified '\n 'by the setLabels method.'),\n type=arg_parsers.ArgDict(),\n metavar='KEY=VALUE')\n gce_setup_group.add_argument(\n '--metadata',\n help='Custom metadata to apply to this instance.',\n type=arg_parsers.ArgDict(),\n metavar='KEY=VALUE')\n gce_setup_group.add_argument(\n '--machine-type',\n help=(\n 'The '\n '[Compute Engine machine type](https://cloud.google.com/sdk/gcloud/reference/compute/machine-types) ' # pylint: disable=line-too-long\n 'of this instance.'))", "title": "" }, { "docid": "132e2f9a67d57bdae907e727c2c360ab", "score": "0.5455676", "text": "def update_options(cls, options, items):\n # Get new backend\n backend_spec = items.get('backend', Store.current_backend)\n split = backend_spec.split(':')\n backend, mode = split if len(split)==2 else (split[0], 'default')\n if ':' not in backend_spec:\n backend_spec += ':default'\n\n if 'max_branches' in items:\n print('Warning: The max_branches option is now deprecated. 
Ignoring.')\n del items['max_branches']\n\n # Get previous backend\n prev_backend = Store.current_backend\n renderer = Store.renderers[prev_backend]\n prev_backend_spec = prev_backend+':'+renderer.mode\n\n # Update allowed formats\n for p in ['fig', 'holomap']:\n cls.allowed[p] = list_formats(p, backend_spec)\n\n # Return if backend invalid and let validation error\n if backend not in Store.renderers:\n options['backend'] = backend_spec\n return options\n\n # Get backend specific options\n backend_options = dict(cls._backend_options[backend_spec])\n cls._backend_options[prev_backend_spec] = {k: v for k, v in cls.options.items()\n if k in cls.remembered}\n\n # Fill in remembered options with defaults\n for opt in cls.remembered:\n if opt not in backend_options:\n backend_options[opt] = cls.defaults[opt]\n\n # Switch format if mode does not allow it\n for p in ['fig', 'holomap']:\n if backend_options.get(p) not in cls.allowed[p]:\n backend_options[p] = cls.allowed[p][0]\n\n # Ensure backend and mode are set\n backend_options['backend'] = backend_spec\n backend_options['mode'] = mode\n\n return backend_options", "title": "" }, { "docid": "c219c80ca133550917862bb8c96b6534", "score": "0.53697187", "text": "def add_options(self) -> None:\n if not self.config_entry.options:\n options = {\n CONF_UPDATE_RATE: DEFAULT_UPDATE_RATE,\n CONF_READ_TIMEOUT: DEFAULT_READ_TIMEOUT,\n }\n self.hass.config_entries.async_update_entry(\n self.config_entry, options=options\n )\n else:\n options = dict(self.config_entry.options)\n if CONF_UPDATE_RATE not in self.config_entry.options:\n options[CONF_UPDATE_RATE] = DEFAULT_UPDATE_RATE\n if CONF_READ_TIMEOUT not in self.config_entry.options:\n options[CONF_READ_TIMEOUT] = DEFAULT_READ_TIMEOUT\n self.hass.config_entries.async_update_entry(\n self.config_entry, options=options\n )", "title": "" }, { "docid": "f12a3ef450966839b84274d8e457b4f3", "score": "0.5357892", "text": "def add_options(normal, expert):\n expert.add_option(\"\", \"--cw-min\", type=\"int\", default=5,\n help=\"set minimum contention window (CWmin) [default=%default]\")\n expert.add_option(\"\", \"--sifs\", type=\"eng_float\", default=.0002,\n help=\"set SIFS time [default=%default]\")\n #expert.add_option(\"\", \"--difs\", type=\"eng_float\", default=.005,\n # help=\"set DIFS time [default=%default]\")\n expert.add_option(\"\", \"--ctl\", type=\"eng_float\", default=.04,\n help=\"set control packet time [default=%default]\")\n expert.add_option(\"\", \"--backoff\", type=\"eng_float\", default=.0001,\n help=\"set backoff time [default=%default]\")\n expert.add_option(\"\", \"--packet-lifetime\", type=\"int\", default=5,\n help=\"set number of attempts to send each packet [default=%default]\")\n expert.add_option(\"\", \"--log-mac\", action=\"store_true\", default=False,\n help=\"log all MAC layer tx/rx data [default=%default]\")", "title": "" }, { "docid": "a1a1115976d8be5f7c046220a37a85c2", "score": "0.53560615", "text": "def addOptions(self,options):\n y=1\n for option in options:\n key=option[0]\n desc=option[1]\n self.__addOption(y,key,desc)\n y+=1", "title": "" }, { "docid": "d6091dec20755362b1f1d0fa3a3b7f66", "score": "0.5335003", "text": "def options_json(self, new_options=None):", "title": "" }, { "docid": "8605d2198a3831db5c92b5bd11d1111a", "score": "0.52954787", "text": "def add_options(normal, expert):\n normal.add_option(\"-W\", \"--bandwidth\", type=\"eng_float\",\n default=500e3,\n help=\"set symbol bandwidth [default=%default]\")\n normal.add_option(\"-v\", \"--verbose\", 
action=\"store_true\", default=False)\n expert.add_option(\"\", \"--log\", action=\"store_true\", default=False,\n help=\"Log all parts of flow graph to files (CAUTION: lots of data)\")", "title": "" }, { "docid": "1860fc582f1ea5a49f478e3737badef9", "score": "0.52750677", "text": "def _update_options(self, change):\n if change is not None and \"new\" in change:\n self._w_select_alert.options = {\n alert_desc: alert_id\n for alert_desc, alert_id in self._select_items.items()\n if change[\"new\"].casefold() in alert_desc.casefold()\n }", "title": "" }, { "docid": "909f11e9a1be2874813357ff3a3e82e5", "score": "0.52723986", "text": "def initialize_options(self):\n pass", "title": "" }, { "docid": "909f11e9a1be2874813357ff3a3e82e5", "score": "0.52723986", "text": "def initialize_options(self):\n pass", "title": "" }, { "docid": "0740dd2e9314271200cce495b00bbb5a", "score": "0.5256049", "text": "def tox_addoption(parser):", "title": "" }, { "docid": "a6f7e9e5439d22ce0daed2a52a0009ec", "score": "0.51991326", "text": "def define_options(self, **kwargs):\n pass", "title": "" }, { "docid": "0d518a731d720daf4ab1f3093a0522a7", "score": "0.5195367", "text": "def set_options(options):\n _options.update(options)", "title": "" }, { "docid": "19a2853184c0f2326693dea0a0d3cfb4", "score": "0.5186147", "text": "def _add_opts(self, params):\n if params.get('driver_opts', None):\n opts = parse_kv_as_dict(params['driver_opts'], False)\n params['driver_opts'] = opts\n return params", "title": "" }, { "docid": "783873456927be27aa5d9efc0700ad66", "score": "0.51814806", "text": "def initialize_options(self):\n self.include_extras = None\n self.include_all_extras = None\n self.extra_pkgs = []", "title": "" }, { "docid": "aad20500cb5fcae8ecd22e00ca59f4a0", "score": "0.5155876", "text": "def _options_changed(self, change=None):\n self.options = self._loc_options[self._algo_sel.index].options", "title": "" }, { "docid": "9af7087f41aedbc7e2a1d7c1ea5985bc", "score": "0.5150465", "text": "def setup_options(self):\n addMBMGroup(self.parser)\n rf.addGenRegArgumentGroup(self.parser)\n lsq6.addLSQ6ArgumentGroup(self.parser)\n lsq12.addLSQ12ArgumentGroup(self.parser)\n nlin.addNlinRegArgumentGroup(self.parser)\n st.addStatsArguments(self.parser)", "title": "" }, { "docid": "fff31a7552254f36522dbcb36c78ff19", "score": "0.51435924", "text": "def initialize_options(self):\r\n \r\n self.verbose = False", "title": "" }, { "docid": "fff31a7552254f36522dbcb36c78ff19", "score": "0.51435924", "text": "def initialize_options(self):\r\n \r\n self.verbose = False", "title": "" }, { "docid": "a6be58c1a4f3ddfb6a9ffaec2a1d2b8e", "score": "0.5137842", "text": "def update(self):\n\t\tself._is_optional = False\n\t\tfor option in self.info.required_opts:\n\t\t\tself._type_dependant_functions[option.type](option)\n\n\t\tself._is_optional = True\n\t\tfor option in self.info.optional_opts:\n\t\t\tself._type_dependant_functions[option.type](option)", "title": "" }, { "docid": "721dffc1631b72a91aceee8f88ef5b30", "score": "0.51343924", "text": "def package_options(func):\n return option(\n '--update/--upgrade',\n '-d/-g',\n 'update_current_stage',\n default=False,\n help='Update stage or upgrade to next stage. 
[default: upgrade]',\n )(func)", "title": "" }, { "docid": "96beb7f766a6be26e5ed9ac402ef9753", "score": "0.5133902", "text": "def _set_command_specific_options(self, parser):\n pass", "title": "" }, { "docid": "b58f1448a42e45b416fb416cc8a95113", "score": "0.5122877", "text": "def _ApplyFlags(cls, config_values, flag_values):\n super(ProfitBricksVmSpec, cls)._ApplyFlags(config_values, flag_values)\n if flag_values['machine_type'].present:\n config_values['machine_type'] = yaml.safe_load(flag_values.machine_type)\n if flag_values['profitbricks_location'].present:\n config_values['location'] = flag_values.profitbricks_location\n if flag_values['profitbricks_boot_volume_type'].present:\n config_values['boot_volume_type'] = \\\n flag_values.profitbricks_boot_volume_type\n if flag_values['profitbricks_boot_volume_size'].present:\n config_values['boot_volume_size'] = \\\n flag_values.profitbricks_boot_volume_size\n if flag_values['availability_zone'].present:\n config_values['availability_zone'] = flag_values.availability_zone\n if flag_values['profitbricks_image_alias'].present:\n config_values['image_alias'] = flag_values.profitbricks_image_alias", "title": "" }, { "docid": "f83364ec6ecded82b7446e9b2402c9f9", "score": "0.5115164", "text": "def setup_options(self):\n rf.addGenRegArgumentGroup(self.parser)\n addLSQ6ArgumentGroup(self.parser)", "title": "" }, { "docid": "d653f2a0fc3f1e85213faccc8bfbcc01", "score": "0.5113958", "text": "def set_options(**kwargs: Any) -> None:\n for key in kwargs:\n if key not in _NUMPOLY_OPTIONS:\n raise KeyError(f\"option '{key}' not recognized.\")\n _NUMPOLY_OPTIONS.update(**kwargs)", "title": "" }, { "docid": "7584ad25dbbeb46a9b4f96f9aa8b8723", "score": "0.51101446", "text": "def resolveDerivedOptions(self):", "title": "" }, { "docid": "a6868c93682c481248319f1cffb959b1", "score": "0.5107287", "text": "def updateOptions(self):\n\t\treturn {\n\t\t\t\"y_analyte\": self.y_analyteCombo.currentText(),\n\t\t\t\"x_analyte\": self.x_analyteCombo.currentText(),\n\t\t\t\"window\": self.windowEdit.text(),\n\t\t\t\"r_threshold\": self.r_thresholdEdit.text(),\n\t\t\t\"p_threshold\": self.p_thresholdEdit.text(),\n\t\t\t\"filt\": self.filtCheckBox.isChecked()\n\t\t}", "title": "" }, { "docid": "2e57d0100b75fed6cee2774fea7b4d41", "score": "0.51053125", "text": "def set_flavor_extra_spec(self, flavor_id, specs):\n extra_specs = xml_utils.Element(\"extra_specs\")\n for key in specs.keys():\n extra_specs.add_attr(key, specs[key])\n resp, body = self.post('flavors/%s/os-extra_specs' % flavor_id,\n str(xml_utils.Document(extra_specs)))\n body = xml_utils.xml_to_json(etree.fromstring(body))\n return resp, body", "title": "" }, { "docid": "d37dcf7442170555af127a3476c071ee", "score": "0.50813884", "text": "def finalize_options(self):", "title": "" }, { "docid": "d37dcf7442170555af127a3476c071ee", "score": "0.50813884", "text": "def finalize_options(self):", "title": "" }, { "docid": "d37dcf7442170555af127a3476c071ee", "score": "0.50813884", "text": "def finalize_options(self):", "title": "" }, { "docid": "a98569a9d056eaa6c06b35fae64082d5", "score": "0.50437313", "text": "def _options_trait_changed(self, change=None):\n self._loc_options[self._algo_sel.index].options = self.options", "title": "" }, { "docid": "aefb74f891acd3dfd6ec28fd322179a0", "score": "0.49913657", "text": "def add_options(self, items: Iterable[NewOptionListContent]) -> Self:\n # Only work if we have items to add; but don't make a fuss out of\n # zero items to add, just carry on like nothing happened.\n if items:\n # 
Turn any incoming values into valid content for the list.\n content = [self._make_content(item) for item in items]\n self._contents.extend(content)\n # Pull out the content that is genuine options and add them to the\n # list of options.\n self._options.extend([item for item in content if isinstance(item, Option)])\n self._refresh_content_tracking(force=True)\n self.refresh()\n return self", "title": "" }, { "docid": "5451ddae9920fd7ec7bd9a838db79db7", "score": "0.49872944", "text": "def calculate_specs(\n options_bootstrapper: OptionsBootstrapper,\n options: Options,\n session: SchedulerSession,\n working_dir: str,\n) -> Specs:\n global_options = options.for_global_scope()\n unmatched_cli_globs = global_options.unmatched_cli_globs\n specs = SpecsParser(working_dir=working_dir).parse_specs(\n options.specs,\n description_of_origin=\"CLI arguments\",\n unmatched_glob_behavior=unmatched_cli_globs,\n )\n\n changed_options = ChangedOptions.from_options(options.for_scope(\"changed\"))\n logger.debug(\"specs are: %s\", specs)\n logger.debug(\"changed_options are: %s\", changed_options)\n\n if specs and changed_options.provided:\n changed_name = \"--changed-since\" if changed_options.since else \"--changed-diffspec\"\n specs_description = specs.arguments_provided_description()\n assert specs_description is not None\n raise InvalidSpecConstraint(\n f\"You used `{changed_name}` at the same time as using {specs_description}. You can \"\n f\"only use `{changed_name}` or use normal arguments.\"\n )\n\n if not changed_options.provided:\n return specs\n\n bootstrap_environment = determine_bootstrap_environment(session)\n\n (git_binary,) = session.product_request(GitBinary, [Params(bootstrap_environment)])\n (maybe_git_worktree,) = session.product_request(\n MaybeGitWorktree, [Params(GitWorktreeRequest(), git_binary, bootstrap_environment)]\n )\n if not maybe_git_worktree.git_worktree:\n raise InvalidSpecConstraint(\n \"The `--changed-*` options are only available if Git is used for the repository.\"\n )\n\n changed_files = tuple(changed_options.changed_files(maybe_git_worktree.git_worktree))\n file_literal_specs = tuple(FileLiteralSpec(f) for f in changed_files)\n\n changed_request = ChangedRequest(changed_files, changed_options.dependents)\n (changed_addresses,) = session.product_request(\n ChangedAddresses,\n [Params(changed_request, options_bootstrapper, bootstrap_environment)],\n )\n logger.debug(\"changed addresses: %s\", changed_addresses)\n\n address_literal_specs = []\n for address in cast(ChangedAddresses, changed_addresses):\n address_input = AddressInput.parse(address.spec, description_of_origin=\"`--changed-since`\")\n address_literal_specs.append(\n AddressLiteralSpec(\n path_component=address_input.path_component,\n target_component=address_input.target_component,\n generated_component=address_input.generated_component,\n parameters=FrozenDict(address_input.parameters),\n )\n )\n\n return Specs(\n includes=RawSpecs(\n # We need both address_literals and file_literals to cover all our edge cases, including\n # target-aware vs. target-less goals, e.g. 
`list` vs `count-loc`.\n address_literals=tuple(address_literal_specs),\n file_literals=file_literal_specs,\n unmatched_glob_behavior=unmatched_cli_globs,\n filter_by_global_options=True,\n from_change_detection=True,\n description_of_origin=\"`--changed-since`\",\n ),\n ignores=RawSpecs(description_of_origin=\"`--changed-since`\"),\n )", "title": "" }, { "docid": "e3972038aaa7066d9b3ac9f92d822d11", "score": "0.49858135", "text": "def AddAdditionalOptions(parser):\n\n cmd_options.debug_port(parser, '')\n cmd_options.gdb(parser, '')\n cmd_options.gdb_options(parser, '')", "title": "" }, { "docid": "741452e9ca0f64af1852d4ea09ef12e3", "score": "0.49831793", "text": "def validate_override(cls, name, config:dict, **override_options):\n\n options = {}\n for key, value in override_options.items():\n lookup_key = key.lower().replace(\"-\", \"\").replace(\"_\", \"\")\n obj = cls._OPTIONS_TABLE.get(lookup_key)\n if obj is not None:\n if obj.get(\"abbreviation\"):\n obj = cls._OPTIONS_TABLE.get(obj.get(\"abbreviation\"))\n if obj.get(\"readonly\"):\n raise ValueError(f\"option '{key}' in {name} is readony, cannot be set\")\n cls._convert(name, obj, key, value)\n cls._validate_config_trait(name, obj, key, value, config)\n options[obj.get(\"flag\") or lookup_key] = value\n else:\n raise ValueError(f\"unknown option '{key}' in {name}\")\n return options", "title": "" }, { "docid": "af2943cd5575cb1357bd39469e3778b8", "score": "0.49733296", "text": "def options(self, parser, env):\n super(PYCC, self).options(parser, env=env)\n parser.add_option('--pycc-rel', type='string', dest='pycc_rel',\n help='Rel file path, res/deploy/r2deploy.yml by default',\n default='res/deploy/r2deploy.yml')\n parser.add_option('--pycc-enablegb', action='store_true', dest='enablegb',\n default='False', help='Enable gevent block monitor')", "title": "" }, { "docid": "55e56b94432e893d21c5b4ce09cefc93", "score": "0.49545103", "text": "def set_options(self, **kwargs):\n for k, v in kwargs.items():\n if k == 'newton_fallback':\n self.newton_fallback = v\n else:\n raise ValueError(f'Unknown option {k}')", "title": "" }, { "docid": "31b0be715c40dd37f08678526cb1f204", "score": "0.49535853", "text": "def _declare_options(self):\n super()._declare_options()\n opt = self.options\n\n # Remove unused options from base options here, so that users\n # attempting to set them will get KeyErrors.\n for unused_option in (\"atol\", \"rtol\", \"maxiter\", \"err_on_non_converge\"):\n opt.undeclare(unused_option)", "title": "" }, { "docid": "a367713a151e66f02667408ff4271121", "score": "0.4948981", "text": "def options(self, parser, env):\n booloptions = [\n (('-W',), 'store_true', 'nwrite', \n \"Don't write values into reg_settings\",),\n (('-r','--reg'), 'store_true', 'regonly', \n \"Only run regression tests\",),\n (('-n','--new'), 'store_true', 'newonly', \n \"Only run tests that should fail\",),\n ]\n options = [\n (('-f','--file'), 'store', 'filename', \n \"Path to reg_settings.py\", 'FILE',),\n ]\n for com, action, dest, help in booloptions:\n if com[0] in settings.options:\n parser.add_option(*com, action=action, dest=dest, \n help=help, default=True)\n else:\n parser.add_option(*com, action=action, dest=dest, \n help=help, default=False)\n for com, action, dest, help, metavar in options:\n if com[0] in settings.options:\n parser.add_option(*com, action=action, dest=dest, \n help=help, metavar=metavar, \n default=settings.options[com[0]])\n else:\n parser.add_option(*com, action=action, dest=dest, help=help, \n metavar=metavar, 
default=settings.__file__)\n Plugin.options(self, parser, env=env)", "title": "" }, { "docid": "a022d7b2a9e95bdd3ed16f5ca6b92a94", "score": "0.4947803", "text": "def ap_tag_modifications(vmf: VMF):\n if vbsp_options.get(str, 'game_id') != utils.STEAM_IDS['APTAG']:\n return # Wrong game!\n\n LOGGER.info('Performing Aperture Tag modifications...')\n\n has = vbsp.settings['has_attr']\n # This will enable the PaintInMap property.\n has['Gel'] = True\n\n # Set as if the player spawned with no pgun\n has['spawn_dual'] = False\n has['spawn_single'] = False\n has['spawn_nogun'] = True\n\n # Add paint fizzlers to all normal fizzlers\n for fizz in vmf.by_class['trigger_portal_cleanser']:\n p_fizz = fizz.copy()\n p_fizz['classname'] = 'trigger_paint_cleanser'\n vmf.add_ent(p_fizz)\n\n if p_fizz['targetname'].endswith('_brush'):\n p_fizz['targetname'] = p_fizz['targetname'][:-6] + '-br_fizz'\n\n del p_fizz['drawinfastreflection']\n del p_fizz['visible']\n del p_fizz['useScanline']\n\n for side in p_fizz.sides():\n side.mat = 'tools/toolstrigger'\n side.scale = 0.25\n\n transition_ents = instanceLocs.get_special_inst('transitionents')\n for inst in vmf.by_class['func_instance']:\n if inst['file'].casefold() not in transition_ents:\n continue\n inst['file'] = 'instances/bee2/transition_ents_tag.vmf'\n\n # Because of a bug in P2, these folders aren't created automatically.\n # We need a folder with the user's ID in portal2/maps/puzzlemaker.\n try:\n puzz_folders = os.listdir('../aperturetag/puzzles')\n except FileNotFoundError:\n LOGGER.warning(\"Aperturetag/puzzles/ doesn't exist??\")\n else:\n for puzz_folder in puzz_folders:\n new_folder = os.path.abspath(os.path.join(\n '../portal2/maps/puzzlemaker',\n puzz_folder,\n ))\n LOGGER.info('Creating', new_folder)\n os.makedirs(\n new_folder,\n exist_ok=True,\n )", "title": "" }, { "docid": "07ad7b3f7142e4dd6a4252ba76a0bf20", "score": "0.49456075", "text": "def finalize_options(self) -> None:", "title": "" }, { "docid": "07ad7b3f7142e4dd6a4252ba76a0bf20", "score": "0.49456075", "text": "def finalize_options(self) -> None:", "title": "" }, { "docid": "07ad7b3f7142e4dd6a4252ba76a0bf20", "score": "0.49456075", "text": "def finalize_options(self) -> None:", "title": "" }, { "docid": "bebff5c5dc84f9e055496c785ba8958f", "score": "0.494515", "text": "def update(self):\n\t\tself._is_optional = False\n\t\tfor option in self.info.required_opts:\n\t\t\tself._type_dependant_functions[option.type](option)\n\n\t\tself._is_optional = True\n\t\tfor option in self.info.optional_opts:\n\t\t\tself._type_dependant_functions[option.type](option)\n\n\t\tself.command_text = self._uncompress_command + self.command_text + self._compress_command", "title": "" }, { "docid": "c275cbcbdbe21f869448a3ab1834e289", "score": "0.4933818", "text": "def setDefaultOptionsGVB(resolution):\n\n pass", "title": "" }, { "docid": "a2013ba83c95f6aad04f06b5a1bbbbda", "score": "0.49295175", "text": "def _setOptions(self, obj, options):\n for name, value in options.items():\n option = getattr(obj.options, name, None)\n if option is not None:\n setattr(obj.options, name, value)", "title": "" }, { "docid": "8a91a2e834bed6dade330851a32e9c9c", "score": "0.49291962", "text": "def optionsChanged(self, options: ghidra.framework.options.Options, program: ghidra.program.model.listing.Program) -> None:\n ...", "title": "" }, { "docid": "024d7165309016638aa7b9d10378247e", "score": "0.49241617", "text": "def _declare_options(self):\n super()._declare_options()\n opt = self.options\n opt.declare(\n 
'bound_enforcement', default='scalar', values=['vector', 'scalar', 'wall'],\n desc=\"If this is set to 'vector', the entire vector is backtracked together \" +\n \"when a bound is violated. If this is set to 'scalar', only the violating \" +\n \"entries are set to the bound and then the backtracking occurs on the vector \" +\n \"as a whole. If this is set to 'wall', only the violating entries are set \" +\n \"to the bound, and then the backtracking follows the wall - i.e., the \" +\n \"violating entries do not change during the line search.\")\n opt.declare('print_bound_enforce', default=False,\n desc=\"Set to True to print out names and values of variables that are pulled \"\n \"back to their bounds.\")", "title": "" }, { "docid": "8ab9f14e95b9d158394680a156cf1b3d", "score": "0.49235466", "text": "def update_known(self, **kwargs):\n known, unknown = {}, {}\n for k, v in kwargs.items():\n if k in self._options:\n known[k] = v\n else:\n unknown[k] = v\n updated = set(known.keys())\n if updated:\n with self.rollback(updated, reraise=True):\n for k, v in known.items():\n self._options[k].set(v)\n self.changed.send(self, updated=updated)\n return unknown", "title": "" }, { "docid": "a498928ba144cffcbd6a3976ec399a8f", "score": "0.49204737", "text": "def AddUpdateLabelsFlags(parser):\n UPDATE_LABELS_FLAG.AddToParser(parser)\n REMOVE_LABELS_FLAG.AddToParser(parser)", "title": "" }, { "docid": "7a14b99016103e665a6c939cc2d17874", "score": "0.4907534", "text": "def edit_calculator_options(options: Options, sections: list) -> dict:\n return edit_options(\n options.votca_calculators_options, sections, options.path_optionfiles)", "title": "" }, { "docid": "638e42ce73a5756daeea109d3352cdd9", "score": "0.48979598", "text": "def addOptions(self, options: List[Option]):\n self.options.extend([o.option for o in options])\n self._setSchema()", "title": "" }, { "docid": "dfaa266f18af8bb0c313d443def92203", "score": "0.4896546", "text": "def setOptions(self):\n self.parser.add_option('--site',\n dest = 'sitename',\n default = None,\n help = 'The PhEDEx node name of the site to be checked.')\n self.parser.add_option('--lfn',\n dest = 'userlfn',\n default = None,\n help = 'A user lfn address.')", "title": "" }, { "docid": "6321ecb27db4500015fd44d7abec2722", "score": "0.4894725", "text": "def set_options(self, **fields):\n for field in fields:\n if not hasattr(self._options, field):\n raise AttributeError(\n f\"Options field {field} is not valid for {type(self).__name__}\"\n )\n\n self._options.update_options(**fields)\n self._set_options = self._set_options.union(fields)", "title": "" }, { "docid": "5d229642af4d352aa3e4091786b68d0f", "score": "0.48895237", "text": "def addOptionsToWidget(self):\r\n for option in self.options:\r\n self.widget._qwidget.addItem(option.text)", "title": "" }, { "docid": "76b32aa2e8c6bbbe12bd6b499c15c463", "score": "0.48845968", "text": "def create_arnold_options():\n\n core.createOptions()\n\n return", "title": "" }, { "docid": "97885f3526d8c84c9f2e60beddd814fe", "score": "0.4884518", "text": "def SetupOptions(self):\n\n parser = pykPlayer.SetupOptions(self, usage = \"%prog [options] <CDG file>\")\n\n # Remove irrelevant options.\n parser.remove_option('--font-scale')\n \n return parser", "title": "" }, { "docid": "949322e1b599a5a26f4da276c398852d", "score": "0.4876573", "text": "def blit_opts(self):\n split_width = self.rect.width // (1 + len(self.opts))\n for i, surf in enumerate(self.make_opt_surfs(), 1):\n self.opt_rects.append(self.blit_opt(surf, split_width * i))", "title": "" 
}, { "docid": "5e338181b3a8324955823420f08311d6", "score": "0.48693496", "text": "def setup_options(self, options, default_conf=None):\n for ii, pset in enumerate(self.psets):\n previous = self.psets[ii - 1] if ii > 0 else None\n pset.override(options, default_conf[0], previous)", "title": "" }, { "docid": "7dfe73e35ee5599bb00ebf028d6cebcf", "score": "0.48555058", "text": "def add_custom_commandline_options(self, parser):\n pass", "title": "" }, { "docid": "ff5ac3dfe38155b7dd659c688164e7c6", "score": "0.48550949", "text": "def parse_extension_options(\n self,\n option_spec: Dict[str, Callable[[Optional[str]], object]],\n datalines: statemachine.StringList,\n ) -> Dict[str, object]:\n node = nodes.field_list()\n newline_offset, blank_finish = self.nested_list_parse(\n datalines, 0, node, initial_state=ExtensionOptions, blank_finish=True\n )\n if newline_offset != len(datalines): # incomplete parse of block\n raise MarkupError(\"invalid option block\")\n try:\n options = utils.extract_extension_options(node, option_spec)\n except KeyError as detail:\n raise MarkupError('unknown option: \"%s\"' % detail.args[0])\n except (ValueError, TypeError) as detail:\n raise MarkupError(\"invalid option value: %s\" % \" \".join(detail.args))\n # except utils.ExtensionOptionError as detail:\n # return 0, (\"invalid option data: %s\" % \" \".join(detail.args))\n if blank_finish:\n return options\n else:\n raise MarkupError(\"option data incompletely parsed\")", "title": "" }, { "docid": "b9f11eb405d7a6bfd4f2aba6d48d4a5d", "score": "0.48537734", "text": "def add_cmdline_options(ctx):\n def print_version(option, opt, value, parser):\n print('BDE Tools version: %s' % BDE_TOOLS_VERSION)\n sys.exit(0)\n\n ctx.add_option('--bde-tools-version',\n action='callback', callback=print_version)\n\n configure_opts = [\n (('verify',),\n {'action': 'store_true',\n 'default': False,\n 'help': 'perform additional checks to verify repository structure'}),\n (('use-dpkg-install',),\n {'action': 'store_true',\n 'default': False,\n 'help': \"configure install options according to dpkg \"\n \"conventions (this options supercedes the options \"\n \"'use-flat-include-dir', 'libdir', and 'lib-suffix')\"}),\n (('use-flat-include-dir',),\n {'action': 'store_true',\n 'default': False,\n 'help': 'install all headers into $PREFIX/include '\n 'instead of $PREFIX/include/<package_group>, and '\n 'change .pc files accordingly'}),\n (('libdir',),\n {'type': 'string',\n 'default': 'lib',\n 'dest': 'libdir',\n 'help': 'the name of the directory under $PREFIX where '\n 'library files are installed [default: %default]'}),\n (('bindir',),\n {'type': 'string',\n 'default': 'bin',\n 'dest': 'bindir',\n 'help': 'the name of the directory under $PREFIX where '\n 'binaries are installed [default: %default]'}),\n (('lib-suffix',),\n {'type': 'string',\n 'default': '',\n 'help': '(deprecated) add a suffix to the names of the package '\n 'group library files being built [default: %default]'}),\n (('debug-opt-keys',),\n {'type': 'string',\n 'default': None,\n 'help': 'debug rules in the opts files for the specified '\n '(comma separated) list of opts keys'}),\n (('werror',),\n {'choices': ('none', 'cpp'),\n 'default': None,\n 'help': 'whether to treat all compiler warning as errors when '\n 'building with clang or gcc (cpp/none). 
'\n \"none: don't enable -Werror, \"\n 'cpp: enable -Werror for .cpp files but not .t.cpp files '\n '[default value depends on compiler]'})\n ]\n configure_group = ctx.get_option_group('configure options')\n configure_opts = optionsutil.get_ufid_cmdline_options() + configure_opts\n cmdlineutil.add_options(configure_group, configure_opts)\n\n waf_platform = Utils.unversioned_sys_platform()\n if waf_platform == 'win32':\n win_opts = [\n (('msvc-runtime-type',),\n {'choices': ('static', 'dynamic'),\n 'default': 'dynamic',\n 'help': 'whether to build using the static or dynamic version '\n 'of the C run-time library on Windows '\n '[default: %default]'})\n ]\n cmdlineutil.add_options(configure_group, win_opts)\n\n install_group = ctx.get_option_group(\n 'Installation and uninstallation options')\n install_opts = [\n (('install-dep',),\n {'choices': ('yes', 'no'),\n 'default': 'yes',\n 'help': 'when doing a targeted install, whether to also '\n 'install the dependencies of the targets (yes/no) '\n '[default: %default]'}),\n (('install-parts',),\n {'choices': ('all', 'lib', 'bin', 'h', 'pc'),\n 'default': 'all',\n 'help': 'what parts to install (all/h/lib/pc). '\n 'all -- everything, '\n 'lib -- lib files only, '\n 'bin - executable files only, '\n 'h -- header files only, '\n 'pc -- pkg-config files only '\n '[default: %default]'}),\n ]\n cmdlineutil.add_options(install_group, install_opts)\n\n build_group = ctx.get_option_group('build and install options')\n build_opts = [\n (('clang-compilation-database',),\n {'action': 'store_true',\n 'default': False,\n 'help': 'Generate a clang compilation database '\n '(compile_commands.json) in the build output directory'})\n ]\n cmdlineutil.add_options(build_group, build_opts)\n\n # Set the upper bound of the default number of jobs to 24\n jobs = ctx.parser.get_option('-j').default\n if jobs > 24:\n jobs = 24\n ctx.parser.remove_option('-j')\n ctx.parser.add_option('-j', '--jobs',\n dest='jobs',\n default=jobs,\n type='int',\n help='amount of parallel jobs (%r)' % jobs)", "title": "" }, { "docid": "5c020853e6dc72ca7da9d65582c80579", "score": "0.4853445", "text": "def finalize_options(self):\n include_extras = self.include_extras.split(',')\n\n try:\n for name, pkgs in self.distribution.extras_require.items():\n if self.include_all_extras or name in include_extras:\n self.extra_pkgs.extend(pkgs)\n\n except TypeError: # Mostly for old setuptools (< 30.x)\n for name, pkgs in self.distribution.command_options['options.extras_require'].items():\n if self.include_all_extras or name in include_extras:\n self.extra_pkgs.extend(pkgs)", "title": "" }, { "docid": "0804d651effa92aa8aa4017b30042679", "score": "0.48369905", "text": "def update_flavor_extra_spec(self, flavor_id, key, **kwargs):\n doc = xml_utils.Document()\n for (k, v) in kwargs.items():\n element = xml_utils.Element(k)\n doc.append(element)\n value = xml_utils.Text(v)\n element.append(value)\n\n resp, body = self.put('flavors/%s/os-extra_specs/%s' %\n (flavor_id, key), str(doc))\n body = xml_utils.xml_to_json(etree.fromstring(body))\n return resp, {key: body}", "title": "" }, { "docid": "f80e7d47eb66521aaf5b7f08f4252611", "score": "0.4836855", "text": "def _process_options(self):\r\n\r\n # Process options as relevant\r\n self.options.fs_root = os.path.abspath(self.options.fs_root)\r\n self.options.output_file = os.path.abspath(self.options.output_file)\r\n\r\n # Convert options from attributes to keys\r\n self.options = dict((k, getattr(self.options, k)) for k in\r\n self._options_seq)", "title": "" 
}, { "docid": "b1133e4dae6d14380706a342caac791b", "score": "0.48348233", "text": "def AddDeviceConfigFlagsToParser(parser):\n base.Argument(\n '--version-to-update',\n type=int,\n help=\"\"\"\\\n The version number to update. If this value is `0` or unspecified, it\n will not check the version number of the server and will always update\n the current version; otherwise, this update will fail if the version\n number provided does not match the latest version on the server. This\n is used to detect conflicts with simultaneous updates.\n \"\"\").AddToParser(parser)\n data_group = parser.add_mutually_exclusive_group(required=True)\n base.Argument(\n '--config-file',\n help='Path to a local file containing the data for this configuration.'\n ).AddToParser(data_group)\n base.Argument(\n '--config-data',\n help=('The data for this configuration, as a string. For any values '\n 'that contain special characters (in the context of your shell), '\n 'use the `--config-file` flag instead.')\n ).AddToParser(data_group)", "title": "" }, { "docid": "9d87fcb87d16e8913a66e409e8f485f0", "score": "0.48318073", "text": "def set_options(self, option_dict):\n\n for option_name, value in option_dict.items():\n if not hasattr(self.options, option_name):\n warn(\n \"Option '{}' is unknown and will be ignored.\".format(option_name),\n stacklevel=2,\n )\n continue\n setattr(self.options, option_name, value)", "title": "" }, { "docid": "b77acd1f8ddffc7babec0467bc58a911", "score": "0.48055083", "text": "def test_option_code_update(self):\n pass", "title": "" }, { "docid": "b6001f4cff98dea1249b91127efb04be", "score": "0.47978112", "text": "def after_configuration_update(self, options: List[Union[tuple, str]]):\n providers_to_update = set({})\n for option in options:\n if option == 'completions_wait_for_ms':\n self.wait_for_ms = self.get_conf(\n 'completions_wait_for_ms')\n elif isinstance(option, tuple):\n option_name, provider_name, *__ = option\n if option_name == 'enabled_providers':\n provider_status = self.get_conf(\n ('enabled_providers', provider_name))\n if provider_status:\n self.start_provider_instance(provider_name)\n else:\n self.shutdown_provider_instance(provider_name)\n elif option_name == 'provider_configuration':\n providers_to_update |= {provider_name}\n\n # FIXME: Remove this after migrating the ConfManager to an observer\n # pattern.\n editor_method_sec_opts = {\n 'set_code_snippets_enabled': (self.CONF_SECTION,\n 'enable_code_snippets'),\n 'set_hover_hints_enabled': (self.CONF_SECTION,\n 'provider_configuration',\n 'lsp',\n 'values',\n 'enable_hover_hints'),\n 'set_format_on_save': (self.CONF_SECTION,\n 'provider_configuration',\n 'lsp',\n 'values',\n 'format_on_save'),\n 'set_automatic_completions_enabled': ('editor',\n 'automatic_completions'),\n 'set_completions_hint_enabled': ('editor', 'completions_hint'),\n 'set_completions_hint_after_ms': ('editor',\n 'completions_hint_after_ms'),\n 'set_underline_errors_enabled': ('editor', 'underline_errors'),\n 'set_automatic_completions_after_chars': (\n 'editor', 'automatic_completions_after_chars'),\n 'set_automatic_completions_after_ms': (\n 'editor', 'automatic_completions_after_ms'),\n 'set_edgeline_columns': (self.CONF_SECTION,\n 'provider_configuration',\n 'lsp',\n 'values',\n 'pycodestyle/max_line_length'),\n 'set_edgeline_enabled': ('editor', 'edge_line'),\n }\n\n for method_name, (sec, *opt) in editor_method_sec_opts.items():\n opt = tuple(opt)\n if len(opt) == 1:\n opt = opt[0]\n if opt in options:\n opt_value = self.get_conf(opt, 
section=sec)\n self.sig_editor_rpc.emit('call_all_editorstacks',\n (method_name, (opt_value,)),\n {})\n\n # Update entries in the source menu\n # FIXME: Delete this after CONF is moved to an observer pattern.\n # and the editor migration starts\n self.sig_editor_rpc.emit('update_source_menu', (options,), {})", "title": "" }, { "docid": "c1e0810308e88c606ffdcf3b5f1eca9c", "score": "0.47974354", "text": "def test_update_non_standard(self):\n self.group_option.convert_data()\n self.group_option.update_non_standard()", "title": "" }, { "docid": "d603049c339ceb902e9d5e6648dcc7e8", "score": "0.47931582", "text": "def update_countries_b(country_a: str):\n countries_b = configs['scope'][country_a].split(',')\n options = [\n {\"label\": x, \"value\": x}\n for x in countries_b\n ]\n return options", "title": "" }, { "docid": "75239d7d21f0a21e61b11b2c4b36f7ac", "score": "0.4791688", "text": "def AddOpts(parser):\n group = optparse.OptionGroup(parser, 'Common `slave_utils.py` Options')\n group.add_option('--slave-utils-gsutil-py-path', metavar='PATH',\n help='The path to the `gsutil` command to use for Google Storage '\n 'operations. This file lives in the <depot_tools> repository.')\n parser.add_option_group(group)\n\n return _AddArgsCallback", "title": "" }, { "docid": "4047578aee859196e059204c5ea83cb6", "score": "0.47898042", "text": "def initialize_options(self):\n self.warning_as_errors = None", "title": "" }, { "docid": "893fcdbf22ee9d668388d0561dea926b", "score": "0.4788137", "text": "def add_options(self, parser):\n parser.add_option(\"-a\", \"--addons\",\n dest=\"addons\",\n action=\"append\",\n metavar=\"ADDONS\",\n help=\"add-ons to be installed\")\n parser.add_option(\"--application\",\n dest=\"application\",\n default=\"firefox\",\n choices=APPLICATION_BINARY_NAMES.keys(),\n metavar=\"APPLICATION\",\n help=\"application name [default: %default]\")\n parser.add_option(\"--junit\",\n dest=\"junit_file\",\n metavar=\"PATH\",\n help=\"JUnit XML style report file\")\n parser.add_option(\"--report\",\n dest=\"report_url\",\n metavar=\"URL\",\n help=\"send results to the report server\")\n parser.add_option(\"--repository\",\n dest=\"repository_url\",\n metavar=\"URL\",\n help=\"URL of a custom repository\")\n parser.add_option(\"--restart\",\n dest=\"restart\",\n default=False,\n action=\"store_true\",\n help=\"restart the application between tests\")\n parser.add_option(\"--tag\",\n dest=\"tags\",\n action=\"append\",\n metavar=\"TAG\",\n help=\"Tag to apply to the report\")\n parser.add_option(\"--workspace\",\n dest=\"workspace\",\n metavar=\"PATH\",\n help=\"path to the workspace folder, which contains \"\n \"the testrun data [default: %tmp%]\")\n\n mozmill = optparse.OptionGroup(parser, \"Mozmill options\")\n mozmill.add_option(\"-l\", \"--logfile\",\n dest=\"logfile\",\n metavar=\"PATH\",\n help=\"path to log file\")\n parser.add_option_group(mozmill)", "title": "" }, { "docid": "5ffea8e16027583d768bf123dd528f55", "score": "0.47587046", "text": "def setupGlobalParserOptions(self):\n\t\tfileLoop2.FileLoop.setupGlobalParserOptions(self)\n\t\t### Input value options\n\t\tself.parser.add_option(\"--lowpass\", \"--lp\", dest=\"lowpass\", type=\"float\",\n\t\t\thelp=\"Low pass filter radius in Angstroms\", metavar=\"FLOAT\")\n\t\tself.parser.add_option(\"--highpass\", \"--hp\", dest=\"highpass\", type=\"float\",\n\t\t\thelp=\"High pass filter radius in Angstroms\", metavar=\"FLOAT\")\n\t\tself.parser.add_option(\"--median\", dest=\"median\", type=\"int\",\n\t\t\thelp=\"Median filter radius in 
Pixels\", metavar=\"INT\")\n\t\tself.parser.add_option(\"--pixlimit\", dest=\"pixlimit\", type=\"float\",\n\t\t\thelp=\"Limit pixel values to within <pixlimit> standard deviations\", metavar=\"FLOAT\")\n\t\tself.parser.add_option(\"--bin\", \"--shrink\", \"--binby\", dest=\"bin\", type=\"int\", default=4,\n\t\t\thelp=\"Bin the image\", metavar=\"INT\")\n\t\t### True / False options\n\t\tself.parser.add_option(\"--invert\", dest=\"invert\", default=False,\n\t\t\taction=\"store_true\", help=\"Invert image density before processing\")\n\t\tself.parser.add_option(\"--planereg\", dest=\"planereg\", default=False,\n\t\t\taction=\"store_true\", help=\"Fit a 2d plane regression to the data and subtract\")", "title": "" }, { "docid": "4bb7270f44cc21f92685ebec1bf374bd", "score": "0.47567454", "text": "def registerOptions(self, options: ghidra.framework.options.Options, program: ghidra.program.model.listing.Program) -> None:\n ...", "title": "" }, { "docid": "1ffcad72220691fdcd131cef6c6e2fb9", "score": "0.4754621", "text": "def update_bayesian_parameters(self, u_delta_dict, f_dict):\n if self.option == 'FFG':\n self.u_weight += u_delta_dict[self.weight]\n self.f_weight = f_dict[self.weight]\n if self.bias is not None:\n self.u_bias += u_delta_dict[self.bias]\n self.f_bias = f_dict[self.bias]\n elif self.option == 'MVG':\n self.u_weight += u_delta_dict[self.weight]\n self.a_weight = f_dict[self.weight]['a_inverse']\n self.s_weight = f_dict[self.weight]['s_inverse']", "title": "" }, { "docid": "928162db06f9af8560b4c6f4987b11ee", "score": "0.475311", "text": "def _register_opt(parser, *args, **kwargs):\n try:\n # Flake8 3.x registration\n parser.add_option(*args, **kwargs)\n except (optparse.OptionError, TypeError):\n # Flake8 2.x registration\n parse_from_config = kwargs.pop('parse_from_config', False)\n option = parser.add_option(*args, **kwargs)\n if parse_from_config:\n parser.config_options.append(option.get_opt_string().lstrip('-'))", "title": "" }, { "docid": "4c8bbfb92f1b0011edf603dc32648bbf", "score": "0.47506297", "text": "async def test_options_update(\n hass: HomeAssistant,\n vizio_connect: pytest.fixture,\n vizio_update: pytest.fixture,\n) -> None:\n await _test_setup_speaker(hass, True)\n config_entry = hass.config_entries.async_entries(DOMAIN)[0]\n assert config_entry.options\n new_options = config_entry.options.copy()\n updated_options = {CONF_VOLUME_STEP: VOLUME_STEP}\n new_options.update(updated_options)\n hass.config_entries.async_update_entry(\n entry=config_entry,\n options=new_options,\n )\n assert config_entry.options == updated_options\n await hass.async_block_till_done()\n await _test_service(\n hass, MP_DOMAIN, \"vol_up\", SERVICE_VOLUME_UP, None, num=VOLUME_STEP\n )", "title": "" }, { "docid": "27651c40e683c78425c17335677e1a3c", "score": "0.47492838", "text": "def merge(self, opts):\n toset = {}\n for k, v in opts.items():\n if v is not None:\n if isinstance(v, (list, tuple)):\n toset[k] = getattr(self, k) + v\n else:\n toset[k] = v\n self.update(**toset)", "title": "" }, { "docid": "267de8a24c2e4531820bbc2415a06497", "score": "0.47483096", "text": "def modify_commandline_options(parser: ArgumentParser, is_train):\n if is_train:\n parser.add_argument(\"--warp_mode\", default=\"gan\", choices=(\"gan\", \"ce\"))\n parser.add_argument(\n \"--lambda_ce\",\n type=float,\n default=100,\n help=\"weight for cross entropy loss in final term\",\n )\n # based on the num entries in self.visual_names during training\n parser.set_defaults(display_ncols=4)\n # 
https://stackoverflow.com/questions/26788214/super-and-staticmethod-interaction\n parser = super(WarpModel, WarpModel).modify_commandline_options(\n parser, is_train\n )\n return parser", "title": "" }, { "docid": "c9370530b2f1780b3aa0df18b4829e6c", "score": "0.474189", "text": "def UpdateOption(self) -> int:", "title": "" }, { "docid": "0f522e6c4dc21171582a790d299be091", "score": "0.47412032", "text": "def pytest_addoption(parser):\n group = parser.getgroup(\"general\")\n group.addoption(\"--check-links\", action=\"store_true\", help=\"Check links for validity\")\n group.addoption(\"--check-anchors\", action=\"store_true\", help=\"Check link anchors for validity\")\n group.addoption(\n \"--links-ext\",\n action=StoreExtensionsAction,\n default=default_extensions,\n help=\"Which file extensions to check links for, \"\n \"as a comma-separated list of values. Supported \"\n \"extensions are: %s.\" % extensions_str(supported_extensions),\n )\n group.addoption(\n \"--check-links-ignore\",\n action=\"append\",\n help=\"A list of regular expressions that match URIs that should not be checked.\",\n )\n group.addoption(\n \"--check-links-cache\", action=\"store_true\", help=\"Cache requests when checking links\"\n )\n group.addoption(\"--check-links-cache-name\", action=StoreCacheAction, help=\"Name of link cache\")\n group.addoption(\n \"--check-links-cache-backend\", action=StoreCacheAction, help=\"Cache persistence backend\"\n )\n group.addoption(\n \"--check-links-cache-expire-after\",\n action=StoreCacheAction,\n help=\"Time to cache link responses (seconds)\",\n )\n group.addoption(\n \"--check-links-cache-allowable-codes\",\n action=StoreCacheAction,\n help=\"HTTP response codes to cache\",\n )\n group.addoption(\n \"--check-links-cache-backend-opt\",\n action=StoreCacheAction,\n help=\"Backend-specific options for link cache, specfied as `opt:value`\",\n )", "title": "" } ]
dc5c4d99d8041ad7be98aaba4665c596
Flag all EC2s that have been running for longer than 1 week (WARN) or 2 weeks (FAIL) if their names contain any of the strings in `flag_names`, or if they have no name.
[ { "docid": "ab45cc8da72d33d0c824df675203a820", "score": "0.61171246", "text": "def check_long_running_ec2s(connection, **kwargs):\n check = CheckResult(connection, 'check_long_running_ec2s')\n if Stage.is_stage_prod() is False:\n check.summary = check.description = 'This check only runs on Foursight prod'\n return check\n\n client = boto3.client('ec2')\n # flag instances that contain any of flag_names and have been running\n # longer than warn_time\n flag_names = ['awsem']\n warn_time = (datetime.datetime.now(datetime.timezone.utc) -\n datetime.timedelta(days=7))\n fail_time = (datetime.datetime.now(datetime.timezone.utc) -\n datetime.timedelta(days=14))\n ec2_res = client.describe_instances(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]\n )\n check.full_output = []\n check.brief_output = {'one_week': [], 'two_weeks': []}\n for ec2_info in ec2_res.get('Reservations', []):\n instances = ec2_info.get('Instances', [])\n if not instances:\n continue\n # for multiple instance (?) just check if any of them require warnings\n for ec2_inst in instances:\n state = ec2_inst.get('State')\n created = ec2_inst.get('LaunchTime')\n if not state or not created:\n continue\n inst_name = [kv['Value'] for kv in ec2_inst.get('Tags', [])\n if kv['Key'] == 'Name']\n other_tags = {kv['Key']: kv['Value'] for kv in ec2_inst.get('Tags', [])\n if kv['Key'] != 'Name'}\n ec2_log = {\n 'state': state['Name'], 'name': inst_name,\n 'id': ec2_inst.get('InstanceId'),\n 'type': ec2_inst.get('InstanceType'),\n 'date_created_utc': created.strftime('%Y-%m-%dT%H:%M')\n }\n if not inst_name:\n flag_instance = True\n # include all other tags if Name tag is empty\n ec2_log['tags'] = other_tags\n elif any([wn for wn in flag_names if wn in ','.join(inst_name)]):\n flag_instance = True\n else:\n flag_instance = False\n # see if long running instances are associated with a deleted WFR\n if flag_instance and inst_name and created < warn_time:\n search_url = 'search/?type=WorkflowRunAwsem&awsem_job_id='\n search_url += '&awsem_job_id='.join([name[6:] for name in inst_name if name.startswith('awsem-')])\n wfrs = ff_utils.search_metadata(search_url, key=connection.ff_keys)\n if wfrs:\n ec2_log['active workflow runs'] = [wfr['@id'] for wfr in wfrs]\n deleted_wfrs = ff_utils.search_metadata(search_url + '&status=deleted', key=connection.ff_keys)\n if deleted_wfrs:\n ec2_log['deleted workflow runs'] = [wfr['@id'] for wfr in deleted_wfrs]\n # always add record to full_output; add to brief_output if\n # the instance is flagged based on 'Name' tag\n if created < fail_time:\n if flag_instance:\n check.brief_output['two_weeks'].append(ec2_log)\n check.full_output.append(ec2_log)\n elif created < warn_time:\n if flag_instance:\n check.brief_output['one_week'].append(ec2_log)\n check.full_output.append(ec2_log)\n\n if check.brief_output['one_week'] or check.brief_output['two_weeks']:\n num_1wk = len(check.brief_output['one_week'])\n num_2wk = len(check.brief_output['two_weeks'])\n check.summary = ''\n if check.brief_output['two_weeks']:\n check.status = 'FAIL'\n check.summary = '%s suspect EC2s running longer than 2 weeks' % num_2wk\n if check.brief_output['one_week']:\n if check.status != 'FAIL':\n check.status = 'WARN'\n if check.summary:\n check.summary += ' and %s others longer than 1 week' % num_1wk\n else:\n check.summary = '%s suspect EC2s running longer than 1 week' % num_1wk\n check.description = check.summary + '. Flagged because name is empty or contains %s. There are also %s non-flagged instances.' 
% (flag_names, len(check.full_output) - (num_1wk + num_2wk))\n else:\n check.status = 'PASS'\n check.summary = '%s EC2s running longer than 1 week' % (len(check.full_output))\n return check", "title": "" } ]
[ { "docid": "9d8ee2ccefb74207e8b9fc29c0cf0d40", "score": "0.53161347", "text": "def flag_ignored_mousedays(self):\n dirname_in = self.binary_dir + 'HCM_data/qc/flagged/'\n dirname_in2 = self.binary_dir + 'HCM_data/qc/flagged_msgs/'\n flagged = []\n reason = []\n for strain in xrange(20): # wide enough\n for mouse in xrange(20):\n for day in xrange(40): \n id_string = \"group%d_individual%d_day%d.npy\" %(strain, mouse, day)\n fname = dirname_in + id_string\n fname2 = dirname_in2 + id_string\n try:\n mouseNumber, dayNumber, flag = np.load(fname)\n if flag:\n act, msgs = np.load(fname2)[0]\n assert (len(msgs) > 0)\n flagged.append((mouseNumber, dayNumber))\n reason.append((act, msgs))\n except IOError:\n pass\n self.flagged = flagged\n self.reason = reason \n\n return flagged, reason", "title": "" }, { "docid": "2383b7bb73c8eac17624864a2dc90e17", "score": "0.51817876", "text": "def check_emp_type_flags(toks, verbose=False):\n part_time, full_time = (0,0)\n for tok in toks:\n if tok._.is_part_time == True:\n part_time += 1 \n elif tok._.is_full_time == True:\n full_time += 1\n if part_time and full_time:\n if verbose == True:\n print(\"Part_time and full_time flags found.\")\n return 'Other Employees'\n if part_time:\n return 'Part-Time Employees'\n if full_time:\n return 'Full-Time Employees'\n return 'Other Employees'", "title": "" }, { "docid": "1948b52dada7f863171358fc4e1853c7", "score": "0.5024941", "text": "def warning_flags(d, p):\n\n if confirm_present(f'{d.base}/compiled_metrics.tsv'):\n compiled = open(f'{d.base}/compiled_metrics.tsv', 'r')\n flags_dict = {}\n for line in compiled:\n if line.startswith('Sample'):\n pass\n else:\n columns = line.strip().split('\\t')\n sample = columns[0]\n organism = columns[1]\n mash_prob = float(columns[3])\n n50 = float(columns[9])\n genome_len = float(columns[11])\n avg_coverage = float(columns[12])\n read_quality = float(columns[15])\n reads_mapped = float(columns[18])\n\n if mash_prob < p.thresh_mash_prob:\n mash_flag = 'PASS'\n else:\n mash_flag = 'FAIL'\n\n if n50 >= p.thresh_n50:\n n50_flag = 'PASS'\n else:\n n50_flag = 'FAIL'\n\n if p.genome_specific_compare.upper() == 'YES': # this switch incorporates expected genome size by organism type from the config file\n genomes_dict = {}\n sizes = p.specific_genome_sizes.strip().split(';')\n for entry in sizes:\n entry_org = entry.split(',')[0].strip()\n genomesizes = entry.split(',')[1].strip()\n genomes_dict[entry_org] = genomesizes\n\n genome_min = int(p.thresh_genome_sz.strip().split(':')[0]) # Default is going to be that there isnt an entry for this specific organism as some organisms have _OtypeHtype extensions in MASH output\n genome_max = int(p.thresh_genome_sz.strip().split(':')[1])\n for org in genomes_dict.keys(): # if an entry in the dictionary is found to be within the current organism, it wil adjust the genome size to reflect this.\n if org in organism:\n genome_exp_sizes = genomes_dict[org]\n genome_min = float(genome_exp_sizes.split(':')[0])\n genome_max = float(genome_exp_sizes.split(':')[1])\n matchfound = True\n else: # Will compare all genomes to the same size parameters\n genome_min = int(p.thresh_genome_sz.strip().split(':')[0])\n genome_max = int(p.thresh_genome_sz.strip().split(':')[1])\n\n if genome_min <= genome_len and genome_len <= genome_max:\n genome_flag = 'PASS'\n else:\n genome_flag = 'FAIL'\n\n if avg_coverage >= p.thresh_avg_coverage:\n coverage_flag = 'PASS'\n else:\n coverage_flag = 'FAIL'\n\n if read_quality >= p.thresh_read_quality:\n readqual_flag = 'PASS'\n 
else:\n readqual_flag = 'FAIL'\n\n if reads_mapped >= p.thresh_mapping:\n readmap_flag = 'PASS'\n else:\n readmap_flag = 'FAIL'\n\n flags_dict[sample] = [mash_flag, genome_flag, n50_flag, coverage_flag, readmap_flag, readqual_flag]\n compiled.close()\n\n flags = open(f'{d.metrics}/warning_flags.tsv', 'w')\n flags.write(f'Sample\\tMashPvalue<{p.thresh_mash_prob}\\tGenomeSize\\tn50>{round(p.thresh_n50/1000)}KB\\tAvgCoverage>{p.thresh_avg_coverage}X\\tReadMapping>{p.thresh_mapping}%\\tReadQuality>{p.thresh_read_quality}\\n')\n for sample in d.sample_list:\n values = flags_dict[sample]\n cols = [sample, *values]\n line_form = '\\t'.join(cols)\n flags.write(f'{line_form}\\n')\n flags.close()\n\n flags = open(f'{d.metrics}/warning_flags.tsv', 'r')\n flags_mqc = open(f'{d.for_multiqc}/warning_flags_multiqc.tsv', 'w')\n for line in flags:\n flags_mqc.write(line.replace('PASS', '0.1').replace('FAIL', '1'))\n flags.close()\n flags_mqc.close()\n\n else:\n logger(f'Cannot find compiled_metrics.tsv file in {d.base}. Quitting.')\n quit()", "title": "" }, { "docid": "2f26f1083b2282a31449253b711f3658", "score": "0.4976341", "text": "def when_not_all(*desired_flags):\n return helpers._when_not_all(desired_flags)", "title": "" }, { "docid": "cfede21d72524ce5ad8b7c8f9850b3b6", "score": "0.48628184", "text": "def test_skip_check_level(self):\n ds = self.cs.load_dataset(static_files[\"ru07\"])\n score_groups = self.cs.run_all(\n ds,\n [\"cf\"],\n skip_checks=[\n \"check_flags:A\",\n \"check_convention_possibly_var_attrs:M\",\n \"check_standard_name:L\",\n ],\n )\n\n name_set = {sg.name for sg in score_groups[\"cf\"][0]}\n # flattened set of messages\n msg_set = {msg for sg in score_groups[\"cf\"][0] for msg in sg.msgs}\n\n expected_excluded_names = {\n \"§3.5 flag_meanings for lat\",\n \"§3.5 flag_meanings for lon\",\n \"§3.5 lat is a valid flags variable\",\n \"§3.5 lon is a valid flags variable\",\n }\n\n self.assertTrue(len(expected_excluded_names & name_set) == 0)\n\n # should skip references\n ref_msg = \"references global attribute should be a non-empty string\"\n self.assertTrue(ref_msg not in msg_set)\n # check_standard_name is high priority, but we requested only low,\n # so the standard_name check should still exist\n standard_name_hdr = \"§3.3 Standard Name\"\n self.assertTrue(standard_name_hdr in name_set)", "title": "" }, { "docid": "2217dd678260c5a660d4818d07871491", "score": "0.48462257", "text": "def sleep_stage_check(self):\n try:\n annotations = self.event_dictionary['stage_2_sleep']\n except:\n annotations = None\n print('stage 2 missing')\n tol = self.tol * self.sfreq\n time_find = self.time_find\n mean_peak_power = self.mean_peak_power\n Duration = self.Duration\n front = self.front\n last = self.raw.last_samp - self.back\n if annotations is not None:\n temp_time_find,temp_mean_peak_power,temp_duration = [],[],[]\n # seperate out stage 2\n On = annotations[annotations['Annotation'].apply(lambda x:'Markon:2' in x)]\n Off = annotations[annotations['Annotation'].apply(lambda x:'Markoff:2' in x)]\n if On.Onset.values[0] > Off.Onset.values[0]:\n On = np.concatenate([[Off.Onset.values[0] - 30],On.Onset.values])\n Off = Off.Onset.values\n else:\n On = On.Onset.values\n Off = Off.Onset.values\n stage_on_off = np.vstack([On, Off]).T\n for single_time_find, single_mean_peak_power, single_duration in zip(time_find,mean_peak_power,Duration):\n for on_time,off_time in stage_on_off:\n if intervalCheck([on_time,off_time],single_time_find,tol=tol):\n temp_time_find.append( single_time_find)\n 
temp_mean_peak_power.append(single_mean_peak_power)\n temp_duration.append( single_duration)\n time_find = temp_time_find\n mean_peak_power = temp_mean_peak_power\n Duration = temp_duration\n self.time_find = temp_time_find\n self.mean_peak_power= temp_mean_peak_power\n self.Duration = temp_duration\n \n result = pd.DataFrame({'Onset':time_find,'Duration':Duration,'Annotation':['spindle']*len(Duration)})\n result = result[(result['Onset'] > front) & (result['Onset'] < last)]\n self.auto_scores = result", "title": "" }, { "docid": "b05db1f96977f7e61878a5cac39a20fd", "score": "0.4845288", "text": "def _CheckFlags(self):\n unused_flags = [\n f for f in appcommands.GetCommandArgv()\n if f.startswith('--') or f.startswith('-')\n ]\n for flag in unused_flags:\n flag_name = flag[4:] if flag.startswith('--no') else flag[2:]\n flag_name = flag_name.split('=')[0]\n if flag_name not in FLAGS:\n print((\"FATAL Flags parsing error: Unknown command line flag '%s'\\n\"\n \"Run 'bq help' to get help\" % flag))\n sys.exit(1)\n else:\n print((\"FATAL Flags positioning error: Flag '%s' appears after final \"\n 'command line argument. Please reposition the flag.\\n'\n \"Run 'bq help' to get help.\" % flag))\n sys.exit(1)", "title": "" }, { "docid": "871a2838575c5224a1fa395715250408", "score": "0.4792826", "text": "def checkFailed(self):\n return self.updateRuns(self.config.failed,self.config.archive.failed,self.config.st_failed,archive=False)", "title": "" }, { "docid": "a3fc2e96461de4235f07a4d6539c4647", "score": "0.47548205", "text": "def flag_timeslots (msname=\"$MS\",begin=0,end=0,ifrs=\"all\",flagset=\"badts\"):;\n msname,ifrs,flagset = interpolate_locals(\"msname ifrs flagset\");\n begin *= FLAG_TIMESLOTS_MULTIPLIER;\n end *= FLAG_TIMESLOTS_MULTIPLIER;\n _flagms(msname,\"-I $ifrs -T $begin~$end -f $flagset -c\");", "title": "" }, { "docid": "4f2d5fbcdacd1a5913f4eb1cd49a2f21", "score": "0.4729313", "text": "def _WorkerStatusChecker(self):\r\n workers = [self._WorkerName(i) for i in xrange(self.flags.num_workers)]\r\n while True:\r\n running_workers = 0\r\n for worker_name in workers:\r\n if self._CheckInstanceRunning(worker_name):\r\n running_workers += 1\r\n if running_workers == self.flags.num_workers:\r\n return\r\n yield running_workers", "title": "" }, { "docid": "b4eea1799e3ea9e14b949c9e7e1838c6", "score": "0.47135532", "text": "def when_all(*desired_flags):\n return helpers._when_all(desired_flags)", "title": "" }, { "docid": "52424299c1915dc5caffc090e0ec0a6e", "score": "0.47054702", "text": "def flag_list():\n flags = (\n 'no_trk_error',\n 'is_contained',\n 'endpoint_accept',\n 'wrong_end_flag',\n 'ridge_accept',\n 'moments_accept',\n 'early_scatter_flag',\n )\n return flags", "title": "" }, { "docid": "2d2037a30ee688cf3d3389270242a6ac", "score": "0.467601", "text": "def is_asking_for_time(cls, tags):\n need_these = [\n ('what', 'WP'), # what as a 'WH-pronoun'\n ('time', 'NN'), # time as a noun\n ]\n\n return all([x in tags for x in need_these])", "title": "" }, { "docid": "4be5cd785f0f7ad1bf07e5e090565328", "score": "0.46627337", "text": "def configure_reruns_timeout_features(items):\n for item in items:\n test_name = item.name.split(\"[\")[0]\n if global_var['config'].exist(test_name + \"_0\"):\n test_name = test_name + \"_\" + item.name.split(\"[\")[1][0]\n if global_var['config'].exist(test_name):\n if global_var['config'].reruns(test_name) is not None:\n item.keywords.__dict__[\"reruns\"] = 0\n item.add_marker(pytest.mark.flaky(\n reruns=global_var['config'].reruns(test_name)))\n if 
global_var['config'].timeout(test_name) is not None:\n item.add_marker(pytest.mark.timeout(\n global_var['config'].timeout(test_name)))", "title": "" }, { "docid": "14aac4836116f1ce1192b744536e6288", "score": "0.4658913", "text": "def check_status(adate):\n\n processed_list = os.listdir(HWRFsummary)\n processed = any(adate in x for x in processed_list)\n \n if processed:\n return processed\n \n # extra check\n processed_list = os.listdir(HWRFraw)\n zipped_list = [x for x in processed_list if \"zip\" in x]\n processed = any(adate in x for x in zipped_list)\n\n return processed", "title": "" }, { "docid": "ac336e7d468eabf45616561f842503c8", "score": "0.46354637", "text": "def listWAScheck(wordInds, WASthresh):\n # check to make sure no two words in the list are too similar\n listGood = True\n for word1 in wordInds:\n for word2 in wordInds:\n val = semMat[word1][word2]\n if val >= WASthresh and val < 1:\n listGood = False\n return listGood\n return listGood", "title": "" }, { "docid": "df9ceb5d33a37c06eae3a1fe6c6ed7c7", "score": "0.46267018", "text": "def check(self, listRunning=True, listNonRunning=False):", "title": "" }, { "docid": "528a79534bdc928a8c13597b7ed57f69", "score": "0.46261787", "text": "def _ignore_failures(self, test_name, result, result_msg):\n for tname in self.ignore_failures:\n if test_name not in tname:\n continue\n\n found_match = False\n match_msg = \"\"\n for issue in self.ignore_failures[tname]:\n fail_regexes = issue['fail-regex']\n reason = issue['reason']\n for regex in fail_regexes:\n if re.findall(regex, result_msg):\n found_match = True\n match_msg += ('Match: \"%s\" Reason: \"%s\"' %\n (regex, reason))\n if found_match:\n result = 'SKIP'\n result_msg += 'Test failure ignored. (%s)' % match_msg\n return result, result_msg", "title": "" }, { "docid": "bb4eba3769e2641927fac669b9aaddad", "score": "0.46242583", "text": "def _check_water_restrictions(self):\n last_date = None\n for s, e, wr in self.water_restrictions[:-1]:\n if e is None:\n logger.warning(\n \"The water restriction started on %s for %s not finished!\",\n s, self.nickname)\n # Ensure the water restrictions are ordered.\n if last_date:\n assert s >= last_date\n last_date = s", "title": "" }, { "docid": "028e2c972cedb228cc34afa6e47ecdd1", "score": "0.46114075", "text": "def test_lagLimitExceeded(self):\n logger = OperationLogger(outfile=StringIO())\n for lag in [100.0, 1100.0, 1200.0]:\n logger.observe(dict(\n type='operation', phase='start', user='user01',\n label='testing', lag=lag)\n )\n self.assertEqual(\n [\"Median TESTING scheduling lag greater than 1000.0ms\"],\n logger.failures())", "title": "" }, { "docid": "3e58255a1bec2f738f740a955f074851", "score": "0.45996106", "text": "def _ValidateFlagFailures(self, failure_modes):\n for failure_arg in failure_modes:\n with self.assertRaises(flags.IllegalFlagValue):\n print 'Validating expected failure setting %s = %s' % (\n failure_arg[0], failure_arg[1])\n self._flag_values_copy.__setattr__(failure_arg[0], failure_arg[1])", "title": "" }, { "docid": "05f022c90180af336e6aa8ad87160646", "score": "0.4581913", "text": "def main():\n parser = argparse.ArgumentParser(\n description='Check for services with deferred restarts')\n parser.add_argument(\n '--application', help='Check services belonging to this application only')\n\n args = parser.parse_args()\n\n services = set(get_deferred_restart_services(args.application))\n\n if len(services) == 0:\n print('OK: No deferred service restarts.')\n sys.exit(0)\n else:\n print(\n 'CRITICAL: Restarts are deferred 
for services: {}.'.format(', '.join(services)))\n sys.exit(1)", "title": "" }, { "docid": "27231d76f0627bc4e6b8d49b83ed2174", "score": "0.45731992", "text": "def check_warnings(self):\n warn_map = {\n 61: bwarn.BlitzUnreadWarning,\n 62: bwarn.BlitzMessageWarning,\n 63: bwarn.BlitzShutdownWarning,\n 66: bwarn.BlitzNewMailWarning\n }\n\n out = []\n self._cmd0('WARN')\n exp = [60] + warn_map.keys()\n\n while True:\n key, data = self._expect(*exp)\n if key == 60:\n break\n\n out.append(warn_map[key](data))\n\n return out", "title": "" }, { "docid": "ae9cea2c2aad6b6239838c75dbb19345", "score": "0.45712563", "text": "def test_freshstatus_scheduled_maintenance_planned_multiple_services_over_limit(self) -> None:\n expected_topic = \"Expect some services downtime due to server maintenance\"\n expected_message = \"\"\"\nThe following scheduled maintenance has been opened: **Expect some services downtime due to server maintenance**\n**Description:** As part of the upgrade routine, we will be carrying out server maintenance work for this Service. This work will affect the Service to be unavailable during the maintenance window. We apologize for any inconvenience this may cause. Please do not hesitate to contact our support team at [email protected] if you have any questions regarding this server upgrading exercise.\n**Scheduled Start Time:** 2021-04-12 17:08 UTC\n**Scheduled End Time:** 2021-04-12 18:08 UTC\n**Affected Services:**\n* Sample Service\n* Sample Service 2\n* Sample Service 3\n* Sample Service 4\n* Sample Service 5\n[and 2 more service(s)]\n \"\"\".strip()\n self.check_webhook(\n \"freshstatus_scheduled_maintenance_planned_multiple_services_over_limit\",\n expected_topic,\n expected_message,\n )", "title": "" }, { "docid": "53bcd3ad32f27c8684da8606b7a694f2", "score": "0.45521876", "text": "async def _any_tools_are_running(self, tool_state):\n return any(t['running'] for t in tool_state.values())", "title": "" }, { "docid": "f7a66e299a6541b2989fd98a0885af14", "score": "0.45459154", "text": "def filter_warnings(\n colour_runtime_warnings: bool | LiteralWarning | None = None,\n colour_usage_warnings: bool | LiteralWarning | None = None,\n colour_warnings: bool | LiteralWarning | None = None,\n python_warnings: bool | LiteralWarning | None = None,\n):\n\n for action, category in [\n (colour_warnings, ColourWarning),\n (colour_runtime_warnings, ColourRuntimeWarning),\n (colour_usage_warnings, ColourUsageWarning),\n (python_warnings, Warning),\n ]:\n if action is None:\n continue\n\n if is_string(action):\n action = cast(LiteralWarning, str(action)) # noqa: PLW2901\n else:\n action = \"ignore\" if action else \"default\" # noqa: PLW2901\n\n filterwarnings(action, category=category)", "title": "" }, { "docid": "decf24c97cf7c8d69cd35d7a6051d01b", "score": "0.45265508", "text": "def testFlagsLike(self):\n self.log('testFlagsLike')\n \n gwi = getWordInfo11\n wfList = [(r'.\\period\\period', 'period'),\n (r',\\comma\\comma', 'comma'),\n (r'-\\hyphen\\hyphen', 'hyphen'),\n #( (10,), 'number'), ## testing number later\n ]\n \n for w,t in wfList:\n varInNsformat = 'flags_like_%s'% t\n if type(w) == tuple:\n flags = w\n else:\n wInfo = gwi(w)\n self.assertTrue(wInfo != None, \"word info for word: %s could not be found. 
US user???\"% w)\n flags = wordInfoToFlags(wInfo)\n flags.discard(3)\n flags = tuple(flags) # no delete flag not interesting\n fromNsFormat = globals()[varInNsformat]\n self.assertTrue(fromNsFormat == flags, \"flags_like variable |%s| not as expected\\nIn nsformat.py: %s (%s)\\nFrom actual word infoExpected: %s (%s)\"%\n (varInNsformat, fromNsFormat, showStateFlags(fromNsFormat), flags, showStateFlags(flags)))", "title": "" }, { "docid": "462bc4e1cd93f46554d422866933829d", "score": "0.45188835", "text": "def all_stop_words(token_list, span):\n\n for i in range(span[0], span[1] + 1):\n if token_list[i].lower() not in STOP_WORDS:\n return False\n else:\n return True", "title": "" }, { "docid": "ce44371742d077c6779a50b7ed42e1ea", "score": "0.45102945", "text": "def test_failureLimitExceeded(self):\n logger = OperationLogger(outfile=StringIO())\n for _ignore in range(98):\n logger.observe(dict(\n type='operation', phase='end', user='user01',\n duration=0.25, label='testing', success=True)\n )\n logger.observe(dict(\n type='operation', phase='end', user='user01',\n duration=0.25, label='testing', success=False)\n )\n self.assertEqual(\n [\"Greater than 1% TESTING failed\"],\n logger.failures())", "title": "" }, { "docid": "caa9c633da63b228475a164e67a529ca", "score": "0.4508673", "text": "def warns(self):\n return self._warns", "title": "" }, { "docid": "8ae5c9035e4f8eb693825e05a2e1369a", "score": "0.4505967", "text": "def bad_and_rejected(names, sh=None, bad=None, rej=None, verbose=False):\n # Pre-process lists into regex list\n if bad is None:\n bad = BAD_NAMES\n if rej is None:\n rej = REJ_NAMES\n bad = \"|\".join(bad) # pipe symbol as OR in regex\n rej = \"|\".join(rej) # pipe symbol as OR in regex\n\n # Remove bad\n good_names = names[~names.str.fullmatch(bad)]\n\n # Remove rejected\n rej_names = good_names[good_names.str.fullmatch(rej)]\n\n # Update rejected names in google sheet\n if sh is not None:\n rej_names = pd.DataFrame(rej_names)\n df_rej = ut.get_full_sheet(sh, ws=\"Rejected\") # Remote names\n rej_all = \"|\".join(df_rej.OBJECT.tolist())\n rej_all = \"|\".join([rej, rej_all])\n rej_mask = rej_names.OBJECT.isin(df_rej.OBJECT)\n if not rej_mask.all(): # Update only if new objects\n if verbose:\n print(\"Updating rejected sheet with new objects\")\n df_rej = df_rej.append(rej_names[~rej_mask], ignore_index=True)\n df_rej = df_rej.sort_values(\"OBJECT\")\n ut.update_full_sheet(sh, df_rej, ws=\"Rejected\")\n else:\n rej_all = rej\n\n good_names = good_names[~good_names.str.fullmatch(rej)]\n\n return good_names", "title": "" }, { "docid": "0127ee29db71641e4b13713780d7aa83", "score": "0.4493983", "text": "def VerifyGKEFlags(args, release_track, product):\n error_msg = ('The `{flag}` flag is not supported with Cloud Run for Anthos '\n 'deployed on Google Cloud. Specify `--platform {platform}` or '\n 'run `gcloud config set run/platform {platform}` to work with '\n '{platform_desc}.')\n\n if FlagIsExplicitlySet(args, 'allow_unauthenticated'):\n raise serverless_exceptions.ConfigurationError(\n 'The `--[no-]allow-unauthenticated` flag is not supported with '\n 'Cloud Run for Anthos deployed on Google Cloud. All deployed '\n 'services allow unauthenticated requests. 
The `--connectivity` '\n 'flag can limit which network a service is available on to reduce '\n 'access.')\n\n if (FlagIsExplicitlySet(args, 'connectivity') and\n FlagIsExplicitlySet(args, 'ingress')):\n raise serverless_exceptions.ConfigurationError(\n 'Cannot specify both the `--connectivity` and `--ingress` flags.'\n ' `--connectivity` is deprecated in favor of `--ingress`.')\n\n if FlagIsExplicitlySet(args, 'region'):\n raise serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--region',\n platform=platforms.PLATFORM_MANAGED,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_MANAGED]))\n\n if FlagIsExplicitlySet(args, 'execution_environment'):\n raise serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--execution-environment',\n platform=platforms.PLATFORM_MANAGED,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_MANAGED]))\n\n if FlagIsExplicitlySet(args, 'vpc_connector'):\n raise serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--vpc-connector',\n platform=platforms.PLATFORM_MANAGED,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_MANAGED]))\n\n if FlagIsExplicitlySet(args, 'clear_vpc_connector'):\n raise serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--clear-vpc-connector',\n platform=platforms.PLATFORM_MANAGED,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_MANAGED]))\n\n if FlagIsExplicitlySet(args, 'vpc_egress'):\n raise serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--vpc-egress',\n platform=platforms.PLATFORM_MANAGED,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_MANAGED]))\n\n if FlagIsExplicitlySet(args, 'binary_authorization'):\n raise serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--binary-authorization',\n platform=platforms.PLATFORM_MANAGED,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_MANAGED]))\n\n if FlagIsExplicitlySet(args, 'clear_binary_authorization'):\n raise serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--clear-binary-authorization',\n platform=platforms.PLATFORM_MANAGED,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_MANAGED]))\n\n if FlagIsExplicitlySet(args, 'breakglass'):\n raise serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--breakglass',\n platform=platforms.PLATFORM_MANAGED,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_MANAGED]))\n\n if FlagIsExplicitlySet(args, 'key'):\n raise serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--key',\n platform=platforms.PLATFORM_MANAGED,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_MANAGED]))\n\n if FlagIsExplicitlySet(args, 'post_key_revocation_action_type'):\n raise serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--post-key-revocation-action-type',\n platform=platforms.PLATFORM_MANAGED,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_MANAGED]))\n\n if FlagIsExplicitlySet(args, 'clear_key'):\n raise serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--clear-key',\n platform=platforms.PLATFORM_MANAGED,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_MANAGED]))\n\n if FlagIsExplicitlySet(args, 'clear_post_key_revocation_action_type'):\n raise 
serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--clear-post-key-revocation-action-type',\n platform=platforms.PLATFORM_MANAGED,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_MANAGED]))\n\n if FlagIsExplicitlySet(args, 'custom_audiences'):\n raise serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--custom-audiences',\n platform=platforms.PLATFORM_MANAGED,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_MANAGED]))\n\n if FlagIsExplicitlySet(args, 'clear_custom_audiences'):\n raise serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--clear-custom-audiences',\n platform=platforms.PLATFORM_MANAGED,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_MANAGED]))\n\n if FlagIsExplicitlySet(args, 'session_affinity'):\n raise serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--session-affinity',\n platform=platforms.PLATFORM_MANAGED,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_MANAGED]))\n\n if FlagIsExplicitlySet(args, 'kubeconfig'):\n raise serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--kubeconfig',\n platform=platforms.PLATFORM_KUBERNETES,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_KUBERNETES]))\n\n if FlagIsExplicitlySet(args, 'context'):\n raise serverless_exceptions.ConfigurationError(\n error_msg.format(\n flag='--context',\n platform=platforms.PLATFORM_KUBERNETES,\n platform_desc=platforms.PLATFORM_SHORT_DESCRIPTIONS[\n platforms.PLATFORM_KUBERNETES]))", "title": "" }, { "docid": "6db83e790e2dba43fb159471e03cfc3b", "score": "0.44877705", "text": "def add_alerts(self):\n\n flagged_as_outlier = self.current_unit_id in self.by_sample\n if flagged_as_outlier:\n alerts_list = self.by_sample.get(self.current_unit_id,\n None) # None, if id not in dict\n print('\\n\\tFlagged as a possible outlier by these measures:\\n\\t\\t{}'.format(\n '\\t'.join(alerts_list)))\n\n strings_to_show = ['Flagged as an outlier:', ] + alerts_list\n self.current_alert_msg = '\\n'.join(strings_to_show)\n self.update_alerts()\n else:\n self.current_alert_msg = None", "title": "" }, { "docid": "1a02e6da64f9d2b55028170b97ec0df8", "score": "0.44838637", "text": "def testFlagValidation(self):\n # Validators get invoked on __setattr__ (overload for '=').\n failure_modes = [\n ('configuration_file', None),\n ('name', None),\n ('name', ''),\n ]\n self._ValidateFlagFailures(failure_modes)", "title": "" }, { "docid": "d6b66a7597455ba413f81532503e89c5", "score": "0.44830254", "text": "def collect_asg_status_change(name, instance_id, ignores, test_result):\n describe_res = as_client.describe_auto_scaling_groups(\n AutoScalingGroupNames=[name]\n )[\"AutoScalingGroups\"][0]\n\n for instance in describe_res[\"Instances\"]:\n if instance[\"InstanceId\"] == instance_id:\n state = instance[\"LifecycleState\"]\n if state in ignores:\n continue\n\n if len(test_result) > 0:\n if test_result[-1] != state:\n LOGGER.info(\"ASG Change Detection: {}\".format(state))\n test_result.append(state)\n else:\n LOGGER.info(\"ASG Change Detection: {}\".format(state))\n test_result.append(state)", "title": "" }, { "docid": "3253c55dc83b06b32d2aa968a580aadd", "score": "0.44821888", "text": "def test_grace_period_condition(db_conn, stolen_list_importer, mocked_config, monkeypatch):\n stolen_list_importer.import_data()\n db_conn.commit()\n\n cond_list = [{\n 'label': 'local_stolen',\n 'grace_period_days': 87,\n 
'blocking': True,\n 'sticky': False,\n 'reason': 'IMEI found on local stolen list',\n 'dimensions': [{'module': 'stolen_list'}]\n }]\n\n invoke_cli_classify_with_conditions_helper(cond_list, mocked_config, monkeypatch, curr_date='20170408',\n classify_options=['--no-safety-check'])\n\n # date_days_ago is '2017-1-11'\n date_days_ago = (datetime.date(2017, 4, 8) - timedelta(days=87)).isoformat()\n\n with db_conn.cursor() as cursor:\n cursor.execute(sql.SQL(\"\"\"SELECT imei_norm, block_date, cond_name\n FROM classification_state\n WHERE end_date IS NULL\n AND block_date IS NOT NULL\n AND block_date > %s::date\n ORDER BY imei_norm, cond_name\"\"\"), [date_days_ago])\n\n attr_list = [[res.imei_norm, res.block_date.strftime('%Y%m%d'), res.cond_name] for res in cursor.fetchall()]\n\n # The following list contains IMEIs, block dates and reason info.\n # All the IMEIs in the list have block date greater than 2017-1-11.\n # This date is 87 days (grace_period) before the classification date (2017, 4, 8).\n b = [['12432807272315', '20170704', 'local_stolen'],\n ['12640904324427', '20170704', 'local_stolen'],\n ['12640904372723', '20170704', 'local_stolen'],\n ['12727231272313', '20170704', 'local_stolen'],\n ['12875502464321', '20170704', 'local_stolen'],\n ['12875502572723', '20170704', 'local_stolen'],\n ['12875507272312', '20170704', 'local_stolen'],\n ['12904502843271', '20170704', 'local_stolen'],\n ['12909602432585', '20170704', 'local_stolen'],\n ['12909602872723', '20170704', 'local_stolen'],\n ['12922902206948', '20170704', 'local_stolen'],\n ['12922902243260', '20170704', 'local_stolen'],\n ['12922902432742', '20170704', 'local_stolen'],\n ['12922902432776', '20170704', 'local_stolen'],\n ['12957272313271', '20170704', 'local_stolen'],\n ['17272317272723', '20170704', 'local_stolen'],\n ['56773605727231', '20170704', 'local_stolen'],\n ['64220204327947', '20170704', 'local_stolen'],\n ['64220297727231', '20170704', 'local_stolen'],\n ['72723147267231', '20170704', 'local_stolen'],\n ['72723147267631', '20170704', 'local_stolen']]\n\n assert attr_list == b", "title": "" }, { "docid": "5507a4d289bb7c94a2b3296eb602ffd7", "score": "0.4473425", "text": "def test_multiple_invalid_flag(self):\n # P and G are invalid, should raise InvalidFlag\n # with 'P' as invalid flag\n flags = 'yBPGr'\n with self.assertRaises(InvalidFlag) as e:\n self.printy.format(self.sample_text, flags)\n self.assertEqual(e.exception.flag, 'P')", "title": "" }, { "docid": "6f20f2a8232d687385819e578e7607f3", "score": "0.4471742", "text": "def test_job_status(self):\n accepted = ( # These job statuses are all described as \"terminated\".\n '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|CANCELLED',\n '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|COMPLETED',\n '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|FAILED',\n '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|NODE_FAIL',\n '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|PREEMPTED',\n '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|TIMEOUT',\n )\n\n rejected = ( # These jobs would be unstarted or still running.\n '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|BOOT_FAIL',\n '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|CONFIGURING',\n 
'123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|COMPLETING',\n '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|DEADLINE',\n '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|PENDING',\n '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|RUNNING',\n '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|RESIZING',\n '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|SUSPENDED',\n )\n for line in accepted:\n self.assertNotEqual(self.parser.parse(line), None,\n \"Line incorrectly rejected: %s\" % line)\n\n for line in rejected:\n self.assertEqual(self.parser.parse(line), None,\n \"Line incorrectly accepted: %s\" % line)", "title": "" }, { "docid": "31de946e79d685bc63d98ef6e2ff1ff8", "score": "0.4471575", "text": "def _FlagFromGroupAlreadyUsed(flag_group):\n return any(_FlagAlreadyUsed(flag) for flag in flag_group)", "title": "" }, { "docid": "00cb5f7b1016699de3ce8f11e56a2139", "score": "0.4452337", "text": "def _check_informative(self, feat, flag: bool = ...):\n ...", "title": "" }, { "docid": "7a795243e6e880b12cb2fb298aa1e5c6", "score": "0.44246483", "text": "def makeFlags(*args):\n return [name for check, name in args if check]", "title": "" }, { "docid": "32c1e4863b46a6477d4594f7b8bd6723", "score": "0.44142064", "text": "async def _not_all_tools_passed(self, tool_state):\n return not all(t['return code'] == 0 for t in tool_state.values())", "title": "" }, { "docid": "51c9dc54ab7bdaa3b5fed1dfb2cfda0e", "score": "0.441127", "text": "def warning():\n timing_pattern = factories.TimingPatternFactory.create()\n factories.TimingPatternStopFactory.create_batch(\n 10,\n timing_pattern=timing_pattern,\n common_service_pattern=timing_pattern.service_pattern,\n )\n\n return factories.SlowLinkWarningFactory.create(\n timing_pattern=timing_pattern,\n # 5 of the 10 timing pattern stops should be considered \"effected\" by\n # the warning and stored as timings\n timings=(5,),\n common_service_pattern=timing_pattern.service_pattern,\n service_links=(1,),\n )", "title": "" }, { "docid": "0d30812d15584d947fe521f0348cc2ac", "score": "0.43883002", "text": "def test_lizard_tool_plugin_valid_flag_filter():\n ltp = setup_lizard_tool_plugin()\n flag_list = [\"-f\", \"--input_file\", \"-o\", \"--output_file\", \"-Edumpcomments\"]\n filtered_list = ltp.remove_invalid_flags(flag_list)\n assert not filtered_list", "title": "" }, { "docid": "530731a08aecd69555ba5c13d76d7783", "score": "0.43779397", "text": "def testFlagValidation(self):\n # Validators get invoked on __setattr__ (overload for '=').\n failure_modes = [\n ('configuration_file', None),\n ('name', None),\n ('name', ''),\n ('value', None),\n ]\n self._ValidateFlagFailures(failure_modes)", "title": "" }, { "docid": "3b7cd9238c2e8a19e7c5504eb2111ceb", "score": "0.43696305", "text": "def add_custom_crash_state_if_needed(fuzzer_name, output_lines, parsed_stats):\n if not parsed_stats['oom_count'] and not parsed_stats['timeout_count']:\n return\n\n summary_index = None\n\n for index, line in enumerate(output_lines):\n if 'SUMMARY:' in line or 'DEATH:' in line:\n summary_index = index\n break\n\n if summary_index is not None:\n output_lines.insert(summary_index, 'custom-crash-state: ' + fuzzer_name)", "title": "" }, { "docid": "8367dd0647570f76183384acf2f538c0", "score": "0.4368262", "text": "def threshold2(los, t):\n return all([len(s)>t for s in los])", "title": "" }, { "docid": 
"eb9171094174b4da3ced3e02728aed36", "score": "0.4367217", "text": "def test_ping_failure_over_threshold_flag_ping_fail_count(pingboy, mock_ping_fail):\n pingboy.flag_ping_fail_count = 1\n pingboy.flag_restarting = False\n pingboy.config_values['ping_fails_needed_for_restart'] = 2\n \n pingboy.ping()\n\n assert pingboy.flag_ping_fail_count == 3", "title": "" }, { "docid": "1374b7db85d04812d4c3b80b05520ac4", "score": "0.43583316", "text": "def Warning(switch, defaulttip, defaultstate, before=False):\n sw = \"-gnatw\" + switch\n return Check(sw, \"-gnatw\" + switch.upper(),\n Label(sw, defaulttip),\n Tip(sw, defaulttip),\n defaultstate, before)", "title": "" }, { "docid": "bab2adb387b3276d21125dc1c73bcb27", "score": "0.4352313", "text": "def check_loop_run(self):\r\n max_exceptions_before_stop = 50\r\n \"\"\"max minutes to remmember the last excption\"\"\"\r\n max_minutes_from_last_exception = 1\r\n \r\n current_dt = now()\r\n if not (self._last_exception_dt is None):\r\n if (self._last_exception_dt.year == current_dt.year and self._last_exception_dt.month == current_dt.month and self._last_exception_dt.day == current_dt.day):\r\n calc_dt = current_dt - self._last_exception_dt\r\n diff = divmod(calc_dt.days * 86400 + calc_dt.seconds, 60)\r\n if (diff[0] > max_minutes_from_last_exception):\r\n self._exception_count = 0\r\n else:\r\n self._exception_count += 1\r\n else:\r\n self._exception_count = 0\r\n else:\r\n self._exception_count = 0\r\n\r\n if not (max_exceptions_before_stop > self._exception_count):\r\n _LOGGER.error(\"max exceptions allowed in watch loop exceeded, stoping watch loop\")\r\n self._ok_to_run = False\r\n\r\n self._last_exception_dt = current_dt", "title": "" }, { "docid": "659592f7aea5fd60aa63ecab9a2e86d5", "score": "0.43484968", "text": "def test_averaging_duplicate_threshold_multiple_days(db_conn, operator_data_importer, mocked_config,\n tmpdir, logger, monkeypatch):\n operator_data_importer.import_data()\n\n # Verify one duplicate IMEI found when averaged over multiple days\n cond_list = [{\n 'label': 'duplicate_daily_avg',\n 'reason': 'duplicate daily avg',\n 'dimensions': [{\n 'module': 'duplicate_daily_avg',\n 'parameters': {\n 'threshold': 2.0,\n 'period_days': 5,\n 'min_seen_days': 5}}]\n }]\n matched_imeis = invoke_cli_classify_with_conditions_helper(cond_list, mocked_config, monkeypatch,\n classify_options=['--no-safety-check'],\n db_conn=db_conn, curr_date='20161121')\n assert matched_imeis == ['21123131308879']\n\n # Verify no duplicate IMEIs found when threshold value greater than average\n cond_list = [{\n 'label': 'duplicate_daily_avg',\n 'reason': 'duplicate daily avg',\n 'dimensions': [{\n 'module': 'duplicate_daily_avg',\n 'parameters': {\n 'threshold': 2.1,\n 'period_days': 5,\n 'min_seen_days': 5}}]\n }]\n matched_imeis = invoke_cli_classify_with_conditions_helper(cond_list, mocked_config, monkeypatch,\n classify_options=['--no-safety-check'],\n db_conn=db_conn, curr_date='20161121')\n assert len(matched_imeis) == 0\n\n # Verify one duplicate IMEI found when threshold value lesser than average\n cond_list = [{\n 'label': 'duplicate_daily_avg',\n 'reason': 'duplicate daily avg',\n 'dimensions': [{\n 'module': 'duplicate_daily_avg',\n 'parameters': {\n 'threshold': 1.9,\n 'period_days': 5,\n 'min_seen_days': 5}}]\n }]\n matched_imeis = invoke_cli_classify_with_conditions_helper(cond_list, mocked_config, monkeypatch,\n classify_options=['--no-safety-check'],\n db_conn=db_conn, curr_date='20161121')\n assert matched_imeis == ['21123131308879']\n\n # Verify 
multiple duplicate IMEIs found when averaged over multiple days and min_seen_days\n # value is applicable to multiple IMEIs and ratio is a float value\n cond_list = [{\n 'label': 'duplicate_daily_avg',\n 'reason': 'duplicate daily avg',\n 'dimensions': [{\n 'module': 'duplicate_daily_avg',\n 'parameters': {\n 'threshold': 1.1,\n 'period_days': 19,\n 'min_seen_days': 2}}]\n }]\n matched_imeis = invoke_cli_classify_with_conditions_helper(cond_list, mocked_config, monkeypatch,\n classify_options=['--no-safety-check'],\n db_conn=db_conn, curr_date='20161124')\n assert matched_imeis.sort() == ['21123131308879', '13768038709433', '21260934121733'].sort()\n\n # Verify min_seen_days excludes duplicate IMEIs having less than 5 days of data.\n # Same test scenario as above with min_seen_days value set to 5.\n cond_list = [{\n 'label': 'duplicate_daily_avg',\n 'reason': 'duplicate daily avg',\n 'dimensions': [{\n 'module': 'duplicate_daily_avg',\n 'parameters': {\n 'threshold': 1.1,\n 'period_days': 19,\n 'min_seen_days': 5}}]\n }]\n matched_imeis = invoke_cli_classify_with_conditions_helper(cond_list, mocked_config, monkeypatch,\n classify_options=['--no-safety-check'],\n db_conn=db_conn, curr_date='20161124')\n assert matched_imeis == ['21123131308879']\n\n # Verify multiple (2) duplicate IMEIs (21123131308879, 21260934121733) found\n # ratio is a float value. when averaged over multiple days and and min_seen_days value is applicable\n # to multiple IMEIs and IMEIs and ratio is a float value. IMEI 13768038709433 is not found as\n # a duplicate since it was seen only in 2 days and the first time was post --curr-date 20161121.\n # Duplicates after the curr_date value is not used.\n cond_list = [{\n 'label': 'duplicate_daily_avg',\n 'reason': 'duplicate daily avg',\n 'dimensions': [{\n 'module': 'duplicate_daily_avg',\n 'parameters': {\n 'threshold': 1.1,\n 'period_days': 19,\n 'min_seen_days': 2}}]\n }]\n matched_imeis = invoke_cli_classify_with_conditions_helper(cond_list, mocked_config, monkeypatch,\n classify_options=['--no-safety-check'],\n db_conn=db_conn, curr_date='20161121')\n assert matched_imeis.sort() == ['21123131308879', '21260934121733'].sort()\n\n # Verify no duplicate IMEI is found with min_seen_days excludes duplicate IMEIs having less than 5 days\n # of data and with min_seen_days value set to 5 and post --curr-date 20161119. 
Duplicates after the\n # curr_date value is not used.\n cond_list = [{\n 'label': 'duplicate_daily_avg',\n 'reason': 'duplicate daily avg',\n 'dimensions': [{\n 'module': 'duplicate_daily_avg',\n 'parameters': {\n 'threshold': 1.1,\n 'period_days': 19,\n 'min_seen_days': 5}}]\n }]\n matched_imeis = invoke_cli_classify_with_conditions_helper(cond_list, mocked_config, monkeypatch,\n classify_options=['--no-safety-check'],\n db_conn=db_conn, curr_date='20161119')\n assert len(matched_imeis) == 0\n\n # Verify one duplicate IMEI found when averaged over multiple days and ratio is a float value\n # We expect IMEI 21123131308879 to have duplication of 1.833 (11 pairs seen over 6 days)\n cond_list = [{\n 'label': 'duplicate_daily_avg',\n 'reason': 'duplicate daily avg',\n 'dimensions': [{\n 'module': 'duplicate_daily_avg',\n 'parameters': {\n 'threshold': 1.83,\n 'period_days': 30,\n 'min_seen_days': 5}}]\n }]\n matched_imeis = invoke_cli_classify_with_conditions_helper(cond_list, mocked_config, monkeypatch,\n classify_options=['--no-safety-check'],\n db_conn=db_conn, curr_date='20161130')\n assert matched_imeis == ['21123131308879']\n\n # Verify no duplicate IMEIs found when averaged over multiple days and ratio is a float value\n # and threshold is greater than that matches average for any IMEI\n cond_list = [{\n 'label': 'duplicate_daily_avg',\n 'reason': 'duplicate daily avg',\n 'dimensions': [{\n 'module': 'duplicate_daily_avg',\n 'parameters': {\n 'threshold': 1.84,\n 'period_days': 30,\n 'min_seen_days': 5}}]\n }]\n matched_imeis = invoke_cli_classify_with_conditions_helper(cond_list, mocked_config, monkeypatch,\n classify_options=['--no-safety-check'],\n db_conn=db_conn, curr_date='20161130')\n assert len(matched_imeis) == 0", "title": "" }, { "docid": "534935729faf87a93df350faba245c2f", "score": "0.43464142", "text": "def _filter_tests(tests, verbose=True):\n def _filter_by_blacklist(tests, verbose=True):\n type_keys_map = {\n 'not-supported': ['feature'],\n 'not-in-plan': ['reason'],\n 'bug-wont-fix': ['bug'],\n 'bug-not-fixed': ['bug'],\n 'case-updating': ['task'],\n 'hazardous-case': ['reason'],\n 'need-env': ['env'],\n }\n blacklist_path = os.path.join(CONFIG_PATH, 'blacklist.yaml')\n\n with open(blacklist_path) as blacklist_fp:\n blacklists = yaml.load(blacklist_fp)\n\n blacklist = {}\n for blacklist_type in type_keys_map:\n blacklist[blacklist_type] = []\n\n for scenario in blacklists:\n # Prepare variables for eval\n # pylint: disable=unused-variable,eval-used\n params = self.params # noqa\n info = self # noqa\n # If 'when' don't exists, it means the blacklist applies\n # for any condition\n any_package = package.any_package\n if 'when' not in scenario or eval(scenario['when']):\n for blacklist_type, entries in scenario.items():\n if blacklist_type in ['when', 'description']:\n continue\n\n for entry in entries:\n entry['scenario'] = scenario['description']\n blacklist[blacklist_type].extend(\n scenario[blacklist_type])\n\n blacked_tests = set()\n for blacklist_type, entries in blacklist.items():\n for entry in entries:\n if blacklist_type == 'not-supported':\n reason = (\"Feature '%s' is not supported %s\" %\n (entry['feature'], entry['scenario']))\n elif blacklist_type == 'not-in-plan':\n reason = (\"Case is not in test plan %s because \"\n \"%s\" % (entry['scenario'],\n entry['reason']))\n elif blacklist_type == 'bug-wont-fix':\n reason = (\"Bug '%s' won't be fixed %s\" %\n (entry['bug'], entry['scenario']))\n elif blacklist_type == 'bug-not-fixed':\n reason = (\"Bug '%s' is not 
fixed yet %s\" %\n (entry['bug'], entry['scenario']))\n elif blacklist_type == 'case-updating':\n reason = (\"Case is updating %s: https://projects.\"\n \"engineering.redhat.com/browse/%s\" %\n (entry['scenario'], entry['task']))\n elif blacklist_type == 'hazardous-case':\n reason = (\"Case is hazardous %s: %s\" %\n (entry['scenario'], entry['reason']))\n elif blacklist_type == 'need-env':\n reason = (\"Case require env '%s' to run %s\" %\n (entry['env'], entry['scenario']))\n else:\n raise Exception(\"Unknown blacklist type %s\" %\n blacklist_type)\n\n patterns = entry['test']\n if isinstance(patterns, (str, unicode)):\n patterns = [patterns]\n elif not isinstance(patterns, (list, tuple)):\n raise Exception('Unknown test type %s for %s' %\n type(patterns), entry)\n\n blacked = []\n for test in tests:\n if any(utils.test_match(patt, test)\n for patt in patterns):\n blacked.append(test)\n\n if 'skip-test' in entry and not entry['skip-test']:\n for test in blacked:\n self.ignore_failures[test].append({\n 'fail-regex': entry['fail-regex'],\n 'reason': reason,\n })\n continue\n else:\n for test in blacked:\n self.update_report(test, \"SKIP\", \"[BLACKLISTED] %s\" % reason,\n 0, [], None)\n\n if blacked:\n LOGGER.info('%d tests blacked because \"%s\"',\n len(blacked), reason)\n if verbose:\n for test in blacked:\n LOGGER.info('\\t%s', test)\n blacked_tests.update(blacked)\n\n filtered_tests = []\n for test in tests:\n if test not in blacked_tests:\n filtered_tests.append(test)\n\n return filtered_tests\n\n LOGGER.info('Found %s cases', len(tests))\n if self.onlys:\n LOGGER.info(\"Filtering with only:\")\n for only in self.onlys:\n LOGGER.info(\"\\t%s\", only)\n if self.nos:\n LOGGER.info(\"Filtering with no:\")\n for no_ in self.nos:\n LOGGER.info(\"\\t%s\", no_)\n\n filtered_tests = []\n for test in tests:\n if self.onlys is not None:\n if all(not utils.test_match(only, test)\n for only in self.onlys):\n continue\n if self.nos is not None:\n if any(utils.test_match(no, test)\n for no in self.nos):\n continue\n filtered_tests.append(test)\n LOGGER.info('Filtered to %s cases', len(filtered_tests))\n\n tests = filtered_tests\n filtered_tests = _filter_by_blacklist(tests, verbose)\n LOGGER.info('Filtered to %s cases by blacklist',\n len(filtered_tests))\n\n return filtered_tests", "title": "" }, { "docid": "1aecd0ebc2b08bbfd0f81383baae61fa", "score": "0.43373865", "text": "def _get_warning_regular_workers(self):\n sheet = self._get_active_sheet()\n\n warning_message = \"\"\n if sheet:\n for added_shift in sheet.added_shift_ids:\n is_regular_worker = (\n added_shift.worker_id.working_mode == \"regular\"\n )\n is_compensation = added_shift.is_compensation\n\n if is_regular_worker and not is_compensation:\n warning_message += (\n _(\n \"\\n%s attended its shift as a normal one but was not expected. 
\"\n \"Something may be wrong in his/her personnal informations.\\n\"\n )\n % added_shift.worker_id.name\n )\n return warning_message", "title": "" }, { "docid": "afd3da0d05acbec07c6b46c67e44e0c6", "score": "0.43350625", "text": "def run_checks(self, **kwargs):\n result = True\n full_report = {}\n workbench = self.plugin.workbench\n\n self._write_infos_in_task()\n self.root_task.run_time = \\\n self.dependencies.get_runtime_dependencies('main')\n\n msg = 'Running checks for pre-measurement hook %s for measurement %s'\n for id, hook in self.pre_hooks.items():\n LOGGER.debug(msg, id, self.name)\n answer = hook.check(workbench, **kwargs)\n if answer is not None:\n check, errors = answer\n if errors:\n full_report[id] = errors\n result = result and check\n\n msg = 'Running checks for post-measurement hook %s for measurement %s'\n for id, hook in self.post_hooks.items():\n LOGGER.debug(msg, id, self.name)\n answer = hook.check(workbench, **kwargs)\n if answer is not None:\n check, errors = answer\n if errors:\n full_report[id] = errors\n result = result and check\n\n self.root_task.run_time.clear()\n\n return result, full_report", "title": "" }, { "docid": "0eaadff5e3b01965065063581fe935ee", "score": "0.43296823", "text": "def kills_count(work_db):\n return sum(r.is_killed for _, r in work_db.results)", "title": "" }, { "docid": "cb75ee963803c3d0afec810d7cca8cf3", "score": "0.43176135", "text": "def weeklies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AlertProcessingRuleSuppressionScheduleRecurrenceWeeklyArgs']]]]:\n return pulumi.get(self, \"weeklies\")", "title": "" }, { "docid": "3ad8c66e9ace4959edbab0c34818756d", "score": "0.43128178", "text": "def _validate(self):\n\n # 1. Test if start|stop_time_sec are within a day\n for targ in [\"start_time_sec\", \"stop_time_sec\"]:\n val = getattr(self, targ)\n if val < 0 or val > 86400:\n self._status = 1\n self._status_context += \"%s out of bounds (%g);\" % (targ, val)", "title": "" }, { "docid": "b6abb9d9bc74c9fb3bee8e6fd23f833c", "score": "0.43104506", "text": "def testFlagValidation(self):\n # Validators get invoked on __setattr__ (overload for '=').\n failure_modes = [\n ('configuration_file', None),\n ('source_configuration_file', None),\n ('source_configuration_file', 'unreadable_file'),\n ]\n self._ValidateFlagFailures(failure_modes)", "title": "" }, { "docid": "2d6d7507134473ec9dfcee81884e315b", "score": "0.43104365", "text": "def checked_updates(*appliance_names):\n for row in get_appliance_rows(*appliance_names):\n if row.last_checked_for_updates.text == '':\n return False\n return True", "title": "" }, { "docid": "f4f28743e68aba350e0fbd99482e92c2", "score": "0.43074852", "text": "def bad_words(self, mask, target, args):\n global BAD_WORDS\n add, delete, get, word, gravity = args.get('add'), args.get('del'), args.get('get'), args.get(\n '<word>'), args.get('<gravity>')\n if add:\n try:\n word = word.lower()\n BAD_WORDS, _, _ = self.__db_add(['badwords', 'words'], word, int(gravity), True)\n return 'Added \"{word}\" to watched badwords with gravity {gravity}'.format(**{\n \"word\": word,\n \"gravity\": gravity,\n })\n except Exception as ex:\n return \"Failed adding the word. 
Did you not use a number for the gravity?\"\n elif delete:\n if BAD_WORDS.get(word):\n BAD_WORDS = self.__db_del(['badwords', 'words'], word)\n return 'Removed \"{word}\" from watched badwords'.format(**{\n \"word\": word,\n })\n else:\n return 'Word not found in the list.'\n elif get:\n words = BAD_WORDS\n self.bot.privmsg(mask.nick, str(len(words)) + \" checked badwords:\")\n for word in words.keys():\n self.bot.privmsg(mask.nick, '- word: \"%s\", gravity: %s' % (word, words[word]))", "title": "" }, { "docid": "130740f158f300af50a65aeca39c7846", "score": "0.43036574", "text": "def allowed_flags(args, flags):\n flags = set(flags)\n\n for arg in args.keys():\n try:\n if Options.__options__[arg].is_Flag and arg not in flags:\n raise FlagError(\n \"'%s' flag is not allowed in this context\" % arg)\n except KeyError:\n raise OptionError(\"'%s' is not a valid option\" % arg)", "title": "" }, { "docid": "ee234b30623344399b40d211a87bf432", "score": "0.43032405", "text": "def run_only(to_run_hooks, current_hooks):\n to_run_hooks = csv_to_set(to_run_hooks)\n logging.info(\" Running only the hooks %s\", to_run_hooks)\n to_run = []\n for hook in to_run_hooks:\n if hook in current_hooks:\n to_run.append(hook)\n return to_run", "title": "" }, { "docid": "1272e6dc268b514c984c097649f8f599", "score": "0.42913082", "text": "def test_failing_check(self, *args):\n self.run_check(check_config)\n self.assertServiceCheck(\"fargate_check\", status=AgentCheck.CRITICAL, tags=['foo:bar'], count=1)", "title": "" }, { "docid": "874dc403093a56bc984f3d53da7466ee", "score": "0.42876104", "text": "def compute_status_filter(ctxt, request_spec):\n # We're called before scheduler utils resources_from_request_spec builds\n # the RequestGroup stuff which gets used to form the\n # GET /allocation_candidates call, so mutate the flavor for that call but\n # don't persist the change.\n trait_name = os_traits.COMPUTE_STATUS_DISABLED\n request_spec.flavor.extra_specs['trait:%s' % trait_name] = 'forbidden'\n request_spec.obj_reset_changes(fields=['flavor'], recursive=True)\n LOG.debug('compute_status_filter request filter added forbidden '\n 'trait %s', trait_name)\n return True", "title": "" }, { "docid": "732dcde59cf32b51f83cde7bad7aeafa", "score": "0.42838615", "text": "async def check_filtered_words(self, ctx):\n all_words_and_types = self.get_list_of_words(ctx)\n await ctx.channel.send(embed=build_word_list_embed(ctx, all_words_and_types[0], all_words_and_types[1],\n all_words_and_types[2]))", "title": "" }, { "docid": "c3fa3538c01e3ec5a7d48a059d5ab1af", "score": "0.42730433", "text": "def test_old_naming_convention(self):\n plugin = WeatherSymbols()\n plugin.coord_named_threshold = True\n test_condition = self.dummy_queries['significant_precipitation']\n result = plugin.create_condition_chain(test_condition)\n expected = (\"(cubes.extract(iris.Constraint(name='probability_of_\"\n \"rainfall_rate_above_threshold', threshold=lambda \"\n \"cell: 0.03 * {t_min} < \"\n \"cell < 0.03 * {t_max}))[0].data >= 0.5) | (cubes.extract\"\n \"(iris.Constraint(\"\n \"name='probability_of_lwe_snowfall_rate_above_threshold',\"\n \" threshold=lambda cell: 0.03 * {t_min} < cell < \"\n \"0.03 * {t_max}))[0].data >= 0.5)\".format(\n t_min=(1. - WeatherSymbols().float_tolerance),\n t_max=(1. 
+ WeatherSymbols().float_tolerance)))\n self.assertIsInstance(result, list)\n self.assertIsInstance(result[0], str)\n self.assertEqual(result[0], expected)", "title": "" }, { "docid": "1cc0b4752ca79c2fe04cdd9c69a53386", "score": "0.42715496", "text": "def mark_bad_wavelength_groups(src_file: H5File, filtered_paths: List[str]) -> None:\n bad_paths = set(filtered_paths)\n for wav_path in rawnav.wavelengths_under_rounds_paths(src_file):\n paths_below = dataset_paths_below(src_file, wav_path)\n if any(p in bad_paths for p in paths_below):\n src_file[wav_path].attrs['isbad'] = True\n else:\n src_file[wav_path].attrs['isbad'] = False\n return", "title": "" }, { "docid": "7ae92f5b95487767c167968635d028d8", "score": "0.42702764", "text": "def check_constraints(event_data, path_i, timing_list):\n \n # Initialize all flags:\n feature_flag = True\n smaller_flag = True\n larger_flag = True\n \n # check features:\n feature_loc = event_data.shape[1]-1\n ce_features = event_data[path_i[0], feature_loc]\n for i in path_i:\n if event_data[i, feature_loc] != ce_features:\n feature_flag = False\n \n # check smaller:\n time_loc = event_data.shape[1] - 2\n for i in range(len(path_i)-1):\n if event_data[path_i[i+1], time_loc] - event_data[path_i[i], time_loc] > timing_list[0, i]:\n smaller_flag = False\n \n # check larger:\n for i in range(len(path_i)-1):\n if event_data[path_i[i+1], time_loc] - event_data[path_i[i], time_loc] < timing_list[1, i]:\n larger_flag = False \n \n final_flag = feature_flag and smaller_flag and larger_flag\n# print(feature_flag , smaller_flag , larger_flag)\n return final_flag", "title": "" }, { "docid": "cd8e99a44f02b854050b9430a8bc4d98", "score": "0.42689812", "text": "def MBTSTimeDiffEventInfoAlgCfg(flags, **kwargs):\n\n acc = ComponentAccumulator()\n\n from TileConditions.TileCablingSvcConfig import TileCablingSvcCfg\n acc.merge( TileCablingSvcCfg(flags) )\n\n from TileGeoModel.TileGMConfig import TileGMCfg\n acc.merge(TileGMCfg(flags))\n\n from LArGeoAlgsNV.LArGMConfig import LArGMCfg\n acc.merge(LArGMCfg(flags))\n\n MBTSTimeDiffEventInfoAlg=CompFactory.MBTSTimeDiffEventInfoAlg\n acc.addEventAlgo(MBTSTimeDiffEventInfoAlg(**kwargs), primary = True)\n\n return acc", "title": "" }, { "docid": "07f4802ec255994c225fa0a6db32e8aa", "score": "0.42674512", "text": "def solve_for_unused_flags(trim_first=0):\n \n # Add instructions to the solver\n for instruction in machine_state.instructions:\n machine_state.solver.add(instruction)\n\n current_timeout = 0 # No timeout. Try to solve forever or until we run out of memory and are killed by the mainframe\n \n # We can skip flags. 
Makes it easier to run a parallel script if we're stuck\n chosen_flags = machine_state.unused_flags[trim_first:]\n # Print the set of flags to be analyzed\n print(chosen_flags)\n print (\"Sat means an error-inducing input has been found.\")\n print (\"Unsat means that no such input exists\")\n print (\"Unknown means that the solver didn't come to a conclusion in the given time\")\n\n z3.set_param(\"timeout\", current_timeout)\n solution_results = dict()\n unsolved_flags = copy(chosen_flags)\n\n # Do until we've solved all\n while len(unsolved_flags) != 0:\n\n z3.set_param(\"timeout\", current_timeout)\n temp_unsolved = copy(unsolved_flags)\n temp_solved = []\n # For each flags\n for (i, flag) in enumerate(temp_unsolved):\n print (\"Solving for\",flag)\n # Save the machine state with all the instructions\n machine_state.solver.push()\n # Set the flag to 1\n machine_state.solver.add(flag == 1)\n # Set timeout\n machine_state.solver.set(\"timeout\", current_timeout)\n # Execute the solver\n result = machine_state.solver.check()\n # Print result (sat, unsat, unknown)\n print(f\"{i+1}/{len(temp_unsolved)}\", flag, result)\n # If not unknown, then we are either sure that it can't be set or found a bug\n if result != z3.unknown:\n temp_solved.append(flag)\n solution_results[flag] = result\n \n # If we haven't found a bug, go to next\n if result != z3.sat:\n\n machine_state.solver.pop()\n continue\n \n # If there is a bug, print solution\n print_outputs()\n machine_state.solver.pop()\n\n # Remove those that were solved\n for x in temp_solved:\n unsolved_flags.remove(x)\n # Double the timeout\n current_timeout *= 2\n # Print what's been found by now\n for flag in solution_results.keys():\n print(flag, solution_results[flag])\n # Print what's left to do\n print(\"Flags left to solve:\", len(unsolved_flags))", "title": "" }, { "docid": "401e442764536015d4148512d53228f8", "score": "0.42674372", "text": "def _check_warnings(self, *, params):\n params = {} if params is None else params\n warn_params = {\n prop\n for prop, alias in self._requests.items()\n if alias == WARN and prop in params\n }\n for param in warn_params:\n warn(\n f\"Support for {param} has recently been added to this class. \"\n \"To maintain backward compatibility, it is ignored now. 
\"\n \"You can set the request value to False to silence this \"\n \"warning, or to True to consume and use the metadata.\"\n )", "title": "" }, { "docid": "48d01ff7514d43cf3114897150a64444", "score": "0.42671558", "text": "def check_thresholds(self):\n for titem in self.tlist:\n ##\n ## Get the resource value for the process.\n ##\n method = methodcaller(titem[THRESHOLD].resource)\n resource_value = method(self)\n\n ##\n ## If the resource value exceeds the threshold limit, then execute\n ## actions for the threshold.\n ##\n pos = self.threshold_exceeded(titem, resource_value)\n if (pos != -1):\n self.exec_actions(titem, pos, resource_value)\n\n self.print_threshold(titem, resource_value)", "title": "" }, { "docid": "7bc25d8909e6aa209e9f2e2d0be7b68e", "score": "0.42602816", "text": "def test_warn_count_all_unnecessary(marker_trackerstore: TrackerStore):\n with pytest.warns(UserWarning):\n MarkerTrackerLoader(marker_trackerstore, STRATEGY_ALL, 3)", "title": "" }, { "docid": "954ad7f7aae94d797c023eef138522c7", "score": "0.42599148", "text": "def get_allowed_status():\n return ['started', 'finished']", "title": "" }, { "docid": "05147f16cb2c83fd90e135a0478cfc4c", "score": "0.42592102", "text": "def all_tasks_stopped(tasks_state: Any) -> bool:\n for t in tasks_state[\"tasks\"]:\n if t[\"lastStatus\"] in (\"PENDING\", \"RUNNING\"):\n return False\n return True", "title": "" }, { "docid": "4ebea2729c216504d467a8ed07d67258", "score": "0.4251148", "text": "def test_skip_checks(self):\n ds = self.cs.load_dataset(static_files[\"2dim\"])\n # exclude title from the check attributes\n score_groups = self.cs.run_all(ds, [\"acdd\"], skip_checks=[\"check_high\"])\n msg_set = {\n msg\n for sg in score_groups[\"acdd\"][0]\n for msg in sg.msgs\n if sg.weight == BaseCheck.HIGH\n }\n skipped_messages = {\n att + \" not present\" for att in ACDDBaseCheck().high_rec_atts\n }\n # none of the skipped messages should be in the result set\n self.assertTrue(len(msg_set & skipped_messages) == 0)", "title": "" }, { "docid": "ed298383fd32bc0143dada4359d6b013", "score": "0.42471716", "text": "def check_for_time_optimization(penalty_functions):\n\n for i, penalty_functions_phase in enumerate(penalty_functions):\n for pen_fun in penalty_functions_phase:\n if not pen_fun:\n continue\n if (\n pen_fun.type == ObjectiveFcn.Mayer.MINIMIZE_TIME\n or pen_fun.type == ObjectiveFcn.Lagrange.MINIMIZE_TIME\n or pen_fun.type == ConstraintFcn.TIME_CONSTRAINT\n ):\n raise ValueError(\"Time cannot be optimized in Receding Horizon Optimization\")", "title": "" }, { "docid": "ef03ed1a661af6464222e37344a09029", "score": "0.42401606", "text": "def test_invalid_response_check(self, *args):\n self.run_check(check_config)\n self.assertServiceCheck(\"fargate_check\", status=AgentCheck.WARNING, tags=['foo:bar'], count=1)", "title": "" }, { "docid": "68a06dff875a0617c580444396ecdc10", "score": "0.4240132", "text": "def get_failed_cluster_checks(con, service_names):\n\n failed_checks = {}\n for service_name in service_names:\n for service_results in con.health.service(service_name)[1]:\n tags = service_results[\"Service\"][\"Tags\"]\n for check in service_results[\"Checks\"]:\n LOG.info(\"Service %s check %s on %s. 
Status: %s\",\n service_name, check[\"CheckID\"], check[\"Node\"], check[\"Status\"])\n if check[\"Status\"] != \"passing\":\n if not ignore_maintenance_check(check[\"CheckID\"], tags):\n failed_checks[check[\"CheckID\"]] = check\n\n return failed_checks", "title": "" }, { "docid": "0ebf4d2b15005114d1f636ccd6066374", "score": "0.42338967", "text": "def validate(self):\r\n for wrapper in self.dexy(False):\r\n filters_used = wrapper.batch.filters_used\r\n\r\n for f in self.__class__.filters_used:\r\n msg = \"filter %s not used by %s\" % (f, self.__class__.__name__)\r\n assert f in filters_used, msg\r\n \r\n for f in filters_used:\r\n if not f.startswith('-') and not f in self.__class__.filters_used:\r\n msg = s(\"\"\"filter %(filter)s used by %(template)s\r\n but not listed in klass.filters_used,\r\n adjust list to: filters_used = [%(list)s]\"\"\")\r\n msgargs = {\r\n 'filter' : f,\r\n 'template' : self.__class__.__name__,\r\n 'list' : \", \".join(\"'%s'\" % f for f in filters_used)\r\n }\r\n print msg % msgargs\r\n\r\n return wrapper.state == 'ran'", "title": "" }, { "docid": "8d046af264f51c584f7a8cdf26be5a52", "score": "0.42310262", "text": "def _HasChanges(args, flags):\n return any(FlagIsExplicitlySet(args, flag) for flag in flags)", "title": "" }, { "docid": "1c277b2f1fd700b599ea4a29fe8c3e21", "score": "0.4226419", "text": "def check_data(database, name):\n if database.notes[0] in [None, \"none\", \"None\"]:\n database.notes[0] = \"\"\n elif database.notes[0] == \"\":\n pass\n elif database.notes[0][-1] != \";\":\n database.notes[0] += \";\"\n # check start times\n column_bool = database.columns.str.contains(name)\n name_labels = database.columns[column_bool].tolist()\n value_arr = np.array(\n database[database.columns[column_bool]], dtype=np.int\n ).flatten()\n test = np.array([value_arr[0] == value_arr[ii] for ii in range(value_arr.size)])\n if not (test == True).sum() == test.size:\n\n # tf_dict = dict(zip(*np.unique(test, return_counts=True)))\n\n # if tf_dict[True] > tf_dict[False]:\n if test[0] == True and (test[1:] == False).sum() == test[1:].size:\n database.notes[0] += \" {0} is off;\".format(name_labels[0])\n else:\n for comp, test in zip(name_labels, test):\n if test == False:\n database.notes[0] += \" {0} is off;\".format(comp)\n # elif tf_dict[True] < tf_dict[False]:\n # for comp, test in zip(name_labels, test):\n # if test == True:\n # database.notes[0] += ' {0} is off;'.format(comp)\n return database", "title": "" }, { "docid": "f7f1139112380a5fe3a8b90dbdac4c7e", "score": "0.42251778", "text": "def _check_terminate_condition(vehicle, name_string):\n status = False\n exit_number = 0\n if shared.status['abort']:\n util.log_warning(\"Abort detected!\")\n status = True\n exit_number = 1\n \n elif shared.status['thread_flag'] & shared.FLOCKING_FLAG:\n util.log_info(\"FLOCKING_FLAG detected.\")\n status = True\n exit_number = 2\n \n elif vehicle.mode != 'GUIDED':\n util.log_warning(\"Mode switched out from 'GUIDED'!\")\n status = True\n exit_number = 3\n \n if status: util.log_info(\"Stopping %s\" % name_string)\n \n return [status, exit_number]", "title": "" }, { "docid": "0394924b2e822879a17c90ae596e99fa", "score": "0.42195225", "text": "def _check_status(self, board):\n has_2048 = any([x > 10 for x in board])\n return ['You Lose', 'You Win'][has_2048]", "title": "" }, { "docid": "dc9669f7208d13e7e6a09fe426869fb7", "score": "0.4215618", "text": "def _ShouldPrioritizeUnusedRequiredFlag(flag):\n return FlagIsRequired(suggestions[flag]) and not _FlagAlreadyUsed(flag)", "title": 
"" }, { "docid": "9b8171f407b4a29fd0ee1da4577da5a1", "score": "0.41948983", "text": "def main(argv=None, prog=None):\n\n parser = get_argument_parser(prog)\n args = parser.parse_args(argv)\n\n if args.help:\n print('usage: {} [-h] [OPTIONS ...]'.format(parser.prog))\n print()\n print(' Checks can be turned off with the respective -wno-{option} flag.')\n print()\n print('options:')\n options = get_all_options()\n width = max(map(len, options.keys())) + 3\n for key, value in sorted(options.items(), key=lambda x: x[0]):\n print(' -w{} {}'.format(key.ljust(width), value))\n return 0\n\n options = {k: v for k, v in vars(args).items() if v is not None}\n for cls in ProjectCheck.__subclasses__():\n for warning in cls().do_check(options):\n print(warning)", "title": "" }, { "docid": "6cd3e88af2b31d50c004c1e1831c4405", "score": "0.4192305", "text": "def checklist(entries, factor=None, weight=1.0, filter=lambda x: True):\n n = len(entries)\n # If there's nothing there assume 0 as result\n if n == 0: return 0\n # Calculate the default falloff\n if factor == None: factor = 1.0-1.0/n\n\n errors = [ entry for entry in entries if filter(entry['type']) and not check(entry['expected'], entry['got']) ]\n correct = n - len(errors)\n # Compute the grade as exponential falloff\n grade = (float(correct)/float(n))*(factor**(n - correct))\n return { 'grade': grade, 'correct': correct, 'total': n, 'weight': weight, 'errors': errors }", "title": "" }, { "docid": "a1170ae2f90efabbe0ac75253d383c83", "score": "0.41909295", "text": "def test_compute_testsuite_status_noimpact():\n suite_status = True\n tc_status = True\n tc_impact = \"noimpact\"\n result = testsuite_utils.compute_testsuite_status(suite_status, tc_status, tc_impact)\n assert result == True", "title": "" }, { "docid": "314cb40a63a1f9505293812912d46ca2", "score": "0.41892692", "text": "def set_watched_flags(self, infolabels, flag, resume_time=0):\n if flag == WatchedStatus.UNWATCHED:\n infolabels['playcount'] = 0\n infolabels['overlay'] = 4\n elif flag == WatchedStatus.WATCHED:\n infolabels['playcount'] = 1\n infolabels['overlay'] = 5\n elif flag == WatchedStatus.PARTIAL and plugin_addon.getSetting('file_resume') == 'true':\n self.setProperty('ResumeTime', str(resume_time))", "title": "" }, { "docid": "35bd0f6a99cd239d87513fb0fd4ce2c2", "score": "0.4188595", "text": "def check_flags():\n envs = []\n for flag in COMP_FLAGS:\n envs += [flag] if os.environ.get(flag, None) else []\n if envs:\n letter = 's' if len(envs) > 1 else ''\n pron = 'are' if len(envs) > 1 else 'is'\n fjoint = ', '.join(envs)\n msg = (\n 'Environment variable'+letter+' '+fjoint+' '+pron+' defined, '\n 'this could cause problems with the compilation of certain '\n 'packages'\n )\n warnings.warn(msg)", "title": "" }, { "docid": "d90df422f570a36ff6c2a85c3b23326d", "score": "0.4185061", "text": "def create_email_flag_queue(names, action, flag=\"(\\\\Seen)\"):\n\tclass Found(Exception):\n\t\tpass\n\n\tif not all([names, action, flag]):\n\t\treturn\n\n\tfor name in json.loads(names or []):\n\t\tuid, seen_status = frappe.db.get_value(\"Communication\", name, \n\t\t\t[\"ifnull(uid, -1)\", \"ifnull(seen, 0)\"])\n\n\t\tif not uid or uid == -1:\n\t\t\tcontinue\n\n\t\tseen = 1 if action == \"Read\" else \"Unread\"\n\t\t# check if states are correct\n\t\tif (action =='Read' and seen_status == 0) or (action =='Unread' and seen_status == 1):\n\t\t\ttry:\n\t\t\t\tqueue = frappe.db.sql(\"\"\"select name, action, flag from `tabEmail Flag Queue`\n\t\t\t\t\twhere communication = %(name)s\"\"\", 
{\"name\":name}, as_dict=True)\n\t\t\t\tfor q in queue:\n\t\t\t\t\t# is same email with same flag\n\t\t\t\t\tif q.flag == flag:\n\t\t\t\t\t\t# to prevent flag local and server states being out of sync\n\t\t\t\t\t\tif q.action != action:\n\t\t\t\t\t\t\tfrappe.delete_doc(\"Email Flag Queue\", q.name)\n\t\t\t\t\t\traise Found\n\n\t\t\t\tflag_queue = frappe.get_doc({\n\t\t\t\t\t\"doctype\": \"Email Flag Queue\",\n\t\t\t\t\t\"communication\": name,\n\t\t\t\t\t\"action\": action,\n\t\t\t\t\t\"flag\": flag\n\t\t\t\t})\n\t\t\t\tflag_queue.save(ignore_permissions=True);\n\t\t\t\tfrappe.db.set_value(\"Communication\", name, \"seen\", seen, \n\t\t\t\t\tupdate_modified=False)\n\t\t\texcept Found:\n\t\t\t\tpass", "title": "" }, { "docid": "61ab7a401853df2b0a6e01fe8d754b43", "score": "0.4184796", "text": "def test_bad_wavelength_groups_are_marked_bad(file_for_general_filtering):\n filtered_paths = ['/rounds/round000/76487/pump/par']\n fake_paths = ['foo']\n wav_path = '/rounds/round000/76487'\n mark_bad_wavelength_groups(file_for_general_filtering, fake_paths)\n assert not file_for_general_filtering[wav_path].attrs['isbad']\n mark_bad_wavelength_groups(file_for_general_filtering, filtered_paths)\n assert file_for_general_filtering[wav_path].attrs['isbad']", "title": "" }, { "docid": "7de6648965e5328a8df866e4ed189f12", "score": "0.41815355", "text": "def _disable_delta_updates_check(f):\n return click.option('--disable-delta-updates-check',\n default=False,\n is_flag=True,\n help='If in delta mode, disable verification that updates in delta list are already in DB.')(f)", "title": "" }, { "docid": "1854b7cd4143d546fd1334348c533a89", "score": "0.41806197", "text": "def status_check(builds):\n # dictionary of the type release_job_type: count\n # e.g. {'mxnet_lib/static':0, 'python/pypi':0}\n global release_job_type\n success_count = 0\n release_job_type_dict = {el: 0 for el in release_job_type}\n\n # iterate over the builds to count number of the desired release job types\n for build in builds:\n build_release_job_type = get_release_job_type(build)\n if build.get_status() == 'SUCCESS':\n logging.info(f'Successful build {build_release_job_type} {build.get_number()}')\n else:\n logging.info(f'Failure build {build_release_job_type} {build.get_number()}')\n release_job_type_dict[build_release_job_type] += 1\n\n # iterate over the map of release_job_type: count\n # if 'mxnet_lib/static':1 indicates static jobtype job ran in the pipeline\n # else 'mxnet_lib/static':0 indicates static jobtype never ran -> log as failed\n for release_job_type_name, release_job_type_count in release_job_type_dict.items():\n if release_job_type_count == 0:\n logging.info(f'Failure build {release_job_type_name}')\n elif release_job_type_count == 1:\n success_count += 1\n else:\n logging.info(f'{release_job_type} ran {release_job_type_count} times')\n # if success_count = 2 [i.e. 
len of release_job_type], it means both static & pypi jobs have run\n if success_count == len(release_job_type):\n logging.info(f'All the required jobs ran')\n else:\n logging.info(f'1/more of the required jobs did not run')", "title": "" }, { "docid": "95d2e740736d65112c9abe6ace1b1cbe", "score": "0.41747725", "text": "def __extract_side_effects(self, desc):\n side_effects = []\n for pattern_index, pattern in enumerate(DDI_SIDE_EFFECTS):\n pg = re.match(pattern, desc)\n if pg is not None:\n se_name_list = []\n se_name = pg.group(\"se\").lower()\n mode = pg.group(\"mode\")\n\n # Handle the case of multiple activities eg x, y and z activities\n has_word_activities = (\"activities\" in se_name)\n if has_word_activities:\n se_name = se_name.replace(\" activities\", \"\")\n mode_name = DDI_MODE_MAP[mode]\n if \", and\" in se_name:\n se_name_list = [sanatize_se_txt(se) for se in se_name.replace(\"and\", \"\").split(\", \")]\n elif \"and\" in se_name:\n se_name_list = [sanatize_se_txt(se) for se in se_name.split(\" and \")]\n else:\n se_name_list = [sanatize_se_txt(se_name)]\n\n if has_word_activities:\n se_name_list = [txt + \"_activities\" for txt in se_name_list]\n\n for side_effect in se_name_list:\n if side_effect in DDI_SE_NAME_MAP:\n side_effect = DDI_SE_NAME_MAP[side_effect]\n side_effects.append(f'{mode_name}_{side_effect}')\n\n # decrease_excretion_rate\n if pattern_index == 5:\n side_effects.append('decrease_excretion_rate')\n elif pattern_index == 6:\n side_effects.append('increase_excretion_rate')\n\n break\n return side_effects", "title": "" }, { "docid": "b329390722650635262c3d29da9982f1", "score": "0.41728893", "text": "def bonus2(word_list):\n\n results = list()\n # All words with 5 result words must necessarily have at least five letters\n for idx in range(5, len(word_list)):\n for word in word_list[idx]:\n if len(bonus(word, word_list)) == 5:\n results.append(word)\n return results", "title": "" }, { "docid": "ed69a25079fa1a8f60a14035a56ebab4", "score": "0.41714412", "text": "def checkLocalWQStatus(self, dbname):\n results = {}\n\n for st in ('Available', 'Negotiating', 'Acquired', 'Running'):\n if dbname == \"workqueue\":\n elements = self.localBackend.getElements(status=st, returnIdOnly=True)\n else:\n elements = self.localBackend.getInboxElements(status=st, returnIdOnly=True)\n results[st] = len(elements)\n return results", "title": "" }, { "docid": "946b6ec4280958cb2a872c7a434bd79d", "score": "0.4169822", "text": "def test_ping_failure_under_threshold_flag_ping_fail_count(pingboy, mock_ping_fail):\n pingboy.flag_ping_fail_count = 0\n pingboy.config_values['ping_fails_needed_for_restart'] = 2\n\n pingboy.ping()\n\n assert pingboy.flag_ping_fail_count == 1", "title": "" }, { "docid": "ae710be647136328c540a77df46d9845", "score": "0.4166059", "text": "def test_not_enough_consecutive_failures(self):\n self.metrics_check.consecutive_failures = 3\n\n # Not enough points above the high-alert threshold, so we should get a warning\n self.metrics_check.warning_value = 8.0\n self.metrics_check.high_alert_value = 9.0\n result, tags = self.metrics_check._run()\n self.assertEqual(result.status_check, self.metrics_check)\n self.assertFalse(result.succeeded)\n self.assertEqual(result.error, u'WARNING prod.good.data: 3 consecutive points not <= 8.0')\n self.assertEqual(self.metrics_check.importance, Service.WARNING_STATUS)\n self.assertEqual(tags, ['warning:prod.good.data', 'warning:stage.cool.data'])\n\n # Not enough points above the warning threshold, so we shouldn't get an alert\n 
self.metrics_check.warning_value = 9.0\n self.metrics_check.high_alert_value = 10.0\n result, tags = self.metrics_check._run()\n self.assertTrue(result.succeeded)\n self.assertIsNone(result.error)\n self.assertEqual(tags, [])", "title": "" }, { "docid": "c3013c0421e7cc6437ead5040665471a", "score": "0.41629088", "text": "def kill_workers(self, workspec_list):\n retList = []\n for workspec in workspec_list:\n retList.append((True, ''))\n return retList", "title": "" } ]
95f40e89781ba8bb8c7616c89317f79f
Return the diagnostic results.
[ { "docid": "8e36f61f8339717c88e9778af469e118", "score": "0.0", "text": "def get_results(self):\n return copy.deepcopy(self._results)", "title": "" } ]
[ { "docid": "fb824862fc25c212b60aa8351eb76e37", "score": "0.688687", "text": "def GatherResults(self):\n\t\tResults = self.ProbeManager.GetResult()\n\n\t\t#ordino i risultati in base alla run prima di restituirli\n\t\tResults.sort(key=lambda ele: (ele[1]))\n\t\treturn Results", "title": "" }, { "docid": "2ad154e0e2e63a94abfa408609e33a40", "score": "0.68794763", "text": "def _print_results(self, out=sys.stdout):\n self._results['SUCCESS'] = []\n self._results['WARNING'] = []\n self._results['DANGER'] = []\n\n for metric, score in self._metric_averages.items():\n if pd.isna(score):\n continue\n if score >= 0.9:\n self._results['SUCCESS'].append(\n DIAGNOSTIC_REPORT_RESULT_DETAILS[metric]['SUCCESS'])\n elif score >= 0.5:\n self._results['WARNING'].append(\n DIAGNOSTIC_REPORT_RESULT_DETAILS[metric]['WARNING'])\n else:\n self._results['DANGER'].append(\n DIAGNOSTIC_REPORT_RESULT_DETAILS[metric]['DANGER'])\n\n out.write('\\nDiagnosticResults:\\n')\n print_results_for_level(out, self._results, 'SUCCESS')\n print_results_for_level(out, self._results, 'WARNING')\n print_results_for_level(out, self._results, 'DANGER')", "title": "" }, { "docid": "8d706a3fdd5ff1212a63725acdb63851", "score": "0.6768503", "text": "def results(self):\n pass", "title": "" }, { "docid": "e9a70051bad12d5c1e380e53f1be0b56", "score": "0.676415", "text": "def get_result_summary(self):\r\n pass", "title": "" }, { "docid": "cc2c973a00c27f4c91431ea3534002b4", "score": "0.66853684", "text": "def getResults(self):", "title": "" }, { "docid": "6f1660faa47306e594204aa801ae73b2", "score": "0.6642721", "text": "def results(self):\n\t\traise NotImplementedError()", "title": "" }, { "docid": "3a67bf9756bf86ccdb56fa34327049b4", "score": "0.66364217", "text": "def get_results(self):\n res = {\n 'cea': self.get_CEA(),\n 'cta': self.get_CTA(),\n 'cpa': self.get_CPA(),\n 'errors': self.get_Errors(),\n 'audit': self.get_AuditRecords(),\n 'checkpoints': self.get_CheckPoints()}\n return res", "title": "" }, { "docid": "71efa0b12dac134ace2e2d117a4ccedd", "score": "0.6628336", "text": "def results_info():\n if len(pycsverter.result_list) == 0:\n Console.print_red('Parsed result not found...run: load')\n else:\n Console.print_blue('List of {} elements'.format(len(pycsverter.result_list)))", "title": "" }, { "docid": "fab12ea544f3a6c856136bdf2c4c7e1d", "score": "0.657802", "text": "def compute_results( self, accepted_messages ):\n return \"Result!\"", "title": "" }, { "docid": "aa848ee253f3befe0513ea1346b252fd", "score": "0.65521353", "text": "def print_results(self): # type: () -> ()\n pass", "title": "" }, { "docid": "e8c48e4a603ba4f60aa3540220aa63dd", "score": "0.6551062", "text": "def _extract_results(self):\n\t\tprint \"Extracting the results... 
\",\n\t\tactionPotentials = self._nn.extract_action_potentials()\n\t\tfirings = {}\n\t\tself._meanFr = {}\n\t\tself._estimatedEMG = {}\n\t\tself._nSpikes = {}\n\t\tself._nActiveCells = {}\n\t\tfor muscle in actionPotentials:\n\t\t\tfirings[muscle]={}\n\t\t\tself._meanFr[muscle]={}\n\t\t\tself._estimatedEMG[muscle]={}\n\t\t\tself._nSpikes[muscle]={}\n\t\t\tself._nActiveCells[muscle]={}\n\t\t\tfor cellName in actionPotentials[muscle]:\n\t\t\t\tfirings[muscle][cellName] = tlsf.exctract_firings(actionPotentials[muscle][cellName].spiketrains,self._get_tstop())\n\t\t\t\tself._nActiveCells[muscle][cellName] = np.count_nonzero(np.sum(firings[muscle][cellName],axis=1))\n\t\t\t\tself._nSpikes[muscle][cellName] = np.sum(firings[muscle][cellName])\n\t\t\t\tself._meanFr[muscle][cellName] = tlsf.compute_mean_firing_rate(firings[muscle][cellName])\n\t\t\t\tif cellName in self._nn.get_motoneurons_names():\n\t\t\t\t\tself._estimatedEMG[muscle][cellName] = tlsf.synth_rat_emg(firings[muscle][cellName])\n\t\tprint \"...completed.\"", "title": "" }, { "docid": "ac37126c09af2c9aad50ffe7f1ddd2f1", "score": "0.6531893", "text": "def __present_test_results(self):\n\n # Prepares data\n test_result = self.test_results.get(self.test_name)\n\n # Informs developer testing has been concluded.\n tl.log_alert(f\"Test '{self.test_name}' completed. Results:\")\n tl.log('.')\n\n # Checks test results per function.\n # present_order = [x['function_name'] for x in self.expected]\n # tl.log(str(test_result.items()))\n for function_name, results in test_result.items():\n\n # Logs analyzed function name.\n tl.log(tl.underline(f\"-> Function '{function_name}'\"))\n\n # Explodes function execution logs.\n execution_confirmation = results.get('execution_confirmation')\n print_on_screen = self.PRINT_FUNCTION_LOGS_ON_SCREEN if execution_confirmation else True\n log_raw = results.get('log_raw')\n if log_raw:\n for log_line in log_raw:\n tl.log(f\"{log_line}\", print_on_screen=print_on_screen)\n tl.log('.', print_on_screen=print_on_screen)\n else:\n tl.log(f\"No log captured.\", print_on_screen=print_on_screen)\n\n # Prepares and displays log report data such as duration and memory usage.\n log_report = results.get('log_report')\n if log_report:\n init_duration = log_report.get('init_duration', 'N.A.').replace(' ', '').replace('ms', '')\n duration = log_report.get('duration', 'N.A.').replace(' ', '').replace('ms', '')\n billed = log_report.get('billed_duration', 'N.A.')\n max_memory = log_report.get('max_memory_used', 'N.A.').replace(' ', '').replace('MB', '')\n memory_size = log_report.get('memory_size', 'N.A.')\n try:\n billed_in_seconds = float(billed.replace('ms', '').strip())/1000\n memory_size_in_gb = float(memory_size.replace('MB', '').strip())/1024\n gbs = round(billed_in_seconds*memory_size_in_gb, 5)\n except:\n gbs = 'N.A.'\n report_str = f\"Time: {init_duration}/{duration}/{billed} | Memory: {max_memory}/{memory_size} | \" \\\n f\"GB-s: {gbs}\"\n tl.log(report_str)\n\n # Exposes criteria by which the log has been selected.\n pick_by = results.get('pick_by')\n if pick_by:\n tl.log(f\"Picked by '{pick_by}'.\")\n\n # Initiates assertion counter.\n assertion_success = 0\n\n # Shows execution confirmation status\n tl.log(f\"{tl.get_status_string(execution_confirmation)} Execution confirmation.\")\n if execution_confirmation: assertion_success += 1\n\n # Shows individual function assertion statuses.\n assertion_list = list(results.items())[4:]\n for assertion, status in assertion_list:\n tl.log(f\"{tl.get_status_string(status)} 
{assertion}\")\n if status: assertion_success += 1\n\n # Shows final function assertion status.\n assertion_no = len(assertion_list)\n if assertion_no+1 == assertion_success:\n tl.log(tl.paint_status_bg(f\"'{function_name}' assertion PASSED ({assertion_success}/{assertion_no+1} \"\n f\"checks).\", True))\n else:\n tl.log(tl.paint_status_bg(f\"'{function_name}' assertion FAILED ({assertion_success}/{assertion_no+1} \"\n f\"checks).\", False))\n tl.log('.')", "title": "" }, { "docid": "509db13a0b92ef7b7059aa5f3c255ce8", "score": "0.6514848", "text": "def getResults(self):\n raise NotImplementedError(\"Not yet implemented!\")", "title": "" }, { "docid": "75ed63515dcd08f01bf3ef35a00cfd08", "score": "0.6506083", "text": "def get_results(self):\n return self.result", "title": "" }, { "docid": "ef634cb24157128dd80bc89bed2d313f", "score": "0.64976263", "text": "def report(self):\n print('Warnings:')\n for err in self.results['warnings']:\n print(err)\n print('Errors:')\n for err in self.results['errors']:\n print(err)", "title": "" }, { "docid": "7485bc1998fa374c6721706b5261ab04", "score": "0.6464204", "text": "def result(self):\n if self._exc_info is not None:\n t = self._exc_info\n print('Service has error %s: '%t)\n \n try:\n print [self.index, self._result]\n return [self.index, self._result]\n except AttributeError:\n raise", "title": "" }, { "docid": "6d101e3f5b04f1146b55ee557465518e", "score": "0.64410347", "text": "def get_results(self):\n raise NotImplementedError('Method `get_results` must be defined')", "title": "" }, { "docid": "b063b6766df36abed89f1c889c9cf1dc", "score": "0.64109826", "text": "def get_results():\n return {'water_demand': get_cooling_water_demand(),\n 'total_cost': get_total_cost(),\n 'total_emissions': get_total_emissions(),\n 'electricity_prices': get_prices()}", "title": "" }, { "docid": "306edc1d5ae70562978f7e96b09740f2", "score": "0.6398196", "text": "def get_result(self):\n return self.results", "title": "" }, { "docid": "5058a37b97588569aa8b5ab3c220b019", "score": "0.6393333", "text": "def get_diagnostics(self):\n return self.evaluator, self.lists_analyzer", "title": "" }, { "docid": "2cb6bc4b0f1e21928878ee97fb309ee6", "score": "0.63785726", "text": "def get_results_info(self):\n\n return self.volume, self.nuc_list, self.burn_list, self.mat_tally_ind", "title": "" }, { "docid": "bb9db0d7da2a0db38a006f909d03fce6", "score": "0.63411427", "text": "def return_results(self):\n\n # log some stuff in report\n\n # a text file should be written and stored as single file data and\n #parameter data node in the database\n\n #produce a single file data with all the numbers\n\n all_res = self.ctx.all_results\n bm_dic = {}\n bmd_dic = {}\n vol_dic = {}\n\n for elem,val in all_res:\n vol_dic[elem] = val[0]\n bm_dic[elem] = val[1]\n bmd_dic[elem] = val[2]\n\n outputnode_dict ={}\n\n\n outputnode_dict['workflow_name'] = self.__class__.__name__\n outputnode_dict['warnings'] = self.ctx.warnings\n outputnode_dict['successful'] = self.ctx.successful\n outputnode_dict['eos_uuids'] = self.ctx.eos_uuids\n outputnode_dict['eos_success'] = self.ctx.all_succ\n outputnode_dict['bulk_modulus'] = bm_dic\n outputnode_dict['bulk_modulus_units'] = 'GPa'\n outputnode_dict['bulk_modulus_dev'] = bmd_dic\n outputnode_dict['volumes'] = vol_dic\n outputnode_dict['volumes_units'] = 'A^3/per atom'\n outputnode_dict['delta_factor'] = {'Wien2K' : '', 'Fleur_026' : ''}\n\n #outputnode = ParameterData(dict=outputnode_dict)\n\n if self.ctx.successful:\n self.report('INFO: Done, delta worklfow complete')\n 
#print 'Done, delta worklfow complete'\n else:\n self.report('INFO: Done, but something went wrong.... Properly some '\n 'individual eos workchain failed. Check the log.')\n #print('Done, but something went wrong.... Properly some '\n # 'individual eos workchain failed. Check the log.')\n\n delta_file = SingleData.filename = self.ctx.outfilepath\n\n print delta_file\n\n # output must be aiida Data types.\n outnodedict = {}\n outnode = ParameterData(dict=outputnode_dict)\n outnodedict['results_node'] = outnode\n for label in self.ctx.labels:\n eos_res = self.ctx[label]\n #print(calc)\n outpara1 = eos_res.get_outputs_dict()\n #print outpara1\n try:\n outpara = outpara1['output_eos_wc_para']\n except KeyError:\n #self.report('ERROR: Eos wc for element: {} failed. I retrieved {} '\n # 'I skip the results retrieval for that element.'.format(label, eos_res))\n continue\n outnodedict[label] = outpara\n\n outputnode = create_delta_result_node(**outnodedict)\n\n outdict = {}\n outdict['output_delta_wc_para'] = outputnode.get('output_delta_wc_para')\n #outdict['delta_file'] = delta_file\n #print outdict\n for link_name, node in outdict.iteritems():\n self.out(link_name, node)", "title": "" }, { "docid": "24bfac1b7fe663502e5b274b79210851", "score": "0.6323926", "text": "def check_sim_results(self):\n errors = (\n self._check_sim_results_unique() +\n self._check_sim_results_readable()\n )\n\n if len(errors) == 0:\n print('all good')\n\n else:\n print('\\n'.join(errors))", "title": "" }, { "docid": "d8350763fd54ce524ffbdbafe288e22e", "score": "0.6322442", "text": "def getDetailedInfo(self):\n return [self.getHostName(), self.getResult().lower(),\n self.getResultInfo(), self.getExecution()]", "title": "" }, { "docid": "1dfd2543c43f7867487141a3872abd18", "score": "0.62999535", "text": "def _AddAnalysisResults(self):\n analyses = []\n keys = []\n\n for i in range(0, 5):\n crash_identifiers = {'signature': 'sig%d' % i}\n keys.append(crash_identifiers)\n\n analysis = self._CreateAnalysisResult(crash_identifiers)\n analysis.signature = 'sig%d' % i\n analysis.crashed_version = '53.0.275%d.0' % i\n analysis.stack_trace = 'dummy\\nframe1\\nframe2'\n analysis.platform = 'android'\n analysis.channel = 'canary'\n analyses.append(analysis)\n\n analyses[0].status = analysis_status.COMPLETED\n analyses[1].status = analysis_status.COMPLETED\n analyses[2].status = analysis_status.ERROR\n analyses[3].status = analysis_status.COMPLETED\n analyses[4].status = analysis_status.ERROR\n\n suspected_cl = {\n 'url': 'https://chromium.googlesource.com/chromium/src/+/346a',\n 'review_url': 'https://review',\n 'revision': '346a',\n 'project_path': 'src/',\n 'author': '[email protected]',\n 'time': '2016-06-04 00:00:00 UTC',\n 'reason': 'some reason',\n 'confidence': 1\n }\n analyses[0].result = {'found': True,\n 'suspected_cls': [suspected_cl],\n 'suspected_components': ['Blink>API', 'Blink>DOM'],\n 'suspected_project': 'chromium',\n 'regression_range': None}\n analyses[0].found_suspects = True\n analyses[1].result = {'found': False,\n 'suspected_cls': [],\n 'suspected_components': ['Blink>API', 'Blink>DOM'],\n 'suspected_project': 'chromium',\n 'regression_range': None}\n analyses[1].found_suspects = False\n analyses[2].result = {'found': False,\n 'suspected_cls': [],\n 'suspected_components': ['Blink>API', 'Blink>DOM'],\n 'suspected_project': 'chromium',\n 'regression_range': ['53.0.2749.0', '53.0.2750.0']}\n analyses[2].regression_range = ['53.0.2749.0', '53.0.2750.0']\n analyses[2].found_suspects = False\n analyses[3].result = 
{'found': True,\n 'suspected_cls': [suspected_cl],\n 'suspected_components': ['Blink>API'],\n 'suspected_project': 'chromium',\n 'regression_range': ['53.0.2749.0', '53.0.2750.0']}\n analyses[3].regression_range = ['53.0.2749.0', '53.0.2750.0']\n analyses[3].found_suspects = True\n analyses[4].result = {'found': False,\n 'suspected_cls': [],\n 'suspected_components': ['Blink>API', 'Blink>DOM'],\n 'suspected_project': 'chromium',\n 'regression_range': ['53.0.2749.0', '53.0.2750.0']}\n analyses[4].regression_range = ['53.0.2749.0', '53.0.2750.0']\n analyses[4].found_suspects = False\n\n analyses[0].culprit_cls = ['https://chromium.googlesource.com/'\n 'chromium/src/+/346aqerq3']\n self._SetResultsTriageStatus(analyses[0], triage_status.TRIAGED_INCORRECT)\n\n analyses[1].culprit_cls = ['https://chromium.googlesource.com/'\n 'chromium/src/+/346aqerq3']\n self._SetResultsTriageStatus(analyses[1], triage_status.TRIAGED_CORRECT)\n analyses[3].culprit_cls = ['https://chromium.googlesource.com/'\n 'chromium/src/+/346aqerq3']\n self._SetResultsTriageStatus(analyses[3], triage_status.TRIAGED_CORRECT)\n self._SetResultsTriageStatus(analyses[4], triage_status.TRIAGED_UNSURE)\n\n for i, analysis in enumerate(analyses):\n analysis.requested_time = (datetime(2016, 7, 4, 12, 50, 17, 0) +\n timedelta(hours=24 * i))\n analysis.has_regression_range = not analysis.result[\n 'regression_range'] is None\n analysis.put()\n\n return keys", "title": "" }, { "docid": "293bb628d89b2e1976a6a3192472782c", "score": "0.62981385", "text": "def get_results(self):\r\n\r\n return self._results", "title": "" }, { "docid": "28593b5dbd075f20994183285c5834b7", "score": "0.6268796", "text": "def get_result(self):\n ...", "title": "" }, { "docid": "28593b5dbd075f20994183285c5834b7", "score": "0.6268796", "text": "def get_result(self):\n ...", "title": "" }, { "docid": "28593b5dbd075f20994183285c5834b7", "score": "0.6268796", "text": "def get_result(self):\n ...", "title": "" }, { "docid": "e8877eaeed378b2828ce473c592b48d6", "score": "0.6262882", "text": "def result(self):\n return []", "title": "" }, { "docid": "2b6f9775d09e7f0ec79b3bf80a1ccd4e", "score": "0.62624246", "text": "def _report_results(self):\n operations = []\n while not self.results.empty():\n operations.append(self.results.get())\n\n total_operations = self._get_completed_operations(operations)\n self.logger.info(\"\")\n operations_per_second = self._get_operations_per_second(operations)\n if operations_per_second:\n seconds_per_operation = 1 / operations_per_second\n weighted_average_seconds = total_operations / operations_per_second\n self.logger.info(\n \"Completed {:,} operations in a weighted-average of {}s ({} ops/s, {} s/op)\".format(\n total_operations,\n self._format_number(weighted_average_seconds, 4),\n self._format_number(operations_per_second, 4),\n self._format_number(seconds_per_operation, 4)\n )\n )\n else:\n self.logger.info(\"Completed without generating operation statistics.\")\n self.logger.info(\"\")", "title": "" }, { "docid": "8afd853da01970b6c13e7d73516ae96d", "score": "0.6248415", "text": "def _GetResultsImpl(self):\n with open(self._log_path, 'r') as outfile:\n result = outfile.read() + ']'\n return tracing_agents.TraceResult('traceEvents', ast.literal_eval(result))", "title": "" }, { "docid": "947a457586d205037553ce7f580b186b", "score": "0.6241239", "text": "def getResults(self):\n return self.results", "title": "" }, { "docid": "9d547ff8c9d2234d08997d21ed020162", "score": "0.6223813", "text": "def get_results(self):\n while not 
self.output_queue.empty():\n print '[ Result ] : ', self.output_queue.get()", "title": "" }, { "docid": "6e3da76d477b93306660c15a002426e0", "score": "0.6215097", "text": "def get_diagnostics(self):\n url = self._base_url + \"diagnostics\"\n response = self.session.get(url)\n response.raise_for_status()\n return response.json()", "title": "" }, { "docid": "29a10f48e645f0d8be584be0435e9429", "score": "0.61985886", "text": "def get_results(self):\n return self._get('results')", "title": "" }, { "docid": "8bf7678831b26506963d4d5c332f06be", "score": "0.6168256", "text": "def result(self, *l_resu, **kwargs):\n nf = len(self.test_result)\n self.test_result.extend(l_resu)\n for values in l_resu:\n job, diag = values[0], values[2]\n nf += 1\n if self.info >= 2:\n self.run.Mess(ufmt(_(u'%s completed (%d/%d), diagnostic : %s'),\n job, nf, self.nbitem, diag), 'SILENT')", "title": "" }, { "docid": "5277f88d14973bd90883c704cd4014d2", "score": "0.61661565", "text": "def _check_results(self):\n pass", "title": "" }, { "docid": "1b3cc36e12a3843a1a0d0e4191f184fe", "score": "0.61568326", "text": "def results(self):\n return self._results", "title": "" }, { "docid": "bd59307974c953e87eae738c356e6c11", "score": "0.61486036", "text": "def read_results(self):\n self.read_energy()\n self.read_forces(self.atoms)\n self.read_eigenvalues()\n self.read_dipole()\n self.read_xray()", "title": "" }, { "docid": "326ae50ae922829632f46ca2bc2b49ef", "score": "0.6131526", "text": "def print_results(self):\n self.report('\\n' + '='*75)\n total_injections = len(self.hash_found) + len(self.hash_notfound)\n if len(self.hash_found) + len(self.hash_notfound) == 0:\n pass\n else:\n self.report(\"[*] Final Results:\")\n self.report('='*75 + '\\n')\n self.report(\"- Injections:\", total_injections)\n self.report(\"- Failed:\", len(self.hash_notfound))\n self.report(\"- Sucessfull:\", len(self.hash_found))\n try:\n _accur = len(self.hash_found) * 100 / total_injections\n except ZeroDivisionError:\n _accur = 0\n self.report(\"- Accur: %s %%\\n\" % _accur)\n if not len(self.hash_found) and self.hash_notfound:\n self.report('='*75 + '\\n')\n pass\n else:\n self.report('='*75)\n self.report(\"[*] List of possible XSS injections:\")\n self.report('='*75 + '\\n')\n #XXX better control of flow\n for line in self.hash_found: \n self.save_result(line)\n attack_url = self.apply_postprocessing(line[0], line[1], line[2], line[3], line[4], line[5], line[6])\n if self.options.fileoutput:\n fout = open(\"XSSlist.dat\", \"a\")\n if line[2] == \"xsr\":\n self.xsr_founded = self.xsr_founded + 1\n xsr_vulnerable_host = [{\"payload\":str(line[4]), \"target\":str(line[6])}] \n if xsr_vulnerable_host[0][\"payload\"] == line[4] and xsr_vulnerable_host[0][\"target\"] == line[6] and self.xsr_founded > 1:\n self.xsr_founded = self.xsr_founded - 1\n pass\n else:\n self.report(\"[I] Target:\", line[6])\n self.report(\"[+] Injection:\",str(line[6])+\"/\"+str(line[4]), \"[\", Curl.referer, \"]\")\n self.report(\"[!] Special:\", \"This injection looks like a Cross Site Referer Scripting\")\n self.report(\"[-] Method:\", line[2])\n self.report('-'*50, \"\\n\")\n if self.options.fileoutput:\n fout.write(\"\\n\" + \"XSSer Security Report: \" + str(datetime.datetime.now()) + \"\\n\")\n fout.write(\"---------------------\" + \"\\n\")\n fout.write(\"[I] Target: \" + line[6] + \"\\n\")\n fout.write(\"[+] Injection: \" + str(line[6])+\"/\"+str(line[4]) + \"[\" + Curl.referer + \"]\" + \"\\n\")\n fout.write(\"[!] 
Special: \" + \"This injections looks like a Cross Site Referer Scripting\" + \"\\n\")\n fout.write(\"[-] Method: \" + line[2] + \"\\n\" + '-'*50 +\"\\n\")\n elif line[2] == \"xsa\":\n self.xsa_founded = self.xsa_founded + 1\n xsa_vulnerable_host = [{\"payload\":str(line[4]), \"target\":str(line[6])}]\n if xsa_vulnerable_host[0][\"payload\"] == line[4] and xsa_vulnerable_host[0][\"target\"] == line[6] and self.xsa_founded > 1:\n self.xsa_founded = self.xsa_founded - 1\n pass\n else:\n self.report(\"[I] Target:\", line[6])\n self.report(\"[+] Injection:\",str(line[6])+\"/\"+str(line[4]),\n \"[\", Curl.agent, \"]\")\n self.report(\"[!] Special:\", \"This injection looks like a Cross Site Agent Scripting\")\n self.report(\"[-] Method:\", line[2])\n self.report('-'*50, \"\\n\")\n if self.options.fileoutput:\n fout.write(\"\\n\" + \"XSSer Security Report: \" + str(datetime.datetime.now()) + \"\\n\")\n fout.write(\"---------------------\" + \"\\n\")\n fout.write(\"[I] Target: \" + line[6] + \"\\n\")\n fout.write(\"[+] Injection: \"+ str(line[6])+\"/\"+str(line[4]) + \"[\" + Curl.agent + \"]\" + \"\\n\")\n fout.write(\"[!] Special: \" + \"This injection looks like a Cross Site Agent Scripting \" + \"\\n\")\n fout.write(\"[-] Method: \" + line[2] + \"\\n\" + '-'*50 +\"\\n\")\n elif line[2] == \"coo\":\n self.coo_founded = self.coo_founded + 1\n coo_vulnerable_host = [{\"payload\":str(line[4]), \"target\":str(line[6])}]\n if coo_vulnerable_host[0][\"payload\"] == line[4] and coo_vulnerable_host[0][\"target\"] == line[6] and self.coo_founded > 1:\n self.coo_founded = self.coo_founded - 1\n pass\n else:\n self.report(\"[I] Target:\", line[6])\n self.report(\"[+] Injection:\",str(line[6])+\"/\"+str(line[4]),\"[\",\n Curl.cookie, \"]\")\n self.report(\"[!] Special:\", \"This injection looks like a Cross Site Cookie Scripting\")\n self.report(\"[-] Method:\", line[2])\n self.report('-'*50, \"\\n\")\n if self.options.fileoutput:\n fout.write(\"\\n\" + \"XSSer Security Report: \" + str(datetime.datetime.now()) + \"\\n\")\n fout.write(\"---------------------\" + \"\\n\")\n fout.write(\"[I] Target: \" + line[6] + \"\\n\")\n fout.write(\"[+] Injection: \"+ str(line[6])+\"/\"+str(line[4]) + \"[\" + Curl.cookie + \"]\" + \"\\n\")\n fout.write(\"[!] Special: \" + \"This injection looks like a Cross Site Cookie Scripting \" + \"\\n\")\n fout.write(\"[-] Method: \" + line[2] + \"\\n\" + '-'*50 +\"\\n\")\n elif line[1] == \"[Data Control Protocol Injection]\":\n self.dcp_founded = self.dcp_founded + 1\n self.report(\"[I] Target:\", line[6])\n self.report(\"[+] Injection:\", str(line[6])+\"/\"+str(line[4]),\n \"[\", line[5][\"payload\"] , \"]\")\n self.report(\"[!] Special:\", \"This injection looks like a Data Control Protocol flaw\")\n if self.options.finalpayload or self.options.finalremote:\n self.report(\"[*] Final Attack: \", attack_url)\n else:\n self.report(\"[*] Final Attack: \", line[5][\"payload\"])\n self.report(\"[-] Method: dcp\")\n self.report('-'*50, \"\\n\")\n if self.options.fileoutput:\n fout.write(\"\\n\" + \"XSSer Security Report: \" + str(datetime.datetime.now()) + \"\\n\")\n fout.write(\"---------------------\" + \"\\n\")\n fout.write(\"[I] Target: \" + line[6] + \"\\n\")\n fout.write(\"[+] Injection: \" + str(line[6]) + \"/\" + str(line[4]) + \"[\" + line[5][\"payload\"] + \"]\" + \"\\n\")\n fout.write(\"[!] 
Special: \" + \"This injection looks like a Data Control Protocol flaw\" + \"\\n\")\n if self.options.finalpayload or self.options.finalremote:\n fout.write(\"[*] Final Attack: \" + attack_url + \"\\n\")\n else:\n fout.write(\"[*] Final Attack: \" + line[5][\"payload\"] + \"\\n\")\n fout.write(\"[-] Method: dcp\" + \"\\n\" + '-'*50 +\"\\n\")\n elif line[1] == \"[Document Object Model Injection]\":\n self.dom_founded = self.dom_founded + 1 \n self.report(\"[I] Target:\", line[6])\n self.report(\"[+] Injection:\", str(line[0]))\n self.report(\"[!] Special:\", \"This injection looks like a Document Object Model flaw\")\n if self.options.finalpayload or self.options.finalremote or self.options.doss or self.options.dos or self.options.b64:\n self.report(\"[*] Final Attack: \", attack_url)\n else:\n pass\n self.report(\"[-] Method: dom\")\n self.report('-'*50, \"\\n\")\n if self.options.fileoutput:\n fout.write(\"\\n\" + \"XSSer Security Report: \" + str(datetime.datetime.now()) + \"\\n\")\n fout.write(\"---------------------\" + \"\\n\")\n fout.write(\"[I] Target: \" + line[6] + \"\\n\") \n fout.write(\"[+] Injection: \" + str(line[0]) + \"\\n\")\n fout.write(\"[!] Special: \" + \"This injection looks like a Document Object Model flaw\" + \"\\n\")\n if self.options.finalpayload or self.options.finalremote or self.options.doss or self.options.dos or self.options.b64:\n fout.write(\"[*] Final Attack: \" + attack_url + \"\\n\")\n else:\n pass\n fout.write(\"[-] Method: dom\" + \"\\n\" + '-'*50 +\"\\n\")\n\n elif line[1] == \"[Induced Injection]\":\n self.httpsr_founded = self.httpsr_founded +1\n self.report(\"[I] Target:\", line[6])\n self.report(\"[+] Injection:\", str(line[0]))\n self.report(\"[!] Special:\", \"This injection looks like a HTTP Splitting Response\")\n if self.options.finalpayload or self.options.finalremote or self.options.doss or self.options.dos or self.options.b64:\n self.report(\"[*] Final Attack: \", attack_url)\n else:\n pass\n self.report(\"[-] Method: ind\")\n self.report('-'*50, \"\\n\")\n if self.options.fileoutput:\n fout.write(\"\\n\" + \"XSSer Security Report: \" + str(datetime.datetime.now()) + \"\\n\")\n fout.write(\"---------------------\" + \"\\n\")\n fout.write(\"[I] Target: \" + line[6] + \"\\n\")\n fout.write(\"[+] Injection: \" + str(line[0]) + \"\\n\")\n fout.write(\"[!] Special: \" + \"This injection looks like a HTTP Splitting Response\" + \"\\n\")\n if self.options.finalpayload or self.options.finalremote or self.options.doss or self.options.dos or self.options.b64:\n fout.write(\"[*] Final Attack: \" + attack_url + \"\\n\")\n else:\n pass\n fout.write(\"[-] Method: ind\" + \"\\n\" + '-'*50 +\"\\n\")\n elif line[5][\"browser\"] == \"[hashed_precheck_system]\": \n self.false_positives = self.false_positives + 1\n self.report(\"[I] Target:\", line[6])\n self.report(\"[+] Injection:\", str(line[0]))\n self.report(\"[!] Checker: This injection looks like a -false positive- result!. Verify it manually!\")\n self.report(\"[-] Method: hash\")\n self.report('-'*50, \"\\n\")\n if self.options.fileoutput:\n fout.write(\"\\n\" + \"XSSer Security Report: \" + str(datetime.datetime.now()) + \"\\n\")\n fout.write(\"---------------------\" + \"\\n\")\n fout.write(\"[I] Target: \" + line[6] + \"\\n\")\n fout.write(\"[+] Injection: \" + str(line[0]) + \"\\n\")\n fout.write(\"[!] Checker: This injection looks like a -false positive- result!. 
Verify it manually!\" + \"\\n\")\n fout.write(\"[-] Method: hash\" + \"\\n\" + '-'*50 +\"\\n\")\n elif line[5][\"browser\"] == \"[manual_injection]\":\n self.manual_founded = self.manual_founded + 1\n self.report(\"[I] Target:\", line[6])\n self.report(\"[+] Injection:\", str(line[0]))\n self.report(\"[-] Method: manual\")\n self.report('-'*50, \"\\n\")\n if self.options.fileoutput:\n fout.write(\"\\n\" + \"XSSer Security Report: \" + str(datetime.datetime.now()) + \"\\n\")\n fout.write(\"---------------------\" + \"\\n\")\n fout.write(\"[I] Target: \" + line[6] + \"\\n\")\n fout.write(\"[+] Injection: \" + str(line[0]) + \"\\n\")\n fout.write(\"[-] Method: manual\" + \"\\n\" + '-'*50 +\"\\n\")\n elif line[5][\"browser\"] == \"[Heuristic test]\":\n if str(line[5][\"payload\"]).strip('XSS') == \"\\\\\" or str(line[5][\"payload\"]).strip('XSS') == \"/\" or str(line[5][\"payload\"]).strip('XSS') == \">\" or str(line[5][\"payload\"]).strip('XSS') == \"<\" or str(line[5][\"payload\"]).strip('XSS') == \";\" or str(line[5][\"payload\"]).strip('XSS') == \"'\" or str(line[5][\"payload\"]).strip('XSS') == '\"' or str(line[5][\"payload\"]).strip('XSS') == \"=\":\n self.report(\"[I] Target:\", line[6])\n self.report(\"[+] Parameter(s):\", \"[\",\n str(line[5][\"payload\"]).strip('XSS') , \"]\")\n self.report(\"[!] Special:\", \"This parameter(s) looks like is NOT -completly- FILTERED on target code\")\n self.report(\"[-] Method: heuristic\")\n self.report('-'*50, \"\\n\")\n if self.options.fileoutput:\n fout.write(\"\\n\" + \"XSSer Security Report: \" + str(datetime.datetime.now()) + \"\\n\")\n fout.write(\"---------------------\" + \"\\n\")\n fout.write(\"[I] Target: \" + line[6] + \"\\n\")\n fout.write(\"[+] Parameter(s): \" + \"[\" + str(line[5][\"payload\"]).strip('XSS') + \"]\" + \"\\n\")\n fout.write(\"[!] Special: \" + \"This parameter(s) looks like is NOT -completly- FILTERED on target code\" + \"\\n\")\n fout.write(\"[-] Method: heuristic\" + \"\\n\" + '-'*50 +\"\\n\")\n else:\n pass\n else:\n self.auto_founded = self.auto_founded + 1\n self.report(\"[I] Target:\", line[6])\n self.report(\"[+] Injection:\", line[0])\n if self.options.finalpayload or self.options.finalremote or self.options.doss or self.options.dos or self.options.b64:\n self.report(\"[*] Final Attack: \", attack_url)\n else:\n pass\n self.report(\"[-] Method: xss\")\n self.report(\"[-] Browsers:\", line[1], \"\\n\", '-'*50, \"\\n\")\n if self.options.fileoutput:\n fout.write(\"\\n\" + \"XSSer Security Report: \" + str(datetime.datetime.now()) + \"\\n\")\n fout.write(\"---------------------\" + \"\\n\")\n fout.write(\"[I] Target: \" + line[6] + \"\\n\")\n fout.write(\"[+] Injection: \" + line[0] + \"\\n\")\n if self.options.finalpayload or self.options.finalremote or self.options.doss or self.options.dos or self.options.b64:\n fout.write(\"[*] Final Attack: \" + attack_url + \"\\n\")\n else:\n pass\n fout.write(\"[-] Method: xss\" + \"\\n\")\n fout.write(\"[-] Browsers: \"+ line[1] + \"\\n\" + '-'*50 + \"\\n\")\n\n if self.options.tweet:\n # XXX recover sns and username automatically\n self.report(\"[!] Trying to publish on: \" + self.sn_service + \"/\" + self.sn_username)\n if self.options.fileoutput:\n fout.write(\"[!] Published on: \" + self.sn_service + \"/\" + self.sn_username + \"\\n\")\n fout.write(\"=\"*75 + \"\\n\")\n\n if self.options.launch_browser:\n if self.options.dcp:\n #XXX implement DCP autolauncher\n self.report(\"\\n[@] DCP autolauncher not implemented, yet. 
(http://docs.python.org/library/webbrowser.html)\")\n self.report(\"[!] Aborting all launching process!!. If you want to 'auto-launch' other results, try without --Dcp option\\n\")\n self.report(\"[I] If you have some DCP success injections discovered, try to open -manually- these results in the website of your target. You will see that works! ;)\\n\")\n else:\n if attack_url == \"\":\n pass\n else:\n self._webbrowser.open(attack_url)\n\n # heuristic always with statistics\n if self.options.heuristic:\n self.options.statistics = True\n # some statistics reports\n if self.options.statistics:\n # heuristic test results\n if self.options.heuristic:\n self.report('='*75)\n self.report(\"[*] Heuristic:\")\n self.report('='*75)\n self.report('-'*50)\n self.report(' ', \" <not-filt>\", \" <filtered>\", \" =\" , \" ASCII\",\n \" +\", \" UNE/HEX\", \" +\", \" DEC\")\n # semicolon results\n heuris_semicolon_total_founded = self.heuris_semicolon_notfounded + self.heuris_une_semicolon_founded + self.heuris_dec_semicolon_founded\n self.report('; ', \" \", self.heuris_semicolon_founded, \" \", heuris_semicolon_total_founded, \" \",\n self.heuris_semicolon_notfounded, \" \",\n self.heuris_une_semicolon_founded, \" \",\n self.heuris_dec_semicolon_founded)\n # backslash results\n heuris_backslash_total_founded = self.heuris_backslash_notfounded + self.heuris_une_backslash_founded + self.heuris_dec_backslash_founded\n self.report('\\\\ ', \" \", self.heuris_backslash_founded, \" \", heuris_backslash_total_founded, \" \",\n self.heuris_backslash_notfounded, \" \",\n self.heuris_une_backslash_founded, \" \",\n self.heuris_dec_backslash_founded)\n # slash results\n heuris_slash_total_founded = self.heuris_slash_notfounded + self.heuris_une_slash_founded + self.heuris_dec_slash_founded\n self.report(\"/ \", \" \", self.heuris_slash_founded, \" \",\n heuris_slash_total_founded, \" \",\n self.heuris_slash_notfounded, \" \",\n self.heuris_une_slash_founded, \" \",\n self.heuris_dec_slash_founded)\n # minor results\n heuris_minor_total_founded = self.heuris_minor_notfounded + self.heuris_une_minor_founded + self.heuris_dec_minor_founded\n self.report(\"< \", \" \", self.heuris_minor_founded, \" \",\n heuris_minor_total_founded, \" \",\n self.heuris_minor_notfounded, \" \",\n self.heuris_une_minor_founded, \" \",\n self.heuris_dec_minor_founded)\n # mayor results\n heuris_mayor_total_founded = self.heuris_mayor_notfounded + self.heuris_une_mayor_founded + self.heuris_dec_mayor_founded\n self.report(\"> \", \" \", self.heuris_mayor_founded, \" \",\n heuris_mayor_total_founded, \" \",\n self.heuris_mayor_notfounded, \" \",\n self.heuris_une_mayor_founded, \" \",\n self.heuris_dec_mayor_founded)\n # doublecolon results\n heuris_doublecolon_total_founded = self.heuris_doublecolon_notfounded + self.heuris_une_doublecolon_founded + self.heuris_dec_doublecolon_founded\n self.report('\" ', \" \", self.heuris_doublecolon_founded, \" \", heuris_doublecolon_total_founded, \" \",\n self.heuris_doublecolon_notfounded, \" \",\n self.heuris_une_doublecolon_founded, \" \",\n self.heuris_dec_doublecolon_founded)\n # colon results\n heuris_colon_total_founded = self.heuris_colon_notfounded + self.heuris_une_colon_founded + self.heuris_dec_colon_founded\n self.report(\"' \", \" \", self.heuris_colon_founded, \" \",\n heuris_colon_total_founded, \" \",\n self.heuris_colon_notfounded, \" \",\n self.heuris_une_colon_founded, \" \",\n self.heuris_dec_colon_founded)\n # equal results\n heuris_equal_total_founded = 
self.heuris_equal_notfounded + self.heuris_une_equal_founded + self.heuris_dec_equal_founded\n self.report(\"= \", \" \", self.heuris_equal_founded, \" \",\n heuris_equal_total_founded, \" \",\n self.heuris_equal_notfounded, \" \",\n self.heuris_une_equal_founded, \" \",\n self.heuris_dec_equal_founded)\n self.report('-'*70)\n total_heuris_founded = heuris_semicolon_total_founded + heuris_backslash_total_founded + heuris_slash_total_founded + heuris_minor_total_founded + heuris_mayor_total_founded + heuris_doublecolon_total_founded + heuris_colon_total_founded + heuris_equal_total_founded\n\n total_heuris_params = total_heuris_founded + self.heuris_semicolon_founded + self.heuris_backslash_founded + self.heuris_slash_founded + self.heuris_minor_founded + self.heuris_mayor_founded + self.heuris_doublecolon_founded + self.heuris_colon_founded + self.heuris_equal_founded\n try:\n _accur = total_heuris_founded * 100 / total_heuris_params\n except ZeroDivisionError:\n _accur = 0\n self.report('Target(s) Filtering Accur: %s %%' % _accur)\n self.report('-'*70)\n # statistics block\n if len(self.hash_found) + len(self.hash_notfound) == 0:\n pass\n else:\n self.report('='*75)\n self.report(\"[*] Statistic:\")\n self.report('='*75)\n test_time = datetime.datetime.now() - self.time\n self.report('-'*50)\n self.report(\"Test Time Duration: \", test_time)\n self.report('-'*50 )\n total_connections = self.success_connection + self.not_connection + self.forwarded_connection + self.other_connection\n self.report(\"Total Connections:\", total_connections)\n self.report('-'*25)\n self.report(\"200-OK:\" , self.success_connection , \"|\", \"404:\" ,\n self.not_connection , \"|\" , \"503:\" ,\n self.forwarded_connection , \"|\" , \"Others:\",\n self.other_connection)\n try:\n _accur = self.success_connection * 100 / total_connections\n except ZeroDivisionError:\n _accur = 0\n self.report(\"Connec: %s %%\" % _accur)\n self.report('-'*50)\n total_payloads = self.check_positives + self.manual_injection + self.auto_injection + self.dcp_injection + self.dom_injection + self.xsa_injection + self.xsr_injection + self.coo_injection \n self.report(\"Total Payloads:\", total_payloads)\n self.report('-'*25)\n self.report(\"Checker:\", self.check_positives, \"|\", \"Manual:\",\n self.manual_injection, \"|\" , \"Auto:\" ,\n self.auto_injection ,\"|\", \"DCP:\",\n self.dcp_injection, \"|\", \"DOM:\", self.dom_injection,\n \"|\", \"Induced:\", self.httpsr_injection, \"|\" , \"XSR:\",\n self.xsr_injection, \"|\", \"XSA:\",\n self.xsa_injection , \"|\", \"COO:\",\n self.coo_injection)\n self.report('-'*50)\n self.report(\"Total Injections:\" , \n len(self.hash_notfound) + len(self.hash_found))\n self.report('-'*25)\n self.report(\"Failed:\" , len(self.hash_notfound), \"|\",\n \"Sucessfull:\" , len(self.hash_found))\n try:\n _accur = len(self.hash_found) * 100 / total_injections\n except ZeroDivisionError:\n _accur = 0\n self.report(\"Accur : %s %%\" % _accur)\n self.report('-'*25)\n total_discovered = self.false_positives + self.manual_founded + self.auto_founded + self.dcp_founded + self.dom_founded + self.xsr_founded + self.xsa_founded + self.coo_founded\n self.report(\"Total Discovered:\", total_discovered)\n self.report('-'*25)\n self.report(\"Checker:\", self.false_positives, \"|\",\n \"Manual:\",self.manual_founded, \"|\", \"Auto:\",\n self.auto_founded, \"|\", \"DCP:\", self.dcp_founded,\n \"|\", \"DOM:\", self.dom_founded, \"|\", \"Induced:\",\n self.httpsr_founded, \"|\" , \"XSR:\", self.xsr_founded,\n \"|\", 
\"XSA:\", self.xsa_founded, \"|\", \"COO:\",\n self.coo_founded)\n self.report('-'*50)\n self.report(\"False positives:\", self.false_positives, \"|\",\n \"Vulnerables:\",\n total_discovered - self.false_positives)\n self.report('-'*25)\n # efficiency ranking:\n # algor= vulnerables + false positives - failed * extras\n # extras: \n ## 1 vuln -> identi.ca: +10000\n ## >3 vuln -> 1 test: +4500\n ## 1 vuln -> 1 test: +500 \n ## >100 payloads: +150\n ## proxy and heuristic: +100\n ## final payload injected: +100\n ## --Cem and --Doo: +75\n ## manual payload injected and --Dcp: +25\n ## checker: +10\n mana = 0\n if self.hash_found and self.options.tweet:\n mana = mana + 10000\n if self.hash_found > 3:\n mana = mana + 4500\n if self.hash_found == 1:\n mana = mana + 500\n if total_payloads > 100:\n mana = mana + 150\n if self.options.proxy:\n mana = mana + 100\n if self.options.heuristic:\n mana = mana + 100\n if self.options.finalpayload or self.options.finalremote:\n mana = mana + 100\n if self.options.Cem or self.options.Doo:\n mana = mana + 75\n if self.options.heuristic:\n mana = mana + 50\n if self.options.script and not self.options.fuzz:\n mana = mana + 25\n if self.options.followred and self.options.fli:\n mana = mana + 25\n if self.options.dcp:\n mana = mana + 25\n if self.options.hash:\n mana = mana + 10\n mana = (len(self.hash_found) * mana) + mana -4500\n # enjoy it :)\n self.report(\"Mana:\", mana)\n self.report(\"-\"*50)\n #self.report('='*75 + '\\n')\n # end statistics block\n\n c = Curl()\n if not len(self.hash_found) and self.hash_notfound:\n if self.options.hash:\n self.report(\"[!] Checker: looks like your target(s) does not repeat all received code.\\n\")\n if self.options.fuzz or self.options.dcp or self.options.script:\n self.report(\"[I] Could not find any vulnerability!. Try another combination or hack it -manually- :)\\n\")\n else:\n self.report(\"[I] Could not find any vulnerability!. Try another combination or hack it -manually- :)\\n\")\n self.report('='*75 + '\\n')\n if self.options.fileoutput:\n fout = open(\"XSSlist.dat\", \"a\")\n fout.write(\"\\n\" + \"XSSer Security Report: \" + str(datetime.datetime.now()) + \"\\n\")\n fout.write(\"---------------------\" + \"\\n\")\n fout.write(\"[!] Not reported 'positive' results for: \\n\" + \"[-] \" + str('\\n[-] '.join([u[0] for u in self.hash_notfound])) + \"\\n\")\n fout.write(\"=\"*75 + \"\\n\")\n fout.close()\n else:\n # some exits and info for some bad situations:\n if len(self.hash_found) + len(self.hash_notfound) == 0 and not Exception:\n self.report(\"\\nXSSer cannot send data :( ... maybe is -something- blocking our connections!?\\n\")\n if len(self.hash_found) + len(self.hash_notfound) == 0 and self.options.crawling:\n self.report(\"\\nCrawlering system cannot recieve feedback from 'mosquitoes' on target host... try again :(\\n\")\n #if len(self.hash_found) + len(self.hash_notfound) == 0 and c.info()[\"http-code\"] != \"200\":\n # self.report(\"\\nTarget responses with different HTTP code to: 200 [\" + c.info()[\"http-code\"] + \"] ... cannot inject! 
:(\\n\")\n #self.report('='*75 + '\\n')\n\n # print results to xml file\n if self.options.filexml:\n xml_report_results = xml_reporting(self)\n xml_report_results.print_xml_results(self.options.filexml)\n\n # publish discovered vulnerabilities\n if self.options.tweet and self.hash_found:\n try:\n shortener = ShortURLReservations('is.gd')\n shorturl_host = shortener.process_url(str(line[0]))\n\n for line in self.hash_found:\n sns_publish_results = publisher(self)\n tags = '#xss '\n if not self.options.tt:\n msg = tags + 'vulnerable target: ' + shorturl_host\n else:\n tags = tags + self.options.tt\n msg = tags + ' vulnerable target: ' + shorturl_host \n username = self.sn_username\n password = self.sn_password\n url = self.sn_url\n sns_publish_results.send_to_identica(msg, username, password, url)\n except:\n self.report(\"\\n[I] Error publishing some discovered XSS injections\\n\")\n pass", "title": "" }, { "docid": "061364e66f89bf3306c11098ed8b7156", "score": "0.6130296", "text": "def report_evaluation_results(self):\n\n # report\n print(\"Performance Report {}\".format(self.prefix))\n print(\"Accuracy: {}\".format(self.logger.best_acc))\n print(\"{} : {}\".format(self.criteria.capitalize(), self.logger.best_criteria_metric))", "title": "" }, { "docid": "796ebc88bfe3c1670acc9d326fe70434", "score": "0.61258924", "text": "def GetResults(self):\n return self.results", "title": "" }, { "docid": "43509a886d0902017e268174173d9a8a", "score": "0.61254483", "text": "def get_results(self):\n if self.cached_results:\n return self.results\n\n self.voc_evaluator = ConfusionMatrix(21)\n self.voc_evaluator.update(self.targets.astype(np.int64), self.outputs.astype(np.int64))\n\n acc_global, acc, iu = self.voc_evaluator.compute()\n\n self.results = {\n \"Accuracy\": acc_global.item(),\n \"Mean IOU\": iu.mean().item(),\n }\n\n self.speed_mem_metrics['Max Memory Allocated (Total)'] = get_max_memory_allocated()\n\n return self.results", "title": "" }, { "docid": "6f74cdf5e18fd6397a24e1f27cc12809", "score": "0.6122826", "text": "def get_result(self):\r\n pass", "title": "" }, { "docid": "d04a8cdc91d2c0794c18ef72d432939e", "score": "0.6098181", "text": "def get_results(self):\n return self.y_approx, self.coef, self.coef_cov, self.coef_var, self.eps", "title": "" }, { "docid": "bb2802b1b1dbf048d5426090c5cf2c06", "score": "0.60909927", "text": "def _get_results(self):\n # Read the statepoint file.\n statepoint = glob.glob(os.path.join(os.getcwd(), self._sp_name))[0]\n with StatePoint(statepoint) as sp:\n # Write out k-combined.\n outstr = 'k-combined:\\n'\n outstr += '{0:12.6E} {1:12.6E}\\n'.format(*sp.k_combined)\n\n # Write out entropy data.\n outstr += 'entropy:\\n'\n results = ['{0:12.6E}'.format(x) for x in sp.entropy]\n outstr += '\\n'.join(results) + '\\n'\n\n return outstr", "title": "" }, { "docid": "081b2815fdd3358ce15b7bde31574fb0", "score": "0.6090463", "text": "def result(self, *l_resu, **kwargs):\n nf = len(self.exec_result)\n self.exec_result.extend(l_resu)\n for values in l_resu:\n job, diag = values[0], values[2]\n nf += 1\n if self.info >= 2:\n self.run.Mess(ufmt(_(u'%s completed (%d/%d), diagnostic : %s'),\n job, nf, self.nbitem, diag), 'SILENT')", "title": "" }, { "docid": "20d569ecdd0f4e43647c3f3cd56ce5b0", "score": "0.6086968", "text": "def get_full_results(self):\n power_results, time_results = [], []\n full_res_dir = os.path.join(self.project_path, self.output_path, 'region1', 'signal1')\n\n power_results_file = os.path.join(full_res_dir, 'power_results.txt')\n print(power_results_file)\n 
if os.path.isfile(power_results_file):\n power_results = pd.read_csv(power_results_file, delimiter='\\t', index_col='#Power (W)', na_values='---')\n \n time_results_file = os.path.join(full_res_dir, 'time_results.txt') \n print(time_results_file)\n if os.path.isfile(time_results_file):\n time_results = pd.read_csv(time_results_file, delimiter='\\t')\n\n self.power_results = power_results\n self.time_results = time_results\n return power_results, time_results", "title": "" }, { "docid": "bcec841ea0f49a66f8a7c4bdb62c5337", "score": "0.6084571", "text": "def gather_result(self, master):\n print(\"Calculated in {} seconds!\".format(time.time() - self.start_time))\n return (self.result, None)", "title": "" }, { "docid": "5613e8f2c77f88ab714dc6033fbcddf9", "score": "0.60733324", "text": "def extractResultsPoses(self):\n\t\t# TODO figure out if we want some more from the result?\n\t\t# so far it is good in this way...\n\t\tif self.mode > self.totRuns:\n\t\t\tself.mode = self.totRuns\n\t\tfor i in range(0, self.mode):\n\t\t\tself.results.append(self.poses[i])\n\t\t\tself.results[i]['leff'] = self.results[i]['energy'] / float( len( self.atomTypes))\n\t\t\tself.results[i]['vdw_contacts'] = []\n\t\t\tself.results[i]['metal_coord'] = [] # TODO \n\t\t\tacc, don = self.findHbAccepDon( self.results[i]['true_ligand'] )\n\t\t\tself.results[i]['hba_atoms'] = acc\n\t\t\tself.results[i]['hbd_atoms'] = don\n\t\tif self.DEBUG:\n# \t\t writeList(self.ligName+\"_lig_acceptors.pdb\", acc)\n# \t\t writeList(self.ligName+\"_lig_donors.pdb\", don)\n\t\t\tprint self.ligName+\"_lig_acceptors.pdb\", acc\n\t\t\tprint self.ligName+\"_lig_donors.pdb\", don", "title": "" }, { "docid": "3c0d7cea3c45cb079c2706e40f3bc0e9", "score": "0.60615027", "text": "def get_results(self):\n return self._results", "title": "" }, { "docid": "b72f65f6353bc557ab2e65a337d89ccb", "score": "0.6058568", "text": "def raw_results(self):\n return self.results", "title": "" }, { "docid": "bf2aa42104ce8f7274b00fd080a44e53", "score": "0.60580987", "text": "def _collect_test_result(duthost, ptfhost, request):\n logger.info(\"Collecting test result and related information.\")\n # TODO : collect DUT test report\n _collect_sonic_os_and_platform_info(duthost, request)\n _collect_sai_test_report_xml(ptfhost, request)", "title": "" }, { "docid": "da4b252c0ed510493f0f71b312774da4", "score": "0.60498965", "text": "def get_results(self):\n\n # Header.\n start_time_string = self.model.time_start.strftime(\"%d %b %Y, %X %Z\")\n results = ('# Results for the run conducted on ' \n + start_time_string + '\\n\\n')\n\n # Timings calculations.\n time_model_creation = (\n self.model.time_after_model_creation - self.model.time_start)\n mod_creation_s = time_model_creation.total_seconds()\n\n time_solve = (\n self.model.time_after_solve - self.model.time_after_model_creation)\n solve_s = time_solve.total_seconds()\n\n time_total = self.model.time_after_solve - self.model.time_start\n total_s = time_total.total_seconds()\n\n # Timings output.\n results += '# timings\\n'\n results += ('time_model_creation_seconds: ' + str(mod_creation_s) + \n '\\n')\n results += ('time_solve_seconds: ' + str(solve_s) + '\\n')\n results += ('time_total_seconds: ' + str(total_s) + '\\n\\n') \n\n if self.optimal_size == -1:\n results += 'Infeasible'\n return results\n\n # Optimal matching statistics.\n results += '# optimal matching statistics\\n'\n\n results += 'optimal_size: ' + str(self.optimal_size) + '\\n'\n results += ('optimal_maxsizemincost: ' + \n str(self.optimal_maxsizemincost) + 
'\\n')\n results += ('optimal_maxsizemindegree: ' + \n str(self.optimal_maxsizemindegree) + '\\n')\n results += ('optimal_maxsizeminsqcost: ' + \n str(self.optimal_maxsizeminsqcost) + '\\n')\n results += ('optimal_generousmaxprofile: ' + \n str(self.model._get_profile_string(\n self.optimal_generousmaxprofile)) + '\\n')\n results += ('optimal_greedymaxprofile: ' + \n str(self.model._get_profile_string(\n self.optimal_greedymaxprofile)) + '\\n')\n results += ('optimal_greedyprofile: ' + \n str(self.model._get_profile_string(\n self.optimal_greedyprofile)) + '\\n')\n results += ('optimal_max_lec_abs_diff: ' + \n str(self.optimal_max_lec_abs_diff) + '\\n')\n results += ('optimal_sum_lec_abs_diff: ' + \n str(self.optimal_sum_lec_abs_diff) + '\\n\\n')\n\n return results", "title": "" }, { "docid": "036ee5e5f4aa29471ab0c1551d6fa580", "score": "0.6039652", "text": "def _get_results(self):\n return ClipDetection._get_results(self)", "title": "" }, { "docid": "579dd4261e664fb77ed8078db41b3592", "score": "0.60358834", "text": "def stderr(self):\n result = {}\n all = self.results_raw.get(\"contacted\")\n for key, value in all.iteritems():\n if value.get(\"stderr\") or value.get(\"warnings\"):\n result[key] = {\n \"stderr\": value.get(\"stderr\"),\n \"warnings\": value.get(\"warnings\"),\n }\n return result", "title": "" }, { "docid": "80a8aa1465fdd0f6932ce0a6a51530c7", "score": "0.6025335", "text": "def result(self):\n return self.calculate_completed_results()", "title": "" }, { "docid": "560ce4b77a240c41fe5afea7e08f83f4", "score": "0.60221374", "text": "def results(self):\n try:\n return self._results\n except AttributeError:\n self._results = self.get_results()\n return self._results", "title": "" }, { "docid": "d4b78e6721324ce83b1960c7aa178124", "score": "0.6018027", "text": "def show_results(self, n_decimals):\r\n phrase = 'method {}'\r\n \r\n print('\\nLOGISTIC REGRESSION SUMMARY\\n')\r\n if self._model.mle_retvals['converged']==True:\r\n print('Estimation was converged successfully.')\r\n else:\r\n print('Estimation was NOT converged successfully.')\r\n print('Please enlarge the number of iterations.')\r\n print('------------------\\n')\r\n print('Dependent variable encoding')\r\n display(self.get_dependent_variable_codes().style\\\r\n .set_caption(phrase.format('.get_dependent_variable_codes()')))\r\n print('------------------\\n')\r\n print('Model summary')\r\n display(self.summary_r2().style\\\r\n .set_caption(phrase.format('.summary_r2()'))\\\r\n .format(precision=n_decimals))\r\n print('------------------\\n')\r\n print('Classification table')\r\n display(self.get_classification_table().style\\\r\n .set_caption(phrase.format('.get_classification_table()'))\\\r\n .format(precision=n_decimals))\r\n print('------------------\\n')\r\n print('Precision and recall')\r\n display(self.get_precision_and_recall().style\\\r\n .set_caption(phrase.format('.get_precision_and_recall()'))\\\r\n .format(precision=n_decimals))\r\n print('------------------\\n')\r\n print('Coefficients')\r\n display(self.summary().style\\\r\n .format(None, na_rep=\"\", precision=n_decimals)\\\r\n .set_caption(phrase.format('.summary()')))", "title": "" }, { "docid": "883f4b6eece00f45a08d64dbdc303fd8", "score": "0.60051614", "text": "def analyse( self ) :\n \n return SUCCESS", "title": "" }, { "docid": "a93c869ccdc335753ebf8e837d09a1a5", "score": "0.60049284", "text": "def show_result(self):\n print(\"Survey results are:\")\n for result in self.response:\n print(f\"-{result}\")", "title": "" }, { "docid": 
"a9fb3682e33435eb68090e9d4eff602e", "score": "0.6000294", "text": "def results(self) -> Types.RESULTS:\n return self._results", "title": "" }, { "docid": "0cab4faf0f8524dee48500ad9c70b493", "score": "0.59981126", "text": "def get_results(self):\n\n # group (comparison) metrics\n props = self._ts_props()\n cols = [self.original_candidate_col_name, self.reference_col_name]\n adj_name = None\n\n if props['adjusted_name'] is not None and props['adjust_failed']:\n cols.append(self.adjusted_col_name)\n adj_name = 'ADJ'\n\n\n group_stats, vertical_metrics, hor_errors = self.get_validation_stats(\n self.df_frame, columns=cols, can_name='CAN', ref_name='REF',\n adj_name=adj_name, as_dict=True)\n\n # error codes and check stats\n checkstats_test, checkstats_adjust = self.get_checkstats()\n checkstats = merge_dicts(checkstats_test, checkstats_adjust)\n\n if self.adjusted_col_name is None: # before adjustment\n # test stats\n testresults_ifirst = self.initial_test_obj.get_flat_results()\n\n if self.isbreak:\n if self.force_supress_adjust:\n return testresults_ifirst, None, None, None, group_stats, \\\n vertical_metrics, hor_errors, checkstats\n else:\n # models stats from init (unadjusted)\n return testresults_ifirst, None, None, None, group_stats, \\\n vertical_metrics, hor_errors, checkstats\n else:\n # no break --> no model created\n return testresults_ifirst, None, None, None, group_stats, \\\n vertical_metrics, hor_errors, checkstats\n else:\n # models stats from init (unadjusted)\n models_ifirst = self.initial_adjust_obj.get_model_params()\n # models stats from current (last) adjust object\n models_ilast = self.adjust_obj.get_model_params()\n testresults_ifirst = self.initial_test_obj.get_flat_results()\n\n # also add the error code for the adjustment to the test results?\n testresults_ilast = self.current_test_obj.get_flat_results()\n\n return testresults_ifirst, models_ifirst, testresults_ilast, \\\n models_ilast, group_stats, vertical_metrics, hor_errors, \\\n checkstats", "title": "" }, { "docid": "3c008347884a60dcd45ed83566d93af4", "score": "0.59920573", "text": "def get_diagnostics(self, instance):\n return self._vmops.get_diagnostics(instance)", "title": "" }, { "docid": "bc26521d567ab1c57d329d6cd9cb0a57", "score": "0.59891784", "text": "def print_results(self):\n\n results_str_list = []\n\n denom_of_gt = 1\n denom_of_pred = 1\n\n num_of_gt = self.save_results_gt_num\n num_of_pred = self.save_results_pred_num\n tpc = self.save_results_PR_table\n\n pos_level = self.pr_pos_level\n rot_level = self.pr_rot_level\n\n my_str = \"# of gt: {}\".format(num_of_gt)\n print(my_str)\n results_str_list.append(my_str)\n\n my_str = \"# of pred: {}\".format(num_of_pred)\n print(my_str)\n results_str_list.append(my_str)\n\n format_value_str = \"{} m TP: {}\"\n\n for i in range(3):\n my_str = \"{} deg\".format(rot_level[i])\n print(my_str)\n results_str_list.append(my_str)\n\n my_str = format_value_str.format(pos_level[0], tpc[i][0])\n print(my_str)\n results_str_list.append(my_str)\n\n my_str = format_value_str.format(pos_level[1], tpc[i][1])\n print(my_str)\n results_str_list.append(my_str)\n\n my_str = format_value_str.format(pos_level[2], tpc[i][2])\n print(my_str)\n results_str_list.append(my_str)\n\n return results_str_list", "title": "" }, { "docid": "176e3fde2eb69f2f223769dd1b69159b", "score": "0.5988327", "text": "def run_results(self):\n self.report('workchain completed after {} iterations'.format(self.ctx.iteration))\n self.out('output_parameters', self.ctx.restart_calc.out.output_parameters)\n 
self.out('remote_folder', self.ctx.restart_calc.out.remote_folder)\n self.out('retrieved', self.ctx.restart_calc.out.retrieved)\n\n if 'output_structure' in self.ctx.restart_calc.out:\n self.out('output_structure', self.ctx.restart_calc.out.output_structure)", "title": "" }, { "docid": "1f2f2068a2ff5fa1da3aa9fe032c2f88", "score": "0.59773475", "text": "def __transfmResult(self, results):\n\t\tcheckresult = []\n\t\tfor testid in results:\n\t\t\ttest = self.testsuite[testid]\n\t\t\tresult = results[testid]\n\t\t\tcheckresult.append( ccReg.CheckResult(testid, result[\"result\"],\n\t\t\t\tresult[\"note\"], result[\"data\"]) )\n\t\treturn checkresult", "title": "" }, { "docid": "fe4df96aeceb17d78e3c2d560cc37ad7", "score": "0.5973829", "text": "def run(self):\n results = {}\n for name in self._test_cases:\n results[name] = {}\n results[name]['reasonMsg'] = ''\n results[name]['status'] = FAILED\n\n # Ensure result file exists\n ostools.write_file(get_result_file_name(self._testWordDir), \"\")\n\n sim_ok = self._simulate()\n\n results = self._read_test_results(file_name=get_result_file_name(self._testWordDir))\n\n # Do not run post check unless all passed\n for status in results.values():\n if status != PASSED:\n return results\n\n #if not self._config.call_post_check(output_path, read_output):\n # for name in self._test_cases:\n # results[name] = FAILED\n\n return results", "title": "" }, { "docid": "34974aa97299d11078d5375377bb34f4", "score": "0.5971727", "text": "def print_results(self):\n\t\t# for m_str, m in self.model_with_separate_variables.items():\t\t\t\n\t\t# \ttry: \n\t\t# \t\tprint(\"\\nModel {}\".format(m.ModelName))\n\t\t# \t\tprint(\"The value of the objective function after optimisation is {}\".format(m.objVal))\n\t\t# \t\tprint(\"Variables are as follows:\")\n\t\t# \t\tfor partition in self.partitions:\n\t\t# \t\t\t# for c, (interval_start, interval_end) in enumerate(self.interval_ends):\n\t\t# \t\t\t# \t# x = round(self.model_with_separate_variables_vars[m_str, partition, interval_start, interval_end].X, 2)\n\t\t# \t\t\t# \t# x = self.model_with_separate_variables_vars[m_str, partition, interval_start, interval_end].X\n\t\t# \t\t\t# \t# print(\"X_{}_{}_{} = {}\".format(partition, interval_start, interval_end, x))\n\t\t# \t\t\t# \tx = self.model_with_separate_variables_vars.sum(m_str, partition, '*', '*').getValue()\n\t\t# \t\t\t# \tprint(\"X_{} = {}\".format(partition, x))\n\t\t# \t\t\tx = self.model_with_separate_variables_vars.sum(m_str, partition, '*', '*').getValue()\n\t\t# \t\t\tprint(\"X_{} = {}\".format(partition, x))\n\t\t# \texcept AttributeError:\n\t\t# \t\tprint(\"\\nModel {} is infeasible!\".format(m_str))\n\n\t\t# for m_str, m in self.model_with_pwl_costs.items():\t\t\t\n\t\t# \ttry: \n\t\t# \t\tprint(\"\\nModel {}\".format(m.ModelName))\n\t\t# \t\tprint(\"The value of the objective function after optimisation is {}\".format(m.objVal))\n\t\t# \t\tprint(\"Variables are as follows:\")\n\t\t# \t\tfor partition in self.partitions:\n\t\t# \t\t\tx = self.model_with_pwl_costs_vars[m_str, partition].X\n\t\t# \t\t\tprint(\"X_{} = {}\".format(partition, x))\n\t\t# \texcept AttributeError:\n\t\t# \t\tprint(\"\\nModel {} is infeasible!\".format(m_str))\n\n\t\tprint(\"\\n MODEL COMPARISON\")\n\t\tfor m_str in self.model_with_separate_variables:\n\t\t\tprint(\"\\n{} (objective {}) | {} (objective {})\".format(self.model_with_separate_variables[m_str].ModelName, self.model_with_separate_variables[m_str].objVal,\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 
\tself.model_with_pwl_costs[m_str].ModelName, self.model_with_pwl_costs[m_str].objVal))\n\t\t\tprint(\"runtimes (in seconds): {} | {}\".format(self.model_with_separate_variables_runtime[m_str], self.model_with_pwl_costs_runtime[m_str]))\n\t\t\tfor partition in self.partitions:\n\t\t\t\tx_model_with_separate_variables = self.model_with_separate_variables_vars.sum(m_str, partition, '*', '*').getValue()\n\t\t\t\tx_model_with_pwl_costs = self.model_with_pwl_costs_vars[m_str, partition].X\n\t\t\t\tprint(\"X_{}: {} | {}\".format(partition, x_model_with_separate_variables, x_model_with_pwl_costs))", "title": "" }, { "docid": "df1e25eeef2016026248a515fc72148b", "score": "0.59701633", "text": "def report_results(self):\n print('+--------------------------------')\n print('| {0} Tests run'.format(self.__total_count))\n print('| {0} Passed'.format(self.__total_count - self.__error_count))\n print('| {0} Failed'.format(self.__error_count))\n print('+--------------------------------')", "title": "" }, { "docid": "24e882c06d3613fe453ab8edf8b0229a", "score": "0.5965686", "text": "def get_details(self, property_name):\n columns = []\n metrics = []\n scores = []\n errors = []\n details = pd.DataFrame()\n\n if property_name == 'Synthesis':\n metric_name = self.METRICS[property_name][0].__name__\n metric_result = self._metric_results[metric_name]\n details = pd.DataFrame({\n 'Metric': [metric_name],\n 'Diagnostic Score': [metric_result.get('score', np.nan)],\n 'Num Matched Rows': [metric_result.get('num_matched_rows', np.nan)],\n 'Num New Rows': [metric_result.get('num_new_rows', np.nan)],\n })\n errors.append(metric_result.get('error', np.nan))\n\n else:\n for metric in self.METRICS[property_name]:\n for column, score_breakdown in self._metric_results[metric.__name__].items():\n metric_score = score_breakdown.get('score', np.nan)\n metric_error = score_breakdown.get('error', np.nan)\n if pd.isna(metric_score) and pd.isna(metric_error):\n continue\n\n columns.append(column)\n metrics.append(metric.__name__)\n scores.append(metric_score)\n errors.append(metric_error)\n\n details = pd.DataFrame({\n 'Column': columns,\n 'Metric': metrics,\n 'Diagnostic Score': scores,\n })\n\n if pd.Series(errors).notna().sum() > 0:\n details['Error'] = errors\n\n return details", "title": "" }, { "docid": "02d572c9cc54f8f8ab4f9593b9cfdf2d", "score": "0.5963338", "text": "def __collect_test_result(duthost, ptfhost, request):\n logger.info(\"Collecting test result and related information.\")\n # TODO : collect DUT test report\n __collect_sonic_os_and_platform_info(duthost, request)\n __collect_sai_test_report_xml(ptfhost, request)", "title": "" }, { "docid": "de2a0079fb994b412cc9be1615eb2e92", "score": "0.5962252", "text": "def testResults(self):\n\n def logic():\n self.assertEqual(\n self.query.get_results_list(self.FORMAT),\n self.EXPECTED_RESULTS)\n self.assertEqual(\n self.template.get_results_list(self.FORMAT),\n self.EXPECTED_RESULTS)\n\n self.do_unpredictable_test(logic)", "title": "" }, { "docid": "6eb2fc91861ea88c9aac7ca12b07cc8a", "score": "0.59585744", "text": "def extractResultsPoses(self):\n\t\t# TODO speed here can be improved\n\t\tLCpose = \"\"\n\t\tLCsize = -1\n\t\t# LCenergy = 10000. Changed for catastrophyc results with energy bigger than that\n\t\t# (i.e. 
too-small grid box)\n\t\tLCenergy = 1E99\n\t\tLEpose = \"\"\n\t\tLEsize = -1\n\t\tLEenergy = 1E99\n\t\tc = 0\n\n\n\t\tfor pop in self.clusters:\n\t\t\tcurr_csize = len(pop)\n\t\t\tcurr_cenergy = pop[0]['energy']\n\t\t\tself.histogram.append([curr_cenergy, curr_csize])\n\t\t\tif (curr_csize > LCsize) or (curr_csize == LCsize and curr_cenergy < LCenergy ):\n\t\t\t\tLCsize = curr_csize\n\t\t\t\tLCpose = pop[0]\n\t\t\t\tLCenergy = curr_cenergy\n\t\t\t\tLCindex = c\n\t\t\tif curr_cenergy < LEenergy:\n\t\t\t\tLEenergy = curr_cenergy\n\t\t\t\tLEpose = pop[0]\n\t\t\t\tLEsize = curr_csize\n\t\t\t\tLEindex = c\n\t\t\tc += 1\n\t\tself.results.append(LEpose)\n\t\tself.results[0]['csize'] = LEsize\n\t\tself.results[0]['cpercent'] = (float(LEsize) / float(self.totRuns))*100\n\t\tself.results[0]['leff'] = self.results[0]['energy'] / float( len( self.atomTypes))\n\t\tself.results[0]['vdw_contacts'] = []\n\t\tself.results[0]['metal_coord'] = [] \n\t\tacc, don = self.findHbAccepDon( self.results[0]['text'] )\n\t\tself.results[0]['hba_atoms'] = acc\n\t\tself.results[0]['hbd_atoms'] = don\n\t\tif self.DEBUG:\n# \t\t writeList(self.ligName+\"_lig_acceptors.pdb\", acc)\n# \t\t writeList(self.ligName+\"_lig_donors.pdb\", don)\n\t\t\tprint self.ligName+\"_lig_acceptors.pdb\", acc\n\t\t\tprint self.ligName+\"_lig_donors.pdb\", don\n\t\tself.histogram[LEindex].append(\"**\") # <-marker for LE\n\t\tif not LEpose == LCpose:\n\t\t\tself.results.append( LCpose )\n\t\t\tself.results[1]['csize'] = LCsize\n\t\t\tself.results[1]['cpercent'] = (float(LCsize) / float(self.totRuns))*100\n\t\t\tself.results[1]['leff'] = self.results[1]['energy'] / float( len( self.atomTypes))\n\t\t\tself.results[1]['vdw_contacts'] = []\n\t\t\tself.results[1]['metal_coord'] = []\n\t\t\tacc, don = self.findHbAccepDon(self.results[1]['text'])\n\t\t\tself.results[1]['hba_atoms'] = acc\n\t\t\tself.results[1]['hbd_atoms'] = don\n\t\t\tself.histogram[LCindex].append(\"*\") # <-marker for LC\n\t\tself.histogram.sort(key = lambda x: x[0])", "title": "" }, { "docid": "03d7724af8f167a659a1b3efb44889f5", "score": "0.5954771", "text": "def _generate_result_summary(self):\n group_stats = self.group_stats\n group_stats.pop('Groups')\n\n manova_result = {\n 'Analysis Performed': self.analysis_type,\n 'degrees of freedom': self.degrees_of_freedom,\n 'Pillai Statistic': self.pillai_statistic,\n \"Wilks Lambda\": self.wilks_lambda,\n \"Roys Statistic\": self.roys_statistic,\n \"Hotellings T^2\": self.hotelling_t2_statistic,\n 'Group Means': group_stats['Group Means'],\n 'Group Num. Observations': group_stats['Group Observations'],\n 'Observation Total Means': self.observation_stats['x means'],\n 'Dependent variable num.': self.observation_stats['x observations'],\n 'Observations': self.observation_stats\n }\n\n return manova_result", "title": "" }, { "docid": "e0e7751c635fd559a32dc03a1fa219fb", "score": "0.5950929", "text": "def result(self) -> Result:\n return self._results", "title": "" }, { "docid": "53c708367534d9f63fe11a17f3d10352", "score": "0.594166", "text": "def results(self):\n data = self.get('run', self.project.id, self.run_id).json\n return AppThwackResult(**data)", "title": "" }, { "docid": "13205fa268ba159bc9efcea72cd4ee68", "score": "0.5919297", "text": "def result(self):\n return self.res", "title": "" }, { "docid": "0283319a942be6bc112213bd7d6df2c7", "score": "0.5918996", "text": "def report(self):\n if self.num_examples > 0:\n avg_elapsed_secs = (self.total_elapsed_millisecs / self.num_examples) / 1000\n print(f\"Total no. 
of examples: {self.num_examples}\")\n if self.num_no_gold_label > 0:\n print(f\"Found {self.num_no_gold_label} examples without a gold label\")\n else:\n total_no_of_exceptions = (\n self.num_true_with_exception + self.num_false_with_exception\n )\n print(f\" No. true: {self.num_true}\")\n print(f\" No. correct: {self.num_correct_true}\")\n print(f\" No. of exceptions: {self.num_true_with_exception}\")\n print(\n f\" No. correct with exceptions: {self.num_correct_true_with_exception}\"\n )\n print(\n f\" No. incorrect without exception: {self.num_incorrect_true_no_exception}\"\n )\n print(f\" No. false: {self.num_false}\")\n print(f\" No. correct: {self.num_correct_false}\")\n print(f\" No. of exceptions: {self.num_false_with_exception}\")\n print(\n f\" No. correct with exceptions: {self.num_correct_false_with_exception}\"\n )\n print(\n f\" No. incorrect without exception: {self.num_incorrect_false_no_exception}\"\n )\n print(f\"Total no. correct: {self.num_correct}\")\n print(f\"Total no. with exceptions: {total_no_of_exceptions}\")\n print(f\"Accuracy: {(self.num_correct * 100.0) / self.num_examples}\")\n if total_no_of_exceptions > 0:\n print(\"\\nFailure Breakdown by Exception:\")\n for exception in self.exception_num_failures:\n print(\n f\" {exception}: {self.exception_num_failures[exception]}\"\n )\n print(\n f\"\\nAverage theorem proving time per example: {avg_elapsed_secs} secs\\n\\n\"\n )", "title": "" }, { "docid": "160e786cdfc5cc151e15d63490480e5d", "score": "0.59161055", "text": "def return_results(self):\n try:\n last_calc_uuid = self.ctx.last_calc.uuid\n except AttributeError:\n last_calc_uuid = None\n try: # if something failed, we still might be able to retrieve something\n last_calc_out = self.ctx.last_calc.out['output_parameters']\n last_calc_out_dict = last_calc_out.get_dict()\n except AttributeError:\n last_calc_out = None\n last_calc_out_dict = {}\n\n\n\n outputnode_dict = {}\n outputnode_dict['workflow_name'] = self.__class__.__name__\n outputnode_dict['workflow_version'] = self._workflowversion\n outputnode_dict['material'] = self.ctx.formula\n outputnode_dict['loop_count'] = self.ctx.loop_count\n outputnode_dict['iterations_total'] = last_calc_out_dict.get('number_of_iterations_total', None)\n outputnode_dict['distance_charge'] = last_calc_out_dict.get('charge_density', None)\n outputnode_dict['distance_charge_all'] = self.ctx.distance\n outputnode_dict['total_energy'] = last_calc_out_dict.get('energy_hartree', None)\n outputnode_dict['total_energy_all'] = self.ctx.total_energy\n outputnode_dict['distance_charge_units'] = 'me/bohr^3'\n outputnode_dict['total_energy_units'] = 'Htr'\n outputnode_dict['warnings'] = self.ctx.warnings\n outputnode_dict['successful'] = self.ctx.successful\n outputnode_dict['last_calc_uuid'] = last_calc_uuid\n # maybe also store some information about the formula\n #also lognotes, which then can be parsed from subworkflow too workflow, list of calculations involved (pks, and uuids),\n #This node should contain everything you wish to plot, here iteration versus, total energy and distance.\n\n if self.ctx.successful:\n self.report('STATUS: Done, the convergence criteria are reached.\\n'\n 'INFO: The charge density of the FLEUR calculation pk= '\n 'converged after {} FLEUR runs and {} iterations to {} '\n '\"me/bohr^3\" \\n'\n 'INFO: The total energy difference of the last two iterations '\n 'is {} htr \\n'.format(self.ctx.loop_count,\n last_calc_out_dict.get('number_of_iterations_total', None),\n last_calc_out_dict.get('charge_density', 
None), self.ctx.energydiff))\n\n else: # Termination ok, but not converged yet...\n if self.ctx.abort: # some error occured, donot use the output.\n self.report('STATUS/ERROR: I abort, see logs and '\n 'erros/warning/hints in output_scf_wc_para')\n else:\n self.report('STATUS/WARNING: Done, the maximum number of runs '\n 'was reached or something failed.\\n INFO: The '\n 'charge density of the FLEUR calculation pk= '\n 'after {} FLEUR runs and {} iterations is {} \"me/bohr^3\"\\n'\n 'INFO: The total energy difference of the last '\n 'two interations is {} htr'\n ''.format(self.ctx.loop_count,\n last_calc_out_dict.get('number_of_iterations_total', None),\n last_calc_out_dict.get('charge_density', None), self.ctx.energydiff))\n\n #also lognotes, which then can be parsed from subworkflow too workflow, list of calculations involved (pks, and uuids),\n #This node should contain everything you wish to plot, here iteration versus, total energy and distance.\n\n\n outputnode_t = ParameterData(dict=outputnode_dict)\n # this is unsafe so far, because last_calc_out could not exist...\n if last_calc_out:\n outdict = create_scf_result_node(outpara=outputnode_t, last_calc_out=last_calc_out)\n else:\n outdict = create_scf_result_node(outpara=outputnode_t)\n\n if 'fleurinp' in self.inputs:\n outdict['fleurinp'] = self.inputs.fleurinp\n else:\n try:\n fleurinp = self.ctx['inpgen'].out.fleurinpData\n except AttributeError:\n self.report('ERROR: No fleurinp, something was wrong with the inpgen calc')\n fleurinp = None\n outdict['fleurinp'] = fleurinp\n if last_calc_out:\n outdict['last_fleur_calc_output'] = last_calc_out\n\n #outdict['output_scf_wc_para'] = outputnode\n for link_name, node in outdict.iteritems():\n self.out(link_name, node)", "title": "" }, { "docid": "73e0a1ec0e6590d5c957557752f26e49", "score": "0.59126365", "text": "def results(self):\n if self._results is None:\n self._results = self.fit()\n return self._results", "title": "" }, { "docid": "8d6af002956bf6a635d689a37e2bf6be", "score": "0.5891054", "text": "def build_result(self):\n result = []\n if self.help_required:\n result += [self.get_help_text()+'\\n']\n if self._help_image_url_: self.set_help_image()\n result += [self.get_foreword(), self.get_question(), self.get_afterword()]\n \n try:\n result = '\\n'.join(filter(None, result))\n except:\n pass\n\n self.result = result \n \n return result", "title": "" }, { "docid": "672dac4fabd8ea007d38603ab3356a5b", "score": "0.58816546", "text": "def send_results(self):\n\n # TODO: Check if there is a better implementation\n # Wait for the clock to stop ticking to send the results\n while not self._clock.stop:\n time.sleep(1)\n\n if self._error_message:\n msg = {\n 'type': 'error',\n 'message': self._error_message\n }\n else:\n msg = {\n 'type': 'result',\n 'bits': self.bits\n }\n\n message = json.dumps({self.host_id: msg})\n self.send_classical(self._controller_host_id, message, await_ack=True)", "title": "" }, { "docid": "dc53bdadaea9cb4b3bd18aa067ff2140", "score": "0.5874543", "text": "def digest_run_results(self, results, retcodes, cfg):\n if cfg.get('test_type', 'perf') == 'return_code':\n # If any of the return codes is non-zero, output 1.\n overall_return_code = 0 if all(v == 0 for v in retcodes) else 1\n return {\n 'mean': overall_return_code,\n 'std_err': 0.0,\n 'std_dev': 0.0,\n 'values': results,\n }\n else:\n return perf_test.truncate_and_aggregate(self, results,\n cfg['truncate_percent'])", "title": "" }, { "docid": "112ca08cb9ea3abd52fe11257db71115", "score": "0.5872315", "text": 
"def get_result(self):\n return self._observations", "title": "" }, { "docid": "296c47533475e8d284408eb48c4c342b", "score": "0.58671874", "text": "def required_tool_results():\n return [AlphaDiversityResultModule]", "title": "" }, { "docid": "c270c8567dd9b996dfcf97ae2e9256de", "score": "0.5865271", "text": "def getArcResults(output, make_compatable):\n res = []\n for filename, results in output.iteritems():\n for err in results:\n severity = 'n'\n if len(err['severity']) > 0:\n severity = err['severity'][0]\n if severity == 'a':\n severity = 'n'\n desc = [err['name'].strip()]\n if desc[0] == filename:\n desc = []\n desc.append(err.get('code', '').strip())\n desc.append(err.get('description', '').strip())\n if make_compatable:\n res.append((\"%s:%d:%s\" % (filename, toint_or_other(err['line'], 1), \" \".join(desc))).replace(\"\\n\", \" \"))\n else:\n res.append((\"%s:%d:%d:%s:%s\" % (filename, toint_or_other(err['line'], 1), toint_or_other(err['char'], 1), severity, \" \".join(desc))).replace(\"\\n\", \" \"))\n\n return res", "title": "" }, { "docid": "2ad0aa6080278a05c2f5ca231d419f52", "score": "0.5864118", "text": "def results(self):\n\n # Expose the loaded_structure remote_folder\n self.out_many(self.exposed_outputs(self.ctx.stages[-2], Cp2kBaseWorkChain))\n\n # Return parameters, loaded structure and molecule\n cp2k_out_dict = {\n 'final_geo_opt': self.ctx.stages[-2].outputs.output_parameters,\n 'bsse': self.ctx.stages[-1].outputs.output_parameters\n }\n self.out('output_parameters', get_output_parameters(**cp2k_out_dict))\n self.out('loaded_structure', self.ctx.stages[-2].outputs.output_structure)\n self.out('loaded_molecule', get_loaded_molecule(self.outputs['loaded_structure'], self.inputs['molecule']))\n self.report('Completed! Ouput Dict<{}>, loaded StructureData<{}>, loaded molecule StructureData<{}>'.format(\n self.outputs['output_parameters'].pk, self.outputs['loaded_structure'].pk,\n self.outputs['loaded_molecule'].pk))", "title": "" }, { "docid": "e40eb17dce54fbb8ba5c40b0ffb9ae50", "score": "0.58641016", "text": "def buildReport(self):\n return self.optimizer.describeMatching(self.clusters, self.metrics)", "title": "" }, { "docid": "879422a4ef4e9b7fa4a530aef4919d9d", "score": "0.58625793", "text": "def _resultify(self, describe: bool = False):\n assert self.response is not None\n results = self.message.results\n if results is not None and len(results) > 0:\n self.response.info(f\"Clearing previous results and computing a new set of results\")\n self.message.results = []\n results = self.message.results\n self.message.n_results = 0\n\n message = self.message\n parameters = self.parameters\n\n debug_mode = parameters.get('debug', None)\n if debug_mode is not None:\n try:\n debug_mode = _parse_boolean_case_insensitive(debug_mode)\n except Exception as e:\n self.response.error(str(e))\n return\n\n for parameter_name in parameters.keys():\n if parameter_name == '':\n continue\n if parameter_name not in ARAXResultify.ALLOWED_PARAMETERS:\n error_string = \"parameter type is not allowed in ARAXResultify: \" + str(parameter_name)\n if not debug_mode:\n self.response.error(error_string)\n return\n else:\n raise ValueError(error_string)\n\n kg = message.knowledge_graph\n qg = message.query_graph\n ignore_edge_direction = parameters.get('ignore_edge_direction', None)\n if ignore_edge_direction is not None:\n try:\n ignore_edge_direction = _parse_boolean_case_insensitive(ignore_edge_direction)\n except ValueError as e:\n error_string = \"parameter value is not allowed in ARAXResultify: \" 
+ str(ignore_edge_direction)\n if not debug_mode:\n self.response.error(error_string)\n return\n else:\n raise e\n\n try:\n results = _get_results_for_kg_by_qg(kg,\n qg,\n ignore_edge_direction)\n message_code = 'OK'\n code_description = 'Result list computed from KG and QG'\n except Exception as e:\n if not debug_mode:\n code_description = str(e)\n message_code = e.__class__.__name__\n self.response.error(code_description)\n results = []\n else:\n raise e\n\n message.results = results\n if len(results) == 0 and message_code == 'OK':\n message_code = 'WARNING'\n code_description = 'no results returned'\n if len(kg.nodes) == 0:\n code_description += '; empty knowledge graph'\n self.response.warning(code_description)\n\n message.n_results = len(results)\n message.code_description = code_description\n message.message_code = message_code", "title": "" }, { "docid": "a11c6cbeb7ec987561575d3c073d9555", "score": "0.58575886", "text": "def get(self):\n allresultsTempl = env.get_template(\"allmetricresults.html\")\n runId = int(self.request.arguments['runId'][0])\n self.write(allresultsTempl.render(runlist=runlist, runId=runId))", "title": "" }, { "docid": "ed6bf075a72f652db36944b9f76b98cc", "score": "0.5857054", "text": "def af_results(self):\n return self.__af_results", "title": "" }, { "docid": "8fd3728b8ccf949ef6299c894b0f5f84", "score": "0.585529", "text": "def results_handler(self, maximums, minimums, filtered_timeseries, diagnostic_result):\n if np.prod(maximums.shape) < self.minimum_data_count or np.prod(minimums.shape) < self.minimum_data_count:\n diagnostic_result.log('Set point detection is inconclusive3. Not enough data.')\n self.shrink(self.zone_temperature_array)\n results = {\n \"cycles\": 'INCONCLUSIVE',\n \"Avg On Cycle\": \"INCONCLUSIVE\",\n \"Avg Off Cycle\": \"INCONCLUSIVE\"\n }\n return results, diagnostic_result\n\n peak_array, valley_array = align_pv(filtered_timeseries, maximums, minimums, self.timestamp_array)\n\n if np.prod(peak_array.shape) < self.minimum_data_count or np.prod(valley_array.shape) < self.minimum_data_count:\n diagnostic_result.log('Set point detection is inconclusive4. 
Not enough data.')\n self.shrink(self.zone_temperature_array)\n results = {\n \"cycles\": 'INCONCLUSIVE',\n \"Avg On Cycle\": \"INCONCLUSIVE\",\n \"Avg Off Cycle\": \"INCONCLUSIVE\"\n }\n return results, diagnostic_result\n\n peak_copy = deepcopy(peak_array)\n valley_copy = deepcopy(valley_array)\n self.compressor_status_array = self.gen_status(peak_copy, valley_copy, self.timestamp_array)\n results = self.cycling_dx(peak_copy, valley_copy)\n diagnostic_result.log('Cycling diagnostic results: ' + str(results))\n self.shrink(self.zone_temperature_array)\n\n return results, diagnostic_result", "title": "" }, { "docid": "850892e8f94e0ecba670052404ce2a03", "score": "0.5854862", "text": "def get_result(self):\r\n return self.result", "title": "" }, { "docid": "54ab45a2a5cf251cec15bd9c7eb7cf65", "score": "0.5848674", "text": "def get_infos(self) -> Sequence[str]:\n\n info = \"Scan duration: {} seconds\".format(self.elapsed)\n if self.show_all_details:\n info += \"\\nStop Time: {}\".format(self.stop_time)\n info += \"\\nRequests Done: {}\".format(self.requests_done)\n info += \"\\nCached Requests: {}\".format(self.cached_requests)\n info += \"\\nData Sent: {}\".format(self.data_sent_humanised)\n info += \"\\nData Received: {}\".format(self.data_received_humanised)\n info += \"\\nUsed Memory: {}\".format(self.used_memory_humanised)\n\n return [info]", "title": "" }, { "docid": "b2c42fffe248c428d71228fc0d0eaee5", "score": "0.58292276", "text": "def stdout(self):\n result = {}\n all = self.results_raw.get(\"contacted\")\n for key, value in all.iteritems():\n result[key] = value.get(\"stdout\")\n return result", "title": "" }, { "docid": "2d1febe1dbb01728a3f21676f0079230", "score": "0.5822419", "text": "def all_evaluate(self):\n self.compute_euclidian_distance()\n print(\"Euclidian distance : \", self.euclidian_distance)\n self.compute_MAE()\n print(\"MAE : \", self.MAE)\n self.compute_RMSE()\n print(\"RMSE : \", self.RMSE)\n self.compute_PSNR()\n print(\"PSNR : \", self.PSNR)\n self.compute_UQI()\n print(\"UQI : \", self.UQI)\n self.compute_VIF()\n print(\"VIF : \", self.VIF)\n return()", "title": "" }, { "docid": "bd57a08dc4dab05910fefe86437a9052", "score": "0.58223647", "text": "def show_results(self):\n self.show_basic()\n self.show_more(self.report_node_level())\n print(\"Total %d row(s) matched\" % self.matched_rows)\n print(\"\\n\")\n return", "title": "" }, { "docid": "aa5087d9e1c741795b14a8ecbe743f78", "score": "0.5797732", "text": "def get_results(self):\n results = self.connection.get(DETECTED_DEVICES, \"vdom=root\")[1][\"results\"]\n\n ret = []\n for result in results:\n if \"hostname\" not in result:\n continue\n\n ret.append(result)\n\n return ret", "title": "" }, { "docid": "41f6344ec4fba91869cbba8c197f59a3", "score": "0.57966995", "text": "def export_results(self):\n problemIDs = list(set([result.problemID for result in self.results]))\n configIDs = list(set([result.configID for result in self.results]))\n\n labels = []\n labels.extend(TestResults._fields)\n labels.extend(SizeMetrics._fields) \n # Remove unused columns\n labels.remove(\"size_metrics\")\n labels.remove(\"problemID\")\n labels.remove(\"configID\")\n\n # output = pd.Panel(items=labels, major_axis=problemIDs, minor_axis=configIDs)\n multiindex = pd.MultiIndex.from_product([problemIDs, configIDs], names=[\"problems\", \"configs\"])\n\n output = pd.DataFrame(index=multiindex, columns=labels)\n output.columns.names = [\"stats\"]\n\n for result in self.results:\n problemID = result.problemID\n configID = result.configID\n for 
label in [label for label in TestResults._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result, label)\n for label in [label for label in SizeMetrics._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result.size_metrics, label)\n\n # Compute Statistics\n output.fillna(value=np.nan, inplace=True)\n output.sort_index(inplace=True)\n try:\n TestFramework.compute_mosek_error(output, \"opt_val\", \"mosek_config\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_mosek_error: 'mosek_config' or 'opt_val' field not found.\")\n try:\n TestFramework.compute_performance(output, \"solve_time\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_performance: 'solve_time' field not found.\")\n return output", "title": "" } ]
17dc4805118f3b1847d262679905a2bc
Assert that the script got called.
[ { "docid": "0e5b8876a3b927aef357a79a905d60df", "score": "0.8074525", "text": "def assertScriptCalled(self):\n # Wait a bit as the events are processed in a different thread.\n with open(os.path.join(self.tempdir, 'result')) as s:\n self.assertEqual('script called', s.read())", "title": "" } ]
[ { "docid": "4e01602e6d1b691fccc1b30b02bf62c1", "score": "0.69822", "text": "def assertScriptNotCalled(self):\n # Wait a bit as so the event can get propagated.\n self.assertFalse(os.path.exists(os.path.join(self.tempdir, 'result')))", "title": "" }, { "docid": "d7d99651f3a9b5ad67fa027183c215d1", "score": "0.6576484", "text": "def test_does_call(self):\n call_if_ok = Mock()\n\n with self.assertRaises(SystemExit):\n with ci._when_exits_zero(call_if_ok):\n sys.exit(0)\n\n self.assertTrue(call_if_ok.called)", "title": "" }, { "docid": "6842a31111df3e8365e054032b3a88cc", "score": "0.6493566", "text": "def __call__(self, *args, **kwargs):\n\t\traise AssertionError(\"The callable object was called by the test with parameters {args} and key-word arguments {kwargs}.\".format(args=str(args), kwargs=str(kwargs)))", "title": "" }, { "docid": "7be1c58aedec4ee8df4d51b1c996a068", "score": "0.64235103", "text": "def test_execution(self):\n\t\tpass", "title": "" }, { "docid": "574a45ad0ea5fd7c51ce789de0306db8", "score": "0.63659704", "text": "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "title": "" }, { "docid": "f5048fefa27355b266d50e75046d7162", "score": "0.6352247", "text": "def test (did_pass):\n\n\tline_num = sys._getframe(1).f_lineno #Get the caller's line number\n\n\tif (did_pass):\n\t\tmsg = \"Test at line {0} ok.\".format(line_num)\n\n\telse:\n\t\tmsg = \"Test at line {0} FAILED.\".format(line_num)\n\n\tprint (msg)", "title": "" }, { "docid": "950435948e9e9458b4e62a04cace4593", "score": "0.6343884", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at the line {0} FAILED.\".format(linenum))\n print(msg)", "title": "" }, { "docid": "950435948e9e9458b4e62a04cace4593", "score": "0.6343884", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at the line {0} FAILED.\".format(linenum))\n print(msg)", "title": "" }, { "docid": "2b48ff7ef3e0702bed6c684dd9c6a10e", "score": "0.63321275", "text": "def test_calls(self):\n\n # @TODO what's the right test here?\n pass", "title": "" }, { "docid": "fc8e2ca90a96c7d85b389d640e9004c2", "score": "0.63210565", "text": "def testit(did_pass):\r\n\r\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\r\n if did_pass:\r\n msg = \"Test at line {0} ok.\".format(linenum)\r\n else:\r\n msg = (\"Test at line {0} FAILED.\".format(linenum))\r\n print(msg)", "title": "" }, { "docid": "b1d472b032fa20e9f7b0086844492929", "score": "0.6299475", "text": "def test_method(self, *_args, **_kwargs):\n act, err = self.run_program(inp)\n msg = err_msg\n if err:\n msg += separator + err\n\n self.assertEqual(exp, act, msg.format(src=self.source, desc=desc))", "title": "" }, { "docid": "c0a5c2dc7797c5d6261cdc952d1d8b11", "score": "0.6248866", "text": "def test_run(self):\n parser = cli.create_parser()\n args = parser.parse_args(['run', 'SomePath'])\n\n assert args.func is cli.run_blackbird_script\n assert args.input == 'SomePath'", "title": "" }, { "docid": "275147cd55c3eb3921f8a2d12df5df4f", "score": "0.6247005", "text": "def assertCalled(self, mock, function_name):\n\n self.assertTrue(mock.called, \"Expected 
function '{}' to be called\".format(function_name))", "title": "" }, { "docid": "49260817a6c22b16cfe688d868bb0771", "score": "0.6238705", "text": "def testit(did_pass):\n\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "title": "" }, { "docid": "4122f8c962b6558d227ce7bda4e9fb83", "score": "0.62348914", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno #Get the caller's line number\n if did_pass:\n msg = \"Test at line {0} ok.\" .format(linenum)\n else:\n msg = (\"Test at line {0} FAILED. \".format(linenum))\n print(msg)", "title": "" }, { "docid": "fbf2de21a3cb1e0d12853d244fd9889c", "score": "0.621061", "text": "def test_ScriptRuntimeSuccess(self):\n self.assertEqual(RunTests.output, 0,\n 'Script exits with a non 0 error code.')", "title": "" }, { "docid": "45f38753e907c667554a40a27de0d301", "score": "0.62082386", "text": "def test_call(self):\n self._load_test_plugin('test.py', 'call.py')\n plugins = self.site.plugin_manager.plugins\n\n plugin = plugins[0]\n\n self.assertEqual('plugin_call', plugin.plugin_name) # Just to check we're looking at the right one.\n\n self.site.build()\n\n # preBuild\n self.assertEqual(1, len(plugin.preBuild.calls))\n self.assertEqual((self.site,), plugin.preBuild.calls[0]['args'])\n\n # preBuildPage\n self.assertEqual(len(self.site.pages()), len(plugin.preBuildPage.calls))\n for call in plugin.preBuildPage.calls:\n self.assertIn(len(call['args']), (3, 4))\n\n # postBuildPage\n self.assertEqual(len(self.site.pages()), len(plugin.postBuildPage.calls))\n\n #postBuild\n self.assertEqual(1, len(plugin.postBuild.calls))\n self.assertEqual((self.site,), plugin.postBuild.calls[0]['args'])", "title": "" }, { "docid": "a695ab6caddf274e3f1ef91388f60b4d", "score": "0.62026095", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "title": "" }, { "docid": "a695ab6caddf274e3f1ef91388f60b4d", "score": "0.62026095", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "title": "" }, { "docid": "a695ab6caddf274e3f1ef91388f60b4d", "score": "0.62026095", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "title": "" }, { "docid": "a63ed3339b4997f6c1b257fc38a6ad2f", "score": "0.61902106", "text": "def func(self):\n self.caller.msg(\"Command called!\")", "title": "" }, { "docid": "bd37e99a1856138a6fe74d88a212bfbc", "score": "0.61774194", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "title": "" }, { "docid": "bd37e99a1856138a6fe74d88a212bfbc", "score": "0.61774194", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at 
line {0} FAILED.\".format(linenum))\n print(msg)", "title": "" }, { "docid": "bd37e99a1856138a6fe74d88a212bfbc", "score": "0.61774194", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "title": "" }, { "docid": "bd37e99a1856138a6fe74d88a212bfbc", "score": "0.61774194", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "title": "" }, { "docid": "ca324c528a95b631273c294796dac3b8", "score": "0.61759806", "text": "def test(did_pass):\n line_num = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(line_num)\n else:\n msg = \"Test at line {0} FAILED.\".format(line_num)\n print(msg)", "title": "" }, { "docid": "203eff80aa717a6620a3e3c9c0441ea1", "score": "0.61697996", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "title": "" }, { "docid": "da31ba36b69600b5c836dc26cae9ae2c", "score": "0.6164061", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "title": "" }, { "docid": "da31ba36b69600b5c836dc26cae9ae2c", "score": "0.6164061", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "title": "" }, { "docid": "da31ba36b69600b5c836dc26cae9ae2c", "score": "0.6164061", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "title": "" }, { "docid": "908bcfc1c579c3196b76433b5f0481a5", "score": "0.6160658", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\" .format(linenum)\n else:\n msg = (\"Test at line {0} FAILED\" .format(linenum))\n print(msg)", "title": "" }, { "docid": "908bcfc1c579c3196b76433b5f0481a5", "score": "0.6160658", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\" .format(linenum)\n else:\n msg = (\"Test at line {0} FAILED\" .format(linenum))\n print(msg)", "title": "" }, { "docid": "964e5706426eb04c92c7b3fac456a4fc", "score": "0.6108258", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILIED.\".format(linenum))\n print(msg)", "title": "" }, { "docid": "744e3a48744944cd5daadaca748a84c3", "score": "0.6102189", "text": "def test():\r\n\tassert agent() != \"\", \"test failed (personal_code: 0)\" # check all code\r", "title": "" }, { "docid": 
"92c759610c7dd27f999c79518d79e256", "score": "0.60893714", "text": "def test_casperjs_test_invoked(self):\n runner = CasperTestRunner('a', 'b', self._casperjs)\n runner.run()\n s = runner.successes()\n assert_in('test', s[1][1])", "title": "" }, { "docid": "efd4f7c73e7e82c181c0903801c84b42", "score": "0.6083142", "text": "def test_execute_custom_alert_action(self):\n pass", "title": "" }, { "docid": "df8abac4692300a48eb1a92098ff7bd9", "score": "0.60793936", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller’s line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "title": "" }, { "docid": "11a520a0aaa114e236ba5dd321afa01c", "score": "0.60685885", "text": "def test_test_me(self):\n self.assertTrue(test_me())", "title": "" }, { "docid": "4c7d151caafdd53233fc56441b6011c9", "score": "0.60302675", "text": "def test_calls_args_run(self):\n main([])\n\n self.mock_args.run.assert_called_once_with(self.mock_args)", "title": "" }, { "docid": "6019b1c560db154d2db908057e954e19", "score": "0.6017644", "text": "def test_command(self):", "title": "" }, { "docid": "86990ddfce667b14a43ffb924adf15dc", "score": "0.6016178", "text": "def test_no_args(self, bl, cl):\n bl.info(42)\n\n assert 42 == cl.calls[0][2][\"event\"]", "title": "" }, { "docid": "1c091909b6e5390e8ecef7d53178fae6", "score": "0.6011627", "text": "def test_does_not_call(self):\n call_if_ok = Mock()\n\n with self.assertRaises(SystemExit):\n with ci._when_exits_zero(call_if_ok):\n sys.exit(1)\n\n self.assertFalse(call_if_ok.called)", "title": "" }, { "docid": "9a918e529a76cd53ea24b0e8e0d0d06e", "score": "0.60052025", "text": "def __verify_function__(fn):\n assert callable(fn), 'battle_tested needs a callable function, not {0}'.format(repr(fn))", "title": "" }, { "docid": "9a918e529a76cd53ea24b0e8e0d0d06e", "score": "0.60052025", "text": "def __verify_function__(fn):\n assert callable(fn), 'battle_tested needs a callable function, not {0}'.format(repr(fn))", "title": "" }, { "docid": "d3b0c84f7b2f2f609769dc9ac961f588", "score": "0.597078", "text": "def assertWasCalled(self, deferred):\n if not deferred.called:\n raise AssertionError('This deferred was not called yet.')", "title": "" }, { "docid": "fe5b13aa0486301a6e026fb3bd916cfa", "score": "0.59702647", "text": "def __test():\n test()\n # raise __TestException, 'Wrapper function for command line testing only'\n raise __TestException(\"Wrapper function for command line testing only\")", "title": "" }, { "docid": "dbbd75079c23ec8c98862e3d6b10468e", "score": "0.59696126", "text": "def test_execute(self):", "title": "" }, { "docid": "6eb9bc916dbb1ecbba70ca91a623ba2b", "score": "0.596297", "text": "def test_cli_runscript(run_path, monkeypatch, tmp_path):\n script = tmp_path / 'test.py'\n script.write_text('import napari; v = napari.Viewer(show=False)')\n\n with monkeypatch.context() as m:\n m.setattr(sys, 'argv', ['napari', str(script)])\n __main__._run()\n\n run_path.assert_called_once_with(str(script))", "title": "" }, { "docid": "c7a04da77066ba469820f68a44e3d22f", "score": "0.59468526", "text": "def testShowCalledArgs(self):\n real = main.ProductionClass()\n real.something = MagicMock()\n real.method(1, 2, 3)\n real.method(2, 2, 3)\n expected = [call(1, 2, 3), call(2, 2, 3)]\n self.assertEqual(real.something.call_args_list, expected)", "title": "" }, { "docid": "744dd9be441fc25fa1d401574ba711c6", "score": "0.5901094", "text": "def mockCheckCall(self, tester, index, 
name, *args, **kwargs):\n try: call = self.mockAllCalledMethods[index]\n except IndexError:\n tester.fail(\"No call with index %d\" % index)\n tester.assertEqual(name, call.name, \"Expected call number %d to \\\nbe to %s, but it was to %s instead\" % (index, name, call.name,))\n call.checkArgs(tester, *args, **kwargs)", "title": "" }, { "docid": "45978a7ee19c3958654079330a0d1e2f", "score": "0.5894838", "text": "def test_before_send(self, sentry_sdk, sentry):\n assert sentry\n called = sentry_sdk.call_args.kwargs[\"before_send\"]\n expected = import_string(\"tests.core.test_sentry.before_send\")\n assert called == expected", "title": "" }, { "docid": "ad6af6af0e12c9ac7a5292b0c74f21ce", "score": "0.58774996", "text": "def testingSuccess():\n assert 1==1", "title": "" }, { "docid": "a696f05178528bc987411e951d031f8b", "score": "0.5870533", "text": "def exec_tests(self):", "title": "" }, { "docid": "d1696d3bf1d488e2131e5bf2b5682ea9", "score": "0.58607423", "text": "def test_normal_functionality(self):\n # Calling the function\n metaskat.check_args(self.dummy)", "title": "" }, { "docid": "b5cf9ec22300e14c65803369db7af56b", "score": "0.58541465", "text": "def test_parse_args_should_be_called(self):\n parse_args(self.argparse_mock)\n self.assertTrue(self.argparse_mock.parse_args.called)", "title": "" }, { "docid": "e1451a85ae1b6daec98c71783a826d8c", "score": "0.58496934", "text": "def test_maybe_init_is_main(cli_mock: Mock) -> None:\n cli_module.maybe_init()\n cli_mock.assert_called()", "title": "" }, { "docid": "0dd480ecf2b84b41129b6c4157754203", "score": "0.58360153", "text": "def testingSuccess():\n assert 1 == 1", "title": "" }, { "docid": "4c82e0b3bb04da4270614b69df01e0fe", "score": "0.5827677", "text": "def test_setup_mode_with_valid_call(self):\n self.mock_object.valid_call()\n self.assertEqual(len(self.mock_object._expected_calls_queue), 1)", "title": "" }, { "docid": "ef2fdc12b30d2c031c10c309a72b195d", "score": "0.58225715", "text": "def runTest(self):\n pass", "title": "" }, { "docid": "062939b786d2f966ba213fe47faf1f23", "score": "0.582172", "text": "def execute(self):\n print(\"test execute\")", "title": "" }, { "docid": "e9e5d6fbde6aa852c634ebe66c90803c", "score": "0.5818944", "text": "def cb_test( self, ):\n print(\"cb_test called may cause error if test not set up \")", "title": "" }, { "docid": "63b40a9fe1e76b1cd621c52e725dd0e9", "score": "0.5812044", "text": "def test_main_runs(mocker, filename_fixture):\n mock_etl = mocker.spy(main, 'etl')\n mock_distance = mocker.spy(main, 'distance_matrix')\n mock_path = mocker.spy(main, 'determine_path')\n mock_plot = mocker.spy(main, 'plot_path')\n\n # Test cli interface\n runner = CliRunner()\n result = runner.invoke(main.main, ['-f', filename_fixture])\n assert result.exit_code == 0\n\n mock_etl.assert_called_once_with(filename_fixture)\n mock_distance.assert_called_once()\n mock_path.assert_called_once()\n mock_plot.assert_called_once()", "title": "" }, { "docid": "241de471ffddd4a136255c3e5eccc120", "score": "0.5808996", "text": "def testMethodInvocationNoArgumentsNoReturn(self):\n expected_calls = [\n call('%s(%s)', '_Object.method1', ''),\n call('%s: %s', '_Object.method1', None),\n ]\n\n self.assertIsNone(self._object.method1())\n self.assertEqual(self._logger.call_args_list, expected_calls)", "title": "" }, { "docid": "a4316e3670458cf1b76f743f7ddbf8a4", "score": "0.580678", "text": "def test_func(self):\n return True", "title": "" }, { "docid": "dee460b18404668eb6e0aa52b2611f5e", "score": "0.5798582", "text": "def verify(self):\n 
LOG.info(\"Verify script called\")\n\n return 0", "title": "" }, { "docid": "d30bd78a45b29033987c0d51206f7136", "score": "0.5790931", "text": "def assert_no_tasks_called():\n assert_only_tasks_called()", "title": "" }, { "docid": "4bdf2eda39102ca7daaa54e13f023abf", "score": "0.5767736", "text": "def __verify_tested__(fn):\n battle_tested.__verify_function__(fn)\n assert fn in battle_tested._results.keys(), '{} was not found in battle_tested\\'s results, you probably haven\\'t tested it yet'.format(fn)", "title": "" }, { "docid": "045385a33fa084c8187f2c235a708cb2", "score": "0.5763929", "text": "def test_user_confirmation_accepted(self, mock_input, \n mock_setup_partners, mock_dfp_client):\n tasks.add_new_openwrap_partner.main()\n mock_setup_partners.assert_called_once()", "title": "" }, { "docid": "2e0e2679687a9d0d03150cd042758ae8", "score": "0.5763744", "text": "def test_trigger_custom_event():\n assert client.trigger_custom_event(None, None)", "title": "" }, { "docid": "685d8b6aff67bea4293677c351b8f33f", "score": "0.5761889", "text": "def test_runner(runner):\n assert runner.run() == 0", "title": "" }, { "docid": "b598325f16e14eee9025d1607ff36928", "score": "0.5760366", "text": "def testMethodInvocationWithArgumentAndReturn(self):\n argument = 'hello-test'\n expected_calls = [\n call('%s(%s)', '_Object.method2', argument),\n call('%s: %s', '_Object.method2', argument),\n ]\n\n self.assertEqual(self._object.method2(argument), argument)\n self.assertEqual(self._logger.call_args_list, expected_calls)", "title": "" }, { "docid": "90b433278dc8e5fd788f5772c30513d3", "score": "0.5759647", "text": "def __test(self):\n print()\n logging.getLogger(__name__).debug('Running test function')\n print(\"--------------------\")\n print(\"--------------------\")\n print()", "title": "" }, { "docid": "4566a42a097e6da5fac010b9935d676a", "score": "0.57592326", "text": "def test_maybe_init_is_not_main(cli_mock: Mock) -> None:\n cli_module.maybe_init()\n cli_mock.assert_not_called()", "title": "" }, { "docid": "0e160120451be1fed5254e2f25300b92", "score": "0.57467854", "text": "def mockCheckNamedCall(self, tester, methodName, index, *args, **kwargs):\n self.mockCalledMethods.get(methodName, [])\n try: call = self.mockCalledMethods.get(methodName, [])[index]\n except IndexError:\n raise ValueError(\"No call to %s with index %d\" % (methodName, index))\n call.checkArgs(tester, *args, **kwargs)", "title": "" }, { "docid": "441b95c95e17bb60541e370203edef17", "score": "0.57370293", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno\n if did_pass:\n msg = 'Test at line {0} ok.'.format(linenum)\n else:\n msg = 'Test at line {0} FAILED.'.format(linenum)\n print(msg)", "title": "" }, { "docid": "441b95c95e17bb60541e370203edef17", "score": "0.57370293", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno\n if did_pass:\n msg = 'Test at line {0} ok.'.format(linenum)\n else:\n msg = 'Test at line {0} FAILED.'.format(linenum)\n print(msg)", "title": "" }, { "docid": "182f95095b395c6ee84615d4aac0cc4b", "score": "0.5729531", "text": "def test_entrypoint(self):", "title": "" }, { "docid": "0b564f01ddd7bf676092f429d2b3a77a", "score": "0.5727722", "text": "def test_function():\n print(\"Hello\")", "title": "" }, { "docid": "faab2a42b41a95d8e608232a99ce85f7", "score": "0.5721176", "text": "def test_script_file():\n\n proc = Spidermonkey(script_file='-')\n stdout, stderr = proc.communicate('print(\"World\")')\n\n assert (stdout, stderr) == ('World\\n', '')\n assert proc.returncode == 0", "title": "" 
}, { "docid": "1dce7807cd1e8e92a62d8e53ab10d13a", "score": "0.57199824", "text": "def test_positive_examples(positive_example):\n script_code, prepared = positive_example\n parsed = parse_script(script_code)\n\n assert parsed.spec_id == prepared.spec_id\n assert len(prepared.calls) == len(parsed.calls)\n\n for prepared_call, parsed_call in zip(\n prepared.calls, parsed.calls\n ):\n assert parsed_call.address == prepared_call.address\n assert parsed_call.call_data_length == prepared_call.call_data_length\n assert parsed_call.method_id == prepared_call.method_id\n assert parsed_call.encoded_call_data == prepared_call.encoded_call_data", "title": "" }, { "docid": "087ad7da658c219f370f8cf190655435", "score": "0.5719278", "text": "def test_call_valid_call(self):\n # Bug the user if the server isn't running\n self.read_session_from_file()\n try:\n result = core.call(self.SERVER_NAME, \"+\", [1, 2, 3])\n except Exception as e:\n _raise_unexpected(e)\n eq_(result, 6)", "title": "" }, { "docid": "e7c094f1fc2638b054c011ded53f840c", "score": "0.5718594", "text": "def test_replay_with_expected_call(self):\n self.mock_object.valid_call() # setup method call\n self.mock_object._replay() # start replay mode\n self.mock_object.valid_call() # make method call", "title": "" }, { "docid": "037c103955378ea430e3225e0e6b3f55", "score": "0.57136774", "text": "def test(did_pass):\n linenum = sys._getframe(1).f_lineno\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = \"Test at line {0} FAILED.\".format(linenum)\n print(msg)", "title": "" }, { "docid": "e8062fa8ddb36bbd5f3968dab611c92c", "score": "0.5711793", "text": "def test_result_is_none(self, monkeypatch, capsys):\n mocked_program = MockProgram()\n mocked_args = MockArgs()\n mocked_write_script_results = MockWriteScriptResults()\n\n with monkeypatch.context() as m:\n m.setattr(cli, \"load\", lambda arg: mocked_program)\n m.setattr(cli, \"RemoteEngine\", MockRemoteEngine)\n m.setattr(cli, \"write_script_results\", mocked_write_script_results.write_script_results)\n\n with pytest.raises(SystemExit):\n cli.run_blackbird_script(mocked_args)\n\n out, _ = capsys.readouterr()\n assert \"Executing program on remote hardware...\" in out\n\n # Check that the write_script_results function was not called\n assert not mocked_write_script_results.called", "title": "" }, { "docid": "49d7842d2183e53539d6d6fc7da8c0db", "score": "0.5708649", "text": "def it_is_callable(self):\n import da.build\n assert callable(da.build.main)", "title": "" }, { "docid": "2d6f8391ffe92522cb8331dc3ec5ccf1", "score": "0.5705334", "text": "def test_execute(self):\n e = self.friendlyEngine()\n\n world = MagicMock()\n action = MagicMock()\n action.execute.return_value = 'foo'\n\n r = yield e.execute(world, action)\n\n e.engine.isAllowed.assert_called_once_with(world, action)\n e.engine.workRequirement.assert_called_once_with(world, action)\n e.engine.energyRequirement.assert_called_once_with(world, action)\n\n action.execute.assert_called_once_with(world)\n self.assertEqual(r, 'foo', \"Should return the value of the \"\n \"execution\")", "title": "" }, { "docid": "163ca91ddf1ccc4bcf42f1705b0818f5", "score": "0.5702262", "text": "def test_invalid_action_2(self):\n self.assertMain(\"main_invalid_action_2.in\", \"main_invalid_action_2.out\")", "title": "" }, { "docid": "888ea64e841ce9594b5a18cdd68edb17", "score": "0.5697913", "text": "def test_args(self):\n dummy = Dummy(self)\n self.job1 = Job(dummy.foo1, timedelta(milliseconds=1), False,\n arg1_parameter, 
kwarg1=kwarg1_parameter)\n self.job1.jump_ahead(5)\n ensure_condition(self._foo_called, dummy)\n self.assertEqual(dummy.foo1_called, True)", "title": "" }, { "docid": "939e21bdd1924d990e789fd2e96c904d", "score": "0.569206", "text": "def test_alive():\n pass", "title": "" }, { "docid": "c51e411d9010f1d9f3faf08d23379c23", "score": "0.5678376", "text": "def test_mock_method_registration(self):\n\n self.assertEqual(self.client.call(\"ping\", \"o hai\"), \"pong o hai\")", "title": "" }, { "docid": "721ad0d17101606bf8f141c8fd2932fa", "score": "0.5672706", "text": "def test_output(self):\n parser = cli.create_parser()\n args = parser.parse_args(['run', 'SomeInputPath', '--output', 'SomeOutputPath'])\n\n assert args.func is cli.run_blackbird_script\n assert args.input == 'SomeInputPath'\n assert args.output == 'SomeOutputPath'", "title": "" }, { "docid": "1439e252aed41805d3acfb8779100b3a", "score": "0.566792", "text": "def test_main(self):\n pass", "title": "" }, { "docid": "fa668a7d4cac46f83a95b9d542d0bb55", "score": "0.56610215", "text": "def test(did_pass):\r\n linenum = sys._getframe(1).f_lineno\r\n if did_pass:\r\n msg = \"Test at line {0} is ok\".format(linenum)\r\n else:\r\n msg = \"Test at line {0} is FAILED\".format(linenum)\r\n print(msg)", "title": "" }, { "docid": "28aed8cb041e0f6014b0a7ead817f2cc", "score": "0.5657314", "text": "def test_main_calls_func(mock_cli_arguments, name):\n\n mock_parse_args = Mock()\n mock_parse_args.name = name\n mock_parse_args.version = None\n mock_parse_args.func = Mock()\n\n mock_cli_arguments.return_value = (None, mock_parse_args)\n\n awstemp.cli.main()\n\n assert mock_parse_args.func.call_args_list == [call()]", "title": "" }, { "docid": "f8b5d5fcdebb6d37da21ceba9b1c597d", "score": "0.5652796", "text": "def test_one_below(self, bl, cl):\n bl.debug(\"nope\")\n\n assert [] == cl.calls", "title": "" }, { "docid": "b1600e74a522088a41807cd57beed7f2", "score": "0.5651713", "text": "def test_invalid_action_3(self):\n self.assertMain(\"main_invalid_action_3.in\", \"main_invalid_action_3.out\")", "title": "" }, { "docid": "77f0d7b3f4d569f114421c3601806b01", "score": "0.5650895", "text": "def test_is_callable(self):\n self.mock_object().returns(\"mox0rd\")\n self.mock_object._replay()\n\n self.assertEqual(\"mox0rd\", self.mock_object())\n\n self.mock_object._verify()", "title": "" }, { "docid": "85a66bee0146dfe02929c566c040b925", "score": "0.5649975", "text": "def test_noarg(self):\n try:\n pymchelper.run.main([\"tripddd\"])\n except SystemExit as e:\n self.assertEqual(e.code, 2)", "title": "" }, { "docid": "5f9fc048abdfbdd4e29f912fa5e604c3", "score": "0.5646503", "text": "def test_script_args():\n\n proc = Spidermonkey(code='print(scriptArgs)',\n script_args=('Hello', 'World'))\n stdout, stderr = proc.communicate('')\n\n assert (stdout, stderr) == ('Hello,World\\n', '')\n assert proc.returncode == 0", "title": "" }, { "docid": "8aeb1bddd349b9f0a94f13bbbcf58617", "score": "0.5644186", "text": "def test_app_test(self):\n out = _run(\"entrypoint.sh test\")\n self.assertTrue(\"Ran 2 tests\" in out)", "title": "" } ]
e428f622cad9393b7b8625fa67d8623d
this is a custom list converter that converts a list of tuples into a list of the second element of each tuple
[ { "docid": "e2143046c2cb436c639d40f5c2a4e1cf", "score": "0.0", "text": "def to_list(entry,index):\n out = []\n for e in entry:\n out.append(e[index])\n return out", "title": "" } ]
[ { "docid": "2e74caf3afc4b7843f8e24a0eb594860", "score": "0.7229407", "text": "def convert_list_to_tuple(self):\n\n # Change self.list to a tuple\n\n pass", "title": "" }, { "docid": "bd3cabcb8d5aa05ef113aba444bc2842", "score": "0.7141554", "text": "def listToTuple ( l ):\n for index, val in enumerate(l):\n if isinstance(val, list):\n l[index] = tuple(val)\n return tuple(l)", "title": "" }, { "docid": "61b763d35c4f286370d6a81b96650815", "score": "0.7140128", "text": "def convert_list_to_tuple(data):\r\n new_data = list()\r\n for i in range(len(data)):\r\n new_data.append( (data[i][0], data[i][1], data[i][2]) )\r\n \r\n return new_data", "title": "" }, { "docid": "54d5dd12d0216b6c6d4252ee6e566815", "score": "0.70565057", "text": "def list_to_list_two_tuples(values: list):\n return [(val, val) for val in values]", "title": "" }, { "docid": "644a5b815088dce40dee2ebb17ffff35", "score": "0.6844234", "text": "def process_tuple(self, node, state, *_):\n elements = [\n element[0]\n for element in [\n self.gen_grfn(list_element, state, \"ctx\")\n for list_element in node.elts\n ]\n ]\n\n return elements if len(elements) == 1 else [{\"list\": elements}]", "title": "" }, { "docid": "867a336512ded8f0234c79e13dc35cf9", "score": "0.68367016", "text": "def _retuple(tup):\r\n # length must be greater than 1\r\n L = len(tup[0])\r\n # firstly assert that every tuple is of the same length\r\n assert all(map(lambda t: len(t) == L, tup)), \"not all tuples are same length\"\r\n # now reshape according to this length\r\n return [tuple(map(lambda t: t[i], tup)) for i in range(L)]", "title": "" }, { "docid": "f600ad49ebbf1341bac9e54bdada7e59", "score": "0.6818067", "text": "def conv_list(rect_tuple):\n return[[list(pt) for pt in pts] for pts in rect_tuple]", "title": "" }, { "docid": "0703f6e5a2b605b886ed3c6a93f7f9ba", "score": "0.6686545", "text": "def _to_lists(x):\n if isinstance(x, tuple):\n return [_to_lists(el) for el in x]\n\n return x", "title": "" }, { "docid": "9237221d1cb00a77386000e498a85278", "score": "0.66759694", "text": "def list_of_tuples(list_of_lsts):\n\n list_of_tups = []\n for lst in list_of_lsts:\n list_of_tups.append(tuple(lst))\n return list_of_tups", "title": "" }, { "docid": "247075bab84984874a3e0216ec757f4b", "score": "0.6607863", "text": "def separate_lists(tuple_result):\n return tuple(map(list, zip(*tuple_result)))", "title": "" }, { "docid": "4199bf370b016556766f7f2e40722e0e", "score": "0.65439", "text": "def to_2tuple(x):\n if isinstance(x, collections.abc.Iterable):\n return x\n return (x, x)", "title": "" }, { "docid": "bdcfbffea797573c94074af1e0c17f3f", "score": "0.65400624", "text": "def transform_to_tuple(items):\n for item in items:\n if not isinstance(item, tuple):\n yield None, item\n else:\n yield item", "title": "" }, { "docid": "697b8abac46c54a68db9fca72002b8af", "score": "0.65318227", "text": "def _nested_tuple(nested_list: List) -> Tuple:\n return tuple(map(_nested_tuple, nested_list)) if isinstance(nested_list, list) else nested_list", "title": "" }, { "docid": "e35d453030cf9a79d16fe294d36281a9", "score": "0.65162915", "text": "def list_of_tuples(list_of_lists):\n list_of_tuples = []\n for lst in list_of_lists:\n list_of_tuples.append(tuple(lst))\n\n\n return list_of_tuples", "title": "" }, { "docid": "e2dd9b30e4cb31f72da37544455812ec", "score": "0.64947104", "text": "def cast(x):\n if isinstance(x, list):\n x = tuple(x)\n return x", "title": "" }, { "docid": "abb5da41c0bc9dc2cef49f3ad184aeb6", "score": "0.64708805", "text": "def tuple_py2(old_tuple):\n lst = 
list(old_tuple)\n lst = list_py2(lst)\n return tuple(lst)", "title": "" }, { "docid": "244484d54c6f6de44df4fcb7fdb9b74e", "score": "0.6464667", "text": "def _rewrite_tuples(self, elements):\n if len(elements) == 1:\n return [self._rewrite_tuple(elements[0], None)]\n\n return [self._rewrite_tuple(element, n) for n, element in enumerate(elements)]", "title": "" }, { "docid": "fe60c931e6a31027164317315aa4dbf3", "score": "0.64628863", "text": "def gen_tuplelist(inlist):\n if len(inlist) == 1:\n yield inlist[0], inlist[0]\n else:\n for i, item1 in enumerate(inlist):\n for item2 in gen_shortlist(start=i + 1, inlist=inlist):\n yield item1, item2", "title": "" }, { "docid": "f4a18e6865856dbecaa00a784462400c", "score": "0.6450634", "text": "def _convert_to_list_of_tuples(input_):\n # make it al into a list of tuple(s)\n if input_ is None:\n return None\n\n if isinstance(input_, list):\n for item in range(len(input_)):\n if isinstance(input_[item], str):\n input_[item] = (input_[item],)\n\n if isinstance(input_, tuple):\n input_ = [input_]\n\n if isinstance(input_, str):\n input_ = [(input_,),]\n\n return input_.copy()", "title": "" }, { "docid": "95c8f58cc051ed5244a5be04ff2e5d8b", "score": "0.6397833", "text": "def tup_to_rlist(tup):\n if not tup:\n return empty_rlist\n return rlist(tup[0], tup_to_rlist(tup[1:]))", "title": "" }, { "docid": "f7ac894e7a326bd3169f678c61670dfe", "score": "0.63911766", "text": "def _to_list(x):\n if isinstance(x, (list, tuple)):\n return list(x)\n return [x]", "title": "" }, { "docid": "f7ac894e7a326bd3169f678c61670dfe", "score": "0.63911766", "text": "def _to_list(x):\n if isinstance(x, (list, tuple)):\n return list(x)\n return [x]", "title": "" }, { "docid": "3edeb025bafbe30d3ebfdc428f98c622", "score": "0.6386835", "text": "def convert_tuple_to_list(data):\r\n new_data = list()\r\n for i in range(len(data)):\r\n new_data.append(list())\r\n for j in range(len(data[i])):\r\n new_data[i].append(data[i][j])\r\n return new_data", "title": "" }, { "docid": "ffd3fad674c9ae2cfcb0bc4070a583b7", "score": "0.6329196", "text": "def pairs(items: \"list[T]\") -> \"list[(T, T)]\":\n return [(items[i], items[i+1]) for i in range(len(items)-1)]", "title": "" }, { "docid": "539dd493221e49da8511e8d0bc3f74a3", "score": "0.6234064", "text": "def tuples_to_lists(L):\n out = []\n W = len(L[0])\n for w in range(W):\n out.append( [i[w] for i in L] )\n return out", "title": "" }, { "docid": "20dcf259a7349b8ce2f020a659ef2c22", "score": "0.6207867", "text": "def basic_listifier(item):\n if type(item) == tuple or type(item) == list:\n final = [x.replace(',','') for x in item]\n return final\n elif type(item) == str:\n final = [x.replace(',','') for x in item.split(' ')]\n return final", "title": "" }, { "docid": "59b02617e2faa94987b518c8481f58b0", "score": "0.6199563", "text": "def convert_array(a):\n try:\n return tuple(convert_array(i) for i in a)\n except TypeError:\n return a", "title": "" }, { "docid": "feeed78f30241028e531f2996efa6ffe", "score": "0.6168318", "text": "def test_list_of_tuples(self):\n test = [(1, 2), (1.5, 3.2)]\n incoming = self.oc.roundtrip(test)\n assert isinstance(incoming, Cell)\n incoming = incoming.squeeze()\n assert incoming[0].squeeze().tolist() == list(test[0])\n assert incoming[1].squeeze().tolist() == list(test[1])", "title": "" }, { "docid": "3ae8b9eb190885f583bcb9b03f80dbab", "score": "0.6151118", "text": "def convert(list_of_tuples, zero): #\n out = []\n for i in list_of_tuples:\n out.append(plus_tuple(i, zero))\n return out", "title": "" }, { "docid": 
"fcfb12ca3ab6f8c269327f017d757489", "score": "0.61322576", "text": "def conv_tuple(rect):\n return (tuple(rect[0]), tuple(rect[1]))", "title": "" }, { "docid": "a0534bf44438e9bd42d44970be282154", "score": "0.61255866", "text": "def parse_conversion_tuples(self):\n return []", "title": "" }, { "docid": "7b88b02e7674a50ba173656dc11eab6d", "score": "0.61137646", "text": "def make_tuple(value, convert_none=False):\n if isinstance(value, list):\n return tuple(value)\n if not isinstance(value, tuple) and (convert_none or value is not None):\n return (value,)\n return value", "title": "" }, { "docid": "c3aee705474a1a2a241f8a19a9041a81", "score": "0.6101101", "text": "def to_list_of_tuple(self, list_of_tuple, query, n):\n value = self.query(query)\n value = value[0]\n rank = list(value)\n rank.append(n)\n value = tuple(rank)\n list_of_tuple.append(value)\n\n return list_of_tuple", "title": "" }, { "docid": "0f5f060c80f73bb47fe8de520c2b2bc0", "score": "0.6093945", "text": "def flatten(l):\n return [x[0] for x in l]", "title": "" }, { "docid": "f2aa6db1d1ccf4879cd97339214eb91c", "score": "0.6071524", "text": "def to_points(tuples):\n return [element for tupl in tuples for element in tupl]", "title": "" }, { "docid": "ecc6dd47aeeeee308336c75660775638", "score": "0.60607135", "text": "def _make_list(item) -> list:\n if isinstance(item, (list, tuple)):\n pass\n else:\n item = [item]\n return item", "title": "" }, { "docid": "a7f6fd833364ab19c8ce605e8028a037", "score": "0.60593754", "text": "def convert_stops_to_tuples(stops_list):\n\n\tmap_func = lambda x: (x['tag'], {'title':x['title'], 'lat':x['lat'], 'lon':x['lon'], 'merged':x['merged']} )\n\n\treturn list(map(map_func, stops_list))", "title": "" }, { "docid": "3066376765a5819a3e3c22218ef3b7ea", "score": "0.6032929", "text": "def as_tuple(x):\n return tuple(x) if hasattr(x, '__iter__') else (x,)", "title": "" }, { "docid": "5df1b7d873b9e068b199edc1d74eaea7", "score": "0.60108024", "text": "def _to_tuple(t: Tensor) -> Tuple:\n return _nested_tuple(t.tolist())", "title": "" }, { "docid": "0bcb1000bd3fe7f14d781b775c7412d7", "score": "0.60064316", "text": "def ApparieListe2(lst1):\n if isinstance(lst1, list) and len(lst1) > 2:\n Res = [[lst1[0], num] for num in lst1[1:]]\n if len(lst1[1:]) > 2:\n Res.extend(ApparieListe2(lst1[1:]))\n return Res\n elif len(lst1[1:]) == 2:\n Res.append([lst1[1], lst1[2]])\n return Res\n else:\n return Res\n elif len(lst1) == 2:\n return [[lst1[0], lst1[1]]]\n else:\n return lst1", "title": "" }, { "docid": "1e16ee56875d0ccb4fc2509544dfeeb8", "score": "0.5970122", "text": "def list_to_python(value, item_type=None):\n return [value_to_python(item, item_type=item_type) for item in value]", "title": "" }, { "docid": "16177047129525f020b2ff76ce682385", "score": "0.59632695", "text": "def map_listcs(item):\n fields = item.split()\n\n fields = [x.split(\"=\", 1)[-1] for x in fields]\n\n return tuple( fields )", "title": "" }, { "docid": "09cc8c0d6aba050778e8edf43267fd17", "score": "0.5942752", "text": "def make_tuple(x):\n return (x,) if not (isinstance(x, tuple) or isinstance(x, list)) else x", "title": "" }, { "docid": "4c07c8f7e5bf53b2ec7ac25ad5df25af", "score": "0.5927832", "text": "def get_pairs_from_list(lst):\n tuples = []\n for i in range(0,len(lst)):\n for j in range(0,len(lst)):\n if i != j:\n tuples.append((lst[i], lst[j]))\n return tuples", "title": "" }, { "docid": "b42700978ccf8d1501e699d95845fb36", "score": "0.59274024", "text": "def f01_15_listConvert2d1d(inputList):\n outputList = []\n for i in inputList:\n for 
j in i:\n outputList.append(j)\n return outputList", "title": "" }, { "docid": "fb26e8a27fda9aac0761fadadba2f438", "score": "0.59000856", "text": "def cl_to_tuples(cell_list):\n\n list_of_tuples = list()\n num_cols = max([x.col for x in cell_list])\n for i, cell in enumerate(cell_list):\n col = i % num_cols\n if not col:\n a_tuple = tuple([x.value for x in cell_list[i:i+num_cols]])\n list_of_tuples.append(a_tuple)\n return list_of_tuples", "title": "" }, { "docid": "c58e25d0c3597671265f09627b0a7dee", "score": "0.5896375", "text": "def convert_to_tuple_points(lst1: list, lst2: list) -> list:\r\n new_lst = []\r\n assert len(lst1) == len(lst2)\r\n for i in range(0, len(lst1)):\r\n new_lst.append((lst1[i], lst2[i]))\r\n return new_lst", "title": "" }, { "docid": "75d6a103111e1b8887cd5ed98f54bc22", "score": "0.58957404", "text": "def lst2Tpl(lst):\r\n return tuple(lst)", "title": "" }, { "docid": "e8da85360f49563ede12172c42ba4f08", "score": "0.58902806", "text": "def points_to_tuple(*args):\n return tuple(args[i][j] for i in range(len(args)) for j in range(len(args[i])))", "title": "" }, { "docid": "a2a5bd2c3a040449fd82a691b44b1707", "score": "0.5885643", "text": "def tl_list_to_py(lst: TlList) -> list:\n return [to_py_type(v) for v in lst]", "title": "" }, { "docid": "2272e03f7ee6e674e01fc9266bc9a5b5", "score": "0.5880092", "text": "def list_of_lists(list_of_tuples):\n list_of_lists = []\n for tup in list_of_tuples:\n list_of_lists.append(list(tup))\n\n\n return list_of_lists", "title": "" }, { "docid": "89602d2a063c03eb8906530916a8cf6b", "score": "0.5878989", "text": "def flatten(args):\n return tuple(args[i][j] for i in range(len(args)) for j in range(len(args[i])))", "title": "" }, { "docid": "77deb4b61e8f2bc418b9e0bdfaa01b52", "score": "0.58757085", "text": "def make_list(x):\n if isinstance(x, list):\n return x\n elif isinstance(x, tuple):\n return list(x)\n else:\n return [x]", "title": "" }, { "docid": "f3ba9e776bbcf8752d0fcb6565043745", "score": "0.58539164", "text": "def listify(s):\n if isinstance(s, list):\n return s\n elif isinstance(s, tuple):\n return list(s)\n else:\n return [s]", "title": "" }, { "docid": "bf3fa6d0e48b3d256425a330695774d7", "score": "0.58497393", "text": "def removeNesting(nested_lst):\n \n new_lst = []\n for tuple_ in nested_lst:\n new_lst.append(tuple_[0])\n return new_lst", "title": "" }, { "docid": "8bb2990efd2e82902386c415eceec405", "score": "0.584293", "text": "def unlist(_list):\n if len(_list) == 1:\n return _list[0]\n else:\n return _list", "title": "" }, { "docid": "e6315a5a848ceb184725b551778871c7", "score": "0.5836541", "text": "def to_list(*args):\n return args", "title": "" }, { "docid": "1fe36c36b4c2c680a0c199d70d24a89f", "score": "0.5803557", "text": "def candidatelist(list1,tt):\n list2=[]\n if isinstance(list1, tuple):\n for i in range(len(list1[0])):\n list2.append(move(list1[0][0],tt))\n else:\n for i in range(len(list1)):\n list2.append(move(list1,tt))\n return list2", "title": "" }, { "docid": "48f2a66a14d8e5544d4fdbfdc6c2087e", "score": "0.5801643", "text": "def list_2dimension_convert(self, lst):\r\n self.matrix_value = []\r\n for sub_list in lst:\r\n self.matrix_value.append(sub_list)", "title": "" }, { "docid": "28fc8d4016a069a641667dd4cd28b4d6", "score": "0.5798221", "text": "def to_tuple(list_in):\n #print( 'im in to_tuple {var1}'.format(var1=list_in))\n result = tuple(list_in)\n #print( '{var1}'.format(var1=str(result)))\n if len(result) == 0:\n result = \"in ('0')\"\n elif len(result) > 1:\n result = \"in 
{ids}\".format(ids=result)\n else:\n result = \"= '{ids}'\".format(ids = result[0])\n return result", "title": "" }, { "docid": "453a1844d83a65086af9bbcf5ff36d76", "score": "0.5785629", "text": "def list_of_lists(list_of_tups):\n\n list_of_lsts = []\n for tup in list_of_tups:\n list_of_lsts.append(list(tup))\n return list_of_lsts", "title": "" }, { "docid": "77aad638d9831ba57c3f680df324e3a0", "score": "0.5782874", "text": "def flatten(l):\n r = []\n for item in l:\n if isinstance(item, list) or isinstance(item, tuple):\n r.extend(flatten(item))\n else: r.append(item)\n return r", "title": "" }, { "docid": "d400f035a0aea053658e4dd8c5a21770", "score": "0.5777385", "text": "def to_list(self):\n return list(astuple(self))", "title": "" }, { "docid": "4129d1473c8db6aa3037c71a2f994ac5", "score": "0.5776782", "text": "def to_list(item) -> list:\r\n return item if isinstance(item, list) else [item]", "title": "" }, { "docid": "cb8db95832606e8c122ba3927b170c2b", "score": "0.57569146", "text": "def _field_to_tuple(field):\r\n if isinstance(field, (tuple, list)):\r\n return (field[0], field[1])\r\n return (field, None)", "title": "" }, { "docid": "c303ece0af3e7a18595397bbd0ecbac4", "score": "0.57521486", "text": "def every_second_element(list_arg):\n\n list_arg = [list_arg[i] for i in range(1, len(list_arg), 2)]\n return list_arg", "title": "" }, { "docid": "81a0933687c36991ed652e14ccc69eb4", "score": "0.57325816", "text": "def flattened(lst):", "title": "" }, { "docid": "59ac5aab77960c463e3fb0b512759feb", "score": "0.57310694", "text": "def adapt_features_list(self, features_list):\n\t\tresult = []\n\t\tfor features in features_list:\n\t\t\tfeatures = list(features)\n\t\t\tfeatures[1] = \"\"\n\t\t\tfeatures = tuple(features)\n\t\t\tresult.append(features)\n\t\treturn result", "title": "" }, { "docid": "eb1e679c94fc7cd9c5a96400ac8f3f8a", "score": "0.5723822", "text": "def flatten(*args):\n\n x = args[0] if len( args ) == 1 else args\n\n result = []\n for el in x:\n #if isinstance(el, (list, tuple)):\n if IsSeq( el ): result.extend(flatten(el))\n else: result.append(el)\n return tuple( result )", "title": "" }, { "docid": "ae59c8b341b1c6826dbe950cde61a2a0", "score": "0.5720001", "text": "def merge_lists(list_result):\n return tuple(map(list, zip(list_result[0], list_result[1])))", "title": "" }, { "docid": "01200c7e3de44994bb449b9408818a78", "score": "0.5715183", "text": "def flatten_list(list):\r\n return [i for t in [v for v in list] for i in t]", "title": "" }, { "docid": "9d64f068d71e98b8dea538fac18b20fa", "score": "0.5709758", "text": "def make_list(iterable):\n if isinstance(iterable, (list, tuple)):\n return iterable\n return list(iterable)", "title": "" }, { "docid": "9a1b87f5a905ed9c89ee80a2e28072d9", "score": "0.570709", "text": "def choicify(values: Iterable[Any]) -> List[Tuple[Any, Any]]:\n return [(v, v) for v in values]", "title": "" }, { "docid": "4eed8cedcc8773f6ae90bcdbca9380af", "score": "0.5676634", "text": "def as_flattened_list(iterable):\n return [e for i in iterable for e in i]", "title": "" }, { "docid": "90ac8e3df57cbe1741c2b6f3426150b7", "score": "0.5675875", "text": "def _type_repacking(return_type, converted_list):\n if return_type == \"list\":\n return list(converted_list)\n elif return_type == \"tuple\":\n return tuple(converted_list)\n elif return_type == \"set\":\n return set(converted_list)", "title": "" }, { "docid": "720f467dd3ee53f9f089e85d20ca8d3e", "score": "0.56700283", "text": "def list_collate(batch):\n items = list(zip(*batch))\n\n for i in 
range(len(items)):\n if isinstance(items[i][0], (list, tuple)):\n items[i] = list(items[i])\n else:\n items[i] = default_collate(items[i])\n\n return items", "title": "" }, { "docid": "812fc3fc985ad9d9a33cdc6b20e1e44e", "score": "0.566689", "text": "def _convert_list(tokens):\n token = tokens.pop(0)\n if token == '(':\n a_list = []\n while tokens[0] != ')':\n a_list.append(ListParser._convert_list(tokens))\n tokens.pop(0) # pop off ')'\n return a_list\n return token", "title": "" }, { "docid": "39389ff424daaffd59810cb508e34ff7", "score": "0.56622726", "text": "def easy_unpack(elements: tuple) -> tuple:\n # your code here\n return elements[0], elements[2], elements[-2]", "title": "" }, { "docid": "3578850791ade5a675ff86de5d4a746b", "score": "0.5661572", "text": "def flatten(l):\n return (val for sublist in l for val in sublist)", "title": "" }, { "docid": "6a3285e4608f4017490bbcbd7750a374", "score": "0.56583166", "text": "def flatten(x):\n if not isinstance(x, (list, tuple)):\n return x # skip non-interables, including None\n y = []\n for e in x:\n if isinstance(e, (list, tuple)):\n y.extend(flatten(e))\n else:\n y.append(e)\n return y", "title": "" }, { "docid": "eca1625254f8a6aed7cf833dead17906", "score": "0.5654189", "text": "def tuplize(*items):\n return tuple(item for item in items if item is not None)", "title": "" }, { "docid": "f20cf06d9020b2f4dbb7af13fa6510fe", "score": "0.5649967", "text": "def lists_to_tuples(*args):\n L = []\n for i in zip(*args):\n L.append(i)\n return L", "title": "" }, { "docid": "3beba2516eaec587f99d9db2e51a5d20", "score": "0.5639606", "text": "def convertTuple(tup):\n string = ','.join(tup)\n return string", "title": "" }, { "docid": "a9f6fd787fe27b0b45f3ff779a75f853", "score": "0.5620126", "text": "def conv_position_2_list(position: Position):\n\n return [position.x, position.y]", "title": "" }, { "docid": "a9ef22c51a9529313cc13b1ec27a045a", "score": "0.56189394", "text": "def flatten_list(elem):\n if not elem:\n return []\n elif isinstance(elem, tuple):\n return [elem]\n elif isinstance(elem, list):\n combined = []\n for element in elem:\n flat = flatten_list(element)\n if flat:\n combined = combined + flat\n return combined\n else:\n print(type(elem))\n print(elem)", "title": "" }, { "docid": "c2179d400a2ff1a0663d614b1252769a", "score": "0.56131333", "text": "def unflatten_tuple(space: Tuple, x: np.ndarray) -> typing.Tuple[np.ndarray, ...]:\n dims = np.asarray([flatdim(s) for s in space.spaces], dtype=np.int_)\n list_flattened = np.split(x, np.cumsum(dims[:-1]))\n return tuple(unflatten(s, flattened) for flattened, s in zip(list_flattened, space.spaces))", "title": "" }, { "docid": "e616091d2d419bebbd31be4876f8eaff", "score": "0.5600429", "text": "def to_list(lst: List) -> tp.List:\n return [] if null(lst) else [head(lst)] + to_list(tail(lst))", "title": "" }, { "docid": "ff0a350e0bf13d8023bb88cd4c7b978a", "score": "0.5593872", "text": "def picking(items: Iterable[T]) -> Iterable[tuple[T, list[T]]]:\n items = list(items)\n for k, item in enumerate(items):\n yield item, items[:k]+items[k+1:]", "title": "" }, { "docid": "beb0a6fbd82a1df201a8342866cfa459", "score": "0.559282", "text": "def to_tuple(self) -> Tuple[int, int, int]:\n return self._transition_list[0].to_tuple()", "title": "" }, { "docid": "bc5e10906ef05f4e1e9e897131c6fab7", "score": "0.5590134", "text": "def list_flatten(data):\n flat = []\n for sub_list in data:\n if isinstance(sub_list, (list, tuple)):\n flat.extend(sub_list)\n else:\n flat.append(sub_list)\n return flat", "title": "" }, { 
"docid": "35859180c4928130ad719cfce098a6c9", "score": "0.55883163", "text": "def format_list(inp):\n formatted_list = []\n for value in inp:\n formatted_list.append(value.split(','))\n\n formatted_list = [(int(x[0]), int(x[1])) for x in formatted_list]\n return formatted_list", "title": "" }, { "docid": "b5f26994c3ab375952b1817a77fc2364", "score": "0.5582328", "text": "def _tomutable(value: Union[str, Tuple]) -> List:\n if isinstance(value, tuple):\n # convert immutable sequence into a list so it can be popped out\n value = list(value)\n elif isinstance(value, str):\n # convert immutable sequence into a list so it can be popped out\n if value.startswith('0b'):\n value = list(value[2:])\n else:\n value = list(value)\n return value", "title": "" }, { "docid": "652f346123db1d724fc5d81f6b7c4fd7", "score": "0.55729866", "text": "def convert_connections_to_tuples(connections_list):\n\n\tmap_func = lambda x: (x['from'], x['to'], {'routes':x['routes'], 'length': x['length']} )\n\n\treturn list(map(map_func, connections_list))", "title": "" }, { "docid": "d9d0859ecb4df02aea0a1d5a64ef71f9", "score": "0.55674154", "text": "def tolist(array) -> list:\n\tpass", "title": "" }, { "docid": "66ae0e586b65b82d2ac09f0e941ca60e", "score": "0.5563033", "text": "def to_list(data):\n if not isinstance(data, list):\n if isinstance(data, (tuple, set)):\n data = list(data)\n else:\n data = [data]\n return data", "title": "" }, { "docid": "291cf51330e29784a83f07d3cd188926", "score": "0.55456096", "text": "def flatten_nested_lists_to_list(inp):\n return list(flatten_nested_lists_to_generator(inp))", "title": "" }, { "docid": "64c37ddffedc01c822daf2556307ca6f", "score": "0.5542808", "text": "def adj_convert(self,adj):\n adj_list = []\n for i in range(adj.shape[0]):\n for j in range(adj.shape[0]):\n if i<j:\n l = tuple([i]+[j]+[adj[i][j]])\n adj_list.append(l)\n return adj_list", "title": "" }, { "docid": "31bb2763a02543349d2e9c12579fac49", "score": "0.554251", "text": "def listify_value(arg, split=None):\n out = []\n\n if not isinstance(arg, (list, tuple)):\n arg = [arg]\n\n for val in arg:\n if val is None:\n continue\n if isinstance(val, (list, tuple)):\n out.extend(listify_value(val, split=split))\n continue\n out.extend(s.strip() for s in str(val).split(split))\n assert all(isinstance(val, str) for val in out)\n return out", "title": "" }, { "docid": "73369dc48488006598c119d78179e6ef", "score": "0.5529919", "text": "def as_tuple(x):\n if isinstance(x, np.ndarray):\n return tuple(x.flat)\n elif hasattr(x, '__iter__'):\n return tuple(x)\n else:\n return (x,)", "title": "" }, { "docid": "5574dcb5260768c38c182790b3bfad8c", "score": "0.55242825", "text": "def _make_tuple(loader, node):\n tuple_string = loader.construct_scalar(node)\n list_string = \"[\" + tuple_string[1:-1] + \"]\"\n parsed_list = yaml.load(list_string, Loader=yaml.Loader)\n return tuple(parsed_list)", "title": "" }, { "docid": "291ea04c63eb48c86176cc598dd034d4", "score": "0.5521635", "text": "def to_struct(self, value):\n result = []\n for item in value:\n result.append(item.to_struct())\n return result", "title": "" } ]
3058d15b600c04c5550977cf6df2908e
coins is a list of the values of the coins in your possession
[ { "docid": "bc85cbe1656ecb56861d4ce0ad7b503a", "score": "0.6165489", "text": "def makeChange(coins, total):\n\n if total <= 0:\n return 0\n\n if coins is None or coins == []:\n return -1\n\n coins = sorted(coins, reverse=True)\n cont_coins = 0\n\n for coin in coins:\n if coin <= total:\n cont_coins += int(total / coin)\n total = total % coin\n\n if total == 0:\n return cont_coins\n\n return -1", "title": "" } ]
[ { "docid": "9d89ece5b00e635df5659787b3410db5", "score": "0.7158293", "text": "async def coins(self, ctx: BBContext):\n pass", "title": "" }, { "docid": "8eabe22d656c02c186b671b60b5e40f6", "score": "0.7005791", "text": "def _get_coins(self, num_coins):\n min_requestable_pages = math.ceil(num_coins / 250)\n results_per_page = math.ceil(num_coins / min_requestable_pages)\n \n coins = []\n url = 'https://api.coingecko.com/api/v3/coins/markets?vs_currency=usd&order=market_cap_desc'\n \n # iterates through a paginated API response to get all coins\n for page_number in range(min_requestable_pages):\n response = requests.get(url + f'&per_page={results_per_page}&page={page_number+1}').json() \n if response:\n coins += response\n else:\n break\n \n # truncates excess coins from API call\n remainder = len(coins) - num_coins\n \n if remainder > 0:\n coins = coins[:-remainder]\n \n return coins", "title": "" }, { "docid": "ae8c45312e6e574440558afc83ff6b33", "score": "0.70041794", "text": "def return_coins(user_money : int, coin_list : list) -> list:\n user_coins = []\n for coin in coin_list:\n while(user_money >= coin):\n user_coins.append(coin)\n user_money -= coin\n return user_coins", "title": "" }, { "docid": "40a36488ee6e17f21058c8918fc7265e", "score": "0.6959665", "text": "def get_coins(self):\n url_path = 'getcoins'\n return self._shapeshift_get_request(url_path)", "title": "" }, { "docid": "46eb5dc908e49045e54039b9a491d1db", "score": "0.69119585", "text": "def coins(self, coins):\n\n self._coins = coins", "title": "" }, { "docid": "9c44279a278089377aec3cef69615a48", "score": "0.6674714", "text": "def coins(num_coins):\n\n def _coins(num_coins, total, sums):\n # base case - if we have no more coins left, take our total and add to list\n if num_coins == 0:\n sums.append(total) \n return sums # ex. [12]\n \n else:\n p = _coins(num_coins-1, total+1, sums)\n d = _coins(num_coins-1, total+10, sums)\n \n # now we have 2 lists we need to merge, but sort first\n return sorted(p + d) # ex. 
[3] + [12] -> [3, 12]\n \n # the final return from _set() will be a sorted list of all possible sums, so remove dups with a set\n return set(_coins(num_coins, 0, sums=[]))", "title": "" }, { "docid": "cc296d9ae9f67aee43253ca5c231687a", "score": "0.6552558", "text": "def calculate_btc_value_portfolio(coins,portfolio):\n balances = Exchange.balances()\n btc_value = 0\n for coin in coins:\n market = portfolio[coin]\n price = Exchange.market_orderbook(market,1,\"buy\")[\"data\"][\"buyorders\"][0][\"price\"]\n amount = balances[\"data\"][\"available\"][\"{}\".format(coin)]\n btc_value += price*amount\n return btc_value", "title": "" }, { "docid": "6639a952d9faed365427360e2c979c1f", "score": "0.6519559", "text": "def coins_summary(coins):\r\n summary = {head : 0, tail : 0}\r\n for coin in coins: \r\n summary[coin] += 1 \r\n return summary", "title": "" }, { "docid": "0995ba5e562e5014408f58f2ae3a6242", "score": "0.6248133", "text": "def cg(amount, coins):\n\tif amount == 0: \n\t\treturn 0, []\n\n\n\n\t#take first coin\n\tif not coins or amount < coins[0]:\n\t\treturn float(\"inf\"), []\n\n\tfirst = coins[0]\n\tv1, ans1 = cg(amount-first, coins)\n\n\n\t#don't take it\n\trest = coins[1:]\n\tv2, ans2 = cg(amount, coins[1:])\n\n\treturn (1+v1, [first] + ans1) if (v1 + 1) < v2 else (v2, ans2)", "title": "" }, { "docid": "8e635e5f2ddab4ba10eb92627c443825", "score": "0.61233157", "text": "def reset_coins(self):\n game_coins = self.added_coins\n level_coins = self.game.get_coins()\n coin_items = self.added_coin_items\n # Check if the coin of the level is not in the list of coins of the game\n for coin in level_coins:\n if coin not in game_coins:\n for coin_item in coin_items:\n if coin_item.get_coin() == coin:\n # Add the coin back to the list of coins and scene\n location = coin.get_location()\n square = self.game.get_square(location)\n square.set_coin()\n self.scene.addItem(coin_item)\n self.added_coins.append(coin)", "title": "" }, { "docid": "22e10f42ddd8dfa5eb42e0ead425c039", "score": "0.6069606", "text": "def pmf_coin():\n rvs,ps = [0,1],[0.5,0.5]\n return pmf_list(rvs,ps)", "title": "" }, { "docid": "8218b4ab0aa207657f4fc3a73064e3ad", "score": "0.6060353", "text": "def coins_inserted(*args):\n\n print('coin inserted')\n if CoinAcceptor.pulses == 3:\n print('5 Cent')\n shared_values.SharedValues.FIAT += 0.05\n if CoinAcceptor.pulses == 4:\n print('10 Cent')\n shared_values.SharedValues.FIAT += 0.10\n if CoinAcceptor.pulses == 5:\n print('20 Cent')\n shared_values.SharedValues.FIAT += 0.20\n if CoinAcceptor.pulses == 6:\n print('50 Cent')\n shared_values.SharedValues.FIAT += 0.50\n\n # the number of pulses has to be set to zero to match the next incoming number of pulses\n CoinAcceptor.pulses = 0", "title": "" }, { "docid": "92cc489788420c2bd4710117f6e16279", "score": "0.6009007", "text": "def coinChange(coins,amount):\n table = [math.inf]*(amount+1) # by defualt min no. 
of coins required is infinite\n table[0] = 0\n \n for i in range(1,amount+1):\n for c in coins:\n if i-c>=0:\n table[i] = min(table[i],table[i-c]+1)\n \n if(table[amount]==math.inf):\n return -1\n else:\n return table[amount]", "title": "" }, { "docid": "5263e4549e32abc654364f980ce11b0a", "score": "0.5997778", "text": "def coinChange(coins: List[int], amount: int) -> int:\n dp:List[int] = [float('inf') for j in range(amount + 1)]\n dp[0] = 0\n \n for c in coins:\n for i in range(c, amount + 1):\n dp[i] = min(dp[i], dp[i - c] + 1)\n \n if dp[-1] == float('inf'): return -1\n else: return dp[-1]", "title": "" }, { "docid": "5574162717890cbc8a9e95b333fa068c", "score": "0.59813523", "text": "def run() -> None:\n print(\"How much money is to be converted into coins?\")\n user_input = int(input())\n coin_list = [25, 10, 5, 2, 1]\n user_coins = return_coins(user_input, coin_list)\n print(user_coins)", "title": "" }, { "docid": "387e86001b7331eacd2617b7f5fa53d0", "score": "0.5959527", "text": "def coin_combinations(total_pennies: int, coins: list[int]) -> list[list[int]]:\n if total_pennies == 0:\n return [[]]\n\n combinations: list[list[int]] = []\n for coin in coins:\n if coin <= total_pennies:\n lower_coins = COINS[COINS.index(coin) :] # Coins with same or lower denomination than coin\n combinations_of_rest = coin_combinations(total_pennies - coin, coins=lower_coins)\n combinations.extend([[coin, *combination] for combination in combinations_of_rest])\n\n return combinations", "title": "" }, { "docid": "b04b4121ba3f20320f769cdfb292f365", "score": "0.59512484", "text": "def find_min_coins(v, coins_available):\n\n # Write your code here!\n\n coins = []\n stack = list(coins_available)\n while v > 0:\n coin = stack.pop()\n num_coins = v // coin\n v -= (num_coins * coin)\n while num_coins > 0:\n coins.append(coin)\n num_coins -= 1\n\n return coins", "title": "" }, { "docid": "5cd224bda1fe7c9a40e98d67e6283594", "score": "0.59396565", "text": "def process_coins():\n print(\"Please insert coins.\")\n total = int(input(\"how many quarters?: \")) * 0.25\n total += int(input(\"how many dimes?: \")) * 0.1\n total += int(input(\"how many nickles?: \")) * 0.05\n total += int(input(\"how many pennies?: \")) * 0.01\n return total", "title": "" }, { "docid": "729bdd99232a5fa2c8aba2537f67edb9", "score": "0.59337294", "text": "def makeChange(coins, total):\n if total <= 0:\n return 0\n\n coins = sorted(coins, reverse=True)\n coinCount = 0\n\n for coin in coins:\n coinCount += int(total / coin)\n total = total % coin\n\n if total != 0:\n return -1\n\n return coinCount", "title": "" }, { "docid": "37498006bf85cfc6ecbbdc57db369794", "score": "0.5928823", "text": "def process_coins():\n print(\"please insert coins\")\n total = int(input(\"how many quarters:\\n ?\")) * 0.25\n total += int(input(\"how many dime:\\n ?\")) * 0.10\n total += int(input(\"how many nickles:\\n ?\")) * 0.5\n total += int(input(\"how many pennis:\\n ?\")) * 0.1\n return total", "title": "" }, { "docid": "2878df98623e02ec8467e674a420d110", "score": "0.59220266", "text": "def makeChange(coins, total):\n if total > 0:\n checked = [True]\n checked.extend(False for _ in range(total))\n n_coins = 0\n queue = [0]\n while queue:\n n_coins += 1\n level = []\n for value in queue:\n for coin in coins:\n if value + coin == total:\n return n_coins\n if value + coin >= total:\n continue\n if not checked[value + coin]:\n checked[value + coin] = True\n level.append(value + coin)\n queue = level\n return -1\n return 0", "title": "" }, { "docid": 
"b74f4e9091b2fb5b8e24fe31d12bf405", "score": "0.5897598", "text": "def makeChange(coins, total):\n if total < 1:\n return 0\n min_arr = [float(\"inf\")] * (total + 1)\n min_arr[0] = 0\n\n for i in range(1, len(min_arr)):\n for c in coins:\n if i >= c:\n min_arr[i] = min(min_arr[i], min_arr[i - c] + 1)\n return min_arr[total] if min_arr[total] != float(\"inf\") else -1", "title": "" }, { "docid": "ca2283d9e6a84c2020c49284f6042125", "score": "0.58632165", "text": "def coin_change(amount: int, coins: List[int]) -> int:\n # table of ways\n ways = [0]*(amount+1)\n ways[0] = 1\n for coin in coins:\n for target in range(1, amount+1):\n if coin <= target:\n ways[target] = ways[target] + ways[target - coin]\n\n return ways[amount]", "title": "" }, { "docid": "9c6c42af8028cb5a8128fca2ac1424c5", "score": "0.5822389", "text": "def update_bitcoins(self):\n bitcoin = self.poloniex.returnTicker()[self.bitcoin_key]\n self.bitcoin_USD_ask = float(bitcoin[self.ask_key])\n self.bitcoin_USD_bid = float(bitcoin[self.bid_key])\n self.bitcoin_USD_lasttrade = float(bitcoin[self.last_key])", "title": "" }, { "docid": "d48d61fac1760f3b8f1af2d52db721df", "score": "0.5728908", "text": "def get_coins(filename):\n with open(filename, 'r') as f:\n return [name.rstrip() for name in f]", "title": "" }, { "docid": "7cbdcf982455a01a0ed6413eddae6452", "score": "0.5711024", "text": "def print_summary(coins):\r\n summary = coins_summary(coins)\r\n strcoins = coins_to_str(coins)\r\n print('There are {} heads and {} tails: {}'.format(summary[head], summary[tail], strcoins))", "title": "" }, { "docid": "3eda7d4f9563e7980c5a01b03f3a78b4", "score": "0.570589", "text": "def flip_coins(n):\r\n coins = []\r\n for _i in range(n): \r\n coins.append(random.choice(coin_sides))\r\n return coins", "title": "" }, { "docid": "68ff15de56e53eeb5210bd8039f18c8d", "score": "0.57003486", "text": "def coins_to_str(coins):\r\n symbols = {head : 'o', tail : 'x'} \r\n expression = \"\"\r\n for coin in coins:\r\n expression += symbols[coin] + \" \"\r\n return expression", "title": "" }, { "docid": "51554336a0fd7f67f1ece42f7de77fce", "score": "0.5696409", "text": "def coin_combinations(target, coins):\n remainder = coins[1:]\n total = 0\n for subtotal in xrange(0, target+1, coins[0]):\n if len(remainder) > 1:\n total += coin_combinations(target-subtotal, remainder)\n else:\n # only 1p coins left\n total += 1\n return total", "title": "" }, { "docid": "1dbd91e01ced288bd8d55e08caa929be", "score": "0.56626093", "text": "def choices_from_coin_bundles():\n return [(str(bundle.get(\"coins\")), bundle.get(\"label\")) for bundle in COIN_BUNDLES]", "title": "" }, { "docid": "b64ed8bb0667eb1fe68ccbea01e74c9d", "score": "0.5661621", "text": "def pay(self, guzis):\n for guzi in guzis:\n self.balance.income.append(guzi)", "title": "" }, { "docid": "66642a46ee9616c34fe5c90a6d42db56", "score": "0.5659148", "text": "def __load_data(self, file) -> list:\n data = DatabaseDAL.load_data(file)\n return data[\"coins\"]", "title": "" }, { "docid": "d4bb6f86e0678642d438f1152d2000dd", "score": "0.5658031", "text": "def makeChange(coins, total):\n if total <= 0:\n return 0\n coins = sorted(coins)\n change = [total + 1] * (total + 1)\n change[0] = 0\n for i in range(total + 1):\n for coin in coins:\n if coin <= i:\n change[i] = min(change[i], 1 + change[i - coin])\n\n return change[total] if change[total] < total else -1", "title": "" }, { "docid": "6908cca7a0ab807a44f69d87ca526ef5", "score": "0.564908", "text": "def insert_coin(self, coin):\r\n\r\n if coin in 
self.accepted_coins:\r\n self.deposit.append(coin)\r\n else:\r\n print(\"Coin type not accepted\")\r\n return coin", "title": "" }, { "docid": "43dcd259e48bb0bba9cf780e2ccbd615", "score": "0.5643118", "text": "def change(amount, coins):\n\tassert amount >= 0 and 1 in coins, \"Does not satisfy preconditions for fn 'change'.\"\n\treturn cg(amount, sorted(coins))", "title": "" }, { "docid": "87ebb915d330d8f721a9a05b8b79cd27", "score": "0.56339186", "text": "def getCoinSupply(self):\n return self.call(\"getcoinsupply\")", "title": "" }, { "docid": "8967af963dc52a9176bbb6674b5f3b1e", "score": "0.5613981", "text": "def main():\n while True:\n change = float(input(\"Change owed: \"))\n if change >= 0:\n break\n\n change *= 100\n quarters = int(change / 25)\n dimes = int((change % 25) / 10)\n nickels = int(((change % 25) % 10) / 5)\n pennies = int((((change % 25) % 10) % 5))\n\n coins = quarters + dimes + nickels + pennies\n print(coins)", "title": "" }, { "docid": "e4a2740f68fe9a5daee4e7573f8711b6", "score": "0.56066114", "text": "def get_coin_list():\n\n return pd.DataFrame(\n client.get_coins_list(),\n columns=[COLUMNS[\"id\"], COLUMNS[\"symbol\"], COLUMNS[\"name\"]],\n ).reset_index()", "title": "" }, { "docid": "e0aea424b9d0b7e7f5ecc940bd064881", "score": "0.55982727", "text": "def updateBoardCoins():\n list_of_coins = []\n # capture the video with a VideoCapture object\n # argument 0 usually selects your laptop integrated webcam, other number (1,2,3.. try each!) grabs other cams\n num_caps = 80\n frames = []\n cap = cv2.VideoCapture(-1)\n time.sleep(2)\n\n for k in range(num_caps):\n ret, frame = cap.read()\n if not ret:\n break\n else:\n if k > 20:\n frames.append(frame)\n\n for frame in frames:\n coins = []\n if ret == True:\n cimg = frame # get the image as the coloured image\n img = cv2.cvtColor(cimg, cv2.COLOR_BGR2GRAY) # convert to grayscale\n # run a filter over it to remove salt and pepper noise\n img = cv2.medianBlur(img, 9)\n # Hough Transform, you will probably need to play with the parameters here\n circles = cv2.HoughCircles(\n img, cv2.HOUGH_GRADIENT, 1.9, 10, param1=50, param2=30, minRadius=8, maxRadius=12)\n if (circles is not None):\n for i in circles[0, :]:\n coin = Coin() # initialize the coin class\n if isWhite(img, i): # coin is white in colour\n # blue colour center marker for our coin\n cv2.circle(cimg, (i[0], i[1]), 2, (255, 0, 0), 3)\n # get the pixel value of the center of the coin\n pixel_x_cam = i[0]-320 - offset_pixel_x\n pixel_y_cam = -(i[1]-240) - offset_pixel_y\n # compute the position in mm\n X = pixel_x_cam/focus_x*Depth\n Y = pixel_y_cam/focus_y*Depth\n Xunrot = X\n X = np.cos(theta)*Xunrot - np.sin(theta)*Y\n Y = np.sin(theta)*Xunrot + np.cos(theta)*Y\n if(isCue(X, Y)):\n coin.set_position(X, Y)\n coin.set_identity(3)\n coins.append(coin)\n elif(withinBoardBoundary(X, Y)):\n coin.set_position(X, Y)\n coin.set_identity(1)\n coins.append(coin)\n else:\n pass\n elif isBlack(img, i): # coin is black in colour\n # red colour center marker for black (opponent)\n cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)\n # get the pixel value of the center of the coin\n pixel_x_cam = i[0]-640/2 - offset_pixel_x\n pixel_y_cam = -(i[1]-480/2) - offset_pixel_y\n # compute the position in mm\n X = pixel_x_cam/focus_x*Depth\n Y = pixel_y_cam/focus_y*Depth\n Xunrot = X\n X = np.cos(theta)*Xunrot - np.sin(theta)*Y\n Y = np.sin(theta)*Xunrot + np.cos(theta)*Y\n if(withinBoardBoundary(X, Y)):\n coin.set_position(X, Y)\n coin.set_identity(2)\n coins.append(coin)\n else:\n pass # 
circle not of interest\n\n list_of_coins.append(coins)\n numbers_of_coins = []\n for j in range(len(list_of_coins)):\n numbers_of_coins.append(len(list_of_coins[j]))\n print(numbers_of_coins)\n counts = np.bincount(numbers_of_coins)\n idx_max_freq = np.argmax(counts)\n cv2.destroyAllWindows()\n return list_of_coins[idx_max_freq]", "title": "" }, { "docid": "02d71dbb375a4fe7115eb341377aeb65", "score": "0.5575148", "text": "def change_coins3(M, c):\n d = len(c)\n lookup = [None] * (M+1)\n lookup[0] = 0\n # for m from 1 to M\n for m in range(1, M+1):\n lookup[m] = float(\"inf\")\n # for each of the coins we have\n for i in range(d):\n # if the coin is equal or smaller than the m we're changing\n if c[i] <= m:\n \"\"\"\n Suppose you need to make change for 77 cents and the only coin denominations \n available are 1, 3, and 7 cents. The best combination for 77 cents will\n be one (the minimum) of the following:\n • the best combination for 77 − 1 = 76 cents, plus a 1-cent coin;\n • the best combination for 77 − 3 = 74 cents, plus a 3-cent coin;\n • the best combination for 77 − 7 = 70 cents, plus a 7-cent coin.\n \"\"\"\n cnt1 = lookup[m-c[i]] + 1\n cnt2 = lookup[m]\n print(\"m: {}. cnt1: {}. cnt2: {}\".format(m, cnt1, cnt2))\n lookup[m] = min(cnt1, cnt2)\n # our answer\n return lookup[M]", "title": "" }, { "docid": "5737794aeed3d7c85f45e9e0d642149c", "score": "0.5565458", "text": "def test_coin_type_value(self):\n\n my_quarter = Quarter()\n my_nickel = Nickel()\n my_dime = Dime()\n my_penny = Penny()\n\n list_of_coins = []\n list_of_coins.append(my_quarter)\n list_of_coins.append(my_penny)\n list_of_coins.append(my_dime)\n list_of_coins.append(my_nickel)\n\n total_value = display_payment_value(list_of_coins)\n self.assertEqual(total_value, .41)", "title": "" }, { "docid": "669f53597436b47500f3faa7ce46ad12", "score": "0.55645484", "text": "def showCoins(self):\n\t\t#the private key is sent as form url encoded content\n\t\t#var formContent = new FormUrlEncodedContent(new[] { new KeyValuePair<string, string>(\"pk\", keys.privatekey), new KeyValuePair<string, string>(\"account\", keys.account) });\n\t\tjson = \"error\"\n\t\ttry:\n\t\t\tshowCoins = self._cli.GetAsync(\"https://\" + self._keys.publickey + \"/show_coins.aspx?pk=\" + self._keys.privatekey + \"&account=\" + self._keys.account)\n\t\t\tjson = showCoins.Content.ReadAsStringAsync()\n\t\t\tbankTotals = JsonConvert.DeserializeObject(json)\n\t\t\tif bankTotals.status == \"coins_shown\":\n\t\t\t\tself.onesInBank = bankTotals.ones\n\t\t\t\tself.fivesInBank = bankTotals.fives\n\t\t\t\tself.twentyFivesInBank = bankTotals.twentyfives\n\t\t\t\tself.hundredsInBank = bankTotals.hundreds\n\t\t\t\tself.twohundredfiftiesInBank = bankTotals.twohundredfifties\n\t\t\telse:\n\t\t\t\tConsole.Out.WriteLine(bankTotals.status)\n\t\t\t\tfailResponse = JsonConvert.DeserializeObject(json)\n\t\t\t\tConsole.Out.WriteLine(failResponse.message)\n\t\texcept HttpRequestException, ex:\n\t\t\tConsole.Out.WriteLine(\"Exception: \" + ex.Message)\n\t\t\tConsole.Out.WriteLine(\"Check your connection, or your public key\")\n\t\t\treturn \n\t\texcept JsonSerializationException, ex: #end try catch\n\t\t\tConsole.Out.WriteLine(ex.Message)\n\t\t\tConsole.Out.WriteLine(json)\n\t\texcept JsonReaderException, ex:\n\t\t\tConsole.Out.WriteLine(ex.Message)\n\t\t\tConsole.Out.WriteLine(json)\n\t\tfinally:", "title": "" }, { "docid": "a9a88f4d2bdcd8961454f6c2b1611c33", "score": "0.55322516", "text": "def get_num_coins_in_hand(self):\n coins_in_hand = 0\n for card in self.hand:\n if 
card.type == 'Treasure':\n coins_in_hand += card.value\n return coins_in_hand", "title": "" }, { "docid": "c2745525f97451588d4902ed87f7285a", "score": "0.5519121", "text": "def __spawn_coins(self, _):\n if rnd.uniform(0, 1) < self.gen_rate:\n return\n\n pos = self.x_start, rnd.uniform(self.ymin, self.ymax)\n coin = Coin(pos)\n coin.set_collidable('coin')\n\n coin.do(ColliderMoveTo((-10, pos[1]), duration=2))\n wrapper = cocosnode.CocosNode()\n wrapper.add(coin, name=\"coin\")\n jumping = actions.MoveBy((0, 10), duration=0.1)\n wrapper.do(actions.Repeat(jumping + actions.Reverse(jumping)))\n self.add(wrapper)", "title": "" }, { "docid": "54aa0092db6600f6aa341ce765e9e935", "score": "0.5515734", "text": "def play_all_coins(self):\n for card in self.hand[:]:\n if card.type == 'Treasure':\n self.play_coin(card)", "title": "" }, { "docid": "fe1f7959a3e0679492cb0756620b3b76", "score": "0.55148405", "text": "def make_change(paid, owed, bills_coins):\n\tordered_bills_coins = bills_coins\n\tordered_bills_coins.sort(reverse = True)\n\t# Amount of change owed\n\tchange = paid - owed\n\tif change > 0:\n\t\t# Whole number amount of change owed (for when currencies have decimals)\n\t\twhole = int(change // 1)\n\t\t# Amount of change required that is less than 1\n\t\tdecimal = change - whole\n\t\t# Dictionary of bills and coins required\n\t\tbc_dict = {key: 0 for key in ordered_bills_coins}\n\t\t\n\t\tfor bc in bc_dict:\n\t\t\t# If the bill or coin has a whole number value, use whole number division and mod\n\t\t\tif int(bc // 1) >= 1:\n\t\t\t\tbc_dict[bc] = int(whole // bc)\n\t\t\t\twhole = whole % bc\n\t\t\telse:\n\t\t\t\tcount_bc = 0\n\t\t\t\twhile decimal - bc > 0:\n\t\t\t\t\tdecimal -= bc\n\t\t\t\t\tcount_bc += 1\n\t\t\t\tbc_dict[bc] = count_bc\n\t\treturn bc_dict\n\telse:\n\t\traise ValueError(\"Amount of change must be greater than 0\")", "title": "" }, { "docid": "0ffd9739a99e62c7c698d439c7933fc6", "score": "0.551182", "text": "def General_Coin(posn):\n rho = 0.3\n phi = 0.3\n theta = 0.3\n return normalise( [ [ math.sqrt(rho)*x[0] + math.sqrt(1 - rho)*cmath.exp(cmath.pi*1j*phi)*x[1], math.sqrt(1 - rho)*cmath.exp(cmath.pi*1j*theta)*x[0] - math.sqrt(rho)*cmath.exp(cmath.pi*1j*(phi+theta))* x[1]] for x in posn] )", "title": "" }, { "docid": "b08eb7ec0046cad953fe2d718f44d8d2", "score": "0.551011", "text": "def add_coin_graphics_items(self):\n coins = self.game.get_coins()\n for coin in coins:\n # If the coin is not in the game, new graphics item is created\n if coin not in self.added_coins:\n new_item = CoinGraphicsItem(coin, self.square_size, YELLOW)\n self.scene.addItem(new_item)\n self.added_coin_items.append(new_item)\n self.added_coins.append(coin)\n # If the coin is in the game, its graphics item is visualized in the game\n elif coin in self.added_coins:\n items = self.added_coin_items\n for item in items:\n if item.get_coin() == coin:\n self.scene.addItem(item)", "title": "" }, { "docid": "00a1f0326121f7888e172e69ecc9498e", "score": "0.54968715", "text": "def add_coin(coin, db, coins):\r\n lg.debug(\"> add_coin(%s)\", coin)\r\n\r\n sql_select = \"SELECT username FROM t_users WHERE username NOT IN (SELECT username FROM t_addrs WHERE coin = %s) ORDER BY username\"\r\n sql_insert = \"REPLACE INTO t_addrs (username, coin, address) VALUES (%s, %s, %s)\"\r\n\r\n try:\r\n\r\n mysqlsel = db.execute(sql_select, (coin))\r\n for m in mysqlsel:\r\n # Generate new coin address for user\r\n new_addr = coins[coin].getnewaddr(_user=m['username'])\r\n lg.info(\"add_coin(): got new address %s for %s\", 
new_addr, m['username'])\r\n # Add new coin address to MySQL\r\n mysqlins = db.execute(sql_insert, (m['username'].lower(), coin, new_addr))\r\n if mysqlins.rowcount <= 0:\r\n raise Exception(\"add_coin(%s): rowcount <= 0 when executing <%s>\", coin, sql_insert % (m['username'].lower(), coin, new_addr))\r\n time.sleep(1)\r\n\r\n except Exception, e:\r\n lg.error(\"add_coin(%s): error: %s\", coin, e)\r\n raise\r\n\r\n lg.debug(\"< add_coin(%s) DONE\", coin)\r\n return True", "title": "" }, { "docid": "688f832ad4609907869400f331b80829", "score": "0.5474256", "text": "def test_coin_supply(self):\n pass", "title": "" }, { "docid": "2551a64fa04142c5b47b79f288f33279", "score": "0.54659235", "text": "def nCents(n):\n options = [25, 10, 5, 1]\n value = 0\n nCoins = 0\n for coin in options:\n while value + coin <= n:\n value += coin\n nCoins += 1\n return nCoins", "title": "" }, { "docid": "9ca77788eabf0f4175efe3a1de562634", "score": "0.5456765", "text": "def get_coin_list(self, formated: bool = False) -> Union[Dict, List, None]:\n resp = self.fetch2('coin_list')\n coin_list = []\n for k, v in resp.get('Data', {}).items():\n assert k == v['Symbol']\n coin_list.append(\n {\n 'symbol': k,\n 'name': v['Name'],\n 'coin_name': v['CoinName'],\n 'full_name': v['FullName'],\n 'description': v['Description']\n }\n )\n return coin_list", "title": "" }, { "docid": "a6f0743b1fcce33764ff445acc959bb7", "score": "0.5448874", "text": "def add(self, coin):\n\n if not isinstance(coin, Money):\n print(\"zly nominal\")\n return\n #ustawienie limitu ilosc monet\n #oraz sprawdzanie czy nie został on przepełniony\n if coin.get_val() not in (10, 20, 50):\n if self._moneycount[coin.get_val()] == 200:\n print(\"Magazyn monet tego nominalu jest pelny\")\n return\n else:\n self._moneycount[coin.get_val()] += 1\n self._moneysum += coin.get_val()\n print(\"Dodano\", coin.get_val(), \"kredytu\")", "title": "" }, { "docid": "eb2842268d3a3bcf56a3f42bd9289de2", "score": "0.54312825", "text": "def Hadamard_Coin(posn):\n return normalise( [[x[0] + x[1], x[0] - x[1]] for x in posn] )", "title": "" }, { "docid": "c0fd46fa4c111bba416280d4e8a6debe", "score": "0.5377277", "text": "def change(self, amount: int, coins: List[int]) -> int:\n \n coins.sort()\n n = len(coins)\n memo_dict = {}\n #f(i,x) = no_of_ways to make x with coins[i:]\n #want f(0,amount)\n \n def f(start_index,left_over_amount):\n if (start_index, left_over_amount) in memo_dict:\n return memo_dict[(start_index, left_over_amount)]\n \n if start_index >= n and left_over_amount != 0:\n memo_dict[(start_index, left_over_amount)] = 0\n return 0\n elif left_over_amount == 0:\n memo_dict[(start_index, left_over_amount)] = 1\n return 1\n elif left_over_amount < 0:\n memo_dict[(start_index, left_over_amount)] = 0\n return 0\n else:\n memo_dict[(start_index, left_over_amount)] = f(start_index, left_over_amount - coins[start_index]) + f(start_index+1, left_over_amount)\n return memo_dict[(start_index, left_over_amount)]\n \n\n return f(0,amount)", "title": "" }, { "docid": "bfc7beaa8d289cb2e940b81cc7a5ba1e", "score": "0.53587234", "text": "def buy(portfolio,hgc_in_circulation,user_address): # Might need some CLEANING up.\n # Did btc arrrive in account\n balance = check_money_in_bank(3)\n # What are we gonna buy with the available btc?\n number_of_coins = len(portfolio)\n \n open_orders = []\n i=0\n \n # Chech number of coins BEFORE buy\n coins_before_buy = calculate_holdings(portfolio)\n \n # Place order for each coin in the portfolio\n for coin in portfolio:\n if i<(number_of_coins 
-1):\n order_id = open_order(balance/number_of_coins,portfolio[coin])\n open_orders.append(order_id)\n i+=1\n\n else:\n # Check remainder in btc account and place order using this remainder\n balance = check_money_in_bank(3)\n order_id = open_order(balance,portfolio[coin])\n open_orders.append(order_id)\n\n # Check number of coins AFTER buy\n if check_open_orders(open_orders):\n coins_after_buy = calculate_holdings(portfolio)\n # Calculate amount of hgc and send to user\n calculate_send_hgc(coins_before_buy,coins_after_buy,portfolio,hgc_in_circulation,user_address)", "title": "" }, { "docid": "666187bde50c9166064b7e0133692cc6", "score": "0.5352623", "text": "def plus_coins(self, num):\n self.num_coins += num\n print(\"+$\" + str(num))", "title": "" }, { "docid": "c3ebf7c967b045e952c0708de6279766", "score": "0.5349569", "text": "def add_coins(self, frame, widgets):\n coins = ('penny', 'nickel', 'dime', 'quarter')\n for i in range(4):\n self.get_image_label(coins[i], frame).grid(row=0, column=2 + i, padx=3)\n widgets[i].grid(row=1, column=(2 + i))", "title": "" }, { "docid": "7fed2f4babec562595cb147c7a0d989c", "score": "0.53440726", "text": "def draw_score(client, coins):\n score = client.text_format(\n str(coins), client.font, 60, client.settings[\"colors\"][\"white\"]\n )\n client.screen.blit(\n pg.transform.scale(client.data.item_img[\"coin\"], (92, 92)),\n (client.settings[\"gen\"][\"width\"] - 100, client.settings[\"gen\"][\"height\"] - 100),\n )\n score_x = 75 + len(str(coins)) * 25\n client.screen.blit(\n score,\n (\n client.settings[\"gen\"][\"width\"] - score_x,\n client.settings[\"gen\"][\"height\"] - 70,\n ),\n )", "title": "" }, { "docid": "22335ad6e0e5ee763703a0d67ab02c6b", "score": "0.5335945", "text": "def coinify(atoms):\n return round(atoms / 1e8, 8)", "title": "" }, { "docid": "36c2512578b657cae95f841c0cd50736", "score": "0.5331653", "text": "def shift(coin):\n newposn = [[0, 0] for i in range(len(coin))]\n for j in range(1, len(posn) - 1):\n newposn[j + 1][0] += coin[j][0]\n newposn[j - 1][1] += coin[j][1]\n return normalise(newposn)", "title": "" }, { "docid": "8ef58a7cea9c4dc1caba6191f811966a", "score": "0.53264254", "text": "def print_player_cash_balances(game_elements):\n\n for p in game_elements['players']:\n print(p.player_name, ' has cash balance ',str(p.current_cash))", "title": "" }, { "docid": "ad9452ea8f5626976584e3a46b1548a7", "score": "0.5318002", "text": "def Portfolio():\r\n\r\n # Every possible crypto with the trading pair USD\r\n # [coin, historical price data, # of coin possessed]\r\n allCrypto = [\r\n # ['ADA', [], 0.0],\r\n # ['ALGO', [], 0.0],\r\n # ['ATOM', [], 0.0],\r\n # ['BAND', [], 0.0],\r\n # ['BAT', [], 0.0],\r\n # ['BCH', [], 0.0],\r\n # ['BNB', [], 0.0],\r\n # ['BTC', [], 0.0],\r\n # ['BUSD', [], 0.0],\r\n # ['COMP', [], 0.0],\r\n # ['DAI', [], 0.0],\r\n # ['DASH', [], 0.0],\r\n # ['DOGE', [], 0.0],\r\n # ['EGLD', [], 0.0],\r\n # ['ENJ', [], 0.0],\r\n # ['EOS', [], 0.0],\r\n # ['ETC', [], 0.0],\r\n # ['ETH', [], 0.0],\r\n # ['HBAR', [], 0.0],\r\n # ['HNT', [], 0.0],\r\n # ['ICX', [], 0.0],\r\n # ['IOTA', [], 0.0],\r\n # ['KNC', [], 0.0],\r\n # ['LINK', [], 0.0],\r\n # ['LTC', [], 0.0],\r\n # ['MANA', [], 0.0],\r\n # ['MATIC', [], 0.0],\r\n # ['MKR', [], 0.0],\r\n # ['NANO', [], 0.0],\r\n # ['NEO', [], 0.0],\r\n # ['OMG', [], 0.0],\r\n # ['ONE', [], 0.0],\r\n # ['ONT', [], 0.0],\r\n # ['OXT', [], 0.0],\r\n # ['PAXG', [], 0.0],\r\n # ['QTUM', [], 0.0],\r\n # ['REP', [], 0.0],\r\n # ['SOL', [], 0.0],\r\n # ['STORJ', [], 0.0],\r\n # ['UNI', [], 
0.0],\r\n # ['USDC', [], 0.0],\r\n # ['VET', [], 0.0],\r\n # ['VTHO', [], 0.0],\r\n # ['WAVES', [], 0.0],\r\n # ['XLM', [], 0.0],\r\n # ['XTZ', [], 0.0],\r\n # ['ZEC', [], 0.0],\r\n # ['ZEN', [], 0.0],\r\n # ['ZIL', [], 0.0],\r\n # ['ZRX', [], 0.0]\r\n ]\r\n\r\n currentPort = [\r\n ['DOGE', [], 703],\r\n ['RVN', [], 628],\r\n ['UNI', [], 1.48],\r\n ['BNB', [], 1.02554270],\r\n ['ETH', [], .06750217],\r\n ['BTC', [], .00072914],\r\n ['LTC', [], .67067000],\r\n ['BCH', [], .21459000],\r\n ['ZRX', [], 41.87],\r\n ['ZEN', [], .204],\r\n ['ICX', [], 66.76]\r\n ]\r\n\r\n return currentPort", "title": "" }, { "docid": "c8641ec9c50751c32a2adffb80e768b3", "score": "0.5303334", "text": "def flipCoin():\n\t\treturn (\"heads\", \"tails\")[random.randrange(2)]", "title": "" }, { "docid": "a1c06e2c26fe82e963124251ca47ef71", "score": "0.5285573", "text": "def price(self, coin):\n if coin not in constants.SUPPORTED_COINS:\n raise RuntimeError(f'coin {coin} not supported')", "title": "" }, { "docid": "780b22d40541ea4fa07e83e1477aa331", "score": "0.526286", "text": "def update_litecoins(self):\n litecoin = self.poloniex.returnTicker()[self.litecoin_key]\n self.litecoin_USD_ask = float(litecoin[self.ask_key])\n self.litecoin_USD_bid = float(litecoin[self.bid_key])\n self.litecoin_USD_lasttrade = float(litecoin[self.last_key])", "title": "" }, { "docid": "8bd60ef316a7c2e8574cfe43a2dfb515", "score": "0.5232004", "text": "def GenerateCoins(self):\n\n NUM_FLOORS = 7\n NUM_COINS_LIMITS = (3, 5)\n\n __limits = [ (4, 49), \n (21, 78),\n (4, 59),\n (16, 78),\n (4, 64),\n (11, 78),\n (4, 78)\n ]\n\n __floor_rows = [ 4, 8, 12, 16, 20, 24, 28 ]\n\n for i in range(NUM_FLOORS):\n num = random.randint(NUM_COINS_LIMITS[0], NUM_COINS_LIMITS[1])\n self.GenerateCoinsHelper(num, __limits[i], __floor_rows[i])", "title": "" }, { "docid": "d9454ae0c3e4722692edfdf265e21805", "score": "0.5229601", "text": "def process_coin():\n\n total = int(input(\"how many quarters? : \"))*0.25\n total += int(input(\"how many dimes ? : \"))*0.1\n total += int(input(\"how many nickeles? : \"))*0.05\n total += int(input(\"how many penny ? 
:\"))*0.01\n\n return total", "title": "" }, { "docid": "c8ceba5af36f5d892407737c19c3c0f2", "score": "0.5214174", "text": "def removals(self) -> List[Coin]:\n return [_.coin for _ in self.coin_spends]", "title": "" }, { "docid": "7ff2d6f239ff62926bc903b2378b8c42", "score": "0.5199737", "text": "def pmf_ncoins(N=2):\n if (not isinstance(N,int)):\n raise TypeError('N coins have to be an int!')\n xpf = pmf_binomial(N,0.5)\n return xpf", "title": "" }, { "docid": "8701927acf8ad0ee53a50a4f1a06b98e", "score": "0.51952744", "text": "def get_crypto():\n\n url = \"https://api.coinbase.com/v2/prices/USD/spot\"\n\n r = requests.get(url)\n data = r.json()\n crypto_prices = [\"\\n{}: ${}\".format(crypto['base'], crypto['amount']) for crypto in data['data']]\n return crypto_prices", "title": "" }, { "docid": "e7080a6d4b564d8194a9080876a07fd3", "score": "0.51822644", "text": "def read_coin_list(self):\n\n with open(self.coinListPath) as theCoinList:\n self.coinList = theCoinList.read().splitlines()", "title": "" }, { "docid": "04d064c673a728cee6a261a8abb29383", "score": "0.5182202", "text": "def coin_position(self):\n ret = {}\n for c in Top.coin_top_settings():\n ret[c] = Top.get_coin_position(c, self)\n return ret", "title": "" }, { "docid": "58f1aa4e64422311b6e6c82976ae05db", "score": "0.51806355", "text": "def add_coins(self, color, num):\n idx = self.name_dict[color]\n self.coins[idx] += num", "title": "" }, { "docid": "6db7dd532da7fc77673ddaf9dded1da5", "score": "0.5174117", "text": "def flip_coins(probabilities):\n qvm = get_qvm(len(probabilities))\n program = Program()\n ro = program.declare(\"ro\", \"BIT\", len(probabilities))\n for i, prob in enumerate(probabilities):\n program += rotation(prob, i)\n program += MEASURE(i, ro[i])\n result = qvm.run(program)\n return list(result[0])", "title": "" }, { "docid": "62efe2647ee91cab3dcddf2b7482cddf", "score": "0.5168676", "text": "def find_min_coins(coins: List[int], index: int, value: int):\n if value == 0:\n return 0\n elif index == 0:\n return value\n elif coins[index] > value:\n return find_min_coins(coins, index - 1, value)\n else:\n return min(find_min_coins(coins, index - 1, value), 1 + find_min_coins(coins, index, value - coins[index]))", "title": "" }, { "docid": "5248cbc58bb2ad67e40262ee34b57dcc", "score": "0.5162718", "text": "def _search_coin(self, coin):\n try:\n amount = COINS_CODE[str(coin)]\n return float(amount)\n except Exception as e:\n print e", "title": "" }, { "docid": "21f41154f7e11d0a3f9cae7dc598c708", "score": "0.5126959", "text": "def dozen_bet(self):\n outcome_list = []\n for d in [1,13,25]:\n dozen_string = \"Dozen \" + str(d)+\"-\"+str(d+11)\n for j in range(0, 12):\n outcome_list.append((d+j, Outcome(dozen_string, 2)))\n return outcome_list", "title": "" }, { "docid": "7998fb33810940ff3d0fbc5fab0af8f2", "score": "0.5121702", "text": "def print_dollars_cents(change_list):\n coin_values = [100, 25, 10, 5, 1]\n cents = 0\n for (a, b) in zip(change_list, coin_values):\n cents += a * b\n d, c = divmod(cents, 100)\n print(f\"${d:01d}.{c:02d}\")", "title": "" }, { "docid": "364f1479c628d3652a528b494389c2af", "score": "0.5116367", "text": "def coin_event(*args):\n\n print('coin event')\n # saves the time of the last impulse\n CoinAcceptor.lastimpulse = time.time()\n\n # measure how many pulses where send\n CoinAcceptor.pulses = CoinAcceptor.pulses + 1", "title": "" }, { "docid": "227cd68c5529da3eee57555a6821d9cd", "score": "0.5100843", "text": "def check(self):\n while self.received_amount < self.order[\"price\"]:\n if 
self.received_amount == 0:\n message = f'Please insert coins to pay!\\nAcceptable coins are: {self.coins} -> '\n else:\n message = f'Total inserted: {(self.received_amount/100):.2f}\\nPlease insert coins to pay!\\nAcceptable ' \\\n f'coins are: {self.coins} -> '\n\n amount = input(message)\n if amount in self.coins:\n amount = int(amount)\n self.received_amount += amount\n else:\n print('Invalid coin')\n\n print(f'Total amount inserted: {self.received_amount}\\nProceeding to checkout...')\n change = self.provide_change(self.order[\"price\"], self.received_amount)\n print('Thanks to buy at ACME Machines')\n print('Your change:')\n print(change)", "title": "" }, { "docid": "fe704c798b5d871ab9b0a08a26b8753f", "score": "0.5093493", "text": "def monitor_coins(*args):\n\n print('monitor coins')\n if (time.time() - CoinAcceptor.lastimpulse > 0.5) and (CoinAcceptor.pulses > 0):\n # if the condition matches, the coin_inserted method is called\n CoinAcceptor.coins_inserted()\n return True", "title": "" }, { "docid": "088d2a71ed74af9cddeb8c71a6548ef7", "score": "0.5093116", "text": "def getCryptoPrices(self):\n for name, data in self.my_variables_map[\"NOTION_ENTRIES\"].items():\n url = f\"https://api.binance.com/api/v3/avgPrice?\"\\\n f\"symbol={name}USDT\"\n response = requests.request(\"GET\", url)\n if response.status_code == 200:\n content = response.json()\n data['price'] = content['price']", "title": "" }, { "docid": "13dc734528f6f28ab9b80bfdd53fb8bc", "score": "0.50923526", "text": "async def add(self, ctx: BBContext, coins: int, member: discord.Member):\n\n con = await ctx.get_connection()\n player = await LeaderboardPlayer.fetch(con, member)\n\n if player.level < 1:\n return await ctx.send('Coins can not be added to anyone below level 1')\n\n await player.update(con, coins=coins)\n await ctx.tick()\n self.bot.logger.info('%s Event coins given to %s by %s', coins, str(member), str(ctx.author))", "title": "" }, { "docid": "0c0031e581bdf0917d5edf3e9b0e520c", "score": "0.5086717", "text": "def retrieve_coins(self, asset_symbol, strategy):\n active_trades = self.active_investments.loc[self.active_investments[\"strategy\"] == strategy.name]\n if asset_symbol in active_trades[\"asset\"].values:\n coins = float(active_trades.loc[active_trades[\"asset\"] == asset_symbol, \"coins\"])\n asset_step_size = self.get_step_size(asset_symbol)\n return calc_correct_quantity(asset_step_size, coins)\n return None", "title": "" }, { "docid": "7e033d3288d57d0fbbb97f2845e9e5a7", "score": "0.50777227", "text": "def create_coin(coin: tuple = ('HEADS', 'TAILS',)):\n COIN = {True: coin[0], False: coin[1]}\n return COIN", "title": "" }, { "docid": "8f9a4dce42930daa38fdac86dd4b4fe9", "score": "0.5068879", "text": "def calculate_holdings(portfolio):\n balances = Exchange.balances()\n portfolio_balance = {}\n for coin in portfolio:\n portfolio_balance[coin]= balances[\"data\"][\"available\"][\"{}\".format(coin)]\n\n return portfolio_balance", "title": "" }, { "docid": "0527955b74226e5bbe81fee52ebcd566", "score": "0.50555027", "text": "def minimumCoins(self, money, coins):\n\n if money in coins:\n return 1\n\n re_array = [float('inf')] * (money + 1)\n re_array[0] = 0\n\n for coin in coins:\n for i in range(coin, len(re_array)):\n re_array[i] = min(re_array[i], re_array[i - coin] + 1)\n return re_array[money] if re_array[money] != float('inf') else -1", "title": "" }, { "docid": "a58fe3672c6d1ad9f49d70a5fd4aca6c", "score": "0.5041625", "text": "def tn_cash(self):\n values = self._choose(num=5, val_range=(1, 35))\n 
cash_ball = self._choose(num=1, val_range=(1, 5))[0]\n return (values, cash_ball)", "title": "" }, { "docid": "9635bf22ca2b0922b189567ce3c8e6b1", "score": "0.50235677", "text": "def get_subscription_list(self):\n subscribed = self.currency.values_list('coin', flat=True)\n return subscribed", "title": "" }, { "docid": "8dd37c2e5209c87e2cff419ea0475ad1", "score": "0.5020947", "text": "def coin_value(self, _coin, _fiat):\r\n try:\r\n value = self.runtime['ev'][_coin]['btc'] * self.runtime['ev']['btc'][_fiat]\r\n except KeyError as e:\r\n lg.warning(\"CointipBot::coin_value(%s, %s): KeyError\", _coin, _fiat)\r\n value = 0.0\r\n return value", "title": "" }, { "docid": "c3a5c51ac5ba7f7b9d926673c3fe6d5c", "score": "0.50124896", "text": "def RecursiveChange(money, coins):\n change = []\n if (money == 0):\n return {'minCoins': 0, 'change': change}\n minCoins = INFINITY\n for changeValue in coins:\n if (money >= changeValue):\n list = RecursiveChange(money - changeValue, coins)\n numcoins = list['minCoins']\n if ((numcoins + 1) < minCoins):\n change = list['change']\n minCoins = numcoins + 1\n change.append(changeValue)\n return {'minCoins': minCoins, 'change': change}", "title": "" }, { "docid": "2c3ac07e0cd732546bfcb6359c0091be", "score": "0.5004157", "text": "async def coin(self, ctx, choice: str, bet: int):\r\n\r\n # Declare variables for the game.\r\n user = ctx.message.author\r\n settings = self.casino_bank.check_server_settings(user.server)\r\n choice = choice.title()\r\n choices = [_(\"Heads\"), _(\"Tails\")]\r\n chip_name = settings[\"System Config\"][\"Chip Name\"]\r\n\r\n # Run a logic check to determine if the user can play the game\r\n check = self.game_checks(settings, ctx.prefix, user, bet, \"Coin\", choice, choices)\r\n if check:\r\n msg = check\r\n else: # Run the game when the checks return None\r\n self.casino_bank.withdraw_chips(user, bet)\r\n settings[\"Players\"][user.id][\"Played\"][\"Coin Played\"] += 1\r\n outcome = random.choice([_(\"Heads\"), _(\"Tails\")])\r\n await self.bot.say(_(\"The coin flips into the air...\"))\r\n await asyncio.sleep(2)\r\n\r\n # Begin game logic to determine a win or loss\r\n if choice == outcome:\r\n amount = int(round(bet * settings[\"Games\"][\"Coin\"][\"Multiplier\"]))\r\n msg = _(\"Congratulations! The coin landed on {}!\").format(outcome)\r\n settings[\"Players\"][user.id][\"Won\"][\"Coin Won\"] += 1\r\n\r\n # Check if a threshold is set and withold chips if amount is exceeded\r\n if self.threshold_check(settings, amount):\r\n settings[\"Players\"][user.id][\"Pending\"] = amount\r\n msg += (_(\"\\nYour winnings exceeded the threshold set on this server. \"\r\n \"The amount of {} {} chips will be withheld until reviewed and \"\r\n \"released by an admin. Do not attempt to play additional games \"\r\n \"exceeding the threshold until this has been cleared.\"\r\n \"\").format(amount, chip_name, user.id))\r\n logger.info(\"{}({}) won {} chips exceeding the threshold. Game \"\r\n \"details:\\nPlayer Choice: {}\\nPlayer Bet: {}\\nGame \"\r\n \"Outcome: {}\\n[END OF REPORT]\"\r\n \"\".format(user.name, user.id, amount, choice.ljust(10),\r\n str(bet).ljust(10), outcome[0].ljust(10)))\r\n else:\r\n self.casino_bank.deposit_chips(user, amount)\r\n msg += _(\"```Python\\nYou just won {} {} chips.```\").format(amount, chip_name)\r\n else:\r\n msg = _(\"Sorry! 
The coin landed on {}.\").format(outcome)\r\n # Save the results of the game\r\n self.casino_bank.save_system()\r\n # Send a message telling the user the outcome of this command\r\n await self.bot.say(msg)", "title": "" }, { "docid": "cf5c5fcaaf4af1f1bf8d10713722b351", "score": "0.50018334", "text": "def make_count_change():\n \"*** YOUR CODE HERE ***\"\n seen_before = {}\n def count_change(a, coins=(50, 25, 10, 5, 1)):\n if (a, coins) in seen_before:\n return seen_before[(a, coins)]\n if a == 0:\n ans = 1\n elif a < 0 or len(coins) == 0:\n ans = 0\n else:\n ans = count_change(a, coins[1:]) + count_change(a - coins[0], coins) \n seen_before[(a, coins)] = ans\n return ans\n return count_change", "title": "" }, { "docid": "75a2dcacca7658be0199834bcc397319", "score": "0.49945697", "text": "async def spend_clawback_coins(\n self,\n request: Dict[str, Any],\n tx_config: TXConfig = DEFAULT_TX_CONFIG,\n extra_conditions: Tuple[Condition, ...] = tuple(),\n ) -> EndpointResult:\n if \"coin_ids\" not in request:\n raise ValueError(\"Coin IDs are required.\")\n coin_ids: List[bytes32] = [bytes32.from_hexstr(coin) for coin in request[\"coin_ids\"]]\n tx_fee: uint64 = uint64(request.get(\"fee\", 0))\n # Get inner puzzle\n coin_records = await self.service.wallet_state_manager.coin_store.get_coin_records(\n coin_id_filter=HashFilter.include(coin_ids),\n coin_type=CoinType.CLAWBACK,\n wallet_type=WalletType.STANDARD_WALLET,\n spent_range=UInt32Range(stop=uint32(0)),\n )\n\n coins: Dict[Coin, ClawbackMetadata] = {}\n batch_size = request.get(\n \"batch_size\", self.service.wallet_state_manager.config.get(\"auto_claim\", {}).get(\"batch_size\", 50)\n )\n tx_id_list: List[bytes] = []\n for coin_id, coin_record in coin_records.coin_id_to_record.items():\n try:\n metadata = coin_record.parsed_metadata()\n assert isinstance(metadata, ClawbackMetadata)\n coins[coin_record.coin] = metadata\n if len(coins) >= batch_size:\n tx_id_list.extend(\n (\n await self.service.wallet_state_manager.spend_clawback_coins(\n coins, tx_fee, tx_config, request.get(\"force\", False), extra_conditions=extra_conditions\n )\n )\n )\n coins = {}\n except Exception as e:\n log.error(f\"Failed to spend clawback coin {coin_id.hex()}: %s\", e)\n if len(coins) > 0:\n tx_id_list.extend(\n (\n await self.service.wallet_state_manager.spend_clawback_coins(\n coins, tx_fee, tx_config, request.get(\"force\", False), extra_conditions=extra_conditions\n )\n )\n )\n return {\n \"success\": True,\n \"transaction_ids\": [tx.hex() for tx in tx_id_list],\n }", "title": "" }, { "docid": "279af9289fd1702623c3bee443ce5fc1", "score": "0.49714786", "text": "def get_coin_discard_actions(self, player):\n actions = []\n if player.total_coins_count() > 10:\n # max to 3\n coins_to_discard = player.total_coins_count() - 10\n coins = [player.red, player.blue, player.green,\n player.black, player.white, player.gold]\n\n if coins_to_discard == 1:\n for i in range(6):\n if coins[i] > 0:\n discard = [0] * i + [1] + [0] * (5-i)\n actions.append(DiscardCoins(discard[0], discard[1],\n discard[2], discard[3], discard[4],\n discard[5]))\n\n elif coins_to_discard == 2:\n index_more_than_1 = [i for i in range(6) if coins[i] > 1]\n index_more_than_0 = [i for i in range(6) if coins[i] > 0]\n # combinations of 2 coin\n for i in index_more_than_1:\n discard = [0] * 6\n discard[i] = 2\n actions.append(DiscardCoins(discard[0], discard[1],\n discard[2], discard[3], discard[4],\n discard[5]))\n\n # combinations of 1 coin\n for i in range(len(index_more_than_0)):\n for j in 
range(i+1, len(index_more_than_0)):\n if index_more_than_0[i] != index_more_than_0[j]:\n discard = [0] * 6\n discard[index_more_than_0[i]] = discard[index_more_than_0[j]] = 1\n actions.append(DiscardCoins(discard[0], discard[1],\n discard[2], discard[3], discard[4],\n discard[5]))\n\n elif coins_to_discard == 3:\n index_more_than_2 = [i for i in range(6) if coins[i] > 2]\n index_more_than_1 = [i for i in range(6) if coins[i] > 1]\n index_more_than_0 = [i for i in range(6) if coins[i] > 0]\n # combinations of 2, 1 coin\n for i in index_more_than_1:\n for j in index_more_than_0:\n if i != j:\n discard = [0] * 6\n discard[i] = 2\n discard[j] = 1\n actions.append(DiscardCoins(discard[0], discard[1],\n discard[2], discard[3], discard[4],\n discard[5]))\n # combinations of 3 coin\n for i in index_more_than_2:\n discard = [0] * 6\n discard[i] = 3\n actions.append(DiscardCoins(discard[0], discard[1],\n discard[2], discard[3], discard[4],\n discard[5]))\n\n # combinations of 1 coin\n for i in range(len(index_more_than_0)):\n for j in range(i+1, len(index_more_than_0)):\n for k in range(j+1, len(index_more_than_0)):\n if index_more_than_0[i] != index_more_than_0[j] and index_more_than_0[j] != index_more_than_0[k]:\n discard = [0] * 6\n discard[index_more_than_0[i]] = discard[index_more_than_0[j]] = discard[index_more_than_0[k]] = 1\n actions.append(DiscardCoins(discard[0], discard[1],\n discard[2], discard[3], discard[4],\n discard[5]))\n # assigning option id to each action\n for i in range(len(actions)):\n actions[i].option = i\n\n return actions", "title": "" }, { "docid": "9b94c56b903a5d0ee5426e978415982f", "score": "0.4965749", "text": "def hook_spend_value(self, card, actual=False):\n val = card.hook_coinvalue(game=self.game, player=self)\n for c in self.piles[Piles.PLAYED]:\n val += c.hook_spend_value(game=self.game, player=self, card=card)\n for s in self.states:\n val += s.hook_spend_value(game=self.game, player=self, card=card)\n if val and self.coin_token:\n val -= 1\n if actual:\n self.coin_token = False\n return val", "title": "" }, { "docid": "7e0d4efdfa969b7d343d5da4bb3adca4", "score": "0.49599436", "text": "def get_transaction_history(self, coin) -> [(float, str)]:\n transaction_list = self.client.get_transactions(coin).get('data')\n num_coins_traded = [float(trans[\"amount\"].get(\"amount\")) for trans in transaction_list]\n timestamps = [trans[\"created_at\"] for trans in transaction_list]\n\n return list(zip(num_coins_traded, timestamps))", "title": "" }, { "docid": "a7c71b4bd7cf0f56f8623b5c9afc1eee", "score": "0.49581164", "text": "def listen_for_coins(self):\n self.enable_coins_reader_if_disabled()\n coin_regex = re.compile(\"Coin#[1-6]|Note#[1-3]\")\n while True:\n msg = self.device.readall()\n match = coin_regex.search(msg)\n if match:\n try:\n print self._search_coin(match.group(0))\n except Exception as e:\n print e", "title": "" }, { "docid": "a3e096c8074f992eaf811081fba96885", "score": "0.4944597", "text": "def test_empty_list_value(self):\n list_of_coins = []\n\n total_value = display_payment_value(list_of_coins)\n self.assertEqual(total_value, 0)", "title": "" } ]
d0a0ee151504cb81fe704c753ca268ba
Gets the gnomAD v3 info TableResource
[ { "docid": "2543294f5392e3763945bfe15143f4e2", "score": "0.5697084", "text": "def get_info(split: bool = True) -> VersionedTableResource:\n\n return VersionedTableResource(\n CURRENT_RELEASE,\n {\n release: TableResource(\n path=\"{}/gnomad_genomes_v{}_info{}.ht\".format(\n _annotations_root(release), release, \".split\" if split else \"\"\n )\n )\n for release in RELEASES\n },\n )", "title": "" } ]
[ { "docid": "6ae4ff619386eee68b71478e4f997011", "score": "0.58526546", "text": "def getTable(self):\n return self.__table", "title": "" }, { "docid": "ffc49ff7bb41356e4c94a330d93f4d77", "score": "0.58430845", "text": "def get_table(table_name: str):\n try:\n response = crud.table.get(table_name=table_name)\n return response\n except Exception as e:\n raise e", "title": "" }, { "docid": "91400717af77bba1241d879179dfb0b8", "score": "0.58152765", "text": "def get_table(self):\n return self.table", "title": "" }, { "docid": "3fe54345f9fdd4d1d880778c7530f8fd", "score": "0.5683645", "text": "def TTable_GetNodeTable(Network, Context):\n return _snap.TTable_GetNodeTable(Network, Context)", "title": "" }, { "docid": "b7f598465f50bf8983f67bff2e8c8488", "score": "0.55717874", "text": "def _get_table(self, obj):\n if isinstance(obj, Marble):\n return obj\n else:\n return obj.table", "title": "" }, { "docid": "96b5a6f8c96c5be648ab56e1525b33bb", "score": "0.5542823", "text": "def TableGet(self, *args):\n return _csnd.Csound_TableGet(self, *args)", "title": "" }, { "docid": "587eaf079abab28e435c2b2b70d0eb05", "score": "0.5495886", "text": "def table_info(request, name=None, get_table_size=False):\n\n try:\n if name is None:\n return JsonResponse({\n 'success': False,\n 'err': \"Invalid table name\"\n })\n\n t = mdl.Table.objects.filter(table_name=name)[0]\n\n if not t:\n return JsonResponse({'success': False, 'err': \"Table not found\"})\n\n t_props = dict()\n data = list()\n if len(t.tablefield_set.all()) > 0:\n data = [model_to_dict(tf) for tf in t.tablefield_set.all(\n ).order_by('-is_primary_key')]\n t_props[\"fields\"] = data\n this_tbl = model_to_dict(t)\n this_tbl[\"props\"] = t_props\n\n if get_table_size:\n schemas = getattr(settings, \"META_DB_SCHEMAS\", None)\n if not schemas:\n raise Exception('Schemas not configured')\n \n with get_schema_instance(schemas) as s:\n this_tbl[\"props\"][\"table_size\"] = s.get_table_info(schemas[0], name).get_table_size(s)\n\n\n return JsonResponse(this_tbl, safe=False)\n except Exception as e:\n return JsonResponse({\n 'success': False,\n 'err': str(e),\n 'errStack': traceback.format_exc()\n })", "title": "" }, { "docid": "eb92ede1b559d8cb625437429fa83e15", "score": "0.54712886", "text": "def getTableInfo( dbConn, tableName ):\r\n query = \"pragma table_info(%s)\" % tableName\r\n c = dbConn.execute( query )\r\n r = c.fetchall()\r\n return r", "title": "" }, { "docid": "cfc99d940dd82bfd58b21703ce697d2b", "score": "0.5449395", "text": "def get_table(self, name):\n response = self.layer1.describe_table(name)\n return Table(self, response)", "title": "" }, { "docid": "dd6f013e61718510a61607e1ddd44205", "score": "0.54200834", "text": "def get_table(table):\n g.tinydb = TinyDB(current_app.config['DATABASE'])\n person_table = g.tinydb.table(table)\n return person_table", "title": "" }, { "docid": "21b4670cbf2160bb1dde34be52cd9a81", "score": "0.5418668", "text": "def get(self, table, *args, **kwargs):\n return self.query('GET %s' % (table,), *args, **kwargs)", "title": "" }, { "docid": "f3abe625d9d06b84ebc474eb97658f92", "score": "0.53237236", "text": "def table(self):\n return self._table", "title": "" }, { "docid": "28e0de0c49f05173a67f6946e7ba17d8", "score": "0.5273466", "text": "def get_view(self, table):\n\n request = self.client.tables().get(projectId=table.project_id,\n datasetId=table.dataset_id,\n tableId=table.table_id)\n\n try:\n response = request.execute()\n except http.HttpError as ex:\n if ex.resp.status == 404:\n return None\n raise\n\n return 
response['view']['query'] if 'view' in response else None", "title": "" }, { "docid": "0a5a9b1a4bb38e79e8d7e19e39ecc24a", "score": "0.5263609", "text": "def table(self):\n return self._response_table", "title": "" }, { "docid": "a828635841843fb03298bc63f37ac9b1", "score": "0.5253432", "text": "def database_table_spec(self) -> 'outputs.GoogleCloudDatacatalogV1DatabaseTableSpecResponse':\n return pulumi.get(self, \"database_table_spec\")", "title": "" }, { "docid": "b728b917ae0b4bc3cb3c656f602c0512", "score": "0.52518964", "text": "def table(self) -> str:\n return pulumi.get(self, \"table\")", "title": "" }, { "docid": "7c12d71d63a9a8349365ab60cdaf9b42", "score": "0.52233386", "text": "def get_properties(self):\n endpoint = f\"{self.url}/databases/{self.db}\"\n headers = self.notion_headers()\n data = requests.get(endpoint,headers=headers)\n return data.json()", "title": "" }, { "docid": "93518980734e9ff8539a7a6c66fcd752", "score": "0.5205923", "text": "def get_table(self, tablename):\n import pandas as pd\n api_format = ('https://{username}.carto.com/api/v2/sql?q='\n 'SELECT%20*%20FROM%20{tablename}&format=csv')\n request = api_format.format(\n username=self.cdb_username,\n tablename=tablename)\n return pd.read_csv(request)", "title": "" }, { "docid": "232298aea66f09b68cae4612dbc8890e", "score": "0.519746", "text": "def get_table(self, table_full_name):\n table = None\n try:\n table = self.client.get_table(table_full_name) # Make an API request.\n print(\"Table {} already exists\".format(table_full_name))\n except NotFound:\n print(\"Table {} is not found\".format(table_full_name))\n return table", "title": "" }, { "docid": "27669e0f295af6118cfa9a313bd0f8e4", "score": "0.5193316", "text": "def get_metadata(self, table_name):\n # see if already downloaded\n\n\n if not table_name.startswith(\"NM_\"):\n path = \"api/v01/dataset/def.sdmx.json?\"\n query_params = {\"search\": \"*\"+table_name+\"*\"}\n else:\n path = \"api/v01/\" + table_name + \".def.sdmx.json?\"\n query_params = {}\n \n data = self.__fetch_json(path, query_params)\n\n # return empty if no useful metadata returned (likely table doesnt exist)\n if not data[\"structure\"][\"keyfamilies\"]:\n return\n\n # this is the nomis internal table name\n table = data[\"structure\"][\"keyfamilies\"][\"keyfamily\"][0][\"id\"]\n\n rawfields = data[\"structure\"][\"keyfamilies\"][\"keyfamily\"][0][\"components\"][\"dimension\"]\n fields = {}\n for rawfield in rawfields:\n field = rawfield[\"conceptref\"]\n\n fields[field] = {}\n\n # ignore when too many categories (i.e. 
geograpical ones)\n if field.upper() == \"CURRENTLY_RESIDING_IN\" or field.upper() == \"PLACE_OF_WORK\":\n continue\n\n # further query to get categories\n path = \"api/v01/dataset/\"+table+\"/\"+field+\".def.sdmx.json?\"\n #print(path)\n\n try:\n fdata = self.__fetch_json(path, {})\n except timeout:\n print(\"HTTP timeout requesting metadata for \" + table_name)\n return {}\n except (HTTPError, URLError):\n print(\"HTTP error requesting metadata for \" + table_name)\n return {}\n else:\n values = fdata[\"structure\"][\"codelists\"][\"codelist\"][0][\"code\"]\n #print(field+\":\")\n for value in values:\n # KEYs are stored as strings for json compatibility\n fields[field][value[\"value\"]] = value[\"description\"][\"value\"]\n\n # Fetch the geographies available for this table\n geogs = {}\n path = \"api/v01/dataset/\"+table+\"/geography/TYPE.def.sdmx.json?\"\n try:\n fdata = self.__fetch_json(path, {})\n except timeout:\n print(\"HTTP timeout requesting geography metadata for \" + table_name)\n except (HTTPError, URLError):\n print(\"HTTP error requesting geography metadata for \" + table_name)\n else:\n if fdata[\"structure\"][\"codelists\"]:\n values = fdata[\"structure\"][\"codelists\"][\"codelist\"][0][\"code\"]\n #print(values)\n for value in values:\n geogs[str(value[\"value\"])] = value[\"description\"][\"value\"]\n\n result = {\"nomis_table\": table,\n \"description\": data[\"structure\"][\"keyfamilies\"][\"keyfamily\"][0][\"name\"][\"value\"],\n \"fields\": fields,\n \"geographies\": geogs}\n\n # save a copy\n self.write_metadata(table_name, result)\n\n return result", "title": "" }, { "docid": "4f35d87ab635b4237c8fbef274312228", "score": "0.5185821", "text": "def get_table(self, table):\n # we query for ALL data within the table we specified\n query = \"SELECT * From [dbo].[\"+table+\"]\"\n\n # now send the query and connection parameters to 'read_sql' function\n data = read_sql(query, self.cnxn)\n\n # update the user\n print(\"{} table imported from {}. 
Type variable_name.head() to view, \"\n \"for example \\\"data.head()\\\".\".format(table, self.database))\n return data", "title": "" }, { "docid": "2a74765ce318d79b8d00619308e1d3f2", "score": "0.51749045", "text": "def tableObj(self, tableName):\n return self.db.load_table(tableName)", "title": "" }, { "docid": "d7a16a164715df382b0a1fa4675d8c99", "score": "0.5149835", "text": "def getTableInfo(self, rowObject):\n try:\n return self.schema[rowObject.rowTableName]\n except KeyError:\n raise DBError(\"class %s was not registered with %s\" % (\n rowObject.__class__, self))", "title": "" }, { "docid": "da38debc1bb7ede916a79baba9091088", "score": "0.51438326", "text": "def get(self):\n rs = Con.getad_info()\n return rs", "title": "" }, { "docid": "c1dba71767be02677bb9ad085cac7088", "score": "0.51225317", "text": "def metadata():\n log.debug(\"All checks true - loading gce metadata\")\n result = http.query(URL, headers=True, header_list=[\"Metadata-Flavor: Google\"])\n metadata = salt.utils.json.loads(result.get(\"body\", {}))\n\n return metadata", "title": "" }, { "docid": "5fb3bf51333266a785fed6f6ab4c6b6c", "score": "0.51194364", "text": "def info(self):\n with qdb.sql_connection.TRN:\n sql = \"\"\"SELECT information FROM qiita.{} WHERE\n logging_id = %s\"\"\".format(self._table)\n qdb.sql_connection.TRN.add(sql, [self.id])\n\n rows = qdb.sql_connection.TRN.execute_fetchlast()\n\n if rows:\n results = loads(rows)\n else:\n results = {}\n\n return results", "title": "" }, { "docid": "3d6fc939788dcdb727334e947108021a", "score": "0.5115461", "text": "def GetTable(self, *args):\n return _csnd.Csound_GetTable(self, *args)", "title": "" }, { "docid": "5647377e3a15cd5683a2d59af95cd3e2", "score": "0.51056594", "text": "def get(self, table, tget):\n pass", "title": "" }, { "docid": "d8990c63eec002040613d5bef6f19baf", "score": "0.50986207", "text": "def query_tableinfo(self, table_name):\n if table_name in self.tables:\n self.query('pragma table_info({table_name:s});'.format(\n table_name=table_name))\n res = {'name': [], 'type': [], 'index': []}\n for row in self.results:\n res['index'].append(row[0])\n res['name'].append(row[1])\n res['type'].append(row[2])\n return res\n else:\n return None", "title": "" }, { "docid": "9973d4f3eab51e2f15e9c9cfb91ab91a", "score": "0.5095611", "text": "def info(self):\n with qdb.sql_connection.TRN:\n sql = \"SELECT * from qiita.{0} WHERE email = %s\".format(\n self._table)\n # Need direct typecast from psycopg2 dict to standard dict\n qdb.sql_connection.TRN.add(sql, [self._id])\n # [0] retrieves the first row (the only one present)\n info = dict(qdb.sql_connection.TRN.execute_fetchindex()[0])\n # Remove non-info columns\n for col in self._non_info:\n info.pop(col)\n return info", "title": "" }, { "docid": "233209744de27a2df28872dd2dd6759b", "score": "0.50741786", "text": "def get(self, kind, cloudname, identifier, **kwargs):\n return self.resource(\"get\", kind, cloudname, **kwargs)", "title": "" }, { "docid": "3569e8b1f559661b28b71bbf76bcff42", "score": "0.507194", "text": "async def get_info(self):\n params = {}\n if self.owner_access_key:\n params['owner_access_key'] = self.owner_access_key\n prefix = get_naming(api_session.get().api_version, 'path')\n rqst = Request(\n 'GET', f'/{prefix}/{self.name}',\n params=params,\n )\n async with rqst.fetch() as resp:\n return await resp.json()", "title": "" }, { "docid": "ce70c94125c3f92d424cd54aacae7ffd", "score": "0.50699925", "text": "def get_table(self) -> List[Card]:\n return self._get(self._table)", "title": "" }, { 
"docid": "c59345cf0795ced01935a264cc2cbb1e", "score": "0.5068392", "text": "def getinfo(self, path):\n path = self._normpath(path)\n \n #Fix for skydrive, because if you don't write the path in the way they want\n # it will raise an exception with the error code 400. That error means nothing\n # because it's raised in many situations.\n if(not self.exists(path)):\n raise ResourceNotFoundError(path=path)\n \n return self._metadata_to_info( self.client.metadata(path) )", "title": "" }, { "docid": "3bbe8b3a09c7a21507babd3291a38fff", "score": "0.50572467", "text": "def get(self, table, format=None):\n if csv or format == 'csv':\n data = self.csv(table)\n else:\n data = self.xml(table)\n return data", "title": "" }, { "docid": "d4f29f7227b9f1958d402dbf57c10218", "score": "0.50416386", "text": "def get_table(db, pid, tname):\n\n logger.info('Getting data from table %s', tname)\n\n # get database connection\n conf = ccddb.get_projectdb_config(db, pid)\n if not conf:\n # no tables yet\n return []\n addr = conf[0] + '://' + \\\n conf[1] + ':' + conf[2] + '@' + \\\n conf[3] + ':' + str(conf[4]) + '/' + \\\n conf[5]\n _, meta, conn = __connect_to_projectdb(None, addr, 0)\n\n #logger.info('Getting data from table: %s', tname)\n\n try:\n tbl = result_tables(meta=meta)[tname]\n #tbl = meta.tables[tname]\n except KeyError:\n logger.warning('Table %s not found', tname)\n return None\n\n #for fk in tbl.foreign_keys:\n # tbl = tbl.join(meta.tables[fk.column.table.name])\n\n cc = [c.name for c in tbl.columns if c.name not in META_FIELDS]\n\n with conn.begin():\n s = sql.select(columns=cc).select_from(tbl)\n rs = conn.execute(s)\n\n return (cc, rs)", "title": "" }, { "docid": "77dbb9759eb393df3c3e26332c45b540", "score": "0.5040528", "text": "def big_table_details(self) -> Sequence['outputs.BigTableIODetailsResponse']:\n return pulumi.get(self, \"big_table_details\")", "title": "" }, { "docid": "f53deb9c0fcc1132f2dbab62e5ac84bf", "score": "0.5036313", "text": "def get_table(self, table_name):\n return self.tables.get(table_name, None)", "title": "" }, { "docid": "eb6189d514ddaabeef0dd60dda324de3", "score": "0.5025095", "text": "def get_table_info(self, table):\n desc = table.describe()\n status = desc['Table']['TableStatus']\n throughput = desc['Table']['ProvisionedThroughput']\n num_decreases = throughput['NumberOfDecreasesToday']\n read = throughput['ReadCapacityUnits']\n write = throughput['WriteCapacityUnits']\n return read, write, num_decreases, status", "title": "" }, { "docid": "9a51a10a06dadc9262180488e4e6ee4c", "score": "0.5017608", "text": "def get_info(self) -> dict:\n info = {}\n for i, e in enumerate(self.__table.rows):\n info[self.items[i]] = e.get()\n return info", "title": "" }, { "docid": "1d974edfa6b015d70962899b8775652f", "score": "0.5000175", "text": "def get(self, table_name):\n # get table\n table = tables[table_name]\n\n dump = TableSchema().dump(table)\n return jsonify(dump)", "title": "" }, { "docid": "a3c34cb91dc6e1674f3a04db5b7adfce", "score": "0.49860722", "text": "def info(self):\n resp = self.glesys._get(self._info_path)\n return Account(**resp.response.accountinfo)", "title": "" }, { "docid": "17dfd32e3bd1fb59daa3fb4fb57ff8a2", "score": "0.4980668", "text": "def info(self):\n with qdb.sql_connection.TRN:\n sql = \"SELECT * FROM qiita.{0} WHERE study_id = %s\".format(\n self._table)\n qdb.sql_connection.TRN.add(sql, [self._id])\n info = dict(qdb.sql_connection.TRN.execute_fetchindex()[0])\n # remove non-info items from info\n for item in self._non_info:\n info.pop(item)\n # 
removed because redundant to the id already stored in the object\n info.pop('study_id')\n\n if info['principal_investigator_id']:\n info['principal_investigator'] = qdb.study.StudyPerson(\n info[\"principal_investigator_id\"])\n else:\n info['principal_investigator'] = None\n del info['principal_investigator_id']\n\n if info['lab_person_id']:\n info['lab_person'] = qdb.study.StudyPerson(\n info[\"lab_person_id\"])\n else:\n info['lab_person'] = None\n del info['lab_person_id']\n\n return info", "title": "" }, { "docid": "8729205cdfec1a704698f6dbd27ea6e3", "score": "0.49350688", "text": "def get_metadata(self):\n path = \"/api/v3/metadata\"\n res = self.request_get(path)\n return res", "title": "" }, { "docid": "5a45d6759ad2a2c83f69baf119eb20d1", "score": "0.4924074", "text": "def resource_info(self):\n return self.__inst.resource_info()", "title": "" }, { "docid": "eddf6ccac08c357d7836a439deca44ac", "score": "0.4917717", "text": "def describe_table(self, table, fetch_through_pandas=True, fail_silently=False):\n\n sql_query = f\"\"\"SELECT name FROM sqlite_master WHERE type='{table}';\"\"\"\n return self.query(sql_query, fetch_through_pandas=fetch_through_pandas, fail_silently=fail_silently)", "title": "" }, { "docid": "56c6a4ea224c3330f06174e68617f06c", "score": "0.4915267", "text": "def get_info(self):\n return ({\"name\":self.name, \"lang\":self.lang, \"lang_amt\":self.lang_amt, \"relig\":self.relig,\n \"relig_amt\":self.relig_amt, \"eth\":self.eth,\"eth_amt\":self.eth_amt, \"customs\":self.customs,\n \"taboos\":self.taboos,\"curr_pow_time\":self.curr_pow_time,\"regulations\":self.regulations,\n \"government\":self.government, \"photos\":self.photos})", "title": "" }, { "docid": "3fb576fc72d1a4763e97ad9a7ca78b31", "score": "0.49094194", "text": "def _metadata_table(self):\n return _make_metadata_table(self)", "title": "" }, { "docid": "566949bc03a5ff3f0a4d9a6119c08916", "score": "0.49035496", "text": "def getTable(self):\n return self.classes", "title": "" }, { "docid": "86feb2e04ae884b6cb6d0a30886c8255", "score": "0.4891699", "text": "def getTablesWithTableReference(self, table):\n\t\tquery = (\"SELECT DISTINCT TABLE_NAME FROM INFORMATION_SCHEMA.COLUMNS \"\n\t\t\t+\"WHERE COLUMN_NAME LIKE ('REF|\"+table+\"|%') \"\n\t\t\t+\"AND TABLE_SCHEMA='\"+self.dbname+\"';\")\n\t\tself.executeQuery(query)\n\t\tresults = self.cursor.fetchall()\n\t\tif not results:\n\t\t\treturn None\n\t\treturn results", "title": "" }, { "docid": "2341dba7ae0e6014342b46c7b8a79282", "score": "0.48879397", "text": "def retrieve_info_json(self):\n with self._open_pkg() as tf:\n with closing(tf.extractfile(tf.getmember('info.json'))) as ijf:\n return ijf.read().decode('utf-8')", "title": "" }, { "docid": "5a54071f9b1869da59a17b4edb9bb7e1", "score": "0.4884755", "text": "def get_table(type, name):\r\n if type == IPV4:\r\n table = current_state.tables_v4[name]\r\n elif type == IPV6:\r\n table = current_state.tables_v6[name]\r\n else:\r\n raise ValueError(\"Invalid type %s for table\" % type)\r\n\r\n return table", "title": "" }, { "docid": "4579db910373744be4b6836083b0fe31", "score": "0.48840535", "text": "def get_table(\n table_name: str,\n boto3_session: Optional[boto3.Session] = None,\n) -> boto3.resource:\n dynamodb_resource = _utils.resource(service_name=\"dynamodb\", session=boto3_session)\n dynamodb_table = dynamodb_resource.Table(table_name)\n\n return dynamodb_table", "title": "" }, { "docid": "1dfc7efd96df0a9d12ff1f422ab49121", "score": "0.48504755", "text": "def get_info_format(self):\n return 
self.session.api.get_index(self)", "title": "" }, { "docid": "4ec583730db4ce5ee03204346a2ae276", "score": "0.48464674", "text": "def get_table(self, table_name):\n return r.db(self._name).table(table_name)", "title": "" }, { "docid": "f31714ac376709ed65fe483762d31c57", "score": "0.4839708", "text": "def get_table(table_adress):\n\n table = data_manager.get_table_from_file(table_adress)\n if table[0][0] == 'id':\n table = table[1:]\n\n return table", "title": "" }, { "docid": "8d04a18b0bc66cef925853e487f50d50", "score": "0.48378292", "text": "def get_data(self, apifunction):\n cnn = self.get_connection()\n hdr = {\n 'Authorization': 'Bearer ' + cnn['access_token']\n }\n url = cnn['instance_url'] + '/services/data/v37.0/' + apifunction\n grs = requests.get(url, headers=hdr, proxies=proxies())\n return grs.json()", "title": "" }, { "docid": "98b84763f7d5a51d57f606568bf3b567", "score": "0.48353705", "text": "def associated_route_table(self) -> Optional['outputs.SubResourceResponse']:\n return pulumi.get(self, \"associated_route_table\")", "title": "" }, { "docid": "98b84763f7d5a51d57f606568bf3b567", "score": "0.48353705", "text": "def associated_route_table(self) -> Optional['outputs.SubResourceResponse']:\n return pulumi.get(self, \"associated_route_table\")", "title": "" }, { "docid": "1eccd48b169aa513afb655452c747a9a", "score": "0.48239863", "text": "def show(self, req, id):\n context = req.environ['guts.context']\n try:\n r = objects.Resource.get(context, id)\n except exception.ResourceNotFound:\n raise webob.exc.HTTPNotFound()\n\n resource = {}\n resource['id'] = r.id\n resource['name'] = r.name\n resource['type'] = r.type\n resource['migrated'] = r.migrated\n resource['source'] = r.source_hypervisor\n resource['properties'] = r.properties\n\n return {'resource': resource}", "title": "" }, { "docid": "b0d4a7559d044483fce899c07bdfbb04", "score": "0.4822841", "text": "def get_dbdata(table, id):\n try:\n response = table.get_item(\n Key={\n 'userId': id\n }\n )\n except ClientError as e:\n print(e.response['Error']['Message'])\n item = {}\n else:\n if 'Item' in response:\n item = response['Item']\n print(\"GetItem succeeded:\", json.dumps(item, indent=4))\n else:\n item = {}\n return item", "title": "" }, { "docid": "3c11ff17bf03917208c67d0f29acec08", "score": "0.4814623", "text": "def get_purchase_Table(self):\n return self.purchase_Table", "title": "" }, { "docid": "271603d853e8ea5917bd68955e9c2af1", "score": "0.48065397", "text": "def getTableDescriptor(self, table):\n self.send_getTableDescriptor(table)\n return self.recv_getTableDescriptor()", "title": "" }, { "docid": "7f45cb0ab3b30989f9b473a0db7c297b", "score": "0.4797408", "text": "def info(self) -> None:\n print('---- Reference column info ----')\n for x in self.data.columns:\n if x == 'pt_id':\n print('pt_id: unique patient identifier')\n elif x == 'population_label':\n print('population_label: name of gated population single cell is associated to')\n elif x == 'cluster_id':\n print('cluster_id: name of cluster single cell is associated to, in the case of meta-clustering, '\n 'each row will have a unique sample_id and cluster_id combination')\n elif x == 'meta_cluster_id':\n print('meta_cluster_id: name of meta-cluster that each row (corresponding to a cluster) is associated '\n 'to')\n elif x == 'sample_id':\n print('sample_id: for meta-clustering, each row corresponds to a unique cluster within a sample, this '\n 'column pertains to the unique identifier for the origin sample')\n else:\n desc = meta_dict_lookup(x)\n if not 
desc:\n print(f'{x}: no description available')\n else:\n print(f'{x}: {desc}')\n print('-----------------------------')\n return self.data.info()", "title": "" }, { "docid": "ac606b0895784f3b7113cf0daffad326", "score": "0.4796609", "text": "def metadata(self) -> TGResultSetMetaData:", "title": "" }, { "docid": "3dd67b9c4da48e2b1ba131d4b676c909", "score": "0.4793529", "text": "def getObject(self):\n\n if self.tag == \"database\":\n obj = self.db\n elif self.tag == \"dataset\":\n obj = self.db.getDataset(self.name)\n else:\n obj = self.db.getObjFromDataset(self.name)\n\n return obj", "title": "" }, { "docid": "ec346069543b7ea7ee256c68969f4845", "score": "0.47890574", "text": "def get_space(self):\n fbinfo = {}\n try:\n client = flashblade.Client(target=self.endpoint,\n api_token=self.apitoken,\n user_agent='Pure_Nagios_plugin/0.2.0')\n if self.type == 'file-system' and self.volname:\n res = client.get_file_systems(names=[self.volname])\n elif self.type == 'object-store' and self.volname:\n res = client.get_buckets(names=[self.volname])\n else:\n res = client.get_arrays_space(type=self.type)\n if isinstance(res, flashblade.ValidResponse):\n fbinfo = res.items.next()\n \n except Exception as e:\n raise nagiosplugin.CheckError('FB REST call returned \"{}\"'.format(e))\n return(fbinfo)", "title": "" }, { "docid": "fc9fb90614e34854e76c26246b3c16d7", "score": "0.4788073", "text": "def metadata(self):\n if self._surface_dataset:\n return self._surface_dataset.metadata\n else:\n return {}", "title": "" }, { "docid": "965a29d1053d9db157d4e3c73e7beec1", "score": "0.47787777", "text": "def product_info(self):\n return self.query(ProductInfo)", "title": "" }, { "docid": "1a2d80fdc2506215521989c0df36154e", "score": "0.47678664", "text": "def getTableDescriptor(self, table):\n pass", "title": "" }, { "docid": "a3efc68cd24c96f22cd30747297cb3ff", "score": "0.4766846", "text": "def resource(self):\n return self._json_data.get('resource')", "title": "" }, { "docid": "27fa50859626f15e2898fa6503d1d202", "score": "0.4765653", "text": "def get_table(self, tid):\n table = {\"data\": []}\n page, pages = 1, None\n\n while pages is None or page <= pages:\n resp = self.tables.get_entry(\n pk=tid, _fields=[\"_all\"], data_page=page, data_per_page=1000\n ).result()\n table[\"data\"].extend(resp[\"data\"])\n if pages is None:\n pages = resp[\"total_data_pages\"]\n table[\"columns\"] = resp[\"columns\"]\n page += 1\n\n return pd.DataFrame.from_records(\n table[\"data\"], columns=table[\"columns\"], index=table[\"columns\"][0]\n )", "title": "" }, { "docid": "609a06854c7b48a6deecaa33488245e4", "score": "0.47656187", "text": "def get_table_metadata(self):\n return {\n 'num_items': self.table.item_count,\n 'primary_key_name': self.table.key_schema[0],\n 'status': self.table.table_status,\n 'bytes_size': self.table.table_size_bytes,\n 'global_secondary_indices': self.table.global_secondary_indexes\n }", "title": "" }, { "docid": "5ddac130e38fd53d66d82e05161e2da5", "score": "0.47633773", "text": "def fetch(self, table_name, primary_id):\n return r.db(self._name).table(table_name)\\\n .get(primary_id).run(self._conn)", "title": "" }, { "docid": "243b641190592a752eb31a8d0b181339", "score": "0.47447824", "text": "def describe(self, table):\n if table not in self.get_metadata().tables:\n print(\"Table not found: %s\" % table)\n return\n tbl = self.get_metadata().tables[table]\n\n def nullstr(nullable):\n return 'NULL' if nullable else 'NOT NULL'\n\n def namestr(c):\n return ('*%s' if c.primary_key else '%s') % c.name\n\n with 
pager() as out:\n items = ((namestr(c), c.type, nullstr(c.nullable))\n for c in tbl.columns)\n out.write(b'Columns' + b'\\n')\n asciitable.draw(\n FakedResult(sorted(items), 'Name Type Nullable'.split()),\n out, paginate=True,\n max_fieldsize=5000)\n out.write(b'\\n')\n out.write(b'Primary Key (*)\\n')\n out.write(b'---------------\\n')\n pk = ', '.join(c.name for c in tbl.columns if c.primary_key)\n out.write(b' ')\n if not pk:\n out.write(b'(None Found!)')\n else:\n out.write(pk.encode('utf8'))\n out.write(b'\\n\\n')\n out.write(b'Foreign Keys\\n')\n out.write(b'------------\\n')\n fks = self.get_metadata().foreign_keys(table)\n fk = None\n for fk in fks:\n out.write((' %s\\n' % str(fk)).encode('utf8'))\n if fk is None:\n out.write(b' (None Found)')\n out.write(('\\n\\nReferences to %s\\n' % table).encode('utf8'))\n out.write(b'--------------' + b'-' * len(table) + b'\\n')\n fks = self.get_metadata().fields_referencing(table)\n fk = None\n for fk in fks:\n out.write(b' ' + str(fk).encode('utf8') + b'\\n')\n if fk is None:\n out.write(b' (None found)\\n')\n out.write(b'\\n\\nIndexes\\n')\n\n def items():\n for idx in self.get_metadata().indexes(table):\n yield (idx.name, ', '.join(c.name for c in idx.columns),\n idx.unique)\n asciitable.draw(\n FakedResult(sorted(items()), 'Name Columns Unique'.split()),\n out, paginate=True, max_fieldsize=5000)", "title": "" }, { "docid": "4a3b74ec2e3cae1ab0304809a0b6a91d", "score": "0.4736733", "text": "def get(self, table, tget):\n self.send_get(table, tget)\n return self.recv_get()", "title": "" }, { "docid": "e458840aa4d5dc3568591279e1a817d5", "score": "0.47322056", "text": "def GetResourceMetadata(self):\n metadata = super(GcpTpu, self).GetResourceMetadata()\n metadata.update({\n 'project': self.project,\n 'cloud': self.CLOUD\n })\n return metadata", "title": "" }, { "docid": "4927cefca2e72f253eb224a7f6391f49", "score": "0.47255972", "text": "def get_object_table(request):\n\n storage_url = request.session.get('storage_url', '')\n auth_token = request.session.get('auth_token', '')\n container = request.session.get('container')\n prefix = request.session.get('prefix')\n\n try:\n meta, objects = client.get_container(\n storage_url,\n auth_token,\n container,\n delimiter='/',\n prefix=prefix)\n\n except client.ClientException:\n messages.add_message(request, messages.ERROR, _(\"Access denied.\"))\n return redirect(containerview)\n\n prefixes = prefix_list(prefix)\n pseudofolders, objs = pseudofolder_object_list(objects, prefix)\n base_url = get_base_url(request)\n account = storage_url.split('/')[-1]\n\n return JsonResponse({\n 'success': True,\n \"data\": {\n 'container': container,\n 'objects': objs,\n 'folders': pseudofolders,\n 'folder_prefix': prefix\n }\n })", "title": "" }, { "docid": "5438a7e74a582596b300ac896b57d193", "score": "0.47162598", "text": "def table_overview(\n self,\n region_name: str,\n table_name: str,\n ) -> str:\n return (\n f\"https://{region_name}.console.aws.amazon.com/dynamodbv2/\"\n f\"home?region={region_name}#\"\n f\"table?initialTagKey=&name={table_name}&tab=overview\"\n )", "title": "" }, { "docid": "8aeb14145e5fcdc0f1525ad97af491e0", "score": "0.47025532", "text": "def csoundTableGet(*args):\n return _csnd.csoundTableGet(*args)", "title": "" }, { "docid": "da56d2c1b9f5bb5d500b4b6ba6e1c8fc", "score": "0.4697068", "text": "def _get_data(mp, table=None):\n if table:\n return mp.data[table]\n else:\n return mp.data", "title": "" }, { "docid": "1e5deddebffba56e63e4487e705dfb23", "score": "0.46960324", "text": "def get(self, 
table, key):\n table = self.__get_table(table)\n return table.get_item(Key=make_key(key))['Item']", "title": "" }, { "docid": "1eb8e54fd4224235bb00cf3cc41a432b", "score": "0.46901804", "text": "def dict_for_table(resource: Any) -> dict:\n _dict = {}\n for column in resource.__table__.columns:\n _dict[column.name] = str(getattr(resource, column.name))\n return _dict", "title": "" }, { "docid": "8103db137dce149de57ae1b7b87543fe", "score": "0.46885788", "text": "def get_timeout(self):\n return super(GetTableRequest, self).get_timeout()", "title": "" }, { "docid": "871ebad0c52503709e43c9d54be93874", "score": "0.46880096", "text": "def test_sample_table_queryable():\r\n # Get attributes\r\n enterprise_table_name, _, _, _, _, region_name = get_sample_table_attributes()\r\n # Attempt GET\r\n dynamodb = boto3.resource('dynamodb', region_name=region_name)\r\n pp = pprint.PrettyPrinter(indent=2)\r\n \r\n table = dynamodb.Table(enterprise_table_name)\r\n response = table.get_item(Key={\"enterprise\": \"healthInsuranceGuys\"})\r\n\r\n if response: \r\n print(\"\\n------------SUCCESS------------\\nRESPONSE:\")\r\n pp.pprint(response)\r\n\r\n else:\r\n print(\"FAILED\")", "title": "" }, { "docid": "6330d389d3f06c9a257f97130b42bf87", "score": "0.46855998", "text": "def get(self):\n return self.basicModel(self.api.get)", "title": "" }, { "docid": "3f4efaa14da13e783707b59168b53270", "score": "0.4684304", "text": "def retrieve_tabel_data(table, db):\n # divine a dictonary\n result_dict = {}\n cursor = db.cursor()\n\n # retrieve all items from selected table\n cursor.execute(\"SELECT * FROM \" + table)\n myresult = cursor.fetchall()\n\n # for each item in myresult\n for item in myresult:\n result_dict[item[1]] = item[0]\n return result_dict", "title": "" }, { "docid": "08ce8d0787e7c34f99c6a1ff579cc00f", "score": "0.46839997", "text": "def TABLE(self):\n return \"table\"", "title": "" }, { "docid": "250283f2e2304e194f142db736060a20", "score": "0.4681998", "text": "def _metadata_table(self):\n\n return _make_metadata_table(self)", "title": "" }, { "docid": "11c9b1a8e253f7e3c31c5aa5238a6bfe", "score": "0.4679542", "text": "def describe_table(self, table, schema=\"public\", fetch_through_pandas=True, fail_silently=False):\n\n sql_query = f\"\"\"SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='{schema}' AND table_name='{table}'\"\"\"\n return self.query(sql_query, fetch_through_pandas=fetch_through_pandas, fail_silently=fail_silently)", "title": "" }, { "docid": "35861161fbf9cc4be43be6e8eff4d398", "score": "0.46626535", "text": "def get(self, table: str, primary_key: str=None):\n if primary_key:\n return self.session.execute(f\"SELECT * FROM {TABLE_NAMES[table]} WHERE {TABLE_FORMAT[table][0]}=%s\", [primary_key]).one()\n return self.session.execute(f\"SELECT * FROM {TABLE_NAMES[table]}\")", "title": "" }, { "docid": "9dec29afdc5c1807bfd4ce12913d698c", "score": "0.4653919", "text": "def getCardInformation( self, query_params = {} ):\n return self.fetchJson(\n uri_path = self.base_uri,\n query_params = query_params\n )", "title": "" }, { "docid": "3a733effba836e2b0dbb1eb353f42895", "score": "0.4653457", "text": "def get_resource(self):\n return self.resource", "title": "" }, { "docid": "b04839870b150b7e5fc224b52756ad63", "score": "0.46490985", "text": "def get_info():\n\n year = date.today().year\n month = date.today().month\n day = date.today().day\n name = '{}_{}_{}.db'.format(year, month, day)\n cur = sqlite3.connect('../data/db/{}'.format(name)).cursor()\n cur.execute(\"\"\"SELECT * FROM sensors 
WHERE \"rowid\" = (SELECT max(\"rowid\") FROM sensors)\"\"\")\n # time_utc real,\n # temperature_1 real,\n # temperature_2 real,\n # temperature_3 real,\n # humidity real,\n # pressure real,\n # CO2 real,\n # CO real,\n # voltage_system real,\n # voltage_heater real,\n # gyro_x real,\n # gyro_y real,\n # gyro_z real,\n res = cur.fetchone()\n print(res)\n tm, t1, t2, t3, h, p_1, p_2, c2, c, v_s, v_h, g_x, g_y, g_z, a_x, a_y, a_z = res\n # cur.close()\n p = randint(1000, 1400)\n print(t1, t2, p)\n return jsonify({\n 'temperature': [t1, t2, t3],\n 'pressure': [p_1, p_2],\n 'humidity': h,\n 'CO2': c2,\n 'fire': c > 3,\n 'voltageSystem': v_s,\n 'voltageHeater': v_h,\n 'gyro': {\n 'x': g_x,\n 'y': g_y,\n 'z': g_z\n },\n 'accel': {\n 'x': a_x,\n 'y': a_y,\n 'z': a_z\n }\n })", "title": "" }, { "docid": "5b2adc83688b7934218bcdef299575cc", "score": "0.46440557", "text": "def test_get_table(db_connection):\n with db_connection.cursor() as curs:\n t1 = insert_restaurant_table(curs, 3, 4, 5, 'ellipse')\n staff = insert_staff(curs, 'gcostanza', 'management')\n insert_event(curs, str(Event.ready), t1, staff)\n\n table = mg.get_table(db_connection, 1)\n assert table.rt_id == 1\n assert table.width == 4\n assert table.height == 5\n assert table.capacity == 3\n assert table.shape is Shape.ellipse\n assert table.state is State.available\n assert table.latest_event is Event.ready", "title": "" }, { "docid": "06256fb09cdaf0815dfc006db6ae2f8b", "score": "0.4639797", "text": "def describe_table(self, name):\n return self.layer1.describe_table(name)", "title": "" }, { "docid": "5b11811f8c723b49988481b8b30740ca", "score": "0.46352604", "text": "def get_metadata():", "title": "" }, { "docid": "f3b72790acf644dbf8ec1b9e950e68e1", "score": "0.46326032", "text": "def get_projects_table(self):\n return self._projects_table", "title": "" }, { "docid": "668e5ddccf61897935367f00137bc538", "score": "0.4629201", "text": "def GetNodeTable(Network, Context):\n return _snap.TTable_GetNodeTable(Network, Context)", "title": "" }, { "docid": "30249c261db0c662383bdc06fb3f276c", "score": "0.46275714", "text": "def return_table_type(self) -> 'outputs.StandardSqlTableTypeResponse':\n return pulumi.get(self, \"return_table_type\")", "title": "" } ]
8175c1e21206db487ee07a2ae912ec8d
Test Ansible module loader.
[ { "docid": "d76070268e34b061f597a49663e7c50a", "score": "0.7000035", "text": "def test_resolver_module_loader(resolver):\n with patch(\"salt.modules.ansiblegate.importlib\", MagicMock()), patch(\n \"salt.modules.ansiblegate.importlib.import_module\", lambda x: x\n ):\n assert resolver.load_module(\"four.five.six\") == \"ansible.modules.four.five.six\"", "title": "" } ]
[ { "docid": "0c1959200c05694f42caeea52eceb129", "score": "0.65836585", "text": "def test_ansible_module_help(resolver):\n\n class Module(object):\n \"\"\"\n An ansible module mock.\n \"\"\"\n\n __name__ = \"foo\"\n DOCUMENTATION = \"\"\"\n---\none:\n text here\n---\ntwo:\n text here\ndescription:\n describe the second part\n \"\"\"\n\n with patch.object(ansible, \"_resolver\", resolver), patch.object(\n ansible._resolver, \"load_module\", MagicMock(return_value=Module())\n ):\n ret = ansible.help(\"dummy\")\n assert sorted(\n ret.get('Available sections on module \"{0}\"'.format(Module().__name__))\n ) == [\"one\", \"two\"]\n assert ret.get(\"Description\") == \"describe the second part\"", "title": "" }, { "docid": "8cf42d0d0cb25c560941584b6151ebc6", "score": "0.65442586", "text": "def test_resolver_module_loader_import_failure(resolver):\n with patch(\"salt.modules.ansiblegate.importlib\", MagicMock()), patch(\n \"salt.modules.ansiblegate.importlib.import_module\", lambda x: x\n ):\n with pytest.raises(LoaderError) as loader_error:\n resolver.load_module(\"something.strange\")", "title": "" }, { "docid": "72db34e8978831ddaf2732ad5060dafd", "score": "0.64625317", "text": "def test_import():\n import deast", "title": "" }, { "docid": "7e19b20448b657db92dbd597bf8f404a", "score": "0.64368254", "text": "def test_module(self):\r\n bento_info = \"\"\"\\\r\nName: foo\r\n\r\nLibrary:\r\n SubDirectory: lib\r\n Modules: foo\r\n\"\"\"\r\n\r\n r_section = InstalledSection.from_source_target_directories(\"pythonfiles\",\r\n \"foo\",\r\n \"$_srcrootdir/../lib\",\r\n \"$sitedir\",\r\n [\"foo.py\"])\r\n r_sections = {\"pythonfiles\":\r\n {\"foo\": r_section}}\r\n self._test_installed_sections(bento_info, r_sections)", "title": "" }, { "docid": "71b299668572037511cc81a20227003f", "score": "0.6414092", "text": "def test_module(self):\n bento_info = \"\"\"\\\nName: foo\n\nLibrary:\n SubDirectory: lib\n Modules: foo\n\"\"\"\n\n r_section = InstalledSection.from_source_target_directories(\"pythonfiles\",\n \"foo\",\n \"$_srcrootdir/../lib\",\n \"$sitedir\",\n [\"foo.py\"])\n r_sections = {\"pythonfiles\":\n {\"foo\": r_section}}\n self._test_installed_sections(bento_info, r_sections)", "title": "" }, { "docid": "b4cb8c84436c86c8fb530d3f099915e0", "score": "0.6404243", "text": "def test_import_clissh_module():\n module_name = \"clissh\"\n try:\n from testlib import clissh\n\n clissh.CLISSH(None)\n except ImportError as err:\n pytest.fail(\"Import failure in '%s' module: %s\" % (module_name, err))", "title": "" }, { "docid": "27cf100d14da4568b07e7fc8ec8af372", "score": "0.63837457", "text": "def test_modulesSimpleFlow(env):\n checkSampleModules(env)", "title": "" }, { "docid": "a5e821c3c6e9d26ff6d96dfa92f76965", "score": "0.63811857", "text": "def test_load_module():\n load_module(\n \"packages.fetchai.connections.gym.connection\",\n Path(ROOT_DIR)\n / \"packages\"\n / \"fetchai\"\n / \"connections\"\n / \"gym\"\n / \"connection.py\",\n )", "title": "" }, { "docid": "6b3849c0f0512d7d3d6a0002940a7e45", "score": "0.63347656", "text": "def testImportPythonModule(self):\n module = dependencies._ImportPythonModule(u'os')\n self.assertIsNotNone(module)\n\n module = dependencies._ImportPythonModule(u'bogus')\n self.assertIsNone(module)", "title": "" }, { "docid": "b397a91a67da5d03fb6a3ab0e1b582b7", "score": "0.63295406", "text": "def test_packages(host):\n # get variables from file\n ansible_vars = host.ansible(\"include_vars\", \"file=main.yml\")\n # check dependencies and Uyuni packages\n for pkg in 
ansible_vars[\"ansible_facts\"][\"core_packages\"]:\n assert host.package(pkg).is_installed", "title": "" }, { "docid": "1f9822209b29e493178c20b2fe41e913", "score": "0.6326804", "text": "def test_load(self):\n importer = Importer('module')\n self.assertFalse(importer.loaded)\n\n importer.load()\n self.assertTrue(importer.loaded)", "title": "" }, { "docid": "da230ba32d25f1b110fabd5d0b95a2c4", "score": "0.6275179", "text": "def test_ModuleLoading(self):\n tm = TestManager()\n\n srd = SysrepodDaemonTester(\"Srd\")\n tester1 = SysrepoTester(\"First\", sr.SR_DS_STARTUP, sr.SR_CONN_DAEMON_REQUIRED, False)\n tester2 = SysrepoTester(\"Second\", sr.SR_DS_STARTUP, sr.SR_CONN_DAEMON_REQUIRED, False)\n tester3 = SysrepoTester(\"Third\", sr.SR_DS_STARTUP, sr.SR_CONN_DAEMON_REQUIRED, False)\n tester4 = SysrepoTester(\"Fourth\", sr.SR_DS_STARTUP, sr.SR_CONN_DAEMON_REQUIRED, False)\n\n\n srd.add_step(srd.startDaemonStep)\n tester1.add_step(tester1.waitStep)\n tester2.add_step(tester2.waitStep)\n tester3.add_step(tester3.waitStep)\n tester4.add_step(tester4.waitStep)\n\n srd.add_step(srd.waitStep)\n tester1.add_step(tester1.restartConnection)\n tester2.add_step(tester2.restartConnection)\n tester3.add_step(tester3.restartConnection)\n tester4.add_step(tester4.restartConnection)\n\n srd.add_step(srd.waitStep)\n tester1.add_step(tester1.getItemsStepExpectedCount, \"/test-module:main/*\", 19)\n tester2.add_step(tester2.getItemsStepExpectedCount, \"/test-module:main/*\", 19)\n tester3.add_step(tester3.getItemsStepExpectedCount, \"/test-module:main/*\", 19)\n tester4.add_step(tester4.getItemsStepExpectedCount, \"/test-module:main/*\", 19)\n\n srd.add_step(srd.stopDaemonStep)\n\n tm.add_tester(srd)\n tm.add_tester(tester1)\n tm.add_tester(tester2)\n tm.add_tester(tester3)\n tm.add_tester(tester4)\n tm.run()", "title": "" }, { "docid": "107f7839252df6199f6fa096049e72db", "score": "0.6273872", "text": "def main():\n module = AnsibleModule(\n argument_spec = dict(\n state = dict(default='present', choices=['present', 'absent']),\n name = dict(required=True),\n enabled = dict(required=True, type='bool'),\n something = dict(aliases=['whatever'])\n )\n )", "title": "" }, { "docid": "50fa999a80207ec3c93d4a6e5289951d", "score": "0.62716967", "text": "def test_plugin_import():\n stdplugins = import_plugins_package(True)\n assert isinstance(stdplugins, types.ModuleType)\n __import__('stdplugins.counter')\n __import__('stdplugins.filter')", "title": "" }, { "docid": "fd4e040d14c4c6f328b83b3a6de81735", "score": "0.62671375", "text": "def test_module_imports(self):\n apps = [\n 'testing',\n 'testing.management',\n 'testing.management.commands',\n 'testing.views',\n 'testing.helper',\n ]\n for a in apps:\n self.assertTrue(module_exists(a))", "title": "" }, { "docid": "7dde3ab694b435a4a3ce225db13f55ff", "score": "0.62637305", "text": "def test_command_module():\n assert dockerfile.Command.__module__ == dockerfile.__name__", "title": "" }, { "docid": "cf03a4a51c356fb61113cbad1273750a", "score": "0.6205061", "text": "def setUp(self):\n config.Config.register_module(modules.DummyModule1)\n config.Config.register_module(modules.DummyModule2)\n config.Config.register_recipe(test_recipe)", "title": "" }, { "docid": "7a5462d4ea645567ac6b8ea65f2a93d9", "score": "0.6201355", "text": "def test_module_imports(self):\n apps = (\n 'helpers.test',\n 'helpers.test.base',\n 'helpers.test.utils',\n 'helpers.test.settings',\n )\n for a in apps:\n self.assertTrue(module_exists(a))", "title": "" }, { "docid": "fe0323f70c7ff6c410d4ccfc785f5bdc", 
"score": "0.6191816", "text": "def test_plugin_loader():\n for group in pybtex.plugin._DEFAULT_PLUGINS:\n for name in pybtex.plugin.enumerate_plugin_names(group):\n pybtex.plugin.find_plugin(group, name)", "title": "" }, { "docid": "a38ca9196205b12f5ac2b27f37eafd90", "score": "0.6175929", "text": "def get_units_ansible_python_path(args, test_context): # type: (UnitsConfig, str) -> str\n if test_context == TestContext.controller:\n return get_ansible_python_path(args)\n\n try:\n cache = get_units_ansible_python_path.cache # type: ignore[attr-defined]\n except AttributeError:\n cache = get_units_ansible_python_path.cache = {} # type: ignore[attr-defined]\n\n python_path = cache.get(test_context)\n\n if python_path:\n return python_path\n\n python_path = create_temp_dir(prefix='ansible-test-')\n ansible_path = os.path.join(python_path, 'ansible')\n ansible_test_path = os.path.join(python_path, 'ansible_test')\n\n write_text_file(os.path.join(ansible_path, '__init__.py'), '', True)\n os.symlink(os.path.join(ANSIBLE_LIB_ROOT, 'module_utils'), os.path.join(ansible_path, 'module_utils'))\n\n if data_context().content.collection:\n # built-in runtime configuration for the collection loader\n make_dirs(os.path.join(ansible_path, 'config'))\n os.symlink(os.path.join(ANSIBLE_LIB_ROOT, 'config', 'ansible_builtin_runtime.yml'), os.path.join(ansible_path, 'config', 'ansible_builtin_runtime.yml'))\n\n # current collection loader required by all python versions supported by the controller\n write_text_file(os.path.join(ansible_path, 'utils', '__init__.py'), '', True)\n os.symlink(os.path.join(ANSIBLE_LIB_ROOT, 'utils', 'collection_loader'), os.path.join(ansible_path, 'utils', 'collection_loader'))\n\n # legacy collection loader required by all python versions not supported by the controller\n write_text_file(os.path.join(ansible_test_path, '__init__.py'), '', True)\n write_text_file(os.path.join(ansible_test_path, '_internal', '__init__.py'), '', True)\n elif test_context == TestContext.modules:\n # only non-collection ansible module tests should have access to ansible built-in modules\n os.symlink(os.path.join(ANSIBLE_LIB_ROOT, 'modules'), os.path.join(ansible_path, 'modules'))\n\n cache[test_context] = python_path\n\n return python_path", "title": "" }, { "docid": "6a7cc1c9e36f041ce9af285c093ad375", "score": "0.6110892", "text": "def test(self):\n\n if \"+python\" in self.spec:\n # Make sure we are importing the installed modules,\n # not the ones in the source directory\n for module in self.import_modules:\n self.run_test(\n self.spec[\"python\"].command.path,\n [\"-c\", \"import {0}\".format(module)],\n purpose=\"checking import of {0}\".format(module),\n work_dir=\"spack-test\",\n )", "title": "" }, { "docid": "6a7cc1c9e36f041ce9af285c093ad375", "score": "0.6110892", "text": "def test(self):\n\n if \"+python\" in self.spec:\n # Make sure we are importing the installed modules,\n # not the ones in the source directory\n for module in self.import_modules:\n self.run_test(\n self.spec[\"python\"].command.path,\n [\"-c\", \"import {0}\".format(module)],\n purpose=\"checking import of {0}\".format(module),\n work_dir=\"spack-test\",\n )", "title": "" }, { "docid": "9dc7dccde6f97f55b0eb0b247ee6d0cb", "score": "0.61075765", "text": "def test_import_nothing():\n modnames = loaded_vispy_modules('os', 2)\n assert_equal(modnames, set())", "title": "" }, { "docid": "354753d33ca7d51cbb986c2f017a13ee", "score": "0.61003864", "text": "def test_molecool_imported():\n assert \"molecool\" in sys.modules", "title": "" 
}, { "docid": "7a43fef7b0bcc03709250b813150d288", "score": "0.6095513", "text": "def run_module():\n\n # Module argument info\n module_args = {\n 'facts_key': {\n 'type': 'str',\n 'required': False,\n 'default': FACTS_KEY_DEFAULT\n }\n }\n\n # Seed result value\n result = {\n 'changed': False,\n 'failed': False,\n 'msg': ''\n }\n\n # Lean on boilerplate code in AnsibleModule class\n module = AnsibleModule(\n argument_spec=module_args,\n supports_check_mode=False\n )\n\n # Run logic\n # NOTE: This module does not support check mode right now so no special check handling\n err, result = run_normal(module.params, result)\n\n # Exit\n module.exit_json(**result)", "title": "" }, { "docid": "b8c38fd1e9dfb7bfe9bb732f3a0dd3fc", "score": "0.60826194", "text": "def test_import_vispy_util():\n modnames = loaded_vispy_modules('vispy.util', 2)\n assert_equal(modnames, set(['vispy', 'vispy.util']))", "title": "" }, { "docid": "fa4fbca07e77c420afccce9d6e6b4e15", "score": "0.6075052", "text": "def test_load_modules(main_menu_mock, models, db):\n # https://github.com/pytest-dev/pytest/issues/3697\n # caplog not working for some reason.\n from empire.server.core.module_service import ModuleService\n\n with catch_logs(\n level=logging.INFO, logger=logging.getLogger(ModuleService.__module__)\n ) as handler:\n module_service = ModuleService(main_menu_mock)\n\n messages = [x.message for x in handler.records if x.levelno >= logging.WARNING]\n\n if messages:\n pytest.fail(\"warning messages encountered during testing: {}\".format(messages))\n\n assert len(module_service.modules) > 300\n assert len(db.query(models.Module).all()) > 300\n\n for key, module in module_service.modules.items():\n if not module.advanced.custom_generate:\n resp, err = module_service._generate_script(\n db, module, convert_options_to_params(module.options), None\n )\n\n # not gonna bother mocking out the csharp server right now.\n if err != \"csharpserver plugin not running\":\n # fail if a module fails to generate a script.\n assert (\n resp is not None and len(resp) > 0\n ), f\"No generated script for module {key}\"", "title": "" }, { "docid": "ada363188140f328facbb157c129a6db", "score": "0.6062666", "text": "def test_import_plugin(logger):\n assert actions.utils.import_plugin(\"http\", \"client\")", "title": "" }, { "docid": "bf50b4fc50ad70e6fdf001e5530110bd", "score": "0.6057131", "text": "def test_resolver_module_loader_failure(resolver):\n mod = \"four.five.six\"\n with pytest.raises(ImportError) as import_error:\n resolver.load_module(mod)\n\n mod = \"i.even.do.not.exist.at.all\"\n with pytest.raises(LoaderError) as loader_error:\n resolver.load_module(mod)", "title": "" }, { "docid": "17431089d9a5891e21ff4148cd28d2d8", "score": "0.60490996", "text": "def run_module():\n\n # set up Ansible framework and define module parameters\n module_args = dict(\n url = dict(type = 'str', required = True),\n username = dict(type = 'str', required = True),\n password = dict(type = 'str', required = True, no_log = True),\n pages = dict(type = 'str', default = '5-100+'),\n posts = dict(type = 'str', default = '5-100+'),\n comments = dict(type = 'str', default = '1-30+'),\n state = dict(\n choices = ['present', 'absent', 'fresh'],\n default = 'present')\n )\n result = dict(\n changed = False,\n msg = 'initial dummy content message'\n )\n module = AnsibleModule(\n argument_spec = module_args,\n supports_check_mode = True\n )\n\n # Perform the actual function or this module.\n wp_api = xmlrpclib.ServerProxy('%s/xmlrpc.php' % module.params['url'],\n 
use_datetime=True)\n ranges = parse_value_ranges(module)\n result['posts'] = generate_posts(module, wp_api, ranges, 'post')\n result['pages'] = generate_posts(module, wp_api, ranges, 'page')\n result['comments'] = generate_comments(module, wp_api, ranges)\n result['ranges'] = ranges\n\n # Did we change anything?\n if result['posts']['added'] or result['posts']['removed'] or \\\n result['pages']['added'] or result['pages']['removed'] or \\\n result['comments']['added'] or result['comments']['removed']:\n result['changed'] = True\n\n module.exit_json(**result)", "title": "" }, { "docid": "9a30a7e6968a92d90d7e42d7f4582dee", "score": "0.6041441", "text": "def main(argv=sys.argv):\n Degoss(argv, AnsibleModule(\n argument_spec=dict(\n clean=dict(type='bool', required=False, default=True),\n clean_on_failure=dict(type='bool', required=False, default=True),\n debug=dict(type='bool', required=False, default=False),\n facts=dict(type='dict', required=False, default='{}'),\n test_dir=dict(type='path', required=True),\n test_file=dict(type='str', required=True),\n tmp_root=dict(type='path', required=True),\n variables=(dict(type='dict', required=False, default='{}')),\n version=dict(type='str', required=False, default='latest'),\n )\n )).execute()", "title": "" }, { "docid": "272ccbef6c364229f65e1e831793525d", "score": "0.6029893", "text": "def run_module():\n\n # Module argument info\n module_args = {\n 'facts': {\n 'type': 'bool',\n 'required': False,\n 'default': FACTS_DEFAULT\n },\n 'facts_key': {\n 'type': 'str',\n 'required': False,\n 'default': FACTS_KEY_DEFAULT\n }\n }\n\n # Seed result value\n result = {\n 'changed': False,\n 'failed': False,\n 'msg': ''\n }\n\n # Lean on boilerplate code in AnsibleModule class\n module = AnsibleModule(\n argument_spec=module_args,\n supports_check_mode=True\n )\n\n # Run logic\n # NOTE: This module makes no changes so check mode doesn't need to be handled\n # specially\n err, result = run_normal(module.params, result)\n\n # Exit\n module.exit_json(**result)", "title": "" }, { "docid": "ac1e9d65ea794271c5a5b0a9723d1239", "score": "0.6020677", "text": "def ansible_playbook(request):\n setup_playbooks = []\n teardown_playbooks = []\n\n if hasattr(request.node, \"get_marker\"):\n marker = request.node.get_marker('ansible_playbook_setup')\n setup_ms = [marker] if marker is not None else []\n marker = request.node.get_marker('ansible_playbook_teardown')\n teardown_ms = [marker] if marker is not None else []\n else:\n # since pytest 4.0.0, markers api changed, see:\n # https://github.com/pytest-dev/pytest/pull/4564\n # https://docs.pytest.org/en/latest/mark.html#updating-code\n setup_ms = request.node.iter_markers('ansible_playbook_setup')\n teardown_ms = request.node.iter_markers('ansible_playbook_teardown')\n\n for marker in setup_ms:\n if len(marker.args) == 0:\n raise Exception(get_empty_marker_error(\"setup\"))\n setup_playbooks.extend(marker.args)\n for marker in teardown_ms:\n if len(marker.args) == 0:\n raise Exception(get_empty_marker_error(\"teardown\"))\n teardown_playbooks.extend(marker.args)\n\n if len(setup_playbooks) == 0 and len(teardown_playbooks) == 0:\n msg = (\n \"no ansible playbook is specified for the test case, \"\n \"please add a decorator like this one \"\n \"``@pytest.mark.ansible_playbook_setup('playbook.yml')`` \"\n \"or \"\n \"``@pytest.mark.ansible_playbook_teardown('playbook.yml')`` \"\n \"for ansible_playbook fixture to know which playbook to use\")\n raise Exception(msg)\n\n with runner(request, setup_playbooks, 
teardown_playbooks):\n yield", "title": "" }, { "docid": "97d54399e054089db0d01364740f3977", "score": "0.6006175", "text": "def test_modules():\n from askbot import REQUIREMENTS\n for module_name, pip_path in REQUIREMENTS.items():\n try_import(module_name, pip_path)", "title": "" }, { "docid": "489ae866a86a4a8ce33a7027b454d259", "score": "0.6003271", "text": "def test_class8_ex7a():\n base_path = \"../class8/exercises/exercise7\"\n cmd_list = [\"ansible-playbook\", \"exercise7.yml\"]\n std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)\n std_err = remove_ansible_warnings(std_err)\n assert return_code != 0", "title": "" }, { "docid": "744186d2632a5037c47415976ba80863", "score": "0.5998534", "text": "def test_run_as_a_module():\n exit_status = os.system('python -m tiingo_prices.tiingo_prices --help')\n assert exit_status == 0", "title": "" }, { "docid": "5125e4dc4e291988383393aa5fec6432", "score": "0.5995214", "text": "def test_host_guest():\n import sys\n sys.path.append('./scripts/')\n import host_guest", "title": "" }, { "docid": "6010c16b522f869b2f1dcf3309af1661", "score": "0.5995189", "text": "def test_import_nothing():\n modnames = loaded_modules('os', 2)\n assert modnames == set()", "title": "" }, { "docid": "d7c11e490636b339d347ded19af99913", "score": "0.59900653", "text": "def test_make_module_stub(mocker: Any) -> None:\n mocker.patch(\"ansiblelint.config.options.cache_dir\", return_value=\".\")\n assert options.cache_dir is not None\n with pytest.raises(SystemExit) as exc:\n _make_module_stub(module_name=\"\", options=options)\n assert exc.type == SystemExit\n assert exc.value.code == RC.INVALID_CONFIG", "title": "" }, { "docid": "c401db50ff9278c6bd7c9058e1e91ed5", "score": "0.5986301", "text": "def setUpModule():\n pass", "title": "" }, { "docid": "0db318d7f40db7ccd4fd9cbd2327602b", "score": "0.59833205", "text": "def test_get_imported_modules_from_file():\n assert (len(helper.get_imported_modules_from_file(file_name=__file__)) > 0)", "title": "" }, { "docid": "d6c9e4d449a317327b72c2e224979b30", "score": "0.5968398", "text": "def test(self):\n\n # Make sure we are importing the installed modules,\n # not the ones in the source directory\n for module in self.import_modules:\n self.run_test(\n inspect.getmodule(self).python.path,\n [\"-c\", \"import {0}\".format(module)],\n purpose=\"checking import of {0}\".format(module),\n work_dir=\"spack-test\",\n )", "title": "" }, { "docid": "a35bf7d81b598f44ba57e461e6f69d48", "score": "0.596463", "text": "def main():\r\n module = AnsibleModule(\r\n argument_spec=dict(\r\n state=dict(default=\"present\", choices=[\"present\", \"absent\"]),\r\n libertydir=dict(default=\"/opt/IBM/Liberty\", required=True),\r\n name=dict(required=True),\r\n ),\r\n supports_check_mode=True\r\n )\r\n\r\n state = module.params[\"state\"]\r\n libertydir = module.params[\"libertydir\"]\r\n name = module.params[\"name\"]\r\n server_dir = \"{0}/usr/servers/{1}\".format(libertydir, name)\r\n\r\n def raise_on_path_not_exist(path):\r\n \"\"\"\r\n Raises a module failure exception if path does not exist\r\n\r\n :param path: File path\r\n :return: None\r\n \"\"\"\r\n if not os.path.exists(path):\r\n module.fail_json(msg=\"{0} does not exists\".format(path))\r\n\r\n if state == \"present\":\r\n if module.check_mode:\r\n if not os.path.exists(libertydir):\r\n module.exit_json(\r\n changed=False,\r\n msg=\"module would not run {0} does not exist\".format(libertydir)\r\n )\r\n elif os.path.exists(server_dir):\r\n module.exit_json(\r\n 
changed=False,\r\n msg=\"{0} server already exist\".format(name)\r\n )\r\n else:\r\n module.exit_json(\r\n changed=True,\r\n msg=\"{0} server would be created\".format(name)\r\n )\r\n raise_on_path_not_exist(libertydir)\r\n child = subprocess.Popen(\r\n [\"{0}/bin/server create {1}\".format(libertydir, name)],\r\n shell=True,\r\n stdout=subprocess.PIPE,\r\n stderr=subprocess.PIPE\r\n )\r\n stdout_value, stderr_value = child.communicate()\r\n if child.returncode != 0:\r\n module.fail_json(\r\n msg=\"Failed to create liberty server {0}\".format(name),\r\n stdout=stdout_value,\r\n stderr=stderr_value\r\n )\r\n module.exit_json(\r\n changed=True,\r\n msg=\"{0} server created successfully\".format(name),\r\n stdout=stdout_value\r\n )\r\n\r\n if state == \"absent\":\r\n if module.check_mode:\r\n if not os.path.exists(libertydir):\r\n module.exit_json(\r\n changed=False,\r\n msg=\"module would not run {0} does not exist\".format(libertydir)\r\n )\r\n elif os.path.exists(server_dir):\r\n module.exit_json(\r\n changed=False,\r\n msg=\"{0} server already exist\".format(name)\r\n )\r\n else:\r\n module.exit_json(\r\n changed=True,\r\n msg=\"{0} server would be created\".format(name)\r\n )\r\n raise_on_path_not_exist(libertydir)\r\n if os.path.exists(server_dir):\r\n shutil.rmtree(\r\n \"{0}/usr/servers/{1}\".format(libertydir, name),\r\n ignore_errors=True,\r\n onerror=None\r\n )\r\n module.exit_json(\r\n changed=True,\r\n msg=\"{0} server removed successfully\".format(name)\r\n )\r\n module.exit_json(\r\n changed=False,\r\n msg=\"{0} server already removed\".format(name)\r\n )", "title": "" }, { "docid": "ebc0e434e20921d4f0ad5f9fbe7ed2fe", "score": "0.5958604", "text": "def test_pytest_present():\n import pytest\n assert True", "title": "" }, { "docid": "e20dbc6846c58ef2a3cc0235381e00e7", "score": "0.5956634", "text": "def test_import_lookup():\n cls = interpolatr.util.lookup_import('interpolatr.config.YamlConfigSource')\n assert cls.__name__ == 'YamlConfigSource'", "title": "" }, { "docid": "ef6b295021b11d1329c2a2d7cfcd829f", "score": "0.5947681", "text": "def test_class8_ex6a():\n base_path = \"../class8/exercises/exercise6\"\n cmd_list = [\"ansible-playbook\", \"exercise6a.yml\"]\n std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)\n std_err = remove_ansible_warnings(std_err)\n assert std_err == \"\"\n assert return_code == 0\n assert re.search(r\"msg.*show_lldp\", std_out)", "title": "" }, { "docid": "36bba5e22f8c0fcb06ebb31d5fa4c734", "score": "0.59434515", "text": "def test_module(self):\n self.assertEqual(self.a_mod.name, \"module1\")\n self.assertEqual(self.a_mod.parent, self.a_rt)", "title": "" }, { "docid": "ebd82d673c553270ce8ee3afcfee1536", "score": "0.59425086", "text": "def setup_module():\n pass", "title": "" }, { "docid": "4203157ad5fc46fe08646dafd4e606a5", "score": "0.5940952", "text": "def test_load_from_example_module(self):\n self.settings = Settings.load_from_module(join(self.dir_name, \"example.py\"))\n eq_([\"host1\", \"host2\"], self.settings.environmentdefs[\"environment1\"])\n eq_([\"host1\"], self.settings.roledefs[\"role1\"])\n eq_([\"component1\"], self.settings.componentdefs[\"role1\"])", "title": "" }, { "docid": "e4035477de5651874d0f5d608dd4048f", "score": "0.5933423", "text": "def main():\n module = AnsibleModule(\n argument_spec=dict(\n pn_cliswitch=dict(required=False, type='str'),\n state=dict(required=True, type='str',\n choices=['update']),\n pn_intf=dict(required=False, type='str'),\n pn_crc_check_enable=dict(required=False, 
type='bool'),\n pn_dscp_map=dict(required=False, type='str'),\n pn_autoneg=dict(required=False, type='bool'),\n pn_speed=dict(required=False, type='str',\n choices=['disable', '10m', '100m', '1g',\n '2.5g', '10g', '25g', '40g',\n '50g', '100g']),\n pn_port=dict(required=True, type='str'),\n pn_vxlan_termination=dict(required=False, type='bool'),\n pn_pause=dict(required=False, type='bool'),\n pn_fec=dict(required=False, type='bool'),\n pn_loopback=dict(required=False, type='bool'),\n pn_loop_vlans=dict(required=False, type='str'),\n pn_routing=dict(required=False, type='bool'),\n pn_edge_switch=dict(required=False, type='bool'),\n pn_enable=dict(required=False, type='bool'),\n pn_description=dict(required=False, type='str'),\n pn_host_enable=dict(required=False, type='bool'),\n pn_allowed_tpid=dict(required=False, type='str',\n choices=['vlan', 'q-in-q', 'q-in-q-old']),\n pn_mirror_only=dict(required=False, type='bool'),\n pn_reflect=dict(required=False, type='bool'),\n pn_jumbo=dict(required=False, type='bool'),\n pn_egress_rate_limit=dict(required=False, type='str'),\n pn_eth_mode=dict(required=False, type='str',\n choices=['1000base-x', 'sgmii',\n 'disabled', 'GMII']),\n pn_fabric_guard=dict(required=False, type='bool'),\n pn_local_switching=dict(required=False, type='bool'),\n pn_lacp_priority=dict(required=False, type='str'),\n pn_send_port=dict(required=False, type='str'),\n pn_port_mac_address=dict(required=False, type='str'),\n pn_defer_bringup=dict(required=False, type='bool'),\n ),\n required_one_of=[['pn_intf', 'pn_crc_check_enable', 'pn_dscp_map',\n 'pn_speed', 'pn_autoneg',\n 'pn_vxlan_termination', 'pn_pause',\n 'pn_fec', 'pn_loopback', 'pn_loop_vlans',\n 'pn_routing', 'pn_edge_switch',\n 'pn_enable', 'pn_description',\n 'pn_host_enable', 'pn_allowed_tpid',\n 'pn_mirror_only', 'pn_reflect',\n 'pn_jumbo', 'pn_egress_rate_limit',\n 'pn_eth_mode', 'pn_fabric_guard',\n 'pn_local_switching', 'pn_lacp_priority',\n 'pn_send_port', 'pn_port_mac_address',\n 'pn_defer_bringup']],\n )\n\n # Accessing the arguments\n state = module.params['state']\n intf = module.params['pn_intf']\n crc_check_enable = module.params['pn_crc_check_enable']\n dscp_map = module.params['pn_dscp_map']\n autoneg = module.params['pn_autoneg']\n speed = module.params['pn_speed']\n port = module.params['pn_port']\n vxlan_termination = module.params['pn_vxlan_termination']\n pause = module.params['pn_pause']\n fec = module.params['pn_fec']\n loopback = module.params['pn_loopback']\n loop_vlans = module.params['pn_loop_vlans']\n routing = module.params['pn_routing']\n edge_switch = module.params['pn_edge_switch']\n enable = module.params['pn_enable']\n description = module.params['pn_description']\n host_enable = module.params['pn_host_enable']\n allowed_tpid = module.params['pn_allowed_tpid']\n mirror_only = module.params['pn_mirror_only']\n reflect = module.params['pn_reflect']\n jumbo = module.params['pn_jumbo']\n egress_rate_limit = module.params['pn_egress_rate_limit']\n eth_mode = module.params['pn_eth_mode']\n fabric_guard = module.params['pn_fabric_guard']\n local_switching = module.params['pn_local_switching']\n lacp_priority = module.params['pn_lacp_priority']\n send_port = module.params['pn_send_port']\n port_mac_address = module.params['pn_port_mac_address']\n defer_bringup = module.params['pn_defer_bringup']\n\n command = get_command_from_state(state)\n\n # Building the CLI command string\n cli = pn_cli(module)\n\n if command == 'port-config-modify':\n cli += ' %s ' % command\n if intf:\n cli += ' intf 
' + intf\n if crc_check_enable:\n if crc_check_enable is enable:\n cli += ' crc-check-enable '\n else:\n cli += ' crc-check-disable '\n if dscp_map:\n cli += ' dscp-map ' + dscp_map\n if autoneg:\n cli += ' autoneg '\n else:\n cli += ' no-autoneg '\n if speed:\n cli += ' speed ' + speed\n if port:\n cli += ' port ' + port\n if vxlan_termination:\n if vxlan_termination is True:\n cli += ' vxlan-termination '\n else:\n cli += ' no-vxlan-termination '\n if pause:\n cli += ' pause '\n else:\n cli += ' no-pause '\n if fec:\n cli += ' fec '\n else:\n cli += ' no-fec '\n if loopback:\n if loopback is True:\n cli += ' loopback '\n else:\n cli += ' no-loopback '\n if loop_vlans:\n cli += ' loop-vlans ' + loop_vlans\n if routing:\n cli += ' routing '\n else:\n cli += ' no-routing '\n if edge_switch:\n if edge_switch is True:\n cli += ' edge-switch '\n else:\n cli += ' no-edge-switch '\n if enable:\n cli += ' enable '\n else:\n cli += ' disable '\n if description:\n cli += ' description ' + description\n if host_enable:\n if host_enable is True:\n cli += ' host-enable '\n else:\n cli += ' host-disable '\n if allowed_tpid:\n cli += ' allowed-tpid ' + allowed_tpid\n if mirror_only:\n if mirror_only is True:\n cli += ' mirror-only '\n else:\n cli += ' no-mirror-receive-only '\n if reflect:\n if reflect is True:\n cli += ' reflect '\n else:\n cli += ' no-reflect '\n if jumbo:\n if jumbo is True:\n cli += ' jumbo '\n else:\n cli += ' no-jumbo '\n if egress_rate_limit:\n cli += ' egress-rate-limit ' + egress_rate_limit\n if eth_mode:\n cli += ' eth-mode ' + eth_mode\n if fabric_guard:\n if fabric_guard is True:\n cli += ' fabric-guard '\n else:\n cli += ' no-fabric-guard '\n if local_switching:\n if lical_switching:\n cli += ' local-switching '\n else:\n cli += ' no-local-switching '\n if lacp_priority:\n cli += ' lacp-priority ' + lacp_priority\n if send_port:\n cli += ' send-port ' + send_port\n if port_mac_address:\n cli += ' port-mac-address ' + port_mac_address\n if defer_bringup:\n if defer_bringup is True:\n cli += ' defer-bringup '\n else:\n cli += ' no-defer-bringup '\n\n run_cli(module, cli)", "title": "" }, { "docid": "0741015adf05318175c745ef885768f2", "score": "0.59280413", "text": "def test_import_dev_linux_host_module():\n module_name = \"dev_linux_host\"\n try:\n # define parameters for object constructor\n config = {'name': None, 'id': None, 'instance_type': None, \"ipaddr\": \"1.1.1.1\"}\n\n from testlib import dev_linux_host\n dev_linux_host.GenericLinuxHost(config, FakeOpts())\n dev_linux_host.IpNetworkNamespace(config, FakeOpts())\n except ImportError as err:\n pytest.fail(\"Import failure in '%s' module: %s\" % (module_name, err))", "title": "" }, { "docid": "38bbce03dfd59ebd2e573e2e937c8fb7", "score": "0.59215856", "text": "def test_loader(packagename, names):\n modulename = plugin_targets + packagename\n\n instance = crow2.plugin.Tracker(modulename)\n\n instance.load()\n\n assert len(instance.plugins) == len(names)\n assert set([plugin.myname for plugin in instance.plugins]) == set(names)\n\n with pytest.raises(crow2.plugin.AlreadyLoadedError):\n instance.load()", "title": "" }, { "docid": "2093c02be2a82c2634618d5aa2c0cb02", "score": "0.589232", "text": "def loadTestsFromModule(self, module, path=None):\n pass", "title": "" }, { "docid": "01c058db516955ad13261e6399c17ba1", "score": "0.58890057", "text": "def setup_module(module):\n filepath = getfilepath()\n # ensure it is a ulg file\n base, ext = os.path.splitext(filepath)\n if ext.lower() not in (\".ulg\") or not filepath:\n 
pytest.exit(\"Either no file present or not an .ulg file.\")", "title": "" }, { "docid": "a3b4bdb3a99f450a6f3fa7a08e1cdd70", "score": "0.5888202", "text": "def fixture_module():\n print 'fixture module, pousti se pro cely modul'", "title": "" }, { "docid": "2177a28a5e569b927508f0555d6f2d15", "score": "0.58624476", "text": "def run_module():\n # Modules arguments\n module_args = {\n 'meth': {'type': str, 'required': True},\n 'path': {'type': str, 'required': True},\n 'data': {'type': dict, 'required': False, 'default': {}},\n }\n # Initializes module, connection and API helper\n module = AnsibleModule(\n argument_spec=module_args,\n supports_check_mode=False)\n connection = Connection(module._socket_path)\n # ---\n try:\n # Run module\n status_code, data = connection.send_request(\n data=module.params['data'],\n path=module.params['path'],\n method=module.params['meth']\n )\n # Setup callback and terminate\n cback = status_code in [200,] and module.exit_json or module.fail_json\n cback(**{\n 'msg': data.get('msg', None),\n 'code': data.get('code', -1),\n 'data': data\n })\n except Exception as error:\n module.fail_json(**{\n 'msg': str(error),\n 'code': -1,\n 'data': {}\n })", "title": "" }, { "docid": "dc5213fd7e1267e9dda58185dc1f4518", "score": "0.58612275", "text": "def test_import_vispy():\n modnames = loaded_vispy_modules('vispy', 2)\n assert_equal(modnames, set(['vispy', 'vispy.util']))", "title": "" }, { "docid": "e0010df007683b5ef0eb99c224d4f6af", "score": "0.5860565", "text": "def testLoadRecipe(self):\n test_state = state.DFTimewolfState(config.Config)\n test_state.load_recipe(test_recipe.contents)\n # pylint: disable=protected-access\n self.assertIn('DummyModule1', test_state._module_pool)\n self.assertIn('DummyModule2', test_state._module_pool)\n self.assertEqual(len(test_state._module_pool), 2)", "title": "" }, { "docid": "bb267739553abffe88dfe9982f52ea73", "score": "0.5847197", "text": "def test_basic_load(self):\n self.assertEqual(self._task_config['name'], 'test-basics')", "title": "" }, { "docid": "0ad9d63012a2840f53829305c27845e1", "score": "0.58423007", "text": "def build():\n spec = dict(\n username=dict(type='str', required=True),\n permissions=dict(type='list', required=True),\n server=dict(type='str', required=True),\n ssl_auth=dict(type='dict')\n )\n return AnsibleModule(\n argument_spec=spec,\n supports_check_mode=True\n )", "title": "" }, { "docid": "6570908d6923a86b7e3975f62b0abae0", "score": "0.5840199", "text": "def testLoads(self):\n check_modules = LoadChecks('restrictions')\n self.assertTrue(check_modules['restrictions'])", "title": "" }, { "docid": "e2cae40e687bdf5cf63d8dd3af87b60b", "score": "0.58394545", "text": "def __init__(self, inventory_path='/etc/ansible/hosts/'):\n self.configureAnsible(inventory_path)", "title": "" }, { "docid": "5a98adb3a4efc0ccb03b97a77487a605", "score": "0.5833454", "text": "def test_import_project():\n modnames = loaded_modules(PROJECT_NAME, 2)\n assert modnames == set(MIN_MODULES)", "title": "" }, { "docid": "ed0dd4c68d79d2e2c31ddbc599635543", "score": "0.5829593", "text": "def test_plugin_help(testdir):\n\n result = testdir.runpytest('--help')\n result.stdout.fnmatch_lines([\n # Check for the github args section header\n 'pytest-ansible:',\n # Check for the specific args\n ' --inventory=ANSIBLE_INVENTORY, --ansible-inventory=ANSIBLE_INVENTORY',\n ' --host-pattern=ANSIBLE_HOST_PATTERN, --ansible-host-pattern=ANSIBLE_HOST_PATTERN',\n ' --connection=ANSIBLE_CONNECTION, --ansible-connection=ANSIBLE_CONNECTION',\n ' 
--user=ANSIBLE_USER, --ansible-user=ANSIBLE_USER',\n ' --check, --ansible-check',\n ' --module-path=ANSIBLE_MODULE_PATH, --ansible-module-path=ANSIBLE_MODULE_PATH',\n ' --become, --ansible-become',\n ' --become-method=ANSIBLE_BECOME_METHOD, --ansible-become-method=ANSIBLE_BECOME_METHOD',\n ' --become-user=ANSIBLE_BECOME_USER, --ansible-become-user=ANSIBLE_BECOME_USER',\n ' --ask-become-pass=ANSIBLE_ASK_BECOME_PASS, --ansible-ask-become-pass=ANSIBLE_ASK_BECOME_PASS',\n # Check for the marker in --help\n ' ansible (args)*Ansible integration',\n ])", "title": "" }, { "docid": "85c8370ca01154807fe739ce623e840c", "score": "0.5827011", "text": "def test_parambayes_imported():\n assert \"parambayes\" in sys.modules", "title": "" }, { "docid": "e34897d5e83e933244105db36469a9f7", "score": "0.5822436", "text": "def test_import_connpool_module():\n module_name = \"connpool\"\n try:\n from testlib import connpool\n connpool.ConnectionPool()\n except ImportError as err:\n pytest.fail(\"Import failure in '%s' module: %s\" % (module_name, err))", "title": "" }, { "docid": "48d3ec737830ba1a6eec3e7acf074687", "score": "0.5818825", "text": "def main():\n module = AnsibleModule(\n argument_spec=dict(\n pn_cliswitch=dict(required=False, type='str'),\n pn_action=dict(required=True, type='str',\n choices=['create', 'delete', 'modify']),\n pn_name=dict(required=True, type='str'),\n pn_scope=dict(required=True, type='str',\n choices=['local', 'fabric']),\n pn_vflow_id=dict(type='str'),\n pn_vnet=dict(type='str'),\n pn_l2net=dict(type='str'),\n pn_vlan=dict(type='str'),\n pn_inport=dict(type='str'),\n pn_outport=dict(type='str'),\n pn_ether_type=dict(type='str',\n choices=['ipv4', 'arp', 'wake', 'rarp', 'vlan', 'ipv6',\n 'mpls-uni', 'mpls-multi', 'jumbo', 'aoe', 'dot1X',\n 'lldp', 'lacp', 'ecp', 'macsec', 'ptp', 'fcoe',\n 'fcoe-init', 'qinq']),\n pn_src_mac=dict(type='str'),\n pn_src_mac_mask=dict(type='str'),\n pn_dst_mac=dict(type='str'),\n pn_dst_mac_mask=dict(type='str'),\n pn_src_ip=dict(type='str'),\n pn_src_ip_mask=dict(type='str'),\n pn_dst_ip=dict(type='str'),\n pn_dst_ip_mask=dict(type='str'),\n pn_src_port=dict(type='str'),\n pn_src_port_mask=dict(type='str'),\n pn_dst_port=dict(type='str'),\n pn_dst_port_mask=dict(type='str'),\n pn_dscp_start=dict(type='str'),\n pn_dscp_end=dict(type='str'),\n pn_dscp=dict(type='str'),\n pn_dscp_map=dict(type='str'),\n pn_tos_start=dict(type='str'),\n pn_tos_end=dict(type='str'),\n pn_tos=dict(type='str'),\n pn_vlan_pri=dict(type='str'),\n pn_ttl=dict(type='str'),\n pn_proto=dict(type='str'),\n pn_tcp_flags=dict(type='str',\n choices=['fin', 'syn', 'rst', 'push', 'ack',\n 'urg', 'ece', 'cwr']),\n pn_flow_class=dict(type='str'),\n pn_ingress_tunnel=dict(type='str'),\n pn_egress_tunnel=dict(type='str'),\n pn_bw_min=dict(type='str'),\n pn_bw_max=dict(type='str'),\n pn_burst_size=dict(type='str'),\n pn_vrouter_name=dict(type='str'),\n pn_precedence=dict(type='str'),\n pn_vflow_action=dict(type='str',\n choices=['none', 'drop', 'to-port', 'to-cpu', 'trap',\n 'copy-to-cpu', 'copy-to-port', 'check', 'setvlan',\n 'add-outer-vlan', 'set-tpid', 'to-port-set-vlan',\n 'tunnel-pkt', 'set-tunnel-id', 'to-span', 'cpu-rx',\n 'cpu-rx-tx', 'set-metadata', 'set-dscp', 'decap',\n 'set-dmac', 'to-next-hop-ip', 'set-dmac-to-port',\n 'to-ports-and-cpu', 'set-vlan-pri', 'tcp-seq-offset',\n 'tcp-ack-offset', 'l3-to-cpu-switch', 'set-smac',\n 'drop-cancel-trap']),\n pn_action_value=dict(type='str'),\n pn_action_mac_value=dict(type='str'),\n pn_action_nexthop_ip_value=dict(type='str'),\n 
pn_action_ports_value=dict(type='str'),\n pn_mirror=dict(type='str'),\n pn_process_mirror=dict(type='bool'),\n pn_switch_local=dict(type='bool',default=True),\n pn_log_packets=dict(type='bool'),\n pn_packet_log_max=dict(type='str'),\n pn_log_stats=dict(type='bool'),\n pn_stats_interval=dict(type='str'),\n pn_dur=dict(type='str'),\n pn_metadata=dict(type='str'),\n pn_transient=dict(type='bool'),\n pn_vxlan=dict(type='str'),\n pn_vxlan_ether_type=dict(type='str',\n choices=['ipv4', 'arp', 'wake', 'rarp', 'vlan', 'ipv6',\n 'mpls-uni', 'mpls-multi', 'jumbo', 'aoe', 'dot1X',\n 'lldp', 'lacp', 'ecp', 'macsec', 'ptp', 'fcoe',\n 'fcoe-init', 'qinq']),\n pn_vxlan_proto=dict(type='str',\n choices=['tcp', 'udp', 'icmp', 'igmp', 'ip', 'icmpv6']),\n pn_set_src_ip=dict(type='str'),\n pn_set_dst_ip=dict(type='str'),\n pn_set_src_port=dict(type='str'),\n pn_set_dst_port=dict(type='str'),\n pn_udf_name1=dict(type='str'),\n pn_udf_data1=dict(type='str'),\n pn_udf_data1_mask=dict(type='str'),\n pn_udf_name2=dict(type='str'),\n pn_udf_data2=dict(type='str'),\n pn_udf_data2_mask=dict(type='str'),\n pn_udf_name3=dict(type='str'),\n pn_udf_data3=dict(type='str'),\n pn_udf_data3_mask=dict(type='str'),\n pn_enable=dict(type='bool'),\n pn_table_name=dict(type='str'),\n pn_cpu_class=dict(type='str'),\n pn_dscp_action=dict(type='str'),\n )\n )\n\n # Accessing the arguments\n action = module.params['pn_action']\n command = 'vflow-' + action\n name = module.params['pn_name']\n scope = module.params['pn_scope']\n vflow_id = module.params['pn_vflow_id']\n vnet = module.params['pn_vnet']\n l2net = module.params['pn_l2net']\n vlan = module.params['pn_vlan']\n inport = module.params['pn_inport']\n outport = module.params['pn_outport']\n ether_type = module.params['pn_ether_type']\n src_mac = module.params['pn_src_mac']\n src_mac_mask = module.params['pn_src_mac_mask']\n dst_mac = module.params['pn_dst_mac']\n dst_mac_mask = module.params['pn_dst_mac_mask']\n src_ip = module.params['pn_src_ip']\n src_ip_mask = module.params['pn_src_ip_mask']\n dst_ip = module.params['pn_dst_ip']\n dst_ip_mask = module.params['pn_dst_ip_mask']\n src_port = module.params['pn_src_port']\n src_port_mask = module.params['pn_src_port_mask']\n dst_port = module.params['pn_dst_port']\n dst_port_mask = module.params['pn_dst_port_mask']\n dscp_start = module.params['pn_dscp_start']\n dscp_end = module.params['pn_dscp_end']\n dscp = module.params['pn_dscp']\n dscp_map = module.params['pn_dscp_map']\n tos_start = module.params['pn_tos_start']\n tos_end = module.params['pn_tos_end']\n tos = module.params['pn_tos']\n vlan_pri = module.params['pn_vlan_pri']\n ttl = module.params['pn_ttl']\n proto = module.params['pn_proto']\n tcp_flags = module.params['pn_tcp_flags']\n flow_class = module.params['pn_flow_class']\n ingress_tunnel = module.params['pn_ingress_tunnel']\n egress_tunnel = module.params['pn_egress_tunnel']\n bw_min = module.params['pn_bw_min']\n bw_max = module.params['pn_bw_max']\n burst_size = module.params['pn_burst_size']\n vrouter_name = module.params['pn_vrouter_name']\n precedence = module.params['pn_precedence']\n vflow_action = module.params['pn_vflow_action']\n action_value = module.params['pn_action_value']\n action_mac_value = module.params['pn_action_mac_value']\n action_nexthop_ip_value = module.params['pn_action_nexthop_ip_value']\n action_ports_value = module.params['pn_action_ports_value']\n mirror = module.params['pn_mirror']\n process_mirror = module.params['pn_process_mirror']\n log_packets = module.params['pn_log_packets']\n 
packet_log_max = module.params['pn_packet_log_max']\n log_stats = module.params['pn_log_stats']\n stats_interval = module.params['pn_stats_interval']\n dur = module.params['pn_dur']\n metadata = module.params['pn_metadata']\n transient = module.params['pn_transient']\n vxlan = module.params['pn_vxlan']\n vxlan_ether_type = module.params['pn_vxlan_ether_type']\n vxlan_proto = module.params['pn_vxlan_proto']\n set_src_ip = module.params['pn_set_src_ip']\n set_dst_ip = module.params['pn_set_dst_ip']\n set_src_port = module.params['pn_set_src_port']\n set_dst_port = module.params['pn_set_dst_port']\n udf_name1 = module.params['pn_udf_name1']\n udf_data1 = module.params['pn_udf_data1']\n udf_data1_mask = module.params['pn_udf_data1_mask']\n udf_name2 = module.params['pn_udf_name2']\n udf_data2 = module.params['pn_udf_data2']\n udf_data2_mask = module.params['pn_udf_data2_mask']\n udf_name3 = module.params['pn_udf_name3']\n udf_data3 = module.params['pn_udf_data3']\n udf_data3_mask = module.params['pn_udf_data3_mask']\n enable = module.params['pn_enable']\n table_name = module.params['pn_table_name']\n cpu_class = module.params['pn_cpu_class']\n dscp_action = module.params['pn_dscp_action']\n\n # Building the CLI command string\n cli = pn_cli(module)\n if action == 'delete':\n check_cli(module)\n if VFLOW_EXISTS is False:\n module.exit_json(\n skipped=True,\n msg='vFlow %s does not exist' % name\n )\n cli += ' %s name %s ' % (command, name)\n if vflow_id:\n cli += ' id %s ' % vflow_id\n\n else:\n check_cli(module)\n if action == 'modify':\n if VFLOW_EXISTS is False:\n module.exit_json(\n skipped=True,\n msg='vFlow %s does not exist' % name\n )\n cli += ' %s name %s ' % (command, name)\n if vflow_id:\n cli += ' id %s ' % vflow_id\n\n if action == 'create':\n if VFLOW_EXISTS is True:\n module.exit_json(\n skipped=True,\n msg='vFlow %s already exists ' % name\n )\n cli += ' %s name %s scope %s ' % (command, name, scope)\n if vnet:\n cli += ' vnet ' + vnet\n if l2net:\n cli += ' l2-net ' + l2net\n if vlan:\n cli += ' vlan ' + vlan\n\n if inport:\n cli += ' in-port ' + inport\n\n if outport:\n cli += ' out-port ' + outport\n\n if ether_type:\n cli += ' ether-type ' + ether_type\n\n if src_mac:\n cli += ' src-mac ' + src_mac\n\n if src_mac_mask:\n cli += ' src-mac-mask ' + src_mac_mask\n\n if dst_mac:\n cli += ' dst-mac ' + dst_mac\n\n if dst_mac_mask:\n cli += ' dst-mac-mask ' + dst_mac_mask\n\n if src_ip:\n cli += ' src-ip ' + src_ip\n\n if src_ip_mask:\n cli += ' src-ip-mask ' + src_ip_mask\n\n if dst_ip:\n cli += ' dst-ip ' + dst_ip\n\n if dst_ip_mask:\n cli += ' dst-ip-mask ' + dst_ip_mask\n\n if src_port:\n cli += ' src-port ' + src_port\n\n if src_port_mask:\n cli += ' src-port-mask ' + src_port_mask\n\n if dst_port:\n cli += ' dst-port ' + dst_port\n\n if dst_port_mask:\n cli += ' dst-port-mask ' + dst_port_mask\n\n if dscp_start:\n cli += ' dscp-start ' + dscp_start\n\n if dscp_end:\n cli += ' dscp-end ' + dscp_end\n\n if dscp:\n cli += ' dscp ' + dscp\n\n if dscp_map:\n cli += ' dscp-map ' + dscp_map\n\n if dscp_action:\n cli += ' action ' + dscp_action\n if tos_start:\n cli += ' tos-start ' + tos_start\n\n if tos_end:\n cli += ' tos-end ' + tos_end\n\n if tos:\n cli += ' tos ' + tos\n\n if vlan_pri:\n cli += ' vlan-pri ' + vlan_pri\n\n if ttl:\n cli += ' ttl ' + ttl\n\n if proto:\n cli += ' proto ' + proto\n\n if tcp_flags:\n cli += ' tcp-flags ' + tcp_flags\n\n if flow_class:\n cli += ' flow-class ' + flow_class\n\n if ingress_tunnel:\n cli += ' ingress-tunnel ' + ingress_tunnel\n\n if 
egress_tunnel:\n cli += ' egress-tunnel ' + egress_tunnel\n\n if bw_min:\n cli += ' bw-min ' + bw_min\n\n if bw_max:\n cli += ' bw-max ' + bw_max\n\n if burst_size:\n cli += ' burst-size ' + burst_size\n\n if vrouter_name:\n cli += ' vrouter-name ' + vrouter_name\n\n if precedence:\n cli += ' precedence ' + precedence\n\n if vflow_action:\n cli += ' action ' + vflow_action\n\n if action_value:\n cli += ' action-value ' + action_value\n\n if action_mac_value:\n cli += ' action-set-mac-value ' + action_mac_value\n\n if action_nexthop_ip_value:\n cli += ' action-to-next-hop-ip-value ' + action_nexthop_ip_value\n\n if action_ports_value:\n cli += ' action-to-ports-value ' + action_ports_value\n\n if mirror:\n cli += ' mirror ' + mirror\n\n if process_mirror is True:\n cli += ' process-mirror '\n if process_mirror is False:\n cli += ' no-process-mirror '\n\n if log_packets is True:\n cli += ' log-packets '\n if log_packets is False:\n cli += ' no-log-packets '\n\n if packet_log_max:\n cli += ' packet-log-max ' + packet_log_max\n\n if log_stats is True:\n cli += ' log-stats '\n if log_stats is False:\n cli += ' no-log-stats '\n\n if stats_interval:\n cli += ' stats-interval ' + stats_interval\n\n if dur:\n cli += ' dur ' + dur\n\n if metadata:\n cli += ' metadata ' + metadata\n\n if transient is True:\n cli += ' transient '\n if transient is False:\n cli += ' no-transient '\n\n if vxlan:\n cli += ' vxlan ' + vxlan\n\n if vxlan_ether_type:\n cli += ' vxlan-ether-type ' + vxlan_ether_type\n\n if vxlan_proto:\n cli += ' vxlan-proto ' + vxlan_proto\n\n if set_src_ip:\n cli += ' set-src ' + set_src_ip\n\n if set_dst_ip:\n cli += ' set-dst ' + set_dst_ip\n\n if set_src_port:\n cli += ' set-src-port ' + set_src_port\n\n if set_dst_port:\n cli += ' set-dst-port ' + set_dst_port\n\n if udf_name1:\n cli += ' udf-name1 ' + udf_name1\n\n if udf_data1:\n cli += ' udf-data1 ' + udf_data1\n\n if udf_data1_mask:\n cli += ' udf-data1-mask ' + udf_data1_mask\n\n if udf_name2:\n cli += ' udf-name2 ' + udf_name2\n\n if udf_data2:\n cli += ' udf-data2 ' + udf_data2\n\n if udf_data2_mask:\n cli += ' udf-data2-mask ' + udf_data2_mask\n\n if udf_name3:\n cli += ' udf-name3 ' + udf_name3\n\n if udf_data3:\n cli += ' udf-data3 ' + udf_data3\n\n if udf_data3_mask:\n cli += ' udf-data3-mask ' + udf_data3_mask\n\n if enable is True:\n cli += ' enable '\n if enable is False:\n cli += ' no-enable '\n\n if table_name:\n cli += ' table-name ' + table_name\n\n if cpu_class:\n cli += ' cpu-class ' + cpu_class\n\n run_cli(module, cli)", "title": "" }, { "docid": "bae4d14a131bc0b109f6720cde215401", "score": "0.5816743", "text": "def test_bbb_imports():", "title": "" }, { "docid": "3669d741bd1dda00587fc8bef9cb9a79", "score": "0.58146363", "text": "def setup_module():\n FakeLFW().fetch()", "title": "" }, { "docid": "41788fe62c07ca4ddc63884f33461b3a", "score": "0.5811761", "text": "def test_module_uses_installed_package_in_execute_script(tmp_path, caplog):\n\n image_dir = tmp_path / \"image\"\n module_dir = image_dir / \"modules\"\n\n image_dir.mkdir(exist_ok=True)\n\n # Build the module\n module_dir.mkdir(exist_ok=True)\n\n install_script = module_dir / \"install.sh\"\n install_script.write_text(\"\\n\".join([\"#!/bin/bash\", \"zip --version\"]))\n\n module_descriptor_file = module_dir / \"module.yaml\"\n module_descriptor_file.write_text(\n yaml.dump(\n {\n \"name\": \"test_module\",\n \"version\": \"1.0\",\n \"execute\": [{\"script\": str(install_script.name)}],\n \"packages\": {\"manager\": \"apk\", \"install\": [\"zip\"]},\n }\n 
)\n )\n\n image_descriptor = {\n \"name\": \"test_image\",\n \"version\": \"1.0\",\n \"from\": \"alpine:latest\",\n \"modules\": {\n \"repositories\": [{\"name\": \"modules\", \"path\": \"modules/\"}],\n \"install\": [{\"name\": \"test_module\"}],\n },\n }\n\n run_cekit(image_dir, image_descriptor, args=[\"-v\", \"build\", \"docker\"])\n\n assert \"Applying module package manager of apk to image\" in caplog.text", "title": "" }, { "docid": "063e217643ad63bb1b143953f570af11", "score": "0.5796486", "text": "def setup_module(module):", "title": "" }, { "docid": "e6076ee07ea52a64c01425b66d9cd0e8", "score": "0.57842326", "text": "def setup_loader(path):\n logging.info(\"Updated ansible loader basedir to %r\", path)\n LOADER.set_basedir(path)", "title": "" }, { "docid": "1392bef4c7b51bc59640fa079f8ba17f", "score": "0.5781115", "text": "def test_missing_pyyaml(self):\n sys_path_saved = sys.path\n try:\n sys.path = [] # make import yaml failed\n if 'yaml' in sys.modules:\n # forget about previous yaml import\n del sys.modules['yaml']\n f = make_temp_file(\"\"\"\n vendors:\n apricot: node\"\"\")\n self.assertRaises(GroupResolverConfigError, YAMLGroupLoader,\n f.name)\n finally:\n sys.path = sys_path_saved", "title": "" }, { "docid": "fd254cb03b4bb6d0cbb5834b0c31a34d", "score": "0.57805115", "text": "def main():\n argument_spec = {\n 'gather_subset': dict(default=['software_info', 'software_images',\n 'host_name', 'platform_name',\n 'management_interface',\n 'software_version', 'fans',\n 'power_supplies', 'product_info',\n 'physical_interfaces',\n 'resource_utilization', 'domain_name'],\n type='list',\n choices=['software_info', 'software_images',\n 'host_name', 'platform_name',\n 'management_interface',\n 'software_version',\n 'config', 'fans', 'power_supplies',\n 'product_info', 'physical_interfaces',\n 'resource_utilization', 'domain_name']),\n 'gather_network_resources': dict(type='list',\n choices=['interfaces', 'vlans',\n 'vrfs'])\n }\n\n # Version Management\n try:\n\n from ansible.module_utils.aoscx_pyaoscx import Session\n from pyaoscx.session import Session as Pyaoscx_Session\n from pyaoscx.interface import Interface\n from pyaoscx.vlan import Vlan\n from pyaoscx.device import Device\n from pyaoscx.vrf import Vrf\n\n USE_PYAOSCX_SDK = True\n\n except ImportError:\n USE_PYAOSCX_SDK = False\n\n # Use the PYAOSCX SDK\n if USE_PYAOSCX_SDK:\n\n argument_spec.update(aoscx_http_argument_spec)\n\n ansible_module = AnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True)\n\n # Get session\n session = Session(ansible_module)\n\n # Session info\n session_info = session.get_session()\n\n # Create pyaoscx session object\n s = Pyaoscx_Session.from_session(\n session_info['s'], session_info['url'])\n\n warnings = []\n if ansible_module.params[\"gather_subset\"] == \"!config\":\n warnings.append(\n 'default value for `gather_subset` will be changed '\n 'to `min` from `!config` v2.11 onwards')\n\n # Declare the Ansible facts\n ansible_facts = {}\n\n # Retrieve variables from module parameters\n network_resource_list = ansible_module.params['gather_network_resources']\n subset_list = ansible_module.params['gather_subset']\n\n # Retrieve ansible_network_resources\n ansible_network_resources = {}\n if network_resource_list is not None:\n for resource in network_resource_list:\n if resource == 'interfaces':\n ansible_network_resources.update(\n {'interfaces': Interface.get_facts(s)})\n elif resource == 'vlans':\n ansible_network_resources.update(\n {'vlans': Vlan.get_facts(s)})\n elif resource == 
'vrfs':\n ansible_network_resources.update(\n {'vrfs': Vrf.get_facts(s)})\n\n ansible_facts.update(\n {'ansible_network_resources': ansible_network_resources})\n\n # Retrieve ansible_net_gather_network_resources\n ansible_facts.update(\n {'ansible_net_gather_network_resources': network_resource_list})\n\n # Retrieve ansible_net_gather_subset\n ansible_facts.update({'ansible_net_gather_subset': subset_list})\n\n # Retrieve device facts\n switch = Device(s)\n switch.get()\n switch.get_subsystems() # subsystem\n\n # Set the subsystem attributes allowed to retrieve as facts\n allowed_subsystem_attributes = [\n 'product_info',\n 'power_supplies',\n 'interfaces',\n 'fans',\n 'resource_utilization'\n ]\n\n # Set the default subsets that are always retreived as facts\n default_subset_list = [\n 'management_interface',\n 'software_version'\n ]\n\n # Extend subset_list with default subsets\n subset_list.extend(default_subset_list)\n\n # Delete duplicates\n subset_list = list(dict.fromkeys(subset_list))\n\n # Iterate through given subset arguments in the gather_subset parameter\n # in argument_spec\n for subset in subset_list:\n\n # Argument translation for management_interface and\n # physical_interfaces\n if subset == 'management_interface':\n subset = 'mgmt_intf_status'\n elif subset == 'physical_interfaces':\n subset = 'interfaces'\n elif subset == 'host_name':\n subset = 'hostname'\n\n str_subset = 'ansible_net_' + subset\n\n # Check if current subset is inside the Device object\n if hasattr(switch, subset):\n\n # Get attribute value and add it to Ansible facts dictionary\n ansible_facts[str_subset] = getattr(switch, subset)\n\n # Check if current subset is inside the allowed subsystem\n # attributes\n elif subset in allowed_subsystem_attributes:\n ansible_facts.update({str_subset: {}})\n\n # Iterate through Device subsystems\n for subsystem, value in switch.subsystems.items():\n\n # Get attribute value and update the Ansible facts\n # dictionary\n ansible_facts[str_subset].update(\n {subsystem: switch.subsystems[subsystem][subset]})\n\n ansible_module.exit_json(\n ansible_facts=ansible_facts,\n warnings=warnings)\n\n # USE OLD VERSION\n else:\n argument_spec.update(aoscx_http_argument_spec)\n\n module = AnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True)\n\n module._connection = get_connection(module) # noqa\n\n warnings = []\n if module.params[\"gather_subset\"] == \"!config\":\n warnings.append(\n 'default value for `gather_subset` will be changed '\n 'to `min` from `!config` v2.11 onwards')\n\n result = Facts(module).get_facts()\n\n ansible_facts, additional_warnings = result\n warnings.extend(additional_warnings)\n\n module.exit_json(ansible_facts=ansible_facts, warnings=warnings)", "title": "" }, { "docid": "2b818698bbb02f4f78cb45201f7366d3", "score": "0.5772435", "text": "def test_cookiecutter_imported():\n assert \"cookiecutter\" in sys.modules", "title": "" }, { "docid": "6bacb7a0020fd4a2e5f4eb78c84ffbf6", "score": "0.57639295", "text": "def main():\n module = AnsibleModule(argument_spec=LldpArgs.argument_spec,\n supports_check_mode=True)\n\n result = Lldp(module).execute_module()\n module.exit_json(**result)", "title": "" }, { "docid": "a2a6d86d8910830c6d6f6dc6be228857", "score": "0.5761631", "text": "def run_module():\n\n module = AnsibleModule(\n argument_spec=dict(\n original_file=dict(type='str', required=True),\n new_file=dict(type='str', required=True),\n whitelist=dict(required=False, type='list', default=[])\n ),\n supports_check_mode=True\n )\n\n original_fh 
= open(module.params['original_file'], \"r\")\n original_contents = original_fh.read()\n original_fh.close()\n\n new_fh = open(module.params['new_file'], \"r\")\n new_contents = new_fh.read()\n new_fh.close()\n\n original_contents = account_for_whitelist(original_contents, new_contents, module.params['whitelist'])\n\n uni_diff = difflib.unified_diff(new_contents.splitlines(),\n original_contents.splitlines(),\n lineterm='')\n\n return module.exit_json(changed=False, # noqa: F405\n raw_patch=\"\\n\".join(uni_diff))", "title": "" }, { "docid": "847b754cfb6e133d6c81699d6c15190f", "score": "0.5732998", "text": "def test_futureImport(self):", "title": "" }, { "docid": "71454fb3edd9967151d9e30fc27c927c", "score": "0.572908", "text": "def setup_run_playbook(playbook, instance_id=None, tags=None, hosts=None):\n\n # No try/except here as we want ImportErrors to raise\n import shutil\n import yaml\n from ansible.module_utils.common.collections import ImmutableDict\n from ansible.parsing.dataloader import DataLoader\n from ansible.vars.manager import VariableManager\n from ansible.inventory.manager import InventoryManager\n from ansible.playbook.play import Play\n from ansible.playbook.task_include import TaskInclude\n from ansible.executor.task_queue_manager import TaskQueueManager\n from ansible.plugins.callback import CallbackBase\n from ansible import context\n\n #W2P_TASK = current.W2P_TASK\n\n if hosts is None:\n # NB This is the only current usecase as we always start on localhost\n # - remote servers are then accessed once we have the SSH private_key available\n hosts = [\"127.0.0.1\"]\n\n # Logging\n class PlayLogger:\n \"\"\"\n Store log output in a single String object.\n We create a new object per Ansible run\n \"\"\"\n def __init__(self):\n self.log = \"\"\n\n def append(self, log_line):\n \"\"\" Append to log \"\"\"\n self.log += log_line + \"\\n\\n\"\n\n logger = PlayLogger()\n\n class ResultCallback(CallbackBase):\n CALLBACK_VERSION = 2.0\n CALLBACK_TYPE = \"stored\"\n CALLBACK_NAME = \"database\"\n\n def __init__(self):\n\n self._last_task_banner = None\n self._last_task_name = None\n self._task_type_cache = {}\n super(ResultCallback, self).__init__()\n\n @staticmethod\n def _handle_exception(result):\n # Catch an exception\n # This may never be called because default handler deletes\n # the exception, since Ansible thinks it knows better\n traceback = result.get(\"exception\")\n if traceback:\n # Extract the error message and log it\n #error = traceback.strip().split(\"\\n\")[-1]\n #logger.append(error)\n # Log the whole Traceback\n logger.append(traceback)\n # Remove the exception from the result so it's not shown every time\n del result[\"exception\"]\n #current.s3task.scheduler.stop_task(W2P_TASK.id)\n # @ToDo: If this happens during a deploy from co-app and after nginx has replaced co-app on Port 80 then revert to co-app\n\n def _print_task_banner(self, task):\n args = u\", \".join(u\"%s=%s\" % a for a in task.args.items())\n prefix = self._task_type_cache.get(task._uuid, \"TASK\")\n\n # Use cached task name\n task_name = self._last_task_name\n if task_name is None:\n task_name = task.get_name().strip()\n\n logger.append(u\"%s: %s\\n[%s]\" % (prefix, task_name, args))\n\n def v2_runner_on_failed(self, result, ignore_errors=False):\n if self._last_task_banner != result._task._uuid:\n self._print_task_banner(result._task)\n\n self._handle_exception(result._result)\n\n if result._task.loop and \"results\" in result._result:\n self._process_items(result)\n else:\n 
logger.append(\"fatal: [%s]: FAILED!\\n%s\" % \\\n (result._host.get_name(),\n self._dump_results(result._result, indent=4)))\n\n def v2_runner_on_ok(self, result):\n if isinstance(result._task, TaskInclude):\n return\n if self._last_task_banner != result._task._uuid:\n self._print_task_banner(result._task)\n if result._result.get(\"changed\", False):\n msg = \"changed: [%s]\" % result._host.get_name()\n else:\n msg = \"ok: [%s]\" % result._host.get_name()\n\n if result._task.loop and \"results\" in result._result:\n self._process_items(result)\n else:\n self._clean_results(result._result, result._task.action)\n msg += \"\\n%s\" % self._dump_results(result._result, indent=4)\n logger.append(msg)\n\n def v2_runner_on_unreachable(self, result):\n if self._last_task_banner != result._task._uuid:\n self._print_task_banner(result._task)\n logger.append(\"fatal: [%s]: UNREACHABLE!\\n%s\" % \\\n (result._host.get_name(),\n self._dump_results(result._result, indent=4)))\n\n def v2_runner_item_on_failed(self, result):\n if self._last_task_banner != result._task._uuid:\n self._print_task_banner(result._task)\n\n self._handle_exception(result._result)\n\n msg = \"failed: [%s]\" % (result._host.get_name())\n\n logger.append(msg + \" (item=%s)\\n%s\" % \\\n (self._get_item_label(result._result),\n self._dump_results(result._result, indent=4)))\n\n def v2_runner_item_on_ok(self, result):\n if isinstance(result._task, TaskInclude):\n return\n if self._last_task_banner != result._task._uuid:\n self._print_task_banner(result._task)\n if result._result.get(\"changed\", False):\n msg = \"changed\"\n else:\n msg = \"ok\"\n\n msg += \": [%s] (item=%s)\\n%s\" % \\\n (result._host.get_name(),\n self._get_item_label(result._result),\n self._dump_results(result._result, indent=4))\n logger.append(msg)\n\n # Copy the current working directory to revert back to later\n cwd = os.getcwd()\n\n # Change working directory\n request = current.request\n roles_path = os.path.join(request.env.applications_parent, request.folder, \"private\", \"eden_deploy\", \"roles\")\n os.chdir(roles_path)\n\n # Since the API is constructed for CLI, it expects certain options to always be set in the context object\n if tags is None:\n tags = [] # Needs to be an iterable\n tmp_path = os.path.join(\"/\", \"tmp\", \"ansible\")\n context.CLIARGS = ImmutableDict(become = None,\n become_method = None,\n become_user = None,\n check = False,\n diff = False,\n #extra_vars = {\"ansible_local_temp\": tmp_path,\n # \"ansible_local_tmp\": tmp_path,\n # \"ansible_ssh_control_path_dir\": tmp_path,\n # },\n forks = 10,\n module_path = [roles_path],\n tags = tags,\n verbosity = 1,\n )\n\n # Initialize needed objects\n loader = DataLoader() # Takes care of finding and reading yaml, json and ini files\n\n # Instantiate Logging for handling results as they come in\n results_callback = ResultCallback()\n\n # Create Inventory and pass to Var manager\n if len(hosts) == 1:\n # Ensure that we have a comma to tell Ansible that this is a list of hosts not a file to read from\n sources = \"%s,\" % hosts[0]\n else:\n sources = \",\".join(hosts)\n\n inventory = InventoryManager(loader = loader,\n sources = sources)\n variable_manager = VariableManager(loader = loader,\n inventory = inventory)\n\n # Load Playbook\n with open(playbook, \"r\") as yaml_file:\n # https://msg.pyyaml.org/load\n playbooks = yaml.full_load(yaml_file)\n\n for play_source in playbooks:\n # Create play object, playbook objects use .load instead of init or new methods,\n # this will also 
automatically create the task objects from the info provided in play_source\n play = Play().load(play_source,\n variable_manager = variable_manager,\n loader = loader)\n\n # Run it - instantiate task queue manager, which takes care of forking and setting up all objects to iterate over host list and tasks\n tqm = None\n try:\n tqm = TaskQueueManager(inventory = inventory,\n variable_manager = variable_manager,\n loader = loader,\n passwords = None,\n # Use our custom callback instead of the ``default`` callback plugin, which prints to stdout\n stdout_callback = results_callback,\n )\n result = tqm.run(play) # Most interesting data for a play is actually sent to the callback's methods\n finally:\n # we always need to cleanup child procs and the structures we use to communicate with them\n if tqm is not None:\n tqm.cleanup()\n\n # Remove ansible tmpdir\n shutil.rmtree(tmp_path, True)\n\n # Change working directory back\n os.chdir(cwd)\n\n # Dump Logs to File\n # Logs are in eden/uploads/playbook instead of /tmp, however it works\n log_file_name = \"%s.log\" % playbook.split(\".\")[0]\n log_path = os.path.join(\"/\", \"tmp\", log_file_name)\n with open(log_path, \"w\") as log_file:\n log_file.write(logger.log)\n\n # Dump Logs to Database\n # This gets deleted:\n #current.db(current.s3db.scheduler_run.id == W2P_TASK.run_id).update(run_output = logger.log)\n\n if instance_id:\n # Upload logs to Database\n table = current.s3db.setup_instance\n field = table.log_file\n with open(log_path, \"rb\") as log_file:\n newfilename = field.store(log_file,\n log_file_name,\n field.uploadfolder)\n current.db(table.id == instance_id).update(log_file = newfilename)\n\n return result", "title": "" }, { "docid": "5af796d0ac9d0174f5ae0a057c23e91a", "score": "0.5701814", "text": "def run(self, module_name, module_args):\n self.results_raw = {'success':{}, 'failed':{}, 'unreachable':{}}\n # initialize needed objects\n variable_manager = VariableManager()\n loader = DataLoader()\n\n Options = namedtuple('Options', ['listtags', 'listtasks', 'listhosts', 'syntax', 'connection','module_path', \n 'forks', 'remote_user', 'private_key_file', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args', \n 'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check'])\n options = Options(listtags=False, listtasks=False, listhosts=False, syntax=False, connection='ssh', module_path=None, \n forks=30, remote_user='root', private_key_file=None, ssh_common_args=None, ssh_extra_args=None, sftp_extra_args=None, \n scp_extra_args=None, become=True, become_method=None, become_user='root', verbosity=None, check=False)\n \n passwords = dict(sshpass=None, becomepass=None)\n\n # create inventory and pass to var manager\n inventory = MyInventory(self.resource, loader, variable_manager).inventory #你要生成自己的inventory\n variable_manager.set_inventory(inventory)\n\n # create play with tasks\n play_source = dict(\n name=\"Ansible Play\",\n hosts=self.host_list, #主机ip地址,[]\n gather_facts='no',\n tasks=[dict(action=dict(module=module_name, args=module_args))] #ansible要执行的命令\n )\n if module_name == 'ping':\n play_source = dict(\n name=\"Ansible Play\",\n hosts=self.host_list, #主机ip地址,[]\n gather_facts='no',\n tasks=[dict(action=dict(module=module_name))] #ansible要执行的命令\n )\n play = Play().load(play_source, variable_manager=variable_manager, loader=loader)\n\n # actually run it\n tqm = None\n # callback = Jsoncallback() #ansible命令执行结果收集器\n callback = minicb()\n try:\n tqm = TaskQueueManager(\n inventory=inventory,\n 
variable_manager=variable_manager,\n loader=loader,\n options=options,\n passwords=passwords,\n # stdout_callback='json',\n # run_tree=False,\n )\n tqm._stdout_callback = callback\n callrun = tqm.run(play)\n # return callrun\n\n result = callback.results\n\n finally:\n if tqm is not None:\n tqm.cleanup()\n # return result\n return result[-1]['tasks'][-1]", "title": "" }, { "docid": "c4d64ebeb1c36e44d642b16e05535fb2", "score": "0.56941855", "text": "def test_setup():\n\n try:\n # ensure every module imports OK\n print(\"convert\")\n from dnppy import convert\n print(\"core\")\n from dnppy import core\n print(\"download\")\n from dnppy import download\n print(\"landsat\")\n from dnppy import landsat\n print(\"modis\")\n from dnppy import modis\n print(\"radar\")\n from dnppy import radar\n print(\"raster\")\n from dnppy import raster\n print(\"solar\")\n from dnppy import solar\n print(\"test\")\n from dnppy import test\n print(\"textio\")\n from dnppy import textio\n print(\"time_series\")\n from dnppy import time_series\n return True\n\n except ImportError:\n print(\"failed!\")\n return False", "title": "" }, { "docid": "a49b94b5e54f8a6904824dba8715c1e1", "score": "0.5671843", "text": "def test_module_uses_installed_package_in_execute_script_manager_precedence(\n tmp_path, caplog\n):\n\n image_dir = tmp_path / \"image\"\n module_dir = image_dir / \"modules\"\n\n image_dir.mkdir(exist_ok=True)\n\n # Build the module\n module_dir.mkdir(exist_ok=True)\n\n install_script = module_dir / \"install.sh\"\n install_script.write_text(\"\\n\".join([\"#!/bin/bash\", \"zip --version\"]))\n\n module_descriptor_file = module_dir / \"module.yaml\"\n module_descriptor_file.write_text(\n yaml.dump(\n {\n \"name\": \"test_module\",\n \"version\": \"1.0\",\n \"execute\": [{\"script\": str(install_script.name)}],\n \"packages\": {\"manager\": \"dnf\", \"install\": [\"zip\"]},\n }\n )\n )\n\n image_descriptor = {\n \"name\": \"test_image\",\n \"version\": \"1.0\",\n \"from\": \"alpine:latest\",\n \"modules\": {\n \"repositories\": [{\"name\": \"modules\", \"path\": \"modules/\"}],\n \"install\": [{\"name\": \"test_module\"}],\n },\n \"packages\": {\"manager\": \"apk\"},\n }\n\n run_cekit(image_dir, image_descriptor, args=[\"-v\", \"build\", \"docker\"])\n\n assert \"Applying module package manager of apk to image\" not in caplog.text", "title": "" }, { "docid": "7fb54823a4878a9b61d663b2321329bb", "score": "0.5668248", "text": "def test_setup():\n assert_equals(next(api.ls())[\"name\"], \"Test\")", "title": "" }, { "docid": "aef18e5d95f506c0d361cb4b8d71eaba", "score": "0.56658393", "text": "def bear_test_module():\n bears_test_module = os.path.join(os.path.dirname(__file__),\n 'test_bears', '__init__.py')\n\n class EntryPoint:\n\n @staticmethod\n def load():\n class PseudoPlugin:\n __file__ = bears_test_module\n return PseudoPlugin()\n\n with unittest.mock.patch('pkg_resources.iter_entry_points',\n return_value=[EntryPoint()]) as mocked:\n yield", "title": "" }, { "docid": "94faf2b8e47368f3293002464ae07e1b", "score": "0.56546223", "text": "def setUpModule():\n base.enabledPlugins.append('jobs')\n base.enabledPlugins.append('romanesco')\n base.enabledPlugins.append('gravatar')\n base.enabledPlugins.append('minerva')\n base.startServer(False)", "title": "" }, { "docid": "a91309683b4ffb40149788b2a6b1de04", "score": "0.565395", "text": "def test_module() -> None:\n assert ctff.__version__ is not None", "title": "" }, { "docid": "89c5aabbb16bd695447e7fa7af7ffa56", "score": "0.56519556", "text": "def setUpModule():\n 
base.enabledPlugins.append('jobs')\n base.enabledPlugins.append('gravatar')\n base.enabledPlugins.append('minerva')\n base.startServer(False)", "title": "" }, { "docid": "212d1f19cb0f1f5e3b28522df6c14eea", "score": "0.5650717", "text": "def test_fake_module(self):\n path = 'simple/healthchecks'\n self.assertHealthCheck(path, 500, runs=3, errors=1, failures=1)", "title": "" }, { "docid": "173664e907f1e6e594ddd3747dc36ee9", "score": "0.56468827", "text": "async def test_runner_with_modules_in_legacy_python(\n modules_legacy_python_protocol_file: Path,\n) -> None:\n protocol_reader = ProtocolReader()\n protocol_source = await protocol_reader.read_saved(\n files=[modules_legacy_python_protocol_file],\n directory=None,\n )\n\n subject = await create_simulating_runner(\n robot_type=\"OT-2 Standard\",\n protocol_config=protocol_source.config,\n )\n result = await subject.run(protocol_source)\n commands_result = result.commands\n\n assert len(commands_result) == 6\n\n temp_module_result_captor = matchers.Captor()\n mag_module_result_captor = matchers.Captor()\n thermocycler_result_captor = matchers.Captor()\n heater_shaker_result_captor = matchers.Captor()\n\n assert commands_result[0] == commands.Home.construct(\n id=matchers.IsA(str),\n key=matchers.IsA(str),\n status=commands.CommandStatus.SUCCEEDED,\n createdAt=matchers.IsA(datetime),\n startedAt=matchers.IsA(datetime),\n completedAt=matchers.IsA(datetime),\n params=commands.HomeParams(axes=None),\n result=commands.HomeResult(),\n )\n assert commands_result[1] == commands.LoadLabware.construct(\n id=matchers.IsA(str),\n key=matchers.IsA(str),\n status=commands.CommandStatus.SUCCEEDED,\n createdAt=matchers.IsA(datetime),\n startedAt=matchers.IsA(datetime),\n completedAt=matchers.IsA(datetime),\n params=matchers.Anything(),\n result=matchers.Anything(),\n )\n\n assert commands_result[2] == commands.LoadModule.construct(\n id=matchers.IsA(str),\n key=matchers.IsA(str),\n status=commands.CommandStatus.SUCCEEDED,\n createdAt=matchers.IsA(datetime),\n startedAt=matchers.IsA(datetime),\n completedAt=matchers.IsA(datetime),\n params=matchers.Anything(),\n result=temp_module_result_captor,\n )\n\n assert commands_result[3] == commands.LoadModule.construct(\n id=matchers.IsA(str),\n key=matchers.IsA(str),\n status=commands.CommandStatus.SUCCEEDED,\n createdAt=matchers.IsA(datetime),\n startedAt=matchers.IsA(datetime),\n completedAt=matchers.IsA(datetime),\n params=matchers.Anything(),\n result=mag_module_result_captor,\n )\n\n assert commands_result[4] == commands.LoadModule.construct(\n id=matchers.IsA(str),\n key=matchers.IsA(str),\n status=commands.CommandStatus.SUCCEEDED,\n createdAt=matchers.IsA(datetime),\n startedAt=matchers.IsA(datetime),\n completedAt=matchers.IsA(datetime),\n params=matchers.Anything(),\n result=thermocycler_result_captor,\n )\n\n assert commands_result[5] == commands.LoadModule.construct(\n id=matchers.IsA(str),\n key=matchers.IsA(str),\n status=commands.CommandStatus.SUCCEEDED,\n createdAt=matchers.IsA(datetime),\n startedAt=matchers.IsA(datetime),\n completedAt=matchers.IsA(datetime),\n params=matchers.Anything(),\n result=heater_shaker_result_captor,\n )\n\n assert temp_module_result_captor.value[\"model\"] == ModuleModel.TEMPERATURE_MODULE_V1\n assert mag_module_result_captor.value[\"model\"] == ModuleModel.MAGNETIC_MODULE_V1\n assert (\n thermocycler_result_captor.value[\"model\"] == ModuleModel.THERMOCYCLER_MODULE_V1\n )\n assert (\n heater_shaker_result_captor.value[\"model\"]\n == ModuleModel.HEATER_SHAKER_MODULE_V1\n 
)", "title": "" }, { "docid": "8267cdb9a21a239f500fd85340979ec0", "score": "0.56450135", "text": "def run_ansible(self):\n AnsibleManager(self.config, self.project_name).run_ansible_setup()\n return", "title": "" }, { "docid": "d042d20e48b7d52e070db68df33f7051", "score": "0.5640521", "text": "def test_parse_modules_folder(self):\n # GIVEN: A SwordModules object using a folder for input\n modules = SwordModules(TEST_RESOURCE_FOLDER)\n\n # WHEN: parsing the modules conf files\n mods_metadata = modules.parse_modules()\n\n # THEN: Modules should be detectable and information extractable\n module_list = [u'ChiPinyin', u'FinPR', u'BSV', u'ASV', u'AraNAV', u'SpaRV1909']\n self.assertTrue(all(x in module_list for x in mods_metadata.keys()),\n u'Some expected bibles were not detected')\n # Depending on the operating system, the handling of non-utf8 encoded conf-files is different\n self.assertEqual(mods_metadata[u'FinPR'][u'description'], u'Finnish Pyhä Raamattu (1933/1938)',\n u'Could not extract \"description\" for \"FinPR\"')\n self.assertEqual(mods_metadata[u'BSV'][u'description'], u'The Bond Slave Version Bible',\n u'Could not extract \"description\" for \"BSV\"')\n self.assertEqual(mods_metadata[u'ASV'][u'description'], u'American Standard Version (1901)',\n u'Could not extract \"description\" for \"ASV\"')\n self.assertEqual(mods_metadata[u'AraNAV'][u'description'], u'New Arabic Version (Ketab El Hayat)',\n u'Could not extract \"description\" for \"AraNAV\"')", "title": "" }, { "docid": "efa782f1e7f07c4545376a9512a4b7fc", "score": "0.5635734", "text": "def run_module(module, raw_params=None, args=None, env=None):\n if args is None:\n args = {}\n if raw_params is not None:\n args['_raw_params'] = raw_params\n\n ansible.module_utils.basic.AnsibleModule.exit_json = monkey_exit_json\n ansible.module_utils.basic.AnsibleModule.fail_json = monkey_fail_json\n ansible.module_utils.basic._ANSIBLE_ARGS = json.dumps({\n 'ANSIBLE_MODULE_ARGS': args\n })\n\n temp_env = TemporaryEnvironment(env)\n try:\n try:\n mod = __import__(module, {}, {}, [''])\n module_fixups(mod)\n # Ansible modules begin execution on import. Thus the above __import__\n # will cause either Exit or ModuleError to be raised. 
If we reach the\n # line below, the module did not execute and must already have been\n # imported for a previous invocation, so we need to invoke main\n # explicitly.\n mod.main()\n except (Exit, ModuleError), e:\n result = json.dumps(e.dct)\n finally:\n temp_env.revert()\n\n return result", "title": "" }, { "docid": "4bfd23ee3e368081c9e63dbc97bfda2c", "score": "0.563156", "text": "def test_import(self):\n config_file = os.path.join(os.path.dirname(__file__),\n 'data',\n 'test_config.yaml')\n finder = importer.Finder(config_file)\n sys.meta_path.append(finder)\n\n import fake_package.fake_module\n self.assertTrue(hasattr(fake_package.fake_module, '__fake_callable__'))\n\n import fake_package\n self.assertTrue(hasattr(fake_package, '__fake_callable__'))", "title": "" }, { "docid": "eab582f95e06e8bedab9fa8138609279", "score": "0.5626397", "text": "def test_managed_install(linuxdeploy):\n assert linuxdeploy.managed_install is True", "title": "" }, { "docid": "27c55611e10c748d90bea6233af287ad", "score": "0.562425", "text": "def main():\n global module\n module = AnsibleModule(\n argument_spec={\n 'hostname': {'required': True, 'type': 'str'},\n 'mak': {'required': True, 'type': 'str'},\n 'title': {'required': True, 'type': 'str'},\n 'episode': {'required': False, 'type': 'str'},\n 'dest_dir': {'required': True, 'type': 'str'},\n 'tvdb_api_key': {'required': False, 'type': 'str'},\n 'tvdb_user_key': {'required': False, 'type': 'str'},\n 'tvdb_username': {'required': False, 'type': 'str'},\n 'tvdb_ignore_failure': {'required': False, 'type': 'bool',\n 'default': False},\n 'skip_if_in_path': {'required': False, 'type': 'str'}\n },\n supports_check_mode=False\n )\n dest_dir = module.params['dest_dir']\n if not path.isdir(dest_dir):\n module.fail_json(\n msg='The directory \"{0}\" does not exist'.format(dest_dir))\n recordings_info = get_tivo_recording_info(\n module.params['hostname'], module.params['mak'],\n module.params['title'], module.params['episode'])\n if not recordings_info:\n module.fail_json(msg='No recordings were found')\n\n tvdb_api_key = module.params['tvdb_api_key']\n tvdb_user_key = module.params['tvdb_user_key']\n tvdb_username = module.params['tvdb_username']\n skip_path = module.params['skip_if_in_path']\n mak = module.params['mak']\n download_count = 0\n\n for recording_info in recordings_info:\n title = recording_info.get('title')\n episode_name = recording_info.get('episode')\n if episode_name and tvdb_api_key and tvdb_user_key and tvdb_username:\n func_kwargs = {\n 'api_key': module.params['tvdb_api_key'],\n 'user_key': module.params['tvdb_user_key'],\n 'username': module.params['tvdb_username'],\n 'series_name': title,\n 'episode_name': episode_name,\n # Don't fail the first time when doing an exact match\n 'fail_on_error': False,\n 'fuzzy_match': False\n }\n season_num, episode_num = get_season_episode_num(**func_kwargs)\n # If there wasn't an exact match, then try a fuzzy match run\n if not all([season_num, episode_name]):\n # Now set the failure mode to the module parameters on the\n # fuzzy match run\n func_kwargs['fail_on_error'] = \\\n (not module.params['tvdb_ignore_failure'])\n func_kwargs['fuzzy_match'] = True\n season_num, episode_num = get_season_episode_num(**func_kwargs)\n\n if all([season_num, episode_name]):\n recording_info.update(\n {'season_num': season_num, 'episode_num': episode_num})\n\n if download_tivo_recording(mak, recording_info,\n module.params['dest_dir'], skip_path):\n download_count += 1\n if download_count < len(recordings_info):\n # Sleep 
30 seconds between downloads to give the TiVo a break\n sleep(30)\n\n if download_count > 0:\n success_msg = '{0} recording(s) downloaded successfully'.format(\n download_count)\n module.exit_json(msg=success_msg, changed=True)\n else:\n if len(recordings_info) > 0:\n msg = '{0} recording(s) skipped'.format(len(recordings_info))\n else:\n msg = 'No recordings were found that matched the criteria'\n module.exit_json(msg=msg, changed=False)", "title": "" }, { "docid": "90eca1b928d049b7da1ebb02b1ca1785", "score": "0.5610538", "text": "def test_some_configs(self) :\n self.assertFalse(SERVICE_CONST.NSSM_EXECUTABLE_PATH.is_file())\n for sd in SERVICE_CONST.AVAILABLE_SERVICES :\n _ = importlib.import_module(sd['filepath'])\n #the command below explicitly creates a file but that file should be ignored in the repo\n SERVICE_CONST.LOGGER.info('testing')", "title": "" }, { "docid": "48996fcb999af5d803db981367406690", "score": "0.5609269", "text": "def test_import_all():\n assert cli\n assert keldcli.cli is cli\n assert __distribution__\n assert keldcli.__distribution__ is __distribution__\n assert __title__\n assert keldcli.__title__ is __title__\n assert __title__ == \"keldcli\"\n assert __version__\n assert keldcli.__version__ is __version__", "title": "" }, { "docid": "5c2bbc77f259ae3621e3c35981ab47e4", "score": "0.55945915", "text": "def __init__(self):\n self.module_params = utils.get_vplex_management_host_parameters()\n self.module_params.update(get_vplex_initiator_parameters())\n\n mutually_exclusive = [\n ['iscsi_name', 'port_wwn']\n ]\n\n # initialize the ansible module\n self.module = AnsibleModule(\n argument_spec=self.module_params,\n supports_check_mode=False,\n mutually_exclusive=mutually_exclusive\n )\n\n # Check for external libraries\n lib_status, message = utils.external_library_check()\n if not lib_status:\n LOG.error(message)\n self.module.fail_json(msg=message)\n\n # Check for Python vplexapi sdk\n if HAS_VPLEXAPI_SDK is False:\n self.module.fail_json(msg=\"Ansible modules for VPLEX require \"\n \"the vplexapi python library to be \"\n \"installed. 
Please install the library \"\n \"before using these modules.\")\n\n self.cl_name = self.module.params['cluster_name']\n if not self.cl_name:\n msg = \"Following is required: cluster_name\"\n LOG.error(msg)\n self.module.fail_json(msg=msg)\n # Create the configuration instance to communicate with\n # vplexapi\n self.client = utils.config_vplexapi(self.module.params)\n\n # Validating the user inputs\n if isinstance(self.client, tuple):\n err_code, msg = self.client # pylint: disable=W0612\n LOG.error(msg)\n self.module.fail_json(msg=msg)\n\n vplex_setup = utils.get_vplex_setup(self.client)\n LOG.info(vplex_setup)\n # Checking if the cluster is reachable\n (status, msg) = utils.verify_cluster_name(self.client, self.cl_name)\n if status != 200:\n if \"Resource not found\" in msg:\n msg = \"Could not find resource {0}\".format(self.cl_name)\n LOG.error(msg)\n self.module.fail_json(msg=msg)\n\n # Create an instance to InitiatorApi to communicate with\n # vplexapi\n self.initr = utils.ExportsApi(api_client=self.client)\n\n # Module parameters\n self.init_name = self.module.params['initiator_name']\n self.new_init_name = self.module.params['new_initiator_name']\n self.port_wwn = self.module.params['port_wwn']\n self.iscsi_name = self.module.params['iscsi_name']\n self.registered = self.module.params['registered']\n self.temp_initiator = None\n self.flag = 0\n self.reg_flag = 0\n self.unreg_flag = 0\n self.rename_flag = 0\n\n # result is a dictionary that contains changed status and\n # initiator details\n self.result = {\"changed\": False, \"initiator_details\": {}}", "title": "" }, { "docid": "06dc66d941e64728051573acca4aaa36", "score": "0.5591653", "text": "def test_module_resolver_modlist(resolver):\n assert resolver.get_modules_list() == [\n \"four.five.six\",\n \"one.two.three\",\n \"three.six.one\",\n ]\n for ptr in [\"five\", \"fi\", \"ve\"]:\n assert resolver.get_modules_list(ptr) == [\"four.five.six\"]\n for ptr in [\"si\", \"ix\", \"six\"]:\n assert resolver.get_modules_list(ptr) == [\"four.five.six\", \"three.six.one\"]\n assert resolver.get_modules_list(\"one\") == [\"one.two.three\", \"three.six.one\"]\n assert resolver.get_modules_list(\"one.two\") == [\"one.two.three\"]\n assert resolver.get_modules_list(\"four\") == [\"four.five.six\"]", "title": "" }, { "docid": "850397d9457ffc9f16b2bd81c01413af", "score": "0.5589264", "text": "def main():\n module = AnsibleModule(\n argument_spec=Acl_interfacesArgs.argument_spec,\n supports_check_mode=True,\n )\n\n result = Acl_interfaces(module).execute_module()\n module.exit_json(**result)", "title": "" } ]
5c3f6103716dfaeaae300f15ddebf9cd
_get_clients_for_channel(channel_name) -> [client] Given a channel_name, this function returns the clients connected to the nodes that handle the channel. The length of the list is 1 for local channels and user channels. For a global channel the length is equal to the total number of Beaconpush nodes. channel_name: the channel name. Returns: a list of clients.
[ { "docid": "4aef8017db638ef12f44a1d8cc41612a", "score": "0.8403511", "text": "def _get_clients_for_channel(self, channel_name):\r\n if not channel_name or channel_name[0] == \"*\":\r\n # Channel name is empty or a global channel\r\n return self._get_clients()\r\n else:\r\n # A local channel or a personal (user) channel which exists only on one node\r\n return [self._get_client(channel_name)]", "title": "" } ]
[ { "docid": "1e245e92f4a76ac8102fae4cb68f3d5c", "score": "0.62264466", "text": "def get_users_in_channel(self, channel_name):\r\n\r\n clients = self._get_clients_for_channel(channel_name)\r\n greenlets = [client.getUsersInChannel(channel_name) for client in clients]\r\n self._join_all(greenlets)\r\n\r\n resultList = []\r\n for greenlet in greenlets:\r\n users = greenlet.get()\r\n if users is not None:\r\n resultList += users\r\n\r\n return resultList", "title": "" }, { "docid": "8c78f180448467b1293c72c379628c40", "score": "0.60154665", "text": "def add_client_channels(self) -> None:\n for cluster_id, channel_class in zha_regs.CLIENT_CHANNELS_REGISTRY.items():\n cluster = self.endpoint.out_clusters.get(cluster_id)\n if cluster is not None:\n channel = channel_class(cluster, self)\n self.client_channels[channel.id] = channel", "title": "" }, { "docid": "1161b3f8cedc272ea282b4b6dc966175", "score": "0.5884601", "text": "def get_clients():\n get_requests_manager().accept_user_clients(session['user'])\n chans = get_requests_manager().get_user_channels(session['user'])\n return render_template(\"client_list.html\", chans=chans)", "title": "" }, { "docid": "5f7c90c86b4a5c3ab09a3969a5f76be4", "score": "0.581433", "text": "async def get_connection_channels(self, name: str) -> List[Mapping[str, Any]]:\n response = await self._requester.get_list('connection', name, 'channels')\n if response is None:\n raise ApiError\n return response", "title": "" }, { "docid": "0d3eecf91b929de7dcb536ba5daf7b08", "score": "0.58024794", "text": "def get_all_connections(self, channel):\n channel = to_object(channel, objtype='channel')\n return self.filter(db_channel=channel)", "title": "" }, { "docid": "0d3eecf91b929de7dcb536ba5daf7b08", "score": "0.58024794", "text": "def get_all_connections(self, channel):\n channel = to_object(channel, objtype='channel')\n return self.filter(db_channel=channel)", "title": "" }, { "docid": "d8f9f5b7cbc98aff86bb4ba62d663444", "score": "0.57939196", "text": "def client_channels(self) -> dict[str, base.ClientChannel]:\n return self._client_channels", "title": "" }, { "docid": "fe1578a5147c58e4128ccc34e5048c38", "score": "0.55847824", "text": "def retrieve_chan_names(self):\n\n v.log(3, \"SLACKBOT: retrieving channel names\")\n self.chan_names = {}\n data = self.client.api_call(\"channels.list\")\n for channel in data[\"channels\"]:\n self.chan_names[channel[\"id\"]] = \"#\" + channel[\"name\"]", "title": "" }, { "docid": "8bd79ec741a0ea89dd5bcb3933076222", "score": "0.5528644", "text": "def get_users(self, channel):\r\n\r\n users = []\r\n if channel in self.channels:\r\n users = self.channels[channel].users()\r\n\r\n return users", "title": "" }, { "docid": "272f44204f275d114a26baa639c90d85", "score": "0.5517355", "text": "def channel_names(self) -> ChannelNames:\n dotnet_result = self._dotnet_instance.ChannelNames\n return _wrap(dotnet_result)", "title": "" }, { "docid": "e78a55d2017b73fd76cbdf835d72e597", "score": "0.5497325", "text": "def get_all_channel(self, channel: Channel) -> Sequence[ScopedValue]:\n ret = tree_get(self.values, (channel.transport.id, channel.server.id, channel.id))\n if ret is not None:\n results = []\n gather_tree_nodes(results, ret)\n return results\n else:\n return []", "title": "" }, { "docid": "89548e947629d1119f1fddaa516d65a2", "score": "0.5450535", "text": "def get_channel_names(self):\n return self._channel_names", "title": "" }, { "docid": "89548e947629d1119f1fddaa516d65a2", "score": "0.5450535", "text": "def get_channel_names(self):\n return 
self._channel_names", "title": "" }, { "docid": "b399538efacdad4c324f4eafa656a433", "score": "0.54429865", "text": "def get_channels():\n with MongoClient() as client:\n msg_collection = client[DB][MSG_COLLECTION]\n distinct_channel_list = msg_collection.distinct(\"channel\")\n return distinct_channel_list", "title": "" }, { "docid": "8562515d4a43405050cb244eeeee94ec", "score": "0.5413347", "text": "def clients(self, client=None):\n method = \"getClients\"\n\n if not client:\n client = \"\"\n\n parameters = {\n \"client\": client,\n }\n\n return self._voipms_client._get(method, parameters)", "title": "" }, { "docid": "ba61855f4f7a2e3a71bb75804ec707de", "score": "0.54036546", "text": "def list_channels():\n bot_token = SECRETS_DICT['CITIGROUP_SLACKBOT_TOKEN']\n # bot_token = SECRETS_DICT['COMPUTERLAB_SLACKBOT_TOKEN']\n sc = SlackClient(bot_token)\n channels = sc.api_call('channels.list')\n return channels", "title": "" }, { "docid": "c5d2d37965bc5a76af40fd6069b4eb19", "score": "0.5368334", "text": "def get_channels(\n self,\n on_channel_open: Callable[[str], None],\n on_catastrophic_disconnect: Callable[[str], None],\n on_message: Callable[[str, \"Packet\"], None],\n ) -> List[\"Channel\"]:\n raise NotImplementedError()", "title": "" }, { "docid": "838db4bf545a542ae807e0946f81e54f", "score": "0.53466284", "text": "def get_users_in_channel(self, channel, **kwargs):\n pass", "title": "" }, { "docid": "5b3c7053df88afee5c37cfda32458d62", "score": "0.5341461", "text": "def getChannels(self):\r\n cursor = self._conn.cursor()\r\n cursor.execute(\"\"\"SELECT DISTINCT(chan) FROM chans_cache\"\"\")\r\n results = []\r\n for row in cursor:\r\n results.append(row[0])\r\n cursor.close()\r\n return results", "title": "" }, { "docid": "7b312b64cc1878451ac60601021c4d37", "score": "0.5337136", "text": "def getchannels(self):\n return self._call(\"getchannels\", [])['channels']", "title": "" }, { "docid": "914ae46a1bdacfe801695aa5a534df72", "score": "0.52967584", "text": "def get_channels(options):\n channel_list = dict()\n\n resp = requests.get(CHANNELS_URL, auth=(options.api_key, None))\n if resp.status_code != requests.codes.ok:\n print(\"Getting Channels: %s\" % resp.status_code)\n return None\n subs = json.loads(resp.content)\n for sub in subs['subscriptions']:\n if not sub['active']:\n continue\n channel = sub['channel']\n if not channel.has_key(u'website_url'):\n name = channel['tag']\n channel_list[name] = channel['iden']\n\n if options.debug:\n for channel in channel_list.keys():\n print(\"CHAN %s: %s\" % (channel, channel_list[channel]))\n\n return channel_list", "title": "" }, { "docid": "017c608d5f9f820166eb84210a1a5b2c", "score": "0.5281334", "text": "def get_channels(ApplicationId=None):\n pass", "title": "" }, { "docid": "e8ace7a9da2a24c685b1532bd0d56521", "score": "0.527044", "text": "def _query_channels(self):\n\n self._channels = {}\n response = []\n for i in range(10):\n response = self.execute(\"get_config=lora:channel\")\n if response:\n break\n time.sleep(random.random(i))\n \n data = ''.join(response).strip()\n channels = data.split(\"; \")\n for channel in channels:\n parts = channel.split(',')\n star = False\n try:\n channel_no = parts.pop(0)\n channel_no = int(channel_no)\n except ValueError:\n channel_no = int(channel_no[1:])\n star = True\n data = []\n data.append(parts.pop(0))\n freq = parts.pop(0)\n freq = float(freq[0:3] + '.' 
+ freq[3:])\n if freq == 0:\n sys.exit(\"Channels are still initializing\")\n data.append(freq)\n data.extend(parts)\n data.append(star)\n self._channels[channel_no] = tuple(data)", "title": "" }, { "docid": "04dec0b5095073e0c236128f9080734e", "score": "0.5267537", "text": "def get_users_channels(self):\n if self._channels:\n return self._channels\n else:\n uf.print_out(\"[ERROR] - User: {} needs to subscribe first.\".format(self._user))", "title": "" }, { "docid": "7f78b0ccccf4dd167ccb718ab9ec2dcc", "score": "0.52511173", "text": "def get_channel_list(self):\n if self.channels:\n print(list(self.channels.keys()))\n else:\n print('No channels available')", "title": "" }, { "docid": "1d33d71049c10d759f691be9de772644", "score": "0.5234107", "text": "def channels():\n if 'Channels' not in user_config:\n return []\n return [x[0] for x in user_config.items('Channels')]", "title": "" }, { "docid": "a439a98781ff872d5cdd79ea5acc9c52", "score": "0.51671255", "text": "def getClients(self):\n\n clients: List[str] = []\n\n for id in self.clients.values():\n clients.append(id)\n\n return clients", "title": "" }, { "docid": "f259433ac6b1a5828b2993d5dd86c2b2", "score": "0.51659757", "text": "def iterate_clients(self) -> Iterable[Client]:\n for app_name in self.clients:\n for client in self.clients[app_name].values():\n if client.status == ClientStatus.CONNECTED:\n yield client", "title": "" }, { "docid": "d8b06e3ab62b8f2b505db1762f59cc50", "score": "0.5161276", "text": "def channel_list(self):\n return sorted(channel for channel in self.channels.values() if channel.parent_id == 0)", "title": "" }, { "docid": "9af0a0146841887080739442ffa9ca8c", "score": "0.51611227", "text": "def get_channels(self): \n channels = create_string_buffer(BUF_SIZE)\n daqmx(\n dll.DAQmxGetTaskChannels,\n (\n self.handle,\n channels,\n BUF_SIZE\n )\n )\n return parseStringList(channels.value)", "title": "" }, { "docid": "5b598dbe27dcda04acc80572a503ee9b", "score": "0.5148462", "text": "def get_all_connections(self, channel):\n # import here to avoid circular imports\n #from src.comms.models import PlayerChannelConnection\n PlayerChannelConnection = ContentType.objects.get(app_label=\"comms\", \n model=\"playerchannelconnection\").model_class()\n ExternalChannelConnection = ContentType.objects.get(app_label=\"comms\", \n model=\"externalchannelconnection\").model_class()\n return itertools.chain(PlayerChannelConnection.objects.get_all_connections(channel),\n ExternalChannelConnection.objects.get_all_connections(channel))", "title": "" }, { "docid": "282b5d77fdd0a08f6a493bfd45a1b077", "score": "0.513103", "text": "def get_channel(ctx, channel_name):\n desired_channel = None\n for channel in ctx.guild.text_channels:\n if str(channel.name) == str(channel_name):\n desired_channel = channel\n else:\n pass\n return desired_channel", "title": "" }, { "docid": "dd620aa02227c310a3945bc73ee2fee5", "score": "0.5121694", "text": "def get_list_package(client, channels=None):\n # Fetch channels\n results = client.run_command('channel', 'listAllChannels')\n if results is None:\n raise client.get_error()\n all_channels = list({result['label'] for result in results})\n LOGGER.info(all_channels)\n if channels is None:\n channels = all_channels\n elif isinstance(channels, list):\n for channel in channels:\n if channel not in all_channels:\n raise ValueError(f'Channel \\'{channel}\\' not found')\n else:\n raise ValueError('\\'channels\\' must be a list or None')\n\n packages = dict()\n for channel in channels:\n results = 
client.run_command('channel.software', 'listAllPackages', [channel])\n if results is not None:\n for result in results:\n if not result['name'] in packages:\n packages[result['name']] = {'options': {}}\n packages[result['name']]['options'][result['version']] = {}\n return packages", "title": "" }, { "docid": "cfa84a95cebe7bba119fd87d80b3af70", "score": "0.5109427", "text": "def channels_list(token):\n # Raise an access error if not a valid token\n is_valid_token(token)\n # Get the list of all channels the user is part of\n channels = get_user_channels_by_key(\"token\", token)\n # return the list\n return {\"channels\": channels}", "title": "" }, { "docid": "4ffbc9ae9d1f1728854c680a7632f7d0", "score": "0.5097396", "text": "async def get_channels(self) -> Channel:\n\n res = await self.bot.http.request_url(f'/guilds/{self.id}/channels')\n return await Channel.from_api_res(res, self.bot)", "title": "" }, { "docid": "57fd0d733e3a3788f85c43fb1e3fb727", "score": "0.5061149", "text": "def get_channel_by_name(guild, channel_name):\n channel = None\n for c in guild.channels:\n if c.name == channel_name.lower():\n channel = c\n break\n return channel", "title": "" }, { "docid": "57480277b80dbfdf46ca8bb27bead644", "score": "0.50557995", "text": "def get_available_clients(self, min_available=1):\n\n return [ (s, i[1]) \\\n for s, i in self._clients.items() \\\n if i[1] >= min_available ]", "title": "" }, { "docid": "b2e592b01ab7ff88cc19f7dfb1654836", "score": "0.50382066", "text": "def get_channels(self):\n all_channels = (redis_db.pubsub_channels())\n all_channels = [i.decode('utf-8') for i in all_channels]\n # print('all_channels: %s' % all_channels)\n return all_channels", "title": "" }, { "docid": "79514bd7a7dd6c6e2e252ecd570d9fda", "score": "0.50361335", "text": "def get_channels_like(self, name, type_checker = None):\n if isinstance(type_checker, type):\n warnings.warn(\n f'`type_checker` cannot be `type`, but should be a function. 
Got {type_checker!r}.',\n FutureWarning,\n stacklevel = 2,\n )\n type_checker = None\n \n if name.startswith('#'):\n name = name[1:]\n \n name_length = len(name)\n if name_length > CHANNEL_NAME_LENGTH_MAX:\n return []\n \n channel_name_pattern = re_compile('.*?'.join(re_escape(char) for char in name), re_ignore_case)\n \n matches = []\n \n for channel in self.channels.values():\n if (type_checker is not None) and (not type_checker(channel)):\n continue\n \n channel_name = channel.name\n parsed = channel_name_pattern.search(channel_name)\n if parsed is None:\n continue\n \n match_start = parsed.start()\n match_length = parsed.end() - match_start\n \n matches.append((channel, (match_length, match_start)))\n \n if not matches:\n return matches\n \n matches.sort(key = _channel_match_sort_key)\n return [item[0] for item in matches]", "title": "" }, { "docid": "a6cc1c6d1f3d7ebdf915373331fc6e58", "score": "0.5032529", "text": "async def get_channels(self) -> List[Mapping[str, Any]]:\n response = await self._requester.get_list('channels')\n if response is None:\n raise ApiError\n return response", "title": "" }, { "docid": "688372824b4bab88f015e61eceabdd48", "score": "0.5032147", "text": "def get_available_channels(self):\n return list(self.group.keys())", "title": "" }, { "docid": "df8b95edb4e1213706b5f23adcc05cca", "score": "0.50301474", "text": "def listClients(self):\n config = q.config.getInifile(\"arakoonclients\")\n return config.getSections()", "title": "" }, { "docid": "1aab4e8dceab2d8b87efecd40d6324e0", "score": "0.5004872", "text": "def get_all_clients(self):\n self.logger.info(\"Getting all clients\")\n headers = self.__get_admin_access_token_headers()\n payload = {\"viewableOnly\": \"true\"}\n url = \"{0}/admin/realms/{1}/clients\".format(self.base_url, self.realm)\n ret = self.__send_request(\"get\", url, headers=headers, params=payload)\n # return clients as list of json instead of string\n return json.loads(ret.text)", "title": "" }, { "docid": "8d0166732b9a85456653a3ef9aef40dc", "score": "0.5004267", "text": "def channels_list(token):\n\n # Check token is valid\n u_id = token_to_uid(token)\n\n channel_list = []\n # Create a list of channel (extract the list that the user is member of)\n for channel_id in data.users[u_id]['in_channels']:\n channel_tmp = {\n 'channel_id': channel_id,\n 'name': data.channel[channel_id]['name'],\n }\n # Add the copy of dictionary instead of adding directly to avoid making reference\n channel_copy = channel_tmp.copy()\n channel_list.append(channel_copy)\n\n return {'channels': channel_list}", "title": "" }, { "docid": "84fe94502d3a109aa1a8c09ba60efe92", "score": "0.49950665", "text": "def channels_listall(token):\n # Raise an access error if not a valid token\n is_valid_token(token)\n # Get the list of all channels the user is part of\n channels = get_channels()\n print(f\"listall gave channel_id {channels}\")\n # return the list\n return {\"channels\": channels}", "title": "" }, { "docid": "2698631c9e65e0004c7235b655af1edc", "score": "0.4994847", "text": "def get_all_channels(self):\n return self.all()", "title": "" }, { "docid": "c95ab47eef5977537df3b6dfcf51cb1a", "score": "0.49927387", "text": "def channel(self):\n # Note that since the names can change, we need to query channel names\n # each time. 
This is inefficient, but alas.\n return ProxyList(self, self.Channel, self._channel_names())", "title": "" }, { "docid": "fc26b27948a6917207ace6f43df36a92", "score": "0.49852017", "text": "def get_comchans(self, nick):\n comchans = []\n for bla, chan in self.channels.items():\n if nick in chan.users:\n comchans.append(chan)\n\n return comchans", "title": "" }, { "docid": "59ad024b47b70215e3b6643fc0654552", "score": "0.49830833", "text": "def channel(channel_name):\n\n # save the current channel\n session['curr_channel'] = channel_name\n # Get channel details\n channel = CHANNELS.get(channel_name)\n messages = channel.get(\"messages\")\n\n # Remove current channel from the list\n channels = list(CHANNELS.keys())\n channels.remove(channel_name)\n\n # send a JSON asynchronously\n # return jsonify(messages)\n return render_template(\n \"channel.html\",\n channels = channels,\n messages=messages)", "title": "" }, { "docid": "02b7a0d534c31f8815f589d544f9bdcb", "score": "0.49787128", "text": "def gluster_clients(self):\n return self._gluster_clients", "title": "" }, { "docid": "60968abcbde6eb41de06e040a745548f", "score": "0.4968679", "text": "def get_channels(self):\n return self.channels", "title": "" }, { "docid": "1dd99761bc732eff1881515d40b66801", "score": "0.49669364", "text": "def get_execution_channels(self):\n response = self.client.get(self.client.get_url())\n\n results = {}\n for item in response.json():\n results[item['ChannelID']] = item['ChannelName']\n return results", "title": "" }, { "docid": "46ea93f6d903fc2c5d0e726921799215", "score": "0.49635655", "text": "def get_user_clients(self, ctx) -> UserClientSet:\n return get_user_client_set(self.cfg, ctx[\"user_id\"], ctx[\"token\"])", "title": "" }, { "docid": "81a5118fbd5a8b6f54b695ab5b846326", "score": "0.49620274", "text": "def cli_cosmosdb_managed_cassandra_cluster_list(client,\n resource_group_name=None):\n\n if resource_group_name is None:\n return client.list_by_subscription()\n\n return client.list_by_resource_group(resource_group_name)", "title": "" }, { "docid": "81a5118fbd5a8b6f54b695ab5b846326", "score": "0.49620274", "text": "def cli_cosmosdb_managed_cassandra_cluster_list(client,\n resource_group_name=None):\n\n if resource_group_name is None:\n return client.list_by_subscription()\n\n return client.list_by_resource_group(resource_group_name)", "title": "" }, { "docid": "c03d346a3df9af3036488e0a04df34b0", "score": "0.49530828", "text": "def list_notification_channels(\n self,\n ) -> Callable[\n [notification_service.ListNotificationChannelsRequest],\n Awaitable[notification_service.ListNotificationChannelsResponse],\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"list_notification_channels\" not in self._stubs:\n self._stubs[\"list_notification_channels\"] = self.grpc_channel.unary_unary(\n \"/google.monitoring.v3.NotificationChannelService/ListNotificationChannels\",\n request_serializer=notification_service.ListNotificationChannelsRequest.serialize,\n response_deserializer=notification_service.ListNotificationChannelsResponse.deserialize,\n )\n return self._stubs[\"list_notification_channels\"]", "title": "" }, { "docid": "b70b6c706cc31f9ab2eedddd584649df", "score": "0.49444413", "text": "async def get_all_users_messages(username, limit=None, channels=None):\n\n if not channels:\n channels = get_all_channels()\n\n counter = 0\n messages = []\n for channel in 
channels:\n async for message in channel.history(limit=limit):\n if message.author.name == username:\n messages.append(message)\n counter += 1\n print(f\"Completed retriving {username}'s messages, {counter} in total\")\n return messages", "title": "" }, { "docid": "25a78fc3840dc472da654a50ea5f72d9", "score": "0.49364266", "text": "def getClients(self):\n return list(self.__clients)", "title": "" }, { "docid": "df2bcc7edc314b0e45de4d9b0ef144a0", "score": "0.49140254", "text": "def channels_listall(token):\n # Check token is valid\n __ = token_to_uid(token)\n\n channel_list = []\n # Create a list of all channel (extract the list from data.channel)\n for channel_id in data.channel:\n channel_tmp = {\n 'channel_id': channel_id,\n 'name': data.channel[channel_id]['name'],\n }\n # Add the copy of dictionary instead of adding directly to avoid making reference\n channel_copy = channel_tmp.copy()\n channel_list.append(channel_copy)\n\n return {'channels': channel_list}", "title": "" }, { "docid": "42dd3b4610d463ba42cb48ea02d1ba5a", "score": "0.49122417", "text": "def get_owners_channel(self, channel_id: str) -> dict:", "title": "" }, { "docid": "fa32502cd49917e5e4fa0fb34872b379", "score": "0.49091193", "text": "def get_channels(self):\r\n return self.channels", "title": "" }, { "docid": "f733c39a91a14f079dac0a70f560c024", "score": "0.4907912", "text": "def clients(self) -> List[str]:\n raise NotImplementedError", "title": "" }, { "docid": "7c1d2e30bfe26e15d22b0cdebc2a0e6f", "score": "0.48982906", "text": "def _route_channels(self, channel_names):\r\n routes = {}\r\n for channel_name in channel_names:\r\n clients = self._get_clients_for_channel(channel_name)\r\n for client in clients:\r\n channels_for_client = routes.get(client, [])\r\n channels_for_client.append(channel_name)\r\n routes[client] = channels_for_client\r\n return routes", "title": "" }, { "docid": "db77396c3ade3d1a8b90ae2585998007", "score": "0.48888284", "text": "def get_related_channels(channel_name):\n\n base_url = \"https://www.googleapis.com/youtube/v3/search\"\n\n params_dict = {\n \"q\": channel_name,\n \"part\": \"snippet\",\n \"type\": \"channel\",\n \"maxResults\": 25,\n \"order\": \"relevance\",\n \"key\": api_key,\n }\n\n response_object = requests.get(base_url, params=params_dict)\n response = json.loads(response_object.text)\n # print(response_object.content) # Try when Quota Exceeded\n\n fin_dict = {}\n\n try:\n if response[\"items\"] == []:\n return \"Sorry! Could not find any channels related to the given keyword. Please try Again!\"\n # elif response_object.content\n else:\n for item in response[\"items\"]:\n title = item[\"snippet\"][\"title\"]\n thumb_url = item[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"]\n channel_id = item[\"snippet\"][\"channelId\"]\n\n fin_dict[title] = {\"thumbnail\": thumb_url, \"channel_id\": channel_id}\n except Exception as e:\n print(str(e))\n\n return fin_dict", "title": "" }, { "docid": "e107ba4365747cd66eb04c6b7b81142f", "score": "0.4886231", "text": "def channel(self, name):\n return self._channels.get(name, None)", "title": "" }, { "docid": "784d2d086a013f0c2f1b78dc17df17c3", "score": "0.48809665", "text": "def get_messages(self, channel_name, days):\n\n url_template = self.url + \"channels.history?oldest={}&token={}&channel={}\"\n cid = self.get_channelid(channel_name)\n ago = time.time() - (days * 86400)\n url = url_template.format(ago, self.api_token, cid)\n payload = requests.get(url).json()\n assert 'messages' in payload\n # Why filter out subtype? 
Because Slack marks *everything* as\n # messages, even when it's automated 'X has joined the channel'\n # notifications (which really should be marked as events, not messages)\n # The way to know whether or not such message is an event is to see\n # if it has a subtype -- someone just talking has no subtype, but\n # 'X has joined the channel' have a subtype, so we'll filter that out.\n return [x for x in payload['messages'] if x.get(\"subtype\") is None]", "title": "" }, { "docid": "371e321efdec6448f495df7daab45c6a", "score": "0.48724985", "text": "def channelList(self):\n self.sendLine(\"LIST\")", "title": "" }, { "docid": "371e321efdec6448f495df7daab45c6a", "score": "0.48724985", "text": "def channelList(self):\n self.sendLine(\"LIST\")", "title": "" }, { "docid": "822a26a11a751de00feb62f29c89e621", "score": "0.48634082", "text": "def channel_groups(self):\n return []", "title": "" }, { "docid": "e532cf6b289496aad342005b92d37b55", "score": "0.48615518", "text": "def cluster_get(version=None, name=None):\n clusters = cluster_get_all()\n\n # Filter version and name\n if not version is None:\n clusters = [c for c in clusters if c[\"version\"] == version]\n if not name is None:\n clusters = [c for c in clusters if c[\"cluster\"] == name]\n\n return clusters", "title": "" }, { "docid": "d9edd9312932ce2c2ad29ea7e5f3f378", "score": "0.48605493", "text": "def list_of_channels(self):\n\n channels_list = self.sc.api_call('conversations.list')\n\n return [(c['id'], c['name']) for c in channels_list['channels']]", "title": "" }, { "docid": "6444f9124bd19e14d81284d05b82cded", "score": "0.48589656", "text": "def getClientFromName(cliname, match=False):\n\n if match == True:\n mclis = MClient.query.filter_by(client_name=cliname)\n else:\n mclis = MClient.query.filter(or_(MClient.client_name.startswith(cliname),MClient.client_name.endswith(cliname)))\n\n if mclis is None:\n return False\n\n clis = []\n \n for mcli in mclis:\n cli = Client(mcli.client_name)\n cli.ID = mcli.id\n cli.license_filename = mcli.client_license_file\n cli._mcli = mcli\n clis.append(cli)\n\n return clis", "title": "" }, { "docid": "9817638beeea2e19df7776be5fac4f6e", "score": "0.48445582", "text": "def getNodes(self):\n\n config = q.config.getInifile(self._configPath)\n \n clientconfig = dict()\n\n if config.checkSection(\"global\"):\n nodes = self.__getNodes(config)\n\n for name in nodes:\n clientconfig[name] = (config.getValue(name, \"ip\"),\n config.getValue(name, \"client_port\"))\n\n return clientconfig", "title": "" }, { "docid": "d1d6fff00a78a5ca6f8b758303341fee", "score": "0.48396897", "text": "def List():\r\n return ApiClient.Request('GET', '/channel/list')", "title": "" }, { "docid": "e33ff098d36426bc09228624f97fdfb7", "score": "0.4839593", "text": "def get_service_clients(self):\n return []", "title": "" }, { "docid": "335ccb0d6b8b007832ed6d2113d450eb", "score": "0.4829167", "text": "def get_channel_by_name(bot, channel):\n if channel.startswith(\"#\"):\n channel = channel[1:]\n redis_client = omniredis.get_redis_client()\n channel_id = redis_client.hget(f\"channelsmap:{bot.team.name}\", channel)\n if channel_id:\n return _get_channel_name_from_cache(\"channels\", bot.team.name, channel_id)\n group_id = redis_client.hget(f\"groupsmap:{bot.team.name}\", channel)\n if group_id:\n return _get_channel_name_from_cache(\"groups\", bot.team.name, group_id)\n im_id = redis_client.hget(f\"imsmap:{bot.team.name}\", channel)\n if im_id:\n return _get_channel_name_from_cache(\"ims\", bot.team.name, im_id)\n mpim_id = 
redis_client.hget(f\"mpimsmap:{bot.team.name}\", channel)\n if mpim_id:\n return _get_channel_name_from_cache(\"mpims\", bot.team.name, mpim_id)\n return None", "title": "" }, { "docid": "780d77ac7f08bcab894cd69fca569f6e", "score": "0.4823259", "text": "def get_songs_by_channel(self, channel_name=None):\n if channel_name:\n try:\n print('Channel:', channel_name)\n print('Songs:')\n print(self.channel_playlist[channel_name])\n except KeyError:\n print('No songs available for this channel name')\n else:\n if self.channel_playlist:\n for channel, playlist in self.channel_playlist.items():\n print('Channel:', channel)\n print('Songs:')\n print(playlist)\n print('-'*55)\n else:\n print('No channel playlist available')", "title": "" }, { "docid": "c37ca1193ed8551123c2f9ab125ad402", "score": "0.48141408", "text": "async def get_chatters(self, channel: str):\n\n url = f'http://tmi.twitch.tv/group/user/{channel.lower()}/chatters'\n\n async with self.http._session.get(url) as resp:\n if 200 <= resp.status < 300:\n data = await resp.json()\n else:\n raise HTTPException(f'Fetching chatters failed: {resp.status}', resp.reason)\n\n all_ = []\n for x in data['chatters'].values():\n all_ += x\n\n return Chatters(data['chatter_count'], all_, *data['chatters'].values())", "title": "" }, { "docid": "1be491b73c6771cdaf637b81e52a4663", "score": "0.47909036", "text": "def get_client_list():\n return util.PropertyCookie(util.get_property(root, '_NET_CLIENT_LIST'))", "title": "" }, { "docid": "0200059ee7c86fc5b66297378473bda3", "score": "0.4790731", "text": "def list_channels(self, **kwargs):\n r = self.get(\n self.services['channel'],\n params=kwargs\n )\n if r.status_code == 200:\n content = r.json()\n\n channels = dict([(ch['slug'], ch) for ch in content['objects']])\n while content['meta']['next'] is not None:\n r = self.get(self.root + content['meta']['next'])\n content = r.json()\n channels.update(dict(\n [(ch['slug'], ch) for ch in content['objects']]))\n return channels\n else:\n raise HttpError(\n \"Got response code {0} from {1}\".format(\n r.status_code, self.endpoint))", "title": "" }, { "docid": "cbbb27b6e8d969b0d201e351c27ad4d0", "score": "0.4786526", "text": "def get_channels(self) -> dict:", "title": "" }, { "docid": "3680a5fc1f646123eb4980720782c9d4", "score": "0.47760066", "text": "def channels(self):\n return self.properties.get('channels',\n ChannelCollection(self.context, ResourcePath(\"channels\", self.resource_path)))", "title": "" }, { "docid": "7cf438cfbdf4e5cd635c357f8bceb816", "score": "0.47639072", "text": "def get_all_channels(self):\n channels = self.get_open_channels(public_only=False, active_only=False)\n return channels", "title": "" }, { "docid": "5207d89ed7ee389a85d18c7755a47a66", "score": "0.47604457", "text": "def clients(self):\n\n self.assert_validity()\n return list(self.m_session.query(Client))", "title": "" }, { "docid": "4d7bf5e8a46a9b0f5b0eb0d41070de28", "score": "0.47596598", "text": "def get_clients_in_room(cls, room):\n return [client for client in cls.all_clients if room in client.rooms]", "title": "" }, { "docid": "cca1fced4e2d758eb359c1e9909bea4f", "score": "0.4754735", "text": "def channels_current(self) -> List[ChannelType]:\n if self.api_version >= 5:\n favorite_list = self.favorite_lists.get(self.channel_list_id)\n if not favorite_list:\n return list(self.channels.values())\n\n return [\n {\n **channel,\n \"preset\": favorite.get(\"preset\", \"\")\n }\n for favorite in favorite_list.get(\"channels\", [])\n if (channel := 
self.channels.get(str(favorite.get(\"ccid\"))))\n ]\n else:\n return [\n {\n **channel,\n \"ccid\": key\n }\n for key, channel in self.channels.items()\n ]", "title": "" }, { "docid": "affba9d9aeb35d4576b20257e792bc22", "score": "0.47513562", "text": "def get_global_channels(cls):\n global_channels = create_string_buffer(BUF_SIZE)\n daqmx(\n dll.DAQmxGetSysGlobalChans,\n (\n global_channels,\n BUF_SIZE\n )\n )\n global_channels = parseStringList(global_channels.value)\n channel_dict = {\n task_type:[] for task_type in cls.TASK_TYPES\n }\n for channel in global_channels:\n channel_dict[cls.get_channel_type(channel)].append(channel)\n return channel_dict", "title": "" }, { "docid": "badf24acdfd3ac867040cd6398aa950f", "score": "0.47464475", "text": "def iter_ipnet_channels():\n channel_name = \"\"\n for line in urllib2.urlopen(IPNET_URL):\n channel_name_match = CHANNEL_NAME_RE.match(line)\n if channel_name_match:\n channel_name = channel_name_match.group(1).decode('utf-8')\n channel_url_match = CHANNEL_URL_RE.match(line)\n if channel_url_match:\n channel_ip = channel_url_match.group(1)\n channel_port = int(channel_url_match.group(2))\n yield channel_name, channel_ip, channel_port\n channel_name = \"\"", "title": "" }, { "docid": "f052441e1a15d7c551cf6316a7c44c71", "score": "0.4736474", "text": "def getChannelIds():\n res = {}\n for row in data:\n res[row[\"name\"]] = row[\"id\"]\n return res", "title": "" }, { "docid": "b9843235dbdcbcd4f5a0667337460182", "score": "0.4733496", "text": "def name(self):\n\t\treturn \"getchannels\"", "title": "" }, { "docid": "f94e0b6f20ac34937a2c803d56cb0124", "score": "0.47329465", "text": "def channels(self) -> Sequence[BaseNode]:\n dotnet_result = self._dotnet_instance.Channels\n return _wrap(dotnet_result)", "title": "" }, { "docid": "1736645cacb4be74af51745800e9629b", "score": "0.4732831", "text": "def channel_list_flattened(self):\n channels = []\n for channel in sorted(channel for channel in self.channels.values() if channel.parent_id == 0):\n channels.append(channel)\n if channel.is_guild_category():\n channels.extend(channel.channels)\n \n return channels", "title": "" }, { "docid": "1b13774516869f04ce6387fb807afd8e", "score": "0.4725197", "text": "def _get_channels(self) -> List[BaseChannel]:\n result: List[BaseChannel] = []\n for connector in self._connectors:\n result += connector._get_channels() # pylint: disable=protected-access\n return result", "title": "" }, { "docid": "3c05da4a45dabf0a1c10c04355e6e6e8", "score": "0.47245085", "text": "def add_all_channels(self) -> None:\n for cluster_id, cluster in self.endpoint.in_clusters.items():\n channel_class = zha_regs.ZIGBEE_CHANNEL_REGISTRY.get(\n cluster_id, base.ZigbeeChannel\n )\n # really ugly hack to deal with xiaomi using the door lock cluster\n # incorrectly.\n if (\n hasattr(cluster, \"ep_attribute\")\n and cluster_id == zigpy.zcl.clusters.closures.DoorLock.cluster_id\n and cluster.ep_attribute == \"multistate_input\"\n ):\n channel_class = general.MultistateInput\n # end of ugly hack\n channel = channel_class(cluster, self)\n if channel.name == const.CHANNEL_POWER_CONFIGURATION:\n if (\n self._channels.power_configuration_ch\n or self._channels.zha_device.is_mains_powered\n ):\n # on power configuration channel per device\n continue\n self._channels.power_configuration_ch = channel\n elif channel.name == const.CHANNEL_IDENTIFY:\n self._channels.identify_ch = channel\n\n self.all_channels[channel.id] = channel", "title": "" }, { "docid": "ee3e9cbbe48084fa9b00de8d7826de86", "score": "0.472365", 
"text": "def channel(self, n):\n return self.channels(range(n, n + 1))", "title": "" }, { "docid": "2586298587519e0c92df38db899f0f14", "score": "0.47022265", "text": "def getChannelList(self, top_slice):\n\t\treturn(list(self.slices[top_slice].channels))", "title": "" }, { "docid": "92585bf768159cdb1b85c71ea60dfa06", "score": "0.47019827", "text": "def get_channel_dfs_list(df):\n channel_id_list = get_unique_channels(df)\n channel_dfs_list = []\n\n for channel_id in channel_id_list:\n channel_df = df.loc[df[\"channel_id\"] == channel_id]\n channel_dfs_list.append(channel_df)\n\n return channel_dfs_list", "title": "" }, { "docid": "b239002a74a0edcb8715068422468c66", "score": "0.46869463", "text": "def get_client_by_name(self, client_name):\n return self.clients_list.get_clients_by_name(client_name)[0]", "title": "" }, { "docid": "e36741007b6becd1f1c708d500d5dad4", "score": "0.4683968", "text": "def get_channelid(self, channel_name):\n return self.channels[channel_name]", "title": "" }, { "docid": "e4e7d8d882bd59b397bf0c2e90d819d8", "score": "0.46833917", "text": "def get_rss_news_channels(self, call):\n if \"rss_news_category\" not in call.data:\n return []\n if call.data[\"rss_news_category\"] == ais_global.G_EMPTY_OPTION:\n # reset status for item below\n self.hass.services.call(\n \"input_select\",\n \"set_options\",\n {\n \"entity_id\": \"input_select.rss_news_channel\",\n \"options\": [ais_global.G_EMPTY_OPTION],\n },\n )\n return\n ws_resp = self.cloud.audio_name(\n ais_global.G_AN_NEWS, call.data[\"rss_news_category\"]\n )\n try:\n json_ws_resp = ws_resp.json()\n except Exception as e:\n _LOGGER.warning(\"get_rss_news_channels problem \" + str(e))\n return\n\n names = [ais_global.G_EMPTY_OPTION]\n self.news_channels = []\n for item in json_ws_resp[\"data\"]:\n names.append(item[\"NAME\"])\n self.news_channels.append(item)\n self.hass.services.call(\n \"input_select\",\n \"set_options\",\n {\"entity_id\": \"input_select.rss_news_channel\", \"options\": names},\n )\n # check if the change was done form remote\n import homeassistant.components.ais_ai_service as ais_ai\n\n if (\n ais_ai.CURR_ENTITIE == \"input_select.rss_news_category\"\n and ais_ai.CURR_BUTTON_CODE == 23\n ):\n ais_ai.set_curr_entity(self.hass, \"input_select.rss_news_channel\")\n self.hass.services.call(\n \"ais_ai_service\", \"say_it\", {\"text\": \"Wybierz kanał wiadomości\"}\n )", "title": "" } ]
69907cdd1fb10ed77ccb10841edc39a5
unloadSfx(self) Unloads any sound effects that may have been loaded.
[ { "docid": "fb6339aaa3f1d5f4c73877a25ef8cc05", "score": "0.8124208", "text": "def unloadSfx(self):\n if self.cogDropSound != None:\n self.cogDropSound = None\n self.cogLandSound = None\n self.cogSettleSound = None\n self.openSfx = None\n \n if self.cogWeakenSound != None:\n self.cogWeakenSound = None\n self.toonGrowSound = None\n self.toonSettleSound = None\n self.openSfx = None", "title": "" } ]
[ { "docid": "eabb169063d4295eba34b9a8e1a66cb8", "score": "0.7055466", "text": "def unload(self, *args, **kwargs):\n \n if self.fs:\n self.fs.sfunload(self.sfid, update_midi_preset=0)", "title": "" }, { "docid": "000e57bdd28eed6537fdefc2043e4290", "score": "0.6056518", "text": "def _unload(self):\n self._execute_instance_shutdown()\n if self.config.get(\"debug\", False):\n self._garbage_collect()\n self.loaded = False\n self._emit_skill_shutdown_event()", "title": "" }, { "docid": "dc270c4fdf504dd61b3c6d6afa205016", "score": "0.605364", "text": "def clear_effects(self):\n self.player.current_level.clear_effects()", "title": "" }, { "docid": "226d79a7de779092973a66ddaf3da6bf", "score": "0.6039599", "text": "def close_sound():\n playsound(CLOSE_SOUND)", "title": "" }, { "docid": "64837d4138bbfd0bec82657c78257fdb", "score": "0.6036798", "text": "def remove_sfx(cls, source, subscriber):\n fn = basename(source)\n resource = cls.sfx[fn]\n del resource.subscribers[subscriber]\n if len(resource.subscribers) == 0:\n del cls.sfx[fn]\n resource.track.unload()", "title": "" }, { "docid": "cbf00dd68dc9f2aff9581ba4b241d284", "score": "0.591304", "text": "def stopmusic(self):\n pygame.mixer.music.stop()", "title": "" }, { "docid": "b948d8510cbea4f1218acb23ec96a3a6", "score": "0.5871934", "text": "def sfx_death(self):\n sound = self.sfx[\"death\"]\n sound.play()", "title": "" }, { "docid": "684147d4484ce2a165c8e9281f174ad2", "score": "0.58157724", "text": "def close(self):\n pygame.mixer.quit()", "title": "" }, { "docid": "75bda194f3b9b60826151ef1b68d8173", "score": "0.5778129", "text": "def _unload(self):\n for a in self.actions.values(): a.unload()", "title": "" }, { "docid": "659151937dbf97bb954552b48373ddef", "score": "0.57769334", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&MicMac_SFM'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "title": "" }, { "docid": "2ad9fc07f1a08cad7c517bca0b324d7c", "score": "0.5751646", "text": "def unload():\r\n sourcerpg.skills.removeSkill( skillName )", "title": "" }, { "docid": "bba704d1e4e09233057c71e4e60c6645", "score": "0.57263875", "text": "def loadAnimToSuitSfx(self):\n if self.cogDropSound == None:\n self.cogDropSound = base.loadSfx(self.TAKEOVER_SFX_PREFIX + \"cogbldg_drop.mp3\")\n self.cogLandSound = base.loadSfx(self.TAKEOVER_SFX_PREFIX + \"cogbldg_land.mp3\")\n self.cogSettleSound = base.loadSfx(self.TAKEOVER_SFX_PREFIX + \"cogbldg_settle.mp3\")\n self.openSfx = base.loadSfx(\"phase_5/audio/sfx/elevator_door_open.mp3\")", "title": "" }, { "docid": "278f985f505a8356f415dcb0519cf7a7", "score": "0.570859", "text": "def stopmusic():\n pygame.mixer.music.stop()", "title": "" }, { "docid": "278f985f505a8356f415dcb0519cf7a7", "score": "0.570859", "text": "def stopmusic():\n pygame.mixer.music.stop()", "title": "" }, { "docid": "c18ddfcb527188f8747d031fb0e8d2d5", "score": "0.56259376", "text": "def unload(self, fname):\n pass", "title": "" }, { "docid": "5e74f61c9441e18c213c91042c8b7860", "score": "0.55969983", "text": "def __del__(self):\n mixer.quit()", "title": "" }, { "docid": "25bd8f329a3352f9d8f298366f197bce", "score": "0.5575443", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginVectorMenu(\n self.tr(u'&Seismic Positioning Data Importer'),\n action)\n self.iface.removeToolBarIcon(action)", "title": "" }, { "docid": "e12f80b9ab17f74564df419ebb23edc7", "score": "0.55750036", "text": "def 
stop(self):\n\t\tpygame.mixer.music.stop()\n\t\tset_kv(\"now_playing\", None)", "title": "" }, { "docid": "56f10bf4b8cabdc9f2da427725fea175", "score": "0.5559036", "text": "def unload(self):\n for image in self:\n image.unload()", "title": "" }, { "docid": "e485e0c624f8fd8ccca072f872a9d6a2", "score": "0.5556905", "text": "def remove_music(cls, source, subscriber):\n fn = basename(source)\n resource = cls.music[fn]\n del resource.subscribers[subscriber]\n if len(resource.subscribers) == 0:\n del cls.music[fn]\n resource.track.unload()", "title": "" }, { "docid": "be6b355ffd482852c0f2f37536f3956a", "score": "0.5503873", "text": "def _unload(self):\n pass", "title": "" }, { "docid": "39d6fe641183048840ee8846e719ff97", "score": "0.55011386", "text": "def unload(self):\n\n if self.user_dex:\n self.user_dex.unload()\n self.user_dex = None\n print(\"unloaded pokedex {self.pokedex_file}\")\n self.pokedex_file = None\n else:\n print(\"no pokedex is currently loaded\")", "title": "" }, { "docid": "8db0e5caa44906a6fc603dd89c6a7103", "score": "0.55006516", "text": "def stopEffects(self):\n self.stopFade()\n self.stopRotate()", "title": "" }, { "docid": "4212493fdc7dbd4259f6d181e2a663e3", "score": "0.5488589", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&Import shapes'),\n action)\n self.iface.removeToolBarIcon(action)", "title": "" }, { "docid": "0ba684ee527a31fe3372b4974c10a62c", "score": "0.54400575", "text": "async def unload(self) -> List[str]:\n if not self.is_loaded:\n return []\n return await _do_it(self, 'unload')", "title": "" }, { "docid": "3697249aaabdf46e9a9f0f46aadadd57", "score": "0.5419777", "text": "def shutdown(self):\n if os.name == 'nt':\n # Must load another mp3 for pygame since it never wants to\n # release the file and we want to cleanup the temp dir.\n pygame.mixer.music.load(os.path.join(DATADIR, 'fake.mp3'))\n\n cdgPlayer.shutdown(self)", "title": "" }, { "docid": "2619a6e2ac2fadca6d970bf537fe59ba", "score": "0.54157835", "text": "def unload(self):\n for action in self.actions:\n self.iFace.removePluginWebMenu(\n self.tr(u'&Bhuvan Web Services'),\n action)\n self.iFace.removeToolBarIcon(action)", "title": "" }, { "docid": "ce908653241b1c91f38536787f16c77f", "score": "0.5406359", "text": "def unload_file(self):\n self.send_basic_command('unload_file')", "title": "" }, { "docid": "3910c0aed7a9a1ac97500da2cd1da9c4", "score": "0.538692", "text": "def unload(self):\n logger.debug(\"Unloaded\")", "title": "" }, { "docid": "7e41fd89874e321f874845732ea80005", "score": "0.5384727", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&Huff Model'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "title": "" }, { "docid": "8f398850572ffd804e52e9eb15788681", "score": "0.53771716", "text": "def close(self):\n mididriver.close()\n Synthesizer._started = False", "title": "" }, { "docid": "9f0a8e00e24408ae39d704904373b41c", "score": "0.53719515", "text": "def stop(self):\n self.unload_player()\n # Unload from noisemakers active loops\n del(self.nm.active_loops[self.loop_id])", "title": "" }, { "docid": "214a6b747a61d711d98604709af64d94", "score": "0.53414756", "text": "def unload(self):\n pass", "title": "" }, { "docid": "214a6b747a61d711d98604709af64d94", "score": "0.53414756", "text": "def unload(self):\n pass", "title": "" }, { "docid": "3faf35fc867452c4e6f9e402bb4a640b", "score": "0.5305479", "text": "def unload(self):\n for action in 
self.actions:\n self.iface.removePluginVectorMenu(\n self.tr(u'&Creer un Gpx pour Garmin'),\n action)\n self.iface.removeToolBarIcon(action)", "title": "" }, { "docid": "597a444ac27edef328632efc497cb558", "score": "0.5285244", "text": "def stop(self):\n sounddevice.stop()", "title": "" }, { "docid": "73fea318739aec2210c048d835a5a671", "score": "0.5278874", "text": "async def unload(self, ctx, ext: str):\n self.bot.unload_extension(\"cogs.\" + ext)\n self.bot.log.info(f'Unloaded ext {ext}')\n await ctx.send(f':white_check_mark: `{ext}` successfully unloaded.')", "title": "" }, { "docid": "11fd7ac2fdacbea75083fc89ee6e56e9", "score": "0.52715135", "text": "def unload(self): \n pass", "title": "" }, { "docid": "231f30289a0b79ce3a34261eb994d4b3", "score": "0.52650946", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&Short Path'),\n action)\n self.iface.removeToolBarIcon(action)", "title": "" }, { "docid": "09795c0a7ae0d71c691b300840eee21c", "score": "0.5263616", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&WLC/OWA Tool'),\n action)\n self.iface.removeToolBarIcon(action)", "title": "" }, { "docid": "c8f1e17d17273c289c039c4caac82389", "score": "0.52537173", "text": "def __unload(self):\n pass", "title": "" }, { "docid": "bf889e66169b9e2b139bcbca4d0dad19", "score": "0.5244136", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginVectorMenu(\n self.tr(u'&Covid Analyzer'),\n action)\n self.iface.removeToolBarIcon(action)", "title": "" }, { "docid": "61a49a4ea1daf6c33b60d82fd8928664", "score": "0.5241713", "text": "def __unload(self):\n self.session.close()\n self._stop_pollers()", "title": "" }, { "docid": "0874ac90ee1b278ecfd418927b3f24f6", "score": "0.5226187", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&template Loader'),\n action)\n self.iface.removeToolBarIcon(action)", "title": "" }, { "docid": "53bdd39ed65a1b627da47326fe5f5229", "score": "0.52216077", "text": "def __unload(self):", "title": "" }, { "docid": "fcd7f3eb88196ed3b1ff4a8aaf560939", "score": "0.5189134", "text": "def loadAnimToToonSfx(self):\n if self.cogWeakenSound == None:\n self.cogWeakenSound = base.loadSfx(self.TAKEOVER_SFX_PREFIX + \"cogbldg_weaken.mp3\")\n self.toonGrowSound = base.loadSfx(self.TAKEOVER_SFX_PREFIX + \"toonbldg_grow.mp3\")\n self.toonSettleSound = base.loadSfx(self.TAKEOVER_SFX_PREFIX + \"toonbldg_settle.mp3\")\n self.openSfx = base.loadSfx(\"phase_5/audio/sfx/elevator_door_open.mp3\")", "title": "" }, { "docid": "c146a9034cee440355b5251c6a412d46", "score": "0.5159993", "text": "def unload(self):\n\n self.__container.clear()\n self.loaded = False", "title": "" }, { "docid": "bc6f22f8a2a854094657c4de3b0ec4f0", "score": "0.51527864", "text": "def unload(self):\n if not self.loaded:\n return\n\n pmsg(\"Unloading %s...\" % self.PLUGIN_NAME)\n\n # mark the core as 'unloaded' and teardown its components\n self.loaded = False\n\n # remove UI integrations\n self._uninstall_ui()\n\n # spin down any active contexts (stop threads, cleanup qt state, etc)\n for pctx in self.contexts.values():\n pctx.terminate()\n self.contexts = {}\n\n # all done\n logger.info(\"-\"*75)\n logger.info(\"Plugin terminated\")", "title": "" }, { "docid": "cdea280609994f80c536c788ff99eb05", "score": "0.51458514", "text": "def load_sounds(self) -> Dict[str, mixer.Sound]:\n # reserves one channel, does not reserve channel 1\n mixer.set_reserved(1)\n sfx 
= {\n x[:-4]: mixer.Sound(os.path.join(self.SFX_DIR, x))\n for x in os.listdir(self.SFX_DIR)\n if x.endswith(\".wav\")\n }\n music = {\n x[:-4]: mixer.Sound(os.path.join(self.MUSIC_DIR, x))\n for x in os.listdir(self.MUSIC_DIR)\n if x.endswith(\".wav\")\n }\n return sfx, music", "title": "" }, { "docid": "346d34283c1ef1bf188f4a4acd145360", "score": "0.51449966", "text": "def unload(self):\n\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&OSM_SAA'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "title": "" }, { "docid": "5fb330ca86abc17056bfd03637b55e3a", "score": "0.5141464", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&demo'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "title": "" }, { "docid": "7166a6ae275df70ce02fc551a0df229d", "score": "0.5137282", "text": "def cog_unload(self):\n self.bot.lavalink._event_hooks.clear()", "title": "" }, { "docid": "db0ff0ffd79eace8a1cdb683688f3f81", "score": "0.51130676", "text": "async def stop(self, ctx):\n await ctx.music_state.stop()", "title": "" }, { "docid": "ed31119095cd121f1dfda62f3132930a", "score": "0.51065946", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginVectorMenu(\n self.tr(u'&Save Attributes'),\n action)\n self.iface.removeToolBarIcon(action)", "title": "" }, { "docid": "d1e2d3218c1ba205d16e94c4c32be7e3", "score": "0.5079389", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&Owls Season Distance'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "title": "" }, { "docid": "db06930d6319125157bfc906a2622237", "score": "0.5071777", "text": "def deinit(self):\n self._sclk.deinit()\n if self._miso:\n self._miso.deinit()\n if self._mosi:\n self._mosi.deinit()", "title": "" }, { "docid": "4300eda7d1bf72122ca7c5668393d5d4", "score": "0.5057429", "text": "def destroy(self):\n self.level.sprites.discard(self)", "title": "" }, { "docid": "125c7b898184b558ad67d7a343318ba1", "score": "0.5049533", "text": "def clearSED(self):\n self.wavelen = None\n self.fnu = None\n self.flambda = None\n self.zp = -8.9\n self.name = None\n return", "title": "" }, { "docid": "0c2457ab83003d772a5c17f7b1c6cba1", "score": "0.5040606", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&Least Cost Path'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "title": "" }, { "docid": "b89315c227dc0a51060b8f61d2514fd9", "score": "0.50296104", "text": "def unload(self):\n\n #print \"** UNLOAD raster_tracer\"\n\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&raster_tracer'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "title": "" }, { "docid": "87915e47c7fb7a43952659b2d90da265", "score": "0.5026213", "text": "def checkMusic(self, state):\n if state:\n pygame.mixer.music.unpause()\n else:\n pygame.mixer.music.pause()", "title": "" }, { "docid": "e2002b89b181f771975c71ee2c035622", "score": "0.50238097", "text": "def s_exit_load(self):\n self.msg.clear_msg()\n pass", "title": "" }, { "docid": "98df939cc965f9a9106bcffe60b1019c", "score": "0.50216043", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&Generador de Manzanas Catastrales'),\n action)\n 
self.iface.removeToolBarIcon(action)", "title": "" }, { "docid": "07bc04aedd05a716373dc1c7927dae68", "score": "0.5020404", "text": "def unload():\n global grammar\n grammar.unload()\n grammar = None", "title": "" }, { "docid": "10df55b7091acf96bfcefb845f1939d0", "score": "0.50088155", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&deep layers'),\n action)\n self.iface.removeToolBarIcon(action)", "title": "" }, { "docid": "16ea89e492f36e3df974a13e97385b2d", "score": "0.49886143", "text": "def deactivate(self):\n\n global _LOADED_PLUGINS\n\n if (self.name in _LOADED_PLUGINS and\n _LOADED_PLUGINS[self.name] is self):\n del _LOADED_PLUGINS[self.name]\n\n if (_SYS_VAR_DICT is not None and\n self.name in _SYS_VAR_DICT):\n del _SYS_VAR_DICT[self.name]", "title": "" }, { "docid": "858a4be37c0e199ff3060d379e044c95", "score": "0.4987746", "text": "def on_cleanup(self):\n pygame.quit()", "title": "" }, { "docid": "2941fc71fce3e2e90a5c2e973bc1dfee", "score": "0.4969781", "text": "def shut_down(self) -> None:\n self.playing = False\n self.running = False", "title": "" }, { "docid": "dbbc3f0b5bed7f79acdc4770e8376c22", "score": "0.4944467", "text": "def clean(cls):\n import mcpython.engine.ResourceLoader\n\n shared.world.world_generation_process.stop()\n import mcpython.engine.ResourceLoader\n\n mcpython.engine.ResourceLoader.close_all_resources()\n shared.tmp.cleanup()", "title": "" }, { "docid": "0c9a0d3f8f4b31fe72a3079f3270805c", "score": "0.4935124", "text": "def test_stop(self):\n try:\n expected_channels = 0\n filename = example_path(os.path.join(\"data\", \"house_lo.wav\"))\n sound = mixer.Sound(file=filename)\n\n sound.stop()\n\n self.assertEqual(sound.get_num_channels(), expected_channels)\n finally:\n pygame.mixer.quit()\n with self.assertRaisesRegex(pygame.error, \"mixer not initialized\"):\n sound.stop()", "title": "" }, { "docid": "d9fd7e25c24e35904531f237ecc12d7f", "score": "0.49257436", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&Delft3D FlexibleMesh Toolbox'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "title": "" }, { "docid": "85d1d5d5da749622db8fbeab118d32e2", "score": "0.492457", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&Re-route'),\n action)\n self.iface.removeToolBarIcon(action)", "title": "" }, { "docid": "97642814f271ac638660aed13824dc6b", "score": "0.49113765", "text": "def close_scenes(self):\n \n from enthought.mayavi import mlab\n eng = mlab.get_engine()\n try:\n # Uncomment to activate again\n #eng.close_scene(self.scene2d)\n eng.close_scene(self.scene3d)\n \n except Exception:\n pass\n # mlab.close(self.scene3d)", "title": "" }, { "docid": "06e17402e9537793d55df62392114727", "score": "0.49068168", "text": "def unload(self):\n gl.glUseProgram(0)", "title": "" }, { "docid": "63ca3e63b2cdd710d005ec08491a8d33", "score": "0.48996755", "text": "def unload(self):\n self._load_state()\n self._clear_symlinks()\n Term.status(\"Configuration unloaded\")", "title": "" }, { "docid": "94f7f055a89df5726f92a4050e90d0af", "score": "0.48873007", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&EMUiAdok'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "title": "" }, { "docid": "e6d645d2dd4ca2392bd4db044db91053", "score": "0.4886454", "text": "def stop():\n\n sounddevice.stop()", 
"title": "" }, { "docid": "8b2780c57659faa5fbf009c97247bc8b", "score": "0.4864342", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&Split Line'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "title": "" }, { "docid": "163225568cd3fde9f9e5232920a23966", "score": "0.48640454", "text": "def close(self):\n for face in self._faces.values():\n face.teardown()\n\n for cube in self._light_cube.values():\n cube.teardown()", "title": "" }, { "docid": "8ce08044c8ad75d5bbf879aa1bbbd156", "score": "0.48611477", "text": "def unload(self):\n _LOGGER.info(\"AirnutSensor Sock close\")\n self._socketServer.shutdown(2)\n self._socketServer.close()", "title": "" }, { "docid": "a302d9a057b0bfcf4b84623ff0867bc5", "score": "0.48553246", "text": "def broken_down(self):\r\n # 1. play music\r\n if self.down_sound:\r\n self.down_sound.play()\r\n # 2. play images\r\n for img in self._destroy_img_list:\r\n self.screen.blit(img, self.rect)\r\n # 3. after exploding\r\n self.active = False", "title": "" }, { "docid": "2c08b6ba3cc608a5fc3676735ded33bb", "score": "0.4854334", "text": "def destroy(self):\n if hasattr(self, \"gfx\"):\n try:\n for gfx in self.gfxlist:\n try:\n gfx.destroy(True)\n except BaseException:\n pass\n except BaseException:\n pass", "title": "" }, { "docid": "2e36135f15a5c0e9d89a72588429f21f", "score": "0.48440236", "text": "def on_unload(self):\n self._background = None\n self._event_finish()", "title": "" }, { "docid": "a592b79390fa8dcc436fa97dca878b9a", "score": "0.4835919", "text": "def update_sfx(cls):\n for resource in cls.sfx:\n resource.track.volume = cls.sfx_volume()", "title": "" }, { "docid": "8fbb2673df8376ac491f2c501585388a", "score": "0.48205408", "text": "def _del_ocio_tex(self):\n for tex, tex_name, sampler_name, tex_type, tex_index \\\n in self._ocio_tex_ids:\n GL.glDeleteTextures([tex])\n del self._ocio_tex_ids[:]", "title": "" }, { "docid": "25d03b9a5124d36c5e2bff2f2d2a88c1", "score": "0.48188356", "text": "def unload(self):\n\n #print \"** UNLOAD aceo\"\n\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&AgriCarbon-EO Plugin'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "title": "" }, { "docid": "b320990c111eddc3b45733d6ccc97b98", "score": "0.4817285", "text": "def unregister():\n\n\tbpy.types.TOPBAR_MT_file_import.remove(menu_draw)\n\tbpy.utils.unregister_class(AutoOp.AutomatOperatorFromTexture)\n\tbpy.utils.unregister_class(AdjOp.AdjustableOperatorFromTexture)", "title": "" }, { "docid": "70b21d12d31b5f61c158c1d1db300ac4", "score": "0.48110357", "text": "def unload(namespace):\n names = dir(soerp) + dir(umath) + [\"fraction\", \"float\", \"pi\", \"e\"]\n for name in names:\n if name[0] != '_':\n try:\n del namespace[name]\n except KeyError:\n pass", "title": "" }, { "docid": "cdaccf50a980697e7e2d79dcebb31a51", "score": "0.48104137", "text": "def __del__(self):\n self.__unload(closing=False)\n #self.exit()", "title": "" }, { "docid": "66c31abf609885a302a91f6b6da52974", "score": "0.48102146", "text": "def unload(self):\n self.unload_parameters()\n self.unload_properties()\n self.unload_schema()\n self.unload_data()\n self.unload_traj()", "title": "" }, { "docid": "be1078da9af7f1f54d5941dfea06540f", "score": "0.48093548", "text": "async def dev_unload(self, ctx, *cogs: str):\n success = []\n for cog in cogs:\n try:\n self.bot.unload_extension(cog)\n success.append(f\"`{cog}`\")\n except Exception as e:\n await 
ctx.send(f\"{type(e).__name__} - {e}\")\n await ctx.send(f\"Successfully unloaded: {', '.join(success)}\")", "title": "" }, { "docid": "19835563face0f2223165ce4681ad5ed", "score": "0.4802887", "text": "def unload(self):\n if self.__dllhandle is None:\n warning('GCSDll.unload: cannot unload %r', self.__dllpath)\n else:\n self.close()\n # Access to a protected member _handle of a client class pylint: disable=W0212\n if sys.platform in ('win32', 'cygwin'):\n ctypes.windll.kernel32.FreeLibrary(ctypes.c_int(self.__dllhandle._handle))\n else:\n ctypes.cdll.LoadLibrary(self.__dllpath).dlclose(ctypes.c_int(self.__dllhandle._handle))\n self.__dllhandle = None\n debug('GCSDll.unload %r', self.__dllpath)", "title": "" }, { "docid": "660555f86c732d9094f3e392e98c7d08", "score": "0.48025134", "text": "def _handle_wave_clear(self):\n if self._wave == self._level.get_max_wave():\n self._handle_game_over(won=True)\n\n # Task 1.5 (Play Controls): remove this line\n #self.next_wave()", "title": "" }, { "docid": "a93d711195aa0d6f12249475646b3bf6", "score": "0.47932747", "text": "def _handle_wave_clear(self):\n if self._wave == self._level.get_max_wave():\n self._handle_game_over(won=True)\n\n # Task 1.5 (Play Controls): remove this line\n self.next_wave()", "title": "" }, { "docid": "c6fc6ca7539aadc7454d0023a7ae6a41", "score": "0.47927323", "text": "def _handle_wave_clear(self):\n if self._wave == self._level.get_max_wave():\n self._handle_game_over(won=True)", "title": "" }, { "docid": "c6aac676221b927046758b594675937b", "score": "0.47901928", "text": "def unload():\n magic.unload_ipython_extension(get_ipython())", "title": "" }, { "docid": "079399ba8a30fefef65f2cbb24502ee6", "score": "0.47880003", "text": "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&INSPIRE Nederland plugin voor QGIS'), action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "title": "" }, { "docid": "2c27644aa6a412ad112a42d642510784", "score": "0.4778541", "text": "def clearTextures(self):\n GL.glDeleteTextures(1, self._texID)\n GL.glDeleteTextures(1, self._maskID)", "title": "" }, { "docid": "446c9893a261c1d6190109d96612558a", "score": "0.47707987", "text": "def playSFX(filename):\n pass", "title": "" }, { "docid": "27dfc2dbb3dc58dea8f559038e055faa", "score": "0.47581887", "text": "def close(mmc):\r\n print(\"Unloading devices...\"),\r\n mmc.unloadAllDevices()\r\n print(\"Done.\")\r\n return", "title": "" } ]
48f5d542884a82f9b65b9c9cb4555eaa
Force reloading the data from the file. All data in the inmemory dictionary is discarded. This method is called automatically by the constructor, normally you don't need to call it.
[ { "docid": "1b837d3a0ca06c72d167d2ed4f31d5bb", "score": "0.0", "text": "def load(self):\n self._check_open()\n try:\n data = json.load(self.file, **self.load_args)\n except ValueError:\n data = {}\n if not isinstance(data, dict):\n raise ValueError('Root JSON type must be dictionary')\n self.clear()\n self.update(data)", "title": "" } ]
[ { "docid": "c91f69b5199796c9f13475ff65faf51e", "score": "0.7423884", "text": "def reload_file(self):\n self.__load_file()", "title": "" }, { "docid": "37495bab71e58f0c4711c4a8f445cfd9", "score": "0.7374094", "text": "def Reload(self):\n self.LoadFromFile(self.infile_name)", "title": "" }, { "docid": "2e085fde28b9b27cba5c7840b5c05b27", "score": "0.7281882", "text": "def reload(self):\n if exists(self.__file_path):\n with open(self.__file_path, 'r') as f:\n content = f.read()\n if len(content):\n new_dict = json.loads(content)\n for key, value in new_dict.items():\n cls = key.split(\".\")\n self.__objects[key] = self.__names[cls[0]](**value)", "title": "" }, { "docid": "3ffe333882fde7cdb8e8e6daaaccd44d", "score": "0.72724533", "text": "def reload(self):\n if os.path.exists(self.__file_path):\n with open(self.__file_path, mode=\"r\") as save_file:\n load = json.load(save_file)\n for value in load.values():\n create = \"{}(**value)\".format(value[\"__class__\"])\n obj = eval(create)\n self.new(obj)", "title": "" }, { "docid": "fb8a33852d19477b31eeb11a43b9f3f5", "score": "0.72288525", "text": "def reload(self):\n def object_hook(o):\n if '__class__' in o:\n oclass = o['__class__']\n return classes[oclass](**o)\n else:\n return o\n\n try:\n with open(self.__file_path, 'r') as f:\n self.__objects = json.load(f, object_hook=object_hook)\n except FileNotFoundError:\n self.__objects.clear()", "title": "" }, { "docid": "1798048784eb644823a9189c6209d912", "score": "0.7208809", "text": "def reload(self):\n try:\n with open(FileStorage.__file_path, \"r\") as file:\n read = file.read()\n dict_file = {}\n if read != \"\":\n dict_file = json.loads(read)\n\n for key, value in dict_file.items():\n if key not in FileStorage.__objects.keys():\n class_name = value[\"__class__\"]\n new = eval(\"{}(**value)\".format(class_name))\n self.new(new)\n except FileNotFoundError:\n return", "title": "" }, { "docid": "124f1f68dd55ed44c7209420b3ab9c30", "score": "0.7166866", "text": "def reload(self):\n data, names = LoadDictFromProjFile(self.filename)\n self.names = names\n # Need to deep copy the new data file data into the current tree,\n # so that any existing G2Phase, or G2PwdrData objects will still be\n # valid\n _deep_copy_into(from_=data, into=self.data)", "title": "" }, { "docid": "a2b461cf65bcd4331ad436779b61cff3", "score": "0.712053", "text": "def reload(self):\n dictOfdict = {}\n dic_temp = {}\n FileStorage.__file_path = \"file.json\"\n try:\n with open(FileStorage.__file_path, 'r') as f:\n dictOfdict = json.load(f)\n for k, v in dictOfdict.items():\n dic_temp[k] = (eval(v['__class__']))(**v)\n FileStorage.__objects = dic_temp\n except:\n pass", "title": "" }, { "docid": "8b901ec7c063c86057a500457adbcad6", "score": "0.71010995", "text": "def reload(self):\n try:\n with open(self.__file_path, 'r') as f:\n new_obj = json.load(f)\n for key, val in new_obj.items():\n obj = self.class_dict[val['__class__']](**val)\n self.__objects[key] = obj\n except FileNotFoundError:\n pass", "title": "" }, { "docid": "37e9c1d018cb213e53115303b19e9971", "score": "0.7083252", "text": "def reconstitute(self):\n if os.path.exists(self.file):\n try:\n with open(self.file) as fd:\n self.data = pickle.load(fd)\n self.data['id'] = self.id\n except:\n # session is corrupted; keep old data\n pass\n else:\n self.clearData()", "title": "" }, { "docid": "cbfe49160dc0ea369ebdfce37139f30f", "score": "0.70821935", "text": "def reload(self):\n try:\n with open(self.__file_path, \"r\") as file:\n serializable_file = json.load(file)\n for key, 
value in serializable_file.items():\n self.__objects[key] = eval(value['__class__'])(**value)\n except FileNotFoundError:\n pass", "title": "" }, { "docid": "cb88541af10dce903184fc8512179d02", "score": "0.7068127", "text": "def load(self, datafile):\n\t\tself.datafile = os.path.normpath(datafile)\n\t\treturn self.reload()", "title": "" }, { "docid": "f2b432aef9d9ed3de6568e55bb6e52ff", "score": "0.70678526", "text": "def reload(self):\r\n try:\r\n if os.path.isfile(self.__file_path):\r\n with open(self.__file_path, mode=\"r\", encoding=\"utf-8\") as mfy:\r\n dic_to_dic = json.load(mfy)\r\n for val in dic_to_dic.values():\r\n clsName = val['__class__']\r\n self.new(eval(clsName)(**val))\r\n else:\r\n return\r\n except NoFoundFile:\r\n pass", "title": "" }, { "docid": "98074d8f256c67ab2a6ea28dcf4779ed", "score": "0.6938692", "text": "def reload(self):\n file_path = FileStorage.__file_path\n if os.path.exists(file_path):\n with open(file_path, mode='r', encoding='utf-8') as f:\n temp_objs = json.load(f)\n for k, v in temp_objs.items():\n new_list = k.split(\".\")\n new_obj = self.cls_dict[new_list[0]]\n FileStorage.__objects[k] = new_obj(**v)", "title": "" }, { "docid": "d7f1c3437a3ea12bc0cf9319f7c5f2fd", "score": "0.6883611", "text": "def unpickle(self):\n name_suffix = './googledata/' + self.seed.replace(' ', '_')\n if os.path.isfile(name_suffix):\n with open(name_suffix, 'rb') as f:\n tmp = pickle.load(f)\n self.__dict__.update(tmp)", "title": "" }, { "docid": "81ebc87615aa692a9cf096f18be526a1", "score": "0.6867311", "text": "def reload(self):\n try:\n with open(FileStorage.__file_path, encoding='utf-8') as json_file:\n FileStorage.__objects = json.load(json_file)\n for value in FileStorage.__objects.copy().values():\n classname = value['__class__']\n self.new(eval(classname)(**value))\n except FileNotFoundError:\n pass", "title": "" }, { "docid": "59504b991a709590820d7b6580edb71f", "score": "0.686635", "text": "def reload(self):\n try:\n with open(self.__file_path, 'r', encoding=\"UTF-8\") as f:\n current = json.load(f)\n for k, v in current.items():\n func = \"models.{}\".format(v['__class__'])\n self.__objects[k] = eval(func)(**v)\n except FileNotFoundError:\n pass", "title": "" }, { "docid": "8601ab5654fbf36e9605bd7db6a61210", "score": "0.6775731", "text": "def reload(self):\n try:\n with open(self.__file_path, mode='r', encoding='UTF-8') as f:\n json_objects = json.load(f)\n for k, v in json_objects.items():\n class_name = v['__class__']\n self.__objects[k] = classes[class_name](**v)\n except:\n pass", "title": "" }, { "docid": "7fe82f705e07dce36b86960633ac5791", "score": "0.67635006", "text": "def reload(self):\n try:\n with open(self.__file_path, \"r\") as f:\n json_dict = json.load(f)\n for obj in json_dict.values():\n self.new(eval(obj[\"__class__\"])(**obj))\n except:\n pass", "title": "" }, { "docid": "37a810a4948d137f85458ebf5a22b33a", "score": "0.6755896", "text": "def reload(self):\n raise NotImplementedError()", "title": "" }, { "docid": "25b32b44112ed69b9a2dbc1f41e6dc2d", "score": "0.67181313", "text": "def update(self, filename):\n\n state = cPickle.load(open(filename))\n # Update self.__dict__ from it\n self.__dict__ = state.__dict__", "title": "" }, { "docid": "de7f268af3fedeb6948b2abad6139860", "score": "0.66719", "text": "def reload(self):\n res = self.api._req_file(self.fid)\n data = res['data'][0]\n self.name = data['file_name']\n self.sha = data['sha1']\n self.pickcode = data['pick_code']", "title": "" }, { "docid": "3c9b329d70011a556d9ea388359345fb", "score": 
"0.6671643", "text": "def reload(self):\n self.read(self.filename)\n # Handle autostart of the application\n self.enable_autostart(self.getboolean('GENERAL', 'autostart'))", "title": "" }, { "docid": "da626ae75a5755a9b0595899000d3717", "score": "0.66295606", "text": "def load(self):\n # Only update values that don't already exist\n with open(self.path, 'r') as f:\n file_config = json.load(f)\n for key, val in file_config.items():\n if key not in self:\n self[key] = val", "title": "" }, { "docid": "4f1a7ac2b88468d893154dbaa273b342", "score": "0.66051906", "text": "def reload(self):\n pass", "title": "" }, { "docid": "5f6589dc98504dcb541ed68996009053", "score": "0.6595129", "text": "def load(self, filename):\n try:\n self.pdict = pickle.load(open(filename,'r'))\n except Exception:\n log('Error loading cache file: %s (possibly corrupt)' % filename)\n self.clear()", "title": "" }, { "docid": "66ed246b6f1b9ac6bd5ef4197775e3b3", "score": "0.6515276", "text": "def reload_world(self, filename: str | None = None) -> None:\n # TODO fix this\n self.__init__(filename) # type: ignore[misc]", "title": "" }, { "docid": "8af07cddf1b435f11048cb1b96a173e9", "score": "0.6500648", "text": "def reload(self):\n self._cache = dict(super().items())", "title": "" }, { "docid": "c9e87ec84e6acab89e84b1ebe990de0f", "score": "0.64847356", "text": "def reload(self):\n try:\n with open(self.__file_path, 'r', encoding='utf-8') as my_file:\n objs = json.load(my_file)\n\n \"\"\"for key, value in objects.items():\n if value['__class__'] == 'BaseModel':\n FileStorage.__objects[key] = User(**value)\"\"\"\n for key in objs:\n self.__objects[key] = (classes[objs[key][\"__class__\"]]\n (**objs[key]))\n\n except FileNotFoundError:\n pass", "title": "" }, { "docid": "86b03fcbbc1ea72ee2f2669663becf24", "score": "0.6471154", "text": "def restore_data(self):\n raise NotImplementedError()", "title": "" }, { "docid": "5d45b249bda43c63c92783e450924113", "score": "0.6445839", "text": "def load(self):\n with open(self.CACHE_DIR + self.file_name, 'r') as data_file: \n self.cache = json.load(data_file)", "title": "" }, { "docid": "ee70d5624d5540945aabc9f7a0c99149", "score": "0.64297104", "text": "def load(self, filename):\n with open(filename, 'rb') as f:\n loaded = pickle.load(f)\n self.__dict__.update(loaded.__dict__)", "title": "" }, { "docid": "9c1032faf2e9fb982bb522871a454570", "score": "0.64231825", "text": "def __init__(self, file: str):\n self.file = file\n self.data = None\n self.injected_objects = set()\n mcpython.engine.event.EventHandler.PUBLIC_EVENT_BUS.subscribe(\n \"command:reload:end\", self.reload\n )", "title": "" }, { "docid": "d8b39c56d601a9136853514e51c860dd", "score": "0.63932", "text": "def load(self, filename):\n f = open(filename, \"rb\")\n loaded = pickle.load(f)\n self.__dict__ = loaded.__dict__", "title": "" }, { "docid": "3ba90541abe3770f8cacb4b37eb40d3b", "score": "0.6386983", "text": "def load(self, filename):\n self.__dict__.update(load_pkl(filename))", "title": "" }, { "docid": "2709ee065de7260ea6a17640decdbc11", "score": "0.6355738", "text": "def load(self,filename):\n f = open(filename,\"rb\")\n loaded = pickle.load(f)\n self.__dict__ = loaded.__dict__", "title": "" }, { "docid": "ed8ebd5274a9e49eb82f7b90e5a41a45", "score": "0.6344929", "text": "def attempt_reload(self, df, name):\n try:\n self.df = pd.read_pickle(self.save_name.format(name=name))\n print(\"data reloaded\")\n except FileNotFoundError:\n self.df = df\n # Cleaning text operations\n self.clean_text()\n # Adding features\n self._save(name)", 
"title": "" }, { "docid": "35b59c85a0b127014a61a67b87f54efa", "score": "0.63401735", "text": "def load_state(self, filestr):\n print \"Unthawing...\"\n tmp = pickle.load(open(filestr, \"rb\"))\n self.__dict__ = tmp.__dict__\n self.saved_state = True\n return True", "title": "" }, { "docid": "db4d34eaca2ca30a7ca1335fdf2ec9b0", "score": "0.63298917", "text": "def reload(self):\n self._initialized = False\n self._initDetails()", "title": "" }, { "docid": "034b2eb3c80cbc6148bcb221a1907f7e", "score": "0.63268155", "text": "def reload(self):\n from models.base_model import BaseModel\n from models.user import User\n from models.place import Place\n from models.state import State\n from models.city import City\n from models.amenity import Amenity\n from models.review import Review\n\n classes = {\n 'BaseModel': BaseModel, 'User': User, 'Place': Place,\n 'State': State, 'City': City, 'Amenity': Amenity,\n 'Review': Review\n }\n try:\n temp = {}\n with open(FileStorage.__file_path, 'r') as f:\n \"\"\"\n # temp = {'Place.861cb0ed-d4e2-4258-b8cc-e9893d264434':\n # {'__class__': 'Place', 'city_id': '0001',\n # 'created_at': '2020-12-16T17:10:26.302193',\n # 'id': '861cb0ed-d4e2-4258-b8cc-e9893d264434',\n # 'latitude': '37.773972', 'longitude': '-122.431297',\n # 'max_guest': '10', 'name': 'My little house'\n # 'number_bathrooms': '2', ...},\n # 'State.2ce47b59-e40d-463b-a50f-8c127cb6385f'\n # {'_class_':'State', 'created_at':\n # '2020-12-16T17:10:26.298419'\n # 'id': '2ce47b59-e40d-463b-a50f-8c127cb6385f',\n # 'name': 'California',\n # 'updated_at': '2020-12-16T17:10:26.298446'},\n # 'State.af7cff8d-55aa-4206-8487-af4733f701da':\n # {'__class__': 'State',\n # 'created_at': '2020-12-16T17:10:26.298761',\n # 'id': 'af7cff8d-55aa-4206-8487-af4733f701da',\n # 'name': 'Arizona', 'updated_at':\n # '2020-12-16T17:10:26.298777'}}\n \"\"\"\n temp = json.load(f)\n # 'Place.861cb0ed-d4e2-4258-b8cc-e9893d264434'\n for key, val in temp.items():\n self.all()[key] = classes[val['__class__']](**val)\n except FileNotFoundError:\n pass", "title": "" }, { "docid": "2a2e62a5a8d27dcc64a0cf08cf37a481", "score": "0.632547", "text": "def load(self, filename):\r\n\r\n self.print_progress(\"Loading model from \" + filename)\r\n try:\r\n with open(self.save_path + \"/\" + filename, 'rb') as f:\r\n tmp_dict = pickle.load(f)\r\n self.__dict__.update(tmp_dict)\r\n except Exception as e:\r\n print(e)", "title": "" }, { "docid": "788931d3b0a3b95dcec474e78ba15298", "score": "0.63189644", "text": "async def reload(self):\n self.data = await mcpython.engine.ResourceLoader.read_json(self.file)\n for state_instance in self.injected_objects:\n self.inject(state_instance)", "title": "" }, { "docid": "3af3d8d41b03bec5ff97603df51fb3a3", "score": "0.6310249", "text": "def reload(self):\n try:\n with open(self.__file_path, 'r') as burger:\n beer = json.load(burger)\n for malta in beer:\n self.__objects[malta] = classes[beer[malta][\"__class__\"]](\n **beer[malta])\n except:\n pass", "title": "" }, { "docid": "9f6bb44cacc36ca2264eaed7b7ac74d2", "score": "0.6307454", "text": "def reloadFile(self):\n self.__fileheader = []\n self.__filecontents = []\n f = open(self.__filename)\n for line in f:\n if line.startswith('##'):\n self.__fileheader.append(line.strip())\n else:\n self.__filecontents.append(line)\n f.close()\n self._parseHeaders()\n self._compileFunction()", "title": "" }, { "docid": "960dd448e8b3e5d35a5d960a8a3f6d20", "score": "0.62800795", "text": "def _loadCache(self):\n self._fileChecker()\n if self._data is None:\n with 
open(self._path) as f:\n try:\n self._data = json.load(f)\n except Exception as e:\n self.logger.warning(\"Error while loading TPB cache\\n\\n%s\" % e)\n self._data = {}", "title": "" }, { "docid": "988603a0994690950f55d1bd80947f47", "score": "0.6275009", "text": "def load(self):\n\t\twith open(self.filename, \"r\") as jsonf:\n\t\t\tself.store = json.load(jsonf)", "title": "" }, { "docid": "9f119dccda1b6e1272849649fe0d56a8", "score": "0.6223982", "text": "def load(self, filename):\n # Unpack and assign\n with open(filename, \"rb\") as f:\n my_dict=pickle.load(f)\n self.configuration=my_dict[\"configuration\"]\n self.inputs=my_dict[\"inputs\"] \n self.outputs=my_dict[\"outputs\"] \n self.plot_options=my_dict[\"plot_options\"]\n return", "title": "" }, { "docid": "7162a697d6f87277e2ca5b309d8b3f2f", "score": "0.622048", "text": "def reload(self):\n self.__init__(self.meta[\"path\"])", "title": "" }, { "docid": "e5dbdfa0583fcd4223deab38844498c3", "score": "0.6201626", "text": "def reload(self):\n self.close()\n self.dss=self.load()", "title": "" }, { "docid": "b027d651e84781f5a03a2a00bd36d147", "score": "0.61975604", "text": "def load(self):\n if os.path.exists(self.path):\n self._load()\n self.build_reversed()\n else:\n self._db = {}", "title": "" }, { "docid": "b08f31c29f1ad29eb837866e85cf0f10", "score": "0.6190571", "text": "def from_file(self, file):\n self.__dict__.update(file.__dict__)", "title": "" }, { "docid": "a1baffcf78d5245b01860ad904ee45f5", "score": "0.6189168", "text": "def load_data(self):\n pass", "title": "" }, { "docid": "bd393e4403770992d180bbf71e364860", "score": "0.61838484", "text": "def _load_data(self, open_file):\n raise NotImplementedError", "title": "" }, { "docid": "667d5c5f69e44535558d7ec9bac41c9b", "score": "0.6174091", "text": "def refresh(self):\n self._data_cache = None", "title": "" }, { "docid": "a5e651383e3ec839edebc8fbc192dd75", "score": "0.6169544", "text": "def _readfile(self):\n try:\n with open(self._path, \"r+\") as datafile:\n self._data = json.load(datafile)\n except json.JSONDecodeError:\n # TODO: Improve error handling\n print(\"could not decode JSON\")\n self._data = {}\n except FileNotFoundError:\n open(self._path, 'w')\n self._data = {}", "title": "" }, { "docid": "7e93d3a11ac90b0f68c782c1c8c23084", "score": "0.6166382", "text": "def __init__(self,filen):\n self._data = {}\n self.filename = filen\n self.loadData()", "title": "" }, { "docid": "8ea9ea15afda1bd5e2eec6998a215392", "score": "0.61469823", "text": "def load_data(self):\n return", "title": "" }, { "docid": "6fb48266eb76a7d0eb180bc81e30f975", "score": "0.6139554", "text": "def loadAndSetdata(self):", "title": "" }, { "docid": "102e45f863e78bf46c84d72bc87be936", "score": "0.61298525", "text": "def load_data(self) -> None:\n raise NotImplementedError()", "title": "" }, { "docid": "1170832992db896e766c8f005e610dd7", "score": "0.61230797", "text": "def load(self):\n self.data[self.mode] = self.load_specific(self.mode.lower())", "title": "" }, { "docid": "557db510209ad8959463daa8fad24a7c", "score": "0.61079854", "text": "def reload(self):\r\n dataset = Dataset.find_one(self.dataset_id)\r\n self.record = dataset.record\r\n self.clear_cache()\r\n\r\n return self", "title": "" }, { "docid": "2ca08eac618c32a1540db59b4688b748", "score": "0.610517", "text": "def load(self,filename):\n filename = filename.strip('.pnm')\n if self != {}:\n print('Warning: Loading data onto non-empty controller object, existing data will be lost')\n self.clear()\n self = _pickle.load(open(filename+'.pnm','rb'))", 
"title": "" }, { "docid": "c6ea4fd8ee242dd4d349c429afc942fb", "score": "0.60894614", "text": "def load_existing(self):\n pass", "title": "" }, { "docid": "3da161513b7ca0690d71b33293e42f1f", "score": "0.60806066", "text": "def reload(self, event=None):\r\n if self.filename != '' and (self.saved or dialogs.warn_changes()):\r\n self.load()\r\n self.first_render = False", "title": "" }, { "docid": "1c16a8d78790efc9201204d4e7524919", "score": "0.6070586", "text": "def rebase_data(self):\n print(\"Rebasing data from %s...\"%DATADR+self.datafile)\n table = pd.read_csv(DATADR+self.datafile, encoding='latin1')\n pickle.dump({'table':table}, open(self.fname,'wb'), -1)\n print(\"Done.\")", "title": "" }, { "docid": "b7ead1a71a6582a2b169e8bf2353ef48", "score": "0.6061289", "text": "def test_reload_empty(self):\n a_storage = FileStorage()\n try:\n os.remove(\"file.json\")\n except Exception:\n pass\n with open(\"file.json\", \"w\") as f:\n f.write(\"{}\")\n with open(\"file.json\", \"r\") as r:\n for line in r:\n self.assertEqual(line, \"{}\")\n self.assertIs(a_storage.reload(), None)", "title": "" }, { "docid": "8ba9225f1b235af39437e979bdcf8feb", "score": "0.6038703", "text": "def load(self, filename):\n\n # New document from file\n vo = _vo.FileDocumentConfigVO(filename)\n doc = self._new_doc(vo)\n\n # Replace\n proxy = DocumentProxy.__instances.pop(self.docname)\n self.data = doc\n DocumentProxy.__instances[doc.name] = self\n\n # Notify\n self.sendNotification(self.DOC_UPDATED, self)", "title": "" }, { "docid": "ee56be72742af3afea1b5ef571b7db06", "score": "0.6035074", "text": "async def reinitialise(self):\n\t\tdecoded = {}\n\t\twith open(self._dest) as file:\n\t\t\tdecoded = json.loads(file.read())\n\n\t\tfor key, val in decoded.items():\n\t\t\tif val[\"timestamp\"] + val[\"duration\"] <= timestamp():\n\t\t\t\tcontinue\n\n\t\t\trestored_poll = await Poll.restore(val)\n\t\t\tif restored_poll is not None:\n\t\t\t\tself[key] = restored_poll", "title": "" }, { "docid": "a97930e27362c315a0833ba36c1c1f63", "score": "0.602019", "text": "def load_existing(self):\n data = JsonIO.read(self._data_file_path)\n if self.__class__.__name__ in data:\n self._set_desc(data[self.__class__.__name__])", "title": "" }, { "docid": "775dcd515143e17fd323392c2c68f21f", "score": "0.6013607", "text": "def load_data():\n pass", "title": "" }, { "docid": "d2f2df43aa369b8809abeffb1f1df211", "score": "0.59979624", "text": "def loadObjectFromFile(self):\n with open(self._boundFile, 'r+') as fd:\n # TODO add error checking (e.g. 
IOError)?\n tmpDict = json.load(fd)\n self._boundObj = dot_access_dict.DotAccessDict(tmpDict)", "title": "" }, { "docid": "490a5b200ee5561ce1bab1438ee4788b", "score": "0.5987581", "text": "def reload(self, warning=True):\n self.unload()\n self.load()", "title": "" }, { "docid": "14db874fb5e5cc9a3ebabda3ab3d5e94", "score": "0.5980145", "text": "def load(self):\n if os.path.exists(self.location()):\n with open(self.location(), 'r') as f:\n try:\n self.update(json.load(f))\n\n except BaseException:\n traceback.print_exc()\n print('Failed to load json data')", "title": "" }, { "docid": "9853935d04da88bc6f1a23336f9e8b11", "score": "0.596754", "text": "def refreshData(self):\n for k in self.cardData.data.iterkeys():\n c = cards.Card(k)\n c.load()\n if c.loaded:\n self.cardData.data[k] = c\n else:\n print('Unable to load data for ' + k + '.')", "title": "" }, { "docid": "7b6c5c3dc63a3edbc34eec14d5d496fb", "score": "0.596592", "text": "def set_persistent_location(self, file_path):\n self.file_path = file_path\n self.cache = read_cache(file_path)", "title": "" }, { "docid": "44ecaa2826e9050c5ba228511c52b334", "score": "0.5964565", "text": "def load_data(self):\n raise NotImplementedError", "title": "" }, { "docid": "9b51362af4fa34926dd3979d119cd9ce", "score": "0.5964123", "text": "def load_data(self):", "title": "" }, { "docid": "0d8ae517fc55271ecdaf6b78c99cf0f5", "score": "0.5958983", "text": "def load(self, infile):\n self.__dict__ = parse_infernal(infile).all()[0]", "title": "" }, { "docid": "cc32a6aecbb7a1ddc3134221a645c0a4", "score": "0.5955208", "text": "def fromfile(cls, fname):\n self = cls()\n with open(fname, 'r') as f:\n self.readdata(f)\n f.close()\n return self", "title": "" }, { "docid": "31252fe576982410b59d61bfdaaaf399", "score": "0.5943225", "text": "def load_cache_file(self) :\n if not self.enabled :\n return\n\n try:\n with open( self.cache_file_name, \"r\" ) as file:\n self.cache_dict = json.load( file )\n except FileNotFoundError:\n logging.info( F'Cache file \"{self.cache_file_name}\" not found.' )\n except:\n # Disable the cache to prevent overwriting the existing file, so we have a chance to inspect/debug the bad file.\n self.enabled = False\n logging.warning( F'Cache file \"{self.cache_file_name}\" failed to load. Cache will not be used.' )\n else:\n logging.info( F'Loaded cache file \"{self.cache_file_name}\".' 
)", "title": "" }, { "docid": "8d51dc5e08a28a7c3e0872ee29ae6fb1", "score": "0.5940809", "text": "def _read_file(self):\n self._store = ConfigObj(self._filename)", "title": "" }, { "docid": "8a8d6ad674cf9f3f37429441975d7794", "score": "0.5930181", "text": "def loadFromFile(self):\n\t\ttry:\n\t\t\tconfigFile = open(self.filename)\n\t\t\tsome_dict = pickle.load(configFile)\n\t\t\tself.__dict__.update(some_dict)\n\t\t\tconfigFile.close()\n\t\texcept IOError:\n\t\t\traise UnknownComicSource\n\t\texcept EOFError:\n\t\t\tprint(\"[x] Something went wrong when trying to load comic helper\")", "title": "" }, { "docid": "6b0e15445d99d104b2e2f7cb696d8df1", "score": "0.5918204", "text": "def restore_data(self):\n self.max_temps = pickle.load(open(\"max_temps.p\", \"rb\"))\n self.min_temps = pickle.load(open(\"min_temps.p\", \"rb\"))\n self.rainfall = pickle.load(open(\"rainfall.p\", \"rb\"))", "title": "" }, { "docid": "ec019448e94fc15e3edab9d853b37fd2", "score": "0.5911357", "text": "def load(self):\n raise NotImplemented()", "title": "" }, { "docid": "2590644816302a31d6a2df0d9508b1a1", "score": "0.5906507", "text": "def refresh(self):\n # Read the fields/QC matrix\n if self.path is not None:\n self.matrix = pd.read_csv(self.path)\n self.extended = []", "title": "" }, { "docid": "4b8d56f6e288c9f1cc07b9cdb0e24481", "score": "0.5906372", "text": "def Reload(self, jsondata):\r\n\t\tself.__dict__ = json.loads(jsondata, encoding=\"utf-8\")", "title": "" }, { "docid": "d14b1b625ff6ba8213fbe738fe1993eb", "score": "0.59034103", "text": "def load(self) -> None:\n self.reset()\n list(self)", "title": "" }, { "docid": "902c276d2ecefd8702e0478aab960b0e", "score": "0.5877219", "text": "def reload_mappings(self):\n with open(self.mappings_path, 'rb') as f:\n mappings = pickle.load(f)\n self.id_to_word = mappings['id_to_word']\n self.id_to_char = mappings['id_to_char']\n self.id_to_tag = mappings['id_to_tag']", "title": "" }, { "docid": "fff8a4f5ac9d465400e48ba1f206c11e", "score": "0.5876413", "text": "def load_file(self):\n\n try:\n with open(self.filename, 'rb') as f:\n sand_dict = pickle.load(f)\n self.n_iterations = sand_dict[\"n_iterations\"]\n self.sand_array = sand_dict[\"sand_array\"]\n self.topple_data = sand_dict[\"topple_data\"]\n except:\n print(f\"File {self.filename} couldn't be loaded. 
Starting new simulation.\")", "title": "" }, { "docid": "63cbe316d31c23a5be4d1d645c262400", "score": "0.5875463", "text": "def refresh(self):\n self.data = self.data", "title": "" }, { "docid": "59200108f8c58388efe692dca1ea7e35", "score": "0.5868069", "text": "def load(self, fd):\n self.__dict__.update(self.parse(fd))", "title": "" }, { "docid": "7c2d93acd7db6a7d998b875eab5a477f", "score": "0.58673126", "text": "async def load(self):\n if not self.filename:\n return\n\n if inspect.iscoroutinefunction(self._load_func):\n data = await self._load_func(self.filename, self._mode)\n else:\n data = await asyncio.get_event_loop().run_in_executor(\n None, partial(self._load_func, self.filename, self._mode))\n\n if data is not None:\n self._dict.update(self._serializer.loads(data).items())", "title": "" }, { "docid": "2122aa6139bcf22a3829615984409e73", "score": "0.5858796", "text": "def load(self):\n super(VTIFileStore, self).load()\n with open(self.__dbfilename, mode=\"rb\") as file:\n info_json = json.load(file)\n self._set_parameter_list(info_json['arguments'])\n self.metadata = info_json['metadata']", "title": "" }, { "docid": "2d9d818a1b2690454d59c884ef7f49d6", "score": "0.58587855", "text": "def loadFile(self):\n with open(self.filePath, \"rb\") as file:\n self.stateList = pickle.load(file)", "title": "" }, { "docid": "ef5e1d31c64bb698c51551ff9f4810f9", "score": "0.5851187", "text": "def load(cls, filename, **kwargs):\n filename = Path(filename).with_suffix('.pickle')\n\n obj = cls(**kwargs)\n try:\n with filename.open('rb') as ff:\n tmp_dict = pickle.load(ff)\n except Exception:\n with filename.open('rb') as ff:\n tmp_dict = dill.load(ff)\n\n obj.__dict__.clear()\n obj.__dict__.update(tmp_dict)\n return obj", "title": "" }, { "docid": "1c0c0b2cd4fb547f3d8e3d181ec06dee", "score": "0.58477867", "text": "def reload_data_dict(self):\n\n with open(self.specfile, \"r\") as f:\n specifications = yaml.load(f, Loader=Loader)\n self.validators = {}\n self.column_rules = {}\n self.df_rules = {}\n logger.info(\"Reloading project information.\")\n for name, specs in specifications.iteritems():\n logger.info(\"Schema for dataset {0}:\".format(name))\n logger.info(json.dumps(specs, cls=TypeEncoder))\n is_pickled = specs.get('pickle', False)\n self.validators[name] = SchemaValidator(specification=specs,\n specfile=self.specfile,\n name=name,\n is_pickled=is_pickled)\n self.column_rules[name] = specs.get('column_rules', {})\n self.df_rules[name] = specs.get('dataframe_rules', {})\n self.specifications = specifications", "title": "" }, { "docid": "a399f7f9962f7ec0f660d9503b6a5ef7", "score": "0.5844366", "text": "def load(self):\n self.read(self.filename)", "title": "" }, { "docid": "1d3e47491ad7ef5313c93cafe4f9c2f7", "score": "0.58387464", "text": "def update(self):\n\n try:\n # update the file\n FileManager.write_in(self.path,\n json.dumps(self.data, indent=2),\n mode='w')\n\n # update path\n self.paths = self._get('paths')\n\n self.comparator = self._get('comparator')\n self.placeholders = self._get('placeholders')\n\n self.max_stc_len = self._get(\"max_stc_len\")\n\n # Vocal settings\n self.tts = self._get('tts')\n self.stt = self._get('stt')\n # Load voice engine\n #self.voice_engine = VoiceEngine(self)\n\n self.wakers = [\n waker.lower() for waker in self._get('wakers', \"Iris\")\n ]\n\n # name/class conversion table\n self.tasks = self._get('tasks')\n\n except IOError as e:\n ConsolePrinter.print_error(\"SettingsLoader: I/O error(%s): %s\" +\n e.errno + e.strerror)", "title": "" }, { "docid": 
"edbee065d0458794d0c5cd6d729a8238", "score": "0.5835373", "text": "def reload_mappings(self):\n with open(self.mappings_path, 'rb') as f:\n mappings = cPickle.load(f)\n self.id_to_word = mappings['id_to_word']\n self.id_to_char = mappings['id_to_char']\n self.id_to_tag = mappings['id_to_tag']\n self.id_to_morpho_tag = mappings['id_to_morpho_tag']", "title": "" }, { "docid": "aa9b71fb5686280e1ea621ee5ac05449", "score": "0.5833663", "text": "def load(self,filename):\n try:\n file_handle = open(filename, 'rb')\n stored_values = pickle.load(file_handle)\n self.update(stored_values[0])\n self.ignorechars = stored_values[1]\n self.ignorecase = stored_values[2]\n except Exception as e:\n print \"Unable to load freq file :\"+str(e)", "title": "" }, { "docid": "08ea71ffa610935dc84711f7b9407333", "score": "0.58319217", "text": "def load(self, data, record_cls):\n data.pop(self.key, None)", "title": "" } ]
926a0f55b8770271cc5a1ea38bafe2bb
Create an editor account
[ { "docid": "cd846220b2f381b50036779188cc176d", "score": "0.0", "text": "def createsuperuser(username, password):\r\n u = User(username=username, password=password,confirmed=True)\r\n u.role = Role.query.filter_by(name='Admin').first()\r\n db.session.add(u)\r\n db.session.commit()\r\n click.echo('Done')", "title": "" } ]
[ { "docid": "2fbceed6628b4873f1d3e2e219f3fe5a", "score": "0.7483424", "text": "def createeditor(username, password):\r\n u = User(username=username, password=password)\r\n u.role = Role.query.filter_by(name='Editor').first()\r\n db.session.add(u)\r\n db.session.commit()\r\n click.echo('Done')", "title": "" }, { "docid": "f4e1f2bb265e8195a030765775fc0e86", "score": "0.7000179", "text": "def create_account(self, name):\n pass", "title": "" }, { "docid": "b74540129fb81da8ca7d24200b70b58b", "score": "0.69745505", "text": "def create_account():\n\n return render_template('create_account.html')", "title": "" }, { "docid": "2b3119e97b182f2eafddbeca6113b55b", "score": "0.6920073", "text": "def create_account():\n return render_template('create_account.html')", "title": "" }, { "docid": "86d684e3ce4e4e6f996d5d12afa76511", "score": "0.66343766", "text": "def create_account(self, account, name):\n\n raise PrivilegeError(\"Action not allowed in mode atm\")", "title": "" }, { "docid": "6a84e1936953684a553dc61189a16d57", "score": "0.640089", "text": "def process_create_account(self, cmd):\n account = cmd[1]\n name = cmd[2]\n\n self.mode.create_account(account, name) \n\n Logger.info(\"Create account complete: name {} account number {}\".format(name, account))", "title": "" }, { "docid": "c1eb19dbcaccb9e45a5e78c6d22014ea", "score": "0.6324654", "text": "def account():\n form = NewAccountForm()\n form.configure_parser(exclude=['name'])\n\n parser.add_argument(\n 'name',\n nargs=1,\n help='name for the new account',\n )\n parser.add_argument(\n '-p',\n '--password',\n action='store_true',\n default=False,\n help='set the password for the new account (prompts for value)',\n )\n\n yield\n\n args.name = args.name[0]\n if not form.bind_and_validate():\n msg = 'error: there were validation error(s) with input value(s)'\n print(msg, file=sys.stderr)\n form.print_errors()\n yield VALIDATION_ERROR\n\n account = form.create_account()\n\n if args.password:\n while True:\n password = getpass.getpass('Password: ')\n confirm = getpass.getpass('Confirm: ')\n if password == confirm:\n break\n print('error: passwords did not match\\n', file=sys.stderr)\n g.db.commit()\n g.db.add(Password(account_id=account.id, value=password))\n\n g.commit_and_save()", "title": "" }, { "docid": "89504edeacb8f9407eb12390f24407d1", "score": "0.631209", "text": "def post(self):\n return CreateNewUserAccount(request)", "title": "" }, { "docid": "e8ff8e45944ab936452af26a1e71386f", "score": "0.6255274", "text": "def new_account():\n return render_template('new-account.html')", "title": "" }, { "docid": "83182e5068ee3ec75daa3604f44d39a9", "score": "0.6210622", "text": "def test_create_account(self):\n pass", "title": "" }, { "docid": "0d6961841e3d53c5ccfb523e132d21a7", "score": "0.6185477", "text": "def create_admin_user(ctx):", "title": "" }, { "docid": "ca80ad8c48adb69964726728d8c4ea87", "score": "0.6180416", "text": "def create_account(self, values):\n raise NotImplementedError", "title": "" }, { "docid": "95c742b301d353d1a2849d94c1ca233c", "score": "0.6172014", "text": "def create_account(self):\n code = self.request.POST.get(\"code\") or None\n username = self.request.POST.get(\"username\") or None\n email = self.request.POST.get(\"email\") or None\n password = self.request.POST.get(\"password\") or None\n language = self.request.POST.get(\"language\").upper() or 'EN'\n newsletter = self.request.POST.get(\"newsletter\") or False\n eula = self.request.POST.get(\"eula\") or False\n ip = self.request.client_addr\n beta_create = None\n\n if 
(code and email and password):\n beta_create = auth.call('beta_create', code, username, email,\n password, language, newsletter, eula,\n ip)\n else:\n return {'status': 'failure', 'reason': 'incomplete'}\n \n if (beta_create['result'] == 'ok'):\n auth_data = str(beta_create['uid'])+\"|\"+\\\n str(beta_create['token']+\"|0|0\")\n headers = remember(self.request, auth_data)\n self.request.response.headerlist.extend(headers)\n return {'status': 'success'}\n else:\n return {'status': 'failure', 'reason': beta_create['message']}", "title": "" }, { "docid": "7bd503cda8f21bf7b74c5ec66a701513", "score": "0.6114838", "text": "def user_reg_creator(self, name):\n password = '2'\n user = User.objects.create_user(username=name, email=f'{name}@gmail.com', password=password)\n self.stdout.write(f'User {name} with creator permissions registered with password: {password}')\n creator = Permission.objects.get(codename='creator')\n user.user_permissions.add(creator)\n\n project_props = {\n 'owner': user,\n 'name': f'Desert',\n 'icon_chars': 'Pr',\n 'lang_orig_id': 75,\n }\n project = Project.objects.create(**project_props)\n project.translate_to.set([15, 18, 22])\n Folder.objects.create(project=project, name='Folder1', position=1)\n Folder.objects.create(project=project, name='Folder2', position=2)", "title": "" }, { "docid": "4f5b59f0438db3db695f0fe9e928fa3d", "score": "0.607093", "text": "def create(ctx, **options):\n _client = get_client(ctx)\n try:\n resp = _client.api.add_user(**options)\n cli_print_output(\"text\", resp)\n except (EvengHTTPError, EvengApiError) as err:\n console.print_error(err)", "title": "" }, { "docid": "a3b6f01d00fbe93fb0159711da78daae", "score": "0.6060293", "text": "def create_account(self,\n account_name,\n json_meta={},\n creator=None,\n owner_key=None,\n active_key=None,\n posting_key=None,\n memo_key=None,\n password=None,\n additional_owner_keys=[],\n additional_active_keys=[],\n additional_posting_keys=[],\n additional_owner_accounts=[],\n additional_active_accounts=[],\n additional_posting_accounts=[],\n storekeys=True,\n ):\n assert len(account_name) <= 16, \"Account name must be at most 16 chars long\"\n\n if not creator:\n raise ValueError(\n \"Not creator account given. 
Define it with \" +\n \"creator=x, or set the default_author using piston\")\n if password and (owner_key or posting_key or active_key or memo_key):\n raise ValueError(\n \"You cannot use 'password' AND provide keys!\"\n )\n\n account = None\n try:\n account = Account(account_name)\n except:\n pass\n if account:\n raise AccountExistsException\n\n \" Generate new keys from password\"\n from pistonbase.account import PasswordKey, PublicKey\n if password:\n posting_key = PasswordKey(account_name, password, role=\"posting\")\n active_key = PasswordKey(account_name, password, role=\"active\")\n owner_key = PasswordKey(account_name, password, role=\"owner\")\n memo_key = PasswordKey(account_name, password, role=\"memo\")\n posting_pubkey = posting_key.get_public_key()\n active_pubkey = active_key.get_public_key()\n owner_pubkey = owner_key.get_public_key()\n memo_pubkey = memo_key.get_public_key()\n posting_privkey = posting_key.get_private_key()\n active_privkey = active_key.get_private_key()\n # owner_privkey = owner_key.get_private_key()\n memo_privkey = memo_key.get_private_key()\n # store private keys\n if storekeys:\n # self.wallet.addPrivateKey(owner_privkey)\n self.wallet.addPrivateKey(active_privkey)\n self.wallet.addPrivateKey(posting_privkey)\n self.wallet.addPrivateKey(memo_privkey)\n elif (owner_key and posting_key and active_key and memo_key):\n posting_pubkey = PublicKey(posting_key, prefix=self.rpc.chain_params[\"prefix\"])\n active_pubkey = PublicKey(active_key, prefix=self.rpc.chain_params[\"prefix\"])\n owner_pubkey = PublicKey(owner_key, prefix=self.rpc.chain_params[\"prefix\"])\n memo_pubkey = PublicKey(memo_key, prefix=self.rpc.chain_params[\"prefix\"])\n else:\n raise ValueError(\n \"Call incomplete! Provide either a password or public keys!\"\n )\n\n owner = format(owner_pubkey, self.rpc.chain_params[\"prefix\"])\n active = format(active_pubkey, self.rpc.chain_params[\"prefix\"])\n posting = format(posting_pubkey, self.rpc.chain_params[\"prefix\"])\n memo = format(memo_pubkey, self.rpc.chain_params[\"prefix\"])\n\n owner_key_authority = [[owner, 1]]\n active_key_authority = [[active, 1]]\n posting_key_authority = [[posting, 1]]\n owner_accounts_authority = []\n active_accounts_authority = []\n posting_accounts_authority = []\n\n # additional authorities\n for k in additional_owner_keys:\n owner_key_authority.append([k, 1])\n for k in additional_active_keys:\n active_key_authority.append([k, 1])\n for k in additional_posting_keys:\n posting_key_authority.append([k, 1])\n\n for k in additional_owner_accounts:\n owner_accounts_authority.append([k, 1])\n for k in additional_active_accounts:\n active_accounts_authority.append([k, 1])\n for k in additional_posting_accounts:\n posting_accounts_authority.append([k, 1])\n\n props = self.rpc.get_chain_properties()\n fee = props[\"account_creation_fee\"]\n s = {'creator': creator,\n 'fee': fee,\n 'json_metadata': json_meta,\n 'memo_key': memo,\n 'new_account_name': account_name,\n 'owner': {'account_auths': owner_accounts_authority,\n 'key_auths': owner_key_authority,\n 'weight_threshold': 1},\n 'active': {'account_auths': active_accounts_authority,\n 'key_auths': active_key_authority,\n 'weight_threshold': 1},\n 'posting': {'account_auths': posting_accounts_authority,\n 'key_auths': posting_key_authority,\n 'weight_threshold': 1},\n 'prefix': self.rpc.chain_params[\"prefix\"]}\n\n op = operations.Account_create(**s)\n\n return self.finalizeOp(op, creator, \"active\")", "title": "" }, { "docid": "b1c25cb3559c890800065baa4f7bfae4", 
"score": "0.60153717", "text": "def create_account(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(owner=instance)", "title": "" }, { "docid": "cea376f51df25f4269192e66e760da90", "score": "0.60063565", "text": "def create_account():\n account = yield defer.maybeDeferred(Account.create)\n log.msg(\"Created account with address=%s\" % account.address)", "title": "" }, { "docid": "7fa14f761e361ff028c26d99bd9528bc", "score": "0.600149", "text": "def display_create_account_form():\n \n return render_template('create-account.html')", "title": "" }, { "docid": "a1610ecd83c2498b7e5f1a0677e1bd22", "score": "0.59808975", "text": "def create_account(domain, name, public_key):\n # 1. Create account\n tx = iroha_config.IROHA_ADMIN.transaction(\n [iroha_config.IROHA_ADMIN.command('CreateAccount',\n account_name=name,\n domain_id=domain,\n public_key=public_key)])\n IrohaCrypto.sign_transaction(tx, iroha_config.ADMIN_PRIVATE_KEY)\n send_transaction_and_print_status(tx)", "title": "" }, { "docid": "b7ec91841520d6387e3d75df2efced4a", "score": "0.59769624", "text": "def user_create():\n pass", "title": "" }, { "docid": "8f9cbfbb8cb0580a4190a8a043b204e1", "score": "0.5964106", "text": "def create(ctx, name, company, email, position):\n pass", "title": "" }, { "docid": "a47abe4d1d4ec5e0ae1a2fd0e22400a0", "score": "0.5944036", "text": "def create(self,request):\n return ObtainAuthToken().post(request)", "title": "" }, { "docid": "a47abe4d1d4ec5e0ae1a2fd0e22400a0", "score": "0.5944036", "text": "def create(self,request):\n return ObtainAuthToken().post(request)", "title": "" }, { "docid": "938ed6a77beb7ada6f73e377d084f3be", "score": "0.59421414", "text": "def create_account(request):\n\n\t# Don't allow logged in users to acces login_view\n\tif request.user.is_authenticated():\n\t\treturn HttpResponseRedirect(reverse('profile'))\n\n\tform = UserCreationForm(request.POST or None)\n\n\tif request.POST and form.is_valid():\n\t\tform.save()\n\t\tmessages.info(request, \"You're signed up! 
Use the login form below to get started.\" )\n\t\treturn HttpResponseRedirect(reverse('login'))\n\treturn render(request, 'create_account.html', {\n\t\t'form': form,\n\t})", "title": "" }, { "docid": "cb5a6b819779b3d056440bccfc7c9f1c", "score": "0.5924597", "text": "def newaccount(self,walletname,info):\n\n data = {\n \"apiKey\" : self.key,\n \"walletName\" : walletname,\n \"info\" : info\n }\n\n url = \"/auth/newaccount\"\n\n return self.process(url,data)", "title": "" }, { "docid": "be253de1015f20aa2db9667433a60be1", "score": "0.5919441", "text": "def make_account_cmd(\n username: str,\n password: str,\n admin: bool = False,\n adminorg: bool = False,\n org: str = None,\n) -> None:\n orgs = [org] or []\n aorgs = [adminorg] if adminorg else []\n\n salt = bcrypt.gensalt()\n pwd = bcrypt.hashpw(password.encode(\"utf-8\"), salt).decode(\"ascii\")\n doc = {\n \"email\": username,\n \"password\": pwd,\n \"displayName\": username,\n \"organisations\": orgs,\n \"ownerships\": aorgs,\n \"defaultOrganisation\": None, # Default org for user\n \"verified\": True, # Account verified via email?\n \"userlevel\": \"admin\" if admin else \"user\",\n }\n db = ESDatabase()\n db.create_index(doc_type=\"useraccount\", id_=username, body=doc)\n print(\"Account created!\")", "title": "" }, { "docid": "db07d4ad7ba60547629deb406c342adb", "score": "0.5906373", "text": "def admin_create_account(self, account_name, card_id, amount):\n\n return self.modify('INSERT INTO cards(account_name, card_id, balance) \\\n values (?, ?, ?);', (account_name, card_id, amount,))", "title": "" }, { "docid": "5a44e8b637de56ad42e4a8cf3bd3fffd", "score": "0.59060043", "text": "def create_account(self, name: str, contact_info: Dict[str, Any], payment_info: Dict[str, Any],\n **kwargs) -> any:", "title": "" }, { "docid": "525d9f8e104a8f1ec348c389f79380ba", "score": "0.59018874", "text": "def test_create_swift_account(self):\n pass", "title": "" }, { "docid": "d93da30fcd080c31d68bff466b490a3a", "score": "0.59000576", "text": "def create_account():\n print(\"Please choose one kind of card\\n\"\n \"[1]: Card\\n\"\n \"[2]: CreditCard\\n\")\n kind = input()\n if 1 == int(kind):\n card = Card()\n elif 2 == int(kind):\n card = CreditCard()\n print(\"Please set credit limit(between 10,000 to 100,000):\")\n card.set_creditLimit(int(input()))\n else:\n print(\"Please input true value\")\n return\n print(\"Please enter your name:\\n\")\n card.set_owner(str(input()))\n print(\"Please enter your person ID:\\n\")\n card.set_personID(str(input()))\n print(\"Please enter your phone number:\\n\")\n card.set_phoneNumber(str(input()))\n print(\"Please set your password:\\n\")\n card.set_password(str(input()))\n add_card(card)", "title": "" }, { "docid": "4bcc0315c6e77e4e70e9a1016b6a6fea", "score": "0.5893448", "text": "def create_account(details):\n\n createtoken = fetch_create_token()\n\n response = S.post(url=API_ENDPOINT, data={\n 'action': 'createaccount',\n 'createtoken': createtoken,\n 'username': details['name'],\n 'password': details['password'],\n 'retype': details['retype'],\n 'email': details['email'],\n 'createreturnurl': 'http://127.0.0.1:5000/',\n 'captchaId': details['captcha_id'],\n 'captchaWord': details['captcha_word'],\n 'format': 'json',\n })\n\n data = response.json()\n createaccount = data['createaccount']\n\n if createaccount['status'] == \"PASS\":\n flash(\n 'Success! An account with username ' +\n details['name'] +\n ' has been created!')\n else:\n flash(\n 'Oops! 
Something went wrong -- ' +\n createaccount['messagecode'] +\n \".\" +\n createaccount['message'])", "title": "" }, { "docid": "95010f0ff3c2149ccc0a19a984f13d39", "score": "0.58373845", "text": "def create_account(self):\n name = names.get_full_name()\n r_first_name = name.split(' ')[0]\n r_last_name = name.split(' ')[1]\n r_email = r_first_name[0] + r_last_name + str(random.randrange(99)) + self.settings['catchall_email_suffix']\n r_email = r_email.lower()\n r_password = self.settings['password_prefix'] + str(random.randrange(99))\n # set default names\n self.first_name = r_first_name\n self.last_name = r_last_name\n self.email = r_email\n self.password = r_password\n # create the account\n self.log('pass: {}'.format(r_password))\n self.log('creating account - F: {} L: {}'.format(r_first_name, r_last_name))\n self.log('getting auth token from login page')\n try:\n r = self.s.get(\n url='https://hotwheelscollectors.mattel.com/webapp/wcs/stores/servlet/AjaxLogonView',\n params={\n 'catalogId': self.settings['catalog_id'],\n 'storeId': self.settings['store_id']\n }\n )\n except requests.exceptions.ConnectionError:\n self.log('unable to reach server while getting auth token from login page')\n return False\n try:\n r.raise_for_status()\n except requests.exceptions.HTTPError:\n self.log('bad status {} while getting auth token from login page'.format(r.status_code))\n return False\n try:\n auth_token = re.findall('<input type=\"hidden\" name=\"authToken\" value=\"(.*)\"', r.text)[0]\n except IndexError:\n self.log('unable to find auth token in page source')\n return False\n try:\n r = self.s.post(\n url='https://hotwheelscollectors.mattel.com/shop/UserRegistrationAdd',\n data={\n 'authToken': auth_token,\n 'myAcctMain': '1',\n 'new': 'Y',\n 'storeId': self.settings['store_id'],\n 'catalogId': self.settings['catalog_id'],\n 'rememberMe': 'true',\n 'sourceName': 'WebCreateAccount',\n 'URL': 'AjaxLogonForm?logonId*=&firstName*=&lastName*=&address1*=&address2*=&city*=&country'\n '*=&state*=&zipCode*=&email1*=&phone1*=&register_type=user',\n 'URLOrg': 'AjaxLogonForm?usr_logonId*=&usr_firstName*=&usr_lastName*=&usr_address1*=&usr_address2'\n '*=&usr_city*=&usr_country*=&usr_state*=&usr_zipCode*=&usr_email1*=&usr_phone1'\n '*=&org_orgEntityName*=&org_address1*=&org_address2*=&org_city*=&org_country'\n '*=&org_state*=&org_zipCode*=&org_email1*=&org_phone1*=&register_type=organization',\n 'receiveSMSNotification': 'false',\n 'receiveSMS': 'false',\n 'errorViewName': 'AjaxLogonView',\n 'page': 'account',\n 'registerType': 'G',\n 'primary': 'true',\n 'isBuyerUser': 'true',\n 'demographicField5': 'on',\n 'challengeQuestion': '-',\n 'challengeAnswer': '-',\n 'usr_profileType': 'B',\n 'addressType': 'PARTY',\n 'receiveEmail': 'false',\n 'AddressForm_FieldsOrderByLocale': 'first_name,LAST_NAME,EMAIL1_HIDDEN',\n 'firstName': r_first_name,\n 'lastName': r_last_name,\n 'email1': r_email,\n 'logonIdDisplay': r_email,\n 'logonId': '{}|{}'.format(self.settings['store_id'], r_email),\n 'logonIdVerifyDisplay': r_email,\n 'logonIdVerify': '{}|{}'.format(self.settings['store_id'], r_email),\n 'logonPassword': r_password,\n 'logonPasswordVerify': r_password,\n 'user_name': r_email.split('@')[0].lower(),\n 'userName': '', # left blank\n }\n )\n except requests.exceptions.ConnectionError:\n self.log('unable to reach server')\n return False\n try:\n r.raise_for_status()\n except requests.exceptions.HTTPError:\n self.log('bad status code {} while creating account'.format(r.status_code))\n return False\n self.log('generated 
account: {}:{}'.format(r_email, r_password))\n self.write_account_to_file(r_email, r_password)\n return True", "title": "" }, { "docid": "845c1f77a69c6346f3791564629ddc05", "score": "0.5832486", "text": "def create_new_user(context):\n print(\"I am creating a new user\")\n print(\":) :) :) :) :) :)\")\n print(\"More code would go here\")\n # user.user_creator()\n prefix = context.config.userdata.get('prefix')\n\n pdb.set_trace()", "title": "" }, { "docid": "a7ff368da37d9cb791d5a02bce166a0d", "score": "0.5813963", "text": "def account_create(self):\n user = Session.query(Users).get(session['REMOTE_USER'])\n if user.logname:\n h.flash.set_message(u'You have a login', u'error')\n h.redirect('/users/index')\n return render('/access/account_create.mako')", "title": "" }, { "docid": "908687d15c3324065a2c18ffcb21fed1", "score": "0.5813328", "text": "def make_id(self):\n print(\"<< Making a new account... >>\")\n id = input(\"Please type in an id: \")\n password = input(\"Please type in a password: \")\n\n self.id = id\n self.password = password\n\n print(\"Your account has successfully been created.\\n\")", "title": "" }, { "docid": "c21519207092b722254d33111373b7de", "score": "0.57809645", "text": "def create_admin(self):\n self.elena = Client()\n user = User.objects.create_superuser(\n email='[email protected]',\n password='test',\n )\n self.elena.force_login(user)", "title": "" }, { "docid": "c40661dc617572feba289b687098d62c", "score": "0.57683194", "text": "def create_user(self, user_id: str, user_name: str) -> None:", "title": "" }, { "docid": "4481599eba181b1df0c803c1a02fc129", "score": "0.576256", "text": "def create_new_user(self, button):\n user_id = self.builder.get_object(\"login_id\")\n user_id_text = user_id.get_text()\n user_password = self.builder.get_object(\"login_password\")\n user_password_text = user_password.get_text()\n self.user_id = int(user_id_text)\n self.user_password = user_password_text\n if user_id_text and user_password_text:\n private_key_owner, public_key_owner = rsa_key_generation()\n private_key_owner_str = private_key_owner.save_pkcs1().decode('ascii')\n public_key_owner_str = public_key_owner.save_pkcs1().decode('ascii')\n save_user_to_db(self.user_id, public_key_owner_str, private_key_owner_str, self.user_password)\n self.user_private_key = private_key_owner\n self.user_public_key = public_key_owner\n print(user_id_text, public_key_owner)\n print('Calling API to create user')\n try:\n create_user(self.user_id, self.user_public_key, self.user_password)\n except (URLError, HTTPError) as e:\n pass\n self.login_window.hide()\n self.logged()", "title": "" }, { "docid": "3ba90692afede26858007d8f26f96eca", "score": "0.57568115", "text": "def create_an_account():\n\n firstname = request.form.get('firstname')\n lastname = request.form.get('lastname')\n email = request.form.get('email')\n password = request.form.get('password')\n\n user = crud.get_user_by_email(email)\n\n if user:\n # flash('Sorry. This login email already exists. Please try a different email address to register, or login to your exisiting account.')\n return ('Sorry. This login email already exists. Please try a different email address to register, or login to your exisiting account.')\n else:\n user = crud.create_user(firstname, lastname, email, password)\n return ('Account succesfully created. 
Please proceed and log in to your account.')", "title": "" }, { "docid": "53e2b0223a9e61926e1666dfe5151693", "score": "0.57273966", "text": "async def create(self, ctx: commands.Context):", "title": "" }, { "docid": "9796eebfb6c71e38ed111ac9185e0774", "score": "0.57241696", "text": "def signupeditor():\n form = SignupForm()\n # validate if the user filled out the form correctly\n # validate_on_submit is a built-in method\n if form.validate_on_submit():\n # make sure it's not an existing user\n existing_user = User.query.filter_by(email=form.email.data).first()\n if existing_user is None:\n # create a new user\n user = User(\n name=form.name.data,\n email=form.email.data,\n organization=form.organization.data,\n user_type='editor',\n user_status='pending'\n )\n # use our set_password method\n user.set_password(form.password.data)\n # commit our new user record and log the user in\n db.session.add(user)\n db.session.commit() # Create new user\n\n # new user now has a type, extract and send to permissions signal\n # use identity_changed to send signal to flask_principal showing identity, user_type\n identity_changed.send(current_app._get_current_object(), identity=Identity(user.id,user.user_type))\n # placing identity_object into variable for print/display\n identity_object = Identity(user.id,user.user_type)\n # printing identity_object to console for verification\n print('Sent: ',identity_object,' ...to current_app', file=sys.stderr)\n\n login_user(user, remember=False, duration=None, force=False, fresh=True)\n # if everything goes well, they will be redirected to the main application\n return redirect(url_for('editor_bp.dashboard_editor'))\n flash('A user already exists with that email address.')\n return render_template(\n 'signup_editor.jinja2',\n title='Create an Editor Account.',\n form=form,\n template='signup-page',\n body=\"Sign up for a Sponsor account.\"\n )", "title": "" }, { "docid": "93385e5d2c420f9d8a5300c2f3bc9648", "score": "0.57104653", "text": "def create_account(self):\n # Sanity check - account name\n if self.name == \"\":\n return {'status': False, 'msg': 'No account name specified'}\n\n # check that account name is not already used\n account = Account.query.filter(Account.name == self.name).first()\n if account:\n return {'status': False, 'msg': 'Account already exists'}\n\n db.session.add(self)\n db.session.commit()\n return {'status': True, 'msg': 'Account created successfully'}", "title": "" }, { "docid": "d8451ec9f375233a2aa6d17e2859e7c7", "score": "0.5709214", "text": "def register_user():\n return render_template('create_account_form.html')", "title": "" }, { "docid": "b41e8a7e40d933187a1f74b29e883ef8", "score": "0.5707474", "text": "def test_create_user(self):\n\n self.browser.get(self.url(''))\n self.assert_at_login_page()\n\n self.do_create_user()\n\n self.assert_at_account_home()", "title": "" }, { "docid": "6fefb67ff9914fbdaf64b03a5ac3c2f9", "score": "0.570385", "text": "def account():\n global lakctx\n portfolio = lakctx.get_portfolio()\n\n account = edit_and_parse(None, lakshmi.Account.from_dict, 'Account.yaml')\n portfolio.add_account(account)\n lakctx.save_portfolio()", "title": "" }, { "docid": "7b5937e443faa91b7a02c4ac6ef81105", "score": "0.5701974", "text": "def create_account_in_cloudcheckr(env, adminapikey, accountname):\n\n api_url = env + \"/api/account.json/add_account_v3\"\n\n add_account_info = json.dumps({\"account_name\": accountname})\n\n response_post = requests.post(api_url, headers={\"Content-Type\": \\\n \"application/json\", 
\"access_key\": \\\n adminapikey}, data=add_account_info)\n\n if \"cc_external_id\" in response_post.json():\n print(\"Successfully created the account \" + accountname + \\\n \" with external_id \" + response_post.json()[\"cc_external_id\"])\n print(response_post.json())\n return response_post.json()[\"cc_external_id\"]\n else:\n print(response_post.json())\n return None", "title": "" }, { "docid": "8d38d02588494286fbfa377bc62107a2", "score": "0.5697413", "text": "def do_create_user(self):\n\n self.browser.find_element_by_id(\"id_email\").send_keys(self.username)\n self.browser.find_element_by_id(\"id_password1\").send_keys(self.password)\n self.browser.find_element_by_id(\"id_password2\").send_keys(self.password)\n\n self.browser.find_element_by_css_selector(\"#sign-up button[type='submit']\").click()", "title": "" }, { "docid": "92d25728dc7445d8b1407dccf8fd92f4", "score": "0.5689697", "text": "def create_user(account, password):\n new_user = Credential(account, password)\n return new_user", "title": "" }, { "docid": "d4842785557892905a27e7192bac610a", "score": "0.5684663", "text": "def create_account(self, **kwargs):\n\n self.account.post_account_create(body=mangadex.CreateAccount(**kwargs))", "title": "" }, { "docid": "340f56561fd27c0ee41b065ff00ed3f5", "score": "0.5681051", "text": "def create_account(accountname, accountemail, accountrole, access_to_billing, scp, root_id, accountbilling):\n account_id = 'None'\n client = get_client('organizations')\n try:\n create_account_response = client.create_account(Email=accountemail, AccountName=accountname,\n RoleName=accountrole,\n IamUserAccessToBilling=access_to_billing,\n Tags=[\n {\n \"Key\": \"AccountBilling\",\n \"Value\": accountbilling\n }\n ])\n except botocore.exceptions.ClientError as exception:\n print(exception)\n sys.exit(1)\n # time.sleep(30)\n create_account_status_response = client.describe_create_account_status(\n CreateAccountRequestId=create_account_response.get('CreateAccountStatus').get('Id'))\n account_id = create_account_status_response.get('CreateAccountStatus').get('AccountId')\n\n while account_id is None:\n create_account_status_response = client.describe_create_account_status(\n CreateAccountRequestId=create_account_response.get('CreateAccountStatus').get('Id'))\n account_id = create_account_status_response.get('CreateAccountStatus').get('AccountId')\n return (create_account_response, account_id)", "title": "" }, { "docid": "b32da17f32c8fa2797375c56864341fc", "score": "0.56647474", "text": "def invoke(self):\n print(\"Register an account\\n\")\n # get user details\n username = self.get_username()\n password = self.get_password()\n first_name = self.get_first_name()\n last_name = self.get_last_name()\n email = self.get_email()\n self.register_face(username)\n\n # create new user\n credentials = UserCredential(username, password)\n self.db.insert_user(credentials, first_name, last_name, email)\n print(\"Successfully registered!\")", "title": "" }, { "docid": "8bc8f22d94926009c7783389789be2e1", "score": "0.566353", "text": "def user_account_create(request, oauth2_context):\n user_id = request.matchdict.get('user_id')\n account_info = request.deserialized_body\n \n result = send_task(\"accounts.create\", \n kwargs=dict(account_info=account_info,\n user_id=user_id)).get()\n \n if type(result) is dict and \"error\" not in result:\n request.response.status_int = 201\n else:\n request.response.status_int = 403\n \n return result", "title": "" }, { "docid": "714cc647d3ef9b2c512ce2d197f4c1d7", "score": "0.5663185", "text": 
"def account_create_db(self):\n user = Session.query(Users).get(session['REMOTE_USER'])\n user.email = request.POST.get('email')\n user.logname = request.POST.get('logname')\n user.home = '%s%s' % (home_dir_prefix, user.id)\n user.start_date = datetime.now()\n frm = request.POST.get('from')\n if frm == 'creation':\n logname = Session.query(Users).filter(Users.logname == user.logname).all()\n if logname:\n h.flash.set_message('Logname already in use. Please choose another one', 'error')\n return render('/access/account_create.mako')\n user.password = unicode(hashlib.sha1(request.POST.get('password')).hexdigest(), 'utf-8')\n Session.add(user)\n Session.commit()\n h.flash.set_message(u'Account succesfully created/modified.', u'success')\n h.redirect('/users/index')", "title": "" }, { "docid": "af49221240d0dd6810e107fbc55bc8fe", "score": "0.5639136", "text": "def test_create_account(self):\n response = self.client.post(reverse(\"account-list\"),\n {\"inn\": \"012345678912\",\n \"amount\": 100000.00,\n \"user\": 5})\n\n self.assertEqual(response.status_code, 201,\n f\"Wrong status code: {response.status_code} \"\n f\"\\n {response.data}\")\n self.assertTrue(response.data,\n f\"Wrong data: {response.data}\")", "title": "" }, { "docid": "cf24818ab667439c45e6f6fbbcc6e944", "score": "0.5638116", "text": "def edit_account(self, email, name):\n pass", "title": "" }, { "docid": "1b76cd37c3aad5629378c33a74a25398", "score": "0.5610823", "text": "def account():\n raise SubcommandNotImplemented(\"account\")", "title": "" }, { "docid": "5071fcc3079af28ca0d4698bd19a74fb", "score": "0.56086427", "text": "def create_user(service, account):\n print(\"[*] Attempting to create new user {}...\"\n .format(account))\n password = ''.join(random.SystemRandom()\n .choice(string.ascii_uppercase + string.digits)\n for _ in range(12))\n body = {'primaryEmail': account,\n 'name': {'givenName': 'GSuite', 'familyName': 'API-Admin'},\n 'password': password}\n\n service.users().insert(body=body).execute()\n print(\"[+] Created user with password: {}\".format(password))\n\n print(\"[*] Attempting to make user an admin...\")\n service.users().makeAdmin(userKey=account,\n body={'status': True}).execute()\n print(\"[+] Success! 
Enjoy god-mode.\")", "title": "" }, { "docid": "57ecee692aa7e8c2e4d4749170fb2012", "score": "0.55995154", "text": "def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)\n UserProfile.objects.create(user=instance)\n Account.objects.create(owner=instance)", "title": "" }, { "docid": "5d620e2bbac30f2db8315fd6a1229b68", "score": "0.55691993", "text": "def account():\r\n\r\n return render_template('account.html', title='Account')", "title": "" }, { "docid": "ccde5aa2df4d35583bdebf6f962d2d11", "score": "0.55662644", "text": "def create_account(first_name, last_name, user_name, password):\n new_user = User(first_name, last_name, user_name, password)\n return new_user", "title": "" }, { "docid": "9fbf7e554aba859e2f94608157388ab5", "score": "0.5545296", "text": "def add_editor(user, fname, editor, **kwargs):\n\n if FILES[fname].owner != user:\n LOG.warning('{0} was trying to access editors of {1} without permissions'.format(user, fname))\n raise ServerException('Must be owner to change editors')\n\n if editor not in FILES[fname].users:\n FILES[fname].users.append(editor)\n LOG.info('{0} made {1} an editor of {2}'.format(user, editor, fname))\n\n return {}", "title": "" }, { "docid": "7cb4fef2893563d417662f2f6837a99d", "score": "0.5532979", "text": "def test_create_account(self):\n test_account = generate_account()\n body = RequestAccount.from_dict(test_account)\n response = self.client.open(\n '/customer/{customer_id}/accounts'.format(\n customer_id=self.customer_id),\n method='POST',\n data=json.dumps(body),\n headers=headers,\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n self.compare_response(test_account, response.json)", "title": "" }, { "docid": "c7b7dd27e26c4b2158e06b37c0217724", "score": "0.55320275", "text": "def createUser(self, user):\n pass", "title": "" }, { "docid": "afbf2ce3417aa035f20ceb5a5c174f29", "score": "0.5529432", "text": "def create():\n get_log().info(\"create: begin.\")\n\n from pp.db import session\n\n user_dict = dict(\n username=\"admin\",\n display_name=u'Andrés Plácido Bolívar',\n email=u'andrés.bolí[email protected]',\n phone=\"123\",\n password=\"password\",\n )\n\n s = session()\n admin_user = s.add(UserTable(**user_dict))\n\n # Call any custom creating hooks here\n get_log().info(\"create: Initial admin user <%s> created OK.\" % admin_user)", "title": "" }, { "docid": "3b176dd3a613f9c348856fd810408b91", "score": "0.5526705", "text": "async def create_credential(\n self,\n schema,\n credential_offer,\n credential_request,\n credential_values,\n revoc_reg_id: str = None,\n tails_reader_handle: int = None,\n ):\n pass", "title": "" }, { "docid": "d0e56324861095686e901561cb20f7ee", "score": "0.5525229", "text": "def api_post_account():\n\treturn api_post(request.json, object_type=\"account\")", "title": "" }, { "docid": "78a4f3967a8f4418cd0ef85bba8015b6", "score": "0.5523277", "text": "def create_new_user(ruta: str):\n file = open(ruta, \"w\")\n print(\"create\")\n file.close()", "title": "" }, { "docid": "8890c3ed221b25cb900c0ea7e47f22ef", "score": "0.55230826", "text": "def signUp(request):\n return render(request, \"ToDoer/createAccount.html\")", "title": "" }, { "docid": "1d8f834a71f61e72b881c46c6e43e323", "score": "0.5522491", "text": "def create_user(self, **kwargs):\n # TODO : Implement this method\n return True", "title": "" }, { "docid": "71e44a48b1783c223c4076017a1b4adc", "score": "0.5515817", "text": "def 
create_account_with_assets(domain, name, public_key, asset_name, asset_qty):\n asset_id = asset_name + '#' + domain\n # 1. Create account\n tx = iroha_config.IROHA_ADMIN.transaction(\n [iroha_config.IROHA_ADMIN.command('CreateAccount',\n account_name=name,\n domain_id=domain,\n public_key=public_key)])\n IrohaCrypto.sign_transaction(tx, iroha_config.ADMIN_PRIVATE_KEY)\n send_transaction_and_print_status(tx)\n\n # 2. Create credit for the user\n tx = iroha_config.IROHA_ADMIN.transaction([iroha_config.IROHA_ADMIN.command('AddAssetQuantity',\n asset_id=asset_id,\n amount=asset_qty)])\n IrohaCrypto.sign_transaction(tx, iroha_config.ADMIN_PRIVATE_KEY)\n send_transaction_and_print_status(tx)\n\n # 3. Transfer credit to the user\n dest_account_id = name + '@' + domain\n tx = iroha_config.IROHA_ADMIN.transaction([\n iroha_config.IROHA_ADMIN.command('TransferAsset',\n src_account_id='admin@test',\n dest_account_id=dest_account_id,\n asset_id=asset_id,\n description='initial credit',\n amount=asset_qty)])\n IrohaCrypto.sign_transaction(tx, iroha_config.ADMIN_PRIVATE_KEY)\n send_transaction_and_print_status(tx)", "title": "" }, { "docid": "01939f498d2eb72cfd366d31577054f2", "score": "0.5512322", "text": "def create_reader_account():\n #创建reader账号\n #run(\"\"\"mysql -uroot -e \"grant select on *.* to 'reader'@'%' identified by 'reader@secu'; flush privileges;\" \"\"\")\n run(\"\"\"mysql -uroot -e \"grant select, show view on *.* to 'reader'@'localhost' identified by 'reader@secu'; flush privileges;\" \"\"\")", "title": "" }, { "docid": "696ab1a4d21f7314aaac2a4d80cbf8d6", "score": "0.5510905", "text": "def run(args):\n\n status = 0\n message = None\n\n account_uid = None\n\n try:\n account_name = args[\"account_name\"]\n except:\n account_name = None\n\n try:\n description = args[\"description\"]\n except:\n description = None\n\n try:\n authorisation = Authorisation.from_data(args[\"authorisation\"])\n except:\n authorisation = None\n\n if account_name is None or description is None \\\n or authorisation is None:\n raise CreateAccountError(\"You must supply both an account name \"\n \"and a description to create an account\")\n\n if not isinstance(authorisation, Authorisation):\n raise TypeError(\"The passed authorisation must be of type \"\n \"Authorisation\")\n\n authorisation.verify()\n\n # try to create a 'main' account for this user\n accounts = Accounts(authorisation.user_uid())\n account = accounts.create_account(name=account_name,\n description=description)\n\n account_uid = account.uid()\n\n status = 0\n message = \"Success\"\n\n return_value = create_return_value(status, message)\n\n if account_uid:\n return_value[\"account_uid\"] = account_uid\n\n return return_value", "title": "" }, { "docid": "479bb6d2ccb18dedbdb34357b65b8256", "score": "0.5507388", "text": "def create():\n pass", "title": "" }, { "docid": "f601daf0a15ab402f273dd592928f5c6", "score": "0.5499275", "text": "def create_user(self, email, username, password): \n tmp = user(key_name=username.lower()) \n tmp.username = username \n tmp.email = email \n tmp.password = password \n\n mail.send_mail( \n sender=email, \n to=email, \n subject=\"Account Activation\", \n body=\"\"\"Dear \"\"\"+username+\"\"\": \n\nA new account has been created with this email address at http://www.example.com \n\nBut in order to log in and play you must first activate your account \nwith your unique activation code included in this email. 
Simply click \nthe link included here to activate your account or copy and paste the \nfollowing URL into your browser \n\n\"\"\"+\"http://www.example.com/validate?activate=\"+tmp.activation_code) \n\n self._sync_user(tmp)", "title": "" }, { "docid": "22bfff4c70d1f9c7df4b62083cc267d6", "score": "0.5498268", "text": "def create_user_slot(self):\n self.check_register_data()\n if self.creation_flag:\n user_str = self.user_edit.text()\n password_str = self.password_edit.text()\n self.authorised_data[user_str.lower()] = hashlib.sha1(password_str.encode('utf-8')).hexdigest()\n self.write_authorised_data()\n self.registerDone.emit()\n self.close()\n else:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n\n msg.setFixedSize(200, 100)\n msg.setWindowTitle(\"Ошибка создания пользователя\")\n msg.setText(\"Проверьте введенные данные:\")\n msg.setInformativeText(\"Пользователь с таким именем уже существует \\n\"\n \"или проверьте правильность паролей\")\n msg.setStandardButtons(QMessageBox.Ok)\n msg.exec_()", "title": "" }, { "docid": "7638a199067959d1ceadefe00b5f7f04", "score": "0.54961556", "text": "def test_add_account(self):\n body = NewAccountRequest()\n headers = [('idempotency_key', '38400000-8cf0-11bd-b23e-10b96e4ef00d')]\n response = self.client.open(\n '/paySmart/ps-processadora/v1/accounts',\n method='POST',\n data=json.dumps(body),\n headers=headers,\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "638b936dbe58772d183647c0c11ad00c", "score": "0.5492303", "text": "def add_user(self):", "title": "" }, { "docid": "09753ebb373d5d77fa6a81c30b0352ab", "score": "0.54896414", "text": "def user_create(ctx, username, firstname, lastname, email, password, enterprise_id ):\n \n import hashlib\n \n # Define mandotory values\n params = {'userName' : username,\n 'firstName' : firstname,\n 'lastName' : lastname,\n 'email' : email,\n 'password' : hashlib.sha1(password).hexdigest() }\n \n result = ctx.obj['nc'].post(\"enterprises/%s/users\" %enterprise_id, params)[0]\n print_object( result, only=ctx.obj['show_only'] )", "title": "" }, { "docid": "cf67ae46fa3c1693a146c029cc7fb1b7", "score": "0.54879665", "text": "def _register_submit(request, info):\n log.info('Creating new user {} <{}> as {}'.format(\n info.get('name'), info.get('email'),\n UserStatus(int(info.get('account_type'))).label\n ))\n\n # Debexpo use the email field as the username\n user = User.objects.create_user(info.get('email'), info.get('name'))\n user.save()\n profile = Profile(user=user, status=info.get('account_type'))\n profile.save()\n\n uid = urlsafe_base64_encode(force_bytes(user.pk))\n token = default_token_generator.make_token(user)\n\n _send_activate_email(request, uid, token, user.email)\n\n log.debug('New user saved')\n return render(request, 'activate.html', {\n 'settings': settings\n })", "title": "" }, { "docid": "1e95ac0b819a5614559e495810aa3636", "score": "0.5483895", "text": "async def register(self, ctx):\n user = ctx.message.author\n try:\n account = self.bank.create_account(user)\n await self.bot.say(\"{}\\n```css\\nAccount opened. 
Current balance: {}\\n\"\n \"Remember to periodically type {}payday to get free credits.\\n```\".format(user.mention,\n account.balance,ctx.prefix))\n await ctx.invoke(self.payday)\n except AccountAlreadyExists:\n await self.bot.say(\"{}\\n```css\\nYou already have an account at the bank.\\n```\".format(user.mention))", "title": "" }, { "docid": "dabaadbc3e65fdbc8063530f80a87cbd", "score": "0.54831576", "text": "def _create_account(self, username, expected_status=200):\n values = {\n \"username\": username,\n \"first_name\": \"Tester\", \"last_name\": \"Toto\",\n \"role\": \"SimpleUsers\", \"quota_act\": True,\n \"is_active\": True, \"email\": username,\n \"random_password\": True, \"stepid\": 2\n }\n response = self.client.post(\n reverse(\"admin:account_add\"), values,\n HTTP_X_REQUESTED_WITH=\"XMLHttpRequest\")\n self.assertEqual(response.status_code, expected_status)\n return values", "title": "" }, { "docid": "6c1037535115552a5331cf905ced84d1", "score": "0.548143", "text": "def create_account(request):\r\n if request.method == 'POST':\r\n form = CustomUserCreationForm(request.POST)\r\n if form.is_valid():\r\n form.save()\r\n username = form.cleaned_data.get('username')\r\n raw_password = form.cleaned_data.get('password1')\r\n user = authenticate(username=username, password=raw_password)\r\n login(request, user)\r\n return redirect('/users/profile')\r\n else:\r\n form = CustomUserCreationForm()\r\n return render(request, 'users/create_account.html', {'form': form})", "title": "" }, { "docid": "2247645e25d82878cd934b35bcb7ca19", "score": "0.54801875", "text": "def create_account(self, data):\r\n with sqlite3.connect(self.__user_db) as conn:\r\n self.__init_table\r\n _cursor = conn.cursor()\r\n try:\r\n _cursor.execute(self.commands.insert_user_data, data)\r\n except sqlite3.IntegrityError:\r\n return it.USERNAME_ALREADY_EXISTS\r\n else:\r\n conn.commit()\r\n return it.CREATE_ACCOUNT_SUCCESSFUL", "title": "" }, { "docid": "bf96cba14c28aa738c70e7fc084e2704", "score": "0.54733294", "text": "def create_facebook_account(sender, instance, created, **kwargs):\n if created:\n FacebookAccount.objects.create(user=instance)", "title": "" }, { "docid": "7f461261f7ab84bbe1abd06365950b1a", "score": "0.5470733", "text": "def sample_account(user, **params):\n defaults = {\n 'name': 'Sample Account'\n }\n defaults.update(params)\n\n return Account.objects.create(user=user, **defaults)", "title": "" }, { "docid": "7435bd02629fc39d3db28814ccbe02b7", "score": "0.54686296", "text": "def create_account(self, form):\n self.created_user.first_name = form.cleaned_data['firstname']\n self.created_user.last_name = form.cleaned_data['lastname']\n self.created_user.save()\n return super(MapStorySignup, self).create_account(form)", "title": "" }, { "docid": "8c8e0226010c1942d27b4c2522fac8c0", "score": "0.5465579", "text": "def user_create(username, password, is_admin):\n check_clientlib_response(\n lambda: C.user.create(username, password, is_admin)\n )", "title": "" }, { "docid": "d35f2fb94a33683921b85adedc814ac4", "score": "0.54616106", "text": "def create_admin():\n # db.session.add(User(email='[email protected]', password='admin', admin=True))\n # db.session.commit()", "title": "" }, { "docid": "e81dd2a9bfb18277987ff255b2d31d7f", "score": "0.5447556", "text": "def create_admin(username=None, password=None, email=None):\r\n\r\n if not (username and password and email):\r\n username = prompt(\"Username\")\r\n email = prompt(\"A valid email address\")\r\n password = prompt_pass(\"Password\")\r\n\r\n u = 
User.create_admin_user(username=username, password=password, email=email)\r\n print(\"Creating admin user :%s Sucessful!\" %u.username)", "title": "" }, { "docid": "b6c6b218021c27e4ae0207f5da11bbf8", "score": "0.5430904", "text": "def create_admin():\n user = User(email='[email protected]', password='admin', username='admin', admin=True)\n db.session.add(user)\n db.session.flush()\n db.session.flush()\n org = Organization(name=\"Admin Organization\", slug=\"admin-org\")\n org.add_user(user, admin=True)\n db.session.add(org)\n db.session.flush()\n db.session.refresh(org)\n proj = Project(name=\"Admin Project\", slug=\"admin-proj\", organization_id=org.id)\n db.session.add(proj)\n db.session.flush()\n db.session.refresh(proj)\n app = Application(name=\"Admin Application\", slug=\"admin-app\", project_id=proj.id)\n db.session.add(app)\n db.session.commit()", "title": "" }, { "docid": "ca542ebf6fca54cb3857c31d20f6eb73", "score": "0.5426799", "text": "def create_account(req):\n # this is the non-decorator version of the login_required decorator\n # basically it checks, if the user is authenticated and redirects him, if\n # not. The decorator could not handle the reverse url-resolution.\n if not req.user.is_authenticated():\n return redirect(reverse('oweb:app_login'))\n\n acc = Account()\n acc.owner = req.user\n acc.save()\n\n return redirect(reverse('oweb:account_settings', args=[acc.id]),\n )", "title": "" }, { "docid": "bca678b1d45ad25f0ff2c58a230663a1", "score": "0.542405", "text": "def save_account(account):\n account.save_account()", "title": "" }, { "docid": "329b0984418b65d95fd7631826519c11", "score": "0.5421718", "text": "def create_user():\n user = dict(name='Someone',\n email='[email protected]',\n tags=[\"awesome\", \"docker\", \"guru\"])\n db.users.insert_one(user)\n return 'Success!'", "title": "" }, { "docid": "f8ecc1f8d561026ce80ace392ee99b1a", "score": "0.5418208", "text": "def register_new_account(conf, key):\n LOG.info(\"Registering with ACME server with the new account key\")\n newReg = messages.NewRegistration(contact=tuple(conf['info']), key=key.public_key())\n acme_client = client.Client(conf['directory'], key)\n registration_resource = acme_client.register(newReg)\n LOG.info(\"Agreeing on the TOS on your behalf\")\n acme_client.agree_to_tos(registration_resource)", "title": "" }, { "docid": "906f18fdb5daeb565065ec3da8ec2118", "score": "0.5414114", "text": "def generate_new_account(self):\n title_bar = t.Frame(self.root, width=self.root.winfo_width(), height=int(self.root.winfo_height() * 0.1),\n bg=self.color3)\n title_bar.pack(side=\"top\", fill=\"x\", expand=\"false\")\n title_bar.update()\n title_bar.propagate(0)\n description_label = t.Label(title_bar, bg=self.color3, text=\"A Biosensor for measuring Vancomycin\",\n font=(self.font, self.normalfontsize), fg=self.color4)\n description_label.place(relwidth=0.4, relheight=1.0, relx=0.3)\n description_label.update()\n self.includelogo(title_bar)\n\n # create upper frame with text\n account_frame = t.Frame(self.root, width=self.root.winfo_width(), height=int(self.root.winfo_height() * 0.3),\n bg=self.color3)\n account_frame.pack(side=\"top\", fill=\"x\", expand=\"false\")\n account_frame.update()\n account_frame.propagate(0)\n\n # create entry box\n input_frame = t.Frame(account_frame, width=int(self.root.winfo_width()),\n height=int(self.root.winfo_height() / 2), bg=self.color2)\n input_frame.pack(side=\"top\", expand=\"false\")\n input_frame.update()\n input_frame.propagate(0)\n\n # label and box for username\n 
user_label = t.Label(input_frame, bg=self.color2, text=\"Username:\", font=(self.font, self.normalfontsize))\n user_label.place(relheight=0.3, relwidth=0.1, relx=0.3, rely=0.05)\n username_box = t.Entry(input_frame, font=(self.font, self.normalfontsize), bg=self.color3)\n username_box.place(relheight=0.3, relwidth=0.2, relx=0.4, rely=0.05)\n\n # label and box for password\n pwd_label = t.Label(input_frame, bg=self.color2, text=\"Password:\", font=(self.font, self.normalfontsize))\n pwd_label.place(relheight=0.3, relwidth=0.1, relx=0.3, rely=0.35)\n password_box = t.Entry(input_frame, show=\"*\", font=(self.font, 24), bg=self.color3)\n password_box.place(relheight=0.3, relwidth=0.2, relx=0.4, rely=0.35)\n\n # create login button\n create_button = t.Button(input_frame, text=\"Create\", bg=self.color4, font=(self.font, self.normalfontsize),\n activebackground=self.color2, activeforeground=self.color3,\n command=lambda: [self.save_data(username=username_box.get(),\n password=password_box.get())],\n fg=self.color3)\n create_button.place(relheight=0.3, relwidth=0.2, relx=0.4, rely=0.7)\n keyboard.main(self.root)", "title": "" }, { "docid": "b4d7b4aa2798dac597b267e644da267c", "score": "0.54073143", "text": "def create(self):\n if self.project.mode == 'Create':\n self.transfer_from_GUI()\n valid, response = self.project.validate()\n if valid:\n self.project.create()\n else:\n self.error(response)\n else:\n self.error(\"Mode error: Create called inappropriately\")", "title": "" }, { "docid": "02e9c53be1f55e13bea49b82949710f6", "score": "0.54038435", "text": "def create(login, password=\"\"):\n\t\taccounts = tools.get_json_from_file(ACCOUNTS_JSON)\n\n\t\t# add new account\n\t\tif not login in accounts:\n\t\t\taccounts[login] = password\n\t\t\ttools.success(f\"the {login} account was created successfully\")\n\t\telse:\n\t\t\ttools.error(\"already have this account\")\n\t\t\treturn\n\n\t\ttools.save_json_to_file(ACCOUNTS_JSON, accounts)", "title": "" }, { "docid": "ce2f79f1c8d3553cdafe32129960c98a", "score": "0.5383062", "text": "def create_users(self):\n from django.contrib.auth.models import User\n user = User.objects.create_user('red', '', 'red')", "title": "" }, { "docid": "22dfe76de4b651e918b9bec37f6823d1", "score": "0.5380133", "text": "def create_account(self, account_name, amount):\n try:\n amount = int(amount)\n except ValueError:\n logging.info('amount must be a integer')\n return False\n\n card_id = str(uuid.uuid4())\n\n if self.db_obj.admin_create_account(account_name, card_id, amount):\n logging.info('admin create account success')\n r = os.urandom(32)\n rand_key = os.urandom(32)\n\n return xmlrpclib.Binary(r + rand_key + card_id)\n\n logging.info('admin create account failed')\n return False", "title": "" }, { "docid": "2cf9e11ab5e294e54e7a2c75305bc36f", "score": "0.5377042", "text": "def entry():\n args = parse_arguments(sys.argv[1:])\n try:\n print(\"Creating new account:\")\n account_info = ptcaccount2.random_account(args.username, args.password, args.email, args.birthday)\n\n if args.compact:\n print('{}:{}'.format(account_info[\"username\"], account_info[\"password\"]))\n else:\n print(' Username: {}'.format(account_info[\"username\"]))\n print(' Password: {}'.format(account_info[\"password\"]))\n print(' Email : {}'.format(account_info[\"email\"]))\n print('\\n')\n\n # Handle account creation failure exceptions\n except PTCInvalidPasswordException as err:\n print('Invalid password: {}'.format(err))\n except (PTCInvalidEmailException, PTCInvalidNameException) as err:\n print('Failed 
to create account! {}'.format(err))\n except PTCException as err:\n print('Failed to create account! General error: {}'.format(err))", "title": "" } ]
16a93f50d67d8c2e598a35f05f8402a2
Describes all the unique elements in a column
[ { "docid": "8e864e7c847448d8b7f7d6017dfaab5f", "score": "0.7731794", "text": "def describe_unique(df, colname, filter_unnecessary=True):\n print(\"Column name : \", colname)\n unique_elems = pd.unique(df[colname])\n types_of_data = [type(x) for x in unique_elems]\n if filter_unnecessary:\n if len(unique_elems) == df.shape[0]:\n print(\"All values are unique.\")\n return\n print(\"Number of unique elems : \", len(unique_elems))\n print(\"Types of data in col :\", set(types_of_data))\n for idx, uel in zip(range(0, len(unique_elems)), unique_elems):\n print(\" \", str(idx)+\".\", type(uel), \"\\t\",uel)", "title": "" } ]
[ { "docid": "41599ce2227a6e0260c76034890cd48a", "score": "0.7366619", "text": "def unique_values(df):\r\n \r\n print(\"Unique values:\")\r\n for col in df.columns:\r\n print(col + \":\", df[col].unique())", "title": "" }, { "docid": "93e91793f8b371a425e0d984c1727f30", "score": "0.69873977", "text": "def unique_vals(self, column, rows = \"\"):\n if rows == \"\":\n rows - self.rows\n return set([row[column] for row in self.rows])", "title": "" }, { "docid": "ec675095cdc97b87bc60a8b1d32933fe", "score": "0.6951029", "text": "def unique(self, col):\n def make_safe(v):\n if v:\n return v.lower().strip()\n else:\n return \"\"\n\n safe_col = make_safe(col)\n\n di = self.header_di()\n\n location = di[safe_col]\n\n already_used = []\n\n indexes = []\n\n for x, r in enumerate(self.data):\n o = r[location]\n h = hash(o)\n if h in already_used:\n continue\n else:\n indexes.append(x)\n already_used.append(h)\n\n return self.__iter__(indexes=indexes)", "title": "" }, { "docid": "0b0e08f8b446a542fb5ce027fbc3348c", "score": "0.672605", "text": "def unique_vals(rows, col):\n value_list = [row[col] for row in rows]\n #print(\"unique_vals:value_list: \", value_list)\n return set(value_list)", "title": "" }, { "docid": "0167f96d9c50a3b948624a60639e53f6", "score": "0.6594997", "text": "def unique_vals(rows, col):\n row_list= [row[col] for row in rows]\n return set(row_list)", "title": "" }, { "docid": "e56b7f62b1bd33c4a8edad8ba3fa8da1", "score": "0.6541342", "text": "def unique_vals(rows, col):\n return set([row[col] for row in rows])", "title": "" }, { "docid": "e56b7f62b1bd33c4a8edad8ba3fa8da1", "score": "0.6541342", "text": "def unique_vals(rows, col):\n return set([row[col] for row in rows])", "title": "" }, { "docid": "6ca3f687e9bcf11dc27c84bcd7a72bef", "score": "0.65322125", "text": "def nunique(self, \n\t\t\t\tcolumns: list = []):\n\t\treturn (self.aggregate(func = [\"unique\"], columns = columns))", "title": "" }, { "docid": "6aed1d5baa25a3572f6eb67ec11037d9", "score": "0.64843255", "text": "def unique(data: str, col: str):\n df = pd.read_json(data)\n unique = set(df[col])\n print(f\"Unique values in {col}:\")\n print(unique)", "title": "" }, { "docid": "e8b83a940ed5e1ec8d81683d8413c423", "score": "0.64058673", "text": "def count_unique_value(col):\n col = col.values.tolist()\n visited = {}\n for i in range(1, len(col)): # without header\n if str(col[i]) not in visited:\n visited.update({str(col[i]): 1})\n else:\n visited[str(col[i])] += 1\n \n return visited", "title": "" }, { "docid": "46e96f9e9b1737183fd0ea8378431ebc", "score": "0.6390997", "text": "def column_values(df):\n for col in df.columns:\n # unique = np.unique(lits_2016_selected[col])\n unique = df[col].unique()\n # for i in \n if len(unique) < 14:\n print('{}\\n {}, Members: ({})'.format(col,len(unique),unique))\n else:\n print('{}\\n {}, Members: (mora than 10)'.format(col,len(unique)))", "title": "" }, { "docid": "43ae59dfa5a64ce3155d97f1b6843ec0", "score": "0.62577057", "text": "def unique(self, field):\n \n # this is an unsafe sql query\n # but I couldn't get SELECT ? 
FROM self.tabname to work\n sql = \"SELECT DISTINCT \"+field+\" FROM \"+self.name\n result = []\n with get_conn(self.dbfile) as conn:\n cur = conn.cursor() \n cur.execute(sql)\n for row in cur: \n result.append(row[field]) \n return result", "title": "" }, { "docid": "053f748bb03d0c586bf2e6b277d5105a", "score": "0.6215692", "text": "def unique_features(dataframe):\n column_list = dataframe.columns.tolist()\n output_list = []\n output = ''\n useless_list = ['id', 'address', 'account']\n # removing unique rows from the dataset\n dataframe = dataframe.drop_duplicates()\n useless_list = ['id', 'address', 'account']\n for feature_name in column_list:\n unique_values = dataframe[[feature_name]].nunique()\n total_rows = dataframe[feature_name].shape[0]\n # if the feature contains only one value in the whole dataset then it is not needed\n if (unique_values[0] == 1):\n output_list.append(feature_name)\n # if the unique values are equal to the total number of rows of the column then they are unique\n # the name of the feature contains 'ID'\n elif ((unique_values[0] == total_rows) and (dataframe[feature_name].dtype == 'object')): #if the type is string and has many unique values\n output_list.append(feature_name)\n elif((unique_values[0] == total_rows) and (any(i in feature_name.lower() for i in useless_list))): # ID, Address Columns\n output_list.append(feature_name)\n output = ', '.join(output_list)\n if (output == ''):\n output = 'There are no UNIQUE column(s) in this Dataset.\\n'\n else:\n output = 'The column(s) ' + output + ' is/are inconsistent and are advisable to avoid.\\n'\n return output", "title": "" }, { "docid": "665e33043e83d5f4f86d3167a73b4f3a", "score": "0.61615413", "text": "def _unique_key(self, row):\n return '_'.join([str(row[i]) for i in self.unique_by]) if self.unique_by else str(row)", "title": "" }, { "docid": "e1d95113e36d0f680e976c79fb87a503", "score": "0.61174864", "text": "def unique_labels(self):\n return tuple(pd.unique(self.labels))", "title": "" }, { "docid": "c94b219ae8764a27a62e08642465dd89", "score": "0.61128324", "text": "def get_unique_values_in_column(df, col_name):\r\n return df[col_name].unique()", "title": "" }, { "docid": "c54f991953640ced4ba204b8a5db1c04", "score": "0.6101048", "text": "def _get_unique(column, dummy_na):\n if isinstance(column, cudf.core.column.CategoricalColumn):\n unique = column.categories\n else:\n unique = column.unique().sort_values()\n if not dummy_na:\n if np.issubdtype(unique.dtype, np.floating):\n unique = unique.nans_to_nulls()\n unique = unique.dropna()\n return unique", "title": "" }, { "docid": "978bed2e4f664cf13817600c13450500", "score": "0.60621536", "text": "def get_unique_column(data):\n \n # New dictionary to store unique data in each columns\n unique_data = {}\n for col in data.columns:\n unique_data[col] = list(set(data[col]))\n return unique_data", "title": "" }, { "docid": "ba83f8b1e9f2aaab7cc4d6a753d9f38e", "score": "0.60568464", "text": "def unique_values(self) -> str:\n return \" + \".join(\n f\"(a{i+1}.f1 * {10**i})\" for i in range(0, ceil(self.scale()))\n )", "title": "" }, { "docid": "514e48787aa9b71545e1878133685b1b", "score": "0.6044941", "text": "def uniqueValues(name=None, withLengths=0):", "title": "" }, { "docid": "82e8e9ecc5cd3d1a47b9c855d7f269cd", "score": "0.6024046", "text": "def get_uniques(data, feature):\n uniques = data[feature].sort_values().unique()\n print(uniques)\n print(len(uniques))", "title": "" }, { "docid": "ed1c9f3a274d580ca450dd8f045db850", "score": "0.59270555", "text": "def 
unique_vals(df):\n return len(df.drop_duplicates())", "title": "" }, { "docid": "24a6d25bfb9c1f8c83b7e9917703b350", "score": "0.5906624", "text": "def inspect_csv(df):\n print(df.head())\n print(len(df))\n print(np.unique(df['taskID']))\n try:\n print(np.unique(df['pilotID']))\n except KeyError:\n print(np.unique(df['subject']))", "title": "" }, { "docid": "bef6101f4c6e0dbdf656208a574db07d", "score": "0.5888792", "text": "def get_col_info(self, col):\n if self.check_col_name(col):\n messages = [\n f\"Column Name: {col}\",\n f\"Number of unique values: {self.data[col].nunique()}\",\n self.data[col].value_counts(normalize=True)\n ]\n print_message(messages)", "title": "" }, { "docid": "cc894f95aa97e640033fbe916ccbaf47", "score": "0.58669156", "text": "def hasUniqueValuesFor(name):", "title": "" }, { "docid": "075f9bf852acd26baa35bb6e1a80344a", "score": "0.5853541", "text": "def _render_distinct(self):\n\n sql = ''\n if self._distinct:\n sql = ' DISTINCT'\n\n return sql", "title": "" }, { "docid": "cfea241387400c1c66649cc6529866c4", "score": "0.58505565", "text": "def unique_samples(self):\n \"\"\"Get the different sample names.\"\"\"\n if len(self._df.columns.names) > 1:\n return tuple(pd.unique(self.samples))\n else:\n return tuple(self._df.columns)", "title": "" }, { "docid": "9f0424e88d32033359d345b35d08368d", "score": "0.58334184", "text": "def isunique(table, field):\n\n vals = set()\n for v in itervalues(table, field):\n if v in vals:\n return False\n else:\n vals.add(v)\n return True", "title": "" }, { "docid": "df4a6b4aa4e0376b4fc1f5533eaadb30", "score": "0.5823741", "text": "def getDistinctCellTypes(self, column):\n\n cellarray = self.getCellData(column)\n # cells = cellarray\n\n # cell_types = np.array([]) # Distinct cell labels in the cellarray\n\n # # Find distinct cell lables in the cell array\n # while (cells.size != 0):\n # cell_types = np.append(cell_types, cells[0])\n # mask = cells != cells[0]\n # cells = cells[mask]\n\n # cell_types = np.sort(cell_types)\n \n # return cell_types\n return np.unique(cellarray)", "title": "" }, { "docid": "86fa089e50a6e3453f9b6bac7f9d5844", "score": "0.57774496", "text": "def get_unique(self):\n\n return np.unique(self.X, axis=0)", "title": "" }, { "docid": "f9daa54e55fead3a9bbcc30cd8662a73", "score": "0.57375574", "text": "def profile_named_entity(column: pd.Series) -> typing.List[str]:\n\n return column.unique().tolist()", "title": "" }, { "docid": "ff2dd4e773b5000a9eeb174a4208d9f1", "score": "0.5721326", "text": "def check_unique_rows(df, column):\n print(\"Checking unique subject/admissions using column=%s\" % column)\n df_user_counts = df.groupby(['subject_id', 'hadm_id']).size().reset_index(name='counts')\n df_user_counts_with_column = df.groupby(['subject_id', 'hadm_id', column]).size().reset_index(name='counts')\n df_user_counts_len = len(df_user_counts.index)\n df_user_counts_with_column_len = len(df_user_counts_with_column.index)\n assert df_user_counts_len == df_user_counts_with_column_len, \"Duplicates detected: subject/admission counts=%d \" \\\n \"while subject/admission/%s counts=%d\" \\\n % (df_user_counts_len, column,\n df_user_counts_with_column_len)", "title": "" }, { "docid": "cba8ffda4e0b5662b73a87188f95fb67", "score": "0.56998116", "text": "def __unique(self,a):\n ar = set(a)\n return list(ar)", "title": "" }, { "docid": "7c28ff902b9276209efb06f95da7d296", "score": "0.5681367", "text": "def peek(df: pd.DataFrame, columns: List[str]=None)->pd.DataFrame:\n print(df.shape)\n print(df.columns)\n if columns:\n 
print(\"\\nUnique Values:\")\n for column in columns:\n num_unique = df[column].nunique()\n print(f\"{column}: {num_unique}\")\n return df.head()", "title": "" }, { "docid": "66547d1a70d5515e47149e3ab7203d81", "score": "0.5672598", "text": "def unique(self, col_or_col_list):\n\n if isinstance(col_or_col_list, list):\n col_is_list = True\n col_list = col_or_col_list\n else:\n col_is_list = False\n col_list = [col_or_col_list]\n\n output = []\n\n for col in col_list:\n\n if self.cache_valid(col):\n # retrieve values from existing disk-based factorization\n col_values_rootdir = self[col].rootdir + '.values'\n carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')\n values = list(carray_values)\n else:\n # factorize on-the-fly\n _, values = ctable_ext.factorize(self[col])\n values = values.values()\n\n output.append(values)\n\n if not col_is_list:\n output = output[0]\n\n return output", "title": "" }, { "docid": "190ad2bd1b5c52ba6cd02913bdae6a5d", "score": "0.56663316", "text": "def _unique(X):\n if X is None:\n return None, []\n\n dataframe = type(X) is pd.DataFrame\n if dataframe:\n columns = X.columns\n X = X.values\n\n assert type(X) is np.ndarray, 'must pass in a numpy ndarray or dataframe'\n uX, inds = np.unique(X, axis=0, return_index=True)\n\n if dataframe:\n uX = pd.DataFrame(data=uX, columns=columns, index=np.arange(len(inds)))\n\n\n return uX, inds", "title": "" }, { "docid": "44a121a620d4a0799da472b9c56d76b0", "score": "0.5658793", "text": "def unique_keys(self):\n return [key\n for key in self.find(UniqueKeyImage)\n if not key.is_primary and key.origin_columns == [self]]", "title": "" }, { "docid": "ddff6fdc5a3dfad86c7d017e98bb0e6f", "score": "0.56587267", "text": "def test_distinct_values(self):\n t1 = Table(header=self.t1_header, rows=self.t1_rows)\n self.assertEqual(len(t1.distinct_values(\"chrom\")), 2)\n self.assertEqual(len(t1.distinct_values(\"stableid\")), 10)\n self.assertEqual(len(t1.distinct_values(\"length\")), 10)\n\n t2 = Table(header=self.t2_header, rows=self.t2_rows)\n self.assertEqual(len(t2.distinct_values(\"id\")), 5)\n self.assertEqual(len(t2.distinct_values(\"foo\")), 3)\n self.assertEqual(len(t2.distinct_values(\"bar\")), 5)", "title": "" }, { "docid": "c403e7c53daca640611788e7402a71b7", "score": "0.56520337", "text": "def __init__(self):\r\n self.col = set()", "title": "" }, { "docid": "5b66f322aba679bcaf920550cdb8829e", "score": "0.5651827", "text": "def node_extractor(dataframe, *columns):\n data_list = [dataframe[column].unique().tolist() for column in columns]\n\n return list(set(itertools.chain.from_iterable(data_list)))", "title": "" }, { "docid": "b6b638afb3a7374bd59e7bae8917c19c", "score": "0.5650407", "text": "def unique_chars(self, dataset):\n return sorted(list(set(dataset)))", "title": "" }, { "docid": "ac50b449b464d352186c2fdef2e3447c", "score": "0.564922", "text": "def printCategoricalValues(df, colCat, logger):\n # Checking how many classes that can be used\n tmp = [df.select(countDistinct(c).alias(c)).collect()[0] for c in colCat] \n print(\"Unique column values:\", tmp)\n\n logger.write2file(\"Categorical values\", str(tmp))", "title": "" }, { "docid": "dd4e127013ac5a735ec278fb622c3358", "score": "0.5604848", "text": "def all_unique_values(self):\n return self.recent_unique_values(limit=None)", "title": "" }, { "docid": "7b3d5b2742695e4fef277931e6f32df5", "score": "0.56022394", "text": "def unique_words(self):\n return [key for key, val in self.item() if val==1]", "title": "" }, { "docid": "cd6a3e0cdd8878f43d4027703222ebad", 
"score": "0.5599225", "text": "def column_info(data, column):\n logging.info(f\" {column} Columns \".center(30, \"=\"))\n\n logging.info(data[column].value_counts())\n logging.info(f\"\\nUnique values {len(data[column].unique())}\\n\")\n logging.info(f\"Null values {data[column].isnull().sum()}\\n\")", "title": "" }, { "docid": "b3db9d2daa57baf199043448a0c94915", "score": "0.55952936", "text": "def unique(self) -> ParentType:\n\n if is_list_dtype(self._column.children[1].dtype):\n raise NotImplementedError(\"Nested lists unique is not supported.\")\n\n return self._return_or_inplace(\n distinct(self._column, nulls_equal=True, nans_all_equal=True)\n )", "title": "" }, { "docid": "79a372115008c62943726b562f294aac", "score": "0.55911267", "text": "def unique_all(x):\n UniqueAll = namedtuple('UniqueAll', ['values', 'indices', 'inverse_indices', 'counts'])\n return UniqueAll(*_mx_nd_np.unique(x, True, True, True))", "title": "" }, { "docid": "c913c1d170c7890459c1550b152ea04f", "score": "0.5564768", "text": "def unique_rows(data):\n uniq = np.unique(data.view(data.dtype.descr * data.shape[1]))\n return uniq.view(data.dtype).reshape(-1, data.shape[1])", "title": "" }, { "docid": "9611b1e5bebad50bcc68d226df1599af", "score": "0.5550889", "text": "def val_set(data, column):\r\n return set(val_list(data, column))", "title": "" }, { "docid": "c1b7f86d7154a4f211e5f5c3a6d4f5e4", "score": "0.5543263", "text": "def columns(self) -> Iterable[str]:\n return [str(val) for val in attr.asdict(self).values() if val]", "title": "" }, { "docid": "8c30dec9ca671ff1fad07574ef0c08f3", "score": "0.5536655", "text": "def uniq(table):\n if len(table) <= 1 :\n return table\n else :\n return auxUniq(uniq(table[:len(table)//2]),uniq(table[len(table)//2:]))", "title": "" }, { "docid": "770dbd06bf49ab39f83f0534c8c69a85", "score": "0.5535826", "text": "def unique_samples(self):\n return tuple(pd.unique(self.samples))", "title": "" }, { "docid": "d9dcb0a31d06de91a73fb089caad9f5a", "score": "0.5535133", "text": "def count_unique(df, col_name):\n count = df[col_name].nunique()\n return count", "title": "" }, { "docid": "a6c141250eb9590570408ea221460b67", "score": "0.5526623", "text": "def get_unique(self):\n return bool(self.get_singleprop(\"unique\", False))", "title": "" }, { "docid": "2a65521d186bb8f05e5a0e07abb5fe69", "score": "0.55107075", "text": "def unique_gen(test_col, df_2):\n series_data = df_2[test_col]\n column_list = []\n # Get unique entries raw, this will have redundancies bc it is order dependent\n tmp = series_data.unique()\n for i in range(0,len(tmp)):\n column_list.append(tmp[i])\n\n # Remove all commas\n column_list = [w.replace(',', '') for w in column_list]\n # Replace special chars with commas\n column_list = [w.replace('_', ',') for w in column_list]\n column_list = [w.replace('.', ',') for w in column_list]\n\n # Turn repeated commas into just one comma\n commas = ',,'\n for i in range(0,100):\n column_list = [w.replace(commas, ',') for w in column_list]\n commas = commas + ','\n column_list = [w.replace(',,,', ',') for w in column_list]\n column_list = [w.replace(',,', ',') for w in column_list]\n\n # Remove the first comma\n column_list = [w.lstrip(',') for w in column_list]\n # Remove the last comma\n column_list = [w.rstrip(',') for w in column_list]\n\n # Separate out entries with multiple entries\n column_list = [w.split(',') for w in column_list]\n\n # Organize into one long list where redundancies are separated\n dummy_list = []\n for i in range(0,len(column_list[:])):\n for j in 
range(0,len(column_list[i][:])):\n dummy_list.append(column_list[i][j])\n\n # Get unique values\n unique = list(set(dummy_list))\n\n if test_col == 'drug_char':\n result = []\n result = [drug_char_dict[l1] for l1 in unique]\n unique = result\n\n if test_col == 'admin_route':\n result = []\n result = [admin_route_dict[l1] for l1 in unique]\n unique = result\n if test_col == 'unii':\n result = []\n result = [unii_dict[l1] for l1 in unique]\n unique = result\n return unique", "title": "" }, { "docid": "6fb2195c60019d6c48d7d8c717bf6bf7", "score": "0.55106294", "text": "def unique_values(x):\n return _mx_nd_np.unique(x, False, False, False)", "title": "" }, { "docid": "32315f65536d772f258488e414b02f5f", "score": "0.5478907", "text": "def extract_unique(dataframe, column):\n if dataframe.empty:\n return None\n if len(dataframe[column].unique()) > 1:\n raise ValueError(f\"Non-unique {column} encountered.\")\n return dataframe[column].unique().item()", "title": "" }, { "docid": "a5cb073c469cc05553650497fe7d0a7d", "score": "0.5472207", "text": "def unique_collect_fields(collect):\n return set([collect_field for collect_field, _ in collect])", "title": "" }, { "docid": "72fdc3d72df5f2eb447b5cf74c94a9c4", "score": "0.5463528", "text": "def return_unique_pairs(df, column_names):\n return df.groupby(column_names).size().reset_index().drop([0], axis=1)", "title": "" }, { "docid": "9f46020866758f7e9e10e6a7e5870403", "score": "0.5457184", "text": "def get_unique(self, table=None, field=None, **query):\n print('deprecation candidate')\n unique_values = {}\n for name in self.tables:\n if table is not None and name != table:\n continue\n df = self.get(name, **query)\n if field is not None:\n unique_values[name] = df[field].unique()\n else:\n unique_values[name] = {field_name: df[field_name].unique() for field_name in df.columns}\n\n return unique_values if table is None else unique_values[table]", "title": "" }, { "docid": "19c61cdd701c29795693ca2b8f55b233", "score": "0.5440525", "text": "def test___init__one_column(self):\n # Run\n instance = Unique(columns='a')\n\n # Assert\n assert instance.columns == ['a']", "title": "" }, { "docid": "ac6742e984886b3e075e47e4180da0c5", "score": "0.5421553", "text": "def count_unique(\n self\n ) -> int:\n count = np.unique(\n np.array(\n [np.array(list(entity[0].values()))\n for entity in self.population]\n ),\n axis=0\n )\n return len(count)", "title": "" }, { "docid": "789330ff04c1ee899c101e6cdb546e9c", "score": "0.54187334", "text": "def _get_unique(self, attr):\n unique = set(self.get_all(attr, astype=dict).values())\n if len(unique) > 1:\n return 'mixed'\n else:\n return tuple(unique)[0] #set doesn't support indexing ", "title": "" }, { "docid": "be5c987a1ce9278c07c98b3346e93086", "score": "0.5413731", "text": "def _spark(\n cls,\n column: pyspark.Column,\n **kwargs,\n ) -> pyspark.Column:\n return F.countDistinct(column)", "title": "" }, { "docid": "badabccd013d2cc5fd63bd79bfb5b11f", "score": "0.5412394", "text": "def distinct(self, cols):\n return self._statement(cols, \"DISTINCT ON\", \"Parameter of DISTINCT statement \"\\\n \"has to be string or iterable (list, tuple, set)\")", "title": "" }, { "docid": "c48181ce3c94889e20ccc2d86333094c", "score": "0.538494", "text": "def uniq_pivots(seq):\n return sorted(list(set(seq)))", "title": "" }, { "docid": "320b6caaa7228b613b62b51d14c746d1", "score": "0.53712326", "text": "def get_unique(self):\n return None", "title": "" }, { "docid": "320b6caaa7228b613b62b51d14c746d1", "score": "0.53712326", "text": "def 
get_unique(self):\n return None", "title": "" }, { "docid": "ea32c7a2dceea431a8fbf461858beea8", "score": "0.5362141", "text": "def unique_column(df, column):\n if column not in df.columns:\n raise TestError(f\"DataFrame doesn't have a `{column}` column\")\n return len(df[column].unique()) == 1", "title": "" }, { "docid": "0b5f954c51979c53884e55e1441cb839", "score": "0.53550786", "text": "def auxUniq(table1,table2):\n \n temp = set()\n \n for i in table1 :\n temp.add(i)\n \n for i in table2 :\n temp.add(i)\n \n return list(temp)", "title": "" }, { "docid": "99fda31a0a3d852d4e56f3c3d78c3d29", "score": "0.5343937", "text": "def unique_name_key(df):\n df['uid'] = df['first'] + df['m'] + df['last']\n \n return df", "title": "" }, { "docid": "7c37539ff1f661f4d2caeae733de856f", "score": "0.5325069", "text": "def test_spec_to_column_unique(unique: bool):\n column = column_factory._spec_to_column(spec={\"type\": \"number\", \"x-unique\": unique})\n\n assert column.unique == unique", "title": "" }, { "docid": "3c3ee0fa96dbcbdf239f4ba0b8d145d6", "score": "0.53228927", "text": "def unique(atuple):\n \n new_atuple=()\n for i in range(len(atuple)):\n if atuple[i] not in new_atuple:\n \n new_atuple=new_atuple+(atuple[i],)\n \n new_atuple=tuple(sorted(new_atuple))\n \n return new_atuple", "title": "" }, { "docid": "8cfc37d1e8a4a57711dd93d93fc56c8d", "score": "0.53222394", "text": "def unique(lar, return_index=False, return_inverse=False):\n return np.unique(lar.x, return_index, return_inverse)", "title": "" }, { "docid": "0469aedb23840338cedc0b2a96c5c426", "score": "0.5286481", "text": "def duplicated(dataframe):\n duplicated_rows = dataframe[dataframe.duplicated()]\n no_duplicated_rows = duplicated_rows.shape[0]\n if(no_duplicated_rows == 0):\n output = 'None'\n else:\n output = str(no_duplicated_rows) +\\\n ' row(s) out of ' + str(dataframe.shape[0]) + ' row(s) are duplicated'\n print('\\nThe row(s) that are duplicated:\\n')\n display(duplicated_rows)\n return output", "title": "" }, { "docid": "f1bab7c7387056c5d79d538b661f8445", "score": "0.5285701", "text": "def unique_mapping(x):\n \n if type(x) is pd.DataFrame:\n cols = x.columns\n x = x.values\n else:\n cols = None\n s = x.mean(axis=0)\n med = np.median(x, axis=0)\n d = dict()\n uniques = dict()\n for i in range(x.shape[1]):\n tx = x[:, i]\n k = (s[i], med[i])\n if k not in d:\n d[k] = [i]\n uniques[i] = list()\n else:\n lt = d[k]\n try:\n j = next(filter(lambda y: np.all(x[:, y] == tx), lt))\n uniques[j].append(i)\n except StopIteration:\n d[k].append(i)\n uniques[i] = list()\n if cols is not None:\n uniques = {cols[i]: [cols[j] for j in lt] for i, lt in uniques.items()}\n return uniques", "title": "" }, { "docid": "ecab5f00f2cf1f53af59bf68b86520e9", "score": "0.5281325", "text": "def get_unique_constraints(cls, object_class):\n constraints = object_class.__table__.constraints\n unique = filter(lambda x: isinstance(x, UniqueConstraint), constraints)\n # we only handle single column unique constraints\n unique_columns = [u.columns.keys() for u in unique if len(u.columns) == 1]\n return set(sum(unique_columns, []))", "title": "" }, { "docid": "b787d52b5a23eef729da2d1712ff2c04", "score": "0.52791476", "text": "def uniqueEntries(v):\n\t\t\t\n\t\tunique_entry = []\n\t\tdupicate_enteries = []\n\t\tfor i in v:\n\t\t\tif i in unique_entry:\n\t\t\t\tdupicate_enteries.append(i)\n\t\t\telse:\n\t\t\t\tunique_entry.append(i)\n\t\treturn unique_entry,dupicate_enteries", "title": "" }, { "docid": "1e3a5616807941842f9c9b8b008ec7a4", "score": "0.5278792", "text": 
"def extract_unique_ids(dataset: CDFMDataset, col: Any = EID) -> Set[EntityID]:\n first_record = _extract_first(dataset)\n loc_idx = first_record._fields.index(col)\n return {row[loc_idx] for row in dataset}", "title": "" }, { "docid": "68cdfe0138477101134b876a41bfef14", "score": "0.5274542", "text": "def get_distinct_value(self, column):\n query = \"select distinct {c} from {t} {f} order by {c}\".format(t=self.table, col=self.add_lower(column), c=column, f=self.add_where_condition())\n # print(query)\n try:\n return self.db()(query)\n except:\n sys.exit(sys.exc_info())", "title": "" }, { "docid": "50fe7f215f489517796d550f64164d4e", "score": "0.52697426", "text": "def count_total_uniq_insertions(data, columnid):\n return len(data[columnid].unique())", "title": "" }, { "docid": "81e91a376109e8601ab16d56ea0d469a", "score": "0.52637863", "text": "def unique(a):\n return list(set(a))", "title": "" }, { "docid": "8fdb69ab486f601d7579a90a2a26b987", "score": "0.5262445", "text": "def _clean_uid(self, col_nm, idx_vals):\n\n #TODO: list repeats?\n if len(idx_vals) != len(set(idx_vals)):\n self._add_error(message='Duplicate UID values detected', column=col_nm, index='N/A')\n\n col = []\n for v in idx_vals:\n if is_nan(v):\n self._add_error(message='Bad UID value for {}. It is blank and is being skipped.'\n .format(str(v)), column=col_nm, index='Missing')\n else:\n col.append(str(v))\n\n #TODO: list repeats?\n if len(idx_vals) != len(set(idx_vals)):\n self._add_error(message='Duplicate UID values detected', column=col_nm, index='N/A')\n\n return col", "title": "" }, { "docid": "8f5234dad5c280d50d05ed2708cba5ce", "score": "0.52600485", "text": "def listUnique(self, seq):\n seen = {}\n result = []\n for item in seq:\n marker = item\n if marker in seen:\n continue\n seen[marker] = 1\n result.append(item)\n return result", "title": "" }, { "docid": "86d631d57a1f5c29b8adbca82fb70f14", "score": "0.5256373", "text": "def check_unique(dataframe, identifier_list):\n unique_identifier = dataframe.groupby(by = identifier_list).count().iloc[:, 0]\n unique_identifier.name = 'Count'\n unique_identifier = unique_identifier[unique_identifier > 1]\n return unique_identifier.to_frame()", "title": "" }, { "docid": "86374d140b4aa7d1a92180ba54895c4e", "score": "0.5251841", "text": "def get_variations_of(self, column: Union[str, Attribute]) -> List[Any]:\n col = getattr(column, 'col_name', column)\n return list(set(self.data[col]))", "title": "" }, { "docid": "b414b5d4a06799a7fc6817e278102452", "score": "0.5251759", "text": "def feature_group_nunique(df, gcol, acol):\n if isinstance(acol, list) or len(acol) >0:\n df = df.groupby(gcol).agg(dict(map(lambda x:(x,'nunique'), acol)))\n return df", "title": "" }, { "docid": "f36b541d412020a12b1ea7bff2ac1894", "score": "0.52429515", "text": "def check_unique(self):\n lst_nums = list(self.possible_nums)\n for row in self.rows:\n for num in row:\n if num in lst_nums:\n lst_nums.remove(num)\n\n if len(lst_nums) <= 1:\n return True\n else:\n return False", "title": "" }, { "docid": "80c26d63db083b1066871441ec3ec6d5", "score": "0.5236875", "text": "def genUniqueCcaList(self):\n for rows in self.__dict.values():\n for cols in rows:\n if not cols.get(self.__headers[3]) in self.__ccaList:\n self.__ccaList.append(cols.get(self.__headers[3]))\n return self.__ccaList", "title": "" }, { "docid": "bb9e6800d0f8144ba3802fc4a3331c3f", "score": "0.5232816", "text": "def metric_names(self) -> Set[str]:\n return set() if self.df.empty else set(self.df[\"metric_name\"].values)", "title": "" }, { 
"docid": "b21396d51edd9c929f85f5f8704ed99e", "score": "0.5229084", "text": "def count_by_column(column_to_count, unique_val_column):\n quantities = []\n for value in unique_val_column: \n quantity_value = 0 \n for column_value in column_to_count:\n if (column_value == value):\n quantity_value += 1 \n quantities.append(quantity_value)\n return [unique_val_column, quantities]", "title": "" }, { "docid": "d7cac0ec8103c89cb09edbd1af4d030e", "score": "0.52267", "text": "def names(self):\n return set([str(n) for n in self])", "title": "" }, { "docid": "a5a7ec2a39dc089e076a643dece9685f", "score": "0.52253777", "text": "def unique_tuples(k, enumerated=False):\n\n # Upon initial run, the database may not exist yet and trigger a ProgrammingError when queried.\n try:\n if PushResult.objects.count() == 0:\n unique_values = []\n else:\n unique_values = PushResult.objects.order_by().values_list(k).distinct()\n except ProgrammingError:\n unique_values = []\n\n vs = (('*', '*', ), )\n for i, v in enumerate(sorted(unique_values)):\n if enumerated:\n vs += ((str(i), ) + v, )\n else:\n vs += (v + v, )\n return vs", "title": "" }, { "docid": "8bce8b58e58eb4218910aad0e3129c6e", "score": "0.52245337", "text": "def extractUniquePeptides(self) :\n uniquePep = []\n uniqueHash = set([])\n for g in self.items :\n assert g.peptideHash is not None\n if g.peptideHash not in uniqueHash :\n uniqueHash.add(g.peptideHash)\n uniquePep.append((g.peptideHash, g.peptideSeq))\n return uniquePep", "title": "" }, { "docid": "4c97f93c759beb5e15eccf066d7d3584", "score": "0.52179044", "text": "def unique(seq):\n return list(set(seq))", "title": "" }, { "docid": "29e1ca22659e0584dca8e72cad785bbe", "score": "0.5212877", "text": "def test_unique():\r\n itr = [6, 1, 2, 1, 7, 41.2, '41.2', 1, '41.2']\r\n unique_list = []\r\n for elem in util.unique(itr):\r\n unique_list.append(elem)\r\n assert unique_list == [6, 1, 2, 7, 41.2, '41.2']", "title": "" }, { "docid": "22c5d9f81f73007cf68bf368cc88399b", "score": "0.5201587", "text": "def check_distinct_events(df):\n orig_rows = len(df)\n df_counts = df.groupby(['subject_id', 'hadm_id']).size().reset_index(name='counts')\n unique_counts = len(df_counts)\n if not orig_rows == unique_counts:\n print(\"WARNING: Orig row cnt=%d but unique rows by subject/hadmid cnt=%d\" %\\\n (orig_rows, unique_counts))", "title": "" }, { "docid": "1866a69139c3a6dfafa829ae2265aa92", "score": "0.5201028", "text": "def columns(self):\n return [\n x[0]\n for x in self.m80.db.cursor()\n .execute(\n \"\"\"\n SELECT DISTINCT(key) FROM metadata;\n \"\"\"\n )\n .fetchall()\n ]", "title": "" }, { "docid": "c41ffb4fadc6606637cd0638c5eb7c12", "score": "0.5199095", "text": "def get_distinct_count(self, column):\n query = \"\"\" select count(distinct {c}) from {t} {f} \"\"\".format(c=column, t=self.table, f=self.add_where_condition())\n # print(query)\n try:\n return self.db()(query)\n except:\n sys.exit(sys.exc_info())", "title": "" }, { "docid": "c3a0b9a78803b4c10fbca34a8a36d921", "score": "0.5192491", "text": "def nunique(self, axis=0, dropna=True): # noqa: PR01, RT01, D200\n axis = self._get_axis_number(axis)\n return self._reduce_dimension(\n self._query_compiler.nunique(axis=axis, dropna=dropna)\n )", "title": "" } ]
fdc72e8ee05e22ce74abf4fb861fafb1
Plot the simulation results and save the plots
[ { "docid": "7821ee5577b36c3b849205fa9f7f2495", "score": "0.0", "text": "def plot_results_parameters(matrix_averages_infected, matrix_averages_death, matrix_averages_recovery, n_nodes):\n\n\tfig = plt.figure(figsize = (14, 8))\n\t\n\t######## Infections subplots ########\n\tax1 = fig.add_subplot(131)\n\tax1.set_title('Infections')\n\n\t# plot each simulation\n\tfor i in range(n_parameters_combinations):\n\t\tax1.plot(matrix_averages_infected[i]/n_nodes, color='b', linewidth=1,alpha = 0.8)\n\tax1.plot(data_infections, color='m', label='data')\n\tax1.legend()\n\t# plot lockdowns\n\t#for i in range(len(days_lockdown_start)):\n\t#\tax1.axvline(days_lockdown_start[i], color='r', linewidth=0.5,alpha = 0.5)\n\t#\tax1.axvline(days_lockdown_end[i], color='r', linewidth=0.5,alpha = 0.5)\n\n\t#for i in range(len(day_school_close)):\n\t#\tax1.axvline(day_school_close[i], color='m', linewidth=0.5,alpha = 0.5)\n\t#\tax1.axvline(day_school_open[i], color='m', linewidth=0.5,alpha = 0.5)\n\n\t######## Death subplots ########\n\tax2 = fig.add_subplot(132)\n\tax2.set_title('Deaths')\n\n\t# plot each simulation\n\tfor i in range(n_parameters_combinations):\n\t\tax2.plot(matrix_averages_death[i]/n_nodes, color='r', linewidth=1,alpha = 0.8)\n\tax2.plot(n_nodes*data_death, color='m', label='data')\n\tax2.legend()\n\t\n\t# plot lockdowns\n\t#for i in range(len(days_lockdown_start)):\n\t#\tax2.axvline(days_lockdown_start[i], color='r', linewidth=0.5,alpha = 0.5)\n\t#\tax2.axvline(days_lockdown_end[i], color='r', linewidth=0.5,alpha = 0.5)\n\n\t#for i in range(len(day_school_close)):\n\t#\tax2.axvline(day_school_close[i], color='m', linewidth=0.5,alpha = 0.5)\n\t#\tax2.axvline(day_school_open[i], color='m', linewidth=0.5,alpha = 0.5)\n\n\t######## Infections subplots ########\n\tax3 = fig.add_subplot(133)\n\tax3.set_title('Recoveries')\n\n\t# plot each simulation\n\tfor i in range(n_parameters_combinations):\n\t\tax3.plot(matrix_averages_recovery[i]/n_nodes, color='g', linewidth=1,alpha = 0.8)\n\n\t# plot lockdowns\n\t#for i in range(len(days_lockdown_start)):\n\t#\tax3.axvline(days_lockdown_start[i], color='r', linewidth=0.5,alpha = 0.5)\n\t#\tax3.axvline(days_lockdown_end[i], color='r', linewidth=0.5,alpha = 0.5)\n\n\t#for i in range(len(day_school_close)):\n\t#\tax3.axvline(day_school_close[i], color='m', linewidth=0.5,alpha = 0.5)\n\t#\tax3.axvline(day_school_open[i], color='m', linewidth=0.5,alpha = 0.5)\n\n\tplt.savefig('Results_parameters.pdf')\n\treturn", "title": "" } ]
[ { "docid": "4ff24f98596cd6e1e5d3ff8afc6c8ff7", "score": "0.769103", "text": "def plot_results():\r\n fig_format = 'png'\r\n plt.figure()\r\n plt.plot(return_history)\r\n plt.xlabel('Iteration')\r\n plt.ylabel('Return')\r\n plt.title('Return (Discounted Cumulative Reward) Convergence')\r\n plt.grid()\r\n plt.savefig('return_convergence.%s' % fig_format, format=fig_format)\r\n plt.figure()\r\n plt.imshow(rl_algorithm.q)\r\n plt.xlabel('Action')\r\n plt.ylabel('State')\r\n plt.title('Q (Action-Value) Table')\r\n plt.grid()\r\n plt.colorbar()\r\n plt.savefig('action_value_table.%s' % fig_format, format=fig_format)\r\n policy = compute_greedy_policy_as_table(rl_algorithm.q)\r\n plt.figure()\r\n plt.imshow(policy)\r\n plt.xlabel('Action')\r\n plt.ylabel('State')\r\n plt.title('Greedy Policy Table')\r\n plt.grid()\r\n plt.colorbar()\r\n plt.savefig('greedy_policy_table.%s' % fig_format, format=fig_format)\r\n plt.show()", "title": "" }, { "docid": "f962ed3ccbda7c900ff7187a1c8aacd7", "score": "0.7398579", "text": "def show_result(self):\n\n self.plot_result()\n plt.show()", "title": "" }, { "docid": "eabba57cdf75ce8a3da6d89d73cd5e34", "score": "0.73112035", "text": "def view_results(tau, speed):\n hours = np.arange(24)\n plt.figure(figsize=(20,10))\n plt.subplot(121)\n plt.plot(hours, speed, '-o')\n plt.title('Speed estimations in function of hours of the day')\n plt.subplot(122)\n plt.plot(hours, tau, '-o')\n plt.title('Time of picking up estimations in function of hours of the day')\n plt.savefig('results.png')", "title": "" }, { "docid": "71588d1a662b7209a970178020b932e8", "score": "0.73090416", "text": "def plot_results(results):\n plt.plot(results[0], results[1], label=\"gaussian\")\n plt.plot(results[0], results[2], label=\"thomas\")\n plt.title(\"Computation Time Gaussian vs. Thomas Algorithms\")\n plt.xlabel(\"Coefficient matrix dimension (n x n) [-]\")\n plt.ylabel(\"CPU time [ms]\")\n plt.legend()\n plt.savefig(\"problem2.pdf\")", "title": "" }, { "docid": "5c6f7062fa086b80189133919cdb09cd", "score": "0.722761", "text": "def final_results(self, print_results, plot_results):\n if print_results:\n print('Observed difference:', self.obs_diff)\n print('P-Value:', self.p)\n print('Failed to reject' if self.p > 0.05 else 'Rejected', 'the null hypothesis.')\n \n if plot_results:\n fig = plt.figure(figsize=(9, 3))\n plt.subplot(1, 2, 1)\n self.plot_sampling_dist()\n plt.subplot(1, 2, 2)\n self.plot_null_dist()", "title": "" }, { "docid": "1833915f94d8e55525dd0e487e47c53d", "score": "0.71806043", "text": "def plot_result(results, names):\n for i, r in enumerate(results):\n plt.plot(range(len(r)), r, label=names[i])\n plt.legend()\n plt.title(\"KMNIST\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Test accuracy\")\n plt.grid(True)\n plt.tight_layout()\n plt.show()\n plt.savefig(\"./part_2_plot.png\")", "title": "" }, { "docid": "afbcb7b41325dc645e4d58adea12d565", "score": "0.7144685", "text": "def makePlots( self, save = False) :\n\n # Plots and printouts only on root process.\n if rank != 0:\n return\n\n #### Plot intensity histogram.\n self._makeIntensityHistogram()\n pyplot.draw()\n pyplot.savefig( \"%s_%s%s_run%d_intensity_histogram.pdf\" % (self.__prefix, self.station, self.experiment, self.run))\n\n #### Intensity vs. 
event index.\n self._makeIntensityVsEventPlot()\n pyplot.draw()\n pyplot.savefig( \"%s_%s%s_run%d_intensity_vs_event.pdf\" % (self.__prefix, self.station, self.experiment, self.run))\n\n ### Angular integrated sum image.\n #self._makeRadialIntensityHistogram()\n #pyplot.savefig( \"%s_%s%s_run%d_radial_intensity.pdf\" % ( self.__prefix, self.station, self.experiment, self.run))\n\n ### Summed image.\n #self._makeSummedImagePlot()\n #pyplot.savefig( \"%s_%s%s_run%d_image_sum.pdf\" % ( self.__prefix, self.station, self.experiment, self.run))\n\n ### Histogram over peak radii.\n self._makeBraggTracePlot()\n pyplot.draw()\n pyplot.savefig( \"%s_%s%s_run%d_bragg_histogram.pdf\" % ( self.__prefix, self.station, self.experiment, self.run))\n\n ### Pulse energy.\n self._makePulseEnergyPlot()\n pyplot.draw()\n pyplot.savefig( \"%s_%s%s_run%d_pulse_energy.pdf\" % ( self.__prefix, self.station, self.experiment, self.run))\n\n ### Weight in ROI vs. event.\n self._makePeaksInROIPlot()\n pyplot.draw()\n pyplot.savefig( \"%s_%s%s_run%d_peaks_in_roi.pdf\" % ( self.__prefix, self.station, self.experiment, self.run))\n\n ### Intensity vs. pulse energy correlation plot.\n self._makeScatterPulseEnergyCorrelationPlot()\n pyplot.draw()\n pyplot.savefig( \"%s_%s%s_run%d_intensity_vs_pulse_energy.pdf\" % ( self.__prefix, self.station, self.experiment, self.run))\n\n ### A/B ratio vs. event no.\n self._makeABRatioPlot()\n pyplot.draw()\n pyplot.savefig( \"%s_%s%s_run%d_ratioAB_vs_event.pdf\" % ( self.__prefix, self.station, self.experiment, self.run))\n\n if self.__interactive:\n pyplot.show()\n raw_input(\"Press key to end this analysis run. (Closes all plot windows)\")", "title": "" }, { "docid": "d8c22b1dd6b80f8118bb7370705999ff", "score": "0.7116905", "text": "def showPlot1():\n data=[]\n data.append(0)\n for i in xrange(1,11):\n data.append(runSimulation(i,1,20,20,.8,100,StandardRobot))\n pylab.plot(data)\n pylab.title('Ticks to Clean a 20x20 room to 80%')\n pylab.xlabel('Robots')\n pylab.ylabel('Ticks')\n pylab.show()", "title": "" }, { "docid": "1575de75785c62764171159252a36dd5", "score": "0.70946985", "text": "def _print_graphs(self, traj):\n print_folder = self._make_folder(traj)\n\n # If we use BRIAN's own raster_plot functionality we\n # need to sue the SpikeMonitor directly\n plt.figure()\n plt.scatter(self.spike_monitor.t, self.spike_monitor.i, s=1)\n plt.xlabel('t')\n plt.ylabel('Exc. 
Neurons')\n plt.title('Spike Raster Plot')\n\n filename=os.path.join(print_folder,'spike.png')\n\n print('Current plot: %s ' % filename)\n plt.savefig(filename)\n plt.close()\n\n fig=plt.figure()\n self._plot_result(traj, 'monitors.V')\n filename=os.path.join(print_folder,'V.png')\n print('Current plot: %s ' % filename)\n fig.savefig(filename)\n plt.close()\n\n plt.figure()\n self._plot_result(traj, 'monitors.I_syn_e')\n filename=os.path.join(print_folder,'I_syn_e.png')\n print('Current plot: %s ' % filename)\n plt.savefig(filename)\n plt.close()\n\n plt.figure()\n self._plot_result(traj, 'monitors.I_syn_i')\n filename=os.path.join(print_folder,'I_syn_i.png')\n print('Current plot: %s ' % filename)\n plt.savefig(filename)\n plt.close()\n\n if not traj.analysis.show_plots:\n plt.close('all')\n else:\n plt.show()", "title": "" }, { "docid": "6cf1da767f8447c4665ac275db3fcec1", "score": "0.7046957", "text": "def showPlot1(): \n # raise NotImplementedError\n x_axis = list()\n y_axis = list()\n for i in range(1, 11):\n \n x_axis.append(i)\n y_axis.append(runSimulation(i, 1.0, 20, 20, .8, 10, StandardRobot))\n\n plt.plot(x_axis, y_axis, 'bo')\n plt.xlabel('Robot Numbers')\n plt.ylabel('Avg Time to Clean a Room')\n plt.title('Robot Numbers versus Avg Cleaning Time')\n plt.show()", "title": "" }, { "docid": "ed7685dfe92b2171fa396b6a181fbc91", "score": "0.6985788", "text": "def results(self, params):\n self.show_results.plot(self.data_processed, params)", "title": "" }, { "docid": "61eeec2edef7edd3059833bf3226e853", "score": "0.69760585", "text": "def save_results(parameters, results, model):\n for key in parameters:\n plt.figure()\n plt.title(model)\n plt.xlabel(key)\n plt.ylabel(\"accuracy\")\n plt.xticks(parameters[key])\n plt.plot(parameters[key], results[key])\n path = \"experiments/hyperparameter_eval/\"\n plt.savefig(path + \"{}/{}_eval.png\".format(model, key))", "title": "" }, { "docid": "837400717c38006e59fe4b3465f10348", "score": "0.69696933", "text": "def plot(self, fname=None):\n plt.figure()\n xy = sorted(zip(self.x, self.smooth_result))\n x, y = zip(*xy)\n plt.plot(x, y, '-')\n plt.plot(self.x, self.y, '.')\n if fname:\n plt.savefig(fname)\n else:\n plt.show()\n plt.close()", "title": "" }, { "docid": "af9090b7cc1b7fe1a08da2ff4d1fcea8", "score": "0.69569135", "text": "def _plot_results(x_values, y_values, weights, do_logistic, title_string,\n output_file_name):\n\n _, axes_object = pyplot.subplots(\n 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES))\n\n axes_object.plot(\n x_values, y_values, linestyle='None', marker=MARKER_TYPE,\n markerfacecolor=MARKER_COLOUR, markeredgecolor=MARKER_COLOUR,\n markersize=MARKER_SIZE, markeredgewidth=MARKER_EDGE_WIDTH)\n\n if do_logistic:\n x_values_in_line = numpy.linspace(X_MINIMUM, X_MAXIMUM, num=1000)\n else:\n x_values_in_line = numpy.array([X_MINIMUM, X_MAXIMUM])\n\n y_values_in_line = weights[0] + weights[1] * x_values_in_line\n if do_logistic:\n y_values_in_line = _inverse_logit(y_values_in_line)\n\n axes_object.plot(\n x_values_in_line, y_values_in_line, color=LINE_COLOUR,\n linewidth=LINE_WIDTH)\n\n pyplot.xlabel(r'Predictor ($x$)')\n pyplot.ylabel(r'Target ($y$)')\n\n pyplot.title(title_string)\n print 'Saving figure to: \"{0:s}\"...'.format(output_file_name)\n pyplot.savefig(output_file_name, dpi=FIGURE_RESOLUTION_DPI)\n pyplot.close()", "title": "" }, { "docid": "d8c47b6ae473ced68f2c94690322da4c", "score": "0.6878602", "text": "def plot_results(self):\n results = np.asarray(self._results)\n 
plt.plot(results[:,0],results[:,1],'ro-',linewidth=2,markersize=10)\n plt.xlabel(self.convergeParam)\n plt.ylabel(self.convergeUsing)\n plt.title('Convergence test')\n plt.show()", "title": "" }, { "docid": "60ccecb9b1d96e0ac7fa5c83941dfd99", "score": "0.68585867", "text": "def visualize(self):\n # Figure / axis set up\n fig, ax = plt.subplots()\n\n # We'll plot the list of params and their accuracy\n ax.plot(self.tune_results.keys(), self.tune_results.values())\n\n # Title\n ax.set_title(rf'{self.data_name} Tune Results')\n\n # X axis\n ax.set_xlabel('Step_Size')\n ax.set_xlim(0, .25)\n ax.set_xticks(list(self.tune_results.keys()))\n ax.set_xticklabels(list(self.tune_results.keys()), rotation=45, fontsize=6)\n\n # Y axis\n ax.set_ylabel('Misclassification')\n\n # Saving\n plt.savefig(f'output_{self.data_name}\\\\logistic_{self.data_name}_tune.jpg')", "title": "" }, { "docid": "28dce457588d32fd9d9f0dcc9c25c0e2", "score": "0.685244", "text": "def showPlot3():\n data1=[]\n data1.append(0)\n for i in xrange(1,11):\n data1.append(runSimulation(i,1,20,20,.8,100,StandardRobot))\n pylab.plot(data1)\n data2=[]\n data2.append(0)\n for i in xrange(1,11):\n data2.append(runSimulation(i,1,20,20,.8,100,RandomWalkRobot))\n pylab.plot(data2)\n pylab.title('Ticks to Clean a 20x20 room to 80%')\n pylab.xlabel('Robots')\n pylab.ylabel('Ticks')\n pylab.show()", "title": "" }, { "docid": "7b075779eda477e41654a6a4717641c3", "score": "0.6830455", "text": "def plot_energy(dr, results):\n f = pj(dr, \"energy.png\")\n if not exists(f):\n print(\"making energy plot\")\n az.plot_energy(results)\n plt.savefig(f)\n plt.close()", "title": "" }, { "docid": "05b0424cfdcfc8dc002aa98c360ec055", "score": "0.6829008", "text": "def plotSim():\n subplot(2,2,1)\n xlabel(\"X-Position [m]\")\n ylabel(\"Y-Position [m]\")\n plot(x,y)\n #axes().set_aspect('equal', 'datalim')\n\n subplot(2,2,2)\n plot(T,lf)\n xlabel(\"Time [s]\")\n ylabel(\"Force in wire [N]\")\n\n subplot(2,2,4)\n #plot(x[counterPhase[2]:counterPhase[3]],attAng[counterPhase[2]:counterPhase[3]])\n plot(x,attAng)\n xlabel(\"X-Position [m]\")\n ylabel(\"Angle of attack [deg]\")\n\n subplot(2,2,3)\n plot(x,velocity)\n xlabel(\"X-Position [m]\")\n ylabel(\"Velocity\")\n show()\n \"\"\"\n subplot(2,2,3)\n plot(x,E)\n xlabel(\"X-Position [m]\")\n ylabel(\"Energy [J]\")\n \"\"\"\n show()", "title": "" }, { "docid": "0b71c80aa5a8f9834cff4eb9dee9de89", "score": "0.6820655", "text": "def plot_results(self):\n t = -1 \n\n kd_actual = self.library.seqs['k_d']\n\n d = {'(P*S)/PS = K_d': self.P[t]*self.S[t]/self.PS[t],\n 'S/PS' : self.S[t]/self.PS[t],\n \n 'in/PS' : self.S[0]/self.PS[t],\n 'in_avg/PS' : self.S[0].mean()/self.PS[t],\n \n 'S/in' : self.S[t]/self.S[0],\n 'S/in_avg' : self.S[t]/self.S[0].mean()}\n \n # Print stats and create plot\n print('P=[P], S=[S], PS=[PS] \\n')\n for title, kd_observed in d.items():\n slope, intercept, r_value, p_value, std_err = sp.stats.linregress(kd_observed, self.library.seqs['k_d'])\n print('slope:', slope, ' int:', intercept)\n print('r^2:', r_value**2)\n plt.title(title)\n plt.scatter(kd_actual, kd_observed, s=2.5)\n plt.xlabel('kd_actual')\n plt.ylabel('kd_observed')\n plt.xlim(kd_actual.min(),kd_actual.max())\n plt.ylim(kd_observed.min(),kd_observed.max())\n plt.xscale('log')\n plt.yscale('log')\n plt.show()", "title": "" }, { "docid": "706633c7d0c38db20c686e3ec7b7c3ce", "score": "0.67851466", "text": "def showPlot2():\n data=[]\n data.append(runSimulation(2,1,20,20,.8,100,StandardRobot))\n 
data.append(runSimulation(2,1,25,16,.8,100,StandardRobot))\n data.append(runSimulation(2,1,40,10,.8,100,StandardRobot))\n data.append(runSimulation(2,1,50,8,.8,100,StandardRobot))\n data.append(runSimulation(2,1,80,5,.8,100,StandardRobot))\n data.append(runSimulation(2,1,100,4,.8,100,StandardRobot))\n pylab.plot(data)\n pylab.title('Ticks to Clean various room sizes to 80%')\n pylab.xlabel('Room Size')\n pylab.ylabel('Ticks')\n pylab.show()", "title": "" }, { "docid": "34d820b5865534bf4fba1d48a12452bd", "score": "0.675555", "text": "def _plot_results(self):\n print(\"\\n... Plotting Results\")\n\n x_plot = np.linspace(0,self._L,self._plot_accuracy)\n u_exact_plot = np.array([ 3.19189120e-16, -2.92473198e-03, -5.75846263e-03,\n -8.45459163e-03, -1.09601761e-02, -1.32210840e-02,\n -1.51861590e-02, -1.68104849e-02, -1.80578393e-02,\n -1.89024177e-02, -1.93299083e-02, -1.93379957e-02,\n -1.89363636e-02, -1.81462695e-02, -1.69997556e-02,\n -1.55385583e-02, -1.38127781e-02, -1.18793637e-02,\n -9.80046304e-03, -7.64169191e-03, -5.47036423e-03,\n -3.35372706e-03, -1.35723904e-03, 4.57072353e-04,\n 2.03214271e-03, 3.31726314e-03, 4.26930821e-03,\n 4.85377755e-03, 5.04563293e-03, 4.82991581e-03,\n 4.20213409e-03, 3.16840983e-03, 1.74538356e-03,\n -4.01261465e-05, -2.15170549e-03, -4.54416503e-03,\n -7.16458683e-03, -9.95358679e-03, -1.28467760e-02,\n -1.57764019e-02, -1.86731464e-02, -2.14680546e-02,\n -2.40945646e-02, -2.64906051e-02, -2.86007248e-02,\n -3.03782126e-02, -3.17871669e-02, -3.28044651e-02,\n -3.34215850e-02, -3.36462234e-02])\n\n # Plot Displacements\n fig_disp = plt.figure()\n \n for i in range(self._num_elements):\n FE, = plt.plot(self.x_plot[i], self.u_plot[i], 'b', lw=1.5, marker='o', markevery=(self._plot_accuracy-1), markerfacecolor='None')\n FE.set_label('FE Solution')\n plt.legend\n plt.plot(x_plot, u_exact_plot, 'k--', lw=1.5, label='Exact Solution')\n plt.legend(loc='best')\n plt.title('Displacement (Polynomial Degree:'+str(self._polynomial_degree)+' Elements:'+str(self._num_elements)+')')\n plt.xlabel('X',fontsize=15)\n fig_disp.canvas.set_window_title('Displacement (Polynomial Degree:'+str(self._polynomial_degree)+' Elements:'+str(self._num_elements)+')')\n plt.grid(True)\n #plt.savefig('Displacement_p'+str(self._polynomial_degree)+'_e'+str(self._num_elements),format='jpeg', dpi=300)\n plt.show()", "title": "" }, { "docid": "f1cb28051049d5c7aae66326b0e646d2", "score": "0.6745518", "text": "def plot_results(self, models):\n encoder, decoder = models\n test_gaussian = operations.get_gaussian_parameters(self.x_test, self.latent_dimension)\n os.makedirs(self.image_directory, exist_ok=True)\n\n filename = \"vae_mean.png\"\n filepath = os.path.join(self.image_directory, filename)\n\n z_gaussian, z_data = encoder.predict([test_gaussian, self.x_test], batch_size=self.batch_size)\n z_mean, z_covariance = operations.split_gaussian_parameters(z_gaussian)\n\n if self.latent_dimension == 2:\n # display a 2D plot of the data classes in the latent space\n plt.figure(figsize=(12, 10))\n plt.scatter(z_mean[:, 0], z_mean[:, 1], c=self.y_test, s=8, alpha=0.3)\n plt.colorbar(ticks=np.linspace(0, 2, 3))\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n plt.savefig(filepath, dpi=200)\n if self.show:\n plt.show()\n else:\n # display a 2D t-SNE of the data classes in the latent space\n plt.figure(figsize=(12, 10))\n tsne = LatentSpaceTSNE(z_mean, self.y_test, self.experiment_directory)\n tsne.save_tsne()\n\n if self.latent_dimension == 2:\n if self.is_mnist:\n filename = \"latent.png\"\n filepath = 
os.path.join(self.image_directory, filename)\n # display a 30x30 2D manifold of digits\n n = 30\n image_size = 28\n figure = np.zeros((image_size * n, image_size * n))\n # linearly spaced coordinates corresponding to the 2D plot\n # of digit classes in the latent space\n grid_x = np.linspace(-4, 4, n)\n grid_y = np.linspace(-4.5, 3.5, n)[::-1]\n\n for i, yi in enumerate(grid_y):\n for j, xi in enumerate(grid_x):\n parameter_tuple = (np.zeros(self.latent_dimension), np.ones(self.latent_dimension))\n dummy_gaussian = np.asarray([np.concatenate(parameter_tuple)])\n z_sample = np.array([[xi, yi]])\n x_decoded = decoder.predict([dummy_gaussian, z_sample])\n digit = x_decoded[1].reshape(image_size, image_size)\n figure[i * image_size: (i + 1) * image_size,\n j * image_size: (j + 1) * image_size] = digit\n\n plt.figure(figsize=(10, 10))\n start_range = image_size // 2\n end_range = (n - 1) * image_size + start_range + 1\n pixel_range = np.arange(start_range, end_range, image_size)\n sample_range_x = np.round(grid_x, 1)\n sample_range_y = np.round(grid_y, 1)\n plt.xticks(pixel_range, sample_range_x)\n plt.yticks(pixel_range, sample_range_y)\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n plt.imshow(figure, cmap='Greys_r')\n plt.savefig(filepath)\n if self.show:\n plt.show()\n plt.close('all')\n\n else:\n filename = \"latent.png\"\n filepath = os.path.join(self.image_directory, filename)\n # display a latent representation\n n = 30\n image_size = 224\n figure = np.zeros((image_size * n, image_size * n))\n # linearly spaced coordinates corresponding to the 2D plot\n # of digit classes in the latent space\n grid_x = np.linspace(-4, 4, n)\n grid_y = np.linspace(-4.5, 3.5, n)[::-1]\n\n for i, yi in enumerate(grid_y):\n for j, xi in enumerate(grid_x):\n parameter_tuple = (np.zeros(self.latent_dimension), np.ones(self.latent_dimension))\n dummy_gaussian = np.asarray([np.concatenate(parameter_tuple)])\n z_sample = np.array([[xi, yi]])\n x_decoded = decoder.predict([dummy_gaussian, z_sample])\n digit = x_decoded[1].reshape(image_size, image_size)\n figure[i * image_size: (i + 1) * image_size,\n j * image_size: (j + 1) * image_size] = digit\n\n plt.figure(figsize=(10, 10))\n start_range = image_size // 2\n end_range = (n - 1) * image_size + start_range + 1\n pixel_range = np.arange(start_range, end_range, image_size)\n sample_range_x = np.round(grid_x, 1)\n sample_range_y = np.round(grid_y, 1)\n plt.xticks(pixel_range, sample_range_x)\n plt.yticks(pixel_range, sample_range_y)\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n plt.imshow(figure, cmap='Greys_r')\n plt.savefig(filepath)\n if self.show:\n plt.show()\n plt.close('all')", "title": "" }, { "docid": "f0abacaa036832a6e7e33292e9370124", "score": "0.674292", "text": "def showPlot1(): \n\tmean_time = [runSimulation(num_robots,1.0,20,20,.8,100,StandardRobot) for num_robots in range(1,11)]\n\tnum_robots = range(1,11)\n\tpylab.clf()\n\tpylab.plot(num_robots,mean_time)\n\tpylab.ylim(max(min(mean_time)-10,0),max(mean_time)+10)\n\tpylab.title(\"Time taken to clean 80 percent of a 20 by 20 room\")\n\tpylab.xlabel(\"number of robots\")\n\tpylab.ylabel(\"time averaged over 100 trials\")\n\tpylab.savefig('time_versus_number_robots.png', bbox_inches='tight')", "title": "" }, { "docid": "743c81c86947fde15e84f6302f0659d7", "score": "0.67412734", "text": "def plot_spectra(x):\n\n print(x)\n # run dynamics using TDD, gwet phi(t)\n t_list = []\n phi_list = []\n s_list = []\n h_list = []\n \n #Clear Workspace\n cmd_1 = \"python Load_workspace.py --ClearWorkspace True\"\n 
out_str = subprocess.check_output(cmd_1, shell=True)\n print(out_str)\n count = 0\n \n for xp in x:\n Zw = xp[0:2]\n pd = xp[2:4]\n w1 = xp[4]\n print(xp)\n t, phi = tdd.getPhiPD(Zw, pd, w1, isPlot = False)\n np.savetxt(\"relax.dat\", np.c_[t, phi])\n par = readInput('inpReSpect.dat')\n _, _ = getContSpec(par)\n t_list.append(t),phi_list.append(phi)\n\n ## Source path \n #source = 'C://Users//18503//Dropbox//RA//Code//RA//PatchUp//PatchUp'\n #source_file = os.path.join(source,\"h.dat\")\n ## Destination path \n #destination_file = os.path.join(source,\"TrainData\",\"h{}.dat\".format(count))\n #shutil.move(source_file, destination_file)\n #count+=1\n data = np.genfromtxt('h.dat', delimiter='')\n #print(data)\n s_list.append(data[:,0]),h_list.append(data[:,1])\n time.sleep(0.02)\n \n\n #Plot t vs phi\n \n fig, ax = plt.subplots(1,2,figsize=(10,5))\n \n color = ['b', 'orange', 'g', 'orange']\n linestyle = ['-','-','-','--']\n #markersize = [5,5,5,8]\n for i in range(len(x)):\n ax[0].plot(t_list[i], phi_list[i],label=r'$W_1$'+\" = \" +str((x[i][4])),color = color[i],linestyle = linestyle[i],alpha = 0.8)\n# plt.plot(t_list[1], phi_list[1], linewidth=4,alpha=0.5,label=r'$W_1$'+\" =\" +str(0.5))\n# plt.plot(t_list[2], phi_list[2], linewidth=4,alpha=0.5,label=r'$W_1$'+\" =\" +str(0.9))\n\n ax[0].set_xlabel(r'$t$')\n ax[0].set_ylabel(r'$\\phi(t)$')\n\n ax[0].set_ylim(1e-4, None)\n ax[0].set_xscale('log')\n ax[0].set_yscale('log')\n anchored_text = AnchoredText(\"A\", loc=1)\n ax[0].add_artist(anchored_text)\n ax[0].legend(prop={'size': 12.5})\n ax[0].tick_params(axis='both', which='both', length=5) \n \n for i in range(len(x)):\n ax[1].plot(s_list[i], h_list[i], label=r'$W_1$'+\" = \" +str((x[i][4])),color = color[i],linestyle = linestyle[i],alpha = 0.8)\n# plt.plot(s_list[1], h_list[1], linewidth=4,alpha=0.5,label=r'$W_1$'+\" =\" +str(0.5))\n# plt.plot(s_list[2], h_list[2], linewidth=4,alpha=0.5,label=r'$W_1$'+\" =\" +str(0.9))\n\n ax[1].set_xlabel(r'$s$')\n ax[1].set_ylabel(r'$h$')\n ax[1].set_ylim(1e-4, None)\n ax[1].set_xscale('log')\n anchored_text = AnchoredText(\"B\", loc=1)\n ax[1].add_artist(anchored_text)\n ax[1].legend(prop={'size': 12.5})\n ax[1].tick_params(axis='both', which='both', length=5) \n \n plt.tight_layout(pad=2.0)\n plt.savefig(\"images/spectrum.png\",bbox_inches='tight', pad_inches=0.10)\n plt.show()\n\n return None", "title": "" }, { "docid": "c405cd9fcccc0f73bbfe7ebebb5b7c1f", "score": "0.67404974", "text": "def plot_results(df, exp_name):\n fig, ax = plt.subplots(figsize=[12,8])\n ax.set_title(f'GAN Losses: {exp_name}')\n ax.set_xlabel('Iteration Number')\n ax.set_ylabel('Loss (%)')\n ax.plot(df.iter_num, df.disc_loss_trn)\n ax.plot(df.iter_num, df.disc_loss_tst)\n ax.plot(df.iter_num, df.gen_loss_trn)\n ax.plot(df.iter_num, df.gen_loss_tst)\n ax.legend()\n ax.grid()\n fname = f'{exp_name}.png'\n fig.savefig(fname)", "title": "" }, { "docid": "f1ac4fab939fbb51c7f265e2ee7ffeb2", "score": "0.6732807", "text": "def main():\n # Utilization\n high_utilization = .95\n low_utilization = .50\n \n # Store results from high and low utilization \n sim_residence_times_high_u = []\n sim_residence_times_low_u = []\n \n \n # Store results for each type of customer under each load period\n sim_lp_residence_times_low_u = []\n sim_lp_residence_times_high_u = []\n \n sim_hp_residence_times_low_u = []\n sim_hp_residence_times_high_u = []\n \n # Fraction values for plots\n f_values = []\n \n \n for f in range(5, 95, 5):\n \n f_values.append(f/100.0)\n \n sim_residence_times_high_u = 
(simulate(high_utilization, f/100.0))\n sim_residence_times_low_u = (simulate(low_utilization, f/100.00))\n \n sim_hp_residence_times_low_u.append(sim_residence_times_low_u[0])\n sim_lp_residence_times_low_u.append(sim_residence_times_low_u[1])\n \n \n sim_hp_residence_times_high_u.append(sim_residence_times_high_u[0])\n sim_lp_residence_times_high_u.append(sim_residence_times_high_u[1])\n \n\n \n # Create new figure \n plt.figure()\n # Set values to be plotted\n plt.plot(f_values, sim_lp_residence_times_high_u, label=\"Low priority\")\n plt.plot(f_values, sim_hp_residence_times_high_u, label=\"High priority\")\n # Set title and axis labels\n plt.title(\"Residence times during high-load periods\")\n plt.ylabel(\"Residence time\")\n plt.xlabel(\"Fraction of customers with FastPass\")\n plt.legend()\n # Save as a PDF \n plt.savefig(\"High_Utilization_plots.pdf\", bbox_inches='tight')\n \n # Create new figure\n plt.figure()\n # Set values to be plotted \n plt.plot(f_values, sim_lp_residence_times_low_u, label=\"Low priority\")\n plt.plot(f_values, sim_hp_residence_times_low_u, label=\"High priority\")\n # Set title and axis labels \n plt.title(\"Residence times during low-load periods\")\n plt.ylabel(\"Residence time\")\n plt.xlabel(\"Fraction of customers with FastPass\")\n plt.legend()\n # Save as a PDF\n plt.savefig(\"Low_Utilization_plots.pdf\", bbox_inches='tight')", "title": "" }, { "docid": "0ccf0ecfa6cd795da78c9fbd01f58f54", "score": "0.6718705", "text": "def showPlot3():\n\tmean_time_standard = [runSimulation(num_robots,1.0,20,20,.8,100,StandardRobot) for num_robots in range(1,11)]\n\tmean_time_random = [runSimulation(num_robots,1.0,20,20,.8,100,RandomWalkRobot) for num_robots in range(1,11)]\n\tnum_robots = range(1,11)\n\tpylab.clf()\n\tpylab.plot(num_robots,mean_time_standard,'-b',label=\"standard\")\n\tpylab.plot(num_robots,mean_time_random,'-r',label=\"random walk\")\n\ty_max = max(max(mean_time_standard),max(mean_time_random))+100\n\tpylab.ylim(0,y_max)\n\tpylab.yticks(range(0,int(y_max)+99,100))\n\tpylab.title(\"Time taken to clean 80 percent of a 20 by 20 room\")\n\tpylab.xlabel(\"number of robots\")\n\tpylab.ylabel(\"time averaged over 100 trials\")\n\tpylab.grid(b=True, which='m',linestyle='--')\n\tpylab.legend(loc=1)\n\tpylab.savefig('two_strategies_compared.png', bbox_inches='tight')", "title": "" }, { "docid": "32f167c79f64e6e2545711bc8481e180", "score": "0.67137873", "text": "def paddy_plot_and_print(self, verbose=None, figure_name=None):\n is_running = False\n if verbose is None:\n print(\"plot and print requires a string argument\\\n or list containing valid string arguments\")\n return\n if isinstance(verbose, bool):\n print('plot and print does not accept bool objects')\n return\n #print(verbose)\n paddy_plot_and_write_comands = {'generation', 'final_results',\n 'pop_fitness', 'gen_fitness',\n 'best_sown', 'average_population',\n 'scatter', 'box', 'box_notched',\n 'box_hidden', 'average_gen'}\n plotter_types = {'best_sown', 'average_population', 'scatter', 'box',\n 'box_notched', 'box_hidden', 'average_gen'}\n if isinstance(verbose, str):\n verbose = {verbose}\n for comand in verbose:\n if paddy_plot_and_write_comands.issuperset({comand}):\n is_running = True\n else:\n print(str(comand)+' is not a valid input for plot and print')\n return\n verbose = set(verbose)\n ###Generation Seed ID's###\n if 'generation' in verbose:\n print(self.generation_data)\n #######Final Results######\n if 'final_results' in verbose:\n clean_parameter_print(self.top_values)\n 
#########Fitness Array of Population##########\n fitness_list = get_top_fitness(self.top_values)\n if 'pop_fitness' in verbose:\n print(\"List of fitness values:\")\n print(fitness_list)\n ####All Fitness In Gens####\n gf = self.generation_fitness\n if 'gen_fitness' in verbose:\n print(gf)\n ###########################\n info = [fitness_list, gf]\n print_len = len(plotter_types.intersection(verbose))\n if print_len > 0:\n preformance_plotter(info, verbose, figure_name)\n else:\n if is_running:\n pass", "title": "" }, { "docid": "8b2dc29b9725db7644b6ce50b9acbf87", "score": "0.6710874", "text": "def show_final_plots(self):\n # plt.ioff()\n plt.show()\n # input(\"Press [enter] to close the plots.\")\n plt.close()", "title": "" }, { "docid": "46a18bb9f5394b833541d8ffb1327014", "score": "0.6665905", "text": "def main():\n\tplt.clf()\n\taxes = setup_axes()\n\tvisuals.plot_output_4axes(axes,\n\t\t\"../../simulations/sudden_5Gyr_5e9Msun_schmidt\", \"crimson\", \"O\")\n\tvisuals.plot_output_4axes(axes,\n\t\t\"../../simulations/sudden_5Gyr_5e9Msun_ts1p0_schmidt\", \"deepskyblue\",\n\t\t\"O\")\n\tvisuals.plot_output_4axes(axes, \"../../simulations/sudden_5Gyr_5e9Msun\",\n\t\t\"black\", \"O\", second_linestyle = ':')\n\tvisuals.plot_track_points_intervals(axes[2],\n\t\tvice.history(\"../../simulations/sudden_5Gyr_5e9Msun\"))\n\tvisuals.sfr_ifr_legend(axes[0])\n\tvisuals.legend(axes[2], [\"black\", \"crimson\", \"deepskyblue\"],\n\t\t[r\"$\\tau_*\\propto M_\\text{g}^0$ \\qquad$\\tau_\\text{s}$ = 0\",\n\t\tr\"$\\tau_*\\propto M_\\text{g}^{-1/2}$\\quad$\\tau_\\text{s}$ = 0\",\n\t\tr\"$\\tau_*\\propto M_\\text{g}^{-1/2}$\\quad$\\tau_\\text{s}$ = 1 Gyr\"])\n\tplot_ifr(axes[0], \"../../simulations/sudden_5Gyr_5e9Msun_schmidt\",\n\t\t\"crimson\")\n\tplot_ifr(axes[0], \"../../simulations/sudden_5Gyr_5e9Msun_ts1p0_schmidt\",\n\t\t\"deepskyblue\")\n\tplot_ifr(axes[0], \"../../simulations/sudden_5Gyr_5e9Msun\",\n\t\t\"black\")\n\tplt.tight_layout()\n\tvisuals.yticklabel_formatter(axes[3])\n\tplt.savefig(sys.argv[1])\n\tplt.clf()", "title": "" }, { "docid": "482fbc6e439548f541cb2a3500e6668e", "score": "0.66591376", "text": "def plot(self):\n for i, sp in enumerate(self.species_short):\n for j, data_type in enumerate(self.data_types):\n if data_type == 'adult_survival':\n self._plot_adult_survival(i, j, sp, data_type)\n elif data_type == 'recruit_survival':\n self._plot_recruit_survival(data_type, i, j, sp)\n elif data_type == 'recruit_size':\n self._plot_recruit_size(data_type, i, j, sp)\n elif data_type == 'recruit_fv_fm':\n self._plot_recruit_fv_fm(data_type, i, j, sp)\n print('saving svg')\n plt.savefig(os.path.join(self.root_dir, 'figures',\n f\"physiology_and_survival_base_figure_{str(datetime.now()).split('.')[0].replace('-', '').replace(' ', 'T').replace(':', '')}.svg\"),\n dpi=1200)\n print('saving png')\n plt.savefig(os.path.join(self.root_dir, 'figures',\n f\"physiology_and_survival_base_figure_{str(datetime.now()).split('.')[0].replace('-', '').replace(' ', 'T').replace(':', '')}.png\"),\n dpi=1200)", "title": "" }, { "docid": "a10bec336c289053c86cf3acfb35e9c3", "score": "0.6646833", "text": "def plot_comp_periodic():\n\n plt.plot()\n if save == False:\n plt.show()\n else:\n plt.savefig(sfile)", "title": "" }, { "docid": "05a574223f384823ebf5f71e6e946ee9", "score": "0.66437435", "text": "def visual() -> None:\n\n parser = ap.ArgumentParser(description='Plots information about the simulation outcome')\n parser.add_argument('visualization', type=str,\n choices=('queue_size', 'utilization', 'lifecycle', 
'gantt',\n 'gantt_no_label', 'core_bubbles', 'mem_bubbles',\n 'mem_bw_overutilization', 'losses', 'rewards', 'action_preferences'),\n help='Statistic to visualise')\n parser.add_argument('-s', '--save', type=str, help='Save the plot in the specified file path')\n args = parser.parse_args()\n\n # Obtain the title for the plots given the agent configuration\n with open('./options.json', 'r') as in_f:\n options = json.load(in_f)\n agent_options = options['pybatsim']['agent']\n env_options = options['pybatsim']['env']\n if agent_options['type'] == 'CLASSIC':\n job_sel, core_sels = tuple(\n env_options[\"actions\"][\"selection\"][0].items()\n )[0]\n title = f'{agent_options[\"type\"]} {job_sel}-{core_sels[0]}'\n elif agent_options['type'] == 'LEARNING':\n title = (f'{agent_options[\"type\"]} {agent_options[\"run\"]} {agent_options[\"hidden\"]} '\n f'{agent_options[\"lr\"]} {agent_options[\"gamma\"]}')\n else:\n raise ValueError('Invalid agent type in \"options.json\"')\n \n # Job visualizations\n if args.visualization in ('queue_size', 'utilization', 'lifecycle', 'gantt', 'gantt_no_label'):\n jobset = ej.JobSet.from_csv('./out_jobs.csv', resource_bounds=(0, options['nb_resources']))\n if args.visualization == 'queue_size':\n _fixed_plot_series(jobset, name='queue', title=f'Queue size over time for {title}',\n legend_label='Queue size')\n plt.xlabel('Simulation time')\n plt.ylabel('Pending jobs')\n elif args.visualization == 'utilization':\n _fixed_plot_series(jobset, name='utilization', title=f'Utilization over time for {title}',\n legend_label='Load')\n plt.xlabel('Simulation time')\n plt.ylabel('Active cores')\n elif args.visualization == 'lifecycle':\n evl.plot_lifecycle(jobset, title=f'Job lifecycle for {title}')\n elif args.visualization == 'gantt':\n evg.plot_gantt(jobset, title=f'Gantt chart for {title}')\n plt.xlabel('Simulation time')\n elif args.visualization == 'gantt_no_label':\n evg.plot_gantt(jobset, title=f'Gantt chart for {title}', labeler=lambda _: '')\n plt.xlabel('Simulation time')\n # Over-utilization visualizations\n elif args.visualization in ('core_bubbles', 'mem_bubbles', 'mem_bw_overutilization'):\n with open('overutilizations.json', 'r') as in_f:\n overutilizations = json.load(in_f)\n with open('out_schedule.csv', 'r') as in_f:\n _, values = [row for row in csv.reader(in_f, delimiter=',')]\n makespan = float(values[2])\n _, ax = plt.subplots()\n ax.set_xlim(0, makespan)\n ax.set_xlabel('Simulation time')\n ax.grid(True)\n if args.visualization == 'core_bubbles':\n ax.set_title(f'Core bubbles for {title}')\n ax.set_ylim(0, len(overutilizations['core']))\n ax.plot(overutilizations['core'], range(len(overutilizations['core'])))\n elif args.visualization == 'mem_bubbles':\n ax.set_title(f'Memory bubbles for {title}')\n ax.set_ylim(0, len(overutilizations['mem']))\n ax.plot(overutilizations['mem'], range(len(overutilizations['mem'])))\n elif args.visualization == 'mem_bw_overutilization':\n ax.set_title(f'Mem BW overutilization spans for {title}')\n ax.set_ylim(0, overutilizations['mem_bw']['nb_procs'])\n ax.set_ylabel('Processors')\n for proc_id in overutilizations['mem_bw']['procs']:\n ax.broken_barh(overutilizations['mem_bw']['procs'][proc_id]['values'],\n (int(proc_id), int(proc_id)))\n # Learning visualizations\n elif args.visualization in ('losses', 'rewards', 'action_preferences'):\n _, ax = plt.subplots()\n ax.set_xlabel('Episodes')\n ax.grid(True)\n if args.visualization == 'losses':\n with open('losses.log', 'r') as in_f:\n losses = in_f.readlines()\n 
ax.set_title(f'Loss evolution for {title}')\n ax.set_ylabel('Loss value')\n ax.plot(tuple(range(len(losses))), tuple(map(float, losses)), 'r')\n elif args.visualization == 'rewards':\n with open('rewards.log', 'r') as in_f:\n rewards = in_f.readlines()\n ax.set_title(f'Reward evolution for {title}')\n ax.set_ylabel('Reward')\n ax.plot(tuple(range(len(rewards))), tuple(map(float, rewards)))\n elif args.visualization == 'action_preferences':\n ax.set_title(f'Action preferences for {title}')\n ax.set_ylabel('Probability')\n action_names = []\n for sel in env_options['actions']['selection']:\n for job_sel, core_sels in sel.items():\n for core_sel in core_sels:\n action_names.append(f'{job_sel}-{core_sel}')\n with open('probs.json', 'r') as in_f:\n all_probs = json.load(in_f)['probs']\n for action_name, probs in zip(action_names, all_probs):\n ax.plot(tuple(range(len(probs))), tuple(map(float, probs)), label=action_name)\n ax.legend()\n\n ax.set_ylim(bottom=0)\n if args.save:\n plt.savefig(args.save, format='svg', dpi=1200)\n plt.show()", "title": "" }, { "docid": "c4a5a58f70ba836986f98c509b2f8109", "score": "0.663999", "text": "def test_plot():\n save_dir = pathlib.Path(__file__).parent\n save_dir = save_dir / 'data' / 'save' / 'result_images' / 'frf_plot'\n frf = pymodal.FRF(frf=array_3d, resolution=0.5)\n\n frf[0].plot()\n plt.close()\n \n frf[0].plot()\n file_path = save_dir / 'one_frf.png'\n plt.savefig(file_path)\n plt.close()\n assert file_path.is_file()\n\n frf[0:2].real().plot()\n file_path = save_dir / 'two_frf.png'\n plt.savefig(file_path)\n plt.close()\n assert file_path.is_file()\n\n frf[0:3].imag().plot()\n file_path = save_dir / 'three_frf.png'\n plt.savefig(file_path)\n plt.close()\n assert file_path.is_file()\n\n __, ax = plt.subplots(2, 1, figsize=(10,10))\n plt.title = 'Frequency Response Function'\n frf[0].abs().plot(ax=ax[0], title='Magnitude')\n frf[0].phase().plot(ax=ax[1], title='Phase')\n plt.tight_layout()\n file_path = save_dir / 'frf_mag_phase.png'\n plt.savefig(file_path)\n plt.close()\n assert file_path.is_file()", "title": "" }, { "docid": "b7a51957788f7d1109880453229eeafb", "score": "0.6636711", "text": "def showPlot3():\n # raise NotImplementedError\n x_0 = [5, 10, 15, 20, 25, 30]\n y_s = list()\n y_r = list()\n x = list()\n for i in x_0:\n x.append(i ** 2)\n y_s.append(runSimulation(1, 1.0, i, i, .9, 1, StandardRobot))\n y_r.append(runSimulation(1, 1.0, i, i, .9, 1, RandomWalkRobot))\n plt.plot(x, y_s, 'r^', x, y_r, 'bs')\n\n plt.xlabel('Room Area')\n plt.ylabel('Avg Time to Clean a Room')\n plt.title('different algorithm')\n \n plt.show()\n plt.show()", "title": "" }, { "docid": "244397cd725241b14ee640c395be01a2", "score": "0.6635522", "text": "def _plot_result(self, traj, result_name):\n result = traj.f_get(result_name)\n varname = result.record_variables[0]\n values = result[varname]\n times = result.t\n\n record = result.record\n\n for idx, celia_neuron in enumerate(record):\n plt.subplot(len(record), 1, idx+1)\n plt.plot(times, values[idx,:])\n if idx==0:\n plt.title('%s' % varname)\n if idx==1:\n plt.ylabel('%s' % ( varname))\n if idx == len(record)-1:\n plt.xlabel('t')", "title": "" }, { "docid": "afa03fe1ca1425cd500b71a756b51960", "score": "0.66331804", "text": "def results(self, tmin=None, tmax=None, figsize=(10, 8), **kwargs):\n # Number of rows to make the figure with\n rows = 3 + len(self.ml.stressmodels)\n fig = plt.figure(figsize=figsize, **kwargs)\n # Main frame\n ax1 = plt.subplot2grid((rows, 3), (0, 0), colspan=2, rowspan=2)\n o = 
self.ml.observations()\n o_nu = self.ml.oseries.series.drop(o.index)\n if not o_nu.empty:\n # plot parts of the oseries that are not used in grey\n o_nu.plot(ax=ax1, linestyle='', marker='.', color='0.5', label='',\n x_compat=True)\n o.plot(ax=ax1, linestyle='', marker='.', color='k', x_compat=True)\n sim = self.ml.simulate(tmin=tmin, tmax=tmax)\n sim.plot(ax=ax1, x_compat=True)\n ax1.legend(loc=(0, 1), ncol=3, frameon=False)\n ax1.set_ylim(min(o.min(), sim.loc[tmin:tmax].min()),\n max(o.max(), sim.loc[tmin:tmax].max()))\n ax1.minorticks_off()\n\n # Residuals and noise\n ax2 = plt.subplot2grid((rows, 3), (2, 0), colspan=2, sharex=ax1)\n res = self.ml.residuals(tmin=tmin, tmax=tmax)\n res.plot(ax=ax2, sharex=ax1, color='k', x_compat=True)\n if self.ml.settings[\"noise\"] and self.ml.noisemodel:\n noise = self.ml.noise(tmin=tmin, tmax=tmax)\n noise.plot(ax=ax2, sharex=ax1, x_compat=True)\n ax2.axhline(0.0, color='k', linestyle='--', zorder=0)\n ax2.legend(loc=(0, 1), ncol=3, frameon=False)\n ax2.minorticks_off()\n\n # Stats frame\n ax3 = plt.subplot2grid((rows, 3), (0, 2), rowspan=3)\n ax3.set_title('Model Information', loc='left')\n\n # Add a row for each stressmodel\n for i, sm in enumerate(self.ml.stressmodels.keys(), start=3):\n ax = plt.subplot2grid((rows, 3), (i, 0), colspan=2, sharex=ax1)\n contrib = self.ml.get_contribution(sm, tmin=tmin, tmax=tmax)\n contrib.plot(ax=ax, sharex=ax1, x_compat=True)\n title = [stress.name for stress in self.ml.stressmodels[sm].stress]\n plt.title(\"Stresses:%s\" % title, loc=\"right\")\n ax.legend(loc=(0, 1), ncol=3, frameon=False)\n if i == 3:\n sharex = None\n else:\n sharex = axb\n axb = plt.subplot2grid((rows, 3), (i, 2), sharex=sharex)\n self.ml.get_step_response(sm).plot(ax=axb)\n ax.minorticks_off()\n\n ax1.set_xlim(tmin, tmax)\n\n plt.tight_layout(pad=0.0)\n\n # Draw parameters table\n parameters = self.ml.parameters.copy()\n parameters['name'] = parameters.index\n cols = [\"name\", \"optimal\", \"stderr\"]\n parameters = parameters.loc[:, cols]\n for name, vals in parameters.loc[:, cols].iterrows():\n parameters.loc[name, \"optimal\"] = '{:.2f}'.format(vals.optimal)\n stderr_perc = np.abs(np.divide(vals.stderr, vals.optimal) * 100)\n parameters.loc[name, \"stderr\"] = '{:.1f}{}'.format(stderr_perc,\n \"\\u0025\")\n ax3.axis('off')\n # loc='upper center'\n ax3.table(bbox=(0., 0., 1.0, 1.0), cellText=parameters.values,\n colWidths=[0.5, 0.25, 0.25], colLabels=cols)\n\n return fig.axes", "title": "" }, { "docid": "82ea7fafbe2bcc3fb42dc3ba8668f031", "score": "0.6632697", "text": "def save_plot(self, i):\r\n #plt.figure(i)\r\n title = 'Number of Evals Completed: ' + str(i)\r\n front = self.memory_archive.contents\r\n fvals = [rec.fx for rec in front]\r\n fvals = np.asarray(fvals)\r\n maxgen = (self.maxeval - self.numinit)/(self.nsamples*self.ncenters)\r\n curgen = (self.numeval - self.numinit)/(self.nsamples*self.ncenters) + 1\r\n if self.data.pf is not None:\r\n plt.plot(self.data.pf[:,0], self.data.pf[:,1], 'g')\r\n all_fvals = [rec.fx for rec in self.evals]\r\n all_fvals = np.asarray(all_fvals)\r\n plt.plot(all_fvals[:,0], all_fvals[:,1], 'k+')\r\n if fvals.ndim > 1:\r\n plt.plot(fvals[:,0], fvals[:,1], 'b*')\r\n plt.title(title)\r\n plt.draw()\r\n plt.savefig('Final')\r\n plt.clf()\r\n\r\n all_xvals = [rec.x for rec in self.evals]\r\n all_xvals = np.asarray(all_xvals)\r\n npts = all_xvals.shape[0]\r\n X = np.zeros((npts, self.data.dim + self.data.nobj))\r\n X[:, 0:self.data.dim] = all_xvals\r\n X[:, self.data.dim:self.data.dim + self.data.nobj] 
= all_fvals\r\n np.savetxt('final.txt', X)\r\n\r\n if self.data.pf is not None:\r\n plt.plot(self.data.pf[:,0], self.data.pf[:,1], 'g')\r\n if fvals.ndim > 1:\r\n plt.plot(fvals[:,0], fvals[:,1], 'b*')\r\n plt.title(title)\r\n plt.draw()\r\n plt.savefig('Final_front')\r\n plt.clf()", "title": "" }, { "docid": "f327c094fad11f872d27e22eccd41b37", "score": "0.66295326", "text": "def plotResults(self,**kwargs):\n for fittingProblem in self.fittingProblemList:\n fittingProblem.plotResults(**kwargs)", "title": "" }, { "docid": "59ecb400534d318c572ea569e4b333e2", "score": "0.6625662", "text": "def plot_test_predictions(self, display_figs=True, save_figs=False,\n output_folder=\"images\", output_format=\"png\"):\n # Load test data and make prediction\n x = self.test_set_x.get_value(borrow=True)\n y = self.test_set_y.get_value(borrow=True)\n prediction = self.output(x)\n\n # Plot each chunk with its prediction\n for i, chunk in enumerate(zip(x, y, prediction)):\n # Create a figure and add a subplot with labels\n fig = plt.figure(i)\n graph = fig.add_subplot(111)\n fig.suptitle(\"Chunk Data\", fontsize=25)\n plt.xlabel(\"Month\", fontsize=15)\n plt.ylabel(\"Production\", fontsize=15)\n\n # Make and display error label\n mean_cost = abs_error_cost(chunk[1], chunk[2]).eval()\n plt.title(\"Mean Cost: %f\" % mean_cost, fontsize=10)\n\n # Plot the predictions as a blue line with round markers\n prediction = np.append(chunk[0], chunk[2])\n graph.plot(prediction, \"b-o\", label=\"Prediction\")\n\n # Plot the future as a green line with round markers\n future = np.append(chunk[0], chunk[1])\n graph.plot(future, \"g-o\", label=\"Future\")\n\n # Plot the past as a red line with round markers\n past = chunk[0]\n graph.plot(past, \"r-o\", label=\"Past\")\n\n # Add legend\n plt.legend(loc=\"upper left\")\n\n # Save the graphs to a folder\n if save_figs:\n filename = \"%s/%04d.%s\" % (output_folder, i, output_format)\n fig.savefig(filename, format=output_format)\n\n # Display the graph\n if display_figs:\n plt.show()\n \n # Clear the graph\n plt.close(fig)", "title": "" }, { "docid": "9794c8e5f76ed4cf49552b9f65c274b0", "score": "0.659709", "text": "def main():\n print(\"***\")\n print(str(config_obj.sim.num_stockpiles))\n print(str(config_obj.sim.data_file_path))\n\n\n res = ga.ea_runner()\n utils.plot_sim_dest_individual(res[2][0])", "title": "" }, { "docid": "e0d6b7d52148261c4d28063175629331", "score": "0.65933394", "text": "def plot(self):\n self._prep_dir()\n self._write_links_file()\n self._write_genes_file()\n self._write_data_files()\n if not self._config_supplied:\n self._write_circos_config()\n self._execute_circos()", "title": "" }, { "docid": "c30bd2e78c5a9964addb69794106247f", "score": "0.6592874", "text": "def plots():\n pass", "title": "" }, { "docid": "43fd498c8ae246d12ee78f59560f91d1", "score": "0.65826434", "text": "def main():\n\n # Get position list\n while True:\n try:\n positions = list_input(raw_input('Please specify the list of positions to simulate (separated by commas): '), int)\n break\n except (InputError, ConversionError) as e:\n print e\n continue\n\n # Get number of trials input\n while True:\n try:\n ntrials = well_input(raw_input('Please specify number of trials of the simulation: '), int)\n break\n except (InputError, ConversionError) as e:\n print e\n continue\n \n invest_instrument = Invest(1000) # Create an instance with total asset 1000\n results = ['position mean std'] # List to store simulation results\n \n for position in positions:\n print 'Simulating with position = {} 
...'.format(position)\n daily_ret = invest_instrument.simulate(ntrials, position)\n results.append(' '.join(map(str, [position, np.mean(daily_ret), np.std(daily_ret)])))\n \n p = plt.figure()\n # Uncomment if you like xkcd style\n # plt.xkcd()\n plt.hist(daily_ret, 100, range=[-1,1], color='grey')\n plt.title('Histogram of Daily Return with position {}'.format(position))\n plt.xlabel('daily return')\n p.savefig('histogram_{}_pos.pdf'.format(str(position).zfill(4)))\n\n print 'Saving results ...'\n with open('results.txt', 'w') as f:\n f.write('\\n'.join(results))", "title": "" }, { "docid": "300c19fc2fb0182f5c78824437a11681", "score": "0.65787107", "text": "def _plotResults(self):\r\n # plot only if test is started manually\r\n if __name__ != '__main__':\r\n return\r\n # create empty stream\r\n st = Stream()\r\n st.label = self._testMethodName\r\n # original trace\r\n self.orig_trace.label = \"Original Trace\"\r\n st += self.orig_trace\r\n # use header information of original trace with filtered trace data\r\n tr = self.orig_trace.copy()\r\n tr.data = self.filt_trace_data\r\n tr.label = \"Filtered original Trace\"\r\n st += tr\r\n # real processed chunks\r\n for i, tr in enumerate(self.rt_appended_traces):\r\n tr.label = \"RT Chunk %02d\" % (i + 1)\r\n st += tr\r\n # real time processed trace\r\n self.rt_trace.label = \"RT Trace\"\r\n st += self.rt_trace\r\n st.plot(automerge=False, color='blue', equal_scale=False)", "title": "" }, { "docid": "dabba3516f3638b88eb93dc959310a9a", "score": "0.65652746", "text": "def plot(self, tmin=None, tmax=None, oseries=True, simulation=True,\n **kwargs):\n fig = self._get_figure(**kwargs)\n fig.suptitle(\"Results of \" + self.ml.name)\n\n if oseries:\n o = self.ml.observations(tmin=tmin, tmax=tmax)\n o_nu = self.ml.oseries.drop(o.index)\n if not o_nu.empty:\n # plot parts of the oseries that are not used in grey\n o_nu.plot(linestyle='', marker='.', color='0.5', fig=fig)\n o.plot(linestyle='', marker='.', color='k', fig=fig)\n\n if simulation:\n h = self.ml.simulate(tmin=tmin, tmax=tmax)\n h.plot(fig=fig)\n plt.xlim(tmin, tmax)\n plt.ylabel(\"Groundwater levels [meter]\")\n plt.legend()\n\n return fig.axes", "title": "" }, { "docid": "c9343615664986b770bdc1df148c45f7", "score": "0.65646", "text": "def plot_results(results, epochs, out_dir):\n _, (ax1, ax2) = plt.subplots(1, 2)\n\n ax1.set_xlabel(\"Epochs\")\n ax1.set_ylabel(\"Losses\")\n ax1.plot(\n range(1, epochs+1),\n results.history['val_loss'],\n label=\"Validation loss\",\n marker='o')\n ax1.plot(\n range(1, epochs+1),\n results.history['loss'],\n label=\"loss\",\n marker='o')\n ax1.legend()\n\n ax2.set_xlabel(\"Epochs\")\n ax2.set_ylabel(\"Accuracies\")\n ax2.plot(\n range(1, epochs+1),\n [accuracy * 100 for accuracy in results.history['accuracy']],\n label=\"Accuracy\",\n marker='o')\n ax2.plot(\n range(1, epochs+1),\n [accuracy * 100 for accuracy in results.history['val_accuracy']],\n label=\"validation accuracy\",\n marker='o')\n ax2.legend()\n\n plt.tight_layout()\n plt.savefig(\n \"%s/%s/result.png\" % (\n out_dir,\n datetime.datetime.now().date().strftime(\"%Y_%m_%d\")\n ),\n format=\"png\",\n papertype=\"letter\",\n pad_inches=0.5,\n dpi=1000\n )\n plt.show()", "title": "" }, { "docid": "31fc718ff1159a70e66de7620eecee28", "score": "0.6562226", "text": "def plot(self, result):\n prstd, iv_l, iv_u = wls_prediction_std(result)\n\n fig, ax = plt.subplots(figsize=(8,6))\n\n ax.plot(self.x, self.y, 'o', label=\"data\")\n ax.plot(self.x, self.y_true, 'b-', label=\"True\")\n ax.plot(self.x, 
self.res_ols.fittedvalues, 'r--.', label=\"OLS\")\n ax.plot(self.x, iv_u, 'r--')\n ax.plot(self.x, iv_l, 'r--')\n ax.legend(loc='best')\n plt.show()", "title": "" }, { "docid": "3ef999cb54a8c5824eb5d30dd37ec2b7", "score": "0.6561482", "text": "def plot_and_save_UF_analysis(self, time_reqd_arr, save_data = False):\n plt.rcParams[\"font.family\"] = \"serif\"\n fig = plt.figure(figsize=(16,10))\n \n \n plt.hist(time_reqd_arr)\n plt.title('Dependence of execution time on $U_f$ (Simon\\'s algorithm)', fontsize=25)\n plt.xlabel('Execution time (s)',fontsize=20)\n plt.ylabel('Frequency of occurence',fontsize=20)\n plt.xticks(fontsize=15)\n plt.yticks(fontsize=15)\n \n if save_data:\n fig.savefig('Figures/Simon_hist.png', bbox_inches='tight')", "title": "" }, { "docid": "080b57f5d008a636caa1a395d4945018", "score": "0.6557627", "text": "def plot(self, results_to_plot: pd.DataFrame, values_of_psi: List[int]) -> None:\n fig, ax = plt.subplots(figsize=(13, 13))\n textstr = '\\t\\t'.join((\n r'$\\alpha=%.2e$' % (self.layers[0][0].learning_rate,),\n r'$\\lambda=%.2e$' % (self.layers[0][0].weight_decay,),\n r'$\\nu=%.2e$' % (self.layers[0][0].momentum,),\n r'$\\kappa=%s$' % (str(self.features_in_dataset),),\n r'$\\eta=%s$' % (self.n_neurons_str,)))\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.1)\n columns = ['epoch', 'f-score NIT', 'f-score SXT']\n plain_results = results_to_plot[columns]\n plain_results = plain_results.melt('epoch', var_name='cols', value_name='%')\n sns.lineplot(data=plain_results, x='epoch', y='%', hue='cols', style='cols')\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), fancybox=True, fontsize=14, ncol=2)\n plt.text(0.01, 1.04, textstr, fontsize=14, transform=ax.transAxes, verticalalignment='top', bbox=props)\n plt.xlabel('epoch', fontsize=14)\n plt.ylabel('%', fontsize=14)\n plt.savefig(self.train_path+'/fscore_results.pdf')\n plt.close(fig)\n plt.close()\n fig, ax = plt.subplots(figsize=(13, 13))\n global_results = results_to_plot.melt('epoch', var_name='cols', value_name='%')\n sns.lineplot(data=global_results, x='epoch', y='%', hue='cols', style='cols')\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), fancybox=True, fontsize=14, ncol=4)\n plt.text(0.01, 1.04, textstr, fontsize=14, transform=ax.transAxes, verticalalignment='top', bbox=props)\n plt.xlabel('epoch', fontsize=14)\n plt.ylabel('%', fontsize=14)\n plt.savefig(self.train_path+'/global_results.pdf')\n plt.close(fig)\n plt.close()\n fig, ax = plt.subplots(figsize=(13, 13))\n error_results = results_to_plot[results_to_plot.columns[~results_to_plot.columns.\n isin(['f-score NIT', 'f-score SXT'])\n ]].melt('epoch', var_name='cols', value_name='%')\n sns.lineplot(data=error_results, x='epoch', y='%', hue='cols', style='cols')\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), fancybox=True, fontsize=14, ncol=3)\n plt.text(0.01, 1.04, textstr, fontsize=14, transform=ax.transAxes, verticalalignment='top', bbox=props)\n plt.xlabel('epoch', fontsize=14)\n plt.ylabel('%', fontsize=14)\n plt.savefig(self.train_path+'/error_results.pdf')\n plt.close(fig)\n plt.close()\n columns = ['epoch', 'f-score NIT', 'f-score SXT']\n for psi in values_of_psi:\n psi_str = r'$\\psi=$' + str(psi)\n columns_with_psi = columns[:]\n for i, label in enumerate(self.class_labels):\n columns_with_psi += ['error ' + label + ' (' + psi_str + ')']\n fig, ax = plt.subplots(figsize=(13, 13))\n psi_error_results = results_to_plot[columns_with_psi].melt('epoch', var_name='cols', value_name='%')\n 
sns.lineplot(data=psi_error_results, x='epoch', y='%', hue='cols', style='cols')\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), fancybox=True, fontsize=14, ncol=2)\n plt.text(0.01, 1.04, textstr, fontsize=14, transform=ax.transAxes, verticalalignment='top', bbox=props)\n plt.xlabel('epoch', fontsize=14)\n plt.ylabel('%', fontsize=14)\n plt.savefig(self.train_path + '/' + str(psi) + '_psi_error_results_with_fscore.pdf')\n plt.close(fig)\n plt.close()\n columns = ['epoch']\n for psi in values_of_psi:\n psi_str = r'$\\psi=$' + str(psi)\n columns_with_psi = columns[:]\n for i, label in enumerate(self.class_labels):\n columns_with_psi += ['error ' + label + ' (' + psi_str + ')']\n fig, ax = plt.subplots(figsize=(13, 13))\n psi_error_results = results_to_plot[columns_with_psi].melt('epoch', var_name='cols', value_name='%')\n sns.lineplot(data=psi_error_results, x='epoch', y='%', hue='cols', style='cols')\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), fancybox=True, fontsize=14, ncol=2)\n plt.text(0.01, 1.04, textstr, fontsize=14, transform=ax.transAxes, verticalalignment='top', bbox=props)\n plt.xlabel('epoch', fontsize=14)\n plt.ylabel('%', fontsize=14)\n plt.savefig(self.train_path + '/' + str(psi) + '_psi_error_results.pdf')\n plt.close(fig)\n plt.close()", "title": "" }, { "docid": "7d0d2c761f0e9333a60d3485c0493043", "score": "0.6551567", "text": "def script_run_plot(file_name: str, period: float):\n # Set working dir to src\n SH.set_working_directory()\n\n # Get y data\n y_full_data = get_data_from_all_simulations_output(file_name, Const.Y_OPTION, period)\n y_data = SH.get_average_from_list_of_lists(y_full_data)\n\n # Get x data\n x_data = [x for x in range(SH.get_size_of_smallest_list(y_full_data))]\n\n # Get max and min vectors\n max_min_dictionary = SH.get_max_min_vectors_from_list_of_lists(y_full_data)\n maximums_vector = max_min_dictionary['maximum']\n minimums_vector = max_min_dictionary['minimum']\n\n # Plot data\n SH.plot_data(x_data, y_data, Const.X_OPTION, Const.Y_OPTION, file_name, period,\n Const.FILL_MAX_MIN, maximums_vector, minimums_vector)", "title": "" }, { "docid": "e387034083578f34043883963411ea68", "score": "0.6550589", "text": "def plot_output_sequence(values, filename):\n plt.plot(flatten(values))\n plt.ylabel('Output')\n plt.xlabel('Position in Sequence')\n plt.savefig(filename)\n plt.clf()", "title": "" }, { "docid": "fb5c0e4581f4ce88a3a50f0df40c786c", "score": "0.65481037", "text": "def plot_results(results, original_df, model_name):\n fig, ax = plt.subplots(figsize=(15, 5))\n sns.lineplot(original_df.date, original_df.sales, data=original_df, ax=ax,\n label='Original', color='mediumblue')\n sns.lineplot(results.date, results.pred_value, data=results, ax=ax,\n label='Predicted', color='red')\n ax.set(xlabel=\"Date\",\n ylabel=\"Sales\",\n title=f\"{model_name} Sales Forecasting Prediction\")\n ax.legend()\n sns.despine()\n\n plt.savefig(f'../model_output/{model_name}_forecast.png')", "title": "" }, { "docid": "aae9a8afb1785e4111e23834cb4fbee0", "score": "0.6518924", "text": "def test_response_plot(self):\r\n sta = read_inventory()[0][0]\r\n with warnings.catch_warnings(record=True):\r\n warnings.simplefilter(\"ignore\")\r\n with ImageComparison(self.image_dir, \"station_response.png\") as ic:\r\n rcParams['savefig.dpi'] = 72\r\n sta.plot(0.05, channel=\"*[NE]\", outfile=ic.name)", "title": "" }, { "docid": "b535b2544ece555c15c4cf9582a2f95b", "score": "0.6500606", "text": "def plot_results(self, syn_size):\n fig = 
plt.figure(figsize=(15,10))\n for i in range(4):\n plt.subplot(2,2, i+1)\n if i==0:\n rr = range(0,len(self.accumulated_gloss), 3)\n short_g_loss = [self.accumulated_gloss[i] for i in rr]\n short_d_loss = [self.accumulated_dloss[i] for i in rr]\n plt.plot(rr, short_g_loss, label=\"Generator\", color=\"#FFB248\")\n plt.plot(rr, short_d_loss, label=\"Discriminator\", color=\"#5F84B4\")\n plt.xlabel(\"Epochs\", fontsize=13)\n plt.ylabel(\"Loss\", fontsize=13)\n plt.grid(axis=\"y\")\n plt.legend();\n elif i==1:\n plt.title(\"Divergencia de Kullback-Leibler\", fontsize=14)\n plt.plot(range(len(self.kl_d)), self.kl_d, linewidth=.5)\n plt.plot(range(len(self.kl_d)), np.zeros(len(self.kl_d)))\n plt.grid()\n plt.xlabel(\"Epochs\");\n plt.ylabel(\"Divergencia de Kullback-Leibler\");\n elif i==2:\n short_precision = [self.precision[v] for v in rr]\n plt.plot(rr, short_precision, linewidth=.8, label=\"precision\")\n plt.grid(axis=\"y\")\n plt.plot(rr, np.ones(len(short_precision))*.5,\n label=\"target\", linestyle=\"--\")\n plt.legend();\n elif i==3:\n pca = PCA(n_components=2)\n X_real_pca = pca.fit_transform(self.X_train)\n noise = tf.random.normal([syn_size, self.noise_input])\n synthetics = self.G(noise)\n X_fake_pca = pca.transform(synthetics.numpy())\n plt.scatter(X_fake_pca[:,0], X_fake_pca[:,1], label=\"synthetic\", alpha=.4, color=\"orange\",\n edgecolors=\"red\");\n plt.scatter(X_real_pca[:,0], X_real_pca[:,1], label=\"real\", marker=\"*\", s=80, color=\"green\",\n edgecolors=\"black\");\n return fig", "title": "" }, { "docid": "03f9ba944c66006f8acfdf70547146ad", "score": "0.6492778", "text": "def plot_results(results, original_data, model_name):\r\n fig, ax = plt.subplots(figsize=(15,5))\r\n sns.lineplot(original_data.date, original_data.sales, data=original_data, ax=ax, \r\n label='Original', color='mediumblue')\r\n sns.lineplot(results.date, results.predict_value, data=results, ax=ax, \r\n label='Predicted', color='Red')\r\n \r\n ax.set(xlabel = \"Date\",\r\n ylabel = \"Sales\",\r\n title = f\"{model_name} Sales Forecasting Prediction\")\r\n \r\n ax.legend()\r\n \r\n sns.despine()\r\n \r\n plt.savefig(f'model_output/{model_name}_forecast.png')", "title": "" }, { "docid": "4ced9e17c405b6897d2d0525f7d604d7", "score": "0.64885837", "text": "def plot_result(self):\n\n to_plot = self.results\n if self.forced_inputs:\n to_plot += self.forced_inputs\n\n BooleanTimeSeries.plot_many(to_plot)\n plt.legend()\n plt.xlabel(\"time\")\n plt.tight_layout()", "title": "" }, { "docid": "97e14f561f54c32b9d1aaeccfc53d531", "score": "0.6485181", "text": "def save_spec_plot(self, save_path=None):\n check_save_path(save_path)\n for (n, spec_opt) in zip(self.obj_nums, self.obj_specs_opt):\n plt.plot(self.wavl, spec_opt/spec_opt.max(), color='k', label=\"opt\", alpha=0.9)\n plt.xlabel(\"wavelength\",fontsize=12)\n plt.ylabel(\"normed flux\",fontsize=12)\n plt.legend(fontsize=12)\n plt.savefig(os.path.join(save_path, \"#%d.png\"%n),dpi=100)\n plt.close()", "title": "" }, { "docid": "395047b995dd06683d9bdff485cda89a", "score": "0.6482422", "text": "def test_plot(self):\r\n self.mseed_stream.plot(show=False)", "title": "" }, { "docid": "8ed7266151fe86246b3674026cf3022f", "score": "0.6481828", "text": "def update_plots(self):\n self.plot_spikes(self.sids)\n self.plot_neurons(self.nids)\n self.clear_draw_blit()", "title": "" }, { "docid": "b3c287021a544b48ecd73692af9836aa", "score": "0.6481512", "text": "def save_plots(xa_ens_pickle, out_dir):\n dat_xt = nc.Dataset('output/background/xb0.daily.nc', 'r')\n jda = 
fourdenvar.FourDEnVar()\n obs = es.obs_fn()\n date = nc.num2date(dat_xt.variables['time'][:], dat_xt.variables['time'].units)\n xb_dir = es.output_directory + '/ensemble' + str(es.seed_value)\n xa_dir = es.output_directory + '/ensemble_xa_' + str(es.seed_value)\n #plot SM\n fig, ax = plot_twin_spread(date[:], 'smcl', xb_dir, xa_dir,\n ob_times=obs['sm_times'], obs=obs['sm_obs'], err=obs['sm_err'],\n ylab=r'Soil Moisture (m$^{3}$ m$^{-3}$)')\n fig.savefig(out_dir+'/sm.png', bbox_inches='tight')\n xb_dir2 = es.output_directory + '/background_tst'\n xa_dir2 = es.output_directory + '/analysis_med'\n fig, ax = calc_cosmos_stats(date[:], xb_dir2, xa_dir2)\n fig.savefig(out_dir+'/cosmos_median.png', bbox_inches='tight')\n #plot distribution\n #true_params = {'alpha_io': 5.5e-02, 'neff_io': 5.7e-04, 'fd_io': 9.6e-03, 'mu_io': 2.0e-02, 'nu_io': 4.0e+00,\n # 'gamma_io': 1.76e+01, 'delta_io':-3.3e-01}\n xa_ens = pickle.load(open(xa_ens_pickle, 'rb'))\n p_keys = ['oneovernminusone','oneoveralpha','satcon','vsat','vcrit','vwilt','hcap','hcon']\n fig, ax = plot_mult_dist(xa_ens, jda.xbs, p_keys)\n fig.savefig(out_dir + '/distributions.png', bbox_inches='tight')\n return 'plots saved!'", "title": "" }, { "docid": "3bc5dbda3f7e80c85b08edf4c1a81a6a", "score": "0.64723384", "text": "def plot(self, plot_simulation_domain=False, ax=None):\n pass", "title": "" }, { "docid": "eb88519cd3147c4c1c5e6db5321fa989", "score": "0.6471695", "text": "def plot(self):\n plt.figure(1)\n plt.subplot(211)\n plt.plot(self.reward_arr)\n plt.ylabel('Rewards')\n plt.xlabel('Episodes')\n if self.episode_lengths is not None:\n plt.subplot(212)\n plt.plot(self.episode_lengths)\n plt.ylabel('Length')\n plt.xlabel('Episodes')\n\n plt.suptitle(\"Plots of %s\" % self.env_name)\n\n plt.show()", "title": "" }, { "docid": "114f0db633dc1d7e434bdf6cae06f0c7", "score": "0.64713025", "text": "def plotSimulation(time, filenames):\n \n visualizers = []\n outputs = []\n labels = []\n for filename in filenames:\n output = Output(filename)\n \n outputs += [output]\n visualizers += [Visualizer(output)]\n dx = output.getNameList().xl / output.getNameList().nx\n labels += [str(int(dx/1000)) + \" km\"] if dx >= 1000 else [str(int(dx)) + \" m\"]\n \n # Adjust the tot_prec for diffrent number of timesteps\n for visualizer, output in zip(visualizers, outputs):\n dt_ref = outputs[0].getNameList().dt\n dt = output.getNameList().dt\n visualizer.tot_prec = visualizer.tot_prec * dt / dt_ref\n \n fig = plt.figure(figsize=(6.5, 5.25))\n fig.subplots_adjust(left = 0.1, bottom = 0.12, right = 0.97, top = 0.96)\n ax = fig.add_subplot(1, 1, 1)\n plt.sca(ax)\n ax.cla()\n \n max_prec = 2.0 # mm\n \n ax.set_ylabel('Accumulated Rain [mm]')\n ax.set_xlabel('x [km]')\n \n ax.set_ylim([0, max_prec])\n ax.set_xlim(visualizers[0].xlim)\n \n # Topography \n x = visualizers[0].x[:, 0]\n y = 0.1 * max_prec * visualizers[0].topo[time, :] / max(visualizers[0].topo[time, :])\n y0 = np.zeros(np.shape(y))\n ax.fill_between(x, y, y0, where=y >= y0, facecolor='0.75', interpolate=True)\n\n # Precipitations\n for visualizer, label in zip(visualizers, labels):\n ax.plot(visualizer.x[:, 0], visualizer.tot_prec[time, :], label=label)\n \n plt.legend(frameon=False)\n plt.title('Time = {0} seconds'.format(int(visualizers[0].output.t()[time])))\n plt.tight_layout()\n\n plt.savefig(\"total-precipitation.pdf\")\n plt.show()", "title": "" }, { "docid": "f73f91d0802d6705d96ab6ce82419ed0", "score": "0.645013", "text": "def Plot(self,show=True,filename=\"wind_farm\"):\n ### Create the path names 
###\n folder_string = self.params.folder+\"/plots/\"\n file_string = self.params.folder+\"/plots/\"+filename+\".pdf\"\n\n ### Check if folder exists ###\n if not os.path.exists(folder_string): os.makedirs(folder_string)\n\n ### Create a list that outlines the extent of the farm ###\n ex_list_x = [self.ex_x[0],self.ex_x[1],self.ex_x[1],self.ex_x[0],self.ex_x[0]]\n ex_list_y = [self.ex_y[0],self.ex_y[0],self.ex_y[1],self.ex_y[1],self.ex_y[0]]\n\n ### Generate and Save Plot ###\n plt.plot(ex_list_x,ex_list_y,c=\"r\")\n p=plt.scatter(self.x,self.y,c=self.z)\n plt.xlim(self.dom.x_range[0],self.dom.x_range[1])\n plt.ylim(self.dom.y_range[0],self.dom.y_range[1])\n clb = plt.colorbar(p)\n clb.ax.set_ylabel('Hub Height')\n\n plt.title(\"Location of the Turbines\")\n plt.savefig(file_string)\n if show:\n plt.show()", "title": "" }, { "docid": "eebed4346e66f1150fda5d21f102af0f", "score": "0.6442907", "text": "def flee_and_plot(config, simulation_period, **args):\n update_environment(args, {\"simulation_period\": simulation_period})\n env.simulation_settings = \"simsetting.csv\"\n flee(config, simulation_period, **args)\n plot_output(\"%s\" % (env.job_name), \"graph\")", "title": "" }, { "docid": "8568f9c78650e0d1140bfefa88a17672", "score": "0.64365107", "text": "def plot_results(models,\n data,\n batch_size=128,\n model_name=\"vae_mnist\"):\n\n pass # need to install graphviz for this to work", "title": "" }, { "docid": "504b7211abd78eb48c8f268dd31c05d9", "score": "0.6427671", "text": "def display_solution(self):\r\n x_nodes = []\r\n y_nodes = []\r\n plt.subplot(1, 2, 1)\r\n for node in graph.get_nodes():\r\n plt.scatter(node.x, node.y, c=\"#000000\")\r\n for node in ga.starting_best.route_taken:\r\n x_nodes.append(node.x)\r\n y_nodes.append(node.y)\r\n plt.plot(x_nodes, y_nodes)\r\n plt.title(\"Original Randomly Generated Solution\")\r\n\r\n x_nodes = []\r\n y_nodes = []\r\n plt.subplot(1, 2, 2)\r\n for node in graph.get_nodes():\r\n plt.scatter(node.x, node.y, c=\"#000000\")\r\n for node in ga.get_solution_best():\r\n x_nodes.append(node.x)\r\n y_nodes.append(node.y)\r\n plt.plot(x_nodes, y_nodes)\r\n plt.title(\"Genetic Algorithm Solution\")\r\n\r\n plt.savefig(\"solution_comparison\")\r\n plt.tight_layout()", "title": "" }, { "docid": "beefb55cd2441cc0865e083a2bc484c1", "score": "0.64273566", "text": "def plot_final_multifit(var_pars, out_rootname=\"out\", title=\" \"):\n print \"Var par shape:\", np.shape(var_pars)\n print var_pars\n plot_file = out_rootname + \".\" + PLOT_EXT\n\n multifit_plots.make_var_plots(plot_file, N_SPECTRA, var_pars, quiet=False,\n title=title)\n subprocess.call(['cp', plot_file,\n \"/Users/abigailstevens/Dropbox/Research/CCF_paper1/images/\"])", "title": "" }, { "docid": "ffa1383e96cbd12ea93ee55fb7d0ff2f", "score": "0.64266676", "text": "def plot(self):\n\n for mult, tdir in self.plot_combos:\n self._set_plotter_state(mult, tdir)\n self._init_figure()\n self._plot_vectors()\n self._config_axes()\n self._present_figure()", "title": "" }, { "docid": "0a0dfd766b13f61eb69f4db58f4e9bd5", "score": "0.64237124", "text": "def showPlot2():\n # raise NotImplementedError\n x_axis = list()\n y_axis = list()\n width_list = [20, 25, 40, 50, 80, 100]\n height_list = [20, 16, 10, 8, 5, 4]\n \n for i in range(len(width_list)): \n x_axis.append(width_list[i]*1.0/height_list[i])\n y_axis.append(runSimulation(2, 1.0, width_list[i], height_list[i], .8, 100, StandardRobot))\n\n plt.plot(x_axis, y_axis, 'bo')\n plt.xlabel('width/height ratio')\n plt.ylabel('Avg Time to Clean a Room')\n 
plt.title('width/height ratio versus Avg Cleaning Time')\n plt.show()", "title": "" }, { "docid": "115bcc4b1373d45c13bce233a078ae8d", "score": "0.6408471", "text": "def plot_results(\n self,\n true,\n predicted: pd.DataFrame,\n prefix,\n where,\n inputs=None,\n ):\n\n for plot in self.plots:\n if plot == \"murphy\":\n self.murphy_plot(true, predicted, prefix, where, inputs)\n else:\n getattr(self, f\"{plot}_plot\")(true, predicted, prefix, where)\n return", "title": "" }, { "docid": "22882993a083616ff6f7cd09dfc7dfc9", "score": "0.6404481", "text": "def plot(self):\n\n fig, ax = plt.subplots()\n\n values = {}\n for run in self.runs:\n # Load dataset\n data = run.get_dataset(\"stats-performance-raw-*.csv\")\n\n # Extract values\n try:\n data = data[self.graph.settings['state']]\n except KeyError:\n raise exceptions.ImproperlyConfigured(\"State vs. size plot requires the 'state' tag to be set!\")\n\n try:\n values[run.orig.settings['size']] = (numpy.average(data), numpy.std(data))\n except KeyError:\n raise exceptions.ImproperlyConfigured(\"State vs. size plot requires the 'size' tag to be set!\")\n\n X = sorted(values.keys())\n Y = [values[x][0] for x in X]\n Yerr = [values[x][1] for x in X]\n\n ax.errorbar(X, Y, Yerr, marker='x', color='black', label='Meritve')\n\n # Fit a function over the measurements when configured\n fit_function = self.graph.settings.get('fit', None)\n if fit_function is not None:\n popt, pcov = scipy.optimize.curve_fit(fit_function, X, Y)\n Fx = numpy.linspace(min(X), max(X) + 1*(X[-1] - X[-2]), 100)\n Fy = [fit_function(x, *popt) for x in Fx]\n ax.plot(Fx, Fy, linestyle='--', color='black', label=self.graph.settings.get('fit_label', 'Fit'))\n\n ax.set_xlabel(u'Število vozlišč')\n ax.set_ylabel(u'Število vnosov')\n ax.grid()\n ax.set_ylim(0, None)\n\n if self.graph.settings.get('scale'):\n ax.set_xscale(self.graph.settings.get('scale'))\n\n legend = ax.legend(loc='lower right')\n if self.settings.GRAPH_TRANSPARENCY:\n legend.get_frame().set_alpha(0.8)\n fig.savefig(self.get_figure_filename())", "title": "" }, { "docid": "590b937014a60ed47fe58ae729b02020", "score": "0.6402225", "text": "def run_simulation_results(self):\n #Run Simulations, print graphs \n results = self.intc.run()\n available_simulations = self.intc.getresultdata('ONA_1')\n available_sim = available_simulations.split('\\n')\n self.intc.save(\"test_interconnect\")\n \n \n data1 = self.intc.getresult('ONA_1',\"input 1/mode 1/gain\")\n data = self.intc.getresult('ONA_1',\"input 1/mode 1/transmission\")\n attr = data['Lumerical_dataset']['attributes']\n self.attr_str = self.listToString(attr)\n #Plot graphs\n self.help = plt.plot(data['wavelength']*10e8,\n abs(data[self.attr_str])**2)\n self.help = plt.title('Transmission vs Wavelength (Through One Port)')\n self.help = plt.xlabel('Wavelength (nm)')\n self.help = plt.ylabel(self.attr_str) \n self.help = plt.show()\n req = self.req[0]\n for i in data1['mode 1 gain (dB)']:\n if i < req:\n print (\"Insertion Loss exceeds design requirements, please revise YAML and resimulate/reoptimize\")\n return False\n #Checking \n return True", "title": "" }, { "docid": "0ced0d44ad355ebcee0eb5b2438e0a92", "score": "0.64009595", "text": "def create_plot(method, data):\n title_font = {'size':'16'}\n axis_font = {'size':'14'}\n\n plt.figure(figsize=(15,10))\n plt.tick_params(labelsize=14)\n plt.title(f\"{name} {method} Test Accuracies\", **title_font)\n\n for i, exp in enumerate(experiments):\n styles = {'linestyle': get_style(exp), 'color': colorlist[i]}\n for i in 
range(runs):\n vals = experiment_dict[exp]\n plt.plot(vals[:,i], alpha=0.2, **styles)\n plt.plot(vals.mean(axis=1), label=exp, **styles)\n plt.xlabel('Steps', labelpad=10, **axis_font)\n plt.ylabel('Accuracy', labelpad=10, **axis_font)\n plt.legend(loc='lower right', prop=axis_font)\n\n plt.savefig(f\"plots/{to_file_base}_{method.lower().replace(' ','_')}_{img_name}.png\")\n print(f\"saved to plots/{to_file_base}_{method.lower().replace(' ','_')}_{img_name}.png\", '\\n')\n plt.close()", "title": "" }, { "docid": "c0d90f0ca2ae8ec0f984e012f7eb96b4", "score": "0.64008975", "text": "def plot_and_save_results(rewards: List, days_uncaught: List, title: str,\n goal: int, max_days: int, file_path: Optional[str] = None):\n # Close all previous windows\n plt.close(\"all\")\n\n # Define the figure\n f, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))\n f.suptitle(title)\n ax[0].plot(rewards, label=\"score per run\")\n ax[0].axhline(goal, c=\"red\", ls=\"--\", label=\"maximum goal\")\n ax[0].set_xlabel(\"Games\")\n ax[0].set_ylabel(\"Total Reward\")\n ax[0].legend()\n\n # Plot the days over episodes\n ax[1].plot(days_uncaught)\n ax[1].set_xlabel(\"Games\")\n ax[1].set_ylabel(\"# days\")\n ax[1].axhline(max_days, c=\"red\", ls=\"--\", label=\"maximum days\")\n\n # If a file path is given, then the plot should be saved otherwise it should be shown\n if file_path is not None:\n plt.savefig(file_path)\n else:\n plt.show()", "title": "" }, { "docid": "96222d524ff356453dbd07d1b55113d2", "score": "0.6395647", "text": "def plot_data(x_vals, y_vals, y_pred, name, filepath):\n plt.scatter(x_vals, y_vals, c='b')\n plt.scatter(x_vals, y_pred, c='r')\n plt.xlabel('x-values')\n plt.ylabel('y-values')\n plt.title(name)\n plt.savefig(filepath + name + \".png\")\n plt.close()", "title": "" }, { "docid": "1634be59a4ffa9ea89acea1e8bf8c596", "score": "0.63914907", "text": "def plotModel(self):\n #Get best paramters predicted by the model\n param = self.getBestParam()\n\n #Get a dataset of list_pts with \"param\" used as parameters\n data = self.getDataset(param)\n\n #Predictions\n y_pred = self.getPrediction(data)+ self.exp_values[:,1]\n \n #Plot result\n plt.figure(figsize = (10,10))\n x = self.exp_values[:,0] \n #y = [self.fct(t,2,1,3) for t in x]\n plt.scatter(x,y_pred,marker = \"+\",c = 'b',label = 'Fonction Estimée \\n' + str(param))\n plt.scatter(x,self.exp_values[:,1],marker = 'x', c='r',label = 'Mesures expérimentales')\n plt.plot(x,2*x+1,c='g',label = 'Fonction recherchée')\n plt.xlabel('x')\n plt.ylabel('y_pred-y')\n plt.legend()\n plt.savefig(\"ModelPredHistory/step{:d}.png\".format(self.step ))\n plt.show()", "title": "" }, { "docid": "9da12c8af92778e68363c6ba96402cf8", "score": "0.63910156", "text": "def plot(self):\n fig = plt.figure()\n\n ax = fig.add_subplot(2, 2, 1)\n plt.plot(self.samples.nbasis)\n plt.ylabel(\"number of basis functions\")\n plt.xlabel(\"MCMC iteration (post-burn)\")\n\n ax = fig.add_subplot(2, 2, 2)\n plt.plot(self.samples.s2)\n plt.ylabel(\"error variance\")\n plt.xlabel(\"MCMC iteration (post-burn)\")\n\n ax = fig.add_subplot(2, 2, 3)\n yhat = self.predict(self.data.xx_orig).mean(axis=0) # posterior predictive mean\n plt.scatter(self.data.y, yhat)\n abline(1, 0)\n plt.xlabel(\"observed\")\n plt.ylabel(\"posterior prediction\")\n\n ax = fig.add_subplot(2, 2, 4)\n plt.hist(self.data.y - yhat, color=\"skyblue\", ec=\"white\", density=True)\n axes = plt.gca()\n x = np.linspace(axes.get_xlim()[0], axes.get_xlim()[1], 100)\n plt.plot(x, sp.stats.norm.pdf(x, 
scale=np.sqrt(self.samples.s2.mean())), color='red')\n plt.xlabel(\"residuals\")\n plt.ylabel(\"density\")\n\n fig.tight_layout()\n\n plt.show()", "title": "" }, { "docid": "e76ba6fcc2902aaad79a4480ac468c5c", "score": "0.6390604", "text": "def results(self, tmin=None, tmax=None, **kwargs):\n figsize = kwargs.pop(\"figsize\", None)\n fig = self._get_figure(figsize=figsize, )\n\n # Number of rows to make the figure with\n rows = 3 + len(self.ml.stressmodels)\n\n # Main frame\n ax1 = plt.subplot2grid((rows, 3), (0, 0), colspan=2, rowspan=2,\n fig=fig)\n o = self.ml.observations(tmin=tmin, tmax=tmax)\n o_nu = self.ml.oseries.drop(o.index)\n if not o_nu.empty:\n # plot parts of the oseries that are not used in grey\n o_nu.plot(ax=ax1, linestyle='', marker='.', color='0.5',\n x_compat=True)\n o.plot(ax=ax1, linestyle='', marker='.', color='k', x_compat=True)\n sim = self.ml.simulate(tmin=tmin, tmax=tmax)\n sim.plot(ax=ax1, x_compat=True)\n plt.legend(loc=(0, 1), ncol=3, frameon=False)\n\n ax1.set_ylim(min(o.min(), sim.loc[tmin:tmax].min()),\n max(o.max(), sim.loc[tmin:tmax].max()))\n\n # Residuals and innovations\n ax2 = plt.subplot2grid((rows, 3), (2, 0), colspan=2, sharex=ax1)\n res = self.ml.residuals(tmin=tmin, tmax=tmax)\n res.plot(ax=ax2, sharex=ax1, color='k', x_compat=True)\n if self.ml.settings[\"noise\"] and self.ml.noisemodel:\n v = self.ml.innovations(tmin=tmin, tmax=tmax)\n v.plot(ax=ax2, sharex=ax1, x_compat=True)\n plt.legend(loc=(0, 1), ncol=3, frameon=False)\n\n # Stats frame\n ax3 = plt.subplot2grid((rows, 3), (0, 2), rowspan=3)\n ax3.xaxis.set_visible(False)\n ax3.yaxis.set_visible(False)\n # plt.text(0.05, 0.8, 'Rsq: %.2f' % self.ml.stats.rsq())\n # plt.text(0.05, 0.6, 'EVP: %.2f' % self.ml.stats.evp())\n plt.title('Model Information', loc='left')\n\n # Draw parameters table\n cols = [\"optimal\", \"stderr\"]\n parameters = self.ml.parameters.loc[:, cols]\n parameters.optimal = parameters.optimal.round(5)\n for name, vals in parameters.loc[:, [\"optimal\", \"stderr\"]].iterrows():\n popt, stderr = vals\n val = np.abs(stderr / popt * 100)\n parameters.loc[name, \"stderr\"] = \\\n \"{:} {:.5e} ({:.2f}{:})\".format(\"\\u00B1\", stderr, val,\n \"\\u0025\")\n\n table = plt.table(cellText=parameters.values,\n rowLabels=parameters.index,\n colLabels=cols,\n colWidths=[0.2, 0.4],\n loc='center')\n # table.auto_set_font_size(value=True)\n ax3.add_table(table)\n plt.setp(ax3.spines.values(), color=None)\n\n # Add a row for each stressmodel\n for i, ts in enumerate(self.ml.stressmodels.keys(), start=3):\n ax = plt.subplot2grid((rows, 3), (i, 0), colspan=2, sharex=ax1)\n contrib = self.ml.get_contribution(ts, tmin=tmin, tmax=tmax)\n contrib.plot(ax=ax, sharex=ax1, x_compat=True)\n title = [stress.name for stress in self.ml.stressmodels[ts].stress]\n plt.title(\"Stresses:%s\" % title, loc=\"right\")\n ax.legend(loc=(0, 1), ncol=3, frameon=False)\n if i == 3:\n sharex = None\n else:\n sharex = axb\n axb = plt.subplot2grid((rows, 3), (i, 2), sharex=sharex)\n self.ml.get_step_response(ts).plot(ax=axb)\n\n ax1.set_xlim(tmin, tmax)\n\n return fig.axes", "title": "" }, { "docid": "a4240a5aadf84ac63cc47713c81c8741", "score": "0.638988", "text": "def plot(self):\n pass", "title": "" }, { "docid": "8de3beadaf4515ebd4fecb6a52b713dc", "score": "0.6389388", "text": "def plot_results(errors, performance_log, save=False):\n \n # Split data into x and y axes for each subplot\n errs_y = [error[0] for error in errors]\n errs_x = [error[1] for error in errors]\n perf_f = [score[0][0] for score in 
performance_log]\n perf_r = [score[0][1] for score in performance_log]\n perf_x = [score[1] for score in performance_log]\n \n # Create plots\n f, (ax1, ax2) = pyplot.subplots(2, sharex=True)\n ax1.plot(errs_x, errs_y)\n ax1.set_title('Q Network Loss', fontsize=14)\n ax1.set_ylabel('Mean Squared Error')\n ax1.set_xlabel('Epoch')\n\n ax2.plot(perf_x, perf_f, label='CW')\n ax2.plot(perf_x, perf_r, label='CCW')\n ax2.set_title('Test Results', fontsize=14)\n ax2.set_ylabel('Successful Trials')\n ax2.set_xlabel('Epoch')\n ax2.legend(loc='upper left', fontsize='medium')\n ax2.annotate(time.strftime(\"%m/%d/%Y, %H:%M:%S\"), xy=(1, 0),\n xycoords='axes fraction', fontsize=8,\n horizontalalignment='right', verticalalignment='bottom')\n \n f.subplots_adjust(hspace=0.5) # Adjust spacing\n fig = pyplot.gcf() \n fig.canvas.set_window_title('Performance Plots') # Add window title\n\n if save:\n fig.savefig(time.strftime(\"../Plots/%m%d%Y_%H%M%S\"))\n\n f.show()", "title": "" }, { "docid": "f55484c7eb4fdbaa54dcc004706844ae", "score": "0.6384634", "text": "def update_plots():\n global results\n\n has_tide = tide_toggle.active\n river_flow_rate = river_flow_slider.value\n N_river = river_N_slider.value\n G = gas_exchange_slider.value\n P = productivity_slider.value\n\n results = run_model(has_tide, river_flow_rate, N_river, G, P)\n\n # Update internal data handler with latest results/model run output\n source.data = dict(V=results['V'], S=results['S'],\n N=results['N'], O=results['O'],\n Z=results['Z'],\n day=results['day'])\n\n # title_str = \"Estuary\"\n # comps = []\n # if has_tide:\n # comps += ['tides']\n # if river_flow_rate > 0:\n # comps += ['river']\n # extra_str = \" with \" + \" and \".join(comps)\n # top.title = title_str + extra_str\n\n # Reset plot ranges if necessary\n # TODO: This seems to be an open bug in bokeh, where the plot doesn't\n # detect the need to re-draw following a range change.\n if results['S'].max() > top.y_range.end:\n top.y_range = Range1d(0, 1.05*results['S'].max())\n if results['N'].max() > mid.y_range.end:\n mid.y_range = Range1d(0, 1.05*results['N'].max())\n if results['O'].max() > bot.y_range.end:\n bot.y_range = Range1d(0, 1.05*results['O'].max())", "title": "" }, { "docid": "7e2bd17df6b91ac9985a0bab193af05d", "score": "0.6377757", "text": "def show_plots() -> None:\n plt.show()", "title": "" }, { "docid": "5ffb68513b1bdb02d85e92fb7284ba37", "score": "0.63716733", "text": "def save_plot(cfg,fig,title):\n save_dir = os.path.join(cfg['CWD_PATH'],cfg['repo_path'], cfg['result_path'],cfg['model_result_path'])\n fig.savefig(save_dir + title+\".png\")", "title": "" }, { "docid": "ba552cb4e54e1848ba07f47316110637", "score": "0.6357201", "text": "def plot(self):\n plt_args = {}\n plt_args['schemes']=self.args_tuple.test_config['test-name']\n plt_args['data_dir']=self.tmp_dir\n plt_args['no_graphs']=None\n plt_args['custom_test']=True\n plt_args['include_acklink']=False\n Args = namedtuple('Args', plt_args.keys())\n p = Plot(Args(**plt_args), {i+1:flow['flow_info'] for i, flow in enumerate(self.args_tuple.test_config['flows'])})\n p.run()\n return p.perf_data[self.experiment_name]", "title": "" }, { "docid": "50b46428bc164099797980dd5529ccfb", "score": "0.63485926", "text": "def plots(self):\n raise NotImplementedError", "title": "" }, { "docid": "675f8c44f5ac18b8976bd99eb89c764a", "score": "0.63473004", "text": "def plot(self, tmin=None, tmax=None, oseries=True, simulation=True,\n ax=None, figsize=None, legend=True, **kwargs):\n if ax is None:\n fig, ax = 
plt.subplots(figsize=figsize, **kwargs)\n\n ax.set_title(\"Results of {}\".format(self.ml.name))\n\n if oseries:\n o = self.ml.observations()\n o_nu = self.ml.oseries.series.drop(o.index)\n if not o_nu.empty:\n # plot parts of the oseries that are not used in grey\n o_nu.plot(linestyle='', marker='.', color='0.5', label='',\n ax=ax)\n o.plot(linestyle='', marker='.', color='k', ax=ax)\n\n if simulation:\n sim = self.ml.simulate(tmin=tmin, tmax=tmax)\n sim.plot(ax=ax)\n plt.xlim(tmin, tmax)\n plt.ylabel(\"Groundwater levels [meter]\")\n if legend:\n plt.legend()\n plt.tight_layout()\n return ax", "title": "" }, { "docid": "e0f4e96e902a02020fc07cd28c120213", "score": "0.6344138", "text": "def plot_solution(self, ntimes, save_to_file=False, save_file_name='figure.png'):\r\n step = math.floor((self.nt)/(ntimes-1))\r\n for i in range(0,self.nt,step):\r\n plt.plot(self.x,self.solution[:,i], label =r\"time={}s\".format(i*self.dt))\r\n #plt.text(0.0, 1.0, 'error = %s'%(self.mae))\r\n #plt.text(0.0, 0.9, 'delta_t = %s'%(self.dt))\r\n plt.xlabel(\"x\")\r\n plt.ylabel(\"Temperature\")\r\n #plt.ylabel(\"Vertical Displacement\")\r\n\r\n plt.title('Temperature Along x-axis Over Time')\r\n #plt.title('Vertical Displacement Along x-axis Over Time')\r\n\r\n plt.legend()\r\n\r\n\r\n # determine to save the figure or not\r\n if save_to_file:\r\n \tplt.savefig(save_file_name, dpi = 300)\r\n else:\r\n plt.show()", "title": "" }, { "docid": "3cb3c916df6996ae2f376b467d15610d", "score": "0.63383263", "text": "def save_final_spectrum(job):\n es = construct_electron_spectrum(job)\n es.plot()\n es.savefig()", "title": "" }, { "docid": "ffa565254aead51a6c00e9d387b44e66", "score": "0.6335097", "text": "def plot(self):\n fig, ax = pyplot.subplots()\n line = lines.Line2D(self.x_values, self.y_values)\n ax.add_line(line)\n ax.set_title(self.title)\n ax.axis([0, len(self.x_values) + 1, 0, max(self.y_values) + 10])\n ax.set_xlabel(self.x_label)\n ax.set_ylabel(self.y_label)\n pyplot.savefig(self.filename)\n fig.clear()", "title": "" }, { "docid": "de34bb2a0816624c34ec3e459f46dccf", "score": "0.63315535", "text": "def plot(x, y, ytrue,Filename='results'):\r\n fig, ax = plt.subplots()\r\n plt.xlabel('x')\r\n plt.ylabel('arctan(x)') \r\n plt.grid(True)\r\n plt.title('Taylor series approximation')\r\n plt.plot(x,y, label = 'Order '+ str(Nvalue))\r\n plt.plot(x,ytrue, label = 'arctan(x)')\r\n plt.legend()\r\n plt.show()\r\n fig.savefig(Filename)", "title": "" }, { "docid": "7f6d6bfcdf183edff5ed28343e2eded4", "score": "0.6330841", "text": "def plot(self, output_directory):\n import matplotlib.pyplot as plt\n\n f, ax = plt.subplots()\n if self.Tlist is not None:\n t_list = [t for t in self.Tlist.value_si]\n else:\n t_list = 1000.0 / np.arange(0.4, 3.35, 0.05)\n klist = np.zeros_like(t_list)\n klist2 = np.zeros_like(t_list)\n for i in range(len(t_list)):\n klist[i] = self.reaction.calculate_tst_rate_coefficient(t_list[i])\n klist2[i] = self.reaction.kinetics.get_rate_coefficient(t_list[i])\n\n order = len(self.reaction.reactants)\n klist *= 1e6 ** (order - 1)\n klist2 *= 1e6 ** (order - 1)\n t_list = [1000.0 / t for t in t_list]\n plt.semilogy(t_list, klist, 'ob', label='TST calculation')\n plt.semilogy(t_list, klist2, '-k', label='Fitted rate')\n plt.legend()\n reaction_str = '{0} {1} {2}'.format(\n ' + '.join([reactant.label for reactant in self.reaction.reactants]),\n '<=>', ' + '.join([product.label for product in self.reaction.products]))\n plt.title(reaction_str)\n plt.xlabel('1000 / Temperature (K^-1)')\n plt.ylabel('Rate 
coefficient ({0})'.format(self.k_units))\n\n plot_path = os.path.join(output_directory, 'plots')\n\n if not os.path.exists(plot_path):\n os.mkdir(plot_path)\n valid_chars = \"-_.()<=> %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in reaction_str if c in valid_chars) + '.pdf'\n plt.savefig(os.path.join(plot_path, filename))\n plt.close()", "title": "" }, { "docid": "b6fe95522fea8c8eee4b8bdd0b1754b4", "score": "0.63307333", "text": "def main(save=False):\n update_csv()\n for loc in ['South West', 'West Berkshire', 'Leicester', 'Essex',\n 'Basildon', 'Dorset', 'Somerset', 'Gloucestershire',\n 'Wiltshire', 'City of Bristol', 'Cornwall and Isles of Scilly',\n 'Bath and North East Somerset']:\n plt.figure(figsize=FIG_SIZE)\n plot_values(loc)\n if save:\n save_fig(loc)\n\n plt.figure(figsize=FIG_SIZE)\n plot_devon()\n if save:\n save_fig('Devon Totals')\n else:\n plt.show()", "title": "" }, { "docid": "405f92d30cf0fa7b91c1f3b924ddb28e", "score": "0.6329138", "text": "def plot_result(data_dir, hurricane, k, save_dir):\r\n k = int(k)\r\n save_dir += hurricane + '/' + str(k) +'/'\r\n #with open(data_dir + hurricane + '.outages_data') as f:\r\n # line = f.readline()\r\n #counties = line.strip().split('\\t')\r\n #with open(data_dir + 'TICC/' + hurricane + '/interp_data.csv') as f:\r\n # lines = f.readlines()\r\n with open(data_dir + hurricane + '_interp.csv')as f:\r\n #with open(data_dir + 'hurricane_matthew_60min_sampling_interval.csv') as f:\r\n #with open(data_dir + 'Matthew.csv') as f:\r\n lines = f.readlines()\r\n counties = lines[0].strip().split(',')\r\n lines = lines[1:]\r\n data = []\r\n for line in lines:\r\n data.append([float(x) for x in line.strip().split(',')])\r\n data = np.array(data)\r\n l = len(lines)\r\n print 'reading the network'\r\n G = nx.read_adjlist(data_dir + hurricane + '.adjlist', delimiter = '\\t')\r\n print 'reading the segmentation result'\r\n with open(save_dir + 'S_' + str(k) + '.txt') as f:\r\n line = f.readline()\r\n S = ast.literal_eval(line.strip())\r\n print 'reading the explanation'\r\n E = np.load(save_dir + 'E_' + str(k) + '.txt.npy')\r\n #E = (E - E.min(0)) / (E.max(0) - E.min(0))\r\n\r\n print 'plotting the segmentation'\r\n for i in range(len(counties)):\r\n plt.plot(range(l), data[:, i])\r\n for s in S:\r\n plt.axvline(x=float(s))\r\n plt.savefig(save_dir + '/S_' + str(k) + '.pdf')\r\n\r\n plt.clf()\r\n print 'plotting the graphs'\r\n node_labels = {}\r\n nodes = G.nodes()\r\n #for i in range(k):\r\n for i in range(len(S)):\r\n node_size = []\r\n for n in nodes:\r\n index = counties.index(n)\r\n w = E[index, i] * 300\r\n node_size.append(w)\r\n nx.draw_networkx(G, nodelist = nodes, node_size = node_size, font_size = 5)\r\n plt.savefig(save_dir + '/Egraph_' + str(k) + '_cut' + str(i) + '.pdf')\r\n plt.clf()", "title": "" }, { "docid": "c4eddf79a418a32204baa4bcbd729525", "score": "0.63278687", "text": "def makePlot(values, title=-1):\n\n\ttitles = [\"Task 1: Deterministic Model without King's moves\", \"Task 2: Deterministic with King's moves\",\\\n\t\t\t\t \"Task 3: Stochastic Model with King's moves\",\"Task 4: Deterministic Model with 9 moves\", \"Task 5: Stochastic Model without King's moves\"]\n\tfilename = None\n\n\tif title == \"all\":\n\t\t\n\t\talgos = [\"Sarsa\",\"Expected_Sarsa\",\"Q_Learning\"]\n\t\tfor itr in range(len(values)):\n\t\t\tpyplot.plot(values[itr], range(len(values[itr])), label=algos[itr])\n\n\t\tpyplot.xlabel(\"Time Steps\")\n\t\tpyplot.ylabel(\"Episodes\")\n\t\tpyplot.title(\"All Algorithms 
Combined\")\n\t\tpyplot.legend(loc='lower right')\n\t\tfilename = \"plots/all.png\"\n\n\telse:\n\n\t\tfilename = \"plots/task-%d.png\"%(title)\n\n\t\tpyplot.plot(values, range(len(values)))\n\t\tpyplot.xlabel(\"Time Steps\")\n\t\tpyplot.ylabel(\"Episodes\")\n\t\tpyplot.title(titles[title-1])\n\t\t# pyplot.show()\n\n\tif \"plots\" not in os.listdir():\n\t\tos.mkdir(\"plots\")\n\tpyplot.savefig(filename, bbox_inches='tight')", "title": "" }, { "docid": "1a4178a465f6c983fbed316ea6f2f43e", "score": "0.63240546", "text": "def graphs_plot():\n csv_files = (read_csv_input(args.targets_csv, args.isotopic_species),\n read_csv_input(args.decoys_csv, args.isotopic_species))\n\n if csv_files[0].empty:\n print(\"ERROR! CSV input file {} is empty. Execution terminated without generating any output\".format(\n args.targets_csv))\n sys.exit(1)\n\n name_input = filename_from_path(args.targets_csv)[8:-4]\n\n # Histogram with the distribution of ppm error offsets for MS1 and MS2 matches\n plt.subplot(2, 1, 1)\n stats.ppm_errors_histogram(csv_files, args.Sp_cutoff, 'MS1', args.match_file, args.info_box)\n\n plt.subplot(2, 1, 2)\n stats.ppm_errors_histogram(csv_files, args.Sp_cutoff, 'MS2', args.match_file, args.info_box)\n save_graph(\"Hist_ppm_offset_{}.png\".format(name_input))\n plt.close()\n\n # Histogram with the distribution of the Sp scores for targets/decoys and top target/decoys\n plt.subplot(2, 1, 1)\n stats.hist_Sp(csv_files, args.lengths, args.Sp_cutoff, args.only_targets_with_decoys, args.info_box)\n\n plt.subplot(2, 1, 2)\n stats.hist_top_Sp(csv_files, args.lengths, args.Sp_cutoff, args.only_targets_with_decoys, args.info_box)\n plt.subplots_adjust(hspace=0.25)\n save_graph(\"Hist_Sp_{}.png\".format(name_input))\n plt.close()\n\n # Histogram with the distribution of the dSp values for the second best targets\n stats.hist_second_dSp(csv_files, args.lengths, args.Sp_cutoff, args.only_targets_with_decoys, args.info_box)\n save_graph(\"Hist_dSp2_{}.png\".format(name_input))\n plt.close()\n\n # Scatter + boxplot of the Sp scores vs the matches ions length\n plt.subplot(2, 1, 1)\n stats.scatter_nts_vs_score(csv_files, 'Sp', args.y_min, args.y_max, args.lengths, args.Sp_cutoff, args.info_box,\n args.only_targets_with_decoys)\n plt.subplot(2, 1, 2)\n stats.box_nts_vs_score(csv_files, 'Sp', args.y_min, args.y_max, args.lengths, args.Sp_cutoff,\n args.only_targets_with_decoys)\n\n save_graph(\"Scatterbox_Sp_length_{}.png\".format(name_input))\n plt.close()\n\n if args.scatterbox_lengths_z == 'y':\n # Scatter + boxplot of the Sp scores vs the matches ions length grouped by charge\n try:\n plt.subplot(2, 1, 1)\n stats.scatter_nts_z_vs_score(csv_files, 'Sp', args.y_min, args.y_max, args.lengths, args.Sp_cutoff,\n args.only_targets_with_decoys)\n plt.subplot(2, 1, 2)\n stats.box_nts_z_vs_score(csv_files, 'Sp', args.y_min, args.y_max, args.lengths, args.Sp_cutoff,\n args.only_targets_with_decoys)\n save_graph(\"Scatterbox_Sp_length_z_{}.png\".format(name_input))\n plt.close()\n\n except:\n print('Scatter + boxplots of Sp scores of matches vs length grouped by charge could not be produced. 
'\n 'This is probably due to the parameters chosen')\n\n # Additional graphs are plotted only if the decoys csv file is not empty\n if not csv_files[1].empty:\n # Scatter plot of Sp of targets vs Sp of decoys of the given targets\n stats.scatter_Sp_vs_Spdecoy(csv_files, args.lengths, args.Sp_cutoff, args.info_box)\n save_graph(\"Scatter_Sp_Spdecoy_{}.png\".format(name_input))\n plt.close()\n\n # Scatter plot of Sp of targets vs Sp of decoys of the given targets, divided by nucleotide\n # lengths of precursors\n stats.scatter_Sp_vs_Spdecoy(csv_files, ['analysis'], args.Sp_cutoff, args.info_box)\n save_graph(\"Scatter_Sp_Spdecoy_bylength_{}.png\".format(name_input))\n plt.close()\n\n if args.scatterplot_Sp_vs_dSp == 'y':\n # Scatter with the dSp scores vs Sp scores for targets and decoys. FDR and percentile lines are optional\n stats.scatter_dSp_vs_Sp(csv_files, args.lengths, args.Sp_cutoff, args.info_box,\n args.only_targets_with_decoys)\n save_graph(\"Scatter_dSp_Sp_{}.png\".format(name_input))\n plt.close()\n\n # Edit the FDR table if additional targets (without decoys) are used in the plots\n if args.only_targets_with_decoys == 'n':\n stats.FDR_update(csv_files, args.lengths, args.Sp_cutoff, name_input)", "title": "" }, { "docid": "b276dfe70ed4b3f7406c9cb77d3f31eb", "score": "0.6310977", "text": "def create_experiment_results_plot(title, parameter, directory, log=False):\n fullpath = os.path.join(directory, 'per_epoch_stats.csv')\n df = pd.read_csv(filepath_or_buffer=fullpath)\n epochs = df[\"epoch\"]\n train_param = df[\"train_{0}\".format(parameter)]\n val_param = df[\"val_{0}\".format(parameter)]\n\n if log:\n plt.semilogy(epochs, train_param, 'r', label='train')\n plt.semilogy(epochs, val_param, 'b', label='validation')\n plt.ylabel(parameter + \" semilog\")\n axes = plt.gca()\n axes.set_ylim([10 ** -3, 10 ** 1])\n else:\n plt.plot(epochs, train_param, 'r', label='train')\n plt.plot(epochs, val_param, 'b', label='validation')\n plt.ylabel(parameter)\n axes = plt.gca()\n axes.set_ylim([0.1, 0.95])\n plt.xlabel('Epochs')\n\n plt.title(title)\n plt.legend()\n file_name = (os.path.join(directory, (title + parameter + \".png\")))\n plt.savefig(file_name)\n\n plt.close()\n return file_name", "title": "" }, { "docid": "6d8b2cb1779a4513cdc12f2a7af21269", "score": "0.63085884", "text": "def plot_raw(datasize):\n for test in TESTS:\n for datastore in DATASTORES:\n if datastore_benchmarked(datasize, datastore):\n if XKCD_STYLE:\n plt.xkcd()\n plt.figure()\n plt.title(datastore + ':' + test)\n plt.xlabel('Iteration')\n plt.ylabel('time [ns]')\n values = read_raw_values(datasize, datastore, test)\n if len(values) > 0:\n plt.plot(values)\n plt.savefig(str(datasize) + '/raw_' + datastore + '_' + test + '.png')\n plt.close()", "title": "" }, { "docid": "80f3024e68537e3dadbcab0dd9407fd9", "score": "0.6306506", "text": "def plot_results(models,\n data,\n batch_size=128,\n model_name=\"vae_mnist\"):\n encoder, decoder = models\n x_test, y_test = data\n os.makedirs(model_name, exist_ok=True)\n \n for i in range(5):\n filename = os.path.join('../',model_name, 'decode_result_' + str(i) +'.png')\n filename_input = os.path.join('../',model_name, 'decode_input_' + str(i) +'.png')\n test_img = np.reshape(x_test[i], [-1, x_test[i].shape[0], x_test[i].shape[1], x_test[i].shape[2]])\n x_decoded = decoder.predict(encoder.predict(test_img)[2])\n print(x_decoded.shape)\n x_decoded = np.reshape(x_decoded, [-1, x_decoded.shape[1], x_decoded.shape[2]]) \n test_img = np.reshape(y_test[i], [-1, x_test[i].shape[0], 
x_test[i].shape[1]])\n plt.imsave(filename_input, test_img[0])\n plt.imsave(filename, x_decoded[0])\n \n print(filename_input, filename)\n print(test_img[0].shape, x_decoded[0].shape)", "title": "" } ]
8d4471b9ada82bb05125e270d96b5f67
Load config from file, overwriting current contents.
[ { "docid": "413045953b29f344c28ecd358539afb4", "score": "0.0", "text": "def load(self):\r\n try:\r\n self.loading = True\r\n if os.path.exists(self.filename):\r\n text = open(self.filename).read()\r\n obj = json_decode(text)\r\n for key in obj:\r\n self[key] = obj[key]\r\n self.do_validate()\r\n except ValidationError:\r\n raise\r\n except Exception:\r\n logger.warning('failed to load from config file %s',self.filename,\r\n exc_info=True)\r\n finally:\r\n self.loading = False", "title": "" } ]
[ { "docid": "13792ba4b1af4d88d4b7e93afd2d1326", "score": "0.75971365", "text": "def load_config(self, fname):\n\n self.config = config.Config(fname).config", "title": "" }, { "docid": "d68380d81829c94c3d3adc0387fc7d70", "score": "0.755855", "text": "def load(self):\n Debug(self, \".load()\", force=True)\n\n self._config = configparser.ConfigParser()\n\n self._config.read(self._filename)\n\n self.set_defaults()\n\n self._synced = True\n if self._changed_callback is not None:\n self._changed_callback()\n\n if not os.path.isfile(self._filename):\n # Save newly created default file\n self.save()", "title": "" }, { "docid": "98147c86e65d394e21ba66e9fbb7c6f5", "score": "0.7522815", "text": "def load(self):\n self.config.read(\"config.py\")\n pass", "title": "" }, { "docid": "ea40d054393293efde6f7a948cf167ca", "score": "0.74960685", "text": "def _load_config(self, filename):\n # Read entire file for metadata\n fh = open(filename, \"r\")\n self.file_contents = fh.read()\n\n # Replace !include directives with content\n config_dir = os.path.split(filename)[0]\n include_re = re.compile(r\"^(.*)!include\\s+(.*)$\", re.MULTILINE)\n\n def recursive_load(matchobj, path):\n first_spacing = matchobj.group(1)\n other_spacing = first_spacing.replace(\"-\", \" \")\n fname = os.path.join(path, matchobj.group(2).rstrip())\n new_path, _ = os.path.split(fname)\n new_path = os.path.realpath(new_path)\n text = \"\"\n with open(fname) as f:\n text = f.read()\n text = first_spacing + text\n text = text.replace(\n \"\\n\", \"\\n{}\".format(other_spacing), text.count(\"\\n\") - 1\n )\n return re.sub(\n include_re, lambda m: recursive_load(m, new_path), text\n )\n\n # def include_repl(matchobj):\n # first_spacing = matchobj.group(1)\n # other_spacing = first_spacing.replace(\"-\", \" \")\n # fname = os.path.join(config_dir, matchobj.group(2))\n # text = \"\"\n # with open(fname) as f:\n # text = f.read()\n # text = first_spacing + text\n # text = text.replace(\n # \"\\n\", \"\\n{}\".format(other_spacing), text.count(\"\\n\") - 1\n # )\n # return text\n\n self.file_contents = re.sub(\n include_re,\n lambda m: recursive_load(m, config_dir),\n self.file_contents,\n )\n # Read in dictionary\n self.config = self.__ordered_load(self.file_contents)\n\n # Convert functions of other params to true expressions\n for k in self.config.keys():\n self.config[k] = YamlConfig.__convert_key(self.config[k])\n\n fh.close()\n\n # Load core configuration\n return self.config", "title": "" }, { "docid": "404531a154d763c7a098ad55111c83e5", "score": "0.7470208", "text": "def load_config(self):\n conf_file = open(self.config_file, 'r')\n self.config = yaml.load(conf_file)\n conf_file.close()", "title": "" }, { "docid": "d5382f7cdd5f4a4e98a1dd16cb181244", "score": "0.7428611", "text": "def load_from(self, path):\n self.config.read(path)", "title": "" }, { "docid": "31e3ae969b50a92e512b00962c6cf616", "score": "0.7419602", "text": "def load_config(config):\n\n CONFIG_FILE.update(config)", "title": "" }, { "docid": "d4a2e22d04317fa4c0ae97f85a7b58a4", "score": "0.7278958", "text": "def _loadConfig(self):\n\n logger.info(\"Loading config from %s\" % self.filePath)\n self.file = open(self.filePath, 'r')", "title": "" }, { "docid": "3dcf7edc06a5e104ac32e83a5730017f", "score": "0.7276047", "text": "def reload(self):\n self.read(self.config_file)", "title": "" }, { "docid": "96bbbfa8895f0063c77a54eb53e89832", "score": "0.72527945", "text": "def file_config(self, filename: str):\n with open(self.root_path() + filename, 'rt', encoding=\"utf-8\") as 
ymlfile:\n self.__config = yaml.safe_load(ymlfile)", "title": "" }, { "docid": "43189137c943bb5365ab89218b532299", "score": "0.72063315", "text": "def load(self):\n try:\n f = open(self.file_path, 'r')\n self.config = json.loads(f.read())\n f.close()\n except IOError:\n self.config = {}", "title": "" }, { "docid": "d4bed0c248d76a7cca9433339ae26235", "score": "0.71869534", "text": "def load_config_yaml(self) -> None:\n\n with open(self.config_file, \"r\") as f:\n self.config = yaml.safe_load(f)", "title": "" }, { "docid": "72a8c32e2936c185c5f94096acb420bf", "score": "0.7172325", "text": "def load_config_file(self, config):\n self.parse(config)\n self.initialized = True", "title": "" }, { "docid": "62e0c2ad6e2a65ed75667ab8914f504a", "score": "0.7148298", "text": "def load(self, config_file):\n data = json.load(open(config_file, 'r'))\n self.__dict__.update(data)\n\n # if the attrs were set and not empty strings set the config as loaded.\n if self.settingsfile and self.basedir:\n self.loaded = True", "title": "" }, { "docid": "41e498592bd2ab6a0e28f6803ddb9c4f", "score": "0.7146188", "text": "def init_config_file(self):\n self.config = YamlConfig(self.config_path)\n self.config.loadConfig()\n self.config = self.config.parseConfig()", "title": "" }, { "docid": "79a2287c229ac4e2b2502b0c8154c901", "score": "0.7138202", "text": "def load(self):\n if not self.file:\n raise ValueError(\"No configuration file configured\")\n try:\n reader = ConfigReader()\n with open(self.file, \"r\", encoding=\"utf-8\") as f:\n reader.read_file(f)\n for section, settings in self.settings.items():\n for key, setting in settings.items():\n try:\n setting.validate(reader)\n except ValueError as e:\n value = reader.get(section, key, fallback='(undefined)')\n logger.warning(\n \"config key '{}' in section '{}' has the invalid configuration value '{}': {}\".format(\n key, section, value, str(e)\n ))\n except KeyError as e:\n logger.warning(\"config key '{}' in section '{}' needs to be set\".format(key, section))\n self.reader = reader\n except FileNotFoundError as e:\n pass", "title": "" }, { "docid": "2565473e20e0420d6e7c7bc868f5c768", "score": "0.71309835", "text": "def load_config(filename):\n AS[\"config\"] = load_yaml_file(filename)", "title": "" }, { "docid": "55edb76374f1ef97695f7d9a37fcbdbb", "score": "0.71037996", "text": "def read_config(self, ):\n with open(self.config_file) as f:\n self._config_data = json.load(f)", "title": "" }, { "docid": "06eebd19e049a2979ac5086792ef4935", "score": "0.7073353", "text": "def _load_configurations(self):\n with open(self.config_file) as f:\n configs = f.read()\n config = ConfigParser.RawConfigParser(allow_no_value=True)\n config.readfp(io.BytesIO(configs))\n self.config = config\n #\n self.cert_file = self.config.get(\"cert-paths\", \"cert_file\")", "title": "" }, { "docid": "01623a5fd90754244c7b90fc38db13da", "score": "0.70708525", "text": "def load_config(self, filename):\n\n # Create parser for config file\n parser = configparser.SafeConfigParser(self.defaults)\n parser.read(filename)\n\n # Get values from parser\n for key in self.defaults:\n setattr(self, key, parser.get(\"DEFAULT\", key))", "title": "" }, { "docid": "6168d64065420189f23366dc82588d90", "score": "0.7058346", "text": "def load(self, config_path: str):\n config_path = os.path.abspath(config_path)\n if os.path.isfile(config_path):\n try:\n with open(config_path, 'r') as f:\n self._config: dict = self.load_dict(f)\n logger.info(f\"Config file '{config_path}' was successfully 
loaded.\\nConfiguration:\\n{self}.\")\n except Exception as e:\n logger.error(f\"Can not load config file '{config_path}. {e}.\")\n else:\n logger.warning(f\"Can not load config file '{config_path}'. File does not exist. \"\n f\"Creating config file with default configuration.\")\n self.save(config_path)", "title": "" }, { "docid": "a5888e9e594618093c5b08b0a91cfe72", "score": "0.70570314", "text": "def load_from_file(self, path: str = 'config.yml') -> None:\n\n with open(path, 'r') as stream:\n self._set_values(yaml.load(stream, Loader=yaml.SafeLoader))", "title": "" }, { "docid": "3f8f503c9fe190022429de38f7d1eaf5", "score": "0.70513093", "text": "def __load_configuration(self):\n\n with open(self.configuration, 'r') as f:\n self.configuration_dict = yaml.safe_load(f)", "title": "" }, { "docid": "83d9dda26c08ef892673a90987634dff", "score": "0.7043932", "text": "def enhance_configuration(self):\n config_file = self.templ[\"config_path\"]\n if os.path.exists(config_file):\n with open(config_file, 'r') as f:\n self.templ.update(yaml.safe_load(f))", "title": "" }, { "docid": "674636f827fcbe4a28fe0f223bc3b543", "score": "0.7037226", "text": "def load_configuration_file(self, file_name: str):\n if not file_name.endswith(\".yaml\"):\n file_name += \".yaml\"\n with open(file_name) as file:\n configuration_values = yaml.safe_load(file)\n self.values_dict.update(configuration_values)", "title": "" }, { "docid": "4e21e531ad734e29bd90a25beb895d18", "score": "0.7030441", "text": "def load_config(self, conf_file):\n self._data = ConfigParser()\n self._data.read(conf_file)", "title": "" }, { "docid": "7b3427781e9eec53271ebfd168073784", "score": "0.70159227", "text": "def _load_config(self, conf_file: str):\n self._data = ConfigParser()\n self._data.read(conf_file)", "title": "" }, { "docid": "7220fdb51d71b4d4eef6f2d8b09e0a40", "score": "0.6994145", "text": "def read(self):\n if not os.path.exists(self.path):\n raise Exception(\"Config file `%s` does not exist.\" % self.path)\n config = ConfigParser.SafeConfigParser()\n config.read(self.path)\n self.config = config", "title": "" }, { "docid": "a4521d8097a461d1efb2d4543658b4a7", "score": "0.6993582", "text": "def load_config(self, filename=None):\n if filename and not os.path.isfile(filename):\n self.logger.error('Config file not found: {}, falling back to default'.format(filename))\n filename = None\n\n if filename is None:\n filename = os.path.join(Battery_Testing_Software.labphew.package_path, 'core', 'defaults', 'analog_discovery_2_config.yml')\n with open(filename, 'r') as f:\n self.properties.update(yaml.safe_load(f))\n self.properties['config_file'] = filename", "title": "" }, { "docid": "d97e68b6d6531e42cd2b71bc88b8adca", "score": "0.69875705", "text": "def load_config(self):\r\n logging.info('Opening config file '+args.config)\r\n config = \"\"\r\n for line in open(args.config, 'r'):\r\n if line.find('//') == -1:\r\n config += line\r\n config = re.sub(\"/\\*(.|[\\r\\n])*?\\*/\", \"\", config)\r\n self.config = json.loads(config)\r\n logging.info('Config loaded')", "title": "" }, { "docid": "ff9167c554d875987271ad667be89116", "score": "0.69774526", "text": "def load(self):\n self.config = ConfigParser.ConfigParser()\n if not os.path.exists(self.file_name):\n for option in Preferences.params:\n self.set(option)\n else:\n pref_file = open(self.file_name)\n self.config.readfp(pref_file)\n pref_file.close()", "title": "" }, { "docid": "534d4df4bed85c2dca1ccb78c83aeaa6", "score": "0.697129", "text": "def load_from_file(self, path):\n with open(path) as 
file:\n configs = yaml.load_all(file, Loader=yaml.SafeLoader)\n for conf in configs:\n for name, value in conf.items():\n self.setValue(name, value)", "title": "" }, { "docid": "7e4336f2bd94fc49d53c646851d1d0dc", "score": "0.69520324", "text": "def load_config_file() -> None:\n\n global config\n\n try:\n with open('IceScraper.json', 'r') as f:\n config.update(json.load(f))\n\n except FileNotFoundError:\n\n print('ERROR:', filename, 'not found.')\n sys.exit()\n\n except PermissionError:\n\n print('ERROR: You do not have sufficient permissions to read', filename)\n sys.exit()\n\n # If no proxy servers have been defined, set the proxies flag to false\n\n if 'proxies' not in config:\n config.update({'proxies':{'enable':False}})", "title": "" }, { "docid": "4aeb2a72f861116f1b25d762a74ae246", "score": "0.6938803", "text": "def init_config(self):\n with open(self.config_file, 'r') as fh:\n self.config = json.load(fh, object_pairs_hook=OrderedDict)\n logger.info('Config loaded: %s' % os.path.abspath(self.config_file))", "title": "" }, { "docid": "4a8fb8fb6fce36cb8a7c1bd70c299be5", "score": "0.6908839", "text": "def read_config(self):\n config = configparser.ConfigParser()\n config.read(self._filename)\n self.config = config._sections", "title": "" }, { "docid": "8f231961248b31fd8cc78dc459ace014", "score": "0.6897819", "text": "def read(self):\r\n self._config.read(self.filename)", "title": "" }, { "docid": "6d25ae610c9b51e65e193ed73ca1653b", "score": "0.68940836", "text": "def load_config(self):\n\n try:\n fd = open(cfg.CfgSaveFile, 'r')\n except IOError:\n return\n\n config = ConfigParser.RawConfigParser()\n config.read(cfg.CfgSaveFile)\n\n cfg.GenSaveDir = config.get('Config', 'GenSaveDir')\n log('load_config: cfg.GenSaveDir=%s' % cfg.GenSaveDir)\n\n fd.close()", "title": "" }, { "docid": "be67e6ac3b04b430072b6fb874bbd767", "score": "0.68758845", "text": "def read_file(self):\n\n try:\n self.config.readfp(open(self.CONFIG_FILE_PATH))\n except IOError:\n print('setting up config.ini file.')\n copyfile(self.CONFIG_TEMPLATE_PATH, self.CONFIG_FILE_PATH)\n self.config.readfp(open(self.CONFIG_FILE_PATH))", "title": "" }, { "docid": "ea169db8c0e749ee2f0794777f8462f5", "score": "0.68706083", "text": "def _load_config(self, path='./config.json'):\n config_json = open(path).read()\n self.config = json.loads(config_json)", "title": "" }, { "docid": "6987e59d0908266900090b03481ed499", "score": "0.6843048", "text": "def update_file():\n with open(CONFIG_PATH, \"w\") as configfile:\n config.write(configfile)", "title": "" }, { "docid": "82cb7a4bdc3a74606d70ad58d46a09f8", "score": "0.6841327", "text": "def _load_config_from_file(self, path_to_config: str) -> None:\n\t\tself.config = {\n\t\t\t\"username\": None,\n\t\t\t\"password\": None,\n\t\t\t\"apikey\": None,\n\t\t\t\"steamid\": None,\n\t\t \"shared_secret\": None,\n\t\t \"identity_secret\": None,\n\t\t \"proxy\": None\n\t\t}\n\t\twith open(path_to_config, \"r\") as file:\n\t\t\t_json_data = json.load(file)\n\t\t\tfor key, value in _json_data.items():\n\t\t\t\tif key not in self.config:\n\t\t\t\t\traise UnknownConfigKeyError(f\"Check the spelling of keys in the configuration file\")\n\t\t\t\tself.config[key] = value", "title": "" }, { "docid": "d240bceee8c2feada95d3a7dbfe398bb", "score": "0.68168527", "text": "def load_configuration(self, filename):\n\n config = SafeConfigParser()\n config.read([\n os.path.join('/etc/', filename),\n filename,\n ])\n if not config.sections():\n raise IOError('Cannot open config file.')\n return config", "title": "" }, { 
"docid": "21020966a2928181e1d577dadd756f19", "score": "0.6799133", "text": "def load_from_file(self, file: Union[str, Path]):\n self.settings = self._read_mp_config(file)\n self.current_file = file", "title": "" }, { "docid": "6b7dd825ac9ff022a40301b87bf95b9e", "score": "0.67966425", "text": "def load_config():\n global conf\n try:\n with open(conf_file, 'r') as f:\n conf = yaml.load(f)\n except Exception as e:\n logger.error(e)", "title": "" }, { "docid": "6d88b8ca846c97ffb71c9eeef9564732", "score": "0.6785268", "text": "def _load(self):\n\t\tself._load_dir_stack = []\n\t\tself.config_contents_list = []\n\t\tself.config_name_list = []\n\n\t\tconfig_fname = ConfigParser._default_values['general']['config_file']\n\t\tif config_fname != '':\n\t\t\tif not os.path.isfile(config_fname):\n\t\t\t\tprint(\"Config file '{}' not found\".format(config_fname), file=sys.stderr)\n\t\t\t\tsys.exit(1)\n\n\t\t\tself._load_dir_stack.append(os.path.dirname(config_fname))\n\t\t\ttry:\n\t\t\t\tself.read(config_fname)\n\t\t\tfinally:\n\t\t\t\tself._load_dir_stack.pop()\n\n\t\tself.add_defaults(ConfigParser._default_values)\n\n\t\tincludes = self.get('general', 'include', fallback='')\n\n\t\tself._included = set()\n\t\tself._traverse_includes(includes, this_dir=os.path.dirname(config_fname))\n\n\t\tdel self._load_dir_stack", "title": "" }, { "docid": "6d7b5ed7c7843957e689c6bc906216c8", "score": "0.67834663", "text": "def _load_config(self):\n\n # First load the standard entries from file\n stream_config = self._load_config_from_file()\n # Then override or add new entries with the supplied entries.\n if self._config is not None:\n stream_config.update(self._config)\n\n return stream_config", "title": "" }, { "docid": "03c6a17d2593dd257c5e70e53659e1dc", "score": "0.6782358", "text": "def loadConfig(self):\n logging.debug(\"Trying to load configuration file from {}\".format(self.configFilename))\n if len(self.config.read(self.configFilename)) == 0:\n logging.warning(\"No configuration file in path specified. 
Creating default configuration file.\")\n self.setDefaultConfig()\n self.saveConfig()", "title": "" }, { "docid": "db96755e29d8da3642f422b077b177dd", "score": "0.67338645", "text": "def load_config(self):\n self.config = configparser.ConfigParser()\n self.fconfig = os.path.dirname(os.path.abspath(__file__)) + \\\n '/data/cmdaq_gui.cfg'\n if os.path.exists(self.fconfig):\n try:\n self.config.read(self.fconfig)\n except:\n print('Corrupted config file: {}'.format(self.fconfig))\n print('This file will be overwritten when QUIT is clicked.\\n')", "title": "" }, { "docid": "a646c40f3cf4b7d13a488d6b572f7fdb", "score": "0.6699347", "text": "def LoadConfig(self):\n self.m_local_config.load_yaml_config()", "title": "" }, { "docid": "be6836ea1b7ec25f76c20b20fcec986e", "score": "0.669483", "text": "def load_config(self):\n config = dict([(key, value) for key, value in iteritems(self.options)\n if key in self.cfg.settings and value is not None])\n for key, value in iteritems(config):\n self.cfg.set(key.lower(), value)", "title": "" }, { "docid": "da59021a3652fccf8053dd2fc53681cc", "score": "0.6686956", "text": "def load_config(path):\n config = get_default_config()\n\n if not os.path.exists(path):\n print (\"WARN: no config file could be found at %s\" % path)\n else:\n try:\n with open(path, \"r\") as f:\n config_file = yaml.safe_load(f)\n config = merge_config(config, config_file if config_file else {})\n except Exception as e:\n print (\"WARN: invalid configuration file at %s: %s\" % (path, e))\n\n return config", "title": "" }, { "docid": "00256126129d21f58d2f349161f0dc95", "score": "0.6667671", "text": "def load_config_file(self, **kwargs: Any) -> None:\n if self.config_file:\n paths = [os.path.abspath(\"{}.py\".format(self.config_file))]\n else:\n config_dir = self.config_file_paths.copy()\n config_dir.insert(0, os.getcwd())\n paths = [os.path.join(x, \"{}.py\".format(self.config_file_name)) for x in config_dir]\n\n if not any(os.path.exists(x) for x in paths):\n self.log.warning(\"No nbgrader_config.py file found (rerun with --debug to see where nbgrader is looking)\")\n\n super(NbGrader, self).load_config_file(**kwargs)\n\n # Load also config from current working directory\n super(JupyterApp, self).load_config_file(self.config_file_name, os.getcwd())", "title": "" }, { "docid": "b9cb5dd4728c211e457a28e50a6fe007", "score": "0.6665637", "text": "def loadConfig(self, filename: str = \"configClient.json\"):\n if not os.path.exists(filename):\n print(\"No config file found, setting default config\")\n self.config = {\"serverAddress\": \"localhost\", \"serverPort\": 50051}\n else:\n with open(filename) as configFile:\n self.config = json.load(configFile)", "title": "" }, { "docid": "2429986bf636804449d71a2f1a956058", "score": "0.6665536", "text": "def load():\n config = ConfigParser.RawConfigParser()\n # keep file case sensitive\n config.optionxform = str\n config.read(Config.RELATIVE_CONFIG_FILE_PATH)\n for section in config.sections():\n for key in config.options(section):\n Config.ATTRIBUTES[key] = config.get(section, key)\n\n # set output file path\n for key in config.options(\"output\"):\n if key == \"output_folder\":\n # create the folder\n if not os.path.exists(Config.ATTRIBUTES[\"output_folder\"]):\n os.makedirs(Config.ATTRIBUTES[\"output_folder\"])\n else:\n Config.ATTRIBUTES[key] = Config.ATTRIBUTES[\"output_folder\"] + \"/\" + Config.ATTRIBUTES[key]", "title": "" }, { "docid": "24338ad72a16ee9dee37e70c98c74ce9", "score": "0.66601956", "text": "def loadConfig(self, confFile):\n 
self.confFile = confFile\n\n try:\n namespace = {}\n execfile(self.confFile, namespace)\n if not namespace.has_key(\"config\"):\n self.rawConfig = json.loads(open(self.confFile, 'r').read())\n else:\n self.rawConfig = namespace[\"config\"]\n except ValueError, e:\n raise SystemExit(e)\n\n self.config()", "title": "" }, { "docid": "0a27c8703348bc2bf3cb60e24a994e35", "score": "0.66555065", "text": "def load_config(fname):\n with open(fname) as f:\n yml_cfg = AttrDict(yaml.load(f.read(), Loader=yaml.Loader))\n _merge_cfg_a_to_b(yml_cfg, __C)", "title": "" }, { "docid": "e9eb236a4c5d628b37a0275c34e8a4a1", "score": "0.6648139", "text": "def _load_conf():\n\n args = _read_args()\n\n if _ENV_FILE_CONFIG_NAME in os.environ:\n cfg_file = os.environ[_ENV_FILE_CONFIG_NAME]\n else:\n cfg_file = args[_ARG_FILE_CONFIG_NAME] if _ARG_FILE_CONFIG_NAME in args else None\n\n if cfg_file is None:\n raise LostConfigError(\"Do you forget give config file? Try to do it by \"\n f\"{_ENV_FILE_CONFIG_NAME} environmet or --{_ARG_FILE_CONFIG_NAME} argument\")\n\n if not os.path.exists(cfg_file):\n raise FileNotFoundError(cfg_file)\n\n global _cached_config\n with open(cfg_file, \"rt\") as f:\n _cached_config = yaml.load(f, Loader=yaml.BaseLoader)\n\n _fill_extra_pros(args)\n\n return True", "title": "" }, { "docid": "f27a62a8b947c1402c47c489f8354980", "score": "0.6636167", "text": "def load_config(config):\n with open(config) as d_file:\n data = json.load(d_file)\n\n for key in data:\n Configuration.set(key, data[key])", "title": "" }, { "docid": "b0c928a933d417ded74b1d72f2b63ae2", "score": "0.6603238", "text": "def load(self):\n if not self.loaded:\n self.values = configobj.ConfigObj({}, **self.DEFAULT_CONFIG_OPTS)\n for path in self.locations():\n try:\n part = configobj.ConfigObj(infile=path, **self.DEFAULT_CONFIG_OPTS)\n except configobj.ConfigObjError as cause:\n raise LoggedFailure(\"Error in file '{path}': {cause}\".format(path=pretty_path(path), cause=cause))\n self.values.merge(part)\n self.loaded = True\n return self.values", "title": "" }, { "docid": "d3af94703d1d65a237a57162f0030f12", "score": "0.65823513", "text": "def load_configuration(self):\n with open(self.get_config_file(), 'r') as cfg_file:\n config = json.load(cfg_file)\n\n config = self._parse_configuration(config)\n\n for prop in config:\n if prop in self.CONFIG_PARAMS:\n setattr(self, prop, config[prop])", "title": "" }, { "docid": "9d91c291d3de03990f28e87f55d20148", "score": "0.6546881", "text": "def readConfig(self):\n ##Open ConfigFile\n self.config=ConfigObj(infile='sims/tcpwater/config', unrepr=True)", "title": "" }, { "docid": "2a7a71783340ec3adfc29b84eb1e275b", "score": "0.65422493", "text": "def from_file(cls, filename):\n\n local_config = cls.get_local_config(filename)\n sdk_testnet_config = cls.get_sdk_testnet_config()\n final_config = merge_configs(sdk_testnet_config, local_config)\n return cls(final_config)", "title": "" }, { "docid": "bbad5b1e0c699715448b70294c210fbb", "score": "0.6537636", "text": "def load_config(self):\n pass", "title": "" }, { "docid": "bbad5b1e0c699715448b70294c210fbb", "score": "0.6537636", "text": "def load_config(self):\n pass", "title": "" }, { "docid": "bbad5b1e0c699715448b70294c210fbb", "score": "0.6537636", "text": "def load_config(self):\n pass", "title": "" }, { "docid": "a5fe7ba87b980d0c58be48c767cbc8ed", "score": "0.6534869", "text": "def load_config_file() -> ConfigParser:\n global CONFIGURATION_FILE # pylint: disable=global-statement\n if not CONFIGURATION_FILE:\n CONFIGURATION_FILE = 
ConfigParser()\n CONFIGURATION_FILE.read([\n PATH_TO_DEFAULT_CONFIGURATION_FILE,\n PATH_TO_OVERRIDE_CONFIGURATION_FILE\n ], \"utf-8\")\n\n return CONFIGURATION_FILE", "title": "" }, { "docid": "4f141b988efb14e1fa33a910f04b7b74", "score": "0.6531925", "text": "def _reload(self):\n self._last_load = datetime.utcnow()\n if not os.path.exists(self._filename):\n LOG.warning(\"%s not found\", self._filename)\n return\n stream = open(self._filename)\n try:\n if self._filename.endswith(\".yaml\"):\n data = yaml.load(stream)\n self.clear()\n self.update(data)\n elif self._filename.endswith(\".json\"):\n data = json.load(stream)\n self.clear()\n self.update(data)\n else:\n raise Exception(\"Unsupported file format! \" + self._filename)\n except:\n LOG.error(\"Bad config format! \" + self._filename)", "title": "" }, { "docid": "b8c866bd6c71388842bd4df757398185", "score": "0.65231466", "text": "def load_configuration_file(file_name):\n global _settings, _configuration_file_location\n\n _configuration_file_location = pathlib.Path(file_name).resolve()\n\n try:\n with open(file_name, 'r') as configuration_file:\n _loaded_configuration = load(configuration_file, Loader=Loader)\n if _loaded_configuration:\n _update(_settings, _loaded_configuration)\n except FileNotFoundError:\n pass\n\n return munch.Munch.fromDict(_settings)", "title": "" }, { "docid": "a2448a9e409857cc5ab105fdbf41cd17", "score": "0.6514413", "text": "def load_config() -> RBToolsConfig:\n config = RBToolsConfig()\n\n for filename in reversed(get_config_paths()):\n config.merge(parse_config_file(filename))\n\n return config", "title": "" }, { "docid": "a8c9926959fe043af6bb7aa7dcecf72f", "score": "0.6511394", "text": "def load_config(self, config_file, file_config):\n\n config = copy.deepcopy(self.defaults)\n try:\n config.update(file_config['main'])\n except KeyError:\n self.log.error(\"Config file: %s Missing main section\", config_file)\n raise ConfigError\n # Check important sections existence\n try:\n config['promotion_steps_map'] = file_config['promote_from']\n except KeyError:\n self.log.error(\"Missing promotion_from section\")\n raise ConfigError\n for target_name in config['promotion_steps_map']:\n try:\n config['promotion_criteria_map'][target_name] = \\\n file_config[target_name]\n except KeyError:\n self.log.error(\"Missing criteria section for target %s\",\n target_name)\n raise ConfigError\n\n # This is done also in the child class, in expand config, but it's\n # really necessary to expand this even in the base class\n config['log_file'] = os.path.expanduser(config['log_file'])\n\n return config", "title": "" }, { "docid": "dfa7e4037db8a769b4307a4c34ef0e68", "score": "0.6492623", "text": "def load_config_file(self):\n config_paths = [\"./configuration.yaml\",\n os.path.join(os.path.expanduser(\"~\"),\n \".opsdroidaudio/configuration.yaml\"),\n \"/etc/opsdroidaudio/configuration.yaml\"]\n config_path = \"\"\n for possible_path in config_paths:\n if not os.path.isfile(possible_path):\n _LOGGER.debug(\"Config file %s not found\", possible_path)\n else:\n config_path = possible_path\n break\n\n if not config_path:\n self.critical(\"No configuration files found\", 1)\n\n try:\n with open(config_path, 'r') as stream:\n _LOGGER.info(\"Loaded config from %s\", config_path)\n return yaml.load(stream)\n except yaml.YAMLError as error:\n self.critical(error, 1)\n except FileNotFoundError as error:\n self.critical(str(error), 1)", "title": "" }, { "docid": "91efdeb6a414e844d6752c3ac9ee6914", "score": "0.64920413", "text": "def 
load_config(fname):\n with open(fname, 'r') as f:\n config = yaml.load(f)\n return config", "title": "" }, { "docid": "c7351860b837688fa6dc9fb506ad18cd", "score": "0.64809066", "text": "def read_config_from_file(self, config_filename):\n with open(config_filename) as filep:\n self.config = json.load(filep)", "title": "" }, { "docid": "956e12894a400811dd43fa064ef662a2", "score": "0.64775366", "text": "def load_conf_file(self, file_path=\"config.json\"):\n\n try:\n config_dic = json.load(open(file_path))\n\n except FileNotFoundError:\n\n print(\"You do not have a config file yet\")\n print(\"a temporary config file was generated\")\n print(\"See the documentation for generating it\")\n self.generate_conf()\n config_dic = json.load(open(file_path))\n\n self.load_reference(config_dic[\"references\"])\n self.load_institution(config_dic[\"institution\"])\n self.load_instrument(config_dic[\"instrument_name\"])\n self.load_comments(config_dic[\"comments\"])\n self.load_site(config_dic[\"site_name\"])\n self.load_contact(config_dic[\"contact_person\"])\n self.load_email(config_dic[\"email\"])\n\n return self", "title": "" }, { "docid": "ed17d98f9abfee01af77002b506221ff", "score": "0.64708465", "text": "def load_settings_from_file(self):\n # check if the file exists\n if os.path.isfile(self.gen_abs_path_to_settings_file()):\n # load content from file\n f = open(self.gen_abs_path_to_settings_file(), 'r')\n loaded = f.read().strip()\n f.close()\n\n # and feed own variables with it\n self.feed_json(loaded)", "title": "" }, { "docid": "eab8d908da8df209811c88ffab33d41f", "score": "0.64563364", "text": "def load(self, config_path):\n logging.debug('Reading from config_path={0!r}'.format(config_path))\n with open(config_path, 'rb') as load_f:# Read the config from file.\n config = yaml.safe_load(load_f)\n if config is None:\n logging.error('Could not load config from {0!r}'.format(config_path))\n return\n for key in config.keys():# Store values to class instance.\n setattr(self, key, config[key])# Sets self.key to config[key]\n logging.debug('Loaded config values: {0!r}'.format(config))\n return", "title": "" }, { "docid": "653c0aeca5d1afa922f833626310eadf", "score": "0.64509565", "text": "def load(self):\n file = \"config.json\"\n\n if os.path.isfile(file):\n try:\n config = json.load(open(file))\n\n self.networks = config[\"irc\"]\n self.api_keys = config[\"api_keys\"]\n self.metadata = config[\"metadata\"]\n self.modules = config[\"modules\"]\n\n self.logger.setTimestamp(self.getTimestampFormat())\n val = self._validate()\n self.logger.log(\"Configuration successfully loaded. 
Networks: {}, Warnings: {}.\\n\"\n .format(val[0], val[1]))\n except Exception as e:\n self.logger.error(\"An error occured while loading config.json:\\n{}\".format(str(e)))\n sys.exit(1)\n else:\n self.logger.error(\"Could not find configuration file config.json, did you configure the bot?\")\n sys.exit(1)", "title": "" }, { "docid": "c2fd7d15ab29110c508991e83101f9d0", "score": "0.64493054", "text": "def read_config():\n def __recursive_update(old, new):\n out = deepcopy(old)\n for k, v in new.items():\n if issubclass(type(v), dict):\n if k in old:\n out[k] = __recursive_update(old[k], v)\n else:\n out[k] = v\n else:\n out[k] = v\n\n return out\n\n for fpath in CONFIG_FILE_PATHS:\n if os.path.exists(fpath):\n data = munch.munchify(json.load(open(fpath, 'r')))\n\n # Our code expects a munch, so ensure that any regular dicts are converted\n return os.path.dirname(fpath), munch.munchify(__recursive_update(DEFAULT_CONFIG, data))\n\n raise FileNotFoundError('Configuration file not found')", "title": "" }, { "docid": "081537cc5ec54f2ed5149807bf106705", "score": "0.6447099", "text": "def LoadConfigFromFile(config_file=constants.CHROMEOS_CONFIG_FILE):\n json_string = osutils.ReadFile(config_file)\n return LoadConfigFromString(json_string)", "title": "" }, { "docid": "7712612104843003d272f08cb4b9d78b", "score": "0.6445202", "text": "def load_config(file_name):\n file_path = os.path.join(BASE_DIR, \"etc\", file_name)\n with open(file_path) as config:\n return json.load(config)", "title": "" }, { "docid": "a1444d4a9849bdb408c9c4928dcf23d5", "score": "0.6434692", "text": "def LoadConfig(config_file):\n config = {}\n global_dict = {\n 'Slave': SlaveConfig,\n }\n execfile(config_file, global_dict, config)\n return config", "title": "" }, { "docid": "5a5614c312ffe8b9a11ce2c393a418ad", "score": "0.64338136", "text": "def load(self, path):\n\n logger.debug(\"Config file path=%s\" % path)\n\n self.mtime = os.path.getmtime(path)\n logger.debug(\"Last modification time of %s was %s\" %\n (path, time.ctime(self.mtime)))\n\n self.filePath = path\n self._parseYaml()\n\n # Clear the existing list of sockets\n self.sockets = []\n\n logger.info(\"Found %d sockets in config\" % len(self.yaml))\n\n for socket in self.yaml:\n self.sockets.append(Socket(**socket))", "title": "" }, { "docid": "68401be0f37e3201fa027cc04bba7938", "score": "0.6430383", "text": "def load_settings(self, config_file, subdir='modes'):\n filepath = os.path.join(self.CURRENT_DIR, subdir, config_file)\n print \"WARNING: Loading %s\" % filepath\n if os.path.exists(filepath):\n with open(filepath, 'r') as jsonfile:\n self.config = json.loads(jsonfile.read())\n else:\n print \"ERROR: Specified config not found! 
Trying default!\" \n self.default_settings()", "title": "" }, { "docid": "a204691c68d4bcbcb9ea6cc237f350b0", "score": "0.642955", "text": "def load_conf_file():\n with open(CONF_FILE) as _fh:\n conf = json.load(_fh)\n if CONF_OVERRIDE_FILE:\n with open(CONF_OVERRIDE_FILE) as _fh:\n override = json.load(_fh)\n conf.update(override)\n conf = replace_conf_nulls(conf)\n try:\n conf['SECRET_KEY'] = b64decode(conf['SECRET_KEY'])\n except Error:\n print('Secret key not base64 encoded')\n except TypeError:\n print('Secret key is blank')\n return conf", "title": "" }, { "docid": "89e05cff8517a2b6674567d7424222e8", "score": "0.6423304", "text": "def load_config(config_file=\"config.json\"):\n return json.load(open(config_file))", "title": "" }, { "docid": "696963f88b6565e4bfd9cdc3782604dd", "score": "0.63935024", "text": "def cfg_from_file(filename):\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n\n _merge_a_into_b(yaml_cfg, __C)", "title": "" }, { "docid": "696963f88b6565e4bfd9cdc3782604dd", "score": "0.63935024", "text": "def cfg_from_file(filename):\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n\n _merge_a_into_b(yaml_cfg, __C)", "title": "" }, { "docid": "313a664a8c3f5ec6c8d268506d56fdf5", "score": "0.6391483", "text": "def load(self, filepath):\n # type: (str) -> None\n try:\n self._config.read(filepath)\n import ast\n self.connection.timeout = \\\n self._config.getint(\"Connection\", \"timeout\")\n self.connection.verify = \\\n self._config.getboolean(\"Connection\", \"verify\")\n self.connection.cert = \\\n self._config.get(\"Connection\", \"cert\")\n\n self.proxies.proxies = \\\n ast.literal_eval(self._config.get(\"Proxies\", \"proxies\"))\n self.proxies.use_env_settings = \\\n self._config.getboolean(\"Proxies\", \"env_settings\")\n\n self.redirect_policy.allow = \\\n self._config.getboolean(\"RedirectPolicy\", \"allow\")\n self.redirect_policy.max_redirects = \\\n self._config.getint(\"RedirectPolicy\", \"max_redirects\")\n\n except (ValueError, EnvironmentError, NoOptionError):\n error = \"Supplied config file incompatible.\"\n raise_with_traceback(ValueError, error)\n finally:\n self._clear_config()", "title": "" }, { "docid": "f12b420c3a3b731b2d628599f5b1a29e", "score": "0.6389122", "text": "def loadfile(self, filename):\n if not path.exists(filename):\n raise FileNotFoundError(filename)\n\n loadedyaml = yamlhelper.load(filename)\n if loadedyaml:\n self.merge(loadedyaml)", "title": "" }, { "docid": "7b643416305c3b17fa43fae71bac7a2a", "score": "0.6386808", "text": "def _load_cfg(self, cfg_path, sharing_path):\n if os.path.isfile(cfg_path):\n try:\n with open(cfg_path, 'r') as fo:\n loaded_config = OrderedDict()\n for k, v in json.load(fo).iteritems():\n loaded_config[k] = v\n except ValueError:\n logger.warning('\\nImpossible to read \"{0}\"!\\n'\n 'Config file overwrited and loaded with default configuration!\\n'.format(cfg_path))\n else:\n # Check that all the key in DEF_CONF are in loaded_config\n if not [True for k in Daemon.DEF_CONF if k not in loaded_config]:\n # In the case is all gone right we can update the CONFIG costant and return loaded_config\n Daemon.CONFIG_FILEPATH = cfg_path\n Daemon.CONFIG_DIR = os.path.dirname(cfg_path)\n return loaded_config\n logger.warning('\\nWarning \"{0}\" corrupted!\\n'\n 'Config file overwrited and loaded with default configuration!\\n'.format(cfg_path))\n else:\n logger.warning('\\nWarning \"{0}\" doesn\\'t exist!\\n'\n 'New config file created and loaded with default 
configuration!\\n'.format(cfg_path))\n return self._create_cfg(cfg_path, sharing_path)", "title": "" }, { "docid": "70d19f5c95e657aa98ead893faf23476", "score": "0.6385981", "text": "def manageConfig(self, config_file):\n self.parser = SafeConfigParser()\n self.parser.read(config_file)\n self.sensors = self.parser", "title": "" }, { "docid": "35b6a6425c22ea18f01b77aeb0889448", "score": "0.6385788", "text": "def loadFromConfig(self):\n\n try:\n config_file = open(self._sprintlyConfigPath, 'r')\n serialized_config = config_file.readline()\n config_file.close()\n self._config = json.loads(serialized_config)\n except:\n raise SprintlyException('Unable to read credentials from disk at %s' % self._sprintlyConfigPath)\n\n # validate version\n if 'version' not in self._config or self._config['version'] != CONFIG_VERSION:\n self.cprint('Your configuration needs to be updated. You will now be prompted to update it.', attr=YELLOW)\n self.updateConfig()", "title": "" }, { "docid": "161fbc337785afdd092d0f5937ce9447", "score": "0.6377704", "text": "def load(self):\n self.conf = {}\n for globbed in self.configurations:\n for config in glob.glob(globbed):\n with open(config) as f:\n for line in f.readlines():\n self.parse_line(line, self.conf)\n if self.is_sleeping():\n self.conf['directories'] = exclude_directories(\n self.conf['directories'], self.conf['sleep'])\n if 'sleep' in self.conf: del self.conf['sleep']", "title": "" }, { "docid": "8ec590a5baaedb6fab23a1e36f3c1c9a", "score": "0.6373317", "text": "def read_config(config_file_path):\n\tglobal config\n\tconfig = json.loads(open(config_file_path).read())", "title": "" }, { "docid": "9e4e835b1a59d775af7e380a95452903", "score": "0.6368848", "text": "def reload_():\n load_conf(True)", "title": "" }, { "docid": "f0cac551f3ed48d961a8ff6a6b48f2b2", "score": "0.63627", "text": "def load_config(self):", "title": "" }, { "docid": "8dce2db4c0dd5d9c92904e6cf00399a7", "score": "0.63626796", "text": "def load_properties(self, config_file=None, replace=False) -> None:\n if config_file is None:\n _path = self.__DEFAULT_CONFIG\n else:\n _path = Path(os.path.expanduser(config_file))\n if _path.exists() and _path.is_file():\n try:\n with closing(_path.open()) as ymlfile:\n cfg_dict = yaml.load(ymlfile)\n except IOError as e:\n raise IOError(\"The configuration file {} failed to open with: {}\".format(_path, e))\n try:\n self.add_to_root(cfg_dict, replace=replace)\n except TypeError:\n raise TypeError(\"The configuration file {} could not be loaded as a dict type\".format(_path))\n else:\n raise FileNotFoundError(\"The configuration file {} does not exist\".format(_path))", "title": "" }, { "docid": "8fd3543a80ebfbcfea79eb86372ac0bc", "score": "0.63609314", "text": "def load(self):\n with open(self.path, 'r') as config_fp:\n config_json = json.load(config_fp)\n for key, value in config_json.items():\n if key == 'source_dir':\n self.source_dir = value\n elif key in self._PATH_TRAVERSAL_ATTRS:\n setattr(self, key, set(value))\n else:\n raise ValueError('unknown config name: ' + key)", "title": "" }, { "docid": "03f24fa88b688a31720f6c887cca16e7", "score": "0.63550025", "text": "def cfg_from_file(filename):\n import yaml\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n\n _merge_a_into_b(yaml_cfg, __C)", "title": "" }, { "docid": "3bfb24901cbb22af302509a3d23112d6", "score": "0.63505113", "text": "def load_settings():\n global settings\n\n check_and_create_file()\n with open(filename, 'r') as f:\n settings = yaml.load(f.read(), Loader=yaml.FullLoader)", 
"title": "" }, { "docid": "ed683786e7220fa700429fc62e93543e", "score": "0.63499266", "text": "def __load_or_create(config_file):\n required_sections = [NiftyNetGlobalConfig.global_section]\n required_keys = {\n required_sections[0]: [NiftyNetGlobalConfig.home_key]\n }\n default_values = {\n required_sections[0]: {\n NiftyNetGlobalConfig.home_key: '~/niftynet'\n }\n }\n\n backup = False\n if isfile(config_file):\n try:\n config = ConfigParser()\n config.read(config_file)\n\n # check all required sections and keys present\n for required_section in required_sections:\n if required_section not in config:\n backup = True\n break\n\n for required_key in required_keys[required_section]:\n if required_key not in config[required_section]:\n backup = True\n break\n\n if backup:\n break\n\n except Error:\n backup = True\n\n if not backup: # loaded file contains all required\n # config options: so return\n return dict(config)\n\n config_dir, config_filename = split(config_file)\n if not isdir(config_dir):\n os.makedirs(config_dir)\n\n if backup: # config file exists, but does not contain all required\n # config opts: so backup not to override\n timestamp = strftime('%Y-%m-%d-%H-%M-%S')\n random_str = ''.join(choice(ascii_lowercase) for _ in range(3))\n backup_suffix = '-'.join(['backup', timestamp, random_str])\n\n filename, extension = splitext(config_filename)\n backup_filename = ''.join([filename, '-', backup_suffix, extension])\n backup_file = join(config_dir, backup_filename)\n os.rename(config_file, backup_file)\n\n # create a new default global config file\n config = ConfigParser(default_values)\n for required_section in required_sections:\n for required_key in required_keys[required_section]:\n config.add_section(required_section)\n config[required_section][required_key] = \\\n default_values[required_section][required_key]\n with open(config_file, 'w') as new_config_file:\n config.write(new_config_file)\n return dict(config)", "title": "" } ]
68ae881d624711688d61b991f3ae080d
Tests two busy times, one in the middle of the start day and one on a different day.
[ { "docid": "6a335bd9b281b3021e5ca2b79521bb38", "score": "0.6556361", "text": "def get_free_times_7_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n busy = [{'start': {'dateTime': '2015-11-16T10:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T11:00:00-08:00'}},\n {'start': {'dateTime': '2015-11-17T12:00:00-08:00'},\n 'end': {'dateTime': '2015-11-17T13:00:00-08:00'}}]\n\n free = [('2015-11-16T09:00:00-08:00', '2015-11-16T10:00:00-08:00'),\n ('2015-11-16T11:00:00-08:00', '2015-11-17T12:00:00-08:00'),\n ('2015-11-17T13:00:00-08:00', '2015-11-20T17:00:00-08:00')]\n\n assert free == get_free_times(busy, begin_date, end_date)", "title": "" } ]
[ { "docid": "dedcc10e201d066c2c9eb74db9004b9a", "score": "0.71987724", "text": "def really_between_times(instance, begin_time, end_time):\r\n\r\n\t# Case where the user searches for busy times throughout the whole day(24h)\r\n\tbegin = arrow.get(begin_time).format(\"HH:mm\")\r\n\tend = arrow.get(end_time).format(\"HH:mm\")\r\n\tif begin == end:\r\n\t\tprint(\"Begin and end time is the same. All instances will be busy times\")\r\n\t\treturn True\r\n\r\n\t# Case where end_time > begin_time. First lists all available time ranges on\r\n\t# the entire date range upon which the instance exists. Then tests whether the \r\n\t# instance overlaps with any of the time ranges.\r\n\tavails = list_availabilities_btwn_dates(instance['begin_datetime'], instance['end_datetime'], begin_time, end_time)\r\n\tinstance_begin = arrow.get(instance['begin_datetime'])\r\n\tinstance_end = arrow.get(instance['end_datetime'])\r\n\tfor avail in avails:\r\n\t\tbt = arrow.get(avail['bt'])\r\n\t\tet = arrow.get(avail['et'])\r\n\t\tif (instance_begin < bt and instance_end < bt) or (instance_begin > et and instance_end > et):\r\n\t\t\tprint(\"{} is a NOT a busy time within {} and {} on {}\".format(instance['summary'], begin, end, bt.format('YYYY-MM-DD')))\r\n\t\t\tcontinue\r\n\t\telse:\r\n\t\t\tprint(\"{} is a busy time within {} and {} on {}\".format(instance['summary'], begin, end, bt.format('YYYY-MM-DD')))\r\n\t\t\treturn True\r\n\t\r\n\treturn False", "title": "" }, { "docid": "a20503bf8809e621d25f339d30e151ab", "score": "0.6759078", "text": "def get_busy_dict_2_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n events = [{'start': {'date': '2015-11-20'},\n 'end': {'date': '2015-11-22'}}]\n\n busy = {'2015-11-20T09:00:00-08:00':\n {'start': {'dateTime': '2015-11-20T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-20T17:00:00-08:00'}}}\n\n busy_test = get_busy_dict(events, begin_date, end_date)\n\n for event in busy_test:\n assert event in busy", "title": "" }, { "docid": "02fd82a55d6e66248a299857730c5bcd", "score": "0.67394036", "text": "def get_free_times_2_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n busy = [{'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T10:00:00-08:00'}}]\n\n free = [('2015-11-16T10:00:00-08:00', '2015-11-20T17:00:00-08:00')]\n\n assert free == get_free_times(busy, begin_date, end_date)", "title": "" }, { "docid": "fe9018a167d3475abe35ee7801250b91", "score": "0.66659546", "text": "def get_free_times_11_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n busy = [{'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T17:00:00-08:00'}},\n {'start': {'dateTime': '2015-11-18T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-18T17:00:00-08:00'}}]\n\n free = [('2015-11-17T09:00:00-08:00', '2015-11-17T17:00:00-08:00'),\n ('2015-11-19T09:00:00-08:00', 
'2015-11-20T17:00:00-08:00')]\n\n assert free == get_free_times(busy, begin_date, end_date)", "title": "" }, { "docid": "cd8ccda625256ab30846394a7f01c11d", "score": "0.6653801", "text": "def get_free_times_1_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n busy = []\n\n free = [('2015-11-16T09:00:00-08:00', '2015-11-20T17:00:00-08:00')]\n\n assert free == get_free_times(busy, begin_date, end_date)", "title": "" }, { "docid": "f7b09f374832be4bbc486da872cb055e", "score": "0.6609194", "text": "def get_free_times_9_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n busy = [{'start': {'dateTime': '2015-11-16T10:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T11:00:00-08:00'}},\n {'start': {'dateTime': '2015-11-20T16:00:00-08:00'},\n 'end': {'dateTime': '2015-11-20T17:00:00-08:00'}}]\n\n free = [('2015-11-16T09:00:00-08:00', '2015-11-16T10:00:00-08:00'),\n ('2015-11-16T11:00:00-08:00', '2015-11-20T16:00:00-08:00')]\n\n assert free == get_free_times(busy, begin_date, end_date)", "title": "" }, { "docid": "f0a9746bc68dd4d4b93afc6a4b5de881", "score": "0.66039026", "text": "def get_busy_dict_1_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n events = [{'start': {'date': '2015-11-12'},\n 'end': {'date': '2015-11-16'}}]\n\n busy = {'2015-11-16T09:00:00-08:00':\n {'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T17:00:00-08:00'}}}\n\n busy_test = get_busy_dict(events, begin_date, end_date)\n\n for event in busy_test:\n assert event in busy", "title": "" }, { "docid": "cfb668b676552061ee419536968aadba", "score": "0.65635", "text": "def get_busy_dict_11_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n events = [{'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T10:00:00-08:00'}},\n {'start': {'dateTime': '2015-11-16T10:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T11:00:00-08:00'}}]\n\n busy = {'2015-11-16T09:00:00-08:00':\n {'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T10:00:00-08:00'}},\n '2015-11-16T10:00:00-08:00':\n {'start': {'dateTime': '2015-11-17T10:00:00-08:00'},\n 'end': {'dateTime': '2015-11-17T11:00:00-08:00'}}}\n\n busy_test = get_busy_dict(events, begin_date, end_date)\n\n for event in busy_test:\n assert event in busy", "title": "" }, { "docid": "8edeb1b964c672e2f77fa04a8ff1e276", "score": "0.6557012", "text": "def get_free_times_8_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n 
month=11, year=2015)\n\n busy = [{'start': {'dateTime': '2015-11-17T10:00:00-08:00'},\n 'end': {'dateTime': '2015-11-17T11:00:00-08:00'}},\n {'start': {'dateTime': '2015-11-18T12:00:00-08:00'},\n 'end': {'dateTime': '2015-11-18T13:00:00-08:00'}}]\n\n free = [('2015-11-16T09:00:00-08:00', '2015-11-17T10:00:00-08:00'),\n ('2015-11-17T11:00:00-08:00', '2015-11-18T12:00:00-08:00'),\n ('2015-11-18T13:00:00-08:00', '2015-11-20T17:00:00-08:00')]\n\n assert free == get_free_times(busy, begin_date, end_date)", "title": "" }, { "docid": "d676650178ad41ecb1ebbc496a85442c", "score": "0.65149826", "text": "def get_free_times_3_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n busy = [{'start': {'dateTime': '2015-11-20T16:00:00-08:00'},\n 'end': {'dateTime': '2015-11-20T17:00:00-08:00'}}]\n\n free = [('2015-11-16T09:00:00-08:00', '2015-11-20T16:00:00-08:00')]\n\n assert free == get_free_times(busy, begin_date, end_date)", "title": "" }, { "docid": "e4829488c3c4b1879693c9439d85fcfe", "score": "0.6510528", "text": "def test_full_between_casuistic_2(self):\n a = datetime(2017, 12, 17, 14, 36, 12)\n b = datetime(2017, 12, 19, 7, 35, 24)\n\n days = SLATime.full_in_between_working_days(a, b)\n expected_days = 0\n\n self.assertEqual(expected_days, days)", "title": "" }, { "docid": "ffa6c6f86e83cf848817a718b61b068d", "score": "0.6499147", "text": "def get_free_times_6_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n busy = [{'start': {'dateTime': '2015-11-16T10:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T11:00:00-08:00'}},\n {'start': {'dateTime': '2015-11-16T12:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T13:00:00-08:00'}}]\n\n free = [('2015-11-16T09:00:00-08:00', '2015-11-16T10:00:00-08:00'),\n ('2015-11-16T11:00:00-08:00', '2015-11-16T12:00:00-08:00'),\n ('2015-11-16T13:00:00-08:00', '2015-11-20T17:00:00-08:00')]\n\n assert free == get_free_times(busy, begin_date, end_date)", "title": "" }, { "docid": "35154c6cdea846e0780bd2315531db3a", "score": "0.64914364", "text": "def test_full_between_normal_working_days_2(self):\n a = datetime(2018, 2, 15, 7, 35, 24, 43)\n b = datetime(2018, 2, 16, 7, 35, 24, 44)\n\n days = SLATime.full_in_between_working_days(a, b)\n expected_days = 0\n\n self.assertEqual(expected_days, days)", "title": "" }, { "docid": "d38b020067c6076689ab0a61db3b6101", "score": "0.6481907", "text": "def get_free_times_10_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n busy = [{'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-18T11:00:00-08:00'}},\n {'start': {'dateTime': '2015-11-19T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-19T13:00:00-08:00'}},\n {'start': {'dateTime': '2015-11-19T15:00:00-08:00'},\n 'end': {'dateTime': '2015-11-19T16:00:00-08:00'}},\n {'start': {'dateTime': '2015-11-20T16:00:00-08:00'},\n 'end': {'dateTime': '2015-11-20T17:00:00-08:00'}}]\n\n free = 
[('2015-11-18T11:00:00-08:00', '2015-11-18T17:00:00-08:00'),\n ('2015-11-19T13:00:00-08:00', '2015-11-19T15:00:00-08:00'),\n ('2015-11-19T16:00:00-08:00', '2015-11-20T16:00:00-08:00')]\n\n assert free == get_free_times(busy, begin_date, end_date)", "title": "" }, { "docid": "ab96f439e3157c499c0500b77b638ecb", "score": "0.64669484", "text": "def get_free_times_5_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n busy = [{'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T10:00:00-08:00'}},\n {'start': {'dateTime': '2015-11-20T16:00:00-08:00'},\n 'end': {'dateTime': '2015-11-20T17:00:00-08:00'}}]\n\n free = [('2015-11-16T10:00:00-08:00', '2015-11-20T16:00:00-08:00')]\n\n assert free == get_free_times(busy, begin_date, end_date)", "title": "" }, { "docid": "0acc4aab793cb69cfa6662afad9397b3", "score": "0.6463193", "text": "def get_busy_dict_9_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n events = [{'start': {'dateTime': '2015-11-16T10:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T11:00:00-08:00'}}]\n\n busy = {'2015-11-16T10:00:00-08:00':\n {'start': {'dateTime': '2015-11-16T10:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T11:00:00-08:00'}}}\n\n busy_test = get_busy_dict(events, begin_date, end_date)\n\n for event in busy_test:\n assert event in busy", "title": "" }, { "docid": "8d6eef98a57c8207ec5c4b9a5fc11d69", "score": "0.6456636", "text": "def test_full_between_casuistic_4(self):\n a = datetime(2018, 1, 29, 14, 36, 12)\n b = datetime(2018, 2, 1, 8, 35, 24)\n\n days = SLATime.full_in_between_working_days(a, b)\n expected_days = 2\n\n self.assertEqual(expected_days, days)", "title": "" }, { "docid": "687a6189e87231b202cf6bb050fcc0f5", "score": "0.6450391", "text": "def get_free_times_4_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n busy = [{'start': {'dateTime': '2015-11-16T10:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T11:00:00-08:00'}}]\n\n free = [('2015-11-16T09:00:00-08:00', '2015-11-16T10:00:00-08:00'),\n ('2015-11-16T11:00:00-08:00', '2015-11-20T17:00:00-08:00')]\n\n assert free == get_free_times(busy, begin_date, end_date)", "title": "" }, { "docid": "aef3f107bced9c856e47bee28a30eb7e", "score": "0.6440175", "text": "def test_full_between_casuistic_1(self):\n a = datetime(2018, 2, 18, 6, 36, 12)\n b = datetime(2018, 2, 19, 7, 35, 24)\n\n days = SLATime.full_in_between_working_days(a, b)\n expected_days = 0\n\n self.assertEqual(expected_days, days)", "title": "" }, { "docid": "1412640cc7e128a28ae1bb98a95fde13", "score": "0.6428671", "text": "def double_day(bday1, bday2):", "title": "" }, { "docid": "8ba1061ae40fb921fdc8f7a98af84a02", "score": "0.6390355", "text": "def get_busy_dict_8_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n 
tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n events = [{'start': {'dateTime': '2015-11-16T10:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T18:00:00-08:00'}}]\n\n busy = {'2015-11-16T10:00:00-08:00':\n {'start': {'dateTime': '2015-11-16T10:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T17:00:00-08:00'}}}\n\n busy_test = get_busy_dict(events, begin_date, end_date)\n\n for event in busy_test:\n assert event in busy", "title": "" }, { "docid": "1b8a69216f68746c549cbc1dfe26c8f4", "score": "0.63884544", "text": "def get_busy_dict_7_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n events = [{'start': {'dateTime': '2015-11-16T08:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T10:00:00-08:00'}}]\n\n busy = {'2015-11-16T09:00:00-08:00':\n {'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T10:00:00-08:00'}}}\n\n busy_test = get_busy_dict(events, begin_date, end_date)\n\n for event in busy_test:\n assert event in busy", "title": "" }, { "docid": "91621a08fc6ec6ac489fda61b5a6e408", "score": "0.6385017", "text": "def test_full_day_starting_at_noon():\n _test_half_way_between_range(12, 12)", "title": "" }, { "docid": "31dd52076ec34bf14487bcfa3c786e4f", "score": "0.6384068", "text": "def get_busy_dict_6_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n events = [{'start': {'date': '2015-11-16'},\n 'end': {'date': '2015-11-17'}},\n {'start': {'date': '2015-11-17'},\n 'end': {'date': '2015-11-18'}}]\n\n busy = {'2015-11-16T09:00:00-08:00':\n {'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T17:00:00-08:00'}},\n '2015-11-17T09:00:00-08:00':\n {'start': {'dateTime': '2015-11-17T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-17T17:00:00-08:00'}}}\n\n busy_test = get_busy_dict(events, begin_date, end_date)\n\n for event in busy_test:\n assert event in busy", "title": "" }, { "docid": "d7c83ea786209cc2348561193b2bbe92", "score": "0.63398045", "text": "def get_busy_dict_4_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n events = [{'start': {'date': '2015-11-19'},\n 'end': {'date': '2015-11-20'}}]\n\n busy = {'2015-11-19T09:00:00-08:00':\n {'start': {'dateTime': '2015-11-19T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-20T17:00:00-08:00'}}}\n\n busy_test = get_busy_dict(events, begin_date, end_date)\n\n for event in busy_test:\n assert event in busy", "title": "" }, { "docid": "51047a4b8d0f1642892b516686216671", "score": "0.6321999", "text": "def get_busy_dict_10_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n events = [{'start': {'dateTime': '2015-11-16T08:00:00-08:00'},\n 'end': 
{'dateTime': '2015-11-16T18:00:00-08:00'}}]\n\n busy = {'2015-11-16T09:00:00-08:00':\n {'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T17:00:00-08:00'}}}\n\n busy_test = get_busy_dict(events, begin_date, end_date)\n\n for event in busy_test:\n assert event in busy", "title": "" }, { "docid": "003f53a0a3173b3fb4752ad7f22ab39c", "score": "0.6317189", "text": "def test_full_between_casuistic_3(self):\n a = datetime(2018, 1, 31, 14, 36, 12)\n b = datetime(2018, 2, 2, 8, 35, 24)\n\n days = SLATime.full_in_between_working_days(a, b)\n expected_days = 0\n\n self.assertEqual(expected_days, days)", "title": "" }, { "docid": "0d190e7446f2f877165f643c2318a1f8", "score": "0.6306318", "text": "def get_busy_dict_5_test():\n\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n events = [{'start': {'date': '2015-11-15'},\n 'end': {'date': '2015-11-21'}}]\n\n busy = {'2015-11-16T09:00:00-08:00':\n {'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-20T17:00:00-08:00'}}}\n\n busy_test = get_busy_dict(events, begin_date, end_date)\n\n for event in busy_test:\n assert event in busy", "title": "" }, { "docid": "c2776d4f32987e20151f07d13a3af174", "score": "0.629757", "text": "def test_full_between_normal_working_days_1(self):\n a = datetime(2018, 2, 15, 7, 35, 24, 43)\n b = datetime(2018, 2, 16, 7, 35, 24, 42)\n\n days = SLATime.full_in_between_working_days(a, b)\n expected_days = 0\n\n self.assertEqual(expected_days, days)", "title": "" }, { "docid": "f6c753d03c3cdc425815db8576575abb", "score": "0.6287171", "text": "def get_busy_dict_3_test():\n begin_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0, day=16,\n month=11, year=2015)\n end_date = arrow.get().replace(\n tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0, day=20,\n month=11, year=2015)\n\n events = [{'start': {'date': '2015-11-18'},\n 'end': {'date': '2015-11-20'}}]\n\n busy = {'2015-11-18T09:00:00-08:00':\n {'start': {'dateTime': '2015-11-18T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-19T17:00:00-08:00'}}}\n\n busy_test = get_busy_dict(events, begin_date, end_date)\n\n for event in busy_test:\n assert event in busy", "title": "" }, { "docid": "4e27669abcdf226d0c3d32816b2d2fa1", "score": "0.6237872", "text": "def test_full_between_same_working_days(self):\n a = datetime(2018, 2, 16, 7, 35, 24, 43)\n b = datetime(2018, 2, 16, 7, 35, 24, 43)\n\n days = SLATime.full_in_between_working_days(a, b)\n expected_days = 0\n\n self.assertEqual(expected_days, days)", "title": "" }, { "docid": "c2e71068b8d3a63934507c7f892c473b", "score": "0.62334764", "text": "def test_full_between_normal_working_days(self):\n a = datetime(2018, 2, 15, 7, 35, 24, 43)\n b = datetime(2018, 2, 16, 7, 35, 24, 43)\n\n days = SLATime.full_in_between_working_days(a, b)\n expected_days = 0\n\n self.assertEqual(expected_days, days)", "title": "" }, { "docid": "d5e38b8b85d813a02477dbaac1d291d1", "score": "0.6186111", "text": "def test_full_between_working_days_weekend_2(self):\n a = datetime(2018, 2, 16, 7, 35, 24, 43)\n b = datetime(2018, 2, 19, 8, 47, 43, 12)\n\n days = SLATime.full_in_between_working_days(a, b)\n expected_days = 0\n\n self.assertEqual(expected_days, days)", "title": "" }, { "docid": "dc0373246f631ee74364acf49449e976", 
"score": "0.6148972", "text": "def test_full_between_wrong_working_days(self):\n a = datetime(2018, 2, 16, 7, 35, 24, 43)\n b = datetime(2018, 2, 15, 7, 35, 24, 43)\n\n days = SLATime.full_in_between_working_days(a, b)\n expected_days = 0\n\n self.assertEqual(expected_days, days)", "title": "" }, { "docid": "d30df828b5e71a3359cc2e0b89c96660", "score": "0.6117803", "text": "def get_busy_list_2_test():\n\n busy = {'2015-11-16T09:00:00-08:00':\n {'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T17:00:00-08:00'}},\n '2015-11-16T10:00:00-08:00':\n {'start': {'dateTime': '2015-11-16T10:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T11:00:00-08:00'}}}\n\n busy_list = [{'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T17:00:00-08:00'}}]\n\n assert busy_list == get_busy_list(busy)", "title": "" }, { "docid": "67ed7a523469aeadb8524cb2e3809a92", "score": "0.60968757", "text": "def get_busy_list_1_test():\n busy = {'2015-11-16T09:00:00-08:00':\n {'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T17:00:00-08:00'}},\n '2015-11-17T09:00:00-08:00':\n {'start': {'dateTime': '2015-11-17T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-17T17:00:00-08:00'}}}\n\n busy_list = [{'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-17T17:00:00-08:00'}}]\n\n assert busy_list == get_busy_list(busy)", "title": "" }, { "docid": "7abfe1c6849f5e65096721b19bae9d72", "score": "0.60618544", "text": "def test_create_slot_times_method_with_same_day_and_no_dstp(self):\n self.booking_type_30.dailyslottimepattern_set.create(\n day=self.start_date.weekday() + 1, start_time='9:00',\n end_time='11:00')\n result, = self.st_generation.create_slot_times()\n self.assertEqual(result, 0)\n self.assertEqual(self.booking_type_30.slottime_set.count(), 0)", "title": "" }, { "docid": "faa33d61ebb2d341074fd0e386b90e5f", "score": "0.6020618", "text": "def test_get_availability_next_by_task_and_date_1_availability_today2(self):\n\n now = datetime.now(timezone.utc)\n loc_name = \"Cordoba\"\n l1 = Location.objects.create_location(loc_name)\n task_name = \"Device Installation\"\n t1 = Task.objects.create_task(task_name, 60)\n a1 = Availability.objects.create_availability(now + timedelta(minutes = 30), l1, t1)\n a2 = Availability.objects.create_availability(now + timedelta(minutes = -60), l1, t1)\n result = Availability.objects.get_next_by_task_and_date(task_name, now)\n self.assertEqual(len(result), 1)\n availability = result[0]\n self.assertEqual(availability.when, now + timedelta(minutes = 30))\n self.assertEqual(availability.where.name, loc_name)\n self.assertEqual(availability.what.name, task_name)", "title": "" }, { "docid": "5350b3715ed1428c9d142c0f4e619b40", "score": "0.601611", "text": "def get_busy_list_4_test():\n busy = {'2015-11-16T09:00:00-08:00':\n {'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T17:00:00-08:00'}},\n '2015-11-17T09:00:00-08:00':\n {'start': {'dateTime': '2015-11-17T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-17T10:00:00-08:00'}}}\n\n busy_list = [{'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-17T10:00:00-08:00'}}]\n\n assert busy_list == get_busy_list(busy)", "title": "" }, { "docid": "b31cab31337492fd2a9271de7160c7b8", "score": "0.60008156", "text": "def sleep_in2(weekday, vacation):", "title": "" }, { "docid": "fc122007204b1f79eb3b84f471c6541a", "score": "0.59909153", "text": "def 
get_busy_list_3_test():\n\n busy = {'2015-11-16T09:00:00-08:00':\n {'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T10:00:00-08:00'}},\n '2015-11-16T10:00:00-08:00':\n {'start': {'dateTime': '2015-11-16T10:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T11:00:00-08:00'}}}\n\n busy_list = [{'start': {'dateTime': '2015-11-16T09:00:00-08:00'},\n 'end': {'dateTime': '2015-11-16T11:00:00-08:00'}}]\n\n assert busy_list == get_busy_list(busy)", "title": "" }, { "docid": "37c4a5f8dbbf6933b5ec890a696b9345", "score": "0.59762204", "text": "def time_in_range(startTime:time, endTime:time, nowTime:time) -> time:\n if startTime < endTime:\n return nowTime >= startTime and nowTime <= endTime\n else: #Over midnight\n return nowTime >= startTime or nowTime <= endTime", "title": "" }, { "docid": "51d8d1c8a3c6e4a4362f74a603e06552", "score": "0.5955628", "text": "def check_two(self):\r\n if not self.check_date():\r\n return \"Incorrect date format. Please, try again.\"\r\n curr_year = str(datetime.date.today()).split()[0].replace(\"-\", \"/\")[2:4]\r\n curr_month = str(datetime.date.today()).split()[0].\\\r\n replace(\"-\", \"/\")[5:7]\r\n curr_date = str(datetime.date.today()).split()[0].\\\r\n replace(\"-\", \"/\")[8:10]\r\n if self.year1 != curr_year or self.year2 != curr_year:\r\n return \"You can plan your trip by the end of this year. \\\r\nAviaticket for next year will be avaible later\"\r\n time1 = datetime.date(year = int(\"20\" + self.year1), \\\r\n month = int(self.month1), day = int(self.day1))\r\n time2 = datetime.date(year = int(\"20\" + self.year2), \\\r\n month = int(self.month2), day = int(self.day2))\r\n if time1 <= datetime.date.today() or time2 <= datetime.date.today():\r\n return \"Your date Incorrect. Please, try again.\"\r\n final_time = time2 - time1\r\n total_days = int(str(final_time).split()[0])\r\n if 0 < total_days < 3:\r\n return \"Interval between date too short.\"\r\n if total_days <= 0:\r\n return \"Incorrect date. 
Please, try again.\"\r\n elif total_days > 21:\r\n return \"Interval between your date too large.\"\r\n else:\r\n return True", "title": "" }, { "docid": "e044576d5c04899c5b80ee55cc139060", "score": "0.5919559", "text": "def test_full_between_working_days_weekend(self):\n a = datetime(2018, 2, 16, 7, 35, 24, 43)\n b = datetime(2018, 2, 18, 8, 47, 43, 12)\n\n days = SLATime.full_in_between_working_days(a, b)\n expected_days = 0\n\n self.assertEqual(expected_days, days)", "title": "" }, { "docid": "3399df12236c22556af1fce47917c35b", "score": "0.5909657", "text": "def test_TimeRange(self):\n for start, end, has in (\n (\"20020101T000000Z\", \"20020101T000001Z\", True),\n (\"20020101T000000Z\", \"20020101T000000Z\", True), # Timespan of zero duration\n (\"20020101\", \"20020101\", True), # Timespan of zero duration\n (\"20020101\", \"20020102\", True),\n (\"20020101\", \"20020103\", True),\n (\"20020102\", \"20020103\", False),\n (\"20011201\", \"20020101\", False), # End is non-inclusive\n\n # Expanded recurrence\n (\"20030101T000000Z\", \"20030101T000001Z\", True),\n (\"20030101T000000Z\", \"20030101T000000Z\", True), # Timespan of zero duration\n (\"20030101\", \"20030101\", True), # Timespan of zero duration\n (\"20030101\", \"20030102\", True),\n (\"20030101\", \"20030103\", True),\n (\"20030102\", \"20030103\", False),\n (\"20021201\", \"20030101\", False), # End is non-inclusive\n ):\n if has:\n no = \"no \"\n else:\n no = \"\"\n\n if has != storeFilter(\n Filter(\n ComponentFilter(\n ComponentFilter(\n TimeRange(start=start, end=end),\n name=\"VEVENT\"\n ),\n name=\"VCALENDAR\"\n )\n )\n ).match(self.calendar):\n self.fail(\"Calendar has %sVEVENT with timerange %s?\" % (no, (start, end)))", "title": "" }, { "docid": "160b091e15fd64e55df3ad16edd092fb", "score": "0.5882509", "text": "def test_schedule_two_tasks():\n schedule_two_tasks(algorithms.Backfill())", "title": "" }, { "docid": "47043f948e376e3f5d30c8274d94fbc9", "score": "0.5847941", "text": "def test_check_hours_restriction(self):\n result1 = app.check_hours_restriction('08:25')\n result2 = app.check_hours_restriction('18:30')\n result3 = app.check_hours_restriction('09:35')\n self.assertEqual(result1, True)\n self.assertEqual(result2, True)\n self.assertEqual(result3, False)", "title": "" }, { "docid": "99292a9796110f38017449c0a85cf05b", "score": "0.5815596", "text": "def test_with_late_finish_period(self):\n periods = [(\"18:00\", \"00:00\")]\n assert GapFiller.fill_around(periods) == [(\"00:00\", \"18:00\")]", "title": "" }, { "docid": "0f0896cccd0bc2ba81ddf28222b2f62d", "score": "0.5814515", "text": "def time_in_range(start, end, day_start, day_end):\n if (start <= day_start and end >= day_end):\n return True\n elif (start >= day_start and start <= day_end):\n return True\n elif (end >= day_start and end <= day_end):\n return True\n return False", "title": "" }, { "docid": "94623c06a1b26524a87ede93af672454", "score": "0.5812479", "text": "def test_started_false_before_populaire_start_datetime(self):\n pop = self._make_one(\n short_name='VicPop',\n date=date(2011, 3, 27),\n time=time(10, 0))\n with patch('randopony.populaires.models.datetime') as mock_datetime:\n mock_datetime.now.return_value = datetime(2011, 3, 21, 13, 0)\n mock_datetime.combine = datetime.combine\n self.assertFalse(pop.started)", "title": "" }, { "docid": "c8213d3b1bdc2d1a69aadfd7ed5665d6", "score": "0.5751656", "text": "def test_started_false_after_populaire_start_datetime(self):\n pop = self._make_one(\n short_name='VicPop',\n date=date(2011, 3, 27),\n 
time=time(10, 0))\n with patch('randopony.populaires.models.datetime') as mock_datetime:\n mock_datetime.now.return_value = datetime(2011, 3, 27, 12, 30)\n mock_datetime.combine = datetime.combine\n self.assertTrue(pop.started)", "title": "" }, { "docid": "6687a52cc93598f85b61a3d98c8a71fd", "score": "0.57431465", "text": "def test_courier_busy_event(self, *args):\n\n # Constants\n initial_time = hour_to_sec(14)\n courier_id = 14\n time_delta = min_to_sec(10)\n on_time = time(14, 0, 0)\n off_time = time(15, 0, 0)\n service_time = min_to_sec(7)\n\n # Verifies 2 test cases for how the courier transitions to being busy\n # For each test case, assert the courier starts in a set and ends up in the busy set\n\n # Test 1: courier starts dropping off state\n env = Environment(initial_time=initial_time)\n dispatcher = Dispatcher(env=env, matching_policy=DummyMatchingPolicy())\n courier = Courier(dispatcher=dispatcher, env=env, courier_id=courier_id, on_time=on_time, off_time=off_time)\n env.process(courier._dropping_off_state(\n orders={\n 21: Order(drop_off_service_time=service_time, ready_time=time(12, 20, 0))\n }\n ))\n env.run(until=initial_time + time_delta)\n self.assertEqual(courier.condition, 'dropping_off')\n self.assertEqual(dispatcher.dropping_off_couriers, {courier.courier_id: courier})\n self.assertEqual(dispatcher.idle_couriers, {})\n\n # Test 2: courier start moving state\n env = Environment(initial_time=initial_time)\n dispatcher = Dispatcher(env=env, matching_policy=DummyMatchingPolicy())\n courier = Courier(\n dispatcher=dispatcher,\n env=env,\n courier_id=courier_id,\n location=Location(lat=4.690296, lng=-74.043929),\n on_time=on_time,\n off_time=off_time\n )\n env.process(courier._moving_state(destination=Location(lat=4.689697, lng=-74.055495)))\n env.run(until=initial_time + time_delta)\n self.assertEqual(courier.condition, 'moving')\n self.assertEqual(dispatcher.moving_couriers, {courier.courier_id: courier})\n self.assertEqual(dispatcher.idle_couriers, {})", "title": "" }, { "docid": "6718de31f676928ee517b7b0187dbc65", "score": "0.57170963", "text": "def __check_date_range(start_time, end_time, now_time):\n if start_time <= now_time <= end_time:\n return True\n return False", "title": "" }, { "docid": "92af89c14361085b89f3cf7c384c5d18", "score": "0.5712705", "text": "def test_get_availability_next_by_task_and_date_none_2(self):\n\n now = datetime.now(timezone.utc)\n loc_name = \"Cordoba\"\n l1 = Location.objects.create_location(loc_name)\n task_name = \"Device Installation\"\n t1 = Task.objects.create_task(task_name, 60)\n a1 = Availability.objects.create_availability(now + timedelta(days=-1, minutes = 30), l1, t1)\n a2 = Availability.objects.create_availability(now + timedelta(days=2, minutes = -60), l1, t1)\n result = Availability.objects.get_next_by_task_and_date(task_name, None)\n self.assertEqual(len(result), 1)\n availability = result[0]\n self.assertEqual(availability.when, now + timedelta(days=2, minutes = -60))\n self.assertEqual(availability.where.name, loc_name)\n self.assertEqual(availability.what.name, task_name)", "title": "" }, { "docid": "cdda3f8a949ccb7296f53fe4e0eec814", "score": "0.56636155", "text": "def test_schedule_two_tasks():\n schedule_two_tasks(algorithms.FCFS())", "title": "" }, { "docid": "30e08de406d21be5907fc6fe5645fba5", "score": "0.56246066", "text": "def test_started_after_assigned_and_scheduled():\n state = {\n TaskEventType.CREATED,\n TaskEventType.ASSIGNED,\n TaskEventType.SCHEDULED,\n }\n\n # already created\n 
assert_that(TaskEventType.CREATED.may_transition(state), is_(equal_to(False)))\n # already assigned\n assert_that(TaskEventType.ASSIGNED.may_transition(state), is_(equal_to(False)))\n # already scheduled\n assert_that(TaskEventType.SCHEDULED.may_transition(state), is_(equal_to(False)))\n # may be started\n assert_that(TaskEventType.STARTED.may_transition(state), is_(equal_to(True)))", "title": "" }, { "docid": "cd1c492e063800d850a4ee4184310fcc", "score": "0.56221753", "text": "def test_get_availability_next_by_task_and_date_1_availability_today(self):\n\n now = datetime.now(timezone.utc)\n loc_name = \"Cordoba\"\n l1 = Location.objects.create_location(loc_name)\n task_name = \"Device Installation\"\n t1 = Task.objects.create_task(task_name, 60)\n a1 = Availability.objects.create_availability(now + timedelta(minutes = 30), l1, t1)\n result = Availability.objects.get_next_by_task_and_date(task_name, now)\n self.assertEqual(len(result), 1)\n availability = result[0]\n self.assertEqual(availability.when, now + timedelta(minutes = 30))\n self.assertEqual(availability.where.name, loc_name)\n self.assertEqual(availability.what.name, task_name)", "title": "" }, { "docid": "a732c45195c251cbdace9f457c517223", "score": "0.5607123", "text": "def isoffc_time(current_time):\n if '09:00:00' < current_time < '17:00:00':\n return True\n else:\n print(\"This is not office hour , Your program will terminate in a minute\")\n return False", "title": "" }, { "docid": "d234964e76fcc6e3bdbdf20cbf2e5b0f", "score": "0.56013036", "text": "def test_get_availability_next_by_task_and_date_none_3(self):\n\n now = datetime.now(timezone.utc)\n loc_name = \"Cordoba\"\n l1 = Location.objects.create_location(loc_name)\n task_name = \"Device Installation\"\n t1 = Task.objects.create_task(task_name, 60)\n a2 = Availability.objects.create_availability(now + timedelta(minutes = -60), l1, t1)\n result = Availability.objects.get_next_by_task_and_date(task_name, None)\n self.assertEqual(len(result), 0)", "title": "" }, { "docid": "1087315c38b1607b82b621b1aac91600", "score": "0.5594892", "text": "def test_get_availability_next_by_task_and_date_none_4(self):\n\n now = datetime.now(timezone.utc)\n loc_name = \"Cordoba\"\n l1 = Location.objects.create_location(loc_name)\n task_name = \"Device Installation\"\n t1 = Task.objects.create_task(task_name, 60)\n a2 = Availability.objects.create_availability(now + timedelta(minutes = 60), l1, t1)\n result = Availability.objects.get_next_by_task_and_date(task_name, None)\n self.assertEqual(len(result), 1)\n availability = result[0]\n self.assertEqual(availability.when, now + timedelta(minutes = 60))\n self.assertEqual(availability.where.name, loc_name)\n self.assertEqual(availability.what.name, task_name)", "title": "" }, { "docid": "927dfa3f180e6da6291e762d20be67e1", "score": "0.5594198", "text": "def test_check_days_restriction(self):\n license_plate = 'abc-1235'\n data1 = dt.datetime.strptime('08-05-2019', app.DATE_FORMAT).date()\n result1 = app.check_days_restriction(data1, license_plate)\n data2 = dt.datetime.strptime('05-05-2019', app.DATE_FORMAT).date()\n result2 = app.check_days_restriction(data2, license_plate)\n self.assertEqual(result1, True)\n self.assertEqual(result2, False)", "title": "" }, { "docid": "a82d35534a2279eb9c16bac50e40963c", "score": "0.558758", "text": "def test_get_availability_next_by_task_and_date_none(self):\n\n now = datetime.now(timezone.utc)\n loc_name = \"Cordoba\"\n l1 = Location.objects.create_location(loc_name)\n task_name = \"Device Installation\"\n t1 = 
Task.objects.create_task(task_name, 60)\n a1 = Availability.objects.create_availability(now + timedelta(days=2, minutes = 30), l1, t1)\n a2 = Availability.objects.create_availability(now + timedelta(days=2, minutes = -60), l1, t1)\n result = Availability.objects.get_next_by_task_and_date(task_name, None)\n self.assertEqual(len(result), 2)\n availability = result[0]\n self.assertEqual(availability.when, now + timedelta(days=2, minutes = -60))\n self.assertEqual(availability.where.name, loc_name)\n self.assertEqual(availability.what.name, task_name)\n availability = result[1]\n self.assertEqual(availability.when, now + timedelta(days=2, minutes = 30))\n self.assertEqual(availability.where.name, loc_name)\n self.assertEqual(availability.what.name, task_name)", "title": "" }, { "docid": "d0b407450025afad16dc4dc75d0dd331", "score": "0.55755883", "text": "def test_get_availability_next_by_task_and_date_none_7(self):\n\n now = datetime.now(timezone.utc)\n l1 = Location.objects.create_location(\"Cordoba\")\n t1 = Task.objects.create_task(\"Device Installation\", 60)\n a1 = Availability.objects.create_availability(now + timedelta(minutes = -30), l1, t1)\n a2 = Availability.objects.create_availability(now + timedelta(days=1, minutes = 60), l1, t1)\n a2 = Availability.objects.create_availability(now + timedelta(days=2, minutes = 60), l1, t1)\n result = Availability.objects.get_next_by_task_and_date(t1.name, None)\n self.assertEqual(len(result), 1)\n availability = result[0]\n self.assertEqual(availability.when, now + timedelta(days=1, minutes = 60))\n self.assertEqual(availability.where.name, l1.name)\n self.assertEqual(availability.what.name, t1.name)", "title": "" }, { "docid": "8cb552af58645de078d8a696b44a1e45", "score": "0.55739284", "text": "def test_intersection_symmetry(self):\n a = DT.today()\n b = DT.today()\n c = DT.today()\n d = DT.today()\n time_slots = [a, b, c, d]\n for w in time_slots:\n for x in time_slots:\n if w >= x:\n continue\n T1 = TimeSlot(w, x)\n for y in time_slots:\n for z in time_slots:\n if y >= z:\n continue\n T2 = TimeSlot(y, z)\n assert_equal(T1.intersects_boundary_with(T2), T2.intersects_boundary_with(T1))\n assert_equal(T1.intersects_internally_with(T2), T2.intersects_internally_with(T1))", "title": "" }, { "docid": "5b1f973a225a03fe75dba9b3f4465775", "score": "0.55673236", "text": "def catalog_time_comparison(times1, times2, tol_seconds = 10):\n \n #using the the datetime objects were really slow. 
\n #Converting to seconds to run the comparison\n #Speedup is something like (1e3)!!!\n\n if isinstance(times1[0], obspy.core.utcdatetime.UTCDateTime):\n times1 = np.array([time.mktime(t.timetuple()) for t in np.array(times1)])\n \n if isinstance(times2[0], obspy.core.utcdatetime.UTCDateTime):\n times2 = np.array([time.mktime(t.timetuple()) for t in np.array(times2)])\n \n\n\n mask1_ = []\n mask2_ = []\n ix = 0\n for t in times1:\n dt = times2- t #Produces an array\n\n if np.any(np.abs(dt) < tol_seconds):\n mask1_.append(ix) #index of a shared event in first set\n mask2_.append(np.argmin(np.abs(dt))) #index of a shared event in first set\n\n ix+=1\n return mask1_, mask2_", "title": "" }, { "docid": "9376b38453811d2d34ed459538b6a240", "score": "0.556688", "text": "def test_percent_calculation_when_range_crosses_midnight():\n _test_half_way_between_range(22, 4)", "title": "" }, { "docid": "e8e0269d393df67784fa7d241fca790d", "score": "0.5553197", "text": "def timerange(datetime1, datetime2, step=100, eod=2400.0):\n (date1,time1) = datetime1\n (date2,time2) = datetime2\n date1,time1=timeadd((date1,time1),(0,0),eod)\n date2,time2=timeadd((date2,time2),(0,0),eod)\n while (date1,time1)!=(date2,time2):\n yield date1,time1\n date1,time1 = timeadd((date1,time1),(0,step),eod)", "title": "" }, { "docid": "65d247e41d5fb501a975b7bf10858574", "score": "0.55527383", "text": "def test_with_early_start_period(self):\n periods = [(\"00:00\", \"10:00\")]\n assert GapFiller.fill_around(periods) == [(\"10:00\", \"00:00\")]", "title": "" }, { "docid": "7e762e47b162154402cb47d15be445b7", "score": "0.55361706", "text": "def _test_range(weekday: int, hours: int, minutes: int, seconds: int) -> bool:\n if not 0 <= weekday <= 7:\n return False\n if not 0 <= hours <= 23:\n return False\n if not 0 <= minutes <= 59:\n return False\n if not 0 <= seconds <= 59:\n return False\n return True", "title": "" }, { "docid": "88e2dcc5876a34daa4245089fd6db98a", "score": "0.5533711", "text": "def test_get_availability_next_by_task_and_date_none_6(self):\n\n now = datetime.now(timezone.utc)\n loc_name = \"Cordoba\"\n l1 = Location.objects.create_location(loc_name)\n task_name = \"Device Installation\"\n t1 = Task.objects.create_task(task_name, 60)\n a1 = Availability.objects.create_availability(now + timedelta(minutes = -30), l1, t1)\n a2 = Availability.objects.create_availability(now + timedelta(days=1, minutes = 60), l1, t1)\n result = Availability.objects.get_next_by_task_and_date(task_name, None)\n self.assertEqual(len(result), 1)\n availability = result[0]\n self.assertEqual(availability.when, now + timedelta(days=1, minutes = 60))\n self.assertEqual(availability.where.name, loc_name)\n self.assertEqual(availability.what.name, task_name)", "title": "" }, { "docid": "23c452545b2547c531f0aeb501ade20b", "score": "0.5533153", "text": "def schedule_on ():\n return 6 <= datetime.now().hour < 22", "title": "" }, { "docid": "dddb2bd8d94a541d59b473793940a6e2", "score": "0.55260307", "text": "def considered_dates(r,low=6,up=22) -> bool:\n l = int(r.dep_time[:2]) # take hours from string\n h = int(r.arr_time[:2]) # take hours from string\n return (low < l) and (low < h) and (l < up) and (h < up)", "title": "" }, { "docid": "361aff550698fd573e62a4516abe8660", "score": "0.5524823", "text": "def sleep_in(weekday, vacation):\n return not weekday or vacation", "title": "" }, { "docid": "c9708a90ea2a979f6ef242b45e748dfa", "score": "0.551774", "text": "def timer_between_these_two_times(self,start_time, end_time):\r\n if (self.current_time - 
self.transition_timer) >= start_time\\\r\n and (self.current_time - self.transition_timer) < end_time:\r\n return True", "title": "" }, { "docid": "5b3363fc87fa3db819b33e81845d082e", "score": "0.55106217", "text": "def test_current_view_simple(self):\n day1 = ScheduleBlock.objects.create(\n start_time=D.datetime(2013, 9, 22, 7, 0, 0,\n tzinfo=D.timezone.utc),\n end_time=D.datetime(2013, 9, 22, 19, 0, 0,\n tzinfo=D.timezone.utc),\n )\n day2 = ScheduleBlock.objects.create(\n start_time=D.datetime(2013, 9, 23, 7, 0, 0,\n tzinfo=D.timezone.utc),\n end_time=D.datetime(2013, 9, 23, 19, 0, 0,\n tzinfo=D.timezone.utc),\n )\n venue1 = Venue.objects.create(order=1, name='Venue 1')\n venue2 = Venue.objects.create(order=2, name='Venue 2')\n venue1.blocks.add(day1)\n venue2.blocks.add(day1)\n\n start1 = D.datetime(2013, 9, 22, 10, 0, 0, tzinfo=D.timezone.utc)\n start2 = D.datetime(2013, 9, 22, 11, 0, 0, tzinfo=D.timezone.utc)\n start3 = D.datetime(2013, 9, 22, 12, 0, 0, tzinfo=D.timezone.utc)\n start4 = D.datetime(2013, 9, 22, 13, 0, 0, tzinfo=D.timezone.utc)\n start5 = D.datetime(2013, 9, 22, 14, 0, 0, tzinfo=D.timezone.utc)\n\n # During the first slot\n cur1 = D.datetime(2013, 9, 22, 10, 30, 0, tzinfo=D.timezone.utc)\n # Middle of the day\n cur2 = D.datetime(2013, 9, 22, 11, 30, 0, tzinfo=D.timezone.utc)\n cur3 = D.datetime(2013, 9, 22, 12, 30, 0, tzinfo=D.timezone.utc)\n # During the last slot\n cur4 = D.datetime(2013, 9, 22, 13, 30, 0, tzinfo=D.timezone.utc)\n # After the last slot\n cur5 = D.datetime(2013, 9, 22, 15, 30, 0, tzinfo=D.timezone.utc)\n\n slots = []\n\n slots.append(Slot.objects.create(start_time=start1, end_time=start2))\n slots.append(Slot.objects.create(start_time=start2, end_time=start3))\n slots.append(Slot.objects.create(start_time=start3, end_time=start4))\n slots.append(Slot.objects.create(start_time=start4, end_time=start5))\n\n pages = make_pages(8)\n venues = [venue1, venue2] * 4\n items = make_items(venues, pages)\n\n for index, item in enumerate(items):\n item.slots.add(slots[index // 2])\n\n c = Client()\n response = c.get('/schedule/current/', {'timestamp': cur1.isoformat()})\n context = response.context\n\n assert context['cur_slot'] == slots[0]\n assert len(context['schedule_page'].venues) == 2\n # Only cur and next slot\n assert len(context['slots']) == 2\n assert context['slots'][0].items[venue1]['note'] == 'current'\n assert context['slots'][1].items[venue1]['note'] == 'forthcoming'\n\n response = c.get('/schedule/current/', {'timestamp': cur2.isoformat()})\n context = response.context\n assert context['cur_slot'] == slots[1]\n assert len(context['schedule_page'].venues) == 2\n # prev, cur and next slot\n assert len(context['slots']) == 3\n assert context['slots'][0].items[venue1]['note'] == 'complete'\n assert context['slots'][1].items[venue1]['note'] == 'current'\n assert context['slots'][2].items[venue1]['note'] == 'forthcoming'\n\n response = c.get('/schedule/current/', {'timestamp': cur3.isoformat()})\n context = response.context\n assert context['cur_slot'] == slots[2]\n assert len(context['schedule_page'].venues) == 2\n # prev and cur\n assert len(context['slots']) == 3\n assert context['slots'][0].items[venue1]['note'] == 'complete'\n assert context['slots'][1].items[venue1]['note'] == 'current'\n assert context['slots'][2].items[venue1]['note'] == 'forthcoming'\n\n response = c.get('/schedule/current/', {'timestamp': cur4.isoformat()})\n context = response.context\n assert context['cur_slot'] == slots[3]\n assert len(context['schedule_page'].venues) == 2\n 
# preve and cur slot\n assert len(context['slots']) == 2\n assert context['slots'][0].items[venue1]['note'] == 'complete'\n assert context['slots'][1].items[venue1]['note'] == 'current'\n\n response = c.get('/schedule/current/', {'timestamp': cur5.isoformat()})\n context = response.context\n assert context['cur_slot'] is None\n assert len(context['schedule_page'].venues) == 2\n # prev slot only\n assert len(context['slots']) == 1\n assert context['slots'][0].items[venue1]['note'] == 'complete'\n\n # Check that next day is an empty current view\n response = c.get('/schedule/current/',\n {'timestamp': (cur3 + D.timedelta(days=1)).isoformat()})\n assert len(response.context['slots']) == 0", "title": "" }, { "docid": "3e11f6a3385fc0ba39adfc41e80b5dd5", "score": "0.5505152", "text": "def sleep_in(weekday, vacation):\n return not weekday or vacation # it's not a weekday and on vacation", "title": "" }, { "docid": "bf41678313f3352aacbada1fc7e6def2", "score": "0.5499889", "text": "def _test_nearest_workday_2(self, raise_error):\n\n\t\truleset = crontimesequence.parse_cronstring_day(\"1W,22W,18W\", raise_error=raise_error)\n\t\tself.assertEqual(len(ruleset), 3)\n\n\t\tpositive_dateset = []\n\t\tnegative_dateset = []\n\t\tfor i in range(1, 32):\n\t\t\td = datetime.datetime(2012, 7, i, 9, 39)\n\t\t\tif i in (2, 18, 23,):\n\t\t\t\tpositive_dateset.append(d)\n\t\t\telse:\n\t\t\t\tnegative_dateset.append(d)\n\n\t\tis_rule_dateset_compatible(self, ruleset, positive_dateset, True)\n\t\tis_rule_dateset_compatible(self, ruleset, negative_dateset, False)", "title": "" }, { "docid": "a1e3edb180469357cb346f6bc1338a20", "score": "0.5498831", "text": "def available_times(self, times):\n # TODO: do we increment one step or one minute (6 steps)\n # TODO: currently returns time immediately prior to another event, okay?\n busy = True\n while busy == True:\n busy = False\n for event in self.schedule:\n # fix_times = event.times\n # if times[0] <= fix_times[-1] and times[-1] >= fix_times[0]:\n start_time, end_time = event.times\n if times[0] <= end_time and times[-1] >= start_time:\n times = [x + 1 for x in times]\n busy = True\n break\n return times", "title": "" }, { "docid": "c95ebc39f8e7e23a1b773749a0cf5006", "score": "0.5498642", "text": "def test_get_availability_next_by_task_and_date_none_5(self):\n\n now = datetime.now(timezone.utc)\n loc_name = \"Cordoba\"\n l1 = Location.objects.create_location(loc_name)\n task_name = \"Device Installation\"\n t1 = Task.objects.create_task(task_name, 60)\n a1 = Availability.objects.create_availability(now + timedelta(minutes = -30), l1, t1)\n a2 = Availability.objects.create_availability(now + timedelta(minutes = 60), l1, t1)\n result = Availability.objects.get_next_by_task_and_date(task_name, None)\n self.assertEqual(len(result), 1)\n availability = result[0]\n self.assertEqual(availability.when, now + timedelta(minutes = 60))\n self.assertEqual(availability.where.name, loc_name)\n self.assertEqual(availability.what.name, task_name)", "title": "" }, { "docid": "42db3e9145755d57b6092ae78cc814cb", "score": "0.5487302", "text": "def test_1000_km_open():\n assert open_time(60.0, 1000, START_TIME) == \"2017-01-01T01:46:00+00:00\"\n assert open_time(200.0, 1000, START_TIME) == \"2017-01-01T05:53:00+00:00\"\n assert open_time(232.0, 1000, START_TIME) == \"2017-01-01T06:53:00+00:00\"\n assert open_time(300.0, 1000, START_TIME) == \"2017-01-01T09:00:00+00:00\"\n assert open_time(332.0, 1000, START_TIME) == \"2017-01-01T10:00:00+00:00\"\n assert open_time(400.0, 1000, START_TIME) == 
\"2017-01-01T12:08:00+00:00\"\n assert open_time(500.0, 1000, START_TIME) == \"2017-01-01T15:28:00+00:00\"\n assert open_time(600.0, 1000, START_TIME) == \"2017-01-01T18:48:00+00:00\"\n assert open_time(800.0, 1000, START_TIME) == \"2017-01-02T01:57:00+00:00\"\n assert open_time(1000.0, 1000, START_TIME) == \"2017-01-02T09:05:00+00:00\"\n assert open_time(1000.0, 1000, START_TIME) == \"2017-01-02T09:05:00+00:00\"\n assert open_time(1000.0, 1000, START_TIME) == \"2017-01-02T09:05:00+00:00\"", "title": "" }, { "docid": "f6675d06c084166c72f950764a33eac5", "score": "0.54789335", "text": "def test_get_availability_next_by_task_and_date_no_availabilities_at_date(self):\n\n now = datetime.now(timezone.utc)\n tomorrow = now + timedelta(days = 1)\n loc_name = \"Cordoba\"\n l1 = Location.objects.create_location(loc_name)\n task_name = \"Device Installation\"\n t1 = Task.objects.create_task(task_name, 30)\n a1 = Availability.objects.create_availability(tomorrow + timedelta(days = 1), l1, t1)\n result = Availability.objects.get_next_by_task_and_date(task_name, tomorrow)\n self.assertEqual(len(result), 0)", "title": "" }, { "docid": "6542f17ba39281e11dbce77896889ff9", "score": "0.5469883", "text": "def test_get_availability_next_by_task_and_date_no_more_availabilities_today(self):\n\n now = datetime.now(timezone.utc)\n loc_name = \"Cordoba\"\n l1 = Location.objects.create_location(loc_name)\n task_name = \"Device Installation\"\n t1 = Task.objects.create_task(task_name, 60)\n a1 = Availability.objects.create_availability(now + timedelta(minutes = -30), l1, t1)\n result = Availability.objects.get_next_by_task_and_date(task_name, now)\n self.assertEqual(len(result), 0)", "title": "" }, { "docid": "b155cb178d6c6bc824e7ae12a07f96e2", "score": "0.5459001", "text": "def test_zero_control_open():\n for dist in BREVET_DIST:\n assert open_time(0.0, dist, START_TIME) == START_TIME", "title": "" }, { "docid": "76abff9cba385ccdb33219734faab5a7", "score": "0.54570234", "text": "def sleep_in(weekday, vacation):\r\n if (not weekday or vacation):\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "c0136a680ee2e06b3b3e7d0721dfa4af", "score": "0.54492414", "text": "def is_compatible_arrival(self, other):\n appts = [self, other]\n appts.sort(key=attrgetter('start'), reverse=False)\n first_appt = appts[0]\n second_appt = appts[1]\n arrival_time = calc_arrival(first_appt, second_appt)\n second_time = second_appt.start.copy()\n second_time.add_time(hours=0, minutes=self.late_allowed)\n return arrival_time <= second_time", "title": "" }, { "docid": "60aa4c7300bf5747da83540b22a1568d", "score": "0.54489064", "text": "def test_nonreferenceClock(self):\n startTime = time.time()\n clockTime = self.clock.getTime()\n endTime = time.time()\n assert(clockTime >= startTime and clockTime <= endTime) # Check that returned time is correct", "title": "" }, { "docid": "223445acffabd24773acdb766fccc27e", "score": "0.5448904", "text": "def checkClashWithOtherReservationsOfUser(requestedDate, requestedStartTime, endTimeArray, user): \n userReservations = getReservationsByUserID(user.user_id())\n requestedDateStartTimeFormatted = formatToDateTime(requestedDate, requestedStartTime)\n requestedDateEndTimeFormatted = formatToDateTime(requestedDate, endTimeArray) \n requestedDateFormatted = formatOnlyDate(requestedDate)\n \n for reservation in userReservations:\n if reservation.date == requestedDateFormatted.date():\n reservationDateStartTimeFormatted = formatToDateTime(str(reservation.date), reservation.startTime)\n 
reservationDateEndTimeFormatted = formatToDateTime(str(reservation.date), reservation.endTime)\n \n if reservationDateStartTimeFormatted == requestedDateStartTimeFormatted:\n return True\n elif reservationDateStartTimeFormatted < requestedDateStartTimeFormatted:\n if reservationDateEndTimeFormatted > requestedDateStartTimeFormatted:\n return True\n elif reservationDateStartTimeFormatted > requestedDateStartTimeFormatted:\n if reservationDateStartTimeFormatted < requestedDateEndTimeFormatted:\n return True\n \n return False", "title": "" }, { "docid": "21f6e671dc25e9b642d1cb145ee1c7f4", "score": "0.54467064", "text": "def test_nearest_workday_2_DEx(self):\n\t\tself._test_nearest_workday_2(False)", "title": "" }, { "docid": "77dd947fc04a3f1b2ea151b97c1d75fb", "score": "0.54436725", "text": "def test_ambiguous_full_day(self):\n self.event.write({\n 'start': datetime(2020, 3, 23, 0, 0),\n 'stop': datetime(2020, 3, 23, 23, 59),\n })\n self.event.allday = True\n self.event._apply_recurrence_values({\n 'interval': 1,\n 'rrule_type': 'weekly',\n 'mo': True,\n 'count': 2,\n 'event_tz': 'Europe/Brussels' # DST change on 2020/3/23\n })\n events = self.event.recurrence_id.calendar_event_ids\n self.assertEventDates(events, [\n (datetime(2020, 3, 23, 0, 0), datetime(2020, 3, 23, 23, 59)),\n (datetime(2020, 3, 30, 0, 0), datetime(2020, 3, 30, 23, 59)),\n ])", "title": "" }, { "docid": "cb93631ac6f5d2090d241a7dfb06e849", "score": "0.54282004", "text": "def test_delayConflicting(self):\n crop = dummyCrop()\n seedA = dummySeed(crop)\n seedB = dummySeed(crop)\n tasks = [\n SeedFlats(datetime(2012, 5, 1), seedA, 10),\n SeedFlats(datetime(2012, 5, 1), seedB, 10)]\n schedule = schedule_tasks(tasks)\n self.assertEqual(\n [SeedFlats(datetime(2012, 5, 1, 8, 0, 0), seedA, 10),\n SeedFlats(datetime(2012, 5, 1, 8, 20, 0), seedB, 10)],\n schedule)", "title": "" }, { "docid": "3982dded67cf84c28b5c18d60194ee91", "score": "0.54224175", "text": "def test_get_availability_next_by_task_and_date_2_availability_today_ordered_ascending(self):\n\n now = datetime.now(timezone.utc)\n loc_name = \"Cordoba\"\n l1 = Location.objects.create_location(loc_name)\n task_name = \"Device Installation\"\n t1 = Task.objects.create_task(task_name, 60)\n a1 = Availability.objects.create_availability(now + timedelta(minutes = 60), l1, t1)\n a2 = Availability.objects.create_availability(now + timedelta(minutes = 30), l1, t1)\n result = Availability.objects.get_next_by_task_and_date(task_name, now)\n self.assertEqual(len(result), 2)\n availability = result[0]\n self.assertEqual(availability.when, now + timedelta(minutes = 30))\n self.assertEqual(availability.where.name, loc_name)\n self.assertEqual(availability.what.name, task_name)\n availability = result[1]\n self.assertEqual(availability.when, now + timedelta(minutes = 60))\n self.assertEqual(availability.where.name, loc_name)\n self.assertEqual(availability.what.name, task_name)", "title": "" }, { "docid": "377bd9b6e026878733c51085875df90b", "score": "0.54220235", "text": "def check_should_activate(recurrence):\n now = right_now()\n return recurrence.year == now.year and recurrence.month == now.month and recurrence.day == now.day and recurrence.hour == now.hour and recurrence.minute == now.minute", "title": "" }, { "docid": "244f95a1212a389fa7f0b57cb444d9f6", "score": "0.54182005", "text": "def test_near_deadline_near_task(self):\n\n time = timezone.now() + datetime.timedelta(hours=23, minutes=59, seconds=59)\n done = Task(deadline=time, done=True)\n not_done = Task(deadline=time, 
done=False)\n\n self.assertIs(done.is_near_to_deadline(), False)\n self.assertIs(not_done.is_near_to_deadline(), True)", "title": "" }, { "docid": "774423cdab85e280eb7aa343faa7b315", "score": "0.5414065", "text": "def test_postponeConflicting(self):\n crop = dummyCrop()\n seedA = dummySeed(crop)\n seedB = dummySeed(crop)\n tasks = [\n SeedFlats(datetime(2012, 5, 1), seedA, 90),\n SeedFlats(datetime(2012, 5, 1), seedB, 90)]\n schedule = schedule_tasks(tasks, maxManHours=timedelta(hours=3))\n self.assertEqual(\n [SeedFlats(datetime(2012, 5, 1, 8, 0, 0), seedA, 90),\n SeedFlats(datetime(2012, 5, 2, 8, 0, 0), seedB, 90)],\n schedule)", "title": "" }, { "docid": "78221d1888745e2dc6d1cc6361eb5aab", "score": "0.5403868", "text": "def _in_work_time(self, dt, resource=None):\n\n if resource is None:\n resource = self.env['resource.resource']\n\n assert dt.tzinfo, 'Provided datetimes needs to be timezoned'\n dt = dt.astimezone(timezone(self.tz))\n\n range_start = dt + relativedelta(hour=0, minute=0, second=0)\n range_end = dt + relativedelta(days=1, hour=0, minute=0, second=0)\n\n for interval in self._work_intervals_batch(range_start, range_end, resource)[resource.id]:\n if interval[0] <= dt <= interval[1]:\n return True\n return False", "title": "" }, { "docid": "8ba6ecfaaca7fa66982882d078e900fc", "score": "0.53946006", "text": "def single_time_check(expanded_itinerary, activity_name):\n \"\"\"date will be in for the form yyyy-mm-dd\"\"\"\n\n weekly_opening_hours = expanded_itinerary.get_opening_hours(activity_name)\n client_activity = expanded_itinerary.get_client_hours(activity_name)\n activity_date = expanded_itinerary.get_parameter(activity_name, 'date')\n\n year = int(activity_date[0:4])\n month = int(activity_date[5:7])\n day = int(activity_date[-2:])\n date_object = dt.datetime(year, month, day)\n day_name = date_object.strftime(\"%A\")\n \n if weekly_opening_hours == 0:\n suggestion_type = None #For now leave it as None\n suggestion = f'SHIFT {activity_name}'\n message = f'Activity start or end timing does not fall within operating hours of {activity_name} as it is closed for today'\n constraint_type = 'Red'\n suggestion_tuple = (suggestion_type, suggestion, message, constraint_type)\n return suggestion_tuple\n \n day_hours = weekly_opening_hours[day_name]\n\n opening_time, closing_time = int(day_hours[0]), int(day_hours[1])\n client_start, client_end = int(client_activity['activity_start']), int(client_activity['activity_end'])\n\n if client_start <= opening_time or client_start >= closing_time or client_end <= opening_time or client_end >= closing_time:\n\n \"\"\" Suggestion tuple schema TBC!!!!\n A: [ (suggestion_type, suggestion, message, constraint_type) ]\n where A is the activity_name\"\"\"\n \n suggestion_type = None #For now leave it as None\n suggestion = f'SHIFT {activity_name}'\n message = f'Activity start or end timing does not fall within operating hours of {activity_name}. 
Opening time is {day_hours[0]} and closing time is {day_hours[1]}'\n constraint_type = 'Red'\n suggestion_tuple = (suggestion_type, suggestion, message, constraint_type)\n\n return suggestion_tuple\n \n return None", "title": "" }, { "docid": "9b5c56e5de6737b394ddba9ff3659f4d", "score": "0.53934366", "text": "def test_get_time_period(self):\n self.assertEqual(self.pressure_reading_manager1.get_time_period().get_start_datetime('%Y-%m-%d %H:%M'),'2018-09-23 19:56')\n self.assertEqual(self.pressure_reading_manager1.get_time_period().get_end_datetime('%Y-%m-%d %H:%M'),'2018-09-23 20:06')", "title": "" }, { "docid": "c25e11b463172f22d0b6f278b4756d1b", "score": "0.5391946", "text": "def test_timeslot_double_booking(self):\n self.client.login(username='bob', password='bob')\n # Follow the response triggers any confirmation message\n response = self.client.post(self.booking_url, {}, follow=True)\n # Attempt booking with a different and a duplicate timeslot\n url = self.get_booking_url(self.submission.id, self.t2.short_id)\n response = self.client.post(url, {}, follow=True)\n self.assertRedirects(response, self.submission.get_absolute_url())\n self.assertTrue('messages' in response.context)\n for item in list(response.context['messages']):\n self.assertEqual(item.tags, 'error')", "title": "" }, { "docid": "71b1dd54b2a74ae22b36ba2422d41b28", "score": "0.5386673", "text": "def test_600_km_open():\n assert open_time(60.0, 600, START_TIME) == \"2017-01-01T01:46:00+00:00\"\n assert open_time(200.0, 600, START_TIME) == \"2017-01-01T05:53:00+00:00\"\n assert open_time(232.0, 600, START_TIME) == \"2017-01-01T06:53:00+00:00\"\n assert open_time(300.0, 600, START_TIME) == \"2017-01-01T09:00:00+00:00\"\n assert open_time(332.0, 600, START_TIME) == \"2017-01-01T10:00:00+00:00\"\n assert open_time(400.0, 600, START_TIME) == \"2017-01-01T12:08:00+00:00\"\n assert open_time(500.0, 600, START_TIME) == \"2017-01-01T15:28:00+00:00\"\n assert open_time(600.0, 600, START_TIME) == \"2017-01-01T18:48:00+00:00\"\n assert open_time(630.0, 600, START_TIME) == \"2017-01-01T18:48:00+00:00\"\n assert open_time(660.0, 600, START_TIME) == \"2017-01-01T18:48:00+00:00\"", "title": "" }, { "docid": "e0c891fbd688af90faaa4a4376388090", "score": "0.538355", "text": "def by_time(gr1, gr2):\n return min(\n gr1.schedule_datetime + gr1.game_interval,\n gr2.schedule_datetime + gr2.game_interval\n ) - max(\n gr1.schedule_datetime,\n gr2.schedule_datetime\n ) >= min(\n gr1.game_interval,\n gr2.game_interval\n )", "title": "" } ]
3f64b6653a03e4f175305cf6bc764495
Produces classifier predictions on the set
[ { "docid": "e69a02c969fd0e25c28c13db3edcc456", "score": "0.0", "text": "def predict(self, X):\n step_1f = self.layer_1.forward(X)\n step_2f = self.layer_2.forward(step_1f)\n step_3f = self.layer_3.forward(step_2f)\n probs = softmax(step_3f)\n pred = np.array(list(map(lambda x: x.argsort()[-1], probs)))\n return pred", "title": "" } ]
[ { "docid": "0ba7dfaf59c269fbf03499fc467b096d", "score": "0.7867263", "text": "def predict(self, inputset):\n return self.clf.predict(inputset)", "title": "" }, { "docid": "a051de44eecda307d96be7e662c2b897", "score": "0.76698387", "text": "def predict(self):\n self.predictions = self.clf.predict(self.dataset['test']['features'])", "title": "" }, { "docid": "062866ed511937926a8f8a626d2c9a05", "score": "0.76471627", "text": "def get_predictions(self, feature_set, label_set, test_set):\n feature_set = np.nan_to_num(feature_set)\n test_set = np.nan_to_num(test_set)\n self.model_predictor.fit(feature_set.transpose(), label_set)\n return self.model_predictor.predict(test_set.transpose())", "title": "" }, { "docid": "d6da3a0f5c299020ba4f1d72010865a2", "score": "0.73854387", "text": "def predict(self, test_set):\n\ttest_set = self.augmented_set(test_set)\n\tpred_labels_ = (np.dot(test_set, self.weights) > 0.0).astype(int)\n\tidx = pred_labels_==0\n\tpred_labels_[idx] = self.labels_[0]\n\tpred_labels_[~idx] = self.labels_[1]\n\treturn list(pred_labels_)", "title": "" }, { "docid": "93b620c32e82ab2067d3167900545072", "score": "0.7365597", "text": "def make_predictions(classifier, eval_set, name):\n INVERSE_MAP = {\n 0: 'entailment',\n 1: 'neutral',\n 2: 'contradiction'\n }\n\n hypotheses = classifier(eval_set)\n predictions = []\n \n for i in range(len(eval_set)):\n hypothesis = hypotheses[1][i]\n prediction = INVERSE_MAP[hypothesis]\n pairID = eval_set[i][\"pairID\"]\n gold_label = eval_set[i][\"gold_label\"]\n predictions.append((pairID, prediction, gold_label))\n\n f = open(name + '_predictions.csv', 'wt')\n w = csv.writer(f, delimiter = ',')\n w.writerow(['pairID','prediction', 'gold_label'])\n for example in predictions:\n w.writerow(example)\n f.close()", "title": "" }, { "docid": "bf81dac9e7c5b38250758e07067dd1b6", "score": "0.7193037", "text": "def make_predictions(self):\n model_path = \"../models/mlp.pickle\"\n if not os.path.exists(model_path):\n return\n\n with open(model_path) as model_file:\n model = pickle.loads(model_file.read())\n predictions = model.predict(self.test_vectors)\n labels = self.label_encoder.inverse_transform(predictions)\n result = \"Test_Document_ID\\tPredicted_Category\\n\" + reduce(lambda x, y: \"{}\\n{}\".format(x, y),\n map(lambda x: \"{}\\t{}\".format(x[0], x[1]),\n zip(self.test_raw_docs_ids, labels)))\n\n with open(\"../results/testSet_categories.csv\", \"w+\") as testset_out:\n testset_out.write(result)", "title": "" }, { "docid": "603331ce934ce4081a0a7c7675cf96ae", "score": "0.7150006", "text": "def predict():", "title": "" }, { "docid": "06da9bdc714c587c4422983d99a88a60", "score": "0.7102102", "text": "def predict(self, dataset):\n labels = self.active_model.classes_\n probabilities = self.active_model.predict_proba(dataset)\n return pd.DataFrame({label: probabilities[:, i] for i, label in enumerate(labels)})", "title": "" }, { "docid": "8c4d821474754f03beb56721d16f1f1a", "score": "0.70842457", "text": "def predict(self):\n PredictionStrategy.predict(self)\n return self.__predict(self.user_movie_instances_to_be_predicted)", "title": "" }, { "docid": "ff6e31bbc893e8fbe3e9e21335abd4fd", "score": "0.707831", "text": "def prediction_list():", "title": "" }, { "docid": "21b838ceb5b01fff268334477a60e951", "score": "0.70515007", "text": "def testclassifier():\n data = request.data\n test_data = json.loads(data)\n pred_results = []\n for loaded_cls in ppnamespace.LOADED_CLASSIFIERS:\n pred_data = loaded_cls.predict(test_data)\n #print pred_data\n for i in 
xrange(len(pred_data)):\n if i % 2 == 0:\n elem = loaded_cls.classifier_name + \"_\" + pred_data[i]\n else:\n elem = pred_data[i]\n pred_results.append(elem)\n return json.dumps(pred_results)", "title": "" }, { "docid": "6c6bf1c0857481d5762c667a3d948290", "score": "0.7048871", "text": "def predict(self):", "title": "" }, { "docid": "29085a199b9a7b953f15e1d7f632e3f3", "score": "0.70375365", "text": "def predict(self, triples):\n\t\treturn self._algorithm(triples)", "title": "" }, { "docid": "49b08bb56003a764caac6df92ba56032", "score": "0.70305014", "text": "def predict(self, X):\n predictions = None\n if self.classifier is not None:\n predictions = []\n for i in range(self.n_classifiers):\n predictions.append(self.classifier[i].predict(X))\n\n return predictions", "title": "" }, { "docid": "29911046d6aae8d2c106958ef7a1edb4", "score": "0.7010295", "text": "def predict(self, x):\n \n assert len(self.classifiers), 'Model not trained, call self.fit(x, y) first!'\n pred_list = np.array([c.predict(x) for c in self.classifiers] )\n \n # Vote the majority class to obtain final prediction \n pred = [Counter(p).most_common(1)[0][0] for p in pred_list.T]\n\n return pred", "title": "" }, { "docid": "29911046d6aae8d2c106958ef7a1edb4", "score": "0.7010295", "text": "def predict(self, x):\n \n assert len(self.classifiers), 'Model not trained, call self.fit(x, y) first!'\n pred_list = np.array([c.predict(x) for c in self.classifiers] )\n \n # Vote the majority class to obtain final prediction \n pred = [Counter(p).most_common(1)[0][0] for p in pred_list.T]\n\n return pred", "title": "" }, { "docid": "1ac279976e79c20248ef8adb284b6ff7", "score": "0.70021445", "text": "def predict(self):\n if self.method == \"perceptron\":\n return self.clf.predict(self.data)\n elif self.method == \"linear_model\":\n return [int(round(i)) for i in self.clf.predict(self.data)]\n elif self.method == \"lms\":\n return self.clf.predict(self.data)\n else:\n return \"Non-classifier\"", "title": "" }, { "docid": "15f0d8a1521c4b0695df86cd08acd145", "score": "0.69700825", "text": "def fit_predict(self, train_set: np.ndarray, test_set: np.ndarray) -> np.ndarray:\n self.train(self.__processInput(train_set))\n return self.predict(self.__processInput(test_set))", "title": "" }, { "docid": "b7e77c52e1c41665d7be27ee7d051f66", "score": "0.69524604", "text": "def predict(self):\n\n if self.test_file is not None:\n for user in self.test_set['users']:\n for item in self.test_set['feedback'][user]:\n self.predictions.append((user, item, self.predict_score(self.user_to_user_id[user],\n self.item_to_item_id[item], True)))\n else:\n raise NotImplementedError", "title": "" }, { "docid": "283ef48b4cdbedf22eee237274fa5fc4", "score": "0.6943128", "text": "def final_prediction(classifiers, classifier_weights, testing_features):\r\n _classified_labels = []\r\n\r\n # each column is the prediction results of a single record against all classifier\r\n # [num_of_classifiers, num_of_testing_samples]\r\n all_labels = []\r\n for _, classifier in enumerate(classifiers):\r\n all_labels.append(dt.classify_testing_dataset(classifier, testing_features))\r\n\r\n labels_set = set([0, 1])\r\n for sample in range(len(testing_features)):\r\n # find the label that yields the max value\r\n max_value = -1.0\r\n label = None\r\n for y in labels_set:\r\n temp = sum([classifier_weights[i] if all_labels[i][sample] == y else 0\\\r\n for i in range(len(classifiers))])\r\n if temp > max_value:\r\n max_value = temp\r\n label = y\r\n _classified_labels.append(label)\r\n\r\n return 
_classified_labels", "title": "" }, { "docid": "db46c54eef48ac7435e03dbbe95c6193", "score": "0.69362813", "text": "def predict(self):\n raise NotImplementedError", "title": "" }, { "docid": "baec62cf2dcfea2e8d6ad1b1878f0018", "score": "0.693105", "text": "def predict(self, test):\n classes = pd.unique(self.y)\n return test.apply(self.compute_class, axis=1, args=(self.parameters, classes))", "title": "" }, { "docid": "c552bc5a42ae0090fca290e84def571d", "score": "0.6873445", "text": "def get_predictions(self):\r\n \r\n self.new_data[self.target_col] = self.model.predict(self.new_data[self.feature_cols])", "title": "" }, { "docid": "b1f611cb20f3e703eacb894f33ff25df", "score": "0.68633366", "text": "def predict_samples(self, threshold=0.5):\r\n for classifier_id, classifier in self.classifiers_dict.items():\r\n print('\\rPredicting for classifier: {}'.format(classifier_id), end='')\r\n self.predictions[classifier_id] = {}\r\n for data_id, data_instance in self.test_data_dict.items():\r\n prediction = classifier.predict(x=data_instance[0], threshold=threshold)\r\n self.predictions[classifier_id][data_id] = prediction\r\n tf.keras.backend.clear_session()\r\n return self.predictions", "title": "" }, { "docid": "c219fccd68362dc7b783ac131bc3bcc5", "score": "0.68581927", "text": "def fit_predict(train, test, set_size_pruning=1):\n\n # Construct tree given the train data\n tree = ID3.construct_tree(train[:, 1:], train[:, 0], set_size_pruning)\n\n num_samples = test.shape[0]\n predictions = np.zeros(num_samples)\n for sample in range(num_samples):\n\n # Retrieve the sample\n x = test[sample]\n\n # Traverse the tree according to the sample\n node = tree\n while node.leaf == -1:\n if x[node.feature] < node.t:\n node = node.left\n else:\n node = node.right\n\n # Save the prediction\n predictions[sample] = node.leaf\n\n return predictions", "title": "" }, { "docid": "904ca821cee8a6be0526fb13cf7e267b", "score": "0.6853163", "text": "def predict(self) -> None:\n\n pass", "title": "" }, { "docid": "3797b4a1eed0a7b44fddb58c814ec7e3", "score": "0.68446755", "text": "def get_predictions():\n model = generate_final_classifier(train_file)\n\n data = pd.read_csv(data_file, encoding='latin1')\n # don't need the second return element\n data, _ = run_preprocess_nb_code(data)\n\n data['class'] = model.predict(data)\n data['class'] = data['class'].apply(lambda x: 'procedural' if x == 0 else 'engagement')\n\n # Actually, I just want engagements\n cols = ['pid', 'did']\n data = data.loc[data['class'] == 'engagement', cols]\n\n return data", "title": "" }, { "docid": "d3f235d74ec2d2a837df6616d67d54f2", "score": "0.68408227", "text": "def predict(self, list_of_sentence):\n list_of_preds = []\n for sentence in list_of_sentence:\n X = self.get_features(sentence)\n y = self.clf.predict(X)\n y_pred = [(w, t) for w, t in zip(sentence, y)]\n list_of_preds.append(y_pred)\n return list_of_preds", "title": "" }, { "docid": "eea6b66b9f8b25d9ae5f69ee4ad4224a", "score": "0.6839091", "text": "def predict(self):\n # Check if cached\n if self._candidates is None or not self._caching:\n # Run neural network\n results = self._net.detect(Image(self._image))\n # Init lists\n self._candidates = defaultdict(list)\n # Go through results\n for out in results:\n # Get class id\n class_id = out[0]\n # Get confidence\n confidence = out[1]\n if confidence > self._confidence_threshold:\n # Get candidate position and size\n x, y, w, h = out[2]\n x = x - int(w // 2)\n y = y - int(h // 2)\n # Create candidate\n c = Candidate(int(x), int(y), int(w), 
int(h), confidence)\n # Append candidate to the right list depending on the class\n assert class_id.decode() in self._class_names, \\\n f\"Predicted class {class_id.decode()} not in {self._class_names}.\"\n self._candidates[class_id.decode()].append(c)", "title": "" }, { "docid": "1574b915d33d4ad8ee2cb70bfbbe7177", "score": "0.683883", "text": "def generate_prediction_benchmarks(self):\n self.classifier_model_dict = self.train(len(self.examples))\n for num in xrange(100, len(self.docs_to_classify), 100):\n self.predict(num)", "title": "" }, { "docid": "b1b327e8235a54b587a8af9d367dc7b5", "score": "0.6833852", "text": "def predictor(classifier, question):\n d_fs = parseToPredictor(question)\n \n return classifier.classify(d_fs)", "title": "" }, { "docid": "84bf4d2544ab5f8485485f35b420dbfb", "score": "0.6823501", "text": "def predict(self, X):", "title": "" }, { "docid": "84bf4d2544ab5f8485485f35b420dbfb", "score": "0.6823501", "text": "def predict(self, X):", "title": "" }, { "docid": "49077111458946a7565def499e519db8", "score": "0.68021005", "text": "def predict(self, imgs): \n features = self.process_imgs(imgs)\n \n y= self.classifier.predict(features)\n \n if self.finetune==1:\n y_np = np.array(y)\n \n idx7 = np.where(y_np==7)[0]\n idx25 = np.where(y_np==25)[0]\n \n y7_5 = self.classifier7_5.predict(features[idx7])\n y25_22 = self.classifier25_22.predict(features[idx25])\n \n idx7to5 = idx7[y7_5==1]\n idx25to22 = idx25[y25_22==1]\n y_np[idx7to5] = 5\n y_np[idx25to22] = 22\n\n y = y_np\n \n \n return y", "title": "" }, { "docid": "0ffde49480fd44d54eb3937047150d68", "score": "0.6793986", "text": "def predict(self):\n yhat = lambda x: self.aggregator([node.prediction for node in x])\n return [yhat(group) for group in self.leaves]", "title": "" }, { "docid": "7ab58df359279fe43b62dd67f01b67f6", "score": "0.67777085", "text": "def predict(self, x):\n # *** START CODE HERE ***\n return self.clf.predict(x)\n # *** END CODE HERE ***", "title": "" }, { "docid": "9a0f8727b5f128a1c801ae06c5f43cc2", "score": "0.6758236", "text": "def predict(self):\n # fit model\n (clf, name) = self.est\n clf.fit(self.x_train, self.y_train)\n # predict classification\n preds = clf.predict(self.x_test)\n # predict probability\n if hasattr(clf, \"predict_proba\"):\n prob_pos = clf.predict_proba(self.x_test)[:, 1]\n else: # use decision function\n prob_pos = clf.decision_function(self.x_test)\n prob_pos = \\\n (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())\n return preds, prob_pos", "title": "" }, { "docid": "1d37586b900e90e93d333781a6483c73", "score": "0.6754897", "text": "def predict(self, X):\n y_pred = self.classifier.predict(X)\n return y_pred", "title": "" }, { "docid": "2e4e68237167bb972832ff6d9698b412", "score": "0.6747512", "text": "def predict_recommender(self, data):", "title": "" }, { "docid": "7527ca90cdac9d1c1cab61f61f778536", "score": "0.6726113", "text": "def make_predictions(self, trainer, class_labels):\n for index in range(len(self.data.data_set)):\n new_z = copy.deepcopy(trainer.Z_const)\n for key in class_labels:\n for feature in self.data.data_set[index]:\n if feature in trainer.cond_probs:\n feature_prod = trainer.cond_probs[feature][key]\n new_z[key] += np.math.log(feature_prod/(1 - feature_prod), 10)\n preds = {key: 10**(new_z[key] - max(new_z.values())) for key in new_z}\n self.data.data_pred.append(self.predictions[0][0])\n self.predictions[index] = sorted(preds.items(), key=itemgetter(1), reverse=True)", "title": "" }, { "docid": "4b6fb2bd0216136e8b1f55a9792995d0", "score": 
"0.6717591", "text": "def predict_training_set(self): \n if self.model == None:\n raise RuntimeError(\"Model must be trained before predict-method is called. \")\n df = self.df\n lambda_matrix = self.lambda_matrix\n probs = self.model.predict_proba(lambda_matrix)\n df, probs = filter_unlabeled_dataframe(X=df, y=probs, L=lambda_matrix)\n return pd.DataFrame({'prob_bad': probs[:,0], 'prob_good': probs[:,1]}, index=df.index)", "title": "" }, { "docid": "50161b74f9979e32b7412ed123c790ed", "score": "0.67072326", "text": "def test_fit_predict(self):\n\n model = MultiLabelClassifier(config=self.default_config())\n train_sample = self.dataset.sample(n=self.n_sample)\n valid_sample = self.dataset.sample(n=self.n_sample)\n model.fit(train_sample.Text, [[t, 6, 3] for t in train_sample.Target])\n\n predictions = model.predict(valid_sample.Text)\n for prediction in predictions:\n self.assertIsInstance(prediction[0], (str, np.int, np.int64))\n self.assertIn(3, prediction)\n self.assertIn(6, prediction)\n\n probabilities = model.predict_proba(valid_sample.Text)\n for proba in probabilities:\n self.assertIsInstance(proba, dict)", "title": "" }, { "docid": "7d00115a0c3b4d3524e426429c616161", "score": "0.67047536", "text": "def make_prediction(*, input_data):\n\n classify = pipeline.predict(input_data)\n classify_proba = pipeline.predict_proba(input_data)\n print(classify, classify_proba)\n return classify, classify_proba", "title": "" }, { "docid": "40360f339b7992af43af2be36ef70669", "score": "0.6702246", "text": "def predictions(self):\n return self._predictions", "title": "" }, { "docid": "232c648d8e9b35b73e3e46d376b08c18", "score": "0.6671509", "text": "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)", "title": "" }, { "docid": "232c648d8e9b35b73e3e46d376b08c18", "score": "0.6671509", "text": "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)", "title": "" }, { "docid": "2b891781e717db48b58ccf64b2288d24", "score": "0.666517", "text": "def predict(self, test_data):\n raise NotImplementedError", "title": "" }, { "docid": "923e67c64d19b1669c43d142d3ad5e94", "score": "0.6663892", "text": "def predict(self,data):\n preds = []\n for idx,instance in enumerate(data):\n preds.append({\"id\":instance[\"id\"], \"pred_clusters\": [instance['sentence_no']]})\n\n return preds", "title": "" }, { "docid": "da368bfc7ca1cefb92143e0ef0a03d3c", "score": "0.66595435", "text": "def predict(self, X):\n\n ##TODO##", "title": "" }, { "docid": "8e9dcb8947f52ddfb7b27fd610dfd7a1", "score": "0.66587996", "text": "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)", "title": "" }, { "docid": "59538229221e5000fe79c56d705daec5", "score": "0.66576475", "text": "def _predict(self, X):\n results = []\n for clf in self.estimators:\n pred = clf.predict(X)\n #converting the prediction into the desired output of the models\n if pred.dtype == np.int32:\n results.append(pred)\n else:\n results.append(self.le.transform(clf.predict(X)))\n return np.asarray(results).T", "title": "" }, { "docid": "1af0923dcca7c04cf66762ac7177dfcd", "score": "0.66576344", "text": "def predict(self, x):\n return self.clf.predict(x)", "title": "" }, { "docid": "e4009a73cfb02298ac59fbaab6e0c1c3", "score": "0.66559637", "text": "def predict(self, test_dataset, class_names, display_metrics=True):\r\n # fix the order of elements in val_dataset\r\n test_dataset = test_dataset.cache()\r\n \r\n # the ensemble model outputs, len(logits) = top\r\n logits = self.model.predict(test_dataset)\r\n logits = 
[logits[i] for i in self.top_idxes]\r\n \r\n # predicted class IDs, resulting in [num_test, top]\r\n class_ids = []\r\n for logit in logits:\r\n class_ids.append(np.argmax(logit, axis=1))\r\n class_ids = np.stack(class_ids, axis=1)\r\n \r\n # predicted class IDs, resulting in [num_test]\r\n num_test = class_ids.shape[0]\r\n ensemble_class_ids = np.zeros(shape=(num_test), dtype=np.int32)\r\n for i in range(num_test):\r\n unique_class_ids, counts = np.unique(\r\n class_ids[i], return_counts=True)\r\n idx = np.argmax(counts)\r\n class_id = unique_class_ids[idx]\r\n ensemble_class_ids[i] = class_id\r\n \r\n def compute_metrics(positive_class_id, y_true, y_pred):\r\n \"\"\"\r\n Given a positive class, computes precision, recall and F1-score.\r\n\r\n Parameters\r\n ----------\r\n positive_class_id : integer\r\n A positive class ID.\r\n y_true : numpy array\r\n Ground-truth class IDs.\r\n y_pred : numpy array\r\n Predicted class IDs.\r\n\r\n Returns\r\n -------\r\n metrics : tuple\r\n Includes precision, recall and F1-score.\r\n\r\n \"\"\"\r\n tp, fp, fn, tn = 0, 0, 0, 0\r\n for idx in range(len(y_pred)):\r\n if y_pred[idx] == positive_class_id and \\\r\n y_pred[idx] == y_true[idx]:\r\n tp += 1\r\n elif y_pred[idx] == positive_class_id and \\\r\n y_pred[idx] != y_true[idx]:\r\n fp += 1\r\n elif y_pred[idx] != positive_class_id and \\\r\n y_pred[idx] != y_true[idx]:\r\n fn += 1\r\n elif y_pred[idx] != positive_class_id and \\\r\n y_pred[idx] == y_true[idx]:\r\n tn += 1\r\n precision = tp / (tp + fp + 1e-5)\r\n recall = tp / (tp + fn + 1e-5)\r\n f1_score = 2 * precision * recall / (precision + recall + 1e-5)\r\n metrics = (precision, recall, f1_score)\r\n return metrics\r\n \r\n # compute metrics\r\n accs, precs, recalls, f1s = [], {}, {}, {}\r\n for _, y_batch in test_dataset.take(1):\r\n batch_size = y_batch.shape[0]\r\n num_batches = int(np.ceil(num_test / batch_size))\r\n for i, (_, y_true_batch) in zip(\r\n range(num_batches), test_dataset.as_numpy_iterator()):\r\n y_pred_batch = ensemble_class_ids[\r\n i*batch_size : batch_size + i*batch_size]\r\n acc_batch = np.sum(y_true_batch == y_pred_batch) / len(y_true_batch)\r\n accs.append(acc_batch)\r\n # for each class, compute precision, recall and F1-score\r\n for class_id in range(len(class_names)):\r\n prec_batch, recall_batch, f1_batch = compute_metrics(\r\n class_id, y_true_batch, y_pred_batch)\r\n if class_names[class_id] not in precs.keys():\r\n precs[class_names[class_id]] = []\r\n precs[class_names[class_id]].append(prec_batch)\r\n if class_names[class_id] not in recalls.keys():\r\n recalls[class_names[class_id]] = []\r\n recalls[class_names[class_id]].append(recall_batch)\r\n if class_names[class_id] not in f1s.keys():\r\n f1s[class_names[class_id]] = []\r\n f1s[class_names[class_id]].append(f1_batch)\r\n metrics = (accs, precs, recalls, f1s)\r\n \r\n # display metrics\r\n print('Test accuracy: %.2f\\n' % (np.mean(np.array(accs))))\r\n precs_class, recalls_class, f1s_class = [], [], []\r\n for name in class_names:\r\n precs_class.append(np.mean(np.array(precs[name])))\r\n recalls_class.append(np.mean(np.array(recalls[name]))) \r\n f1s_class.append(np.mean(np.array(f1s[name])))\r\n print('%s:' % (name))\r\n print(' precision %.2f, recall %.2f, F1-score %.2f' % (\r\n precs_class[-1], recalls_class[-1], f1s_class[-1])) \r\n print('\\nAverage:')\r\n print(' precision %.2f, recall %.2f, F1-score %.2f' % (\r\n np.mean(precs_class), np.mean(recalls_class), np.mean(f1s_class)\r\n ))\r\n \r\n return ensemble_class_ids, metrics", "title": "" }, { 
"docid": "2f409e0bebc41d304a51ed673b5e82ce", "score": "0.66544235", "text": "def predict(self,X):", "title": "" }, { "docid": "970e4ec0a8777ac045920f919126354b", "score": "0.66489047", "text": "def predict(self):\n\n self.predictions = self.cnn_model.predict(x = self.dataset.test_data,\n verbose = self.config.config_namespace.predict_verbose)\n\n return", "title": "" }, { "docid": "9e476f46cceef3e35f51606c1b0d2dfa", "score": "0.6643845", "text": "def predict(self, dataset_test):\n self.learner.eval()\n for images in dataset_test:\n #logging.info('Images shape : {}'.format(images))\n images = self.process_imgs(images[0])\n\n qry_logits = self.learner(images).detach()\n return qry_logits", "title": "" }, { "docid": "7929426143fb97a58e7d9023056a6626", "score": "0.6638887", "text": "def predict(self) -> None:\n # ite prediction\n self.ite_pred_list: list = [] if self.real_world else [self.ite_test]\n for um in self.um_list:\n if um.um_type == \"ipm\":\n self.ite_pred_list.append(um.predict(self.X_test))\n elif um.um_type == \"tdf\":\n self.ite_pred_list.append(um.predict_ite(self.X_test))\n\n # treatment allocation\n self.treatment_pred_list: list = [] if self.real_world else [np.array(self.ite_test > 0, dtype=int)]\n for um in self.um_list:\n if um.um_type == \"ipm\":\n self.treatment_pred_list.append(np.array(um.predict(self.X_test) > 0, dtype=int))\n elif um.um_type == \"tdf\":\n self.treatment_pred_list.append(um.predict_assignment(self.X_test))", "title": "" }, { "docid": "fb780796c39e9e082fec393e6f6291a0", "score": "0.6636453", "text": "def predict(self, sample):\n # predict the test set by using each decision tree\n p_results = []\n for dtree in self.forest:\n p_results.append(dtree.predict(sample))\n # vote the majority result\n if p_results.count('p') > p_results.count('e'):\n return 'p'\n return 'e'", "title": "" }, { "docid": "b6da52be306e704797dfa0d60d2e4294", "score": "0.6631414", "text": "def predict(self, X):\n return super(AutoSklearnClassifier, self).predict(X)", "title": "" }, { "docid": "e7262b0bfa279c6d62f21892034288d4", "score": "0.6630271", "text": "def utPredict():\n from data_provider import data_provider\n attribute, dataset = data_provider('../diabetes_train.arff')\n attribute, testset = data_provider('../diabetes_test.arff')\n root = TreeNode(dataset, attribute)\n curTree = DecisionTree(root)\n curTree.createTree(root, 4)\n try:\n assert(curTree.predict(root, testset[0]) == 'positive')\n assert(curTree.predict(root, testset[22]) == 'positive')\n assert(curTree.predict(root, testset[52]) == 'positive')\n assert(curTree.predict(root, testset[3]) == 'negative')\n assert(curTree.predict(root, testset[78]) == 'negative')\n assert(curTree.predict(root, testset[99]) == 'negative')\n print '[predict] TEST PASS'\n except AssertionError:\n print '[predict] TEST FAILED'", "title": "" }, { "docid": "86016653045533a97dcbcf561e8b5af3", "score": "0.6626634", "text": "def predict(self, X):\n raw_predictions = self.decision_function(X)\n class_predictions = self.binarizer.inverse_transform(raw_predictions)\n\n return class_predictions", "title": "" }, { "docid": "86016653045533a97dcbcf561e8b5af3", "score": "0.6626634", "text": "def predict(self, X):\n raw_predictions = self.decision_function(X)\n class_predictions = self.binarizer.inverse_transform(raw_predictions)\n\n return class_predictions", "title": "" }, { "docid": "0157a3f2dae0a7b21f5006f14ad8cf3e", "score": "0.662232", "text": "def predict(self, imgs, details=False):\n all_preds = self.model.predict(imgs)\n idxs = 
np.argmax(all_preds, axis=1)\n preds = [all_preds[i, idxs(i)] for i in range(len(idxs))]\n classes = [self.classes[idx] for idx in idxs]\n return np.array(preds), idxs, classes", "title": "" }, { "docid": "6dd286b40c50abbd0d9e526bb93615b2", "score": "0.6612203", "text": "def predictions(self):\n\n return self._predictions", "title": "" }, { "docid": "9eb0e8bd88e8603ebc5620af14812182", "score": "0.6611148", "text": "def classify():\n data = np.array(request.json)\n\n if data.shape[0] > 1:\n app.logger.warning(\"Trying to predict on two or more data. Not implemented yet...\")\n\n if not data.size:\n app.logger.warning(\"Empty data\")\n return jsonify({})\n\n predictions = MODEL.predict(data, verbose=0)\n predicted_label = LABELS[np.argmax(predictions)]\n\n app.logger.info(f\"Data shape: {data.shape}. Predicted genre: {predicted_label}\")\n\n return jsonify({\"predicted_label\": predicted_label, \"probability\": float(np.max(predictions))})", "title": "" }, { "docid": "ea158cf79dd3e145e647f80bbd223b16", "score": "0.6608255", "text": "def predict(self, x):\n # will contain a list of lists where list i is for\n # observation i, and the contents of list i are\n # the predictions for observation i from the classifiers\n predictions = list()\n\n if type(x) == type(pd.DataFrame([])):\n print('fix the object')\n x = x.values\n\n for obs in range(len(x)):\n # create list i\n predictions.append(list())\n # move through classifiers generating predictions for\n # observation i\n for clsf in self.clsf:\n predictions[obs].append(clsf.predict([x[obs]]))\n #print(predictions[obs])\n tl = []\n # now go through the predictions for each observation using the\n # look up table to make the final prediction\n yp = list()\n for obs in range(len(predictions)):\n #print(self.LUT[predictions[obs][0], predictions[obs][1]].tolist())\n yp.append( self.LUT[predictions[obs][0], predictions[obs][1]].tolist()[0].index(max(self.LUT[predictions[obs][0], predictions[obs][1]].tolist()[0])))\n\n #print(yp)\n return yp", "title": "" }, { "docid": "25839e87dd3bf5efe2cdedb83dbe0e31", "score": "0.6606118", "text": "def _predict(\n self, predict_input: gobbli.io.PredictInput, context: ContainerTaskContext\n ) -> gobbli.io.PredictOutput:\n pred_proba_df = pd.DataFrame(\n {\n label: 1 if label == self.majority_class else 0\n for label in predict_input.labels\n },\n index=range(len(predict_input.X)),\n )\n\n return gobbli.io.PredictOutput(y_pred_proba=pred_proba_df)", "title": "" }, { "docid": "cb1e32c8b1016c8ecaaadfe1daa882d5", "score": "0.6597053", "text": "def predict(self, probs):\n\n return self.model.predict(probs)", "title": "" }, { "docid": "7e4551eced53cb93cbd888dfd640fbc0", "score": "0.6585883", "text": "def predict(self, data: pd.DataFrame) -> dict:\n x, y = self.preprocessor.fit_transform(data, fit=False)\n y_pred = self.classifier.predict(x)\n return y_pred", "title": "" }, { "docid": "9dfe59541675053a08c2ee20b99aa3de", "score": "0.65856326", "text": "def chemprop_predict() -> None:\n make_predictions(args=PredictArgs().parse_args())", "title": "" }, { "docid": "a6fc3b51500200bd20efb5b8b47094f4", "score": "0.65852624", "text": "def predict(img):\n\n prediction = model.predict(img)\n\n print mapToCategory(prediction)", "title": "" }, { "docid": "bb2002fd0d4b392da3678c708c47dd9a", "score": "0.6584952", "text": "def evaluate_predictions(self, y_true: np.ndarray,\n y_pred: np.ndarray) -> OrderedDict[Text, Any]:", "title": "" }, { "docid": "5900a4443c1ba6ee08e90c9f8a6a4c57", "score": "0.6584014", "text": "def predict(self, 
features_set, metadata_set):\n\n if self.seed is not None:\n np.random.seed(self.seed)\n\n def one_hot(x):\n return np.eye(self.num_labels)[x]\n temp_labels = metadata_set.copy()\n for label_int in self.label_translate:\n label_name = self.label_translate[label_int]\n temp_labels.loc[(temp_labels.label == label_name), 'label'] = label_int\n try:\n temp_labels = np.array([one_hot(x) for x in temp_labels.label])\n except IndexError:\n temp_labels = np.array([np.zeros(len(self.label_translate)) for x in temp_labels.label])\n\n num_test_samples, _ = np.shape(features_set)\n\n samples_covered = 0\n pred_labels = []\n while samples_covered < num_test_samples:\n start = samples_covered\n end = samples_covered + self.batch_size\n if end > num_test_samples:\n end = num_test_samples\n batch_ids = np.arange(start, end)\n batch_features = features_set.loc[batch_ids]\n batch_labels = temp_labels[batch_ids]\n batch_protected_attributes = np.reshape(list(metadata_set[self.protected_attribute_name].loc[batch_ids]), [-1,1])\n\n batch_feed_dict = {self.features_ph: batch_features,\n self.true_labels_ph: batch_labels,\n self.protected_attributes_ph: batch_protected_attributes,\n self.keep_prob: 1.0}\n\n pred_labels += self.sess.run(self.pred_labels, feed_dict=batch_feed_dict).tolist()\n samples_covered += len(batch_features)\n\n pred_labels = np.array(pred_labels, dtype=np.float64)\n dataset_new = metadata_set.copy()\n for label_num in self.label_translate:\n dataset_new['pred_score_{}'.format(self.label_translate[label_num])] = pred_labels[:,label_num]\n dataset_new['pred_label'] = [self.label_translate[x] for x in (np.argmax(pred_labels, axis=1)).astype(np.int32).tolist()]\n\n return dataset_new", "title": "" }, { "docid": "a971080c946a5b3e11f54d79c714be99", "score": "0.65808", "text": "def predict(self, X):\n rng = check_random_state(self.random_state)\n return np.array(\n [\n self.classes_[int(rng.choice(np.flatnonzero(prob == prob.max())))]\n for prob in self.predict_proba(X)\n ]\n )", "title": "" }, { "docid": "cd36e1c355e08ead0a06a2b0918a42ee", "score": "0.6573806", "text": "def predict(self, X):\r\n # TODO for multi output classes\r\n p = self.predict_proba(X)\r\n p_class = np.array([1 if pn > 0.5 else 0 for pn in p])\r\n return p_class", "title": "" }, { "docid": "53e10b7a39aa2ba92f2bc41f8949d9de", "score": "0.657159", "text": "def predict(self, X):\r\n if(self.num == 2):\r\n y_pred = []\r\n for i in np.array(X):\r\n val = predict_1(self.tree_fit,i)\r\n y_pred.append(val)\r\n return pd.Series(y_pred)\r\n elif(self.num == 3):\r\n y_pred = np.zeros(len(X))\r\n for i in range(len(X)):\r\n val = regressor_predict(self.tree_fit,X.iloc[i])\r\n y_pred[i] = val\r\n return pd.Series(y_pred)\r\n elif(self.num == 1):\r\n y_pred = []\r\n arr_1 = np.array(self.label_class)\r\n for i in range(len(arr_1)):\r\n val = random.choice(arr_1)\r\n y_pred.append(val)\r\n return pd.Series(y_pred)\r\n else:\r\n y_pred = []\r\n arr_1 = np.array(self.label_class)\r\n for i in range(len(arr_1)):\r\n val = random.choice(arr_1)\r\n y_pred.append(val)\r\n return pd.Series(y_pred)", "title": "" }, { "docid": "7e17946290e930f5fc212a2910a91afc", "score": "0.6565332", "text": "def predict(self, model, data):", "title": "" }, { "docid": "286066101558e5d8001cd17591ff1ea8", "score": "0.6564087", "text": "def test_predict_returns_predictions(self):\n preds = self.test_GNB.predict(self.X_test)\n self.assertEqual(preds.shape, self.Y_test.shape)", "title": "" }, { "docid": "e462d44382baac94e616fe8df069fa9a", "score": "0.6560759", "text": 
"def predict(self, x):\n\n n = len(x)\n num_class = len(np.unique(self.y))\n prediction = np.zeros((n, num_class))\n\n ############################################################\n ############################################################\n # BEGIN_YOUR_CODE\n # calculate naive bayes probability of each class of input x\n\n pass\n # END_YOUR_CODE\n ############################################################\n ############################################################\n\n return prediction", "title": "" }, { "docid": "adb9c087192d0bbb378c4378139a3966", "score": "0.65598834", "text": "def predict(self, obs):\n preds = []\n for ind_ob in obs:\n preds.append([*self.classify(ind_ob)][0])\n return np.array(preds)", "title": "" }, { "docid": "ea1e447c65eea5ba399d5d41d5e68b7b", "score": "0.6559878", "text": "def fitAndPredict(self, X_train, y_train, X_test):\n predictions = []\n for i in range(self.num_classifiers):\n classifier = self.createClassifierFunction()\n print(\"Classifier %d\" % (i + 1))\n print(classifier)\n self.classifiersStr.append(str(classifier))\n self.fit(classifier, X_train, y_train)\n preds = self.getPredictions(classifier, X_test)\n predictions.append(preds)\n\n return getMode(np.array(predictions))", "title": "" }, { "docid": "e1510ff1efcba3ac388ca3ca6b986c76", "score": "0.6558183", "text": "def predict(self, X_test):\r\n\t\tif type(X_test) is not np.array:\r\n\t\t\tX_test = np.array(X_test)\r\n\r\n\t\tpreds = [self._calc_posterior(f) for f in X_test]\r\n\t\treturn preds", "title": "" }, { "docid": "e68dee815f419953a70cc4e855b40ffd", "score": "0.6549808", "text": "def predict(self, data):\n predicted_labels = self._model.predict(data)\n return predicted_labels", "title": "" }, { "docid": "bbc6e4c00cf84ce763c4085c196a9dc5", "score": "0.6546953", "text": "def predict(self, X, transformer_ids=None):\n vote_overall = self.predict_proba(X, transformer_ids=transformer_ids)\n return self.classes[np.argmax(vote_overall, axis=1)]", "title": "" }, { "docid": "53db1a773fa80bfa63490db1b3536940", "score": "0.6538681", "text": "def get_predictions(model, testset, tta):\n num_images = len(testset)\n predictions = []\n for i, batch in enumerate(tqdm(testset)):\n if tta:\n # images.shape [n, 3, 96, 96] where n is num of 1+tta\n for images in batch:\n preds = model(images.to(device)) # [n, num_classes]\n predictions.append(preds.mean(dim=0).detach().tolist())\n else:\n preds = model(batch[:, 0].to(device))\n preds = preds.detach().tolist() # [1]\n predictions.extend(preds)\n\n return np.array(predictions)", "title": "" }, { "docid": "242397e2270090a6b433c35a7479d295", "score": "0.65320814", "text": "def predicted(self):\n label = []\n for i in range(0,len(self.data_test)):\n label.append(self.comp_label(list(self.data_test.iloc[i][:-1])))\n return label", "title": "" }, { "docid": "21e2e705aad9a40e71475a995163e6df", "score": "0.65198857", "text": "def make_pred(self):\n prod_pred_filename = (self.fname + \"_ensemble.txt\")\n logger.info(\"predicting...\")\n with open(prod_pred_filename, 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['_id', 'category_id'])\n with tqdm.tqdm(total=len(self.all_products), **get_tqdm_kwargs()) as pbar:\n for prod in self.all_products:\n pred_category = prod.pred_category(self.inv_map)\n writer.writerow([prod.prod_id, pred_category])\n pbar.update(1)", "title": "" }, { "docid": "15eeb55ca6d129039ffd3373c0c68ef7", "score": "0.6518068", "text": "def predict_test_values(self):\n\n # transform data\n X_test, _ = self.transform.transform_data(self.X_test, 
self.X_test)\n\n # load model\n self.learner.load_model(model_name=self.model_name)\n\n # predict the probability of success\n predictions = self.learner.model.predict_proba(X_test)[:, -1][0]\n\n return predictions", "title": "" }, { "docid": "b7a4bf9e9bb6fa7c8e4902fb244ca476", "score": "0.6514807", "text": "def predict(self, features, return_conf=...):\n ...", "title": "" }, { "docid": "c72f0575fd49d478c632a4adea322c9e", "score": "0.6510035", "text": "def classify (self, X_test):\n filtered_X = X_test.filter(items=['work_class', 'education', 'marital', 'occupation_code', 'relationship', 'race', 'sex', 'country'])\n self.one_hot_encoder.fit(filtered_X)\n new_labels = self.one_hot_encoder.get_feature_names()\n # print(\"new labels:\")\n # print(new_labels)\n\n test = self.one_hot_encoder.transform(filtered_X).toarray()\n encoded_arr = self.encode(test, X_test) \n classifications = self.clf.predict(encoded_arr)\n return classifications", "title": "" }, { "docid": "4319109db4ab0b7ceded6878df6aa816", "score": "0.6508967", "text": "def predict_all(self, features):\n pred = np.tile(self.average_label, features.shape[0]) # make a 1D vector of predictions, 1 for each instance\n return pred.reshape(-1,1) # reshape this so it is the correct shape = instances, # of output classes", "title": "" }, { "docid": "92b27d7f67b9d223daba3f5a1595c8d8", "score": "0.6508393", "text": "def _predict(self, X):\n return np.asarray([clf.predict(X) for clf in self.estimators]).T", "title": "" }, { "docid": "e26f33658322eee43501cda2f1291a27", "score": "0.6507257", "text": "def _fit_predict(cls, X_train, y_train, X_test):\n model = Pipeline([\n ('imputer', SimpleImputer()),\n ('scalar', RobustScaler()),\n ('classifier', cls._get_classifier()),\n ])\n model.fit(X_train, y_train)\n\n return model.predict_proba(X_test)[:, 1]", "title": "" }, { "docid": "8cf1451e4a1d52d35ffcf77304c6f612", "score": "0.65061617", "text": "def make_predictions(ratings_data, mfact_data):\r\n\r\n\t# load data from the original training set\r\n\tcenter = ratings_data[\"center\"]\r\n\tscale = ratings_data[\"scale\"]\r\n\tbook_isbn_to_index = ratings_data[\"book_isbn_to_index\"]\r\n\r\n\t# load data calculated by the matrix factorization\r\n\tP = mfact_data[\"P\"]\r\n\tQ = mfact_data[\"Q\"]\r\n\tBn = mfact_data[\"Bn\"]\r\n\tBd = mfact_data[\"Bd\"]\r\n\tmean = mfact_data[\"mean\"]\r\n\r\n\t# load the set of requested predictions\r\n\tqueries = util.load_test(\"../../data/books/ratings-test.csv\")\r\n\tL = len(queries)\r\n\tdebug(\"Making %d predictions\",L)\r\n\r\n\t# for each query\r\n\tfor (i,query) in enumerate(queries):\r\n\r\n\t\t# print progress\r\n\t\t# if DEBUG: print (\"%d / %d : \" % (i+1,L)),\r\n\r\n\t\t# lookup user and book index\r\n\t\tuser_index = query[\"user\"] - 1\r\n\t\tbook_index = book_isbn_to_index[query[\"isbn\"]]\r\n\r\n\t\t# calculate predicted rating\r\n\t\trating_float = (np.dot(P[user_index,:],Q[book_index,:]) + mean + Bn[user_index] + Bd[book_index]) \\\r\n\t\t\t* scale + center\r\n\r\n\t\t# coerce to range (1,5); round\r\n\t\trating = max(1,min(5,rating_float))\r\n\r\n\t\t# store both values so we can do visualization of distributions later\r\n\t\tquery[\"rating\"] = rating\r\n\t\tquery[\"rating_f\"] = rating_float\r\n\r\n\t\t# print value\r\n\t\t# if DEBUG: print \"%f -> %d\" % (rating_float, rating)\r\n\r\n\treturn queries", "title": "" }, { "docid": "1b7ad4361855fbe746a1819a08148066", "score": "0.6503868", "text": "def _get_predictions(self, cls_score: torch.Tensor,\n data_samples: List[DataSample]):\n pred_scores = 
torch.sigmoid(cls_score)\n\n if data_samples is None:\n data_samples = [DataSample() for _ in range(cls_score.size(0))]\n\n for data_sample, score in zip(data_samples, pred_scores):\n if self.thr is not None:\n # a label is predicted positive if larger than thr\n label = torch.where(score >= self.thr)[0]\n else:\n # top-k labels will be predicted positive for any example\n _, label = score.topk(self.topk)\n data_sample.set_pred_score(score).set_pred_label(label)\n\n return data_samples", "title": "" }, { "docid": "a1710d17d1cb4fb93641695920d70b94", "score": "0.65007263", "text": "def predict_class(cls, X_train, y_train, X_test, y_test,\n pipeline, silent=False, target2=None):\n pipeline.fit(X_train, y_train)\n predictions = pipeline.predict(X_test)\n print(\"predictions computed....\")\n return cls.evaluate_class(predictions, y_test, target2, silent)", "title": "" }, { "docid": "a29705a18c958108771a88c8b5e2c2a6", "score": "0.64981437", "text": "def get_prediction(self, data):\n X_test = self.get_features(data)\n pred = self.model.predict(X_test)\n\n return pred", "title": "" }, { "docid": "47ff9b4c32cf6eba2fc44214a21cd7d9", "score": "0.6497745", "text": "def predict(self, images):\r\n\r\n ii = convert_images_to_integral_images(images)\r\n\r\n scores = np.zeros((len(ii), len(self.haarFeatures)))\r\n\r\n # Populate the score location for each classifier 'clf' in\r\n # self.classifiers.\r\n\r\n feature_index_list = [clf.feature for clf in self.classifiers]\r\n\r\n for i, im in enumerate(ii):\r\n scores[i, feature_index_list] = [self.haarFeatures[hf_index].evaluate(im) for hf_index in feature_index_list]\r\n\r\n # print(\"2\")\r\n\r\n # Obtain the Haar feature id from clf.feature\r\n\r\n # Use this id to select the respective feature object from\r\n # self.haarFeatures\r\n\r\n # Add the score value to score[x, feature id] calling the feature's\r\n # evaluate function. 'x' is each image in 'ii'\r\n\r\n result = []\r\n\r\n # Append the results for each row in 'scores'. This value is obtained\r\n # using the equation for the strong classifier H(x).\r\n alpha_sum = np.sum(self.alphas)\r\n for x in scores:\r\n local_sum = 0.0\r\n for i in range(len(feature_index_list)):\r\n clf = self.classifiers[i]\r\n alpha = self.alphas[i]\r\n local_sum += clf.predict(x) * alpha\r\n\r\n if local_sum >= alpha_sum / 2:\r\n result.append(1)\r\n else:\r\n result.append(-1)\r\n\r\n return result", "title": "" }, { "docid": "d3bfd4caa65863687cd15b9c1fb88d4e", "score": "0.64959073", "text": "def model_predictions(featureset, model, return_probs=True):\n feature_df = featureset.to_dataframe()\n if return_probs and hasattr(model, 'predict_proba'):\n preds = model.predict_proba(feature_df)\n else:\n preds = model.predict(feature_df)\n\n predset = featureset.copy()\n if len(preds.shape) == 1:\n predset['prediction'] = (['name'], preds)\n else:\n if isinstance(model, GridSearchCV):\n columns = model.best_estimator_.classes_\n else:\n columns = model.classes_\n predset['class_label'] = columns\n predset['prediction'] = (['name', 'class_label'], preds)\n return predset", "title": "" }, { "docid": "2b2270fd717839129c65169d3d9c67ca", "score": "0.6489659", "text": "def predict(self, sections):", "title": "" }, { "docid": "40ce119b0c3991f5c9152a1b9526ed0e", "score": "0.64889884", "text": "def fit_predict(self, features, classes):\n self.fit(features, classes)\n return self.predict(features)", "title": "" } ]
e8bcd69370b79d6f78a35d37f89c5612
Get a list of substitutions by quasirandom sampling from a Sobol sequence.
[ { "docid": "b7fe19d9cc9206ed92268ad8aec491cb", "score": "0.49346414", "text": "def sobol(range_map: Dict[str, ranges.Range], trials: int) -> List[Dict[str, str]]:\n ordered_range_map = collections.OrderedDict(range_map)\n sobol_values = sobol_seq.i4_sobol_generate(len(range_map), trials)\n\n def transform_vector(uniform_vector):\n substitution = {}\n for uniform_sample, (name, rng) in zip(\n uniform_vector, ordered_range_map.items()\n ):\n substitution[name] = rng.transform_uniform_sample(uniform_sample)\n return substitution\n\n return [transform_vector(vector) for vector in sobol_values]", "title": "" } ]
[ { "docid": "5b26bd916d5dca4a8cadd3b66c7eaeac", "score": "0.5697027", "text": "def pseudosample(x):\n#\n BXs=[]\n for k in range(len(x)):\n ind=random.randint(0,len(x)-1)\n BXs.append(x[ind])\n return BXs", "title": "" }, { "docid": "ec87410c0f6f07aed4874be7e5d77bf0", "score": "0.5676431", "text": "def sample_text(self) -> list:\r\n text = self.load_text()\r\n pattern = r'(?i)(?<=[\\s])([a-z]+)(?=[,;\\.?!(\"\\s)])'\r\n sample_list = list(set(re.findall(pattern, text)))\r\n return random.choices(population = sample_list, k = self.sample_length)", "title": "" }, { "docid": "ddd053b536f1ed5d2f5dfef2e96007e6", "score": "0.563744", "text": "def rand_variant(seq, prob):\n nuc_bases = \"ACGT\"\n new_seq = \"\"\n for nuc in seq:\n cur_prob = randint(1, prob+1)\n if cur_prob == 1:\n temp_bases = nuc_bases.replace(nuc, '')\n new_nuc = temp_bases[randint(0,2)]\n new_seq += new_nuc\n else:\n new_seq += nuc\n return new_seq", "title": "" }, { "docid": "16426261fcc347319dc90920a368c261", "score": "0.5603993", "text": "def pick_random_mutations(self, n_mutations, sequence):\n n_mutations = min(len(self.multichoices), n_mutations)\n if n_mutations == 1:\n index = np.random.randint(len(self.multichoices))\n choice = self.multichoices[index]\n return [(choice.segment, choice.random_variant(sequence=sequence))]\n\n return [\n (choice_.segment, choice_.random_variant(sequence=sequence))\n for choice_ in [\n self.multichoices[i]\n for i in np.random.choice(\n len(self.multichoices), n_mutations, replace=False\n )\n ]\n ]", "title": "" }, { "docid": "ec5837898505b973939b2584830ee30b", "score": "0.55713844", "text": "def all_variants(self, sequence):\n new_sequence = bytearray(sequence.encode())\n choice_start, choice_end = self.choices_span\n encoded_segment = sequence[choice_start:choice_end].encode()\n\n def sort_variants_by_distance_to_current(choice):\n \"\"\"This function iterates through the variants of a given choice\n using not the alphabetical (which would bias AC over GT) but rather\n a kind of 'least-change' order, which biases towards solutions\n close to the current sequence.\n\n Impact on overall algorithm speed is < 0.5%.\"\"\"\n current = sequence[choice.segment[0] : choice.segment[1]]\n alphasort = {v: i for i, v in enumerate(sorted(choice.variants))}\n\n def sort_key(v):\n return (abs(alphasort[v] - alphasort[current]), v)\n\n return sorted(choice.variants, key=sort_key)\n\n variants_slots = [\n [\n (choice_.segment, v.encode())\n for v in sort_variants_by_distance_to_current(choice_)\n ]\n for choice_ in self.multichoices\n ]\n for variants in itertools.product(*variants_slots):\n new_sequence[choice_start:choice_end] = encoded_segment\n for ((start, end), variant) in variants:\n new_sequence[start:end] = variant\n yield new_sequence.decode()", "title": "" }, { "docid": "dc78c6cf98955240d856187e6a11dc78", "score": "0.55536056", "text": "def createRandomSequences(size=100,length=9):\n aa = list(IUPAC.IUPACProtein.letters)\n vals=[]\n for i in range(0, size):\n seq = \"\"\n for s in range(0,length):\n index = random.randint(0, len(aa)-1)\n seq += aa[index]\n vals.append(seq)\n return vals", "title": "" }, { "docid": "94fcfd3e21a9a96445e86bace0ea3aeb", "score": "0.55395097", "text": "def sample_sequence(self):\n \n \n seq = \"\"\n \n for i in range(self.length):\n \n seq += self._sample_char(i)\n \n \n return seq", "title": "" }, { "docid": "565341394f079628371e6585ee38c063", "score": "0.5536407", "text": "def gamble() -> list:\n result = random.choices(range(1, 50), k=6)\n return result", 
"title": "" }, { "docid": "5fadc202e9609ca57c7d18695a5d363e", "score": "0.54363674", "text": "def generate_rand_quaternions(self):\r\n quaternions = []\r\n i = 1\r\n if self.settings.nrotations > 1:\r\n while i <= self.settings.nrotations:\r\n r1 = random.uniform(-1, 1)\r\n r2 = random.uniform(-1, 1)\r\n s1 = r1 * r1 + r2 * r2\r\n if s1 < 1:\r\n r3 = random.uniform(-1, 1)\r\n r4 = random.uniform(-1, 1)\r\n\r\n s2 = r3 * r3 + r4 * r4\r\n if s2 < 1:\r\n q = (r1, r2, r3 * (np.sqrt((1 - s1) / s2)), r4 * (np.sqrt((1 - s1) / s2)))\r\n quaternions.append(q)\r\n\r\n i += 1\r\n\r\n return quaternions", "title": "" }, { "docid": "0a129c912693c51d9e72671d81f399d5", "score": "0.5422097", "text": "def random_sublist(X, s):\n import sage.misc.prandom as random\n return [a for a in X if random.random() <= s]", "title": "" }, { "docid": "2b5dd294fca41db209baac668daebecf", "score": "0.5415513", "text": "def sample(self):\n return sorted(set([sconc(s,e) for s in self.S+self.R() for e in self.E]))", "title": "" }, { "docid": "15b9ba6ba988489d8dcdf6f0e98d1a4d", "score": "0.53934735", "text": "def _sample_gamma(s, c):\n\treturn gamma(s, c)", "title": "" }, { "docid": "e86796d2fcb0bbeb7c6e546a6abe4b39", "score": "0.5335885", "text": "def extract_a_random_sampling(word_list, how_many):\n\tworking_word_list = []\n\tfor i in range(0, how_many):\n\t\tworking_word_list.append(random.choice(word_list))\n\treturn working_word_list", "title": "" }, { "docid": "77e24b67bd8e47d27a513a04de2b44cc", "score": "0.5329319", "text": "def powerlaw_sequence(self):\n sequence: list = [random.paretovariate(self.gamma-1) for i in range(self.n)]\n factor: float = self.m / sum(sequence)\n sequence = [round(k * factor) for k in sequence]\n return sequence", "title": "" }, { "docid": "a906755725f5b4ae8fdbf3c1aa911a3c", "score": "0.53276885", "text": "async def get_random_choices(self):\n choices = random.sample(songs, k=4)\n if self.right_song not in choices:\n choices[0] = self.right_song\n random.shuffle(choices)\n logger.info(\"shuffled choices: {}\".format([song.title for song in choices]))\n return choices", "title": "" }, { "docid": "77085110b8bd94a3daf4e82c05e9b143", "score": "0.53216404", "text": "def sample(self):\n phase = np.random.choice(self._div, p=self._prob) + np.random.rand()\n phase /= self._div\n return phase", "title": "" }, { "docid": "d9cb5a6bd6f09e18ee059ab844ebbbe3", "score": "0.53052104", "text": "def randomized(seq):\n #import pdb\n #pdb.set_trace()\n while len(seq)>0:\n idx = np.random.randint(0,len(seq))\n yield seq[idx]\n seq = seq[:idx] + seq[idx+1:]", "title": "" }, { "docid": "cc83da3c77f4f59f8c0b7b5f0f8bd0a8", "score": "0.5284822", "text": "def generate_random_gene_sequence(gene_pool):\n genes = []\n for _ in range(chromosome_size):\n genes.append(random.choice(gene_pool))\n\n return genes", "title": "" }, { "docid": "cc83da3c77f4f59f8c0b7b5f0f8bd0a8", "score": "0.5284822", "text": "def generate_random_gene_sequence(gene_pool):\n genes = []\n for _ in range(chromosome_size):\n genes.append(random.choice(gene_pool))\n\n return genes", "title": "" }, { "docid": "fbc8564875236967a7631314b96b69d3", "score": "0.5275858", "text": "def samples_with_replacement(self, name, population, **kwargs): #590 (line in Coconut source) #591 (line in Coconut source)\n if not isinstance(name, Str): #592 (line in Coconut source)\n raise TypeError(\"name must be string, not {_coconut_format_0}\".format(_coconut_format_0=(name))) #593 (line in Coconut source)\n population = tuple(population) #594 (line in Coconut source)\n for 
i in count(): #595 (line in Coconut source)\n yield self.choice(\"{_coconut_format_0}[{_coconut_format_1}]\".format(_coconut_format_0=(name), _coconut_format_1=(i)), population, **kwargs) #596 (line in Coconut source)", "title": "" }, { "docid": "f85b0f0fe3a17642516d0fc92bea9348", "score": "0.5265477", "text": "def generate(number, isigma):\n\n result = []\n for j in range(number):\n result.append(random.gauss(1.0, 1.0 / isigma))\n\n return result", "title": "" }, { "docid": "8410e55ba3aad1e081ce476fb9dfd6bd", "score": "0.5259387", "text": "def generate_samples(text, # type: TokenizedText\n n_samples=500, # type: int\n bow=True, # type: bool\n random_state=None,\n replacement='', # type: str\n min_replace=1, # type: Union[int, float]\n max_replace=1.0, # type: Union[int, float]\n group_size=1, # type: int\n ):\n # type: (...) -> Tuple[List[str], np.ndarray, np.ndarray]\n kwargs = dict(\n n_samples=n_samples,\n replacement=replacement,\n random_state=random_state,\n min_replace=min_replace,\n max_replace=max_replace,\n )\n if bow:\n num_tokens = len(text.vocab)\n res = text.replace_random_tokens_bow(**kwargs)\n else:\n num_tokens = len(text.tokens)\n res = text.replace_random_tokens(group_size=group_size, **kwargs)\n\n texts, num_removed_vec, masks = zip(*res)\n similarity = cosine_similarity_vec(num_tokens, num_removed_vec)\n return texts, similarity, vstack(masks)", "title": "" }, { "docid": "b2ca87d32e2033c8e35bd8cfdb79217e", "score": "0.5249479", "text": "def pi_random(self, s):\n # Make a random movement\n a = random.randrange(0,len(gl.WASD))\n # Calculate the qualities\n return self.Q(s,a), a", "title": "" }, { "docid": "b2ca87d32e2033c8e35bd8cfdb79217e", "score": "0.5249479", "text": "def pi_random(self, s):\n # Make a random movement\n a = random.randrange(0,len(gl.WASD))\n # Calculate the qualities\n return self.Q(s,a), a", "title": "" }, { "docid": "068309783d22b27eed93dc029ad059ef", "score": "0.5239996", "text": "def exp_sample(l, n):\n return [exp_transform(l, random.random()) for i in range(n)]", "title": "" }, { "docid": "9391aee28f80c08baa12d074486ff834", "score": "0.5227407", "text": "def sample(self):\n import random\n return random.choice(self.sentences)", "title": "" }, { "docid": "74b6a716f88e2c3df78f680f0146cdbe", "score": "0.5227086", "text": "def generate_q():\n g = random.randrange(161, 1185) # random generate seed's bit amount\n while (True):\n seed = random.getrandbits(g) # random seed\n U = int(hashlib.sha1(bin(seed).encode()).hexdigest(), 16) ^ \\\n int(hashlib.sha1(bin((seed + 1) % (1 << g)).encode()).hexdigest(), 16)\n q = U | (1 << 159) | 1\n if miller_rabin_test(q):\n return g, seed, q", "title": "" }, { "docid": "a3dc53482e6791d8fb6124e5f18b7a99", "score": "0.52076757", "text": "def sample(h, seed_ix, n):\n c = np.zeros((vocab_size, 1))\n c[seed_ix] = 1\n generated_chars = []\n for t in range(n):\n x = np.dot(Wex, c)\n h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)\n o = np.dot(Why, h) + by\n p = softmax(o)\n\n # the the distribution, we randomly generate samples:\n ix = np.random.multinomial(1, p.ravel())\n c = np.zeros((vocab_size, 1))\n\n for j in range(len(ix)):\n if ix[j] == 1:\n index = j\n c[index] = 1\n generated_chars.append(index)\n\n return generated_chars", "title": "" }, { "docid": "d26de8fcff3ad673a3556c9c60e1f931", "score": "0.5194623", "text": "def sampling_from_unigram(vocab_prob, vocab_idx):\n samples = []\n for key in vocab_prob.keys():\n # (unigram_dist)^(3/4)/Z\n samples.extend([vocab_idx[key]] * int(math.pow(vocab_prob[key], 
3/4)/0.001))\n\n return samples", "title": "" }, { "docid": "24c0e8ae942d16e479f08f8cf679bd92", "score": "0.5182176", "text": "def random_as_seq(n: int, lang=\"en\") -> List[str]:\n l = _Language.get(lang)\n return [secrets.choice(l.words) for _ in range(n)]", "title": "" }, { "docid": "e771ed3dd1247ffad83f599d6765fbff", "score": "0.5168391", "text": "def time_sampling(sequences, metadata, country, num_sequences):\n strains = [rec.id for rec in SeqIO.parse(sequences, 'fasta')] # filter the data by the latest sequences filter\n metadata = metadata[(metadata['strain'].isin(strains)) | (metadata['country'] == country)]\n intervals = 5\n num_samples = num_sequences // intervals\n metadata['datetime'] = metadata['date'].apply(lambda x: pd.to_datetime(x, format=\"%Y-%m-%d\"))\n\n # bin by weeks\n binned = bin_metadata_by_week(metadata)\n sampled = []\n for i in tqdm(binned['bin'].unique()):\n chosen = list(np.random.choice(binned[binned['bin'] == i]['strain'], num_samples))\n sampled.extend(chosen)\n # add all the sequences from the country to the sampled batch\n sampled = sampled + [s for s in strains if country in s]\n return sampled", "title": "" }, { "docid": "24667466d5adc913ddaa3cbb36beaa01", "score": "0.51602477", "text": "def get_randomized_pam_seqs(seq, num_pam_bases, num_randomized_bases, end='5p'):\n assert num_randomized_bases >= num_pam_bases\n all_randomized = (''.join(tup) for tup in itertools.product(bases, repeat=num_randomized_bases))\n if end == '5p':\n return set([rand_seq + seq[num_pam_bases:] for rand_seq in all_randomized])\n else:\n assert end == '3p', end\n return set([seq[:-num_pam_bases] + rand_seq for rand_seq in all_randomized])", "title": "" }, { "docid": "0cb5d3ad35619cef99cb5ec5e3b5a7eb", "score": "0.5157872", "text": "def _sample(self, q0):\n # Convert adaptive_scale_factor to a jump probability\n p_jump = 1. 
- .5 ** self.scaling\n\n rand_array = nr.random(q0.shape)\n q = np.copy(q0)\n\n # Locations where switches occur, according to p_jump\n switch_locs = (rand_array < p_jump)\n q[switch_locs] = True - q[switch_locs]\n\n accept = self.model.logp(q) - self.model.logp(q0)\n q_new, _ = metrop_select(np.exp(accept), q, q0)\n\n return q_new", "title": "" }, { "docid": "fe44abf7054ecdc2f608626e46c945a9", "score": "0.5152952", "text": "def get_substitutions(subs_json):\n with open(subs_json) as subs_file:\n raw_subs = json.loads(subs_file.read())\n return { k : random.choice(raw_subs[k]) for k in raw_subs }", "title": "" }, { "docid": "6cc4cbd1b59b309fe79425cedf3cc08d", "score": "0.5146705", "text": "def sample(Ns, xvals, yvals, t, e, q):\n seq_x = [''] * len(Ns)\n seq_y = [''] * len(Ns)\n for i, seq_len in enumerate(Ns):\n seq_x[i], seq_y[i] = sample_seq(seq_len, xvals, yvals, t, e, q)\n return seq_x, seq_y", "title": "" }, { "docid": "3cc94e9862c9f8a538d104c333762280", "score": "0.5136745", "text": "def apply_random_mutations(self, n_mutations, sequence):\n new_sequence = bytearray(sequence.encode())\n for segment, seq in self.pick_random_mutations(n_mutations, sequence):\n start, end = segment\n new_sequence[start:end] = seq.encode()\n return new_sequence.decode()", "title": "" }, { "docid": "359b93fd954ef41611d9a567be294995", "score": "0.5136698", "text": "def sample(self):\n choices = self.random.randint(0, self.num_choices, size=self.sample_count,\n dtype=np.int64)\n\n param = [self.id2param[choice] for choice in choices]\n return param", "title": "" }, { "docid": "b26946e5067a6538268ae8928e75b5d6", "score": "0.5122942", "text": "def uniform_sample_dag_plural(cpdag, num_samples, exact=False):\n dags = []\n if exact == False:\n for _ in range(num_samples):\n dags.append(fast_sample_dag(cpdag))\n return dags\n if exact:\n all_dags = enumerate_dags(cpdag)\n return all_dags[np.random.randint(len(all_dags),size=num_samples)]", "title": "" }, { "docid": "bbd6a758ac69aa3f7368c9502badb678", "score": "0.5122653", "text": "def uniformly_sample_list(vals):\n return random.choice(vals)", "title": "" }, { "docid": "6a281873d63b63348a8ba1e61c9eba52", "score": "0.51151747", "text": "def permutate_sequence(sequence, sequence_length):\n permutated_sequence = ''.join(random.sample(sequence, sequence_length))\n return permutated_sequence", "title": "" }, { "docid": "5c486b1ebb4289c7090a0b725bd7f933", "score": "0.51123667", "text": "def salutation(questions):\n \n for question in questions.split():\n if question.lower() in salutation_questions:\n return rndm.choice(salutation_reply)", "title": "" }, { "docid": "50ea2d5fff86be8033ddfb70caf9a4ec", "score": "0.5105699", "text": "def create_squad_example(text):\n question, context = text\n yield question, QuestionAnsweringPipeline.create_sample(question, context)", "title": "" }, { "docid": "34e39e1c05fdf03dbcefd864f14202c9", "score": "0.51036686", "text": "def _random_sample(self):\n individual = []\n prob = random.uniform(0, 1)\n for _ in range(self.length):\n s = random.uniform(0, 1)\n if s > prob:\n individual.append(0)\n else:\n individual.append(1)\n return self.codec.decode(individual)", "title": "" }, { "docid": "e25b22b8bb7eca70d39c1c2e553236c6", "score": "0.5101739", "text": "def sample_alias(alias, Q, row_idx, mt_ptrs):\n\n n_states = alias.shape[1]\n tot_samp = row_idx.shape[0]\n r_ints = numpy.random.randint(n_states, size=tot_samp)\n orig_success = near_uniform.par_bernoulli(Q[row_idx, r_ints], mt_ptrs)\n orig_idx = orig_success == 1\n alias_idx = 
numpy.logical_not(orig_idx)\n\n choices = numpy.zeros(tot_samp, dtype=numpy.uint)\n choices[orig_idx] = r_ints[orig_idx]\n choices[alias_idx] = alias[row_idx[alias_idx], r_ints[alias_idx]]\n choices = choices.astype(numpy.uint8)\n return choices", "title": "" }, { "docid": "1812493a612deee5dc83cf4a55ed1e9f", "score": "0.5100674", "text": "def random_encode(p):\n\n return [np.random.randint(20) for i in pep]", "title": "" }, { "docid": "4855d4efd09aabd62bb689ed4d300ebd", "score": "0.5087723", "text": "def sample(self, n_samps):\n # print('gmix trying to sample '+str(n_samps)+' from '+str(self.dist))\n # xs = np.array([self.sample_one() for n in range(n_samps)])\n # print(self.dist.to_json)\n xs = np.array(self.dist.sample(n_samps))\n # print('gmix sampled '+str(n_samps)+' from '+str(self.dist))\n return xs", "title": "" }, { "docid": "96251805027b22200dd345e3e1b1fa38", "score": "0.5078054", "text": "def sample_n_unique(sampling_f, n):\n res = []\n while len(res) < n:\n candidate = sampling_f()\n if candidate not in res:\n res.append(candidate)\n return res", "title": "" }, { "docid": "1612a23b207786b71b79075a108172e6", "score": "0.5072045", "text": "def generate_sample(grammar, prod, frags): \n if prod in grammar._lhs_index: # Derivation\n derivations = grammar._lhs_index[prod] \n derivation = random.choice(derivations) \n for d in derivation._rhs: \n generate_sample(grammar, d, frags)\n else:\n # terminal\n frags.append(str(prod))", "title": "" }, { "docid": "e4630c4a9d68f5712501efcd48c9ddad", "score": "0.5070153", "text": "def sampling_question():\n # TODO: assign value to choice and factor\n choice = 1\n options = ['Gibbs','Metropolis-Hastings']\n factor = 1\n return options[choice], factor", "title": "" }, { "docid": "9e908c6c91d214f6f3ff6ae6a245679d", "score": "0.50538963", "text": "def generate_questions_transformer(sentences, n):\n qs = []\n random.seed(123)\n sentences = random.sample(sentences, n)\n for i in range(len(sentences)):\n doc = get_doc(sentences[i])\n for ent in doc.ents:\n if ent.label_ in ['PERSON', 'DATE', 'GPE', 'LOC', 'ORG', 'EVENT']:\n context = sentences[i]\n #print(context)\n qs.append([get_question(ent, context), ent])\n r_qsts = set()\n for qq in qs:\n xx = qq[0]\n r_qsts.add(xx[xx.find(\":\")+2:-4])\n r_qsts = list(r_qsts)\n return r_qsts", "title": "" }, { "docid": "8326eeccc0a21890016b903269891cc0", "score": "0.5043468", "text": "def stationary_sample(self, seq_len, rng=None):\n probs = self.base_freqs\n char_state_indexes = [distributions.sample_multinomial(probs, rng) for i in range(seq_len)]\n return [self.state_alphabet[idx] for idx in char_state_indexes]", "title": "" }, { "docid": "a00cd5fc2cc402478a0081c1acf15ffe", "score": "0.5040966", "text": "def scramble(self):\n\n scramble_index = np.random.permutation(range(len(self.ids))) #permutation returns ndarray\n \n if len(scramble_index) == 0:\n return None\n\n self.pos = self.pos[scramble_index, ...]\n self.magnitudes = self.magnitudes[scramble_index]\n self.ids = self.ids[scramble_index]", "title": "" }, { "docid": "8ad3bee7fca99793a7bc36b836b6d734", "score": "0.50408673", "text": "def _get_random_data(self, length=FUZZ_LENGTH):\n chars_length = length * 5 / 100\n chars = [choice(SENSITVE_CHARACTERS) for i in range(chars_length)]\n chars_idx = [randrange(0, length-1) for i in range(chars_length)]\n rand = list(os.urandom(length))\n for idx, char in zip(chars_idx, chars):\n rand[idx] = char\n return ''.join(rand)", "title": "" }, { "docid": "67183d6c11d4dc2e98b97f9dad62edde", "score": "0.50390786", 
"text": "def sample(self):\n values = []\n for v in self.id2param.values(): # type: AbstractHyperParameter\n x = v.sample()\n\n if isinstance(x, Iterable) and not isinstance(x, six.string_types):\n values.extend(x)\n else:\n values.append(x)\n\n return values", "title": "" }, { "docid": "f04129520b7db7e1ddcdc825bb3806ff", "score": "0.5030302", "text": "def sample(self, q0, num_samples=1):\n if num_samples is None:\n num_samples = 1\n if not hasattr(q0, '__len__'):\n q0 = np.array([q0])\n\n samples = np.zeros([num_samples, len(q0)])\n for i in range(num_samples):\n samples[i] = self._sample(q0)\n q0 = samples[i]\n\n return samples", "title": "" }, { "docid": "f04129520b7db7e1ddcdc825bb3806ff", "score": "0.5030302", "text": "def sample(self, q0, num_samples=1):\n if num_samples is None:\n num_samples = 1\n if not hasattr(q0, '__len__'):\n q0 = np.array([q0])\n\n samples = np.zeros([num_samples, len(q0)])\n for i in range(num_samples):\n samples[i] = self._sample(q0)\n q0 = samples[i]\n\n return samples", "title": "" }, { "docid": "a84dbba1193cd08c06066012013515a4", "score": "0.50261974", "text": "def sample(self, rng_key):", "title": "" }, { "docid": "6b09cbcafb938b4d90fc1feab84f262b", "score": "0.5026157", "text": "def shuffled(self, seq):\n seq = list(seq)\n random.shuffle(seq)\n return seq", "title": "" }, { "docid": "268cf8b3bb3f83433f433f000c324650", "score": "0.50218827", "text": "def f_update_seq_reg(multigraph):\n seq = list(multigraph.Vs.keys())\n if multigraph.randomized_update_seq:\n random.shuffle(seq)\n return seq", "title": "" }, { "docid": "9606555924ef5d859d70f9022eca2668", "score": "0.5019861", "text": "def read_qpm_randoms(sample, version):\n assert version in MOCK_VERSIONS\n\n sample = 'ngc' if sample == 'N' else 'sgc'\n\n # get the file path\n filename = f\"qpm_random_v1.8.1_{sample}_vetoed_smeared.dat\"\n path = os.path.join(data_dir, 'mocks', 'qpm', 'randoms', filename)\n\n # load the source\n names = ['RA', 'DEC', 'Z_RSD', 'Z', 'COMP', 'NZ', 'WEIGHT_FKP']\n s = CSVCatalog(path, names=names)\n\n # inverse completeness\n s['INV_COMP'] = 1. 
/ s['COMP']\n\n # up-weight expected angular completeness\n s['NZ'] *= s['INV_COMP']\n\n return s", "title": "" }, { "docid": "48447547e5728f13636381df81eca228", "score": "0.5012919", "text": "def create_random_proteins_from_distribution(length_lst, \n amino_acid_lst, \n amino_acid_distribution=None):\n protein_sequence_lst = []\n for length in length_lst:\n sequence = random.choices(amino_acid_lst, \n weights=amino_acid_distribution,\n k=length)\n protein_sequence_lst.append(\"\".join(sequence))\n return protein_sequence_lst", "title": "" }, { "docid": "7aeac7afa3c0050648e3b7bb1ab9c2d3", "score": "0.5011695", "text": "def bis_random(bigrams):\n for a in bigrams:\n n=len(bigrams[a])\n print (n)\n r = [random.random() for i in range(n)]\n s = sum(r)\n r = [ i/s for i in r ]\n i=0\n print (r[i])\n for b in bigrams[a]:\n bigrams[a][b] = np.log(r[i])\n i+=1\n return bigrams", "title": "" }, { "docid": "f6242690356b204ff2e32acd91e9e535", "score": "0.50029397", "text": "def variants(seq):\n var = []\n for i, char in enumerate(seq):\n if char in ambig:\n for char2 in ambig[char]:\n var += variants(seq[:i] + char2 + seq[i+1:])\n if not var:\n var.append(seq)\n return var", "title": "" }, { "docid": "dfdd94152e9433b37a2015ecb959066a", "score": "0.5001662", "text": "def unshuffled_sample(self, name, population, k, **kwargs): #563 (line in Coconut source) #565 (line in Coconut source)\n if not isinstance(name, Str): #566 (line in Coconut source)\n raise TypeError(\"name must be string, not {_coconut_format_0}\".format(_coconut_format_0=(name))) #567 (line in Coconut source)\n population = tuple(population) #568 (line in Coconut source)\n sample = [] #569 (line in Coconut source)\n for i, x in enumerate(population): #570 (line in Coconut source)\n if len(sample) == k: #571 (line in Coconut source)\n break #572 (line in Coconut source)\n if len(population) - i == k - len(sample): #573 (line in Coconut source)\n sample += population[i:] #574 (line in Coconut source)\n break #575 (line in Coconut source)\n proc_kwargs = (param_processor.modify_kwargs)(lambda val: 1 if x in val else 0, kwargs) #576 (line in Coconut source)\n if \"placeholder_when_missing\" not in proc_kwargs: #579 (line in Coconut source)\n proc_kwargs[\"placeholder_when_missing\"] = 0 #580 (line in Coconut source)\n if self.uniform(\"{_coconut_format_0}[{_coconut_format_1}]\".format(_coconut_format_0=(name), _coconut_format_1=(i)), 0, 1, **proc_kwargs) >= 1 - (k - len(sample)) / (len(population) - i): #581 (line in Coconut source)\n sample.append(x) #587 (line in Coconut source)\n return sample #588 (line in Coconut source)", "title": "" }, { "docid": "ab02968e042b665cec57f669c9f0bf21", "score": "0.49935117", "text": "def _get_normal_samples_for_shot(\n self,\n qubits: Sequence[int],\n ) -> np.ndarray:\n samples = [self._rng.normal(0, 1, size=1) for qubit in qubits]\n # we squeeze the second dimension because samples is List[qubit_number][0][0\\1] = I\\Q\n # and we want to change it to be List[qubit_number][0\\1]\n return np.squeeze(np.array(samples), axis=1)", "title": "" }, { "docid": "61f1780c70dbb7c4d3c2815f21497871", "score": "0.49923962", "text": "def fetch_sampled_pssms(length): \n \n target_entropy = 1.0\n relative_entropy_near = 1.0\n \n #relative_entropy_middle = 0.20\n #relative_entropy_far = 1.0\n \n \n #fix entropy of parent\n root = optimize_pssm_by_sampling(length, entropy=target_entropy, epsilon=0.001, sigma=0.01)\n \n #return [root, root]\n \n epsilon = 0.002\n sigma = 0.3 / length\n \n left = 
optimize_mutation_by_sampling(root, target_entropy, relative_entropy_near, epsilon*2, sigma*2)\n right = optimize_mutation_by_sampling(root, target_entropy, relative_entropy_near, epsilon*2, sigma*2)\n \n return [left, right]\n \n toy_0 = optimize_mutation_by_sampling(left, target_entropy, relative_entropy_near, epsilon, sigma)\n toy_1 = optimize_mutation_by_sampling(left, target_entropy, relative_entropy_near, epsilon, sigma)\n \n toy_2 = optimize_mutation_by_sampling(right, target_entropy, relative_entropy_near, epsilon, sigma)\n \n \n #note: last leaf has different relative entropy!!\n toy_3 = optimize_mutation_by_sampling(right, target_entropy, relative_entropy_near*3, epsilon*2, sigma*2)\n #toy_3 = optimize_mutation_by_sampling(right, target_entropy, relative_entropy_near, epsilon, sigma)\n \n #mutate the hell out of this one\n #toy_3.mutate()\n #toy_3.mutate()\n #toy_3.mutate()\n\n\n return [toy_0, toy_1, toy_2, toy_3]", "title": "" }, { "docid": "43fd4ebfe784f3a70667a47ae0e55349", "score": "0.49897859", "text": "def random_sampling(self, content):\n\t\tlines = random.sample(range(int(len(content)/3)), int(int(len(content)/3*.9)))\n\t\tfor i in range(int(len(content)/3)):\n\t\t\tif i in lines:\n\t\t\t\tself.train_indicies.append(i*3)\n\t\t\t\tself.train_indicies.append(i*3+1)\n\t\t\t\tself.train_indicies.append(i*3+2)\n\t\t\telse:\n\t\t\t\tself.test_indicies.append(i*3)\n\t\t\t\tself.test_indicies.append(i*3+1)\n\t\t\t\tself.test_indicies.append(i*3+2)", "title": "" }, { "docid": "a4dda018eaa510e4613c69bb8dd258c7", "score": "0.4988507", "text": "def calc_all_snp_quants(extra_data):\n for item in extra_data.mapArray:\n chromosome = item[0]\n snp = item[1]\n bp_pos = item[3] \n A1, A2 = extra_data.split(snp)\n c_m_p = extra_data.ct_allelic(snp, 'Phenotype')\n freq_A1_cases = (c_m_p[0,0] / (c_m_p[0,0]+c_m_p[0,1]))\n freq_A1_contr = (c_m_p[1,0] / (c_m_p[1,0]+c_m_p[1,1]))\n odds_ratio_pheno = calc_odds_ratio(c_m_p)\n\n print '\\t'.join([chromosome, snp, str(bp_pos), str(A1), str(freq_A1_cases),\n str(freq_A1_contr), str(A2), str(odds_ratio_pheno)])", "title": "" }, { "docid": "2b428772116b211eec531d9f0ad59934", "score": "0.49863958", "text": "def test_data_uniform(size):\n chunks = []\n for i in range(size):\n chunks.append(str(random.random()))\n \n return chunks", "title": "" }, { "docid": "ca7c79c5b586f7788053e3948a90d8fa", "score": "0.4983981", "text": "def sample_qubo(self, Q, **kwargs):\n variables = set().union(*Q)\n try:\n active_variables = sorted(variables)\n except TypeError:\n active_variables = list(variables)\n num_variables = len(active_variables)\n\n # developer note: in the future we should probably catch exceptions\n nodes = self.solver.nodes\n edges = self.solver.edges\n if not all(u in nodes if u == v else ((u, v) in edges or (v, u) in edges)\n for u, v in Q):\n msg = \"Problem graph incompatible with solver.\"\n raise BinaryQuadraticModelStructureError(msg)\n\n future = self.solver.sample_qubo(Q, **kwargs)\n\n return dimod.SampleSet.from_future(future, _result_to_response_hook(active_variables, dimod.BINARY))", "title": "" }, { "docid": "41599ccc4a3d8f8a1ea711fe14fe81f6", "score": "0.49794745", "text": "def rnd_family(input_set, index_set):\n\n\treturn [rnd_subset(input_set) for i in index_set]", "title": "" }, { "docid": "91ce3175406bd48416f0d9fe71031a6b", "score": "0.4975128", "text": "def randquote(all_quotes_obj):\n randquote = choice(all_quotes_obj)\n return randquote", "title": "" }, { "docid": "b8a14b02aae80880ea2a6ce29f5abf95", "score": "0.49687502", "text": 
"def newSeq(self, length):\n seq = np.random.choice(self.NUCLEOTIDES, # sample nucleotides with replacement \n size = length, \n replace = True)\n return seq", "title": "" }, { "docid": "c63300f60383d69f806a76bd6fd86ae1", "score": "0.49629033", "text": "def looking_at_lll_on_qary():\n n, q, sd, m, nu, embedding = 30, 5, sqrt(2/3), 35, 1, \"baigal\"\n # n, q, sd, m, nu, embedding = 30, 5, sqrt(2/3), 35, 2, \"baigal\"\n nu_denom = QQ(round(nu*100)/100).denominator()\n d = m + 1\n if embedding == \"baigal\":\n d += n\n tries = 1\n exp = [0] * d\n for _ in range(tries):\n (lwe, samples, A, C, BC_GSO, vol) = genLWEInstance(\n n, q, sd, m, nu=nu, embedding=embedding)\n for i in range(d):\n exp[i] += BC_GSO.get_r(i, i)/tries\n\n from sage.all import matrix\n basis = matrix(d)\n BC_GSO.B.to_matrix(basis)\n gs = basis.gram_schmidt(orthonormal=False)[0]\n for i in range(d):\n print(gs[i])", "title": "" }, { "docid": "842c702297a0e50863d5700b5a55fae3", "score": "0.49626026", "text": "def random_sampling(samples, data):\n return random.randint(0, len(data[\"query\"][\"x\"])-1)", "title": "" }, { "docid": "93042c3ce335ef1d6f9cbfe6d78ceba8", "score": "0.49609274", "text": "def lrandom_sample(pred, seq, random_state=None):\n return list(random_sample(pred, seq, random_state=random_state))", "title": "" }, { "docid": "5fa2039f9b6a60b901e8f9f5c8fb1b88", "score": "0.49560243", "text": "def sample_uniform(sample_count: int,\n initial_cnf: CNF,\n fresh: int,\n support: int,\n generation_requests: List[GenerationRequest],\n use_docker: bool = DEFAULT_DOCKER_MODE_ON,\n use_cmsgen: bool = False\n ) -> List[Solution]:\n with temporary_cnf_file() as cnf_file:\n combine_and_save_cnf(cnf_file, initial_cnf, fresh, support, generation_requests)\n solver_name = \"UniGen\" if not use_cmsgen else \"CMSGen\"\n print(f\"Running {solver_name}...\")\n solution_str = call_unigen(sample_count, cnf_file, docker_mode=use_docker, use_cmsgen=use_cmsgen)\n # TODO: Validate that skipping the comments is the intended\n # functionality. The Haskell code doesn't appear to need to do\n # this, but this could be due to the Unigen upgrade or something\n # else. 
Just check it.\n if not solution_str:\n return []\n sample_set = 0\n if \"we found only \" in solution_str:\n sample_set = int(solution_str[solution_str.index(\"we found only \")+14:].split(',')[0])\n\n return [build_solution(line) for line in solution_str.strip().splitlines() if line and not line.startswith('c')][sample_set:]", "title": "" }, { "docid": "e71f0a3940fa689cafa3561380e526f9", "score": "0.495005", "text": "def choose_random_questions(qset, user):\n r_tries = 20\n\n # Retrieve all previous questions attempted by the user and remove\n # these from consideration.\n\n\n # Add mandatory questions to the list\n mandatory = qset.inclusion_set.all()\n all_id = range(len(mandatory))\n\n #for attempt in range(r_tries):\n\n N_quest = np.random.random_integers(qset.min_num, qset.max_num)\n random.shuffle(all_id)\n qts = [mandatory[idx] for idx in all_id[0:N_quest]]\n\n\n # Finally, randomize (permute) the list order\n #np.random.shuffle(qts)\n return qts", "title": "" }, { "docid": "c9caeda4d9f19be985abec72c546ee95", "score": "0.49498746", "text": "def make_sampler(things):\n\n nb_things = len(things)\n shuffled_things = deepcopy(things)\n for i in cycle(range(nb_things)):\n if i == 0:\n random.shuffle(shuffled_things)\n yield shuffled_things[i]", "title": "" }, { "docid": "5745f1f2cd0817906b77ef8dca90a6a7", "score": "0.49483353", "text": "def _random_subset(seq, m, rng):\n targets = set()\n while len(targets) < m:\n x = rng.choice(seq)\n targets.add(x)\n return targets", "title": "" }, { "docid": "eabb0bfc3e8c770591294fe841b820dc", "score": "0.49439636", "text": "def define_samples(pop_params):\n sample_names = []\n for i, pop in enumerate(pop_params):\n times = [years_to_gen(t) for t in pop_params[pop][\"t_sample\"]]\n sample_names.extend([msp.Sample(population=i, time=t) for t in times])\n return sample_names", "title": "" }, { "docid": "fc91723e29a6767a6b8485ed22c7e8d2", "score": "0.4941077", "text": "def extract_mutations(self, ref_seq):\n\n for sq in self.sequences:\n sq.extract_mutations(ref_seq)\n\n print('This library contains %d unique mutations' % len(set(flatten([sq.mutations for sq in self.sequences]))))", "title": "" }, { "docid": "5c57aa0ab56f1141fea31982c0df0d62", "score": "0.49410275", "text": "def sample(self, name, population, k, **kwargs): #543 (line in Coconut source) #545 (line in Coconut source)\n if not isinstance(name, Str): #546 (line in Coconut source)\n raise TypeError(\"name must be string, not {_coconut_format_0}\".format(_coconut_format_0=(name))) #547 (line in Coconut source)\n sampling_population = [x for x in population] #548 (line in Coconut source)\n sample = [] #549 (line in Coconut source)\n for i in range(k): #550 (line in Coconut source)\n if len(sampling_population) <= 1: #551 (line in Coconut source)\n sample.append(sampling_population[0]) #552 (line in Coconut source)\n else: #553 (line in Coconut source)\n def _coconut_lambda_1(val): #554 (line in Coconut source)\n elem = _coconut_iter_getitem(val, i) #554 (line in Coconut source)\n return sampling_population.index(elem) if elem in sampling_population else 0 #554 (line in Coconut source)\n\n proc_kwargs = (param_processor.modify_kwargs)(_coconut_lambda_1, kwargs) #554 (line in Coconut source)\n ind = self.randrange(\"{_coconut_format_0}[{_coconut_format_1}]\".format(_coconut_format_0=(name), _coconut_format_1=(i)), len(sampling_population), **proc_kwargs) #559 (line in Coconut source)\n sample.append(sampling_population.pop(ind)) #560 (line in Coconut source)\n return sample #561 (line in 
Coconut source)", "title": "" }, { "docid": "932f2c4f4fe22be71736da1de21341f9", "score": "0.49391842", "text": "def get_randomized_stretch_seqs(seq, num_bases):\n outset = set()\n all_randomized = [''.join(tup) for tup in itertools.product(bases, repeat=num_bases)]\n for i in range(len(seq) - num_bases + 1):\n outset.update([seq[:i] + rand_seq + seq[i + num_bases:] for rand_seq in all_randomized])\n return outset", "title": "" }, { "docid": "815463d0dbc6de8d30ada5923a1391c8", "score": "0.49382743", "text": "def choices(self, k, seed=None):\n idxs = np.random.randint(0, self.n_samples, k)\n return [self.__getitem__(idx) for idx in idxs]", "title": "" }, { "docid": "855eb72d52790464dac354c794dff2a4", "score": "0.49342027", "text": "def _fluctuate(mult):\n return np.array([np.random.poisson(m) for m in mult])", "title": "" }, { "docid": "3ae067f5046f155de799857d10e59891", "score": "0.49317706", "text": "def do_inversion_mutation(gene_s,mutation_rate):\r\n \r\n gene = [c for c in gene_s]\r\n \r\n p = random.random()\r\n if p<= mutation_rate:\r\n\r\n\r\n start = random.randint(0,len(gene_s)-2)\r\n end = random.randint(start+1,len(gene_s)-1)\r\n\r\n temp = gene[start:end+1]\r\n temp.reverse()\r\n gene[start:end+1] = temp\r\n \r\n \r\n return \"\".join(gene)", "title": "" }, { "docid": "953a804993443211a2ff1fbffdfafe7a", "score": "0.49294025", "text": "def sample(self):\n return random.choice(self.space)", "title": "" }, { "docid": "ff47f4fc862cdf640f03360c4ca80259", "score": "0.49271485", "text": "def sim_data(length, trials):\n data = []\n for _ in xrange(trials):\n string = rand_sequence(length)\n data.append(form_suffix_tree(string) + 1)\n return data", "title": "" }, { "docid": "3c10f5657737cd515c4d572c5fd2d3aa", "score": "0.49232376", "text": "def sample_global(args):\n self = args[0]\n num_samples = args[1]\n\n ## Set up arguments/options for Nupack call\n strands = next(iter(self.restingset.complexes)).strands\n strand_seqs = [strand.sequence\n for strand\n in strands]\n\n ## Call Nupack\n structs = nupack.sample(strand_seqs, num_samples, **self._nupack_params)\n\n ## Convert each Nupack sampled structure (a dot-paren string) into a DNAObjects Complex object and process.\n sampled = [Complex(strands = strands, structure = s) for s in structs]\n\n return sampled", "title": "" }, { "docid": "6f6af71b303b1b55ad7b3212b42b2269", "score": "0.49230102", "text": "def candidate_combinations(chars):\n while True:\n yield random.choices(chars, k=NGRAM)", "title": "" }, { "docid": "0c220352888b6c75f28cde01a294c183", "score": "0.49190784", "text": "def fetch_samples_list(self):\n samples = []\n\n s = self.get_set_info([\"id\"], element_class=\"sample\")\n # Returns KeyedTuple that needs to be converted to list \n for place, item in enumerate(s):\n # Make item into list\n temp_list = list(s[place])\n # Save str to new list\n samples.append(temp_list[0])\n\n # Return list of samples\n return samples", "title": "" }, { "docid": "3db2960d5ff7f68d293e139be6e3dbbb", "score": "0.49185583", "text": "def test_stratification():\n result = randomization.stratification([10, 12], 2)\n assert len(result) == 2\n assert len(result[0]) == 10\n assert len(result[1]) == 12", "title": "" }, { "docid": "1818994d50449d2e66aff57f5b44b40d", "score": "0.49146765", "text": "def search(self):\n if self.random_count < self.random_models:\n self.random_count += 1\n return self.random_count, self._random_sample()\n pareto_front_results = self.get_pareto_front()\n pareto_front = pareto_front_results[\"encoding\"].tolist()\n if 
len(pareto_front) < 2:\n encoding1, encoding2 = pareto_front[0], pareto_front[0]\n else:\n encoding1, encoding2 = random.sample(pareto_front, 2)\n choice = random.randint(0, 1)\n # mutate\n if choice == 0:\n encoding1List = str2list(encoding1)\n encoding_new = self.mutatation(encoding1List)\n # crossover\n else:\n encoding1List = str2list(encoding1)\n encoding2List = str2list(encoding2)\n encoding_new, _ = self.crossover(encoding1List, encoding2List)\n self.ea_count += 1\n net_desc = self.codec.decode(encoding_new)\n return self.random_count + self.ea_count, net_desc", "title": "" }, { "docid": "4cb1ad42bc5c8ca3caf4169ba8fcf8d4", "score": "0.49126884", "text": "def sampling(self, n, random_sampling=False):\n\n\t\t# It's posiible to use Biopython module SeqIO\n\t\t# SeqIO.parse('...', 'fastq')\n\t\t# It return list generator of SeqRecord objects\n\n\t\tif not random_sampling:\n\t\t\tself.samples = list( islice(\n\t\t\t\tparse_fastq(self.pack_names[self.current]), n) )\n\t\telse:\n\t\t\t# Non-optimal but fast in implementation\n\t\t\tfull = list(parse_fastq(self.pack_names[self.current]))\n\t\t\trandom.shuffle(full)\n\n\t\t\tself.samples = full[:n]", "title": "" }, { "docid": "1d217f4b08eced01bc0237248c8a897f", "score": "0.49099508", "text": "def sample_transition(self, s, a):\n trans = self.transitions(s)[a]\n state_probs = [tran.prob for tran in trans]\n return trans[np.random.choice(len(trans), p=state_probs)]", "title": "" }, { "docid": "32e925af332e3c6157064d84cc0d811a", "score": "0.49059856", "text": "def sample(h, seed_ix, n):\n x = np.zeros((vocab_size, 1))\n x[seed_ix] = 1\n ixes = []\n for t in range(n):\n h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)\n y = np.dot(Why, h) + by\n p = np.exp(y) / np.sum(np.exp(y))\n ix = np.random.choice(range(vocab_size), p=p.ravel())\n x = np.zeros((vocab_size, 1))\n x[ix] = 1\n ixes.append(ix)\n return ixes", "title": "" }, { "docid": "216417ad03525d93a7122630bad1b278", "score": "0.48958224", "text": "def trandom_sample(pred, seq, random_state=None):\n return tuple(random_sample(pred, seq, random_state=random_state))", "title": "" }, { "docid": "56567b315c79ce7b3c4fa46ffac372e8", "score": "0.48958203", "text": "def opg18():\n sc = {random.randint(1, 6) for i in range(10)}\n return sc", "title": "" }, { "docid": "7efbfa460b5652360cc0bfa27a4f5f9e", "score": "0.48873907", "text": "def guess_ssequence_factors(sequence, spec_seqs = SPECIAL_SEQUENCES, \n quick_eval = True): \n formula_matches = []\n for spseq in spec_seqs: \n search_func = SearchSequenceFactors(seqgen = spseq)\n spfactors = search_func.compute_special_sequence_factors(sequence)\n for (lcidx, remseq) in spfactors: \n fmatch = GuessSpecialSeqResult(remseq, spseq, lcidx)\n if quick_eval and fmatch.remseq_has_match():\n fmatch.finalize_match()\n ##\n formula_matches += [fmatch]\n ##\n ## \n return formula_matches", "title": "" } ]
b447f7bdab8614e6d27385d8724d2074
Returns a slice of the dataframe
[ { "docid": "5f62b1d87254e787540d0f413f5893c5", "score": "0.6506718", "text": "def get_data_chunck(self, df=None, iloc_start=None, iloc_end=None, chunck_size=1000):\n # Set iloc_start and iloc_end if not defined\n if not iloc_start:\n iloc_start = 0\n if not iloc_end:\n iloc_end = iloc_start + chunck_size\n\n # If no df passed as input, return a slice of self.df\n if df is None:\n try:\n return self.df.iloc[iloc_start:iloc_end]\n except:\n return self.df.iloc[iloc_start:]\n \n else:\n # If a df is passed as input, return its slice\n try:\n return df.iloc[iloc_start:iloc_end]\n except:\n return df.iloc[iloc_start:]", "title": "" } ]
[ { "docid": "d72bb54cad3c68f028e087e559ff977f", "score": "0.76382184", "text": "def slice(self, offset: int, length: int) -> \"DataFrame\":\n return wrap_df(self._df.slice(offset, length))", "title": "" }, { "docid": "6b983d995a0ae43a049731a50c24c1cb", "score": "0.703219", "text": "def slice_dataframe(\n df: pd.DataFrame, slicing_function: SlicingFunction\n) -> pd.DataFrame:\n\n S = PandasSFApplier([slicing_function]).apply(df)\n\n # Index into the SF labels by name\n df_idx = np.where(S[slicing_function.name])[0] # type: ignore\n return df.iloc[df_idx]", "title": "" }, { "docid": "fa5c39218c464a437aa0e7fc1cba8cc5", "score": "0.69110113", "text": "def slice_and_select_df(df : DataFrame,\n slices : Tuple[slice],\n columns : Columns = None)->Union[pd.Series, pd.DataFrame]:\n if columns == None:\n return df.loc[slices, :]\n else:\n return df.loc[slices, columns]", "title": "" }, { "docid": "8b7a1a331bb696582bb767f93002ac10", "score": "0.67578375", "text": "def get_df(df, e_start, e_end):\n\n t1_row_number = Testing._get_rownummber_for_event(df, e_start)\n e1_row_number = Testing._get_rownummber_for_event(df, e_end)\n\n return df[t1_row_number - 1: e1_row_number]", "title": "" }, { "docid": "00ec0b4866c5f9ed1a3e763270869e3c", "score": "0.65441215", "text": "def extract_data(self, idx_):\n data = self.df_.loc[self.idx[self.index_first[idx_[0]] , :], \n self.idx[self.columns_first[idx_[1]], \n self.columns_second[idx_[2]]]]\n return data", "title": "" }, { "docid": "e0bf42e676b4644ddc5f5ec540119568", "score": "0.65110403", "text": "def subset_(self, *args):\n df = self._subset(*args)\n if df is None:\n self.err(\"Can not get subset of data\")\n return\n return self._duplicate_(df)", "title": "" }, { "docid": "a9bd7d6b7093f0e923430fc09afa82c4", "score": "0.6397128", "text": "def get_subset_of_df(df, lp_subset, aname):\n if lp_subset is not None:\n curr_df = df[aname].copy()\n curr_df = curr_df.loc[lp_subset][zip([\"SVCCA Score\"] * len(lp_subset),\n lp_subset)]\n else:\n curr_df = df[aname]\n return curr_df", "title": "" }, { "docid": "8b1c3d1cff9a0fd28eaf3aeb8d5e85b0", "score": "0.63895977", "text": "def slice(self, begin, end):", "title": "" }, { "docid": "ed8e1d9b03c556c308e004becece8c4f", "score": "0.6375168", "text": "def subset(self, *args):\n df = self._subset(*args)\n if df is None:\n self.err(\"Can get subset of data\")\n return\n self.df = df", "title": "" }, { "docid": "2734f830233c0743d32fab6701f880d8", "score": "0.6345515", "text": "def get_data_frame(self):", "title": "" }, { "docid": "feb8ee85b79715fef2b95b902287b2a3", "score": "0.6304596", "text": "def get_row(dataframe, position):\n selected_row = dataframe.iloc[position].values.tolist()\n return np.array([selected_row[i:i+2] for i in range(0, 42, 2)])", "title": "" }, { "docid": "1214cf9f32488070742d5660722e2da8", "score": "0.6272061", "text": "def get_dataframe(self):\n return self.df", "title": "" }, { "docid": "859acaa180fbd22094447b556fbc1802", "score": "0.62538373", "text": "def get_subdataframe(self, start, end, condition=None, clause=None):\r\n subset = DataFrame(self.headers)\r\n subdata = dict()\r\n\r\n for category in subset.headers:\r\n if condition is None:\r\n subdata[category] = self.data[category][start:end]\r\n else:\r\n subdata[category] = list()\r\n for number in range(start, end):\r\n if self.data[condition][number] == clause:\r\n subdata[category].append(self.data[category][number])\r\n\r\n subset.data = subdata\r\n\r\n return subset", "title": "" }, { "docid": "d25a7e7c8a1e4bed6315c55045d2df82", "score": 
"0.62450105", "text": "def filter_by_interval(data):\n intervalStart = int(input('Select the start row: '))\n intervalEnd = int(input('Select the end row: '))\n\n print(data.loc[intervalStart:intervalEnd])", "title": "" }, { "docid": "bfee07db8c0991df68dae257fb83263d", "score": "0.6234639", "text": "def do_slice(self, line):\n start_col, end_col = map(int, line.split())\n self.csv.slice_columns(start_col, end_col)", "title": "" }, { "docid": "c433e80447dc41f5cc024522dc528fd5", "score": "0.6225376", "text": "def getSlice(self, slice: int = 0, view = 0):\n if slice < 0:\n slice = 0\n\n if view == 0 or view == \"axial\":\n if slice >= list(self.data.shape)[2]:\n slice = -1\n\n return self.data[:, :, slice]\n \n elif view == 1 or view == \"sagittal\":\n if slice >= list(self.data.shape)[0]:\n slice = -1\n\n return self.data[slice, :, :]\n\n elif view == 2 or view == \"coronal\":\n if slice >= list(self.data.shape)[1]:\n slice = -1\n\n return self.data[:, slice, :]", "title": "" }, { "docid": "20d32747ad44396210995f6c98c9d187", "score": "0.6212829", "text": "def slice(self, idx):\n return slice(self._starts[idx], self._starts[idx]+self._cardinalities[idx])", "title": "" }, { "docid": "55fbd0f118ce77e0efcbc95062f9cf7c", "score": "0.6199042", "text": "def df(self):\n return self._df", "title": "" }, { "docid": "25b72641bf0e1142d6d5f249686e0439", "score": "0.61747795", "text": "def __getrows_pandas__(self, idx):\r\n if StaticTypes.data_types.return_data_type(idx) == StaticTypes.output_types.iterable:\r\n i = [self.index.index(i) for i in idx]\r\n else:\r\n i = [self.index[idx]]\r\n return self.X.iloc[i]", "title": "" }, { "docid": "252a19ffe228d3e2c3255da863216e12", "score": "0.6162671", "text": "def tail(self, n_rows: Optional[int] = None) -> pd.DataFrame:\n return self.df.tail(n_rows)", "title": "" }, { "docid": "49357362feb4cf9fe751bc61766453a4", "score": "0.61590856", "text": "def get_slice(self, key, start, stop):", "title": "" }, { "docid": "1d2b9dee5c7b28df9280ea24e85e626d", "score": "0.61335504", "text": "def slice (self, start, length = None):\n\t\tif start is None: \n\t\t\tstart = self.width()\n\t\tif length is None: \n\t\t\tlength = self.width()\n\t\tif start < 0: \n\t\t\tstart = start + self.width()\n\t\tif start >= self.width(): \n\t\t\treturn channel.create()\n\t\tret = channel.create()\n\t\tif length == 0: \n\t\t\treturn ret\n\t\tfor ele in self:\n\t\t\trow = tuple (ele[start:start+length])\n\t\t\tret.rbind (row)\n\t\treturn ret", "title": "" }, { "docid": "3a425bd95911ebfab86ba0ff0f4e8063", "score": "0.61296636", "text": "def first(self) -> DataFrame:\n return self.select_all().first()", "title": "" }, { "docid": "6e8c2201eee6cd5c2369c419c59071c6", "score": "0.61163884", "text": "def getslice(self, r_start, r_end, c_start, c_end):\n # Insert your code here\n #Initializing the To be Slicing matrix\n Sl=[]\n for i in range(0,r_end-r_start):\n Sl.append([])\n for j in range(0,c_end-c_start):\n Sl[i].append(0)\n #Collecting the slicing matrix\n for i in range(r_start,r_end):\n # Loop over columns.\n for j in range(c_start,c_end):\n Sl[i-r_start][j-c_start]=self.mat[i][j]\n print(self.mat[i][j], end=\"\")\n print(end=\"\\n\")\n return Sl # Remove this!", "title": "" }, { "docid": "8191a0ef41b144fcc3ceba144edeab2f", "score": "0.61098987", "text": "def head(self, n: int = 10) -> \"pandas.DataFrame\": # pylint: disable=invalid-name\n return self._df.head(n=n)", "title": "" }, { "docid": "603e121d4510d903b931a70b6edbf3fd", "score": "0.6101418", "text": "def list_slice(seq_col, start: int, 
end: Optional[int] = None):\n df = cudf.DataFrame(seq_col)\n col_selector = ops.ColumnSelector(seq_col.name)\n slicer = ops.ListSlice(start, end)\n transformed = slicer.transform(col_selector, df)\n return transformed[seq_col.name]", "title": "" }, { "docid": "45d06fec0935ecb7f40871aea0b6f906", "score": "0.6094197", "text": "def get_testing_df(df):\n\n t1_row_number = Testing._get_rownummber_for_event(df, 'T1 ')\n e1_row_number = Testing._get_rownummber_for_event(df, 'E1 ')\n\n return df[t1_row_number - 20: e1_row_number]", "title": "" }, { "docid": "7e2c69a0ca15cf1ea15bde5e651da0cc", "score": "0.6087564", "text": "def head(self, n: int=5) -> 'PandasDataFrame':\n\n return self.rdd.first().head(n)", "title": "" }, { "docid": "0d5e16d886387222950c08d1699e64d3", "score": "0.60831946", "text": "def _get_data(self, rows, columns):\n return self.data.loc[rows, columns]", "title": "" }, { "docid": "86aec2edf5097fd9b937182411c2d328", "score": "0.6080092", "text": "def select_at_idx(self, idx: int) -> Series:\n return wrap_s(self._df.select_at_idx(idx))", "title": "" }, { "docid": "1b54b366482a81e82ac7b8908df66a46", "score": "0.60708386", "text": "def return_dataframe(self):\n if self.direction == 'higher':\n return self.df[self.df[self.field] > self.lim_value]\n elif self.direction == 'lower':\n return self.df[self.df[self.field] < self.lim_value]", "title": "" }, { "docid": "52d61def6b667e03a9b4728d355c58fd", "score": "0.60662836", "text": "def head(self, n_rows: Optional[int] = None) -> pd.DataFrame:\n return self.df.head(n_rows)", "title": "" }, { "docid": "39a6c02e9c05cbb0704c8104009c384d", "score": "0.60634667", "text": "def test_slice_retrieval():\n true_slice = subset\n slice_key = \"key \"\n h5df.save_slice(true_slice, slice_key)\n returned_slice = h5df.get_slice(slice_key)\n expected = pd.Series(true_slice)\n actual = pd.Series(returned_slice)\n assert expected.equals(actual)", "title": "" }, { "docid": "e15c200e66a4b838dd07cad2ddba642e", "score": "0.60579765", "text": "def __getslice__(self, i, j):\n return LazyExpr(\"self.eval()[i:j]\")", "title": "" }, { "docid": "2c2696b10b0448ab8a501a105ef929b8", "score": "0.6055532", "text": "def getSlice(self):\n\n return self._slice", "title": "" }, { "docid": "07f76cf5f91068ac4de67e6bc12ecbd5", "score": "0.6030022", "text": "def subselectDataFrame(pandasDF, minx,maxx,miny,maxy):\n data = pandasDF\n ## Create the geometric column\n data['geometry'] = data.apply(lambda z: Point(z.LON, z.LAT), axis=1)\n new_data = gpd.GeoDataFrame(data)\n ## Subselect the data\n section = new_data[lambda x: (x.LON > minx) & (x.LON < maxx) & (x.LAT > miny) & (x.LAT < maxy) ]\n return section", "title": "" }, { "docid": "a365e71b08478a10a24c2ddc6c79e97a", "score": "0.6020068", "text": "def take(self, n):\n alias = _random_id('inline_', 4)\n table_ref = InlineView(self._query_ast.to_sql(), alias)\n # SELECT alias.*\n select_list = [SelectItem(table_name=TableName(table_ref.name))]\n limit_elt = LimitElement(Literal(n), None)\n ast = SelectStmt(select_list, table_ref, limit=limit_elt)\n bdf = BigDataFrame(self._ic, ast)\n return as_pandas(bdf.__iter__())", "title": "" }, { "docid": "c905540793f1c7101b41ebfff2135aa4", "score": "0.6018424", "text": "def cut_long_members(ds):\n if 'time' in ds.dims:\n ds = ds.sel(time=slice(None, '2100'))\n return ds", "title": "" }, { "docid": "732f4cb4bffec4000c570b5966b05ec3", "score": "0.60122854", "text": "def get_slice(self, sweep):\n start, end = self.get_start_end(sweep)\n return slice(start, end + 1)", "title": "" }, { "docid": 
"25295c492147ae9c338b5480ba3ebc71", "score": "0.6011075", "text": "def _slice(self, _x, n, dim):\n if _x.ndim == 3:\n return _x[:, :, n * dim: (n + 1) * dim]\n return _x[:, n * dim: (n + 1) * dim]", "title": "" }, { "docid": "dc69b68513b925e5a34fbc4dcefb3c48", "score": "0.60045534", "text": "def showData(fr_num):\n single_df=data.iloc[[fr_num-1]]\n return single_df", "title": "" }, { "docid": "5a61cfcf347fd40ba0e55571368bc53d", "score": "0.59999394", "text": "def get_dataframe(self):\n return self._dataframe.copy()", "title": "" }, { "docid": "49ea3857e2424896dddaed086bbb0e47", "score": "0.592821", "text": "def __getslice__(self, i, j):\n return self.__getitem__(slice(i, j))", "title": "" }, { "docid": "48d3d503218d4d99833ea229ca2ee194", "score": "0.59271806", "text": "def take(self, count: int) -> 'Dataset':\n return SlicedDataset(self, 0, count)", "title": "" }, { "docid": "ad3003dcbe2cdd6dde5d758908fd5545", "score": "0.5924263", "text": "def get_visible_slice(self):\n\t\treturn slice(self.current_pos, self.current_pos + self.number_of_visible_items())", "title": "" }, { "docid": "b5fa6e33bf45047bf7504ea684a131c6", "score": "0.59234923", "text": "def get_subset(self, date, size, mode):\n ref_date = date + dt.timedelta(0.5)\n date_tuple = (ref_date - dt.timedelta(size / 2.0 -\n self.interval / 1440.0),\n ref_date + dt.timedelta(size / 2.0))\n if mode == \"day\":\n sub_labels = [\"NEE\", \"PPFD\", \"VPD\", \"TC\"]\n elif mode == \"night\":\n sub_labels = [\"ER\", \"TC\"]\n else:\n msg = \" These are not the droids you're looking for ...\"\n logger.error(msg)\n raise RuntimeError(msg)\n sub_df = self.df.loc[date_tuple[0]: date_tuple[1], sub_labels]\n self.subset_start = sub_df.index.values[0]\n self.subset_end = sub_df.index.values[-1]\n sub_df = sub_df.dropna()\n return sub_df", "title": "" }, { "docid": "c7f7b1dc73dc3f423fb5f166948989dd", "score": "0.5917651", "text": "def __getslice__(self, i=0, j=-1):\n return type(self)(*self.__getitem__(slice(i,j)))", "title": "" }, { "docid": "dc367cbc9646d5fce620b5bbc37e4472", "score": "0.5913325", "text": "def data_filter(df: DataFrame) -> DataFrame:\n if Enquiry(session_state.column): # checks if no column is added to column list\n return df # returns entire provided dataframe if no column is selected\n else:\n column_list = [\"Timestamp\"] + session_state.column # adds the \"Timestamp\" column to local column list\n return df[column_list] # returns the filtered data according to session state", "title": "" }, { "docid": "1f7ae5f43392b0a10b70b2fb02cea875", "score": "0.5909588", "text": "def get_dataset_slice(self, start, slice_size, dataset_type=DatasetType.TRAIN, delimeter=';',path=None):\n if path is None:\n file_stream = self.get_file_stream_from_dataset_type(dataset_type)\n else:\n file_stream = self.open_file_stream(path)\n slice = []\n for row in islice(csv.reader(file_stream, delimiter=delimeter), start, None):\n slice.append(self.convert_raw_line_to_item(row))\n if len(slice) >= slice_size:\n break\n return slice", "title": "" }, { "docid": "163f40d92ef6cabb05c22f1cdbd87360", "score": "0.59053457", "text": "def get_slice(self, var, slice_plane, time=None):\n varname = 'slice_{}_{}'.format(var, slice_plane)\n da = self.ds[varname]\n if time is not None:\n da = da.sel({'t': time}, method='nearest')\n\n return da", "title": "" }, { "docid": "27a2cdd0703fd77ca4f4dfec65df91ea", "score": "0.590518", "text": "def row( self, index, start=1 ):\n index = self.rowdex( index )\n return self.data[index][start:]", "title": "" }, { "docid": 
"43d0d7ea9c6b157defaafb8e78ee9b88", "score": "0.5896655", "text": "def get_subset(self, col_title, group):\n return self._mousedf.loc[self._mousedf[col_title] == group]", "title": "" }, { "docid": "1138c725da7b3e4aa5f7a4c05002e4e9", "score": "0.5884673", "text": "def get_df(self, index=0, header=0):\n pass", "title": "" }, { "docid": "eefd99c28e0195282b30f13c67199874", "score": "0.5876941", "text": "def _Slice(self, pdi, pdo, plane):\n # create slice\n cutter = vtk.vtkCutter() # Construct the cutter object\n cutter.SetInputData(pdi) # Use the grid as the data we desire to cut\n cutter.SetCutFunction(plane) # the the cutter to use the plane we made\n cutter.Update() # Perfrom the Cut\n slc = cutter.GetOutput() # grab the output\n pdo.ShallowCopy(slc)\n\n return pdo", "title": "" }, { "docid": "1a7d29f8de6818202d4661c72cf227bb", "score": "0.58323455", "text": "def __getitem_pandas__(self, i):\r\n return self.X[i]", "title": "" }, { "docid": "600d6824323719dbd5ff91db28fa4e86", "score": "0.58131915", "text": "def __getitem__(self,idx):\n if isinstance(idx,slice):\n idx = Range(idx.start,idx.stop,idx.step)\n return _data.Data.execute('$[$]',self,idx)", "title": "" }, { "docid": "b60861d908594b1419884c1c9823899a", "score": "0.5805217", "text": "def slice(x, h1, h2):\n return x[:, :, :, h1:h2, :]", "title": "" }, { "docid": "4cd746ca8a612cdbc980132c18ddc51e", "score": "0.5778364", "text": "def get_df(self):\n return self.df", "title": "" }, { "docid": "1f458b00b7e73136c8cfcb5c9d50ee0d", "score": "0.57776606", "text": "def set_dataframe(self) -> DataFrame:\n with Session() as session:\n query = session.query(STKDB)\n query = query.filter((self.start <= STKDB.open_date) &\n (STKDB.open_date < self.end))\n if self.market != 'ALL':\n query = query.filter(STKDB.mkt_nm.like(f'%{self.market}%'))\n if self.sector != 'ALL' and isinstance(self.sector, Iterable):\n query = query.filter(\n sqlalchemy.func.REGEXP_LIKE(STKDB.std_ind_cd, '|'.join(map(lambda x: f'..{x}..', self.sector)))\n )\n df = pd.read_sql(query.statement, query.session.bind)\n df.columns = [column.upper() for column in df.columns]\n df = df.set_index('OPEN_DATE')\n return df", "title": "" }, { "docid": "977702cc19f9762e8fce77e67bbe8cda", "score": "0.57764506", "text": "def select_region(df: pd.DataFrame) -> pd.DataFrame:\n df = df[df['region'] == REGION_ID]\n df.drop('region', axis=1, inplace=True)\n print(f'Selected {len(df)} samples in region {REGION_ID}.')\n return df", "title": "" }, { "docid": "e0dc43be1b38e00b6a9398212baf220e", "score": "0.5769344", "text": "def limit_(self, r: int = 5) -> \"Ds\":\n try:\n return self._duplicate_(self.df[:r])\n except Exception as e:\n self.err(e, \"Can not limit data\")", "title": "" }, { "docid": "6b84815601a2085f2417b5a328e8a7a8", "score": "0.5754018", "text": "def __getitem__(self, idx):\n return self.loc[idx]", "title": "" }, { "docid": "54ba918b90aefc413b226c34674a70f1", "score": "0.57433957", "text": "def select(df, region, cols=None):\n return df.loc[select_mask(df, region, cols)]", "title": "" }, { "docid": "b5666fdbca174330ce932e183ea614cf", "score": "0.57406104", "text": "def __getitem__(self, obj):\n # other select/filter fns should be implemented with this one\n if isinstance(obj, tuple) and len(obj) == 2:\n alias = _random_id('inline_', 4)\n table_ref = InlineView(self._query_ast.to_sql(), alias)\n (limit_elt, where) = self._query_ast._filter(obj[0])\n select_list = self._query_ast._projection(obj[1])\n return BigDataFrame(\n self._ic, SelectStmt(\n select_list, table_ref, where=where, 
limit=limit_elt))\n elif isinstance(obj, list):\n alias = _random_id('inline_', 4)\n table_ref = InlineView(self._query_ast.to_sql(), alias)\n select_list = self._query_ast._projection(obj)\n return BigDataFrame(self._ic, SelectStmt(select_list, table_ref))\n else:\n # single object, possibly a slice; wrap in list and get projection\n return self[[obj]]", "title": "" }, { "docid": "d5fd5d64ba10c93ca3794883ac9a4a7f", "score": "0.57076615", "text": "def nowrange_(self, col: str, timeframe: str) -> \"Ds\":\n df = self._nowrange(col, timeframe)\n if df is None:\n self.err(\"Can not select range data from now\")\n return\n return self._duplicate_(df)", "title": "" }, { "docid": "d063ec713fc0fec139b892c5135a1eb0", "score": "0.5703852", "text": "def getcolslice(self, columnname, blc, trc, incr, startrow=int(0), nrow=int(-1), rowincr=int(1)):\n schema = {'columnname': {'type': 'cStr'}, 'blc': {'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}, 'trc': {'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}, 'incr': {'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}, 'startrow': {'type': 'cInt'}, 'nrow': {'type': 'cInt'}, 'rowincr': {'type': 'cInt'}}\n doc = {'columnname': columnname, 'blc': blc, 'trc': trc, 'incr': incr, 'startrow': startrow, 'nrow': nrow, 'rowincr': rowincr}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getcolslice_result = _any_dc(self._swigobj.getcolslice(_str_ec(_pc.document['columnname']), _pc.document['blc'], _pc.document['trc'], _pc.document['incr'], _pc.document['startrow'], _pc.document['nrow'], _pc.document['rowincr']))\n return _getcolslice_result", "title": "" }, { "docid": "abdba4d69ce37e73dd944d658e3e9b6a", "score": "0.5694756", "text": "def slice(data=None, begin=_Null, end=_Null, step=_Null, out=None, name=None, **kwargs):\n return (0,)", "title": "" }, { "docid": "c6829d1ff27ae350d151139d8ad2066d", "score": "0.5694684", "text": "def get(self, **kwargs) -> pd.core.frame.DataFrame:\n\n raise NotImplementedError", "title": "" }, { "docid": "fd3cf27e25f21f7abb748e228573cb5a", "score": "0.56594986", "text": "def __getslice__(self, i, j):\n return self.signal[i:j]", "title": "" }, { "docid": "47012a506074aaee6ca0fcbfa9831d3b", "score": "0.5658356", "text": "def __getslice__( self, start=0, stop=sys.maxint, step=1 ):\n\t\tif start < 0:\n\t\t\tstart = len(self) + start\n\t\tif stop < 0:\n\t\t\tstop = len(self) + stop\n\t\tresult = []\n\t\tfor i in xrange( start, stop, step ):\n\t\t\ttry:\n\t\t\t\tresult.append( self[i] )\n\t\t\texcept (IndexError,KeyError):\n\t\t\t\tbreak\n\t\treturn result", "title": "" }, { "docid": "cdbb1f1761bf0d749226bdb76fd835ea", "score": "0.5658146", "text": "def __getitem__(self, key):\n return self.df[key]", "title": "" }, { "docid": "d8a0526dd1cccf56a2787c40475f19bb", "score": "0.56525016", "text": "def cut_rows(dataset, top=0, buttom=0):\n return dataset[:, 0 + top:160 - buttom, :, :]", "title": "" }, { "docid": "3e5aba681a8b8bcb620f7bac6834ea0b", "score": "0.5639328", "text": "def strslice(self, start, end, useCache=True):\r\n if useCache: # If it's in the cache, use that!\r\n try:\r\n return self.db.strsliceCache(self, start, end)\r\n except IndexError:\r\n pass\r\n\r\n return self.db.strslice(self.id, start, end)", "title": "" }, { "docid": "73a980222533519b396658aafa974c9e", "score": "0.56344557", "text": "def df(self):\n return self._df", "title": "" }, { "docid": "73a980222533519b396658aafa974c9e", "score": "0.56344557", "text": "def df(self):\n return self._df", "title": "" }, { "docid": 
"c78bfb3fc987e53d3bb2b6f375fdff1f", "score": "0.56317264", "text": "def first(self) -> DataFrame:\n if self.downsample:\n return wrap_df(self._df.downsample(self.by, self.rule, self.n, \"first\"))\n\n return wrap_df(self._df.groupby(self.by, self.selection, \"first\"))", "title": "" }, { "docid": "f9ca62c1f6a86e2f452d3cdd996741f1", "score": "0.563052", "text": "def __getitem__(self, item):\n if isinstance(item, slice):\n if item.start is None and item.stop is None and item.step is None:\n return self\n return SlicedDataset(self, item)\n elif item is ...:\n return self\n elif isinstance(item, int):\n return self.take(item).list()[-1]\n else:\n raise ValueError(\"Unsupported index:\", item)", "title": "" }, { "docid": "c5092239fb09dba49b8578a8444db1e5", "score": "0.56291056", "text": "def Sel(Regione,Dati):\r\n return numpy.asarray(df.sel(Dati = Dati, Regione = Regione))[1:]", "title": "" }, { "docid": "c1e661c11ffc1a95f74beaef29863c07", "score": "0.56221294", "text": "def select_subset():\n\n selected = df[df.year <= year.value]\n return selected", "title": "" }, { "docid": "f52b3e47d1f87877f628856ec20ed5be", "score": "0.56089836", "text": "def get_df(self) -> pd.DataFrame:\n raise NotImplementedError", "title": "" }, { "docid": "816479ade9bdf1faf79b7f6095192d46", "score": "0.56073123", "text": "def point4(self):\n dataframe = self.df.loc[self.df['Team'] == \"Boston Celtics\"]\n return dataframe", "title": "" }, { "docid": "a77599e196144d2fe64021e14defb13b", "score": "0.5607129", "text": "def show(self, limit=5, truncate=True) -> NoReturn:\n DataFrameShower(self.frame).show(limit, truncate)", "title": "" }, { "docid": "bc14b870c32d498f71aaa18ea9b6d2aa", "score": "0.55874246", "text": "def limit(self, r: int = 5):\n try:\n self.df = self.df[:r]\n except Exception as e:\n self.err(e, \"Can not limit data\")", "title": "" }, { "docid": "52f6cd218f6f800c0bb806000ff65c4b", "score": "0.5583436", "text": "def slice(self, start, end):\n start, end, mask = \\\n self._check_boundaries(start, end, allow_infinite=True)\n\n result = TimeSeries(default=self.default)\n for t0, t1, value in self.iterperiods(start, end):\n result[t0] = value\n\n result[t1] = self[t1]\n\n return result", "title": "" }, { "docid": "79692523165738dded669edba7e83fff", "score": "0.55789435", "text": "def position_slice(self, time):\n return self.p[:, :, time]", "title": "" }, { "docid": "4f7d206ce0c1af5333ef4c726db28cc6", "score": "0.5568763", "text": "def get_pandasframe(self):\r\n if self.dataset:\r\n self._load_dimensions()\r\n return self._get_pandasframe_one_dataset()\r\n return self._get_pandasframe_across_datasets()", "title": "" }, { "docid": "f54c6f0b0a08b7a382395fb876f4a32e", "score": "0.556874", "text": "def last(self) -> DataFrame:\n return self.select_all().last()", "title": "" }, { "docid": "025a06848b5733d76698b5a93dfa62d8", "score": "0.5560935", "text": "def getDataFrame(self) -> DataFrame:\n\n return self.df", "title": "" }, { "docid": "a9c8402b9ccf7eecf348c43ca02d9a5d", "score": "0.55566573", "text": "def snapshot_of_data(df):\n\n print('\\nShowing the 5 rows...\\n')\n print(df.head(5))", "title": "" }, { "docid": "ae9f9933aa7be4af6e7c3458cb8732c8", "score": "0.55490845", "text": "def dataframe(self):\n return self.generator.dataframe()", "title": "" }, { "docid": "a25de2a9cff29688948203f9b5394770", "score": "0.5542222", "text": "def slice_(input_layer, begin, size):\n return tf.slice(input_layer, begin, size)", "title": "" }, { "docid": "8698040e4fe21ecb974b7c908fe27bc9", "score": "0.55386776", "text": "def 
by_datetime_span(data_df, dt_start=None, dt_end=None):\n df = data_df.copy()\n \n if isinstance(dt_start, dt.datetime):\n df = df[df.index >= dt_start]\n elif dt_start is None:\n pass\n else:\n raise TypeError(\"'start' must be a datetime.datetime object \")\n \n if isinstance(dt_end, dt.datetime):\n df = df[df.index <= dt_end]\n elif dt_start is None:\n pass\n else:\n raise TypeError(\"'start' must be a datetime.datetime object \")\n \n return df", "title": "" }, { "docid": "ee6d5a3f3f0f62282469ffba63d615db", "score": "0.5537639", "text": "def custom_slice(self, dispaxis, index):\n if dispaxis == self.HORIZONTAL:\n return np.s_[:, index]\n elif dispaxis == self.VERTICAL:\n return np.s_[index, :]\n else:\n raise Exception", "title": "" }, { "docid": "98640bcbda449380c529db91497aae6b", "score": "0.5533943", "text": "def slice1(self, x, slide):\n x1 = x[:, :slide, :, :]\n return x1", "title": "" }, { "docid": "78a2ac15123251451b3b969284d4b5b8", "score": "0.55273104", "text": "def split_df(df): \n df = df.dropna()\n df['Date'] = pd.to_datetime(df.index)\n \n df_train = df[(df.Date>=_start)&(df.Date<=_mid)]\n df_test = df[(df.Date>_mid)&(df.Date<=_stop)]\n df_oos = df[(df.Date>_stop)&(df.Date <= _last)]\n df_train = df_train.drop(['Date'],axis=1)\n df_test = df_test.drop(['Date'],axis=1)\n df_oos = df_oos.drop(['Date'],axis=1)\n return(df_train, df_test,df_oos)", "title": "" }, { "docid": "3cb124cbe4fdbe9ffb7d8eab934b413c", "score": "0.55197453", "text": "def point2(self, columna):\n dataframe = self.df[columna].to_frame() \n return dataframe", "title": "" }, { "docid": "b4d2667d7dff7aba70bcbfb4fe000143", "score": "0.5519356", "text": "def current_data(self, date):\n return self.df.loc[date]", "title": "" }, { "docid": "65107353d8c6c6b97190ed703aecba97", "score": "0.5519197", "text": "def _get_dataset_slices(start, end):\n slices = []\n for shard_idx in range(int((end - 1) / _SHARD_SIZE) + 1):\n start_offset = shard_idx * _SHARD_SIZE\n end_offset = (shard_idx + 1) * _SHARD_SIZE\n if end_offset <= start:\n continue\n s = Slice(\n shard_idx,\n max(start, start_offset) % _SHARD_SIZE,\n (min(end, end_offset) - 1) % _SHARD_SIZE + 1,\n )\n slices.append(s)\n return slices", "title": "" }, { "docid": "46115c0041060047084786fee2deae5d", "score": "0.55135214", "text": "def copy(self):\n return DataframeView(**self.getParams())", "title": "" }, { "docid": "cff42c99c9891c6ed62cc5deade798aa", "score": "0.5511183", "text": "def fetch1_dataframe(self):\n return self.fetch_dataframe()[0]", "title": "" } ]
ce47eb18c87a2c70cffbf3ac3f075263
Get user list and their time left
[ { "docid": "27e2da0aa664e50e2ed56a8c1dfd9d42", "score": "0.6999063", "text": "def getUserList(self):\n \"\"\"Sets allowed days for the user\n server expects only the days that are allowed, sorted in ascending order\"\"\"\n # result\n result = 0\n message = \"\"\n userList = []\n\n try:\n # init store\n timekprUStore = timekprUserStore()\n # check if we have this user\n userList = timekprUStore.getSavedUserList(self._timekprConfig.getTimekprConfigDir())\n except Exception as unexpectedException:\n # logging\n log.log(cons.TK_LOG_LEVEL_INFO, \"Unexpected ERROR (%s): %s\" % (misc.whoami(), str(unexpectedException)))\n\n # result\n result = -1\n message = msg.getTranslation(\"TK_MSG_CONFIG_LOADER_USERLIST_UNEXPECTED_ERROR\")\n\n # result\n return result, message, userList", "title": "" } ]
[ { "docid": "1d6786e0b68ad28cca0eaedcc1105e35", "score": "0.6761791", "text": "def user_list(self) -> None:\n users_list = []\n\n for user in state.get_users()[\"records\"]:\n users_list.append(\n [\n str(user[\"id\"]),\n user[\"username\"],\n str(user[\"is_admin\"]),\n str(user[\"enabled\"]),\n date_util.humanize_datetime(user[\"updated_at\"]),\n ]\n )\n\n users_list.insert(0, [\"ID\", \"Username\", \"Admin\", \"Enabled\", \"Last Logon Time\"])\n\n table_util.print_table(users_list, \"Users\")", "title": "" }, { "docid": "c399c2b7ff790e2e3a93aa0adb3a17f8", "score": "0.6649149", "text": "def get_userlist(self, room):\n users = \"\"\n with Chat.lock:\n for user in room.users:\n users += \" * {}\".format(user)\n if user == self.name:\n users += \" (** this is you)\\n\"\n else:\n users += \"\\n\"\n users += \"end of list.\"\n return users", "title": "" }, { "docid": "b5834aa2f3853496571f895276ebd9b1", "score": "0.6622061", "text": "async def user_list():\n print(\"!!!!usrlist!!!! Scanning Servers and nicknames as requested\")\n await bot.say(\"A list of the users in the servers has been logged into the bot console.\")\n print(\"\\nLog datetime: \" + current_datetime)\n print(\"----------------------\")\n for server in bot.servers:\n for member in server.members:\n print(\n \"server: {0} | user: {1.name} | user_id: {1.id} | role: {1.top_role} | role_id: {1.top_role.id}\".format(\n server, member,\n member))", "title": "" }, { "docid": "b7ed5082d5aa096b42b3780523248784", "score": "0.6599963", "text": "def get_notable_users(client: Client, args: Dict):\n limit: int = args.get('limit', 10)\n time_period: str = args.get('time_period', '')\n time_ = time_period.split(' ')\n if not len(time_) == 2:\n return_error('Got invalid time period. Enter the time period number and unit.')\n num: str = time_[0]\n unit: str = time_[1]\n api_unit = unit[0]\n if api_unit == 'm':\n api_unit = api_unit.upper()\n\n if api_unit not in {'d', 'y', 'M', 'h'}:\n return_error('The time unit is incorrect - can be hours, days, months, years')\n\n contents = []\n headers = ['UserFullName', 'UserName', 'Title', 'Department', 'RiskScore', 'Labels', 'NotableSessionIds',\n 'EmployeeType', 'FirstSeen', 'LastSeen', 'LastActivity', 'Location']\n users = client.get_notable_users_request(api_unit, num, limit).get('users', [])\n if not users:\n return 'No users were found in this period of time.', {}, {}\n else:\n for user in users:\n user_ = user.get('user', {})\n user_info = user_.get('info', {})\n contents.append({\n 'UserName': user_.get('username'),\n 'RiskScore': round(user_.get('riskScore')),\n 'FirstSeen': convert_unix_to_date(user_.get('firstSeen')),\n 'LastSeen': convert_unix_to_date(user_.get('lastSeen')),\n 'LastActivity': user_.get('lastActivityType'),\n 'Labels': user_.get('labels'),\n 'UserFullName': user.get('userFullName'),\n 'Location': user_.get('info')['location'],\n 'NotableSessionIds': user.get('notableSessionIds'),\n 'NotableUser': True,\n 'HighestRiskSession': user.get('highestRiskSession'),\n 'EmployeeType': user_info.get('employeeType'),\n 'Department': user_info.get('department'),\n 'Title': user_info.get('title')\n })\n\n context = {\n 'Exabeam.User(val.UserName && val.UserName === obj.UserName)': contents\n }\n\n human_readable = tableToMarkdown('Exabeam Notable Users', contents, headers, removeNull=True)\n return human_readable, context, users", "title": "" }, { "docid": "5d3247cc81cf92d68256076923cdf19e", "score": "0.6577269", "text": "def get(self):\n userList = User.all().filter('isLoggedIn =', 
True)\n now = datetime.datetime.now()\n \n # If it is within 5 minutes\n for u in userList:\n # Get the delta\n lastActive = u.lastActive\n delta = now - lastActive\n \n # More than 5 minutes. You could be Anatartica~!!!\n if delta.seconds > 300:\n u.isLoggedIn = False\n u.put()", "title": "" }, { "docid": "438177acdc23e078511012c050dd57f6", "score": "0.6489011", "text": "def get_user_names():\n ClientHandlingThread.user_list_lock.acquire_read_lock()\n return_message = [i[0] for i in ClientHandlingThread.user_list]\n ClientHandlingThread.user_list_lock.release_read_lock()\n return return_message", "title": "" }, { "docid": "2a364acfc794b27b8b7ddff6b764ed1c", "score": "0.6459676", "text": "def get_users_on_waitlist(self):\n waitlist = self.waitlistslot_set.all()\n return UserProfile.objects.filter(waitlistslot__in=waitlist)", "title": "" }, { "docid": "464814c1223defdd1f13e10870ce27cf", "score": "0.6455602", "text": "def user_list():\n\n\tusers = User.query.all()\n\treturn render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "464814c1223defdd1f13e10870ce27cf", "score": "0.6455602", "text": "def user_list():\n\n\tusers = User.query.all()\n\treturn render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "42c7bb6ef5148c7c90493ec97c994c39", "score": "0.6432515", "text": "def user_list():\n if session['group_id'] != 'admin':\n return _error_permission_denied('Unable to show userlist for non-admin user')\n try:\n items = db.users.get_all()\n except CursorError as e:\n return _error_internal(str(e))\n return render_template('userlist.html', users=items)", "title": "" }, { "docid": "d4fe16988a5f9dcbf8556675d46fa035", "score": "0.6398875", "text": "def list(self, request):\n\n # Get header data and verify\n\n data = get_data(request)\n user = None\n\n if data[\"username\"]:\n try:\n user = User.objects.filter(username=data[\"username\"])[0]\n\n except:\n # No user with \"username\"\n return generate_error(ERROR_MESSAGE_NO_SUCH_USER)\n\n if not user:\n # If no \"username\" is present, then list all usernames\n users = User.objects.all()\n\n user_data = []\n for user in users:\n user_data += [user.username]\n\n return generate_success({\"users\": user_data})\n\n # Return \"username\" data\n\n return generate_success(\n {\"date_joined\": user.date_joined, \"is_staff\": user.is_staff}\n )", "title": "" }, { "docid": "6803c4673466adef5d946e3f0a0a47c6", "score": "0.6396353", "text": "def list_users(environ, start_response):\n store = environ['tiddlyweb.store']\n users = store.list_users()\n start_response('200 OK', [('Content-Type', 'text/plain')])\n return ('%s\\n' % user.usersign for user in users)", "title": "" }, { "docid": "600241ae7ed6d40aa735172f358f1424", "score": "0.63621044", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users,\n login=session.get('user'))", "title": "" }, { "docid": "bfc120a3a2d020c67d6ff6762c10500a", "score": "0.6359668", "text": "def user_list():\n\n users = crud.get_users()\n\n return render_template('users.html', users=users)", "title": "" }, { "docid": "13ef2d538b58ec2e44d6b465918072e6", "score": "0.6347124", "text": "def get_all_users(self):", "title": "" }, { "docid": "b9bc098d55e596b78957bbcccf388637", "score": "0.6345298", "text": "def user_list():\n\n users = User.query.all()\n \n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "1ea853f33be9d41506d4e3259491c1cb", "score": "0.63384837", "text": "def user_list():\n\n users = 
User.query.all()\n\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "1ea853f33be9d41506d4e3259491c1cb", "score": "0.63384837", "text": "def user_list():\n\n users = User.query.all()\n\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "b0ac3a5af99063a3aa69710eadec2e0c", "score": "0.633734", "text": "def listUsers(self):\n query = \"Select name, ID From Users \"\n query+= \"Where worksHere = 1 \"\n query+= \"Order by name\"\n users = self.queryDB(query, [])\n return users", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.6336355", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.6336355", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.6336355", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.6336355", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.6336355", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.6336355", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.6336355", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.6336355", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.6336355", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.6336355", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "569dd2aa8242ebb731b9fd9b399786ff", "score": "0.6286491", "text": "def get_user_list():\n dbs = db.load_json()\n usr_lst = []\n for key in dbs[\"USER_DB\"].keys():\n usr_lst.append({\n \"user_id\": dbs[\"USER_DB\"][key][\"id\"],\n \"account_name\": dbs[\"USER_DB\"][key][\"name\"],\n \"first_name\": dbs[\"USER_DB\"][key][\"fname\"],\n \"last_name\": dbs[\"USER_DB\"][key][\"lname\"],\n \"email\": dbs[\"USER_DB\"][key][\"email\"],\n \"address\": dbs[\"USER_DB\"][key][\"address\"],\n \"city\": dbs[\"USER_DB\"][key][\"city\"],\n \"country\": dbs[\"USER_DB\"][key][\"country\"]\n })\n return usr_lst", "title": "" }, { "docid": "0bc58aba9a202f2ee3d03d37bb5e199b", "score": "0.6285738", "text": "def get_user_list(self):\n return {'users':USERS['users'].values()}", "title": "" }, { "docid": "4b70233af76c4577f4db0c2acfbcced1", "score": "0.628472", "text": "def get_users():\n print_log(f\"Getting last login sessions for users...\")\n endpoint = f\"{API_ENDPOINT}/getLogins\"\n response = 
requests.post(endpoint, headers=HEADERS)\n if str(response.content.decode(\"UTF-8\")) == 'null' or str(response.status_code)[0] != '2':\n return [] # if this is a first time login, return empty list\n return json.loads(response.content.decode(\"UTF-8\"))", "title": "" }, { "docid": "74ca2e723415b28513adafbd9762e76a", "score": "0.6255487", "text": "def _get_listed_users(self, client):\n return self._get_users(\n client, lambda user: user.name not in self.ignore_users)", "title": "" }, { "docid": "1e3a2ce3cd445e44c8bdec542130e083", "score": "0.62376547", "text": "def list_users():\n users = User.query.all()\n return render_template('list.html', users=users)", "title": "" }, { "docid": "aa2bfb8949995339f662c0e41d5785c3", "score": "0.6230124", "text": "def user_list():\n\n #Return list of User objects\n users = User.query.all()\n return render_template('user_list.html', users=users)", "title": "" }, { "docid": "4dc8768bb3585504a06df7acfd25bbf6", "score": "0.6226118", "text": "def get_users_waitlist_spot(self, profile):\n user_waitlist = WaitlistSlot.objects.filter(shift=self, user=profile)\n if not user_waitlist:\n return self.get_waitlist_length()\n w = WaitlistSlot.objects.filter(\n shift=self,\n time_added__lt=user_waitlist[0].time_added\n )\n return w.count()", "title": "" }, { "docid": "37bd7f5c264adc5e142a00f3c902238f", "score": "0.6221998", "text": "def get_user_list(self, request):\n ##['pic_url', 'employeeno', 'name', 'status', 'zpq']\n\n IP = request.args.get(\"IP\", request.remote_addr)\n callback = request.args.get(\"callback\", None)\n\n type_, wskey, addr, ip_, group, timestamp = parse_ip(IP)\n\n result = self.db.call_proc(\n \"sp_ws_emp\",\n [\"wip_order\", \"order_type\", \"operation\", \"step\", \"<CURSOR>\"],\n \"array\",\n )\n\n empinfo = []\n\n tmp = result[4]\n\n for item in tmp:\n employeeno = item[\"employeeno\"]\n login_ts = self.redis.hget(\"login:\" + employeeno, \"login_ts\")\n if login_ts == None:\n item[\"login\"] = False\n elif timestamp - float(login_ts) < 60 * 60 * 8 * 1000:\n item[\"login\"] = True\n else:\n item[\"login\"] = True\n empinfo.append(item)\n\n data = json.dumps({\"jsonrpc\": \"2.0\", \"result\": empinfo, \"id\": 1})\n\n if callback == None:\n return data\n else:\n return \"{}({});\".format(callback, data)", "title": "" }, { "docid": "bef17404a3aa5b3e976fb606bda4ab9c", "score": "0.62197125", "text": "def get_users(self):", "title": "" }, { "docid": "c425a19ed4d80ea2fdd1f3a4b0fd1079", "score": "0.62138414", "text": "def get_users():\n return USERS", "title": "" }, { "docid": "2c50d0cd5ae041fcbe8378de496d3d3f", "score": "0.6200315", "text": "def getUsers():\n\n from django.contrib.auth.models import User\n\n return User.objects.filter(date_joined__year = now.year,\n date_joined__month = now.month,\n date_joined__day = now.day)", "title": "" }, { "docid": "580da7bfe2bb32de710b208c759d63af", "score": "0.6194208", "text": "async def _list(self, ctx: commands.Context, user: discord.Member = None):\n desc = \"\"\n if not user:\n title = f\"{ctx.guild.name} TempRoles\"\n for member_id, temp_roles in (await self.config.all_members(ctx.guild)).items():\n member: discord.Member = ctx.guild.get_member(int(member_id))\n if member:\n if roles := [ctx.guild.get_role(int(r)) for r in temp_roles[\"temp_roles\"].keys()]:\n desc += f\"{member.mention}: {humanize_list([r.mention for r in roles])}\\n\"\n else:\n await self.config.member(member).clear()\n else:\n title = f\"{user.display_name} TempRoles\"\n async with self.config.member(user).temp_roles() as 
member_temp_roles:\n for temp_role, end_ts in member_temp_roles.items():\n role: discord.Role = ctx.guild.get_role(int(temp_role))\n if role:\n r_time = datetime.fromtimestamp(end_ts) - datetime.now()\n desc += f\"{role.mention}: ends in {r_time.days}d {round(r_time.seconds/3600, 1)}h\\n\"\n else:\n del member_temp_roles[temp_role]\n return await ctx.send(embed=discord.Embed(\n title=title,\n description=desc,\n color=await ctx.embed_color()\n ))", "title": "" }, { "docid": "b4fc05d004f7ab3e138e9bb4a336d39b", "score": "0.61901605", "text": "def list(self, subcmd):\n\n for user in self.db.get_users():\n print(user.name)", "title": "" }, { "docid": "7d3b28f0dfee84e9e52a4d7a7a999ecb", "score": "0.61721486", "text": "def list(client):\n user = User.singleton(client)\n User.display(client, [user])", "title": "" }, { "docid": "2ca868d1c0611f5fb7cff3b73341ccee", "score": "0.61390114", "text": "def get_stale_users(self):\n stale_users = {f\"{self.days_since_pwdlastset}\": []}\n users = self.get_users()\n for user_obj in users:\n log.debug(\"processing user: %s\", user_obj)\n ft = user_obj.get(\"pwdLastSet\", [False]).pop()\n desc = user_obj.get(\"description\", [False]).pop()\n days = self.get_days_since_pwdlastset(ft)\n user = {\n \"name\": user_obj.get(\"cn\", [False]).pop(),\n \"email\": user_obj.get(\"mail\", [False]).pop(),\n \"dn\": user_obj.get(\"distinguishedName\", [False]).pop(),\n \"days_since_last_pwd_change\": days,\n }\n if days >= self.days_since_pwdlastset or desc == \"***TEST***\":\n log.info(\"got stale user: %s\", user)\n stale_users[f\"{self.days_since_pwdlastset}\"].append(user)\n log.debug(\"retrieved the following stale users: %s\", stale_users)\n return stale_users", "title": "" }, { "docid": "a3d8ed37a3264c08c8e33d630f27f9cc", "score": "0.61370057", "text": "def get_user_list(self):\n api_call = self._get('user')\n userlist_dict = self._assert_json_response_stop_on_error(api_call, True)\n\n # for item in ('users',):\n # assert item in userlist_dict.keys(), AssertionError('Unable to find {0}'.format(item))\n assert 'users' in userlist_dict.keys(), AssertionError('Unable to find users.')\n\n user_id_list = list()\n user_name_list = list()\n user_group_list = list()\n userlist_list = userlist_dict['users']\n for list_item in userlist_list:\n logger.info('list_item is {0}'.format(list_item))\n for key in ('user-id',\n 'user-name',\n 'user-group',\n 'user-group-name'):\n assert key in list_item.keys(), AssertionError('Unable to find {0}'.format(key))\n user_id_list.append(list_item['user-id'])\n user_name_list.append(list_item['user-name'])\n user_group_list.append(list_item['user-group'])\n\n for user_id in user_id_list:\n if user_id not in _UserManagement_Keywords.user_ids.values():\n _UserManagement_Keywords.user_ids['USER{0}'.format(len(_UserManagement_Keywords.user_ids))] = user_id\n logger.info(_UserManagement_Keywords.user_ids)\n\n for user_name in user_name_list:\n if user_name not in _UserManagement_Keywords.user_names.values():\n _UserManagement_Keywords.user_names['USER{0}'.format(len(_UserManagement_Keywords.user_names))] = user_name\n logger.info(_UserManagement_Keywords.user_names)\n\n _UserManagement_Keywords.user_groups.clear()\n for user_group in user_group_list:\n _UserManagement_Keywords.user_groups['USER{0}'.format(len(_UserManagement_Keywords.user_groups))] = user_group\n logger.info(_UserManagement_Keywords.user_groups)", "title": "" }, { "docid": "c069e2ec99a38dfaf0a33a3673f46cfc", "score": "0.61314356", "text": "def all_users(self) -> None:", "title": "" 
}, { "docid": "c77a0957d84e92f1be12aedf328d8eec", "score": "0.6123298", "text": "def admin_get_usr_modr_list():\n stmt = \"SELECT * FROM user WHERE user_type=%s OR user_type=%s ORDER BY reg_time DESC\"\n params = (\"User\", \"Moderator\")\n rows = mydb.execute_query(stmt, params)\n usr_mdrs = []\n for row in rows:\n obj = User()\n obj.set_user(row)\n usr_mdrs.append(obj)\n return usr_mdrs", "title": "" }, { "docid": "c992e90cd03ad5674c0bef5c7a6d3ec7", "score": "0.6121563", "text": "def get_user_list_for(self, me):\n user_list = self.model.objects.exclude(pk=me.pk).filter(is_staff=False)\n\n # Add modified timestamp from thread\n user_list = user_list.annotate(thread_modified=RawSQL(\"\"\"\n SELECT ct.modified\n FROM chat_thread_participants ctp\n INNER JOIN chat_thread ct ON ctp.thread_id = ct.id\n WHERE ctp.customuser_id = accounts_customuser.id AND ctp.thread_id IN \n (SELECT thread_id FROM chat_thread_participants WHERE customuser_id = %s)\n \"\"\", (me.pk,)))\n\n # Add body from last_message\n user_list = user_list.annotate(last_message_body=RawSQL(\"\"\"\n SELECT cm.body\n FROM chat_thread_participants ctp\n INNER JOIN chat_thread ct ON ctp.thread_id = ct.id\n INNER JOIN chat_message cm ON ct.last_message_id = cm.id\n WHERE ctp.customuser_id = accounts_customuser.id AND ctp.thread_id IN \n (SELECT thread_id FROM chat_thread_participants WHERE customuser_id = %s)\n \"\"\", (me.pk,)))\n\n # Add userid from last_message\n user_list = user_list.annotate(last_message_sender_id=RawSQL(\"\"\"\n SELECT cm.sender_id\n FROM chat_thread_participants ctp\n INNER JOIN chat_thread ct ON ctp.thread_id = ct.id\n INNER JOIN chat_message cm ON ct.last_message_id = cm.id\n WHERE ctp.customuser_id = accounts_customuser.id AND ctp.thread_id IN \n (SELECT thread_id FROM chat_thread_participants WHERE customuser_id = %s)\n \"\"\", (me.pk,)))\n\n return user_list", "title": "" }, { "docid": "561b255c24d9604458117f1bd4d009ef", "score": "0.6120848", "text": "def userList(self):\n # XXX should we check authenticated?\n op = SQL('''\n SELECT sid\n FROM session\n ORDER BY sid''')\n def parseRows(rows):\n return (x[0] for x in rows)\n return self.runner.run(op).addCallback(parseRows)", "title": "" }, { "docid": "ee738b9bdc2c396f6d87208ae54ed3d6", "score": "0.61191773", "text": "def list_users(self):\n return self._get(route='user')", "title": "" }, { "docid": "271b09c8c98d95eaa02dd12748d87afa", "score": "0.60974765", "text": "def get_active_users(self):", "title": "" }, { "docid": "5c093d682870dd9c61706fa74f12c20b", "score": "0.60845804", "text": "def test_get_run_as_users_list(self):\n pass", "title": "" }, { "docid": "5e26d275f22868d09b70ce2eb2da3556", "score": "0.6074501", "text": "def user_list(self):\n user_list = list(self.users().itervalues())\n user_list.sort(key=lambda x: str(x))\n return user_list", "title": "" }, { "docid": "e533f741591eed0bd3b96abb924777a0", "score": "0.6071622", "text": "def list_users(ctx, details):\n with connect(ctx) as nethsm:\n user_ids = nethsm.list_users()\n\n print(f\"Users on NetHSM {nethsm.host}:\")\n print()\n\n headers = [\"User ID\"]\n if details:\n headers += [\"Real name\", \"Role\"]\n data = []\n for user_id in user_ids:\n user = nethsm.get_user(user_id=user_id)\n data.append([user_id, user.real_name, user.role.value])\n else:\n data = [[user_id] for user_id in user_ids]\n\n print_table(headers, data)", "title": "" }, { "docid": "fefa76b5abf5dbd82dcd53b1892c46a5", "score": "0.6071184", "text": "def list_users(self,request):\n\n\t\t# ----------- DEBUG 
-----------------\n\t\tMODULE.info(\"printers/users/query invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\tself.lo, self.position = univention.admin.uldap.getMachineConnection(ldap_master=False)\n\t\tobjs = self.lo.search(base=self.position.getDomain(), filter='(&(|(&(objectClass=posixAccount)(objectClass=shadowAccount))(objectClass=univentionMail)(objectClass=sambaSamAccount)(objectClass=simpleSecurityObject)(&(objectClass=person)(objectClass=organizationalPerson)(objectClass=inetOrgPerson)))(!(uidNumber=0))(!(uid=*$)))', attr=['uid'])\n\t\tresult = [ obj[1][\"uid\"][0] for obj in objs ]\n\n\t\t# ---------- DEBUG --------------\n\t\tMODULE.info(\"printers/users/query returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = ''\n\t\tif len(result) > 5:\n\t\t\ttmp = result[0:5]\n\t\t\tMODULE.info(\" >> %d entries, first 5 are:\" % len(result))\n\t\t\tst = pp.pformat(tmp).split(\"\\n\")\n\t\telse:\n\t\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# --------------------------------\n\n\t\tself.finished(request.id,result)", "title": "" }, { "docid": "1c6ffab2e451216ec9152ec228e015fc", "score": "0.60612226", "text": "def list_sessions(request):\n sessions = Session.objects.filter(expire_date__gte=timezone.now())\n uid_list = []\n\n for session in sessions:\n data = session.get_decoded()\n uid_list.append(data.get('_auth_user_id', None))\n users = []\n\n for user in User.objects.filter(id__in=uid_list):\n try:\n lastlogin = UserLogin.objects.filter(Subject=user).latest('Timestamp')\n users.append({'user': user, 'lastlogin': lastlogin})\n except: # a session without a user (should not happen, only when user is deleted recently)\n pass\n\n return render(request, \"godpowers/list_sessions.html\", {\"users\": users})", "title": "" }, { "docid": "b58a192aac0231ea2520f5cd01a1fdd1", "score": "0.6057584", "text": "def get_users_info(self):\n headers.update({'Referer': self.domain + '/standings.phtml'})\n req = self.session.get(self.urls['team'], headers=headers).content\n soup = BeautifulSoup(req, \"html.parser\")\n\n money_bids = self.get_money_bids(self.username)\n\n info = list()\n for row in soup.find('table', cellpadding=2).find_all('tr')[1:]:\n money, max_bid = [0, 0]\n name = row.a.text\n user_id = row.find('a')['href'].split('pid=')[1]\n req = self.session.get(self.urls['player'] % user_id, headers=headers).content\n html_h1 = BeautifulSoup(req, \"html.parser\").h1.text\n username = re.findall('\\((.+)\\)', html_h1)[0]\n user_points = int(row.find_all('td')[2].text.replace('.', ''))\n team_value = int(row.find_all('td')[3].text.replace('.', ''))\n for user in money_bids['data']['users']:\n if user['id'] == user_id:\n money = int(user['dinero'].replace('.', ''))\n max_bid = int(user['puja'].replace('.', ''))\n break\n\n info.append([name, username, int(user_id), user_points, team_value, money, max_bid])\n\n return info", "title": "" }, { "docid": "ab8db204e33ab7d8eb4255b3f1528c86", "score": "0.60548216", "text": "def list(self):\n url = '%s/users' % self.region['Python Web Services']\n r = self.session.get(url)\n result = response_check(r, 'users')\n return result", "title": "" }, { "docid": "828589521a292dcfd700350e94791245", "score": "0.60537755", "text": "def get_users():\n\n users = User.query.all()\n\n return render_template('user_list.html', users=users)", "title": "" }, 
{ "docid": "14d271ae1fd555e25b3009c39f68aa5c", "score": "0.604675", "text": "def get_users_list():\n session = start_session()\n queryset = session.query(User)\n session.close()\n return queryset2list(queryset)", "title": "" }, { "docid": "8e6790d98a396fe09b2c26168e093711", "score": "0.60437846", "text": "def get_all_users(self):\n\n return []", "title": "" }, { "docid": "1261904fca8b600c2ca927d3f32bf756", "score": "0.6034384", "text": "def user_list(request):\n if check_admin_group(request, \"Администраторы\"):\n users = User.objects.filter(Q(groups__name=\"Менеджеры\") | Q(groups__name=\"Администраторы\")\n | Q(is_superuser=True)).order_by('-last_login')\n return render_to_response(\"mgmt/admin_user.html\", {'users': users},\n context_instance=RequestContext(request))\n else:\n # Return access denied\n return raise_403(request)", "title": "" }, { "docid": "b27431dd4fb0d5304f13be62109998e7", "score": "0.6028237", "text": "def requestTimeLeft(self, pUserName):\n # result\n result = -1\n message = msg.getTranslation(\"TK_MSG_CONFIG_LOADER_USER_NOTFOUND\") % (pUserName)\n\n # check if we have this user\n if pUserName in self._timekprUserList:\n # pass this to actual method\n self._timekprUserList[pUserName].getTimeLeft(True)\n\n # result\n result = 0\n message = \"\"\n\n # result\n return result, message", "title": "" }, { "docid": "50f924244cbba2fa530779ce420e3d20", "score": "0.6024228", "text": "def user_list():\n users = User.query.all()#query all user data from User class\n return render_template('user_list.html', users=users)#return users.html", "title": "" }, { "docid": "51ca36d207547dffe943f6ab0dfc3337", "score": "0.60128903", "text": "def get_all_user_data(self):\n return [self.url, self.name, self.username, self.about, self.location,\n self.website, self.joined_on.date(), self.joined_on.time(),\n self.verified, self.no_of_tweets,\n self.following, self.followers, self.listed_count]", "title": "" }, { "docid": "59692c95d0b172ffa4a867340d8da1b6", "score": "0.60064423", "text": "def get_users(self):\n\n return self._read('list/user')", "title": "" }, { "docid": "433a51172c026100e10e297655b4df81", "score": "0.59902114", "text": "def list_users(self, context, limit=None, marker=None,\n include_marker=False):\n return guestagent_utils.serialize_list(\n self._get_listed_users(self.client),\n limit=limit, marker=marker, include_marker=include_marker)", "title": "" }, { "docid": "9d8955a103ba48aa7e6bde1ed684e2ba", "score": "0.5987751", "text": "def get_users():\r\n connect() # Connect\r\n cursor.execute(\"SELECT * FROM users\") # Select all users\r\n item = cursor.fetchall()\r\n users = []\r\n for user in item:\r\n users.append(format_user(user)) # Format the users\r\n disconnect()\r\n return users", "title": "" }, { "docid": "6fc593bb0e0bfe4c1c1511cff896bd8b", "score": "0.5982436", "text": "def getScheduledUsers(userSource, date=Date()):\n # type: (String, Optional[Union[Date, int]]) -> List[PyUser]\n print(userSource, date)\n return [PyUser()]", "title": "" }, { "docid": "f38a68511237010667d4db9c6713eb6b", "score": "0.5975413", "text": "def get_user_list():\n users_list = []\n for user in Users.query.all():\n users_list.append(user.name)\n return jsonify(users_list), 200", "title": "" }, { "docid": "d2840e0d0202037c0a35d742ed4f3ce5", "score": "0.5964898", "text": "def list_users(self, hints):\n raise exception.NotImplemented() # pragma: no cover", "title": "" }, { "docid": "03b22397a6e42e27107564d7a7a1b2b5", "score": "0.5945334", "text": "def getActiveUsers(self):\n if self.activeUserCount 
== 0:\n logger.info(\"Empty room %s\" % self.roomName)\n return list()\n \n userList = list()\n for user in self.activeUsers:\n userList.append(user.userName)\n\n return userList", "title": "" }, { "docid": "16e09febd0edb139282376c5fa14558d", "score": "0.594166", "text": "def test_list_user_logins_users(self):\r\n user_id = None # Change me!!\r\n\r\n r = self.client.list_user_logins_users(user_id)", "title": "" }, { "docid": "48db846e89db32b64422ec0ad8857a62", "score": "0.59394574", "text": "def get_users():\n data = storage.all('User')\n return list(data) if data is not None else None", "title": "" }, { "docid": "eb254bf30fa7e3eaa8b86194c8da6ece", "score": "0.593879", "text": "def update_timer(self):\n cooled_down_users = []\n \n for user in self.users:\n if time() - self.users[user] < self.REPEAT_TIME:\n cooled_down_users.append(user)\n for user in cooled_down_users:\n del self.users[user]", "title": "" }, { "docid": "946b7606478e6b305d8c5fa46a8b9d35", "score": "0.59295356", "text": "async def get_all_userData_sorted(self):\r\n # get all user stats data from the server (username, wins, loses, games_played)\r\n # fetch_all_user_stats = [(\"isaac\", 12,0,12), (\"test1\", 0,10,10), (\"test2\", 4,10,14), (\"test3\", 10,4,14)]\r\n try:\r\n self.MSG.set(\"\")\r\n fetch_all_user_stats = await self.controller.SocketConnection.getAllPlayerData()\r\n if fetch_all_user_stats != True:\r\n self.MSG.set(fetch_all_user_stats)\r\n self.userDatas = self.controller.SocketConnection.leaderboard\r\n except:\r\n raise", "title": "" }, { "docid": "ee2d78fbd925d965e6f8f35572d20fbb", "score": "0.5923908", "text": "def test_all_users_since(self):\n since = 100000\n i = self.instance.all_users(since=since)\n self.get_next(i)\n\n self.session.get.assert_called_once_with(\n url_for(\"users\"),\n params={\"per_page\": 100, \"since\": since},\n headers={},\n )", "title": "" }, { "docid": "e1dcc036b2aac93c6ac920d6a642a316", "score": "0.59220815", "text": "def get_monitored_users(self):\n users = []\n \n for user in self.shuttledb.cursor().execute('SELECT name FROM Users'):\n users.append(str(user[0]))\n\n return users", "title": "" }, { "docid": "ea2585e7172124c1240f485492ce1520", "score": "0.59113944", "text": "def list_users():\n return bbs.dbproxy.DBProxy('userbase').keys()", "title": "" }, { "docid": "064133f7e652f377fc0fdab64756e15e", "score": "0.5908953", "text": "def user_show(cls):\n return cls.user_list", "title": "" }, { "docid": "54e4569524b7400ad37b11b8bbc17acf", "score": "0.589781", "text": "def get_users(self):\n return self.users", "title": "" }, { "docid": "c9e0c5330843908b2902a1934a053298", "score": "0.5897741", "text": "def list_users() -> None:\n app = make_app() # type: ignore\n with app.app_context():\n query = User.query.order_by(User.name)\n for row in query:\n print(row.name)", "title": "" }, { "docid": "31d85bddecf2b6124338a752d8fb2fa1", "score": "0.58975786", "text": "def get_user_list(self):\n temp_user_list = self.db(self.db.users.id >= 0).select().as_list()\n user_list = []\n for row in temp_user_list:\n user = User( row['username'],\n row['role'],\n row['email'],\n row['password'],\n row['last_name'],\n row['first_name'],\n row['id'] )\n user_list.append(user)\n \n return user_list", "title": "" }, { "docid": "2afc7b60e573c3218152fe8cd5cb8ead", "score": "0.5891037", "text": "def freeTimeBtw2Users(self, userList1, userList2):\n if userList1 == False or userList2 == False : \n return False\n else :\n if userList1 and userList2:\n freeTime = []\n j = 0\n i = 0\n while i < len(userList1) and 
j<len(userList2) :\n if userList1[i][1] < userList2[j][0]: \n i += 1\n elif userList1[i][0] > userList2[j][1]:\n j += 1\n else :\n start = max(userList1[i][0], userList2[j][0])\n end = min(userList1[i][1], userList2[j][1])\n if end == userList1[i][1] :\n i += 1\n else:\n j += 1\n if end > start :\n freeTime.append((start, end))\n return freeTime\n\n else : \n return userList1 if userList1 else userList2", "title": "" }, { "docid": "22271af9ec6458161d068954cc671a3c", "score": "0.5880105", "text": "def list_users(limit: int = 10, offset: int = 0):\n return USERS[offset:offset+limit]", "title": "" }, { "docid": "fe49d27521236b72ddc84ad60bb2a1b8", "score": "0.5877184", "text": "def list_users(self):\n return self.user_manager.list_objects()", "title": "" }, { "docid": "f1502594b5b207fb692528c772bd6f4d", "score": "0.5874288", "text": "def test_list_users(self):\n pass", "title": "" }, { "docid": "7ee3f5f3023c8fc15e4b394ce5885e99", "score": "0.58718735", "text": "def list_users(args):\n users_api = UsersApi(get_authenticated_client(args))\n for user in users_api.list():\n print user\n\n return ExitCodes.OK", "title": "" }, { "docid": "4c958084ef834d88a42ad466d41189aa", "score": "0.58648956", "text": "def get_users(self):\n \n return self.users", "title": "" }, { "docid": "0a4855cf01fcaa031091fd36d62d5320", "score": "0.5861235", "text": "async def read_users(\n db: Session = Depends(get_db),\n skip: int = 0,\n limit: int = 10,\n current_user: models.User = Depends(get_current_active_user),\n):\n if current_user.is_superuser:\n return crud.user.get_multi(db, skip=skip, limit=limit)\n else:\n return [current_user][skip:limit]", "title": "" }, { "docid": "9087cd115f68dc012041b5a01248c98b", "score": "0.585103", "text": "def get_users(self):\n sc = SlackClient(self.slack_token)\n return [{'name': user['name'],\n 'id': user['id'],\n 'first_name': user['profile']['first_name'],\n 'last_name': user['profile']['last_name']}\n for user in sc.api_call(\"users.list\")['members']\n if 'first_name' in user['profile'].keys() and 'last_name' in user['profile'].keys()]", "title": "" }, { "docid": "5bf31305e1f9893f1f97a19ee77114ab", "score": "0.5847081", "text": "def show_users(self):\n sql = \"SELECT username FROM dba_users ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#DBUSER}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print ( json.dumps({'data': lst}) )", "title": "" }, { "docid": "9b47e37a835655713623a72a4c006007", "score": "0.5845344", "text": "def get_all_usernames():\n return list(map(lambda u: u.username, get_all_users()))", "title": "" }, { "docid": "31b146824bdfbaa1047508928fddd9ed", "score": "0.58410865", "text": "def get(self):\n return user.get_all_users()", "title": "" }, { "docid": "bea1236195d1a8ece56183334f315370", "score": "0.5840651", "text": "def get_users(self):\n\t\tresult = []\n\n\t\t# get appropriate path from the config\n\t\tpath = self._config['server']['api_paths']['user_list']\n\n\t\t# send request and return response data\n\t\tresponse = self.send_request(path)\n\t\tif response:\n\t\t\t# filter user of the current bot execution\n\t\t\tresult = list(filter(lambda user: user['user']['username'] in self._credentials, response))\n\n\t\treturn result", "title": "" }, { "docid": "bf816d3e80ecc5102e04cf69b8a40876", "score": "0.58397985", "text": "def getUsers(self) -> int:\n return self.QueryResponse_int(\"Users?\\n\")", "title": "" }, { "docid": "0bc71231b8938bce5b508f8a1d2ae3ad", "score": "0.58391", "text": "def 
user_list_view(request):\n\n users = User.objects.all().exclude(id=request.user.id).values(\"id\", \"username\")\n\n context = {\n 'users': users,\n }\n return render(request, 'chats/user_list.html', context)", "title": "" }, { "docid": "71cc19adaadc31c1ed1f0efb3f8ac1c9", "score": "0.58385795", "text": "def get_user(self):\n\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_USERS)\n\n user_total = self._get_total_number(self.info['loc_cfg_user_total_number_span'], \"Users\")\n user_list = []\n row = 1\n i = 0\n max_row_user = int(self.info['const_cfg_max_row_user'])\n if user_total == u'0':\n logging.info(\"The Users table is empty\")\n return []\n\n while i < int(user_total):\n username = self.info['loc_cfg_user_name_cell']\n username = username.replace(\"$_$\", str(i))\n get_username = self.s.get_text(username)\n user_list.append(get_username)\n if row == max_row_user:\n row = 0\n self.s.click_and_wait(self.info['loc_cfg_user_next_image'], 2)\n\n row = row + 1\n i = i + 1\n\n return user_list", "title": "" }, { "docid": "a9a07b351a3832b7f3b75c97cdc585aa", "score": "0.5837897", "text": "def show_all_users():\r\n conn, c = crd.make_conn_c()\r\n user_list = c.execute('SELECT id, name, email, user_group FROM USERS').fetchall()\r\n conn.close()\r\n\r\n return user_list\r\n\r\n # Get the longest name and email\r\n # l_name = 4 # Title name is 4 letters\r\n # l_email = 5 # Title email is 5 letters\r\n # l_usergroup = 9\r\n\r\n\r\n\r\n # for row in user_list:\r\n # l_name = len(row[0]) if len(row[0]) > l_name else l_name\r\n # l_email = len(row[1]) if len(row[1]) > l_email else l_email\r\n # l_usergroup = len(row[2]) if len(row[2]) > l_usergroup else l_usergroup\r\n\r\n # print row[0] + \"*****\" + row[1] + \"*****\" + row[2]\r\n\r\n # result = \"\"\r\n # result += \"Name\"+\" \"*(l_name-4) + \" | Email\" + \" \"*(l_email-5) + \" | Usergroup\"\r\n # result += \"\\n\" + \"_\"*l_name + \"_|_\" + \"_\"*l_email + \"_|_\" + \"_\" * l_usergroup\r\n #\r\n # for row in user_list:\r\n # result += \"\\n\" + row[0]+ \" \"*(l_name-len(row[0])) + \" | \" + row[1] +\" \"*(l_email-len(row[1])) + \" | \" + row[2]\r\n #\r\n # return result\r", "title": "" }, { "docid": "1032afef3f190e26a6b9a0b76f253eed", "score": "0.58351386", "text": "def list_users():\n users = User.query.all()\n return render_template(\"index.html\", users=users)", "title": "" } ]
640019edd0a391d862b878cb752234ba
Insert new course data
[ { "docid": "9a04d325b5531af61b6049bc93c6890e", "score": "0.7448042", "text": "def insert_course(self, course):\n largest_id = max([int(i) for i in self.data.keys()])\n new_course_id = str(largest_id + 1)\n self.data[new_course_id] = course\n return self.data", "title": "" } ]
[ { "docid": "026398ace73e1ad5dbfa7bb033766ef4", "score": "0.7772229", "text": "def add_course(self, info):\n\n\t\twith self.connection.cursor() as cur:\n\t\t\tcur.execute(\"\"\"INSERT INTO courses VALUES (%s, %s, %s, %s)\"\"\", info)\n\n\t\tself.commit()", "title": "" }, { "docid": "123aede24e9b2ea234412a25eabfd090", "score": "0.68744904", "text": "def add_course():\n conn = mysql.connect\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM department\")\n names = cur.fetchall()\n error = None\n\n # Handle submit\n if request.method == 'POST':\n # Grab user info\n course_title = request.form[\"new_course_title\"]\n course_description = request.form[\"new_course_description\"]\n course_year = request.form[\"new_course_year\"]\n course_department = request.form[\"new_course_dep\"]\n try:\n # Insert new course into DB\n query = f\"INSERT INTO course (`crs_title`, `crs_description`, `crs_year`, `dep_code`)\" \\\n f\" VALUES ('{course_title}', '{course_description}', {course_year}, {course_department})\"\n print(query)\n cur.execute(query)\n conn.commit()\n error = \"Success!\"\n # Handle error\n except Exception as e:\n error = \"Problem creating course: \" + str(e)\n return render_template('addcourse.html', dep_names=names, error=error)", "title": "" }, { "docid": "d486f83852fdc48efb15466446e25a52", "score": "0.67363805", "text": "def saveCourse_btn_clicked(self):\n mat = self.var_txt_mat.get()\n course_name = self.var_txt_name.get()\n\n try:\n with connection.cursor() as cursor:\n # Create a new record\n sql = \"INSERT INTO Matiere (code_mat, nom_mat) VALUES \\\n (%s, %s)\"\n try:\n cursor.execute(sql, (mat, course_name,))\n except pymysql.err.IntegrityError as e:\n print(\"cannot duplicate Courses : \" + str(e))\n # connection is not autocommit by default.So lets commit to save changes\n connection.commit()\n finally:\n pass\n # connection.close()", "title": "" }, { "docid": "6636b8af4b35d54142786faf9cff6a83", "score": "0.6706542", "text": "def add_course(name, number, discipline):\n exists = (\n dal.DBSession.query(Course)\n .filter(\n Course.name == name,\n Course.number == number,\n Course.discipline == discipline,\n )\n .first()\n ) is not None\n\n if not exists:\n course = Course(name=name, number=number, discipline=discipline)\n dal.DBSession.add(course)\n dal.DBSession.commit()", "title": "" }, { "docid": "138bea8d6c4c00b476c32d4da24fb85e", "score": "0.6697207", "text": "def create_course():\n check_course_creation_body(request)\n course = {'name': request.json['name'],\n 'department': request.json['department'],\n 'teacher': request.json['teacher'],\n 'year': request.json['year'],\n 'semester': request.json['semester'],\n 'description': request.json['description'],\n 'schedule': request.json['schedule']}\n\n try:\n added_course = add_course(client, course)\n if added_course is None:\n abort(409) # Conflict\n return jsonify(added_course), 201 # Created\n except CoursesRepositoryException:\n abort(500) # Internal Server Error", "title": "" }, { "docid": "49684582abc49cc4f929bc57544b9391", "score": "0.66519773", "text": "def add_course(self, data):\n\n course = {'name': data[self.code],\n 'manualTutorialEnrolment': False\n }\n return course", "title": "" }, { "docid": "f99baa8cc3d1eaf47fd9a177a9d224d9", "score": "0.6632682", "text": "def test_create_new_course(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "title": "" }, { "docid": "6b380da63dba9e4366a369c112d5bd86", "score": "0.65756774", "text": "def add_course():\n\n print \"/new-course called\"\n user_id = session[\"user_id\"]\n print user_id\n course = request.form.get(\"course\")\n\n polyline = request.form.get(\"overview-polyline\")\n # print \"polyline is \", polyline\n\n directions_text = request.form.get(\"directions-text\")\n directions_distance = request.form.get(\"directions-distance\")\n\n start_address = request.form.get(\"start-address\")\n end_address = request.form.get(\"end-address\")\n\n new_course = Course(user_id=user_id, course_name=course,\n add_date=datetime.now(),\n start_lat=request.form.get(\"start-lat\"),\n start_long=request.form.get(\"start-long\"),\n end_lat=request.form.get(\"end-lat\"),\n end_long=request.form.get(\"end-long\"),\n course_distance=request.form.get(\"distance\"),\n favorite=request.form.get(\"favorite\"),\n polyline=polyline,\n directions_text=directions_text,\n directions_distance=directions_distance,\n start_address=start_address,\n end_address=end_address\n )\n new_course.add()\n\n return \"Course %s has been saved to your courses\" % course", "title": "" }, { "docid": "d01eb4995874dd506a6b2e644211a0a4", "score": "0.652634", "text": "def add_department(self, info):\n\n\t\twith self.connection.cursor() as cur:\n\t\t\tcur.execute(\"\"\"INSERT INTO courses VALUES (%s, %s, %s)\"\"\", info)\n\n\t\tself.commit()", "title": "" }, { "docid": "928ac07b53d208db36c9620be1536338", "score": "0.64894485", "text": "def courses(request):\n user = request.user\n if request.method == 'POST':\n form = OnlineCourseForm(request.POST)\n if form.is_valid():\n # save our data\n data = form.save(commit=False)\n data.created_by = user\n data.updated_by = user\n data.save()\n # insert data into informix\n sql = INSERT_CTC_REC(cid=user.id)\n do_sql(sql, key=DEBUG, earl=EARL)\n return HttpResponseRedirect(reverse_lazy('survey_success'))\n else:\n form = OnlineCourseForm()\n\n return render(request, 'survey/form.html', {'form': form})", "title": "" }, { "docid": "f56de4a944d58a0d1db643e485bfcd84", "score": "0.64234525", "text": "def add_course(self, title, topic, course_acronym, course_number, section_number, language=\"Python\"): #UPDATE\r\n self._courses.append(Section(title,topic,course_acronym, course_number,section_number,language))", "title": "" }, { "docid": "8d2c55f764822063ca7e9de23aa7757f", "score": "0.6401968", "text": "def test_add_course(self):\n\n file_path = os.path.join(os.path.dirname(__file__), 'testCourse.json')\n\n with open(file_path) as f:\n json_dict = json.loads(f.read())\n coursera_add_courses(json_dict)\n\n # there is one course in the test file\n all_courses = Course.objects.all()\n self.assertEquals(len(all_courses), 1)\n\n # the course has the right attributes\n the_course = all_courses[0]\n self.assertEquals(the_course.name, 'Machine Learning')\n self.assertEquals(the_course.description, 'Learn about the most effective machine learning techniques, and gain practice implementing them and getting them to work for yourself.')\n self.assertEquals(the_course.instructor, 'Andrew Ng, Associate Professor')\n\n # the course has the right university\n self.assertEquals(the_course.source.name, 'Stanford University')\n\n # the course has the right subjects\n the_course_subjects = the_course.subjects.all()\n self.assertEquals(len(the_course_subjects), 2)\n self.assertEquals(the_course_subjects[0].name, 'stats')\n self.assertEquals(the_course_subjects[1].name, 'cs')", 
"title": "" }, { "docid": "cb108c2b1b481c2f43a2965915a113dc", "score": "0.6361019", "text": "def add_course(self, courseID):\n self.current_courses.append(courseID)\n pass", "title": "" }, { "docid": "4f8aa880820d66e575b12d893c5d9920", "score": "0.63592136", "text": "def create_course(request):\n\n # Like before, get the request's context.\n context = RequestContext(request)\n course_added = False\n\n user = request.user\n profile = get_profile(user)\n\n if 'Instructor' in profile[1]:\n school = UserProfile.objects.get(user=user).school\n if not school:\n return render_permission_denied(context,\n 'create courses. Enrol in a school first.')\n else:\n #return HttpResponse(\"You don't have permission to create courses!\")\n return render_permission_denied(context, 'create courses')\n # If it's a HTTP POST, we're interested in processing form data.\n if request.method == 'POST':\n\n # Attempt to grab information from the raw form information.\n course_form = CourseForm(data=request.POST)\n if course_form.is_valid():\n # Save the event's form data to the database.\n course = course_form.save(commit=False)\n course.school = school\n course.creator = user\n\n # Add the personal calendar for the user\n calendar = Calendar( name = course.code + \" Calendar\")\n calendar.save()\n course.cal = calendar\n\n course.save()\n\n course_added = True\n # Invalid form or forms - mistakes or something else?\n # Print problems to the terminal.\n # They'll also be shown to the user.\n else:\n print course_form.errors\n\n # Not a HTTP POST, so we render our form using the EventForm.\n # These forms will be blank, ready for user input.\n else:\n course_form = CourseForm()\n\n # Render the template depending on the context.\n return render_to_response(\n 'school/create_course.html', {'course_form': course_form, 'user' : user,\n 'course_added': course_added, 'school': school},\n context)", "title": "" }, { "docid": "decfa14d717a3ae7dfdde8acb8f8324a", "score": "0.63294107", "text": "def CourseDetails(self, mydb, mycursor):\n try:\n sql = '''create table if not exists course_details(COURSE_ID VARCHAR(9) NOT NULL PRIMARY KEY,\n COURSE_NAME VARCHAR(20),\n COURSE_START_DATE DATETIME,\n COURSE_END_DATE DATETIME)'''\n #mycursor.execute(\"drop table course_details\")\n mycursor.execute(sql)\n mydb.commit()\n l = logfile.logger()\n l.info(\"DIMENSION : Course_Details created!\")\n except Exception as e:\n print(\"Error:\", e)", "title": "" }, { "docid": "aa8f6b4e62e6504d935e2299226b810c", "score": "0.63152754", "text": "def InsertCourseDetails(self,mydb,mycursor):\n try: \n filename = 'C:\\\\Users\\\\mohan\\\\Desktop\\\\csv\\\\dimension\\\\course_dim.csv'\n with open(filename,'r') as fr:\n for line in fr.readlines():\n col_value = line.replace('\\n','').split(',')\n insert_query = (\"insert into course_details values('{}','{}','{}','{}')\"\n .format(col_value[0],col_value[1],datetime.now(),col_value[2]))\n \n mycursor.execute(insert_query)\n mydb.commit()\n l = logfile.logger()\n l.info(\"Values inserted into the course_details table successfully!\")\n except Exception as e:\n l = logfile.logger()\n l.info(e)\n print(\"Error:\",e)", "title": "" }, { "docid": "943409489b95e4776b95ce92b16decd8", "score": "0.62980634", "text": "def add_course(self, course, grades=[]):\n self.courses[course] = grades", "title": "" }, { "docid": "35a2c60307d6ec25fff0d445e5a778df", "score": "0.6285272", "text": "def addCourse(self, course: Course):\n\n self.courseList.append(course)", "title": "" }, { "docid": "5ba108cbe77195a44d87ca98b2c22133", 
"score": "0.6275504", "text": "def add_course(self, course, comfort=None):\n for cuser in self.courseuser_set.all():\n if cuser.course == course:\n return\n if type(comfort) == type(None):\n CourseUser.objects.create(vsb_user=self, course=course, dirty_comfort=True, course_comfort='')\n else:\n CourseUser.objects.create(vsb_user=self, course=course, dirty_comfort=False, course_comfort=comfort)", "title": "" }, { "docid": "ef69bebe085e537b439668b2612e67c3", "score": "0.6264653", "text": "def _save(data):\n courses = shelve.open('../src/coursesPages/all_courses.db')\n courses['all_courses'] = data\n courses.close()", "title": "" }, { "docid": "9adf1395b17fe6ee31636eddce359336", "score": "0.6244702", "text": "def assign_course(self, course_id):\n yield self.modify(\n {\n '$addToSet': {'courses': course_id}\n }\n )", "title": "" }, { "docid": "4f1738a12cedd7cfb6fd8bfb4bb27d1b", "score": "0.62353545", "text": "def add_course(self, course, grade):\r\n self.courses[course] = grade", "title": "" }, { "docid": "1ff1c3fb5cd9da8be62de31186fc36e0", "score": "0.61824876", "text": "def courses_add(self, course: str, grade: str) -> None:\r\n\r\n self._courses[course] = grade", "title": "" }, { "docid": "73de286031d4334201d262a9fc6bad9b", "score": "0.61489636", "text": "def test_add_course(self):\n client = Client()\n client.login(username='testadmin', password='4epape?Huf+V')\n\n #checks that the course is created using the view\n client.post('/settings/courses/add_course', {'course_id': 'TDT4145', 'name': 'databaser', 'nickname': 'data',\n 'term': 'spring', 'description': 'test'})\n course = Course.objects.get(course_id=\"TDT4145\")\n self.assertEqual(course.name,'databaser')\n self.assertEqual(course.nickname, 'data')\n self.assertEqual(course.term, 'spring')\n self.assertEqual(course.description, 'test')\n\n #Checks that you cant overwrite a course that exsists, or make duplicates\n client.post('/settings/courses/add_course', {'course_id': 'TDT4140', 'name': 'programvare utvikling',\n 'nickname': 'pu1', 'term': 'spring 2017', 'description': 'test'})\n course = Course.objects.get(course_id=\"TDT4140\")\n self.assertNotEqual(course.name,'programvare utvikling')\n self.assertNotEqual(course.nickname, 'pu1')\n self.assertNotEqual(course.term, 'spring 2017')\n self.assertNotEqual(course.description, 'test')", "title": "" }, { "docid": "8ac58cfee422c67a9cd7e993d83f3d2a", "score": "0.61475503", "text": "def create_course(student, mnemonic, number):\n return Course.objects.create(student=student,mnemonic=mnemonic, number=number)", "title": "" }, { "docid": "8ac58cfee422c67a9cd7e993d83f3d2a", "score": "0.61475503", "text": "def create_course(student, mnemonic, number):\n return Course.objects.create(student=student,mnemonic=mnemonic, number=number)", "title": "" }, { "docid": "147a89bd401140fb086f1d631fbf3085", "score": "0.6083887", "text": "def add_course(self, course):\n if course.fetch_course_number() in self.courses:\n return False\n else:\n self.courses.update({course.fetch_course_number(): course})\n return True", "title": "" }, { "docid": "1db591493ee91473904e57c7ccba3f81", "score": "0.6080842", "text": "def handle_course_insert_input(input_value):\n handle_sub_input(input_value, insert_course, 5)\n return input_value", "title": "" }, { "docid": "8832135f03f996c7b72f20357ce51059", "score": "0.60423523", "text": "def add_course(self, course_id, term_id, program_year_id=None):\n user_course = _user_course.UserCourse.objects(\n user_id=self.id, course_id=course_id).first()\n\n if user_course is None:\n if 
_course.Course.objects.with_id(course_id) is None:\n # Non-existant course according to our data\n rmclogger.log_event(\n rmclogger.LOG_CATEGORY_DATA_MODEL,\n rmclogger.LOG_EVENT_UNKNOWN_COURSE_ID,\n course_id\n )\n return None\n\n user_course = _user_course.UserCourse(\n user_id=self.id,\n course_id=course_id,\n term_id=term_id,\n program_year_id=program_year_id,\n )\n else:\n # Record only the latest attempt for duplicate/failed courses\n if (term_id > user_course.term_id or\n user_course.term_id == _term.Term.SHORTLIST_TERM_ID):\n user_course.term_id = term_id\n user_course.program_year_id = program_year_id\n\n user_course.save()\n\n if user_course.id not in self.course_history:\n self.course_history.append(user_course.id)\n self.save()\n\n return user_course", "title": "" }, { "docid": "6e894c892ebdb4bab6f986ec7afdcb13", "score": "0.60161567", "text": "def test_add_to_django(self):\n provider, created = Provider.objects.get_or_create(name='iversity')\n sample_div = \"<article class='courses-list-item'><div class='ribbon-content'>Engineering</div></div><div class='course-body'><header><h2 class='truncate'><a href='https://iversity.org/courses/vehicle-dynamics-i-accelerating-and-braking'>Vehicle Dynamics I: Accelerating and Braking</a></h2><p class='instructors truncate'>Univ.-Prof. Dr.-Ing. Martin Meywerk</p></header><p class='description'>From Bugatti Veyron to Volkswagen Beetle, from racing to passenger car: study about their acceleration and braking and learn from two applications from automotive mechatronics. </p></div></div></div></div></div></div></div></article>\"\n sample_div = BeautifulSoup(sample_div)\n iversity.create_course(sample_div, provider)\n\n # Make sure the Engineering subject was created:\n new_subject = Subject.objects.get(name='engineering')\n self.assertIsNotNone(new_subject)\n\n # Make sure the course itself was created:\n new_course = Course.objects.get(name='Vehicle Dynamics I: Accelerating and Braking', provider=provider,\n subjects=new_subject)\n self.assertIsNotNone(new_course)\n\n # Make sure the course name is set properly:\n self.assertEqual('Vehicle Dynamics I: Accelerating and Braking', new_course.name)\n\n # Make sure the course URL is set properly:\n self.assertEqual('https://iversity.org/courses/vehicle-dynamics-i-accelerating-and-braking', new_course.url)\n\n # Make sure the course instructor is set properly:\n self.assertEqual('Univ.-Prof. Dr.-Ing. 
Martin Meywerk', new_course.instructor)\n\n # Make sure the course description is set properly:\n self.assertEqual('From Bugatti Veyron to Volkswagen Beetle, from racing to passenger car: study about their acceleration and braking and learn from two applications from automotive mechatronics.', new_course.description)", "title": "" }, { "docid": "4126a3c5783eba72f2aad12fb4354274", "score": "0.6015903", "text": "def admin_course():\n conn = mysql.connect\n cur = conn.cursor()\n error = None\n if request.method == 'POST':\n # User adds a new course\n if request.args.get(\"add\"):\n cur.execute(\"SELECT * FROM department\")\n course_title = request.form[\"new_course_title\"]\n course_description = request.form[\"new_course_description\"]\n course_year = request.form[\"new_course_year\"]\n course_department = request.form[\"new_course_dep\"]\n try:\n # Insert new record into table course\n query = f\"INSERT INTO course (`crs_title`, `crs_description`, `crs_year`, `dep_code`)\" \\\n f\" VALUES ('{course_title}', '{course_description}', {course_year}, {course_department})\"\n print(query)\n cur.execute(query)\n conn.commit()\n error = \"Success!\"\n except Exception as e:\n # Handle exception\n error = \"Problem creating course: \" + str(e)\n # User deletes the course\n elif request.args.get(\"delete\"):\n data = None\n jsonData = request.get_json()\n course_id = jsonData[\"id\"]\n try:\n # Delete id along with its reference in other tables\n delete_course_query(conn, cur, \"antirequisite\", \"crs_code\", course_id)\n delete_course_query(conn, cur, \"prerequisite\", \"crs_code\", course_id)\n delete_course_query(conn, cur, \"program_requirements\", \"program_crs\", course_id)\n delete_course_query(conn, cur, \"course\", \"crs_code\", course_id)\n data = {\"success\": \"true\"}\n except Exception as e:\n # Handle exception\n traceback.print_exc()\n data = {\"error\": \"Please try again.\"}\n finally:\n # Return json data\n return json.dumps(data)\n\n # User updates the course\n elif request.args.get(\"update\"):\n data = None\n jsonData = request.get_json()\n course_id = jsonData[\"id\"]\n title = jsonData[\"title\"]\n description = jsonData[\"description\"]\n year = jsonData[\"year\"]\n dep_code = jsonData[\"dep_code\"]\n pre_reqs = jsonData[\"pre_reqs\"] # JSON array containing ids of prerequisite\n anti_reqs = jsonData[\"anti_reqs\"] # JSON array containing ids of antirequisite\n\n try:\n # Update the record\n query = f\"UPDATE course \" \\\n f\"SET crs_title= '{title}', crs_description='{description}', crs_year={year}, dep_code={dep_code} \" \\\n f\"WHERE crs_code={course_id}\"\n cur.execute(query)\n conn.commit()\n update_req(conn, cur, \"prerequisite\", course_id, pre_reqs)\n update_req(conn, cur, \"antirequisite\", course_id, anti_reqs)\n data = {\"status\": \"success\"}\n\n except Exception as e:\n # Handle exception\n traceback.print_exc()\n data = {\"error\": 404}\n finally:\n # Return json\n return json.dumps(data)\n\n cur.execute(\"SELECT * FROM department\")\n deps = cur.fetchall()\n\n cur.execute(\"SELECT * FROM course\")\n courses = cur.fetchall()\n data = []\n # Retrieve course's info and add it to data array\n for row in courses:\n course = {}\n course[\"id\"] = row[0]\n course[\"code\"] = str(row[3]) + \"%02d\" % row[0]\n course[\"title\"] = row[1]\n course[\"description\"] = row[2]\n course[\"year\"] = row[3]\n course[\"dep_code\"] = row[4]\n\n query = f'SELECT dep_name FROM department WHERE dep_code={row[4]}'\n cur.execute(query)\n temp = cur.fetchone()[0]\n 
course[\"dep_name\"] = temp\n\n query = f'SELECT course.*, department.dep_name FROM course, prerequisite, department ' \\\n f'WHERE prerequisite.crs_code = {row[0]}' \\\n f' AND prerequisite.crs_requires = course.crs_code AND course.dep_code = department.dep_code'\n cur.execute(query)\n temp = cur.fetchall()\n pres = get_codes(temp)\n course[\"pre_reqs\"] = pres\n\n query = f'SELECT course.*, department.dep_name FROM course, antirequisite, department ' \\\n f'WHERE antirequisite.crs_code = {row[0]}' \\\n f' AND antirequisite.crs_anti = course.crs_code AND course.dep_code = department.dep_code'\n cur.execute(query)\n temp = cur.fetchall()\n antis = get_codes(temp)\n course[\"anti_reqs\"] = antis\n data.append(course)\n\n return render_template('admin-course.html', data=data, deps=deps, error=error)", "title": "" }, { "docid": "0aa3a712ac00197901fb0e7af5aeebcc", "score": "0.59774035", "text": "def test_create_course(self):\n parameters = ['config', 'database', 'semester', 'course']\n environment = 'course'\n module_text = 'Migration for a given Submitty course database.'\n parameter_text = \"\"\":param config: Object holding configuration details about Submitty\n :type config: migrator.config.Config\n :param database: Object for interacting with given database for environment\n :type database: migrator.db.Database\n :param semester: Semester of the course being migrated\n :type semester: str\n :param course: Code of course being migrated\n :type course: str\"\"\"\n self.create_test_runner(module_text, parameters, parameter_text, environment)", "title": "" }, { "docid": "2b7f844781d60708a5ec461926c57293", "score": "0.5972808", "text": "def create_or_update_course(request):\n\n global_stuff = User.objects.filter(is_staff=True).first()\n if global_stuff is not None:\n request.user = global_stuff\n else:\n raise PermissionDenied()\n\n course_key = modulestore().make_course_key(request.json[\"org\"], request.json[\"number\"], request.json[\"run\"])\n with modulestore().bulk_operations(course_key):\n course_key = modulestore().has_course(course_key)\n if course_key is None:\n response = _create_or_rerun_course(request)\n if response.status_code >= 400:\n return response\n course_key_string = json.loads(response.content).get(\"course_key\")\n if course_key_string is not None:\n course_key = CourseKey.from_string(course_key_string)\n else:\n return response\n course_data = request.json.copy()\n if course_data[\"start_date\"] is None:\n course_data[\"start_date\"] = format(DEFAULT_START_DATE, \"%Y-%m-%d\")\n course_data[\"end_date\"] = format(DEFAULT_START_DATE, \"%Y-%m-%d\")\n course_data[\"enrollment_end\"] = format(DEFAULT_START_DATE, \"%Y-%m-%d\")\n CourseDetails.update_from_json(course_key, course_data, global_stuff)\n set_course_cohort_settings(course_key, is_cohorted=True)\n modes = request.json.get(\"course_modes\", [])\n CourseMode.objects.filter(course_id=course_key).exclude(mode_slug__in=[mode[\"mode\"] for mode in modes]).delete()\n for mode in modes:\n mode_params = {\n \"course_id\": course_key,\n \"mode_slug\": mode[\"mode\"]\n }\n if \"price\" in mode:\n mode_params[\"min_price\"] = mode[\"price\"]\n if \"currency\" in mode:\n mode_params[\"currency\"] = mode[\"currency\"]\n if \"title\" in mode:\n mode_params[\"mode_display_name\"] = mode[\"title\"]\n if \"description\" in mode:\n mode_params[\"description\"] = mode[\"description\"]\n if \"upgrade_deadline\" in mode:\n mode_params[\"_expiration_datetime\"] = mode[\"upgrade_deadline\"]\n 
CourseMode.objects.update_or_create(course_id=course_key, mode_slug=mode[\"mode\"], defaults=mode_params)\n return JsonResponse({\n 'url': reverse_course_url('course_handler', course_key),\n 'course_key': unicode(course_key)\n })", "title": "" }, { "docid": "c7c3835a402bdb22bf899c9e913b1cd5", "score": "0.5949433", "text": "def add_course(self, course: str, grade: str) -> None:\n self._courses[course] = grade", "title": "" }, { "docid": "c7c3835a402bdb22bf899c9e913b1cd5", "score": "0.5949433", "text": "def add_course(self, course: str, grade: str) -> None:\n self._courses[course] = grade", "title": "" }, { "docid": "c7c3835a402bdb22bf899c9e913b1cd5", "score": "0.5949433", "text": "def add_course(self, course: str, grade: str) -> None:\n self._courses[course] = grade", "title": "" }, { "docid": "e26f3af68078dce9891952fb5e6b6be8", "score": "0.5937892", "text": "def post(self, request, institute_short_name):\n institute = request.user.institute\n form = AddCourseForm(request.POST, institute=institute)\n\n if form.is_valid():\n course = form.cleaned_data['course']\n\n try:\n \"\"\"\n the both field department and course must unique_together.\n if department and course not added before with same values,\n Then execute try block otherwise execute catch block.\n \"\"\"\n object = form.save(commit=False)\n object.institute = institute\n object.save()\n messages.success(request, f'{course} course is successfully added')\n return redirect('institute:administration', institute_short_name=institute_short_name)\n except:\n messages.error(request, f'{course} course with this Institute is already exists.')\n\n return redirect('institute:administration', institute_short_name=institute_short_name)\n\n context = {\n 'institute_short_name': institute_short_name,\n 'form': form,\n }\n\n return render(request, 'institute/administration.html', context)", "title": "" }, { "docid": "55a4bae7d39134776901c060874a2779", "score": "0.5934816", "text": "def insert_lesson(session, group, subject, teacher, day, start, end, room):\n session.add(Lesson(group, subject, teacher, day, start, end, room))", "title": "" }, { "docid": "5b97bb577662bf638706c4e669bf32b8", "score": "0.59191036", "text": "def test_copy_course_content(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "title": "" }, { "docid": "a5af280f94c6cf086c20f0d68622fdb6", "score": "0.5916784", "text": "def test_corresponding_course_exists(self):\n with self.assertRaises(ValidationError):\n AcademicPlan.objects.create(gradYear=self.year,\n planCode=\"Foobar\",\n courseCode=\"Foobar\",\n mcName=\"Foobar\",\n course_1=\"Foobar\", weight_1=0.5,\n weight_2=0.5)", "title": "" }, { "docid": "c10cb215667de11f3f135f5c206a5005", "score": "0.5904627", "text": "def save(cls, app_context, course):\n persistent = PersistentCourse13(\n next_id=course.next_id,\n units=course.units, lessons=course.lessons)\n\n fs = app_context.fs.impl\n filename = fs.physical_to_logical(cls.COURSES_FILENAME)\n app_context.fs.put(filename, vfs.FileStreamWrapped(\n None, persistent.serialize()))", "title": "" }, { "docid": "19ebc87c42ea9e86153723a4d55ba292", "score": "0.5888098", "text": "def copy_to(self, new_course):\n tasks_to_copy = self.course_tasks.all().prefetch_related(\"graded_work\")\n new_tasks = [\n CourseTask(\n course=new_course, description=task.description, duration=task.duration\n )\n for task in tasks_to_copy\n ]\n CourseTask.objects.bulk_create(new_tasks)\n\n # To avoid creating tasks individually, a second pass is needed\n # to add any graded work.\n tasks_to_compare = zip(\n tasks_to_copy, CourseTask.objects.filter(course=new_course)\n )\n graded_work = [\n GradedWork(course_task=copied_task)\n for task_to_copy, copied_task in tasks_to_compare\n if hasattr(task_to_copy, \"graded_work\")\n ]\n GradedWork.objects.bulk_create(graded_work)\n\n resources_to_copy = [\n CourseResource(\n course=new_course, title=resource.title, details=resource.details\n )\n for resource in self.resources.all()\n ]\n CourseResource.objects.bulk_create(resources_to_copy)", "title": "" }, { "docid": "b495111d451119975077934ae8724649", "score": "0.5882292", "text": "def insert(self):\n self.getDbRecord().insert()\n\n return", "title": "" }, { "docid": "3d3a4c6fd0ab43fadad19fa2d68ca191", "score": "0.58681166", "text": "def post(self):\n try:\n json_data = request.get_json(force=True)\n if not json_data:\n return {'message': 'No input data provided'}, 400\n\n if 'courses' not in json_data:\n return {'message': 'Invalid input data provided. 
courses key missing'}, 422\n\n if 'id' not in json_data:\n return {'message': 'Invalid input data provided'}, 422\n\n if json_data[\"courses\"]:\n for course_id in json_data[\"courses\"]:\n if course_id in courses:\n courses[course_id][\"students\"].append(json_data['id'])\n else:\n return {'message': 'Invalid input of course names provided'}, 422\n\n students[json_data['id']] = json_data\n return {'message': 'Student added successfully'}, 200\n\n except Exception as e:\n return {'message': 'Internal Server error', 'error': e}, 500", "title": "" }, { "docid": "73ed42a236d556b1181419bad637b66b", "score": "0.58580965", "text": "def create_course(self, org, course, run, user_id, fields=None, **kwargs): # lint-amnesty, pylint: disable=arguments-differ\n course_id = CourseKey.from_string('/'.join([org, course, run]))\n\n # Check if a course with this org/course has been defined before (case-insensitive)\n course_search_location = SON([\n ('_id.tag', 'i4x'),\n ('_id.org', re.compile(f'^{course_id.org}$', re.IGNORECASE)),\n ('_id.course', re.compile(f'^{course_id.course}$', re.IGNORECASE)),\n ('_id.category', 'course'),\n ])\n courses = self.collection.find(course_search_location, projection={'_id': True})\n try:\n course = courses.next() # lint-amnesty, pylint: disable=next-method-called\n raise DuplicateCourseError(course_id, course['_id'])\n except StopIteration:\n pass\n\n with self.bulk_operations(course_id):\n xblock = self.create_item(user_id, course_id, 'course', course_id.run, fields=fields, **kwargs)\n\n # create any other necessary things as a side effect\n super().create_course(\n org, course, run, user_id, runtime=xblock.runtime, **kwargs\n )\n\n return xblock", "title": "" }, { "docid": "0808beb259e91ec8233eeb5debddd24f", "score": "0.58566546", "text": "def insert_data(self):\n Assignment4.cursor.execute('INSERT INTO food (ref_date, geo, dgu_id, food_categories, commodity,uom, uom_id,scalar_factor,scalar_id,vector,coordinate,value,status,symbol,terminated, decimals) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',\n (self.entryRef_date.get(),self.entryGeo.get(),self.entryDguid.get(),self.entryCategory.get(),self.entryCommodity.get(),self.entryUom.get(),self.entryUom_id.get(),self.entryScalar.get(),\n self.entryScalar_id.get(),self.entryVector.get(),self.entryCoordinate.get(),self.entryValue.get(),self.entryStatus.get(),self.entrySymbol.get(),self.entryTerminated.get(),self.entryDecimal.get()))\n Assignment4.db.commit()\n print(\"New record inserted!!\")\n self.clear_fields()\n Assignment4.read_database(self)", "title": "" }, { "docid": "88da82953f8b57caa55257bd0cac81d4", "score": "0.58556044", "text": "def get_course(cls, department: Department, course_number: int, course_title: str, credit_hours: int):\n\n course, created = Course.objects.update_or_create(department=department,\n course_number=course_number,\n defaults={'course_title': course_title,\n 'credit_hours': credit_hours})\n\n if created:\n cls.logger.debug(f\"Course Created: {course.__str__()}\")\n\n return course", "title": "" }, { "docid": "b87628d8012ad90b93155110964bc904", "score": "0.58519685", "text": "def add_course_and_run():\n\n print \"/new-run in server.py called\"\n start_lat = request.form.get(\"start-lat\")\n start_long = request.form.get(\"start-long\")\n end_lat = request.form.get(\"end-lat\")\n end_long = request.form.get(\"end-long\")\n\n add_date = datetime.now()\n course = request.form.get(\"course\")\n distance = request.form.get(\"distance\")\n favorite = request.form.get(\"favorite\")\n date = 
request.form.get(\"date\")\n user_id = session[\"user_id\"]\n polyline = request.form.get(\"overview-polyline\")\n\n directions_text = request.form.get(\"directions-text\")\n directions_distance = request.form.get(\"directions-distance\")\n\n start_address = request.form.get(\"start-address\")\n end_address = request.form.get(\"end-address\")\n\n # print \"start_lat is %s, start_long is %s, end_lat is %s, end_long is %s, name is %s, distance is %s, favorite is %s\" % (start_lat, start_long, end_lat, end_long, course, distance, favorite)\n\n new_course = Course(user_id=user_id, course_name=course, add_date=add_date,\n start_lat=start_lat, start_long=start_long,\n end_lat=end_lat, end_long=end_long,\n course_distance=distance, favorite=favorite,\n polyline=polyline,\n directions_text=directions_text,\n directions_distance=directions_distance,\n start_address=start_address,\n end_address=end_address\n )\n\n new_course.add()\n # print \"course committed\"\n\n course_id = Course.get_by_course_name(course).course_id\n d = datetime.strptime(date, \"%m/%d/%Y\")\n duration = request.form.get(\"duration\")\n duration = int(duration)\n\n new_run = Run(user_id=user_id, course_id=course_id, run_date=d, duration=duration)\n new_run.add()\n\n return \"Your run was saved\"", "title": "" }, { "docid": "5faa041c1030a536bcac2d7dad2b6463", "score": "0.58512056", "text": "def course_update_or_create(course):\n defaults = {'name': course['course_name'], 'version_time': timezone.now}\n kwargs = {'courseId': course['courseId'], 'platform': course['platform'], 'defaults': defaults}\n Course.objects.update_or_create(**kwargs)", "title": "" }, { "docid": "c067bd637218deab0bfe0719ba21739f", "score": "0.5848478", "text": "def register(self, new_course):\n\n # TODO: check timeslot clashes\n\n if new_course.course_code in self.registered_courses:\n # already registered\n print(f\"{new_course.course_name} is already registered\")\n return\n # try to register in course\n if new_course.register_student(self):\n self.registered_courses[new_course.course_code] = new_course\n print(f\"{new_course.course_name} registered for {self}\")\n else:\n print(\"Course capacity full\")", "title": "" }, { "docid": "57a634624195500e03e5cf9b22773cb3", "score": "0.5842758", "text": "def store_course_student(self, course: str)-> None:\r\n self.courses[course] +=1", "title": "" }, { "docid": "30174b2f11abd041fecaaa2412227c5f", "score": "0.5805051", "text": "def add_student(self, course_index, name, id):\r\n if not self._courses:\r\n pass\r\n self._courses[course_index].add_student(name, id)", "title": "" }, { "docid": "53e17907975598270154533b2ee6a5c9", "score": "0.5786657", "text": "def insert(self, title, author, year, isbn):\n self.cur.execute(\"INSERT INTO book VALUES (NULL, ?,?,?,?)\", (title, author, year, isbn))\n self.conn.commit()", "title": "" }, { "docid": "33fc4596a642f302d01924bcf827ea31", "score": "0.5769123", "text": "def insert(self):\n self.__getDbRecord().insert()\n\n return", "title": "" }, { "docid": "f16a2421ea3c8b4722f3523fabaf27ed", "score": "0.576857", "text": "def insert_categories(self):\n cat_query = self.get_categories()\n if not cat_query:\n query = \"INSERT INTO Categories (id, name) VALUES (NULL, %s)\"\n self.mycursor.executemany(query, CATEGORIES)\n self.connector.commit()", "title": "" }, { "docid": "37d9e13f0864e4dc711869985f5a3799", "score": "0.5763789", "text": "def add_student(self, course: str) -> None:\n self._courses[course] += 1", "title": "" }, { "docid": "37d9e13f0864e4dc711869985f5a3799", "score": 
"0.5763789", "text": "def add_student(self, course: str) -> None:\n self._courses[course] += 1", "title": "" }, { "docid": "37d9e13f0864e4dc711869985f5a3799", "score": "0.5763789", "text": "def add_student(self, course: str) -> None:\n self._courses[course] += 1", "title": "" }, { "docid": "ba35b7e6819ab91e36d0b110594a16fd", "score": "0.5749186", "text": "def insert_data(self, data):\n self.db[self.table_name].insert(data)", "title": "" }, { "docid": "d603c4c601eee832fae70d8a1ab21b84", "score": "0.5748691", "text": "def add_student(self, course:str) -> None:\r\n \r\n self._students[course] +=1", "title": "" }, { "docid": "d72070bb066aa829c2a452e1bde657d9", "score": "0.57170534", "text": "def add_inst(self, course):\n self.course[course] += 1", "title": "" }, { "docid": "05310d1b6ca6293c890d69fa5e22ac38", "score": "0.5712089", "text": "def insert_data_into_db(self):\n data = self.text_fields_into_dict() ## Current item data dict\n parent = self.item_data if not self.new_cls else 'zero' ## set parent data if not new cls\n if self.ids.cls_nome.text.strip() and self.ids.cls_codigo.text.strip() != '':\n ## if required fields are not empty\n try:\n ## Instantiate data manager object \\/\n to_insert = ManageData(item_data=data,parent_data=parent)\n if not self.editing:\n to_insert.insert_into_db() ## inserts into database\n else:\n to_insert.update_db() ## updates database\n except:\n toast('Algo deu errado! Impossível salvar a Classe.')\n else:\n toast('Classe salva com sucesso!',1)\n self.app.root.ids.data_frame.clear_widgets() ## clear data management frame \n self.app.pcd_tree.regen_tree() ## Regenerates pcd treeview\n self.app.pcd_tree.switch_button(reset=True) ## switch toggle nodes button\n self.app.pcd_tree.disabled = False ## Unlocks treeview\n else:\n ## if required fields are empty\n toast('Campos obrigatórios estão em branco!')\n self.ids.cls_codigo.focus = True if self.ids.cls_codigo.text.strip() == '' else False\n self.ids.cls_nome.focus = True if self.ids.cls_nome.text.strip() == '' else False", "title": "" }, { "docid": "95eb9b2e5acb3514763e3a7c4f4769ab", "score": "0.5695588", "text": "def new_credit (col: pymongo.collection, credit_data: dict):\n try:\n col.insert_one(credit_data)\n except:\n print('Credit could not be saved')", "title": "" }, { "docid": "33bebbe91f643339f68045b8b0255fb3", "score": "0.56650835", "text": "def test_duplicate_course(self):\n plan = AcademicPlan.objects.create(gradYear=self.year,\n planCode=\"Foobar\",\n courseCode=\"Foobar\",\n mcName=\"Foobar\",\n course_1=\"Foobar\", weight_1=0.5,\n course_2=\"Foobar\", weight_2=0.5)\n with self.assertRaises(ValidationError):\n plan.clean()", "title": "" }, { "docid": "e55766a33c903ecd687eb83da294964b", "score": "0.566199", "text": "def assign_create(sessions_id, course_id):\n\n session = sessions.get_session(sessions_id)\n course = courses.get_course(course_id)\n\n if g.user['id'] != course['teacher_id']:\n abort(403)\n\n if course['course_num'] != session['course_id']:\n abort(403)\n\n if request.method == 'POST':\n\n name = request.form['name']\n points = request.form['points']\n description = request.form['description']\n due_date = request.form['due_date']\n\n if request.form['type'] == 'upload':\n type = 'upload'\n else:\n # Anything other than upload will default to standard\n type = 'standard'\n\n error = None\n\n try:\n int(points)\n except ValueError:\n error = 'Points are numbers only, check your values.'\n try:\n datetime.datetime.strptime(due_date, '%Y-%m-%dT%H:%M')\n except ValueError:\n error = 'Due 
Date only allows time data, check your values. Please format the time as such using military time. Year-Month-Day Hour:Minute ex. 2020-06-22 19:10'\n\n\n with db.get_db() as con:\n with con.cursor() as cur:\n\n if not name:\n error = 'Name is required.'\n\n if error is None:\n now = datetime.datetime.utcnow()\n cur.execute(\"\"\"INSERT INTO assignments (sessions_id, assign_name, description, points, due_time, type)\n VALUES (%s, %s, %s, %s, %s, %s)\"\"\",\n (sessions_id, name, description, points, due_date, type,)\n )\n con.commit()\n\n return redirect(url_for(\"assign.assign_manage\", sessions_id=session['id'], course_id=session['course_id'] ))\n\n flash(error)\n\n return render_template('assigns/assign_create.html', session=session)", "title": "" }, { "docid": "827b6bcae4ba073764f04f3c182eb73d", "score": "0.5649654", "text": "def add_new_question():\n\n # Insert question into database\n db.execute(\"INSERT INTO questions (teacher, question, choice_a, choice_b, choice_c, choice_d, answer) \\\n VALUES (:teacher, :question, :choice_a, :choice_b, :choice_c, :choice_d, :answer)\",\n {\"teacher\":session.get(\"teacher_id\"),\n \"question\":request.form.get(\"question\"),\n \"choice_a\":request.form.get(\"choice_a\"),\n \"choice_b\":request.form.get(\"choice_b\"),\n \"choice_c\":request.form.get(\"choice_c\"),\n \"choice_d\":request.form.get(\"choice_d\"),\n \"answer\":request.form.get(\"answer\")})\n db.commit()\n\n return redirect(\"/teacher\")", "title": "" }, { "docid": "d05c14bc9556c2bd7984172626be93c4", "score": "0.56445134", "text": "def add_data(self):\n values = []\n \n for entry in self.add_entries: \n if entry.get() == '': #Makes sure dat entries are not black\n msg.showerror('', 'Please fill the data appropriately')\n break\n else: values.append(entry.get()) #Entries are appended to values so dat it can be stored to the database\n \n else:\n if values[2].isdigit():\n\n try:\n execute_cmd(\"\"\"\n INSERT INTO student_data(Name, Class, Phone, DOB, DOA)\n VALUES(%s, %s, %s, %s, %s)\n \"\"\", values, 'school') #Adds valid record\n self.clear_entries(self.add_entries)\n \n for entry in self.add_entries:\n entry.config(state='readonly')\n \n #Threading\n self.create_thread(self.addprogress_thread, [])\n \n except Exception as e: msg.showerror('', 'Please fill dates in the designated')\n else: msg.showerror('', 'Please fill the phone number properly')", "title": "" }, { "docid": "c66ffaf81422c8f29fb290747f003634", "score": "0.5639091", "text": "def add(self, clist):\n if isinstance(clist, Course):\n # create course listing, rename as clist\n # get instructor and time info\n cnum = clist.getID()\n instr = raw_input(\"Instructor? \")\n time = raw_input(\"Time Block? 
\")\n clist = CourseListing(cnum, instr, time, 0)\n\n elif not isinstance(clist, CourseListing):\n print \"ERROR: \" + clist + \" is not a Course or CourseListing object\"\n return False\n\n # now we process CourseListing parameter\n cnum = clist.getCourse()\n sect = clist.getSect()\n if cnum in self._courses: # already at least one section\n # check and add section\n cldict = self._courses[cnum]\n if sect == 0 or sect in cldict:\n print \"These sections already exist: \" + str(cldict.keys())\n sect = int(raw_input(\"Enter new section # or existing to overwrite: \"))\n clist.setSect(sect)\n self._courses[cnum][sect] = clist\n else:\n # create empty dictionary of course listings for this course number\n self._courses[cnum] = {} \n self._courses[cnum][1] = clist # assume section 1 for first\n return True", "title": "" }, { "docid": "cd65987953f34ce4d6c3b9e6578e0ac4", "score": "0.56317604", "text": "def put(self):\n try:\n json_data = request.get_json(force=True)\n if not json_data:\n return {'message': 'No input data provided'}, 400\n\n if 'courses' not in json_data:\n return {'message': 'Invalid input data provided. courses key missing'}, 422\n\n if 'id' not in json_data:\n return {'message': 'Invalid input data provided'}, 422\n\n if json_data[\"courses\"]:\n for course_id in json_data[\"courses\"]:\n if course_id in courses:\n courses[course_id][\"students\"].append(json_data['id'])\n else:\n return {'message': 'Invalid input of course names provided'}, 422\n\n students[json_data['id']]['courses'] += json_data['courses']\n return {'message': 'Student enrolled successfully'}, 200\n\n except Exception as e:\n return {'message': 'Internal Server error', 'error': e}, 500", "title": "" }, { "docid": "c558fc358c87234f3a5c6f7015bb8df5", "score": "0.56305623", "text": "def insert_product(self, data, category):\n category += 1\n datas = data + (category, )\n query = \"INSERT INTO Products VALUES (NULL, %s, %s, %s, %s, %s)\"\n self.mycursor.execute(query, datas)\n self.connector.commit()", "title": "" }, { "docid": "8e101ad04d6bb1d205136f596c89ef38", "score": "0.5610089", "text": "def test_update_course(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "title": "" }, { "docid": "5796f9c4e186f57feb35533eb731e0b2", "score": "0.559446", "text": "def insert_data(self):\n connection = sqlite3.connect(DATABASE_URI)\n cursor = connection.cursor()\n query_find_by_id = \"SELECT * FROM language WHERE _id=? and language = ?\"\n result = cursor.execute(query_find_by_id, (self._id, self.language))\n rows = result.fetchone()\n\n if rows is not None:\n query = \"UPDATE language \" \\\n \"SET language = ?, understand = ?, speak = ?, read_write = ?\" \\\n \"WHERE _id = ? 
and language=?\"\n cursor.execute(query,\n (self.language, self.understand, self.speak, self.read_write, self._id,\n self.language,))\n else:\n query = \"INSERT INTO language values(?,?,?,?,?)\"\n cursor.execute(query, (self._id, self.language, self.understand, self.speak, self.read_write,))\n\n connection.commit()\n connection.close()", "title": "" }, { "docid": "6f06b08fe2d201a5775827e36d9af5c7", "score": "0.55936116", "text": "def insert_data(self):\n for index, row in self.data.iterrows():\n try:\n # SQL declaration\n self.cursor.execute(f\"\"\"INSERT INTO countries(\n id, region, city_name, \n language, time\n ) VALUES (\n {index},\n '{row[\"Region\"]}',\n '{row[\"City Name\"]}',\n '{row[\"Language\"]}',\n {row[\"Time\"]}\n )\"\"\")\n except Exception as e:\n print(str(e))\n self.conn.commit()", "title": "" }, { "docid": "09f7428fbe28e5a7af317974062e7526", "score": "0.55889785", "text": "def save_course(courses, course, terms):\n for term in terms:\n course_tmp = copy.deepcopy(course)\n course_tmp['academic_period'] = term\n courses.append(course_tmp)\n return courses, {}", "title": "" }, { "docid": "4c76a56c58cc7404d4553390ea333a19", "score": "0.5587326", "text": "def load_course(course_data, blacklist, duplicates):\n # pylint: disable=too-many-branches,too-many-locals\n\n course_id = course_data.pop(\"course_id\")\n runs_data = course_data.pop(\"runs\", [])\n topics_data = course_data.pop(\"topics\", [])\n offered_bys_data = course_data.pop(\"offered_by\", [])\n\n if course_id in blacklist:\n course_data[\"published\"] = False\n\n duplicates_record = next(\n (\n record\n for record in duplicates\n if course_id in record[\"duplicate_course_ids\"]\n ),\n None,\n )\n\n if duplicates_record:\n course = Course.objects.filter(course_id=duplicates_record[\"course_id\"]).first()\n if not course:\n course_data[\"course_id\"] = duplicates_record[\"course_id\"]\n course = Course.objects.create(**course_data)\n created = True\n else:\n created = False\n\n if course_id != duplicates_record[\"course_id\"]:\n duplicate_course = Course.objects.filter(course_id=course_id).first()\n if duplicate_course:\n duplicate_course.published = False\n duplicate_course.save()\n search_task_helpers.delete_course(duplicate_course)\n else:\n platform = course_data.get(\"platform\")\n course, created = Course.objects.update_or_create(\n platform=platform, course_id=course_id, defaults=course_data\n )\n\n run_ids_to_update_or_create = [run[\"run_id\"] for run in runs_data]\n\n for course_run_data in runs_data:\n load_run(course, course_run_data)\n\n if duplicates_record and not created:\n most_relevent_run = get_most_relevant_run(course.runs.all())\n\n if most_relevent_run.run_id in run_ids_to_update_or_create:\n for attr, val in course_data.items():\n setattr(course, attr, val)\n course.save()\n\n load_topics(course, topics_data)\n load_offered_bys(course, offered_bys_data)\n\n if not created and not course.published:\n search_task_helpers.delete_course(course)\n elif course.published:\n search_task_helpers.upsert_course(course.id)\n\n return course", "title": "" }, { "docid": "06a77387548d110cd8f098bb6f74123d", "score": "0.5587003", "text": "def test_create_external_tool_courses(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "title": "" }, { "docid": "ec437d4eb5cf885245d0cda3dda8b529", "score": "0.5575287", "text": "def courses():\n if authed_user == None or authed_user.role != User.ROLE_ADMIN:\n session.flash = T('You are not authorized to view that page');\n redirect(URL('index'));\n \n form = FORM( TABLE(\n TR(\"Department: \", INPUT(_type='text', _name='dept', requires=IS_NOT_EMPTY())),\n TR(\"Course Number: \", INPUT(_type='text', _name='number', requires=[IS_NOT_EMPTY(),IS_INT_IN_RANGE(0,None)])),\n TR(\"Section: \", INPUT(_type='text', _name='section', requires=IS_NOT_EMPTY())),\n TR(\"Instructor: \", INPUT(_type='text', _name='instructor', requires=IS_NOT_EMPTY())),\n TR(INPUT(_type='submit', _name='submit')))\n )\n \n if form.accepts(request.vars, session, formname='CourseForm', keepvalues=True):\n editCourses.submit_course(form.vars.dept, form.vars.number, form.vars.section, form.vars.instructor);\n elif form.errors:\n response.flash = 'Required course information missing'; \n \n if request.vars.delete:\n deleteId = int(request.vars.delete)\n editCourses.delete_course(deleteId);\n session.flash = 'Course %d deleted' % (deleteId);\n redirect(\"courses\");\n \n courses = searcher.search_courses(CourseSearchParams());\n \n return dict(courses=courses, form=form);", "title": "" }, { "docid": "a7e8ead6d71670a5424fad5ed23975e0", "score": "0.55695456", "text": "def test_upload_and_import_course(self):\n pass", "title": "" }, { "docid": "79a7fd6cc7e3a335b5ca60503912bc6c", "score": "0.55639863", "text": "def add_new_booking(cursor, mydb, data):\n full_service = data[\"service\"] + \" \" + data[\"addons\"]\n # the date the comes is 1 day earlier so i'm moving it 1 day forward\n new_date = day_plus_one(data[\"date\"].split(\"T\")[0])\n sql = \"INSERT INTO Customers (Full_Name, Email, Phone, Address, Service, Date, Hour, Price, Comments) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n val = (data[\"fullName\"], data[\"email\"], data[\"phone\"], data[\"fullAddress\"], full_service, new_date, data[\"hour\"], data[\"price\"], data[\"comments\"], )\n cursor.execute(sql, val)\n add_to_region(cursor, mydb, data[\"cit\"], new_date)\n # mydb.commit()", "title": "" }, { "docid": "45dc39b077ca101388676ababbffcd83", "score": "0.55425406", "text": "def __init__(self, name, gtid, year = 1):\n self.name = name\n self.gtid = gtid\n self.year = year\n self.courses = {}", "title": "" }, { "docid": "2822c75cfddd77a293e6818f62a1f0bd", "score": "0.55265087", "text": "def add_session(self, data, course):\n\n session = data[self.session]\n course[session] = {'lectures': [], 'tutorials': []}\n self.add_section(data, course[session], course)\n return session", "title": "" }, { "docid": "7abc90f0143fdda86de30a4983326997", "score": "0.5513948", "text": "def insertFstudent(self,mydb, mycursor):\n try:\n with open(r\"C:\\Users\\k.a.ramasubramanian\\Desktop\\Training\\git\\DataModel\\prod_data\\fact\\student_fact.csv\") as data:\n for i in reader(data):\n sql = \"INSERT INTO factstudent VALUES(%s,%s,%s,%s,%s,%s)\"\n val = (i[0], i[1], i[2], i[3],datetime.now(), i[5])\n mycursor.execute(sql,val)\n mydb.commit()\n print(\"No. 
of rows inserted:\", mycursor.rowcount)\n l = logfile.logger()\n l.info(\"FACT_STUDENT-RECORDS inserted\")\n print(\"Rows Inserted\")\n except Exception as e:\n print(\"Error:\", e)", "title": "" }, { "docid": "1b82d3de84c8d2410123f0d5eba3ca5e", "score": "0.550831", "text": "def add_new_chapter(chap_name, desc, mod_name):\n if get_chapter_id(mod_name, chap_name):\n return False\n\n mod_id = get_module_id(mod_name)\n c.execute(\"INSERT INTO chapters (name, desc, module_id) VALUES (?, ?, ?)\", (chap_name, desc, mod_id))\n conn.commit()\n return True", "title": "" }, { "docid": "8e294c88ec7f630f77c14348cf449181", "score": "0.55069983", "text": "def add_course(self, flag: str, course: str) -> None:\n if flag == \"R\":\n self._required.append(course)\n elif flag == \"E\":\n self._elective.append(course)", "title": "" }, { "docid": "ec7af8dc1b1ecec764717bad17266646", "score": "0.550609", "text": "def _create(self, coach, students):\n return", "title": "" }, { "docid": "706b876fbe72403f7f6a3bad78d5fe5d", "score": "0.5489819", "text": "def _insert(self):\r\n with con:\r\n cur = con.cursor()\r\n fieldnames = \", \".join(self.fields)\r\n SQLPATTERN = \"INSERT INTO {tablename}({fieldnames}) VALUES {values};\"\r\n values = self._field_value_list()\r\n SQL = SQLPATTERN.format(tablename=self.table, fieldnames=fieldnames, values=values)\r\n cur.execute(SQL)\r\n \"\"\" cur.lastrowid = pk that was created in the most recent insert \"\"\"\r\n self.pk = cur.lastrowid", "title": "" }, { "docid": "dfce0fb3de6c5af5012bc9b30b48b0e4", "score": "0.5489746", "text": "def saveCourses_btn_clicked():\n controller.show_frame(RegisterCourses)", "title": "" }, { "docid": "ad0e18576b19f4aab479b4b5c270c0aa", "score": "0.5485913", "text": "def run_insert(tokens, description):\n indices = run_location(tokens[1:-1], description)\n inserted_text = run_insert_text(tokens[-1], description)\n new_text = []\n for line, line_indices in zip(description.text, indices):\n new_line = line\n for index in sorted(line_indices, reverse=True):\n new_line = new_line[:index] + inserted_text + new_line[index + 1:]\n new_text.append(new_line)\n return CourseDescription(\n description.dept_code,\n description.number,\n new_text,\n )", "title": "" }, { "docid": "8699c663337d311dd82bd507ec082bab", "score": "0.54819226", "text": "def store_course_student(self, course: str):\n self._courses[course] += 1", "title": "" }, { "docid": "f4d12d2b267091ff8744b09ec0f84330", "score": "0.547943", "text": "def add_stud(self, course, grade):\n self.course[course] = grade", "title": "" }, { "docid": "eefb6760a0f222a32724931d47afb6ac", "score": "0.5476564", "text": "def insert_record(self, attributes, values):\r\n command = \"INSERT INTO \" + self.name + \" \" + attributes + \" VALUES \" + values\r\n\r\n try:\r\n cur = self.db.cursor()\r\n cur.execute(command)\r\n self.db.commit()\r\n except Error as e:\r\n print(e)", "title": "" }, { "docid": "307f42093ef79a697bb8266763dccd24", "score": "0.54750353", "text": "def register_section_current(request, num_):\n course = Course.objects.get(number__exact = num_)\n section = Section.objects.get(course__id__exact=course.id, year__exact=2018)\n student = Student.objects.get(user__id__exact=request.user.id)\n\n # Create a form instance and populate it with data from the request (binding):\n if student not in section.students.all():\n section.students.add(student)\n section.save()\n\n return HttpResponseRedirect(reverse('index'))", "title": "" }, { "docid": "890fcfedc1f9531729d728b2c5b72a74", "score": "0.54736674", "text": 
"def test_insert(self):\n component = Component(self.env)\n component.name = 'component3'\n component.insert()\n component_field = self._get_component_ticket_field()\n\n self.assertEqual(component.name, component._old_name)\n self.assertEqual([('component3', None, None)], self.env.db_query(\"\"\"\n SELECT name, owner, description FROM component\n WHERE name='component3'\"\"\"))\n self.assertIsNotNone(component_field)\n self.assertIn('component3', component_field['options'])", "title": "" }, { "docid": "d8ad4af653acf35d4033765a6f2a7e51", "score": "0.54658467", "text": "def create_course(self, course_name, course_description):\n\n group_m = self._client.groups\n role_m = self._client.roles\n\n course = self.create(\n name=course_name.replace('|', ''),\n description=course_description\n )\n\n for role in ROLES:\n if role == 'admin':\n group = group_m.find(name='admin')\n else:\n group_name = '{}|{}s'.format(course.name, role)\n group = group_m.create(\n name=group_name,\n description='{}s for course {}'.format(role, course_name),\n domain=course.id\n )\n\n os_role = role_m.find(name=role)\n role_m.grant(role=os_role, group=group.id, domain=course.id)\n\n return course.id", "title": "" }, { "docid": "cbba207cc6fb32f3c5ae5536ed8f78d6", "score": "0.5455851", "text": "def test_update_courses(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "title": "" }, { "docid": "3aa16d1009edbda7cf273fdce6f640ba", "score": "0.54481393", "text": "def generate_course(cls):\n courses = cls._load_courses()\n\n if not courses:\n courses = cls._generate_courses()\n course = CourseDataGenerator._get_random_course_from_courses_list(courses)\n\n course_template['course']['full_name']['course_full_name'] = course.get('course_name')\n course_template['course']['short_name']['short_name'] = course.get('course_short_name')\n course_template['course']['course_id']['id'] = course.get('course_id')\n course_template['course']['summary']['course_summary'] = course.get('course_synopsis')\n return course_template.get('course')", "title": "" }, { "docid": "a836f945b75f912043c5312774650f96", "score": "0.5442326", "text": "def add_data_Students(dataFileName, cur): \n with open(dataFileName) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n cur.execute(\"\"\"SELECT ID\n FROM Students\n WHERE ID=?\"\"\",(row[0],))\n \n result = cur.fetchone()\n if result:\n print(\"Student \" + row[0] + \" Already in Database\")\n else:\n cur.execute(\"INSERT INTO STUDENTS VALUES (?, ?, ?, ?, ?)\", row)\n \n print(\"Addition Successful\") \n csv_file.close()", "title": "" }, { "docid": "5efd2e90da36b34b8a31775cf1088d56", "score": "0.5434153", "text": "def add_student(self, first_name, middle_name, last_name, first_major,\n\tsecond_major, first_minor, second_minor, current_gpa, id, graduation_date,\n\tadvisor_id):\n\n\t\twith self.connection.cursor() as cur:\n\t\t\tcur.execute(\"\"\"INSERT INTO students VALUES \n\t\t\t(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) \"\"\", info)\n\n\t\tself.commit()", "title": "" }, { "docid": "add98144f29a67beae7c23fe4753d4e7", "score": "0.54313934", "text": "def insert_data(self):\r\n query2 = (\r\n \"INSERT IGNORE INTO Category (id, name) VALUE (%s, %s)\"\r\n )\r\n for name_category in data_categories:\r\n self.cursor.execute(query2, (name_category[0], name_category[1]))\r\n connexion.commit()", "title": "" }, { "docid": "6ccaa88ead1b14777ca8729c43e69602", "score": "0.5428111", 
"text": "def on_start(self):\n self.course_data = self.parent.course_data", "title": "" } ]
565d4025c023804111e8665eb2b2f138
Sets the campaign_id of this Context132.
[ { "docid": "eaf13e8218bd8433db01dd35c98a3556", "score": "0.8459122", "text": "def campaign_id(self, campaign_id):\n\n self._campaign_id = campaign_id", "title": "" } ]
[ { "docid": "a618fa22da5ee9e1f5b3ee60371da942", "score": "0.8029553", "text": "def campaign_id(self, value):\n\n self._campaign_id.set(value)", "title": "" }, { "docid": "80ed485b0d0c85fc62a94f2903a5d87d", "score": "0.7275301", "text": "def setCampaign(self, campaign):\n self.data.campaign = campaign\n return", "title": "" }, { "docid": "741a2be4e558e66f7d5ee3271b1391e1", "score": "0.7208308", "text": "def campaign_id(self):\n\n return self._campaign_id.value", "title": "" }, { "docid": "1e135d87bd26144b6f9810704e1b741a", "score": "0.71861076", "text": "def campaign_id(self):\n return self._campaign_id", "title": "" }, { "docid": "1e135d87bd26144b6f9810704e1b741a", "score": "0.71861076", "text": "def campaign_id(self):\n return self._campaign_id", "title": "" }, { "docid": "f373fc641927f976ed7f8937b2b936cf", "score": "0.70853436", "text": "def campaign_id(self):\n\n return self._identifier.campaign_id", "title": "" }, { "docid": "2b9f95e2d784376b48511079de923ee2", "score": "0.7004804", "text": "def campaign(self, campaign):\n\n self._campaign = campaign", "title": "" }, { "docid": "2b9f95e2d784376b48511079de923ee2", "score": "0.7004804", "text": "def campaign(self, campaign):\n\n self._campaign = campaign", "title": "" }, { "docid": "2b9f95e2d784376b48511079de923ee2", "score": "0.7004804", "text": "def campaign(self, campaign):\n\n self._campaign = campaign", "title": "" }, { "docid": "0fb53a0293118699e0c907164a6eb8b7", "score": "0.69668645", "text": "def campaign(self, campaign):\n if self.local_vars_configuration.client_side_validation and campaign is None: # noqa: E501\n raise ValueError(\"Invalid value for `campaign`, must not be `None`\") # noqa: E501\n\n self._campaign = campaign", "title": "" }, { "docid": "3dc757a331e36741d82671500c6abcc3", "score": "0.61387277", "text": "def contact_id(self, contact_id):\n self._contact_id = contact_id", "title": "" }, { "docid": "011bccaa67473256f2152bfcc9b8159c", "score": "0.6089699", "text": "def getCampaignId(self):\n return self.config['campaign']['id']", "title": "" }, { "docid": "c059083ce10f05f03542abbc1d1e0aa8", "score": "0.6061528", "text": "def contact_id(self, contact_id):\n\n self._contact_id = contact_id", "title": "" }, { "docid": "c059083ce10f05f03542abbc1d1e0aa8", "score": "0.6061528", "text": "def contact_id(self, contact_id):\n\n self._contact_id = contact_id", "title": "" }, { "docid": "c059083ce10f05f03542abbc1d1e0aa8", "score": "0.6061528", "text": "def contact_id(self, contact_id):\n\n self._contact_id = contact_id", "title": "" }, { "docid": "d68a9c11950ac004d715ba2de22d2c81", "score": "0.6013564", "text": "def campaign_name(self, campaign_name):\n if self.local_vars_configuration.client_side_validation and campaign_name is None: # noqa: E501\n raise ValueError(\"Invalid value for `campaign_name`, must not be `None`\") # noqa: E501\n\n self._campaign_name = campaign_name", "title": "" }, { "docid": "d68a9c11950ac004d715ba2de22d2c81", "score": "0.6013564", "text": "def campaign_name(self, campaign_name):\n if self.local_vars_configuration.client_side_validation and campaign_name is None: # noqa: E501\n raise ValueError(\"Invalid value for `campaign_name`, must not be `None`\") # noqa: E501\n\n self._campaign_name = campaign_name", "title": "" }, { "docid": "c89c0d0f8c54fbd3df3ce9545ace3426", "score": "0.5931133", "text": "def activity_id(self, activity_id):\n self._activity_id = activity_id", "title": "" }, { "docid": "c89c0d0f8c54fbd3df3ce9545ace3426", "score": "0.5931133", "text": "def activity_id(self, activity_id):\n 
self._activity_id = activity_id", "title": "" }, { "docid": "91f316044a9f123555fcbff106168300", "score": "0.58913976", "text": "def activity_id(self, activity_id):\n\n self._activity_id = activity_id", "title": "" }, { "docid": "91f316044a9f123555fcbff106168300", "score": "0.58913976", "text": "def activity_id(self, activity_id):\n\n self._activity_id = activity_id", "title": "" }, { "docid": "446a7c4e8c264e79a7b6b2d5afd38c9e", "score": "0.58248484", "text": "def get_campaign_id(self, campaign_name):\n return self._client.get_campaign_id(campaign_name)", "title": "" }, { "docid": "92a5bfffabd745ca6d8441e7492f78b8", "score": "0.5823947", "text": "def contact_entity_id(self, contact_entity_id):\n\n self._contact_entity_id = contact_entity_id", "title": "" }, { "docid": "de96a6f5bb09709e5139e0aab6736e66", "score": "0.5787186", "text": "def utm_campaign(self, utm_campaign):\n\n self._utm_campaign = utm_campaign", "title": "" }, { "docid": "0628abfeb4671af20d3fdfc870a50463", "score": "0.5767397", "text": "def campaign_delete(self, campaign_id: ID):\n response = requests.delete(\n self._url(FW_CAMPAIGN, id=campaign_id),\n headers=self._headers()\n )\n response.raise_for_status()\n LOG.info('Deleted campaign %s', campaign_id)", "title": "" }, { "docid": "103cef5cc582e118625aba3be7a648f3", "score": "0.5732969", "text": "def addToCampaign(contactId: int, campaignId: int) -> bool:\n pass", "title": "" }, { "docid": "5bd3ab71765283a7eae12a61eb3e2d52", "score": "0.5703808", "text": "def attribute_set_id(self, attribute_set_id):\n\n self._attribute_set_id = attribute_set_id", "title": "" }, { "docid": "721994961924ac44067e6867b58e1245", "score": "0.56745857", "text": "def email_campaign_id(self, email_campaign_id):\n if self.local_vars_configuration.client_side_validation and email_campaign_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `email_campaign_id`, must not be `None`\") # noqa: E501\n\n self._email_campaign_id = email_campaign_id", "title": "" }, { "docid": "3d3dd8719db3188fcd8fe888b113af73", "score": "0.5629351", "text": "def set_cid(cid):\n setattr(_thread_locals, 'CID', cid)", "title": "" }, { "docid": "f0c094006adaf28a8a57cba5dc509e26", "score": "0.5622132", "text": "def competition_id(self, competition_id):\n\n self._competition_id = competition_id", "title": "" }, { "docid": "c78a83ee0a80c33bf38527a8c0c543eb", "score": "0.5601656", "text": "def set_id(self, ssc_id):\r\n self.ssc_id = ssc_id", "title": "" }, { "docid": "142534183344c33e71c74df1b8457cd3", "score": "0.5561093", "text": "def set_id(self, a_id):\n self._id = a_id", "title": "" }, { "docid": "b7e0626e30bc722df2524dd1d8646baf", "score": "0.55458677", "text": "def delete_campaign(self, campaign_id):\n api = self._get_api(update_service.DefaultApi)\n api.update_campaign_destroy(campaign_id)\n return", "title": "" }, { "docid": "f001337f200e86a3bccbedfd64c2154f", "score": "0.5538001", "text": "def ad_id(self, ad_id):\n\n self._ad_id = ad_id", "title": "" }, { "docid": "e2ac30420aaa0084ac3621578be0372e", "score": "0.5534075", "text": "def facebook_add_campaign(self, integration_id, campaign_id):\n return super(Integration, self).post(\n 'facebook', integration_id, 'campaign')", "title": "" }, { "docid": "b74411fc0d35b5fd5aebb26f8510474e", "score": "0.5532426", "text": "def set_google_id(self, id):\n self._google_id = id", "title": "" }, { "docid": "4994e7268f40f0a6e3f558723146b27b", "score": "0.55277777", "text": "def conversation_id(self, conversation_id):\n self._conversation_id = conversation_id", "title": "" }, 
{ "docid": "2addd684e941dbcdb4bab2deb391bc9c", "score": "0.5516335", "text": "def start_campaign(self, campaign_uuid, campaign_name, email):", "title": "" }, { "docid": "8d55c61ea9341cbd3398413163f4caf8", "score": "0.55095065", "text": "def sentence_id(self, sentence_id: int):\n\n self._sentence_id = sentence_id", "title": "" }, { "docid": "422cfe25cb77978a7b7eb3f2a508ca7c", "score": "0.5477686", "text": "def crf_id(self, crf_id):\n\n self._crf_id = crf_id", "title": "" }, { "docid": "daa199703035575471551d0f2fc17978", "score": "0.5474826", "text": "def SetId(self, id: int) -> None:\n ...", "title": "" }, { "docid": "c53423742254fc2f039fc471721691cd", "score": "0.5462202", "text": "def context_entity_id(self, context_entity_id):\n\n self._context_entity_id = context_entity_id", "title": "" }, { "docid": "f5d3c96fcba3e06e2c8c10a7d3f7c7b1", "score": "0.54597604", "text": "def set_cust_id(self, cid):\n self.cust_id_ent.delete(0, \"end\")\n self.cust_id_ent.insert(0, str(cid))\n\n self.clear_cust_id = False", "title": "" }, { "docid": "75fabfcbc4bd41a2c4e161928d7b871a", "score": "0.5452416", "text": "def put(self):\n data = flask.request.data\n campaign_json = json.loads(data.decode('utf-8'))\n prepid = campaign_controller.create_campaign(campaign_json)\n return self.output_text({'response': prepid, 'success': True, 'message': ''})", "title": "" }, { "docid": "619f39d1b0d49d61f950534a47910c62", "score": "0.54131067", "text": "def SetId(self, id):", "title": "" }, { "docid": "619f39d1b0d49d61f950534a47910c62", "score": "0.54131067", "text": "def SetId(self, id):", "title": "" }, { "docid": "619f39d1b0d49d61f950534a47910c62", "score": "0.54127693", "text": "def SetId(self, id):", "title": "" }, { "docid": "0fdc7c5430446c68df50efa412283c38", "score": "0.54058033", "text": "def conversation_id(self, conversation_id):\n\n self._conversation_id = conversation_id", "title": "" }, { "docid": "427e1750b109981cf6af5d474cf57c89", "score": "0.5392319", "text": "def SetAffirmativeId(self, id):", "title": "" }, { "docid": "9ea9514e692d9351da7405754dba5335", "score": "0.5381878", "text": "def customer_group_id(self, customer_group_id):\n\n self._customer_group_id = customer_group_id", "title": "" }, { "docid": "dbba99b621e7a6c3afb06877fc831a68", "score": "0.5365705", "text": "def add_campaign(self, campaign_name, campaign_target):\n\n self.campaigns.add((campaign_name, campaign_target))", "title": "" }, { "docid": "a94696757d455591bdf6ff15ff0e3007", "score": "0.5349297", "text": "def incident_id(self, incident_id):\n\n self._incident_id = incident_id", "title": "" }, { "docid": "85071f7b01eedda052e3e42465e7c451", "score": "0.53443485", "text": "def _set_contact_id(cls, data):\n document_id = data.get(\"_id\")\n if document_id:\n data[\"contact_id\"] = document_id\n return data", "title": "" }, { "docid": "c81f56c5f3923ea9ede4bd6d46565e44", "score": "0.53192264", "text": "def client_acct_id(self, client_acct_id):\n\n self._client_acct_id = client_acct_id", "title": "" }, { "docid": "861ab0b76e09482e1dd7316def5217d3", "score": "0.5318342", "text": "def id_conta(self, id_conta):\n self._id_conta = id_conta", "title": "" }, { "docid": "861ab0b76e09482e1dd7316def5217d3", "score": "0.5318342", "text": "def id_conta(self, id_conta):\n self._id_conta = id_conta", "title": "" }, { "docid": "cb3e5bbdbb66c571625cb964fe303d44", "score": "0.5313205", "text": "def set_id(self, entity):\n self._id = entity", "title": "" }, { "docid": "a809e15447b322b85d2026cea20aa46a", "score": "0.53080744", "text": "def campaign_create(\n 
self,\n name: str,\n manifest_id: ID,\n device_filter: str\n ) -> ID:\n try:\n response = requests.post(\n self._url(FW_CAMPAIGNS),\n headers=self._headers(),\n json={\n 'campaign_strategy': 'one-shot',\n 'description': 'Development campaign',\n 'device_filter': device_filter,\n 'name': name,\n 'root_manifest_id': manifest_id\n }\n )\n response.raise_for_status()\n campaign_id = response.json()['id']\n LOG.info('Created Campaign ID: %s', campaign_id)\n return campaign_id\n except requests.HTTPError:\n LOG.error('Failed to create campaign')\n raise", "title": "" }, { "docid": "a123c45ca4e33fa910bf5d02188f50de", "score": "0.53037035", "text": "def setId(self, id):\n self.__id = id", "title": "" }, { "docid": "a9eecb56e683ba60ca77593bd4d04a1b", "score": "0.5300951", "text": "def set_id(self, id):\r\n self._id = id", "title": "" }, { "docid": "ee1f68ce7dc94a233e968c37748ab47b", "score": "0.5285731", "text": "def set_id(self, id):\n self.id = id", "title": "" }, { "docid": "ee1f68ce7dc94a233e968c37748ab47b", "score": "0.5285731", "text": "def set_id(self, id):\n self.id = id", "title": "" }, { "docid": "d0d1fdaa6e8094557687e88cbb4a8c3c", "score": "0.52790743", "text": "def set_id( self, id ):\n self.id = id", "title": "" }, { "docid": "b0a90661eb2ffcfbe19fed184bb00c0c", "score": "0.5277671", "text": "def activity_process_id(self, activity_process_id):\n\n self._activity_process_id = activity_process_id", "title": "" }, { "docid": "d648da41884c3f9e0cd4f001e00a4d90", "score": "0.52734834", "text": "def campaign_name(self):\n return self._identifier.campaign_name", "title": "" }, { "docid": "478a8a2a37ef4a5bb18749169eaf0892", "score": "0.52688164", "text": "def campaign_name(self):\n\n return self._identifier.campaign_name", "title": "" }, { "docid": "a918a56f117643c82d9596d93448daa4", "score": "0.5264504", "text": "def course_id(self, value):\r\n self.logger.warn(\"Setting values on course_id will NOT update the remote Canvas instance.\")\r\n self._course_id = value", "title": "" }, { "docid": "1ab19028a38b8f443e9d2ed0f4c5ed38", "score": "0.5247934", "text": "def set_id(self, id):\r\n self.id = id", "title": "" }, { "docid": "1ab19028a38b8f443e9d2ed0f4c5ed38", "score": "0.5247934", "text": "def set_id(self, id):\r\n self.id = id", "title": "" }, { "docid": "6269a0886f4313a771d77c130300a5e4", "score": "0.5244841", "text": "def SetId(self, id):\n self._id = id", "title": "" }, { "docid": "5b5128e5dc7594984c3547d83fa3197a", "score": "0.5243339", "text": "def course_id(self, value):\n self.logger.warn(\n \"Setting values on course_id will NOT update the remote Canvas instance.\"\n )\n self._course_id = value", "title": "" }, { "docid": "1c9a642c8a21aaeb7b5a7936e31a061e", "score": "0.5241628", "text": "def build_set_configuration_id(self, build_set_configuration_id):\n self._build_set_configuration_id = build_set_configuration_id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", 
"text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", 
"score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "61f018b2a549ddfdbb66bd43b1fd29d5", "score": "0.5238848", "text": "def id(self, id):\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id", "title": "" }, { "docid": "55b54cc05feaa243f46ebbb64a3ec12d", "score": "0.5237134", "text": "def audience_id(self, audience_id):\n if self.local_vars_configuration.client_side_validation and audience_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `audience_id`, must not be `None`\") # noqa: E501\n\n self._audience_id = audience_id", "title": "" }, { "docid": "ac79da4555cc6c635c3bf2f195dcc556", "score": "0.5234369", "text": "def get_campaign(self, campaign_id):\n api = self._get_api(update_service.DefaultApi)\n return Campaign(api.update_campaign_retrieve(campaign_id))", "title": "" }, { "docid": "bf8a9e049cd358ac4ab970252eae8c2e", "score": "0.522874", "text": "def set_id(self, key):\n self._id = key", "title": "" }, { "docid": "21234ee1cd54a22bc9f756e427a2e56c", "score": "0.5228145", "text": "def campaign(self):\n\n return self._campaign", "title": "" }, { "docid": "effb789ae800a290c5dea3f6ff8e7fea", "score": "0.5227302", "text": "def campaign_ids(self):\n\n return self._campaign_ids", "title": "" }, { "docid": "fe21965c625db913856fb3c445c2d072", "score": "0.52220494", "text": "def advisory_id(self, advisory_id):\n\n self._advisory_id = advisory_id", "title": "" }, { "docid": "81cb8c9d6725531b71faf40c23919e6d", "score": "0.52166605", "text": "def dataset_id(self, dataset_id):\n\n self._dataset_id = dataset_id", "title": "" } ]
48738414c52251aace9e1552c271cc99
Finds the nearest region to given coordinates.
[ { "docid": "c8906b8ca44ffb522de320e48c3f128e", "score": "0.7113429", "text": "def FindNearestRegion(self, lat, lon):\n self._Refresh()\n return self._cities.FindNearestNeighbor(lat, lon).parent", "title": "" } ]
[ { "docid": "cc5224e323d654e0d4732c87fd7ff737", "score": "0.6737705", "text": "def nearest(coordinate, coordinate_list, limit):", "title": "" }, { "docid": "11396b53fc89921c28fd992f59f2363d", "score": "0.6671556", "text": "def get_nearest_valid_coordinates(self, coordinates):\n pass", "title": "" }, { "docid": "e348d8ecb20c1ceba88dd9f2efe2a611", "score": "0.660837", "text": "def locate(self, lat, lon):\n for key, region in self.regions.iteritems():\n if region.contains(lat, lon):\n return key\n return None", "title": "" }, { "docid": "eb5f877eea7163bc2e96be144c6b64c7", "score": "0.6579592", "text": "def nearest_boundaries_location(self,x,y,xi,yi):\n mdgrd = np.array(zip( x.ravel(),y.ravel() ))\n kdt = kd.KDTree(mdgrd)\n self.mdi_dist, self.mdi = kdt.query(np.array(zip( xi, yi) ))", "title": "" }, { "docid": "cf233f76183b0e1d9c61461acc74d290", "score": "0.6503124", "text": "def nearest_grid_point(distances):\n return distances.index(min(distances))", "title": "" }, { "docid": "5adfd91824ae7915f5603993294d9411", "score": "0.6436937", "text": "def getClosest(self, coordinates: tuple, searchTolerance: str = \"\"):\n pass", "title": "" }, { "docid": "04803e163799e7eb1f0e470bfcc01801", "score": "0.63568676", "text": "def closest_to(cls, coordinates):\n # I should be using the db's geo stuff for this, but I'm keeping it\n # database-agnostic\n cities = cls.objects.all()\n cities = list(cities)\n\n cities.sort(key=lambda x: x.distance_from_coordinates(coordinates))\n return cities[0] if cities else None", "title": "" }, { "docid": "cb304253516b3ff2c27e50ffd59628ee", "score": "0.62382346", "text": "def nearestPoint( pos ):\n ( current_row, current_col ) = pos\n\n grid_row = int( current_row + 0.5 )\n grid_col = int( current_col + 0.5 )\n return ( grid_row, grid_col )", "title": "" }, { "docid": "75c1caf8d0aa9507b97abb12caeda00e", "score": "0.6215297", "text": "def find_node(self, coordinates):\n node = self.origin\n if node == None:\n return None\n z = 0\n while node.up and z < coordinates[2]:\n node = node.up\n z += 1\n y = 0\n while node.north and y < coordinates[1]:\n node = node.north\n y += 1\n x = 0\n while node.east and x < coordinates[0]:\n node = node.east\n x += 1\n if node.coordinates != coordinates:\n return None\n else:\n return node", "title": "" }, { "docid": "003552daf7ae1c21048ffad336e9d359", "score": "0.6199199", "text": "def find_closest_intersection(instructions_a, instructions_b, origin = (0, 0)):\n a = instructions_to_locations(instructions_a)\n b = instructions_to_locations(instructions_b)\n #print(a)\n #print(b)\n crossing_points = list(a.intersection(b))\n #print(crossing_points)\n winner = find_nearest_location(crossing_points)\n return winner", "title": "" }, { "docid": "46d0d1e97a5f63ed5a25dc28228c685f", "score": "0.61891586", "text": "def find_nearest_center(x, y):\n xs = np.array([j.x for j in self.current_scan.joints])\n ys = np.array([j.y for j in self.current_scan.joints])\n distance_partial = (ys - y) ** 2 + (xs - x) ** 2\n loc = np.where(distance_partial == distance_partial.min())[0][0]\n return loc", "title": "" }, { "docid": "9820e07d9f1673cb07cc0d472b858a34", "score": "0.61860967", "text": "def Nearest(*args):\n return _AIS.ais_Nearest(*args)", "title": "" }, { "docid": "13b7b6f1fae7e949e6d6d01b589220b5", "score": "0.61570233", "text": "def getClosestRegion(self, ids=None):\n if ids is None:\n ids = self.ids\n\n if ids is None:\n res = None\n elif len(ids) == 0:\n res = numpy.array([])\n else:\n res = numpy.zeros(max(ids)+1) -1\n res[ids] = 
self._closestRegion\n\n return res", "title": "" }, { "docid": "641fcfcef8604c7f379f01819355cc91", "score": "0.6150702", "text": "def get_nearest(points, coord):\n dists = [(pow(point[0] - coord[0], 2) + pow(point[1] - coord[1], 2), point)\n for point in points] # list of (dist, point) tuples\n nearest = min(dists)\n return nearest[1]", "title": "" }, { "docid": "a4a9d7af195790da3fe334f7abd2c496", "score": "0.61413366", "text": "def find_nearest(shapes, x_pos, y_pos):\n for shape in shapes:\n for x2, y2 in shape:\n if distance([x_pos, y_pos], [x2, y2]) < 1 / 100:\n return [x2, y2]\n return [x_pos, y_pos]", "title": "" }, { "docid": "27b2dd6751eeb9d2d13ec9eea442330c", "score": "0.6138079", "text": "def nearest(x,y,z,refGrid,N):\r\n pass\r\n \"\"\" NOT NEEDED!!!\"\"\"", "title": "" }, { "docid": "dad98dd0f74b37d6526365798a3977d6", "score": "0.6120982", "text": "def in_grid(coordinates):", "title": "" }, { "docid": "7c523dc2290b12d877d6290466b2032b", "score": "0.60085577", "text": "def get_nearest(self, lon, lat, shape):\n\t\tnearest = \"\"\n\t\tnearest_dist = 1000000 #arbitary huge number\n\t\tfor i in range(len(shape)):\n\t\t\tzone_lon, zone_lat = self.get_midpoint(shape[i].shape.points)\n\t\t\tdist = self.dist((lon, lat), (zone_lon, zone_lat))\n\t\t\tif dist < nearest_dist:\n\t\t\t\tnearest_dist = dist\n\t\t\t\tnearest = shape[i].record[1]\n\t\treturn nearest", "title": "" }, { "docid": "4e1ff8cf2a65de3cc45dc7016a68feb6", "score": "0.60053796", "text": "def near_geo(ddsx, ddsy, XA):\n\n d = {'x': ddsx, 'y': ddsy, 'myx': XA[0], 'myy': XA[1]}\n df = pd.DataFrame(d)\n df['dis'] = df.apply(_pd_haversine, axis=1)\n ind = df['dis'].idxmin()\n dis = df.iloc[ind]['dis']\n return dis, ind", "title": "" }, { "docid": "6087a8a634e11f47a4fbeb54dc24678c", "score": "0.5978894", "text": "def closest_aqhi(self, lat, lon):\n region_list = self.get_aqhi_regions()\n\n def site_distance(site):\n \"\"\"Calculate distance to a region.\"\"\"\n return distance.distance(\n (lat, lon), (site[\"latitude\"], site[\"longitude\"])\n )\n closest = min(region_list, key=site_distance)\n\n return closest['abbreviation'], closest['cgndb']", "title": "" }, { "docid": "ad7be3724656e9aa69eb7bc1cbdd2bdb", "score": "0.59621394", "text": "def get_nearest_node(self, coords, n=1):\n\n if not coords or len(coords) < 1:\n return\n elif n < 1:\n # TODO: print warning or throw an exception \n # TODO: search the KD tree for the coordinates\n return", "title": "" }, { "docid": "967ffce5cad7a5a57e4109b2d34cfa67", "score": "0.59406275", "text": "def _find_nearest_from_rectilinear_centroids(lon_centroids, lat_centroids,\n lon_point, lat_point,\n maximum_distance=None):\n\n # First calculate the difference in longitudes and warp overflowing\n # values back to [0, 360]\n distance_lon = np.mod(np.abs(lon_centroids-lon_point), 360)\n # For distances over 180, there's a shorter distance going the other\n # way around the globe.\n warp_indices = np.where(distance_lon > 180)\n distance_lon[warp_indices] = np.abs(distance_lon[warp_indices]-360.0)\n indices = np.where(distance_lon.min() == distance_lon)\n if len(indices[0]) > 1:\n msg = \"More than one nearest meridian, returning first index.\"\n logging.warning(msg)\n i = indices[0][0]\n distance_lat = np.abs(lat_centroids-lat_point)\n indices = np.where(distance_lat.min() == distance_lat)\n if len(indices[0]) > 1:\n msg = \"More than one nearest parallel, returning first index.\"\n logging.warning(msg)\n j = indices[0][0]\n minimum_distance = distance_lon_lat(lon_centroids[i], lat_centroids[j],\n 
lon_point, lat_point)\n if maximum_distance is not None:\n if minimum_distance > maximum_distance:\n raise GeogridError(\"No points within provided maximum distance.\")\n return (i, j)", "title": "" }, { "docid": "66e07eb2189a8cde0a23d88aaa69df63", "score": "0.59346557", "text": "def nearest(coord, url_config=RequestConfig):\n\thost = check_host(url_config.host)\n\turl = '/'.join(\n\t\t[host, 'nearest', url_config.version, url_config.profile,\n\t\t str(coord).replace('(', '').replace(')', '').replace(' ', '')]\n\t)\n\trep = requests.get(url)\n\tparsed_json = json.loads(rep.text.decode('utf-8'))\n\treturn parsed_json", "title": "" }, { "docid": "c69c40ba2e72a1eedaa30077c730cde8", "score": "0.59068733", "text": "def find_node_by_coords(coords, nodes):\n (x_value, y_value) = coords\n node = [node for node in nodes if ((node.x_coord == x_value) and\n (node.y_coord == y_value))]\n if len(node):\n return node[0]\n else:\n return None", "title": "" }, { "docid": "3a2fc2c5c3cdff6545416ff12c4bb988", "score": "0.5888296", "text": "def nearest_point(lat_var, lon_var, lats, lons):\r\n # this part is to handle if lons are givn 0-360 or -180-180\r\n if any(lons > 180.0) and (lon_var < 0.0):\r\n lon_var = lon_var + 360.0\r\n else:\r\n lon_var = lon_var\r\n \r\n lat = lats\r\n lon = lons\r\n\r\n if lat.ndim == 2:\r\n lat = lat[:, 0]\r\n else:\r\n pass\r\n if lon.ndim == 2:\r\n lon = lon[0, :]\r\n else:\r\n pass\r\n\r\n index_a = np.where(lat >= lat_var)[0][-1]\r\n index_b = np.where(lat <= lat_var)[0][-1]\r\n\r\n if abs(lat[index_a] - lat_var) >= abs(lat[index_b] - lat_var):\r\n index_lat = index_b\r\n else:\r\n index_lat = index_a\r\n\r\n index_a = np.where(lon >= lon_var)[0][0]\r\n index_b = np.where(lon <= lon_var)[0][0]\r\n if abs(lon[index_a] - lon_var) >= abs(lon[index_b] - lon_var):\r\n index_lon = index_b\r\n else:\r\n index_lon = index_a\r\n\r\n return index_lat, index_lon", "title": "" }, { "docid": "524960b4f799fd8efa8ba23560e634a2", "score": "0.58849317", "text": "def idx_of_nearest(coords, val):\n X = np.abs(coords.flatten()-val)\n idx = np.where(X == X.min())\n idx = idx[0][0]\n return coords.flatten()[idx]", "title": "" }, { "docid": "d593075f229e7283f1c473d80d053f87", "score": "0.58619833", "text": "def nearest_loc(query_location, reference_grid, tolerance=0.5):\n\n distances = np.abs(reference_grid - query_location)\n ind = distances.argmin()\n if distances[ind] >= tolerance:\n return np.nan\n else:\n return ind", "title": "" }, { "docid": "2773c32f94f7ca7ed1ac866d9ada5791", "score": "0.58555204", "text": "def getclosest_ij(lats, lons, latpt, lonpt):\n # find squared distance of every point on grid\n lats_dist = (lats - latpt)**2\n lons_dist = (lons - lonpt)**2\n return lats_dist.argmin(), lons_dist.argmin()", "title": "" }, { "docid": "dddde2a127500d05870f601bc55995b5", "score": "0.58491766", "text": "def get_closest_point(point, coordinates):\n line = geom.LineString(coordinates)\n return line.interpolate(line.project(geom.Point(point))).coords[0]", "title": "" }, { "docid": "c5995ec63c283ab222953455240feb46", "score": "0.58040106", "text": "def find_closest(location, centroids):\n # BEGIN Question 3\n return min(centroids, key=lambda x: distance(location, x))\n # END Question 3", "title": "" }, { "docid": "8a59b7afef3d7249813f76ff1b7ecd55", "score": "0.5791258", "text": "def get_region_of_interest(self):\n cv.namedWindow(\"Source Image\")\n cv.setMouseCallback(\"Source Image\", self.__click_mouse_callback)\n\n print(\"Getting the region of interest...\")\n while (True):\n 
cv.imshow(\"Source Image\", self.__image)\n if self.print_flag:\n print(f\"Location: ({self.coordenates})\")\n\n #! Fix the problem with negative-coordinates\n for j in iter(range(self.coordenates.shape[1])):\n self.coordenates[:,j] = np.sort(self.coordenates[:,j], 0)\n\n self.roiImage = self.__image[\n self.coordenates[0,0]:self.coordenates[1,0],\n self.coordenates[0,1]:self.coordenates[1,1]\n ]\n\n cv.imshow(\"Source Image - ROI\",self.roiImage)\n self.print_flag = False\n if (cv.waitKey(1) & 0xFF == ord('q')):\n break", "title": "" }, { "docid": "7bb2f88773d2a0a8532eb2a034515f25", "score": "0.57878023", "text": "def surrounding_pixels(array, coords, mode=\"direct neighborhood\"):\n shape = array.shape\n new_positions = []\n first, second = coords[0], coords[1]\n if mode == \"full neighborhood\":\n for ypos in range(first - 1, first + 2):\n for xpos in range(second - 1, second + 2):\n # print(xpos, ypos)\n # if xpos == first and ypos == second:\n # continue\n if ypos >= shape[0] or xpos >= shape[1] or xpos < 0 or ypos < 0:\n continue\n new_positions.append((ypos, xpos))\n elif mode == \"direct neighborhood\":\n positions = ((first - 1, second), (first + 1, second), (first, second),\n (first, second - 1), (first, second + 1))\n for xpos, ypos in positions:\n if xpos >= shape[0] or ypos >= shape[1] or xpos < 0 or ypos < 0:\n continue\n new_positions.append((xpos, ypos))\n return new_positions", "title": "" }, { "docid": "51ab6fde2af7b5e8988fe4871b387183", "score": "0.5765581", "text": "def closest_point(self, xy, r=float('inf')):\n dist,i = self.kdtree.query(xy, distance_upper_bound=r)\n return i,dist", "title": "" }, { "docid": "3a543c35d288c17ce8641dad981853a5", "score": "0.57636493", "text": "def FindNearestCity(self, lat, lon):\n self._Refresh()\n return self._cities.FindNearestNeighbor(lat, lon)", "title": "" }, { "docid": "b80541dda12456bae03254413e4d6f5a", "score": "0.5722385", "text": "def nearest(cls, lat, long, **kwargs):\n if kwargs.get('buses', False):\n buses = kwargs['buses']\n elif kwargs.get('route', False):\n buses = kwargs['route'].get_buses()\n else:\n buses = cls.all()\n\n dists = map(lambda bus: distance(\n # Avoid using cached location, to avoid more requests.\n (bus.__lat__, bus.__long__),\n (lat, long)\n ), buses)\n\n return min(zip(dists, buses))[::-1]", "title": "" }, { "docid": "1e436d2968f540e0620113f220016de8", "score": "0.5721261", "text": "def FindNearestCountry(self, lat, lon):\n self._Refresh()\n return self._cities.FindNearestNeighbor(lat, lon).parent.parent", "title": "" }, { "docid": "1c1d118146c51e1a7388af04c2c12d00", "score": "0.56799155", "text": "def __find_neighbor_at(self, x, y, list_of_nodes):\n for node in list_of_nodes:\n if node.location == (x, y):\n return node", "title": "" }, { "docid": "f8f0a2ed6ec77f521c7afc727861d76f", "score": "0.5674876", "text": "def find_node_location(coordinates):\n latitude = coordinates[0];\n longitude = coordinates[1];\n \n if((latitude <= 79 and latitude >= 54.548) and(longitude >= -180 and longitude <= -60.021)):\n return \"Node_1\";\n elif((latitude <= 79 and latitude >= 54.548) and(longitude >=-60 and longitude <= 59.989)):\n return \"Node_2\";\n elif((latitude <= 79 and latitude >= 54.548) and(longitude >=60 and longitude <= 180)):\n return \"Node_3\";\n elif((latitude <=54.52 and latitude >= -2.155) and(longitude >= -180 and longitude <= -60.021)):\n return \"Node_4\";\n elif((latitude <=54.52 and latitude >= -2.155) and(longitude >= -60 and longitude <= 59.989)):\n return \"Node_5\";\n elif((latitude 
<=54.52 and latitude >= -2.155) and(longitude >= 60 and longitude <= 180)):\n return \"Node_6\";\n elif((latitude <=-2.187 and latitude >= -56.97) and(longitude >= -180 and longitude <= -60.021)):\n return \"Node_7\";\n elif((latitude <=-2.187 and latitude >= -56.97) and(longitude >= -60 and longitude <= 59.989)):\n return \"Node_8\";\n elif((latitude <=-2.187 and latitude >= -56.97) and(longitude >= 60 and longitude <= 180)):\n return \"Node_9\";", "title": "" }, { "docid": "93872353d084d89ddd73ec86d8323b8c", "score": "0.5665834", "text": "def neighbourhoodsearch(self, center, iselite):\n result = center\n neighbourfabric = NeighbourCoordinateFabric(center,\n self.settings)\n\n if (iselite): itnum = self.settings.RECRUITEDELITE\n else: itnum = self.settings.RECRUITEDNONELITE\n\n for i in range(itnum):\n current = neighbourfabric.getcoordinate()\n if (current.z < result.z): result = current\n return result", "title": "" }, { "docid": "3d09415059f6485df6b0023aba155fe0", "score": "0.5664631", "text": "def find_closest2d(A):", "title": "" }, { "docid": "8c6370426cfc9e2e55d3a2327935b9f7", "score": "0.56597936", "text": "def region(allcoordinates, coordinates):\n p = False #auxilary variable to check if the region was found or not\n n = 0\n while n <= len(allcoordinates)-1 and not p:\n name = allcoordinates[n]['name']\n a = 0\n while a <= len(allcoordinates[n]['coordinates'])-1:\n p = polyregions(verts1, coordinates)\n a += 1\n n += 1\n if p:\n return name", "title": "" }, { "docid": "5c09582c05815ee02e53117dc32bb866", "score": "0.56463283", "text": "def nearxy(x,y,xi,yi):\n ind=ones(len(xi),dtype=int)\n dd=ones(len(xi),dtype='float')\n for i in arange(len(xi)):\n dist=sqrt((x-xi[i])**2+(y-yi[i])**2)\n ind[i]=dist.argmin()\n dd[i]=dist[ind[i]]\n return ind,dd", "title": "" }, { "docid": "7152b94eec4e5c542164ff68cbef577b", "score": "0.56310844", "text": "def nearest_neighbor(nodes, origin):\n distances = [distance(origin, node, 100) for node in nodes]\n if origin in nodes:\n distances[nodes.index(origin)] = float(\"Inf\")\n result = min(distances)\n return (distances.index(result), result)", "title": "" }, { "docid": "11033e415150acd05c1bb3250bcd89cf", "score": "0.5630308", "text": "def getXY(lats,lons,ptlat,ptlon):\n # Find closest lat/lon in array\n minlat = abs(lats-ptlat).min()\n minlon = abs(lons-ptlon).min()\n # Find where these are in the grid\n wherelat = N.where(abs(lats-ptlat) == minlat)\n wherelon = N.where(abs(lons-ptlon) == minlon)\n # pdb.set_trace()\n lat_idx = N.where(lats==lats[wherelat])[0][0]\n lon_idx = N.where(lons==lons[wherelon])[0][0]\n exactlat = lats[wherelat]\n exactlon = lons[wherelon]\n return lat_idx,lon_idx, exactlat, exactlon", "title": "" }, { "docid": "2117f3c9795dc5c2f64faafc3ef0476c", "score": "0.56120026", "text": "def get_nearest_neighbors(self):\n current_x, current_y = self.get_current_pos()\n\n # North\n try:\n tile_type = self.fountain.get_tile(\n current_x, current_y + TILE_LENGTH).get_tile_type()\n north = state_to_int[tile_type]\n except KeyError as e:\n north = 2\n\n # East\n try:\n tile_type = self.fountain.get_tile(\n current_x + TILE_LENGTH, current_y).get_tile_type()\n east = state_to_int[tile_type]\n except KeyError as e:\n east = 2\n\n # South\n try:\n tile_type = self.fountain.get_tile(\n current_x, current_y - TILE_LENGTH).get_tile_type()\n south = state_to_int[tile_type]\n except KeyError as e:\n south = 2\n\n # West\n try:\n tile_type = self.fountain.get_tile(\n current_x - TILE_LENGTH, current_y).get_tile_type()\n west = 
state_to_int[tile_type]\n except KeyError as e:\n west = 2\n\n # Below\n try:\n tile_type = self.fountain.get_tile(\n current_x, current_y).get_tile_type()\n center = state_to_int[tile_type]\n except KeyError as e:\n center = 2\n\n temp = north * (3**4)\n temp += east * (3**3)\n temp += south * (3**2)\n temp += west * 3\n temp += center\n\n return self.moveset[temp]", "title": "" }, { "docid": "290f28eaaa9721f1628746b4683b9b75", "score": "0.5609461", "text": "def _propose_nearby_loc(self,pos):\n pos = (pos[0] + random.uniform(-self.jitter,self.jitter), pos[1] + random.uniform(-self.jitter,self.jitter), pos[2])\n return self.mesh.nearest_point(pos=pos, radius=self.snap_radius)", "title": "" }, { "docid": "eee54e83f6f532091cd65612cd85b92a", "score": "0.5603823", "text": "def _findClosest(self, objects):\n closest = None\n obj = None\n relativePos = None\n robotPos = self.robot.position + \\\n (self.robot.orientation * ogre.Vector3(0.5, 0, 0))\n\n for o in objects:\n toObj = o.position - robotPos\n \n # None yet found, default to this one\n if closest is None:\n closest = (o, toObj)\n else:\n obj, realtivePos = closest\n if toObj.squaredLength() < realtivePos.squaredLength():\n # Found a better one switch to it\n closest = (o, toObj)\n\n return closest", "title": "" }, { "docid": "d951914b3601f5741466523814872000", "score": "0.5602097", "text": "def nearest_interp(xi, x, y):\r\n idx = abs(x - xi[:,None])\r\n return y[idx.argmin(axis=1)]", "title": "" }, { "docid": "34b3353d5f2fd0b3dac6867a4492a71c", "score": "0.55902964", "text": "def find_closest(self, x, y, halo=None, start=None):\r\n cx,cy = self._o2c(x,y)\r\n return self.find('closest', cx, cy, halo, start)", "title": "" }, { "docid": "3e6c7af74376f050780de921f8cdbff6", "score": "0.5590099", "text": "def cityreader_stretch(lat1, lon1, lat2, lon2, cities=[]):\n # within will hold the cities that fall within the specified region\n within = []\n\n # TODO Ensure that the lat and lon valuse are all floats\n coord_list = [lat1, lon1, lat2, lon2]\n\n # separate and sort lat and lon coordinates \n lat_coords = [coord_list[0], coord_list[2]]\n lon_coords = [coord_list[1], coord_list[3]]\n lat_coords = sorted(lat_coords)\n lon_coords = sorted(lon_coords)\n\n # Loop through each city and check to see if it falls within \n # the specified coordinates.\n for city in cities:\n if (city.lat > lat_coords[0] and city.lat < lat_coords[1]) and (\n city.lon > lon_coords[0] and city.lon < lon_coords[1]):\n within.append(city)\n\n return within", "title": "" }, { "docid": "a0c7b5117542191038df843b2dfec0af", "score": "0.55851114", "text": "def FindNearestNeighbors(self, lat, lon):\n return {'country': self.FindNearestCountry(lat, lon),\n 'region': self.FindNearestRegion(lat, lon),\n 'city': self.FindNearestCity(lat, lon)}", "title": "" }, { "docid": "178bfb3ff391f1d750cb74bc48e697b8", "score": "0.55836624", "text": "def closest(c, coords):\n best = 0\n best_distance = distance(c, coords[0])\n for i in range(1, len(coords)):\n c2 = coords[i]\n d = distance(c, c2)\n\n if d < best_distance:\n best = i\n best_distance = d\n return best", "title": "" }, { "docid": "fbd5a115ee44277f8b0decc5151ebf1d", "score": "0.5573302", "text": "def get_area_of_interest(image, search_area):\n top = search_area[0][1]\n bottom = search_area[1][1]\n left = search_area[0][0]\n right = search_area[1][0]\n\n return image[top:bottom, left:right, :]", "title": "" }, { "docid": "8c95c0a7408895c6269ea6c1c14226cf", "score": "0.55606455", "text": "def getlocation(indices, labels, Ni, Nj):\n zc 
= indices[labels == 1]\n if len(zc) == 0:\n return 0, None\n else:\n xmin = int(min(zc[:,1]))\n ymin = int(min(zc[:,0]))\n xmax = int(xmin + Nj)\n ymax = int(max(zc[:,0]) + Ni)\n return 1, ((xmin, ymin), (xmax, ymax))", "title": "" }, { "docid": "fbc2651e39806aed12199fe44b54109b", "score": "0.55543536", "text": "def _getSearchBoundaries(self, target_pixel) :\n n, m = target_pixel\n\n if self.local_radius :\n upper_i = min(n + self.local_radius, self.image.shape[0] - self.offset - 1)\n lower_i = max(n - self.local_radius, self.offset)\n upper_j = min(m + self.local_radius, self.image.shape[1] - self.offset - 1)\n lower_j = max(m - self.local_radius, self.offset)\n else :\n upper_i = self.image.shape[0] - self.offset - 1\n lower_i = self.offset\n upper_j = self.image.shape[1] - self.offset - 1\n lower_j = self.offset\n \n return upper_i, lower_i, upper_j, lower_j", "title": "" }, { "docid": "ac11c3f6bb2faf404c6b419d816195e9", "score": "0.5552873", "text": "def FindNearestNeighbor(self, lat, lon):\n cart = self._LatLonToCartesian(lat, lon)\n _, locale, _ = self._tree.nearest_neighbor(cart)\n return locale", "title": "" }, { "docid": "a6736bace561b3f2208b155fa6a605c6", "score": "0.5551941", "text": "def FindClosestPointWithinRadius(self, p_float, , p_float_4):\n ...", "title": "" }, { "docid": "e7e89de8e8759695f20a5b149cdc8766", "score": "0.55451214", "text": "def get_nearest(src_points, candidates, k_neighbors=1, distance_threshold=None):\n\n # Create tree from the candidate points\n coordinates = np.vstack(candidates.geometry.centroid.apply(lambda geom: (geom.x,geom.y)))\n tree = BallTree(coordinates, leaf_size=15, metric='haversine')\n\n # Find closest points and distances\n #src_points = src_points.reset_index() \n src_x = src_points.geometry.centroid.x\n src_y = src_points.geometry.centroid.y\n \n src_points = np.array([src_x, src_y]).reshape(-1,2)\n \n #If there are not enough neighbors, reduce K, then pad to original\n if k_neighbors > candidates.shape[0]:\n effective_neighbors = candidates.shape[0]\n else:\n effective_neighbors = k_neighbors\n \n distances, indices = tree.query(src_points, k=effective_neighbors)\n \n neighbor_geoms = candidates[candidates.index.isin(indices[0])]\n neighbor_geoms = neighbor_geoms.loc[indices[0]]\n \n #order by the indices\n neighbor_geoms[\"distance\"] = distances[0]\n\n if distance_threshold:\n neighbor_geoms = neighbor_geoms[neighbor_geoms.distance > distance_threshold]\n \n # Return indices and distances\n return neighbor_geoms", "title": "" }, { "docid": "76bd88fe9b7da42e6fe9ba2bebf0d8fc", "score": "0.5535252", "text": "def nearest(pos, board):\n\n a, b = pos\n i = 0 # Number of cells away from target\n possible = [] # Possibilities for the nearest cell\n while True:\n added = False\n if a + i <= 27 and board[a+i][b]: # Right\n possible.append((a+i, b))\n added = True\n if a - i > 0 and board[a-i][b]: # Left\n possible.append((a-i, b))\n added = True\n if b + i <= 35 and board[a][b+i]: # Top\n possible.append((a, b+i))\n added = True\n if b - i > 0 and board[a][b-i]: # Bottom\n possible.append((a, b-i))\n added = True\n if added:\n break\n else: # If not valid tile is found increment i\n i += 1\n\n return random.choice(possible) # Choose a random cell based on all of the possible nearest ones", "title": "" }, { "docid": "cd69b910a9e25ed1f782728f2b396169", "score": "0.5529872", "text": "def find_closest_room(self, position):\n position = self.map_from_global(position)\n self.update_room_positions()\n new_room = None\n min_dist = -1\n x_pos = 
position.x()\n y_pos = position.y()\n # O(n^2) solution, but too fast to need optimizations\n for room, loc in self.room_locations.items():\n dist = np.sqrt(\n np.square(x_pos - loc[0]) + np.square(y_pos - loc[1]))\n if min_dist < 0:\n min_dist = dist\n new_room = room\n elif dist < min_dist:\n min_dist = dist\n new_room = room\n return new_room", "title": "" }, { "docid": "9049129fc607a4ac1819a238708ab215", "score": "0.55296147", "text": "def nearest_node(point, nodes):\n return nearest(point, nodes)", "title": "" }, { "docid": "9049129fc607a4ac1819a238708ab215", "score": "0.55296147", "text": "def nearest_node(point, nodes):\n return nearest(point, nodes)", "title": "" }, { "docid": "0f0f70c736cb344abb97633c6570ff75", "score": "0.5518578", "text": "def _find_nearest_from_irregular_centroids(lon_centroids, lat_centroids,\n lon_point, lat_point,\n maximum_distance=None):\n\n d = distance_lon_lat(lon_centroids, lat_centroids, lon_point, lat_point)\n minimum_distance = d.min()\n if maximum_distance is not None:\n if minimum_distance > maximum_distance:\n raise GeogridError(\"No points within provided maximum distance.\")\n indices = np.where(d == d.min())\n if len(indices[0]) > 1:\n logging.warning(\"More than one nearest point, returning first index.\")\n return (indices[0][0], indices[1][0])", "title": "" }, { "docid": "77c459c86bdf9eb1c02e3614d75f604f", "score": "0.5518351", "text": "def locate_object(self, name, x0=None, y0=None):\n # TODO: Find out what this does and optimize it.\n if x0 is None:\n x0 = len(self.vision[0])\n if y0 is None:\n y0 = len(self.vision)\n number_of_coordinates = 0\n x_sum = 0\n y_sum = 0\n for y, row in enumerate(self.vision[:y0]):\n for x, object_name in enumerate(row[:x0]):\n if object_name == name:\n x_sum += x\n y_sum += y\n number_of_coordinates += 1\n if number_of_coordinates == 0:\n return None\n # Why is this writen this way?\n x_coordinate = x_sum / number_of_coordinates\n y_coordinate = y_sum / number_of_coordinates\n return x_coordinate, y_coordinate", "title": "" }, { "docid": "e0a8c8e86327a57d005e9db11dc2118f", "score": "0.55168664", "text": "def _find_region(self, base: Geometry, region: RegionLocator):\n if not isinstance(base, Undefined):\n self.logger.warning(\"Using absolute region coordinates\")\n\n position = Region(region.left, region.top, region.right, region.bottom)\n return [position]", "title": "" }, { "docid": "b95d523053f2132e3e5561a855e62a50", "score": "0.5512935", "text": "def find_location(location: Tuple[float, float]) -> Optional[SensorPoint]:\n\n # Get all rows\n rows = SensorPoint.objects.all()\n # Case the database is empty\n if len(rows) == 0:\n return None\n smallest_distance = None\n closest_point = None\n for i in range(0, len(rows)):\n d_distance = haversine_formula(location, (float(rows[i].latitude), float(rows[i].longitude)))\n if d_distance < 500.0:\n if smallest_distance is None or d_distance < smallest_distance:\n smallest_distance = d_distance\n closest_point = rows[i]\n return closest_point", "title": "" }, { "docid": "5e4d0d1a8b38a12a394d9b9a0088ed82", "score": "0.55100685", "text": "def find_location(coordinates):\n\n locator = Nominatim(user_agent='Name')\n location = locator.reverse(coordinates, language='en')\n\n return (location.raw['address']['country'],\\\n location.raw['address']['city'],\\\n location.raw['address']['state'])", "title": "" }, { "docid": "bc35d484f7caf786a23b46b9abc66bf7", "score": "0.5509586", "text": "def find_nearest(longitudes, latitudes, longitude, latitude,\n maximum_distance=None, 
grid_type=None,\n lon_dimensions=None, lat_dimensions=None):\n\n if grid_type is None:\n grid_type = detect_grid(longitudes, latitudes, lon_dimensions,\n lat_dimensions)\n if grid_type == 'list_of_2d_points':\n f = _find_nearest_from_list_of_points\n return f(longitudes, latitudes, longitude, latitude, maximum_distance)\n elif grid_type == 'rectilinear_2d_centroids':\n f = _find_nearest_from_rectilinear_centroids\n return f(longitudes, latitudes, longitude, latitude, maximum_distance)\n elif grid_type == 'rectilinear_2d_bounds':\n f = rectilinear_2d_bounds_to_vertices\n (lon_vertices, lat_vertices) = f(longitudes, latitudes)\n f = _find_nearest_from_rectilinear_vertices\n return f(lon_vertices, lat_vertices, longitude, latitude,\n maximum_distance)\n elif grid_type == 'rectilinear_2d_vertices':\n f = _find_nearest_from_rectilinear_vertices\n return f(longitudes, latitudes, longitude, latitude, maximum_distance)\n elif grid_type == 'irregular_2d_centroids':\n f = _find_nearest_from_irregular_centroids\n return f(longitudes, latitudes, longitude, latitude, maximum_distance)\n elif grid_type == 'irregular_2d_vertices':\n f = _find_nearest_from_irregular_vertices\n return f(longitudes, latitudes, longitude, latitude, maximum_distance)", "title": "" }, { "docid": "b578caa94ddfc78ca5365195ecb5f568", "score": "0.55021286", "text": "def find_closest_neighbor(point, neighbors):\n uv_point = ll2uv(point[0], point[1])\n id_neighbor = None\n angle_min = float(\"inf\")\n for i, n in enumerate(neighbors):\n angle = angular_distance(uv_point, ll2uv(n[0], n[1]))\n if angle < angle_min:\n id_neighbor = i\n angle_min = angle\n\n return id_neighbor", "title": "" }, { "docid": "58768599a8c020c8ab3c9e447ef2620d", "score": "0.55007684", "text": "def _find_region(self, point: Tuple[int, int]) -> str:\n if point[0] <= self._centre[0] and point[1] <= self._centre[1]: # NW\n return 'NW'\n elif point[0] <= self._centre[0]: # SW\n return 'SW'\n elif point[1] <= self._centre[1]: # NE\n return 'NE'\n else: # SE\n return 'SE'", "title": "" }, { "docid": "e3752a0e50cb0779cee68e879efba6d6", "score": "0.5499344", "text": "def get_river_src(coords):\r\n # Get elevation information from .tif file\r\n gdal.UseExceptions()\r\n try:\r\n src_ds = gdal.Open( 'H:\\spatial entity\\data\\shpdata\\\\chinasrtm24000_16000.tif' )\r\n except RuntimeError, e:\r\n print 'Unable to open INPUT.tif'\r\n print e\r\n sys.exit(1) \r\n try:\r\n srcband = src_ds.GetRasterBand(1)\r\n except RuntimeError, e:\r\n print 'Band ( %i ) not found' # band_num\r\n print e\r\n sys.exit(1) \r\n elevation = srcband.ReadAsArray()\r\n # Transform coordinates to pixel \r\n gt = src_ds.GetGeoTransform() \r\n coords_elevation = [] \r\n for i in range(0,len(coords)):\r\n px = int((coords[i][0] - gt[0]) / gt[1]) # y pixel\r\n py = int((coords[i][1] - gt[3]) / gt[5]) # x pixel\r\n coords_elevation += [elevation[py][px]]\r\n # Get the index of maximum elevation\r\n return coords_elevation.index(max(coords_elevation))", "title": "" }, { "docid": "53c9f99cce164ea8b3c00e42e71e81b5", "score": "0.549926", "text": "def ais_Nearest(*args):\n return _AIS.ais_Nearest(*args)", "title": "" }, { "docid": "79b252fa7089edd47c0662afa14699d6", "score": "0.5499103", "text": "def bestMatch(images, region = SCREEN, minOverlap = 0.9):\n if len(images) == 0:\n return None\n matches = getAllMatches(images, **{ 'region' : region, 'timeout' : 0 })\n if matches[0] is not None:\n best_match = 0\n best_score = matches[0].getScore()\n else:\n best_match = None\n best_score = 0\n for m in range(1, 
len(matches)):\n match = matches[m]\n if match is None:\n continue\n score = match.getScore()\n if best_match is None:\n best_match = m\n best_score = score\n else:\n if not sameRegion(matches[best_match], match, minOverlap):\n raise Exception('images %d, %d found in different regions' %\n (best_match, m))\n if score > best_score:\n best_match = m\n best_score = score\n if best_match is None:\n if region.getThrowException():\n raise FindFailed('none of the images was found')\n else:\n return None\n return best_match, matches[best_match]", "title": "" }, { "docid": "43aa4ea3c1ea8596124264226c36a81f", "score": "0.54961836", "text": "def NearestApex(*args):\n return _AIS.ais_NearestApex(*args)", "title": "" }, { "docid": "5bf75e2b9048c33cca8fc8d013b19436", "score": "0.54923683", "text": "def closest(self, coords=[], **kwargs):\n if kwargs and coords:\n raise ValueError(\"Specify coordinate using as either a list \"\n \"keyword arguments not both\")\n if kwargs:\n coords = []\n getter = []\n for k, v in kwargs.items():\n idx = self.get_dimension_index(k)\n if np.isscalar(v):\n coords.append((0, v) if idx else (v, 0))\n else:\n if isinstance(v, list):\n coords = [(0, c) if idx else (c, 0) for c in v]\n if len(coords) not in [0, len(v)]:\n raise ValueError(\"Length of samples must match\")\n elif len(coords):\n coords = [(t[abs(idx-1)], c) if idx else (c, t[abs(idx-1)])\n for c, t in zip(v, coords)]\n getter.append(idx)\n else:\n getter = [0, 1]\n getter = itemgetter(*sorted(getter))\n if len(coords) == 1:\n coords = coords[0]\n if isinstance(coords, tuple):\n return getter(self.closest_cell_center(*coords))\n else:\n return [getter(self.closest_cell_center(*el)) for el in coords]", "title": "" }, { "docid": "be71a08c4892610430148064741e02f0", "score": "0.5483117", "text": "def get_nearest(src_points, candidates, k_neighbors=1, distance_threshold=None):\n\n # Create tree from the candidate points\n coordinates = np.vstack(candidates.geometry.centroid.apply(lambda geom: (geom.x,geom.y)))\n tree = BallTree(coordinates, leaf_size=15, metric='haversine')\n\n # Find closest points and distances\n #src_points = src_points.reset_index() \n src_x = src_points.geometry.centroid.x\n src_y = src_points.geometry.centroid.y\n \n src_points = np.array([src_x, src_y]).reshape(-1,2)\n \n #If there are not enough neighbors, reduce K, then pad to original\n if k_neighbors > candidates.shape[0]:\n effective_neighbors = candidates.shape[0]\n else:\n effective_neighbors = k_neighbors\n \n distances, indices = tree.query(src_points, k=effective_neighbors)\n \n neighbor_geoms = candidates[candidates.index.isin(indices[0])]\n neighbor_geoms[\"distance\"] = distances[0]\n\n if distance_threshold:\n neighbor_geoms = neighbor_geoms[neighbor_geoms.distance > distance_threshold]\n \n # Return indices and distances\n return neighbor_geoms", "title": "" }, { "docid": "1546d6e2909192819d711d30d4d2715d", "score": "0.54806334", "text": "def get_closest_obstacle_distance(self,x,y):\n x, y = int(x), int(y)\n\n # check if we are in bounds\n if x >= self.width or x < 0 or y >= self.height or y < 0:\n return float('nan')\n\n return self.closest_occ[y,x]", "title": "" }, { "docid": "f6f16c3e9603699ef3c6cf2af03a2746", "score": "0.5479858", "text": "def match_to_closest_location(self, locations):\r\n # Convert the list of locations to arrays.\r\n end_lats = []\r\n end_lons = []\r\n for location in locations:\r\n end_lats.append(location[0])\r\n end_lons.append(location[1])\r\n end_lats = np.array(end_lats)\r\n end_lons = 
np.array(end_lons)\r\n\r\n distances = self.get_distances_to_lat_lons(end_lats, end_lons)\r\n closest_locations = np.argmin(distances, axis=0)\r\n return closest_locations", "title": "" }, { "docid": "bdf8d14c75e758483fa886e367084bdf", "score": "0.5461208", "text": "def get_distance_binary_search(coordinates: Tuple[float, float]) -> int:\n # Find four equidistant points on the MKAD with which to compare.\n index_list = [(len(mkad_coords) // 4) * i for i in range(4)]\n distance_list = []\n\n # Obtain the distances from the given coords to the four points\n for index in index_list:\n new_distance = distance(coordinates, (mkad_coords[index][2],\n mkad_coords[index][1])).km\n distance_list.append([index, new_distance])\n\n # Sort list from shorter to longer distances, keeping corresponding index\n distance_list.sort(key=lambda x: x[1])\n\n index_list = []\n\n # If the second shortest point is the same distance as the third shortest\n # point, then the given coordinates are in such a position that we cannot\n # take the first and second shortest point from the four indexes, instead\n # we take the second and third shortest.\n if (distance_list[1][1] == distance_list[2][1]):\n low_point = distance_list[1][0]\n high_point = distance_list[2][0]\n # Otherwise we take the first and second shortest.\n else:\n low_point = distance_list[0][0]\n high_point = distance_list[1][0]\n\n # We now proceed to eliminate points that are in between the two points\n # we have selected. All other points are discarded.\n\n while high_point - low_point > 1:\n low_distance = distance(\n coordinates,\n (mkad_coords[low_point][2], mkad_coords[low_point][1])\n ).km\n high_distance = distance(\n coordinates,\n (mkad_coords[high_point][2], mkad_coords[high_point][1])\n ).km\n\n if low_distance == high_distance:\n low_point += 1\n elif low_distance < high_distance:\n mid_point = (low_point + high_point) // 2\n high_point = mid_point\n elif low_distance > high_distance:\n mid_point = (low_point + high_point) // 2\n low_point = mid_point\n\n # After the while loop we have reduced the possibilities to two points\n # on the MKAD, indexed as low_point and high_point. 
We simply measure\n # the distances to these and return the shortest.\n\n low_distance = distance(\n coordinates,\n (mkad_coords[low_point][2], mkad_coords[low_point][1])\n ).km\n high_distance = distance(\n coordinates,\n (mkad_coords[high_point][2], mkad_coords[high_point][1])\n ).km\n\n if low_distance < high_distance:\n return int(low_distance)\n else:\n return int(high_distance)", "title": "" }, { "docid": "e7d7900777c579f740adcc5a7e431871", "score": "0.5457809", "text": "def _find_nearest_from_rectilinear_vertices(lon_vertices, lat_vertices,\n lon_point, lat_point,\n maximum_distance=None):\n\n f = rectilinear_vertices_to_centroids\n (lon_centroids, lat_centroids) = f(lon_vertices, lat_vertices)\n return _find_nearest_from_rectilinear_centroids(lon_centroids,\n lat_centroids)", "title": "" }, { "docid": "7d162b990429663beaf951aa029aa61c", "score": "0.54533446", "text": "def find_nearest(self, position, at_sublattice=None):\n at_sublattice = self.lattice[at_sublattice] if at_sublattice is not None else -1\n if hasattr(self.impl, 'find_nearest'):\n # use cpp implementation\n return self.impl.find_nearest(position, int(at_sublattice))\n else:\n # fallback numpy implementation\n sites = Sites(self.positions, self.sublattices)\n return sites.find_nearest(position, at_sublattice)", "title": "" }, { "docid": "81e55c9393f488c89f3d79bf9b5d894d", "score": "0.54512286", "text": "def match_coordinates(longitude, latitude):\n\n lon = float(longitude)\n lat = float(latitude)\n pnt = Point(lon, lat)\n result = Biology.objects.filter(geom__distance_lte=(pnt, Distance(m=1)))\n match_result = (False, None)\n if (len(result)) == 1:\n match_result = (True, result)\n elif len(result) >= 1:\n match_result = (False, result)\n elif len(result) == 0:\n match_result = (False, None)\n return match_result", "title": "" }, { "docid": "7ec2f2607315ec5116b444c2c3babf8c", "score": "0.5449289", "text": "def _find_new_inside(self):\n npoints = len(self._points)\n if npoints > 4:\n points = np.vstack((self._points, self._points[1]))\n A = points[:-2]\n B = points[1:-1]\n C = points[2:]\n orient = great_circle_arc.triple_product(A-B, C-B, B)\n if np.sum(orient) < 0.0:\n orient = -1.0 * orient\n midpoint = great_circle_arc.midpoint(A, C)\n candidate = max(zip(orient, midpoint), key=lambda x: x[0])\n inside = candidate[1]\n else:\n # Fall back on computing the mean point\n inside = self._points.mean(axis=0)\n vector.normalize_vector(inside, output=inside)\n return inside", "title": "" }, { "docid": "8ae51fe0c7982db53c7040dc60b8021c", "score": "0.5442965", "text": "def star_closest_to_world_coords(self, ra, dec):\n\n if not len(self):\n raise ValueError(\"database is empty\")\n\n self._execute(\"SELECT id, ra, dec FROM stars\")\n\n closest_id = None\n closest_distance = float('inf')\n coordinates = astromatic.Coordinates(ra, dec)\n for star_id, star_ra, star_dec in self._rows:\n star_coords = astromatic.Coordinates(star_ra, star_dec)\n star_distance = coordinates.distance(star_coords)\n if star_distance < closest_distance:\n closest_id = star_id\n closest_distance = star_distance\n\n return closest_id, closest_distance", "title": "" }, { "docid": "e36f3085d79859f311dd23d1594063b9", "score": "0.54392976", "text": "def _get_nearest(self, pose, window=2):\r\n min_idx = self._idx\r\n max_idx = np.minimum(self._pos.shape[0], self._idx + window)\r\n idx = range(min_idx, max_idx)\r\n nearest = idx[np.argmin(distance(pose[:2], self._pos[idx]))]\r\n self._idx = nearest\r\n\r\n # Check if goal has been reached\r\n if 
distance(pose[:2], self._pos[-1]) <= self._goal_threshold:\r\n self._goal_reached = True\r\n\r\n return nearest", "title": "" }, { "docid": "e3300b5093810499a478443a0c1343f2", "score": "0.54318255", "text": "def find_neighborhood(shapes, lat, lng):\n point = Point(lng, lat)\n\n # sorts the shapes by distance from shape centroid the point\n sorted_shapes = sort_shapes(shapes, point)\n\n # now that shapes are sorted, we should find the\n # boundary shape pretty quick\n for shape in sorted_shapes:\n if shape.polygon.contains(point):\n return shape.name\n\n return None", "title": "" }, { "docid": "741adc48acb2c4eba9ca299514f1c08f", "score": "0.5428988", "text": "def getLocation( min_point: tuple, max_point: tuple ) -> tuple:\n # Unpack the tuples into min/max values\n xmin, ymin = min_point\n xmax, ymax = max_point\n # Take midpoint of x-coordinate and ymax for bottom middle of box\n x_result = ( xmin + xmax ) // 2\n y_result = ymax\n # Return location\n return ( x_result, y_result )", "title": "" }, { "docid": "03b0be6306604db546410e7b94452660", "score": "0.54253185", "text": "def findRoute(self, x1, y1, x2, y2):\r\n pass", "title": "" }, { "docid": "1a740a9f694277f8e6fb36c9495a3e40", "score": "0.5423184", "text": "def get_closest_obstacle_distance(self, x, y):\n x_coord = (x - self.map.info.origin.position.x)/self.map.info.resolution\n y_coord = (y - self.map.info.origin.position.y)/self.map.info.resolution\n if type(x) is np.ndarray:\n x_coord = x_coord.astype(np.int)\n y_coord = y_coord.astype(np.int)\n else:\n x_coord = int(x_coord)\n y_coord = int(y_coord)\n\n is_valid = (x_coord >= 0) & (y_coord >= 0) & (x_coord < self.map.info.width) & (y_coord < self.map.info.height)\n if type(x) is np.ndarray:\n distances = np.float('nan')*np.ones(x_coord.shape)\n distances[is_valid] = self.closest_occ[x_coord[is_valid], y_coord[is_valid]]\n return distances\n else:\n return self.closest_occ[x_coord, y_coord] if is_valid else float('nan')", "title": "" }, { "docid": "3456e7a84b76647762c0113c07daef64", "score": "0.5418462", "text": "def region_index(lats0, lons0): \n Bi = np.zeros(lons0.shape[0]) #The grid cell index where a particle is released at the bottom\n\n for i in range(lons0.shape[0]):\n lo = find_down(Lons,lons0[i]);lon = Lons[lo];\n la = find_down(Lats,lats0[i]);lat = Lats[la];\n Bi[i] = np.where(np.logical_and(vLons==lon,vLats==lat))[0] \n return Bi", "title": "" }, { "docid": "651b63ee59610a63edd359da7fb72936", "score": "0.5415594", "text": "def nearest_neighbour(src_lon, src_lat, dst_lon, dst_lat, src_grid=\"regular\", dst_grid=\"unstructured\", npoints=1, method=\"mean\"):\n # create an array containing all coordinates\n if src_grid == \"regular\":\n if src_lon.ndim == 1:\n lon_2d, lat_2d = np.meshgrid(src_lon, src_lat)\n elif src_lon.ndim == 2:\n if src_lon.shape != src_lat.shape:\n raise ValueError(\"for 2d-coordinates, the shapes have to match!\")\n lon_2d, lat_2d = src_lon, src_lat\n else:\n raise ValueError(\"only 1d- and 2d-coordinates are supported for regular grids\")\n coords = np.stack((lon_2d.flatten(), lat_2d.flatten()), axis=1)\n input_dims = lon_2d.shape\n elif src_grid == \"unstructured\":\n if src_lon.ndim == 1:\n coords = np.stack((src_lon, src_lat), axis=1)\n input_dims = src_lon.shape\n else:\n raise ValueError(\"an unstructured grid is supposed to have 1d-coordinate arrays!\")\n\n # convert point coordinates if given as scalar\n if not hasattr(dst_lon, \"__len__\"):\n dst_lon = np.array((dst_lon,))\n dst_lat = np.array((dst_lat,))\n\n # keep the target coordinates\n 
target_lon = dst_lon\n target_lat = dst_lat\n\n # create coordinates for regular output grid\n if dst_grid == \"regular\" and dst_lon.ndim == 1 and dst_lat.ndim == 1:\n point_lon_2d, point_lat_2d = np.meshgrid(dst_lon, dst_lat)\n dst_lon = point_lon_2d.flatten()\n dst_lat = point_lat_2d.flatten()\n target_shape = point_lon_2d.shape\n else:\n target_shape = (len(dst_lon),)\n\n # create the kd-tree and calculate the indices and the weights for the indices\n kdtree = scipy.spatial.cKDTree(coords)\n\n # estimate the distance between grid points by calculating the distance to the next neighbour for three points\n grid_point_distance, _ = kdtree.query(coords[[0, len(coords) // 2, len(coords)-1]], k=2)\n mean_grid_point_distance = grid_point_distance[:, 1].mean()\n\n # calculate indices and distances for the target points\n if len(dst_lon) == 1:\n distances, indices_flat = kdtree.query((dst_lon[0], dst_lat[0]), k=npoints)\n else:\n distances, indices_flat = kdtree.query(np.stack((dst_lon, dst_lat), axis=1), k=npoints)\n if npoints > 1 and len(distances.shape) == 1:\n distances = np.expand_dims(distances, 0)\n indices_flat = np.expand_dims(indices_flat, 0)\n if npoints > 1:\n if method == \"mean\":\n weights = np.empty(distances.shape)\n weights[:] = 1.0 / npoints\n elif method == \"d-mean\":\n weights = 1.0 / np.maximum(distances, mean_grid_point_distance / 2.0)**2\n weights = weights / np.tile(np.expand_dims(np.sum(weights, 1), 1), npoints)\n else:\n raise ValueError(\"unsupported method: %s\" % method)\n else:\n weights = 1\n if len(input_dims) > 1:\n indices = np.unravel_index(indices_flat, input_dims)\n else:\n indices = indices_flat\n if type(indices) == int:\n indices = np.asarray([indices])\n\n # construct and return the interpolator object\n return NearestNeighbourInterpolator(indices, weights, distances, input_dims, len(dst_lon), npoints, target_shape, target_lon, target_lat)", "title": "" }, { "docid": "4b1d0eb9b1e9408b0119b6b5781d02d6", "score": "0.5405547", "text": "def find_coordinates(numpoints, refcoords, defcoords, defatomcoords):\n refcenter, fitcenter, rotation = qfit(numpoints, refcoords, defcoords)\n newcoords = qtransform(1, defatomcoords, refcenter, fitcenter, rotation)\n # Only return the first coordinates\n return newcoords[0]", "title": "" }, { "docid": "1ed839944b066a0f4bf559cd0a7d2589", "score": "0.54023427", "text": "def FindClosestPoint(self, ):\n ...", "title": "" }, { "docid": "cf310dc4796a27ff572a99502dcb2789", "score": "0.5400604", "text": "def query_coordinates(self, coordinates):\r\n try:\r\n distances, indices = self.tree.query(coordinates, k=1) #, distance_upper_bound=0.1\r\n except ValueError as e:\r\n logger.erro('Unable to parse coordinates:', coordinates)\r\n raise e\r\n else:\r\n results = []\r\n for distance, index in zip(distances, indices):\r\n if not isinf(distance):\r\n result = self.locations[index]\r\n result['distance'] = distance\r\n\r\n results.append(result)\r\n\r\n return results", "title": "" }, { "docid": "c960465a5c8453728418b7c7c8f12741", "score": "0.53992957", "text": "def findAt(self, coordinates: tuple, printWarning: Boolean = True):\n pass", "title": "" }, { "docid": "c960465a5c8453728418b7c7c8f12741", "score": "0.53992957", "text": "def findAt(self, coordinates: tuple, printWarning: Boolean = True):\n pass", "title": "" }, { "docid": "8f96a121329590f95ea92a94091931f0", "score": "0.5396925", "text": "def th_nearest_interp2d(input, coords):\n # take clamp of coords so they're in the image bounds\n x = th.clamp(coords[:, :, 0], 0, 
input.size(1) - 1).round()\n y = th.clamp(coords[:, :, 1], 0, input.size(2) - 1).round()\n\n stride = th.LongTensor(input.stride())\n x_ix = x.mul(stride[1]).long()\n y_ix = y.mul(stride[2]).long()\n\n input_flat = input.view(input.size(0), -1)\n\n mapped_vals = input_flat.gather(1, x_ix.add(y_ix))\n\n return mapped_vals.view_as(input)", "title": "" }, { "docid": "77db11cf9ca2b313312a460149fbd7ce", "score": "0.5393471", "text": "def nearest(self, pt):\n tidx = super(FlatCAMRTreeStorage, self).nearest(pt)\n return (tidx.bbox[0], tidx.bbox[1]), self.objects[tidx.object]", "title": "" }, { "docid": "f1e97c3d2cecb342529408dd6e9e4a19", "score": "0.5390935", "text": "def getIntersectionPoint(non_street_point,street_point,coordinates):\n (x1,y1) = coordinates[non_street_point[0]]\n (x2,y2) = coordinates[non_street_point[1]]\n (x3,y3) = coordinates[street_point[0]]\n (x4,y4) = coordinates[street_point[1]]\n den = (x1-x2)*(y3-y4) - (y1-y2)*(x3-x4)\n n1 = (x1*y2 - y1*x2)\n n2 = (x3*y4 - y3*x4)\n px = (n1*(x3 - x4) - n2*(x1 - x2)) / den\n py = (n1*(y3 - y4) - n2*(y1 - y2)) / den\n return (px,py)", "title": "" }, { "docid": "44ddab67f26a3d04c067d32fe7738816", "score": "0.5386557", "text": "def test_get_near_point():\n sh = SpatialHash(cell_size=10)\n sprite_1 = arcade.SpriteSolidColor(10, 10, center_x=0)\n sprite_2 = arcade.SpriteSolidColor(10, 10, center_x=5)\n sh.add(sprite_1)\n sh.add(sprite_2)\n\n nearby_sprites = sh.get_sprites_near_point((0, 0))\n assert isinstance(nearby_sprites, set)\n assert len(nearby_sprites) == 2\n assert nearby_sprites == set([sprite_1, sprite_2])\n\n nearby_sprites = sh.get_sprites_near_point((20, 0))\n assert isinstance(nearby_sprites, set)\n assert len(nearby_sprites) == 0", "title": "" } ]
63dca1f63f881be5338907f45eeddff6
Creates a SQL context
[ { "docid": "cf9d4d144a78107634dc376e8e35f83b", "score": "0.6197181", "text": "def __init__(self, url=None, connection=None, schema=None):\r\n\r\n if not url and not connection:\r\n raise AttributeError(\"Either url or connection should be provided\" \\\r\n \" for SQL data source\")\r\n\r\n super(SQLContext, self).__init__()\r\n\r\n if connection:\r\n self.connection = connection\r\n self.should_close = False\r\n else:\r\n engine = sqlalchemy.create_engine(url)\r\n self.connection = engine.connect()\r\n self.should_close = True\r\n\r\n self.metadata = sqlalchemy.MetaData()\r\n self.metadata.bind = self.connection.engine\r\n self.schema = schema", "title": "" } ]
[ { "docid": "22361e6b7fd296a7df81b6110d25b91b", "score": "0.68279225", "text": "def _make_context():\n return {'app': app, 'db': db}", "title": "" }, { "docid": "4c9b643c0b896ecd18fb03e4b3fd641b", "score": "0.6694832", "text": "def sql_context(self):\n if not self._sql_context:\n import pyspark.sql\n self._sql_context = pyspark.sql.SQLContext(self.context())\n return self._sql_context", "title": "" }, { "docid": "78512e5377e9ce4ef25c1f733886afb2", "score": "0.667269", "text": "def create_context(cls):\n pass", "title": "" }, { "docid": "d4b71f5e84a0ea98d66c4f057d26e875", "score": "0.6592965", "text": "def _make_context():\n return {'app': app, 'db': db, 'User': User}", "title": "" }, { "docid": "1401ef394cb69f1c31de173d34e1320e", "score": "0.6579943", "text": "def _make_context():\n return {'app': app, 'db': db, 'User': User}", "title": "" }, { "docid": "b8d86042691083f634b2bee3382ffcae", "score": "0.6545147", "text": "def _make_context():\n\n return {\n 'app': app,\n 'db': db,\n 'User': User\n }", "title": "" }, { "docid": "5214707e030aec7c546d18e1fa94fe03", "score": "0.65256286", "text": "def _make_context():\n return {'app': app,\n 'db': db,\n 'User': User\n }", "title": "" }, { "docid": "60d04494aa38c7b632691e33d5f9bf73", "score": "0.6358311", "text": "def build_from_environment():\n\n pg_username = os.environ['POSTGRES_USERNAME']\n pg_password = os.environ['POSTGRES_PASSWORD']\n\n try:\n conn = psycopg2.connect(\"user=\" + pg_username + \" host='localhost' password=\" + pg_password)\n # program ends if the connection fails.\n except psycopg2.OperationalError, oe:\n raise oe\n sys.exit(1)\n\n return PgDbContext(conn)", "title": "" }, { "docid": "3e6477a1c1cf47b95e5bf15126746aca", "score": "0.62872374", "text": "def _make_context():\n return {'User': User, 'CreditCard': CreditCard, 'Transaction': Transaction, 'db': db, 'jsonify':jsonify}", "title": "" }, { "docid": "d910fe18fbef4319354befcc35d03628", "score": "0.61972845", "text": "def create_db(self):\n return None", "title": "" }, { "docid": "3bd7d69a3dfe3dd8a3d938923ea3db8d", "score": "0.61969036", "text": "def create_db(self):", "title": "" }, { "docid": "b0ecc694dfd3ed2931ba205d16a5ce25", "score": "0.6186409", "text": "def create_db():\n init_postgres(current_app.config['SQLALCHEMY_DATABASE_URI'])", "title": "" }, { "docid": "20f5026a936d29e11c080ff400ee5b64", "score": "0.61260194", "text": "def make_shell_context():\n return {'db': db, 'User': User, 'Post': Post}", "title": "" }, { "docid": "45993aef131bdde8469803413d2faacd", "score": "0.60089785", "text": "def prepare_database(config):\n global Session\n engine = sqlalchemy.create_engine(config.db_string)\n session_factory = sqlalchemy.orm.sessionmaker(bind=engine)\n Session = sqlalchemy.orm.scoped_session(session_factory)", "title": "" }, { "docid": "fedddbc9c1637fc554a9ee398b3f7b17", "score": "0.5995722", "text": "def init_db(self):\n\n # The user can provide a custom string\n if self.database is None:\n self.logger.error(\"You must provide a database url, exiting.\")\n sys.exit(1)\n\n self.engine = create_engine(self.database, convert_unicode=True)\n self.session = scoped_session(\n sessionmaker(autocommit=False, autoflush=False, bind=self.engine)\n )\n\n # Database Setup\n Base.query = self.session.query_property()\n\n # import all modules here that might define models so that\n # they will be registered properly on the metadata. 
Otherwise\n # you will have to import them first before calling init_db()\n import expfactory.database.models\n\n self.Base = Base\n self.Base.metadata.create_all(bind=self.engine)", "title": "" }, { "docid": "8cfb194104acbd857427d389d5d1ff7e", "score": "0.59844375", "text": "def generate_context(self) -> Context:\n self._transient_context = Context()\n return self._transient_context", "title": "" }, { "docid": "512dd1266a0b346338850a76b4c521fa", "score": "0.5969605", "text": "def make_context(self, engine, args):\n args = self.normalize_args(args)\n _, ctx = self._make_argkey_and_context(engine, args)\n return ctx", "title": "" }, { "docid": "6a5ab511e6551482b2a0dabf9b2acedc", "score": "0.5920828", "text": "def shell_context():\n from .models import db, Politico, Reembolso, Feedback\n return dict(db=db, Politico=Politico,\n Reembolso=Reembolso, Feedback=Feedback)", "title": "" }, { "docid": "bc792ec70f31d198a4b10b2af6b35849", "score": "0.5920192", "text": "def __init__(self):\n engine = db_connect()\n create_tables(engine)\n self.Session = sessionmaker(bind=engine)", "title": "" }, { "docid": "6373708b7fd02359f3bb1b349b87bcb6", "score": "0.59160876", "text": "def make_shell_context():\n return dict(app=app,\n db=models.db,\n User=models.User,\n Post=models.Post,\n Comment=models.Comment,\n Tag=models.Tag,\n Server=Server)", "title": "" }, { "docid": "7664c5e9b3875976116dd44e487725fd", "score": "0.590667", "text": "def session_context(func):\r\n def wrapper(*args, **kwargs):\r\n self = args[0]\r\n with self._create_db_session() as db:\r\n self.db = db\r\n return func(*args, **kwargs)\r\n return wrapper", "title": "" }, { "docid": "99e3181df349f4040581c96e82492919", "score": "0.5887355", "text": "def init_database(cls):\n conn = config.db_connection_string(Settings)\n cls.Engine = create_engine(conn, echo=Settings.get('DEBUG'))\n cls.Session = sessionmaker(bind=cls.Engine)\n return cls", "title": "" }, { "docid": "f40b1d77807d96063903410f8f9b0a5a", "score": "0.58802176", "text": "def __init__(self, path=None, echo=False):\r\n super(SqlAlchemyDB, self).__init__()\r\n if path is None:\r\n path = _SQL_ALCHEMY_MEMORY\r\n else:\r\n if path != _SQL_ALCHEMY_MEMORY:\r\n dirname = os.path.dirname(path)\r\n if dirname and dirname != \"\":\r\n os.makedirs(dirname, exist_ok=True)\r\n path = \"sqlite:///\" + path\r\n self._engine = create_engine(path, echo=echo)\r\n Base.metadata.create_all(self._engine)\r\n self._Session = sessionmaker(bind=self._engine)", "title": "" }, { "docid": "e91ab6ab7af3d2303df2d758e664ee48", "score": "0.5877039", "text": "def init_db():\n # We are setting the module variables here for the first time, so disable the warning\n global DB_USER_TABLE # pylint: disable=global-variable-undefined\n global DB_CUSTOMER_TABLE # pylint: disable=global-variable-undefined\n global DB_USER_CUSTOMER_RELS_TABLE # pylint: disable=global-variable-undefined\n global DB_TICKET_TABLE # pylint: disable=global-variable-undefined\n global DB_COMMENT_TABLE # pylint: disable=global-variable-undefined\n\n db = TinyDB(app.config['DB_NAME'])\n\n DB_USER_TABLE = db.table('users')\n DB_CUSTOMER_TABLE = db.table('customers')\n DB_USER_CUSTOMER_RELS_TABLE = db.table('user_customer_rels')\n DB_TICKET_TABLE = db.table('tickets')\n DB_COMMENT_TABLE = db.table('comments')", "title": "" }, { "docid": "2e3f1f32101d4306b8b932b88f41c4a1", "score": "0.5877013", "text": "def __init__(self, *args, **kwargs):\n self.context = kwargs.get(\"config\")\n if hasattr(self.context, 'database'):\n # XXX this should be replaced 
with connection_context instead\n self.context.database['database_host'] = \\\n self.context.database.database_hostname\n self.context.database['database_port'] = \\\n self.context.database.database_port\n self.context.database['database_name'] = \\\n self.context.database.database_name\n self.context.database['database_username'] = \\\n self.context.database.database_username\n self.context.database['database_password'] = \\\n self.context.database.database_password\n self.database = db.Database(self.context.database)\n else:\n # the old middleware\n self.database = db.Database(self.context)", "title": "" }, { "docid": "e928fa48f22ea0bcb4157c6581181d4f", "score": "0.58652604", "text": "def make_shell_context():\n\n return dict(app=app, db=db, User=User)", "title": "" }, { "docid": "e928fa48f22ea0bcb4157c6581181d4f", "score": "0.58652604", "text": "def make_shell_context():\n\n return dict(app=app, db=db, User=User)", "title": "" }, { "docid": "c657b038e3358f2fc4f4ba1d83a4beca", "score": "0.58486253", "text": "def create():\n\tcreate_db()", "title": "" }, { "docid": "954520a01001cb314c73bbf161c84eaa", "score": "0.58432645", "text": "def __init__(\n self,\n sql_username,\n sql_password,\n sql_host,\n sql_port,\n sql_db,\n sql_url_template=(\"mysql+mysqldb://{username}:{password}@\"\n \"{host}:{port}/{db}?charset=utf8mb4\"),\n **kwargs\n ):\n\n # Internalize arguments.\n self.sql_username = sql_username\n self.sql_password = sql_password\n self.sql_host = sql_host\n self.sql_port = sql_port\n self.sql_db = sql_db\n self.sql_url_template = sql_url_template\n\n # Inspecting the presence of keyword arguments and (should they not be\n # defined) setting defaults.\n self.sql_engine_pool_size = kwargs.get(\"sql_engine_pool_size\", 1)\n self.sql_engine_pool_recycle = kwargs.get(\n \"sql_engine_pool_recycle\", 3600\n )\n self.sql_engine_echo = kwargs.get(\"sql_engine_echo\", False)\n self.mysqldb_sscursor = kwargs.get(\"mysqldb_sscursor\", False)\n self.expire_on_commit = kwargs.get(\"expire_on_commit\", False)\n\n # create DB engine.\n self.engine = self.connect()\n\n # create new session.\n self.session_factory = sqlalchemy.orm.sessionmaker(\n bind=self.engine,\n expire_on_commit=self.expire_on_commit\n )", "title": "" }, { "docid": "d731712486f72ab154bcacf33f884f62", "score": "0.582744", "text": "def init_database(self):\n engine = create_engine('sqlite:///todo.db?check_same_thread=False')\n self.Base.metadata.create_all(engine)\n self.session = sessionmaker(bind=engine)()", "title": "" }, { "docid": "e82aa9a0d85f00aefbb11c18077005e3", "score": "0.5823347", "text": "def make_context(source, frmat='table'):\n return Context.fromstring(source, frmat=frmat)", "title": "" }, { "docid": "34d719710f9a8478ca30d1a3c704876b", "score": "0.5819967", "text": "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.session = sessionmaker(bind=engine)", "title": "" }, { "docid": "0f256e4d6ecd9594b4bc894645fa9f35", "score": "0.58028", "text": "def _session_maker(self) -> ContextManager[Session]:\r\n session = self._Session()\r\n try:\r\n yield session\r\n session.commit()\r\n except DBAPIError:\r\n session.rollback()\r\n raise\r\n finally:\r\n session.close()", "title": "" }, { "docid": "8866cfd358f5b03b4c13d3794cb52783", "score": "0.5800551", "text": "async def database(self, *_):\n config = {'dsn': os.getenv('DATABASE_URL')}\n self.db = await aiopg.sa.create_engine(**config)\n yield\n self.db.close()\n await self.db.wait_closed()", "title": "" }, { "docid": 
"372e7ae0b7c210e988d2526375a9a85f", "score": "0.5800246", "text": "def __init__(self):\r\n self.conn = create_connection(DATABASE_PATH)", "title": "" }, { "docid": "19571ab4a7054106e5000b1f245500ce", "score": "0.57969123", "text": "def initialize_database():\n\n global DatabaseSession\n from config import config\n\n engine = create_engine(\n config[saq.CONFIG['global']['instance_type']].SQLALCHEMY_DATABASE_URI, \n **config[saq.CONFIG['global']['instance_type']].SQLALCHEMY_DATABASE_OPTIONS)\n\n DatabaseSession = sessionmaker(bind=engine)\n saq.db = scoped_session(DatabaseSession)", "title": "" }, { "docid": "b4b39dcc6bd9c89b0956e0d369da9deb", "score": "0.5791556", "text": "def _initTestingDB(): \n from sqlalchemy import create_engine\n engine = create_engine('sqlite://')\n from .models import (\n Base,\n TodoUser,\n )\n DBSession.configure(bind=engine)\n Base.metadata.create_all(engine)\n \n return DBSession", "title": "" }, { "docid": "0c549d12998dd88468d960202c7a0aaa", "score": "0.57832694", "text": "def mkcontext(self,\n context= [],\n contextobj=None):\n if contextobj == None:\n raise ValueError, \"mkcontext: contextobj is None\"\n return jsoncall.do_call(\"mkcontext\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password,\\\n 'context':context,\\\n 'contextobj':contextobj.__dict__},\n self.connection)", "title": "" }, { "docid": "d225a91ad16014e80a89c3b1afb7f90a", "score": "0.57599616", "text": "def make_shell_context():\n return dict(server=server,\n db=db,\n User=User,\n Article=Article,\n Topic=Topic,\n Comment=Comment,\n Subscription=Subscription)", "title": "" }, { "docid": "60284ebce2ca97eb6768c58fa2df711f", "score": "0.57571405", "text": "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)", "title": "" }, { "docid": "60284ebce2ca97eb6768c58fa2df711f", "score": "0.57571405", "text": "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)", "title": "" }, { "docid": "7b3b999ddc7dd29dcd305eb277bbcd16", "score": "0.5750913", "text": "def init_db():\n return SQLAlchemy(app, session_options={\n 'expire_on_commit': False,\n })", "title": "" }, { "docid": "2fde45b60b00c93e74b97456fd80ec79", "score": "0.5750327", "text": "def make_shell_context():\n\n context = dict(app=app, db=db)\n for class_ in [FormData, CaseData, CaseIndex, Synclog, OwnershipCleanlinessFlag]:\n context[class_.__name__] = class_\n return context", "title": "" }, { "docid": "3b457a0c0061049dcaa30aae04dbe1d7", "score": "0.57479906", "text": "def _db():\n db_fd, db_path = tempfile.mkstemp()\n app = create_app()\n app.config['TESTING'] = True\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n app.config['DATABASE'] = db_path\n app.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite://\"\n\n with app.app_context():\n db.init_app(app)\n db.create_all()\n\n yield db\n\n os.close(db_fd)\n os.unlink(db_path)", "title": "" }, { "docid": "28583a5c3fa06235c2e8dab4b0d88f9e", "score": "0.5745354", "text": "def make_shell_context():\n\n return dict(app=app, db=db, Posts = Posts, Comments=Comments)", "title": "" }, { "docid": "4ebbae0cad9ef8def93995393582a79d", "score": "0.57446367", "text": "def _initDb(self):\n CREATE_TOKEN_TABLE = '''create table token\n (token text, id int primary key)\n '''\n CREATE_DOCS_TABLE = '''create table docs\n (local_path text, resource_id text primary key, etag text, title text)\n '''\n \n try:\n self.db.execute(CREATE_TOKEN_TABLE)\n 
self.db.execute(CREATE_DOCS_TABLE)\n except sqlite3.OperationalError, error:\n pass", "title": "" }, { "docid": "f3f110e2e4bcbd7a581b4cdce1d1c83a", "score": "0.5742504", "text": "def dbinit( *args, **kwargs ):", "title": "" }, { "docid": "e7d7bf46ca0da667d19f9ff4cd67223f", "score": "0.574025", "text": "def get_sql_context_instance(spark_configuration):\n if 'sparkSessionSingletonInstance' not in globals():\n globals()['sparkSessionSingletonInstance'] = SparkSession.builder.config(conf=spark_configuration).getOrCreate()\n return globals()['sparkSessionSingletonInstance']", "title": "" }, { "docid": "b2c62a3dd4afec88118519fd468ad628", "score": "0.5739886", "text": "def db_connect():\n return create_engine(URL(**product_crawlers.settings.DATABASE))", "title": "" }, { "docid": "00a9888a639feb1a52665f6c5a7b0ab4", "score": "0.5734747", "text": "def construct_global_ctx(self):\n super().construct_global_ctx()\n gtx = self.gtx\n rc = self.rc\n if \"groups\" in self.needed_dbs:\n rc.pi_id = get_pi_id(rc)\n rc.coll = f\"{TARGET_COLL}\"\n try:\n if not rc.database:\n rc.database = rc.databases[0][\"name\"]\n except:\n pass\n colls = [\n sorted(\n all_docs_from_collection(rc.client, collname), key=_id_key\n )\n for collname in self.needed_dbs\n ]\n for db, coll in zip(self.needed_dbs, colls):\n gtx[db] = coll\n gtx[\"all_docs_from_collection\"] = all_docs_from_collection\n gtx[\"float\"] = float\n gtx[\"str\"] = str\n gtx[\"zip\"] = zip", "title": "" }, { "docid": "192cf231f1d4576cad44e57e69213fd6", "score": "0.57337713", "text": "def create_db_statement(self):\n return Engine.create_db_statement(self).replace(\"DATABASE\", \"SCHEMA\")", "title": "" }, { "docid": "d348e563e20bcad9aa2d1d8708ce814d", "score": "0.57224196", "text": "def __init__(self, connection_url, echo=False):\n if not connection_url:\n raise ValueError('No database connection URL provided.')\n engine = create_engine(connection_url, echo=echo)\n PipelineRun.metadata.create_all(engine)\n self.session_factory = sessionmaker(bind=engine)", "title": "" }, { "docid": "fd2a6fd00d1fe9979eed36be7f190638", "score": "0.57155734", "text": "def init_with_context(self, context):\n pass", "title": "" }, { "docid": "5553f3498bd2c6bed14afdb824210e97", "score": "0.57128376", "text": "def db_session_context(db_name):\n try:\n db = psycopg2.connect(database=db_name)\n yield db\n finally:\n db.close()", "title": "" }, { "docid": "4794c5f681478a05bfdbc767b1e61ac2", "score": "0.5697702", "text": "def setup_db():\n\n engine = config['tg.app_globals'].sa_engine\n # model.init_model(engine)\n # model.metadata.create_all(engine)", "title": "" }, { "docid": "5e3d4bddff67a3bc1a4ec20ccaf8c29b", "score": "0.5693486", "text": "def _create_db(self):\n self.db = easydms.dbcore.Database(\":memory:\")\n self.db.create_db()", "title": "" }, { "docid": "519c3acbe9341d4d5642815536e0fb71", "score": "0.56910944", "text": "def init_database(self):\n # init_database(self.engine)", "title": "" }, { "docid": "ea3b1ea3d4565089f8aad10f2c647b1d", "score": "0.56887364", "text": "def init_db(connection, echo):\r\n\r\n # create the database tables as defined\r\n engine = create_engine(connection, echo=echo)\r\n Base.metadata.create_all(engine)\r\n\r\n # create a session\r\n Base.metadata.bind = engine\r\n BaseSession = sessionmaker(bind=engine)\r\n session = BaseSession()\r\n\r\n # set the shared Model session\r\n Model.use_session(session)\r\n\r\n return (engine, session)", "title": "" }, { "docid": "aab77d341977da7da8bde92082e1194d", "score": "0.5685786", "text": "def 
sql_engine(tmpdir: Path) -> Generator[SQLEngine, None, None]:\n db_file_path = tmpdir / \"test.db\"\n sql_engine = create_sql_engine(db_file_path)\n yield sql_engine\n sql_engine.dispose()", "title": "" }, { "docid": "24c2f9a03f641ca31795b69b41185432", "score": "0.5685584", "text": "def create_gt_database_template(self):\n pass\n with self.connection as cursor:\n fn = os.path.join(os.path.dirname(__file__), 'gtlog.sql')\n self.cursor.execute(open(fn, \"r\").read())", "title": "" }, { "docid": "33276f8ae22ca4dc106cf877316baa01", "score": "0.5678844", "text": "def __init__(self, sql_config = {}, verbose = True):\n\n self.sql_config = sql_config\n self.verbose = verbose\n self._setup()", "title": "" }, { "docid": "996f2d647af300387d2170a56979ba93", "score": "0.56667346", "text": "def database_setup():\n Base.metadata.create_all(bind=engine)\n db = LocalSession()\n try:\n populate_from_json(db, Vector, str(VECTORS))\n populate_from_json(db, Gender, str(GENDERS))\n populate_from_json(db, Tag, str(TAGS))\n finally:\n db.close()", "title": "" }, { "docid": "13887516abec59b7a50b684b708b8618", "score": "0.5660363", "text": "def init_db(self):\n self.db_config = databaseutils.process_db_config(self.config['db'])\n\n from sqlalchemy import create_engine\n from sqlalchemy.orm import sessionmaker, scoped_session\n self.engine = create_engine(self.db_config.constr, pool_recycle=3600)\n self.session = scoped_session(sessionmaker(bind=self.engine))\n\n # Make sure tables are created\n DB_Base.metadata.create_all(self.engine)\n\n if self.sqlite_file is not None:\n dbname = 'sqlite:///%s' % self.sqlite_file\n self.sqlite_engine = create_engine(dbname, echo=False)\n self.sqlite_session = scoped_session(sessionmaker(bind=self.sqlite_engine))\n DB_Base.metadata.create_all(self.sqlite_engine)\n logger.info('Using SQLite %s' % self.sqlite_engine)", "title": "" }, { "docid": "c8cbb1fc0adf1f17ea7355a562797994", "score": "0.56507915", "text": "def generate_database_object(**kwargs):\n return app.database.Database(\"test.db\", **kwargs)", "title": "" }, { "docid": "39cc6c47e34de172016a324ad010e821", "score": "0.5650551", "text": "def create_connection(self, context, *, engine):\n try:\n connection = engine.connect()\n except OperationalError as exc:\n raise UnrecoverableError('Could not create SQLAlchemy connection: {}.'.format(str(exc).replace('\\n', ''))\n ) from exc\n\n with connection:\n yield connection", "title": "" }, { "docid": "20852cc79ae276c170ed99e88f0a5cf9", "score": "0.56468374", "text": "def _create_sqlite_file_engine( conn=next( file_path_generator ), echo=True ):\n print( \"creating connection: %s \" % conn )\n return create_engine( conn, echo=echo )", "title": "" }, { "docid": "bbe3abd6806bbb4df2cd9defaf0af95e", "score": "0.56466085", "text": "def create_engine(self):\n return create_engine('sqlite:///' + self.database_name, echo=True)", "title": "" }, { "docid": "07de65ef0e9c1dac254701f6f539b0d5", "score": "0.56367636", "text": "def create_sqlite_engine( echo=False ):\n conn = 'sqlite:///:memory:'\n print( \"creating connection: %s \" % conn )\n return create_engine( conn, echo=False )", "title": "" }, { "docid": "55795f4779f4c418d600fa5b252ccd7d", "score": "0.56306756", "text": "def create_database():\n with connection:\n connection.execute(CREATE_MOVIE_TABLE)\n connection.execute(CREATE_USER_TABLE)\n connection.execute(CREATE_WATCHED_TABLE)", "title": "" }, { "docid": "a529a94312627b14eb74b7678bd98687", "score": "0.5626167", "text": "def get_sqls(self):\n return {\n \"prepare_check\": \"select 
id from dbo.sysobjects where id = object_id('{0}')\".format(self.table_name),\n \"prepare_create\": \"create table {0} (channel NVARCHAR(20), channel_user_id NVARCHAR(100), timestamp DATETIME2, topic_name NVARCHAR(100), topic_status NVARCHAR(100), topic_previous NVARCHAR(4000), topic_priority INT, data NVARCHAR(MAX), primary key(channel, channel_user_id))\".format(self.table_name),\n \"get_context\": \"select top 1 * from {0} where channel=? and channel_user_id=?\".format(self.table_name),\n \"save_context\": \"\"\"\n merge into {0} as A\n using (select ? as channel, ? as channel_user_id, ? as timestamp, ? as topic_name, ? as topic_status, ? as topic_previous, ? as topic_priority, ? as data) as B\n on (A.channel = B.channel and A.channel_user_id = B.channel_user_id)\n when matched then\n update set timestamp=B.timestamp, topic_name=B.topic_name, topic_status=B.topic_status, topic_previous=B.topic_previous, topic_priority=B.topic_priority, data=B.data\n when not matched then\n insert (channel, channel_user_id, timestamp, topic_name, topic_status, topic_previous, topic_priority, data) values (B.channel, B.channel_user_id, B.timestamp, B.topic_name, B.topic_status, B.topic_previous, B.topic_priority, B.data);\n \"\"\".format(self.table_name)\n }", "title": "" }, { "docid": "04f8eac0f9fbab51d117b39064c06d77", "score": "0.5624793", "text": "def init_database():\n\n # The current dir should be the script home\n homedir = os.path.normpath(\n os.path.dirname(\n sys.executable if getattr(sys, 'frozen', False) else\n __file__)) # cx_Freeze compatibility\n os.chdir(homedir)\n\n engine = create_engine(\"sqlite:///data.db\")\n BASE.metadata.bind = engine\n BASE.metadata.create_all()\n\n return engine", "title": "" }, { "docid": "5699fa8fd217e24b0877e1e82519e960", "score": "0.5613715", "text": "def session_context(pytestconfig, request, tmp_env):\n ctx = Context.only()\n\n # Temporary, empty local directory for local data\n session_tmp_dir = Path(request.config._tmp_path_factory.mktemp(\"data\"))\n\n # Set the cache path according to whether pytest --local-cache was given. If True,\n # pick up the existing setting from the user environment. 
If False, use a pytest-\n # managed cache directory that persists across test sessions.\n ctx.cache_path = (\n ctx.local_data.joinpath(\"cache\")\n if request.config.option.local_cache\n # TODO use pytestconfig.cache.mkdir() when pytest >= 6.3 is available\n else Path(pytestconfig.cache.makedir(\"cache\"))\n )\n\n # Other local data in the temporary directory for this session only\n ctx.local_data = session_tmp_dir\n\n platform_name = \"message-ix-models\"\n\n # Add a platform connected to an in-memory database\n # NB cannot call Config.add_platform() here because it does not support supplying a\n # URL for a HyperSQL database.\n # TODO add that feature upstream.\n ixmp_config.values[\"platform\"][platform_name] = {\n \"class\": \"jdbc\",\n \"driver\": \"hsqldb\",\n \"url\": f\"jdbc:hsqldb:mem://{platform_name}\",\n }\n\n # Launch Platform and connect to testdb (reconnect if closed)\n mp = Platform(name=platform_name)\n mp.open_db()\n\n ctx.platform_info[\"name\"] = platform_name\n\n yield ctx\n\n ctx.close_db()\n ixmp_config.remove_platform(platform_name)", "title": "" }, { "docid": "de43df056ad075876853bc882b943f6a", "score": "0.5610763", "text": "def init_db():\n with LoggerApi.app_context():\n db = get_db()\n with LoggerApi.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "title": "" }, { "docid": "79fe40448334a47535d6216cd550a9fd", "score": "0.56061286", "text": "def context(self) -> CONTEXT:", "title": "" }, { "docid": "49bc95c1c1caf1564178c54a906156d9", "score": "0.55998296", "text": "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n logging.info(\"****DuplicatesPipeline: database connected****\")", "title": "" }, { "docid": "569aed92cd6b3dbc0d12e81fc8780059", "score": "0.55969864", "text": "def __init__(self):\n engine = db_connect()\n self.Session = sessionmaker(bind=engine)", "title": "" }, { "docid": "4e2473f46ea6b831e377a1497d3a192e", "score": "0.5595775", "text": "def session_context(self):\n session = self.Session()\n try:\n yield session\n session.commit()\n except: # noqa E722\n session.rollback()\n raise\n finally:\n session.close()", "title": "" }, { "docid": "4aed93ec66a3914852af452593b745dd", "score": "0.55913657", "text": "def session_scope( engine=None ):\n try:\n if engine is None:\n engine = initialize_engine( environment.ENGINE )\n # DataTools's handle to database at global level\n # SessionFactory = make_scoped_session_factory(engine)\n SessionFactory = make_session_factory( engine )\n\n if environment.ENGINE == 'sqlite' or environment.ENGINE == 'sqlite-file':\n # We need to get the db into memory when start up\n # environmental variables will determine details of the\n # db\n create_db_tables( engine )\n\n session = SessionFactory()\n yield session\n session.commit()\n\n except:\n session.rollback()\n raise\n\n finally:\n if session:\n session.close()", "title": "" }, { "docid": "9acf907fd3230a868c35813b62d84a30", "score": "0.5588803", "text": "def init():\n\n # delete existing file\n if os.path.exists(DBFILE):\n os.remove(DBFILE)\n\n db = sqlite3.connect(DBFILE)\n # create tables\n create(db, PARAGRAPH, \"paragraph\")\n create(db, QUESTION, \"question\")\n create(db, ANSWER, \"answer\")\n\n return db", "title": "" }, { "docid": "0da328eecf2a7a047cbc5c450aed8410", "score": "0.5585826", "text": "def __init__(self, db_sql_path, mode='r', verbose=1):\n \n self._exec_date = datetime.now()\n if not os.path.exists(db_sql_path):\n raise Exception('path %s does 
not exist'%db_sql_path)\n if not os.path.isdir(db_sql_path):\n raise Exception('path %s is not a directory'%db_sql_path)\n db_sql_path = os.path.abspath(db_sql_path)\n self._path = os.path.join(db_sql_path, 'database.sql')\n if mode not in ['r', 'w']:\n raise Exception('Mode must be either r: read only or w: read and write')\n self._mode = mode\n self._verbose = verbose\n self.__within_context = False\n self.__special_exit = None\n self.__initialized = False\n if self._mode == 'r':\n self.__initialization__()", "title": "" }, { "docid": "58ba988fa59c6c64c5c0874596e05198", "score": "0.55807924", "text": "def _context():\n global _trident_context\n if _trident_context is None:\n _trident_context = _Context()\n return _trident_context", "title": "" }, { "docid": "8ed913bfec489c7d0a3fb53665a4ff27", "score": "0.5580359", "text": "def init_db(self):\n self.db_config = databaseutils.process_db_config(self.config['db'])\n\n from sqlalchemy import create_engine\n from sqlalchemy.orm import sessionmaker, scoped_session\n self.engine = create_engine(self.db_config.constr, pool_recycle=3600)\n self.session = scoped_session(sessionmaker(bind=self.engine))\n\n # Make sure tables are created\n DB_Base.metadata.create_all(self.engine)", "title": "" }, { "docid": "6977fcfed11e3cce4391ba1343ba89d8", "score": "0.5579277", "text": "def _context_new(self):\n assert self._pa_mainloop is not None\n app_name = self._get_app_name()\n context = pa.pa_context_new(self._pa_mainloop,\n app_name.encode('ASCII')\n )\n return context", "title": "" }, { "docid": "86f4f54fcea59b51df4534630b411709", "score": "0.5578862", "text": "def __init__(self):\n engine = connect_to_db()\n create_lyrics_table(engine) #declarative base stuff\n self.Session = sessionmaker(bind=engine)\n\n # self.create_connection()\n # self.create_table()", "title": "" }, { "docid": "a3eb57211c08ee9b7eb0f801656f3676", "score": "0.5578381", "text": "def setup_db():\n engine = create_engine(settings.DATABASE)\n ModelBase.metadata.bind = engine\n ModelBase.metadata.create_all(engine)\n\n return scoped_session(sessionmaker(bind=engine))", "title": "" }, { "docid": "a79a7efbb8a73688769289c54b63f4e5", "score": "0.557326", "text": "def create_database():\n create_db(app)", "title": "" }, { "docid": "70c8418c7da4556e3f9c626b6781540a", "score": "0.5572984", "text": "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n logging.info(\"****SaveRestaurantsPipeline: database connected****\")", "title": "" }, { "docid": "c0d535c6c4943d5eaa8e53098caf59f1", "score": "0.5565452", "text": "def get_template_context(self):\n context = super(Spamassassin, self).get_template_context()\n if self.dbengine == \"postgres\":\n store_module = \"Mail::SpamAssassin::BayesStore::PgSQL\"\n dsn = \"DBI:Pg:dbname={};host={};port={}\".format(\n self.dbname, self.dbhost, self.dbport)\n else:\n store_module = \"Mail::SpamAssassin::BayesStore::MySQL\"\n dsn = \"DBI:mysql:{}:{}:{}\".format(\n self.dbname, self.dbhost, self.dbport)\n context.update({\n \"store_module\": store_module, \"dsn\": dsn, \"dcc_enabled\": \"#\"})\n return context", "title": "" }, { "docid": "83bd8b12c77cde96e2493c3709df894d", "score": "0.55614823", "text": "def db_connect():\n if 'db' not in g:\n g.db = sql.connect(current_app.config[\"DATABASE\"], detect_types=sql.PARSE_DECLTYPES)\n g.db.row_factory = sql.Row\n return g.db", "title": "" }, { "docid": "3d0e8a68e10c671dbacccbfff7c8a38c", "score": "0.5556699", "text": "def init_context(self) -> None:\n 
self.context = AKSManagedClusterContext(\n self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.CREATE\n )", "title": "" }, { "docid": "c728dfa0817337b4b45ef95022b84cbf", "score": "0.55560327", "text": "def logic_db_engine(self):\n try:\n boto_session = boto3.Session(profile_name='loidsig')\n except:\n boto_session = boto3.Session()\n sm_client = boto_session.client(\n service_name='secretsmanager',\n region_name='us-east-1',\n endpoint_url='https://secretsmanager.us-east-1.amazonaws.com'\n )\n get_secret_value_response = sm_client.get_secret_value(SecretId='Loidsig_DB')\n cred_dict = ast.literal_eval(get_secret_value_response['SecretString'])\n db_user, db_pass = cred_dict['username'], cred_dict['password']\n db_host, db_port, db_name = cred_dict['host'], cred_dict['port'], cred_dict['dbname']\n try:\n postgres_engine = create_engine(f'postgresql://{db_user}:{db_pass}@{db_host}:{db_port}/{db_name}')\n except Exception as e:\n print(\"Unable to connect to postgres! Error: {}\".format(e))\n raise\n return postgres_engine", "title": "" }, { "docid": "e821c05bc98bd1105a4a30258244d9a7", "score": "0.5545746", "text": "def create(self) -> SQLAlchemy:\n self.db.create_all()\n self.db.session.commit()\n return self.db", "title": "" }, { "docid": "737ff63c6084fac9d5d6b8952505d61d", "score": "0.55449027", "text": "def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()", "title": "" }, { "docid": "b9c50738f72c88bc6fd9de3b3f6fe293", "score": "0.5543919", "text": "def make_data_connection():\n\n global _engine # pylint: disable=W0603\n\n db_url = 'postgres://{0}:{1}@{2}:{3}/{4}'.format(DataBase.USER,\n DataBase.PASSWORD,\n DataBase.HOST,\n DataBase.PORT,\n DataBase.DB_NAME)\n _engine = create_engine(db_url, echo=DataBase.ECHO)", "title": "" }, { "docid": "7810d7680fea6ef69953e371a29f2795", "score": "0.5539203", "text": "def __init__(self):\n engine = db_connect()\n create_reals_table(engine)\n self.Session = sessionmaker(bind=engine)", "title": "" }, { "docid": "496a18896d6d8a741dbc034811867c05", "score": "0.5536988", "text": "def create_student_db(connection):\r\n with connection:\r\n connection.execute(CREATE_TABLE_STUDENTS_DATA)", "title": "" }, { "docid": "c40966d70f9366d76af8dc418610d536", "score": "0.5535386", "text": "def sqlite_setup(driver=':memory:', extensions=None):\n conn = __Handler(driver, extensions)\n return conn.conn", "title": "" }, { "docid": "0a08683a5e1cfd691dc70b2270151524", "score": "0.5531912", "text": "def initialize_sqlite_memory():\n global engine\n if engine is None:\n engine = create_engine(\"sqlite://\")\n Session.configure(bind=engine)\n _populate()", "title": "" }, { "docid": "e635ebbb0a26390cc87cb0bb29f5e263", "score": "0.55299956", "text": "def init():\n print(\"Executing initialization\")\n print(db.dsn)\n cursor = yield momoko.Op(\n db.execute,\n \"\"\"\n DROP SCHEMA public CASCADE;\n CREATE SCHEMA public;\n CREATE TABLE game\n (\n game_id text PRIMARY KEY,\n players integer,\n state bytea,\n timestamp timestamp\n );\n CREATE UNIQUE INDEX ix_game_id\n ON game\n (game_id);\n CREATE INDEX ix_timestamp\n ON game\n (timestamp);\n \"\"\")\n try:\n print(cursor.fetchall())\n except psycopg2.ProgrammingError:\n pass\n io = ioloop.IOLoop.instance()\n io.stop()", "title": "" }, { "docid": "3dee2a9ce3e3c0b886dcc3c834ea8765", "score": "0.5516978", "text": "def __init__(self, filename):\n # this boilerplate is turning me off from sqlalchemy. 
Why can't I have\n # one object that I interact with, instead of four levels of crap?\n self.sql_engine = create_engine(\n 'sqlite:///'+filename,\n isolation_level='READ UNCOMMITTED'\n )\n self.sql_session_maker = sessionmaker(bind=self.sql_engine)\n self.sql_session = self.sql_session_maker()\n Base.metadata.create_all(bind=self.sql_engine)\n self.term_cache = {}", "title": "" } ]
09a4bd3f156469c3b8e7375097f7f947
Gets is_grade attribute of instance; returns it.
[ { "docid": "56b57c1cb0e5c0ace8d84a7e09e28fdb", "score": "0.7083966", "text": "def get_is_graded(self):\n return self.is_graded", "title": "" } ]
[ { "docid": "ebc51373a9a05f5f317713d900e8baf8", "score": "0.743497", "text": "def get_grade(self):\n return self.grade", "title": "" }, { "docid": "dfb54dfff723a785b613028952041e0a", "score": "0.6901927", "text": "def grade(self):\n return self._grade", "title": "" }, { "docid": "2a5d7ef1ebc409c567dbe6e12fba2bef", "score": "0.65599567", "text": "def getGrValue(self):\n return self.grade_value", "title": "" }, { "docid": "1aa05500c7bdd0cb87a6f59d2735bd49", "score": "0.6482489", "text": "def grade(self):\n return self._response['grade']", "title": "" }, { "docid": "a5fe758fde8f3bda2996f53addb2c27a", "score": "0.6173418", "text": "def passed(grade):\n grades = {\n 'E': True,\n 'Exceptional': True,\n 'G': True,\n 'Good': True,\n 'A': True,\n 'Acceptable': True,\n 'P': False,\n 'Poor': False,\n 'H': False,\n 'Horrible': False,\n }\n\n return grades.get(grade, False)", "title": "" }, { "docid": "7714522ac571d5de7cf1d4767ae0ffa1", "score": "0.6038436", "text": "def get_grader(self):\n return self.to_dict().get(GRADER_KEY)", "title": "" }, { "docid": "31dd21dc3f59110e89e8dd00e741b2fd", "score": "0.6010256", "text": "def __str__(self):\n return self.grade", "title": "" }, { "docid": "b91402ca672aad76a420bf4421f192cc", "score": "0.5985245", "text": "def get_course_grade(self):\n return CourseGradeFactory().read(self.student_user, self.course)", "title": "" }, { "docid": "3abec9684f9e65d83213a6b739d22af8", "score": "0.595143", "text": "def can_assign_grade_system(self):\n return # boolean", "title": "" }, { "docid": "013696b64c22fa3919567870088174f5", "score": "0.57968026", "text": "def can_overridecalculated_grade_entries(self):\n return # boolean", "title": "" }, { "docid": "5fd3fdcbe39490364fab3c1bc03f9e51", "score": "0.57591295", "text": "def DSG(self):\n return self.has_property(\"featureType\")", "title": "" }, { "docid": "533d669b64048f80beff8c7a74035f71", "score": "0.57512814", "text": "def get_gradebook(self):\n return # osid.grading.Gradebook", "title": "" }, { "docid": "533d669b64048f80beff8c7a74035f71", "score": "0.57512814", "text": "def get_gradebook(self):\n return # osid.grading.Gradebook", "title": "" }, { "docid": "533d669b64048f80beff8c7a74035f71", "score": "0.57512814", "text": "def get_gradebook(self):\n return # osid.grading.Gradebook", "title": "" }, { "docid": "533d669b64048f80beff8c7a74035f71", "score": "0.57512814", "text": "def get_gradebook(self):\n return # osid.grading.Gradebook", "title": "" }, { "docid": "533d669b64048f80beff8c7a74035f71", "score": "0.57512814", "text": "def get_gradebook(self):\n return # osid.grading.Gradebook", "title": "" }, { "docid": "533d669b64048f80beff8c7a74035f71", "score": "0.57512814", "text": "def get_gradebook(self):\n return # osid.grading.Gradebook", "title": "" }, { "docid": "533d669b64048f80beff8c7a74035f71", "score": "0.57512814", "text": "def get_gradebook(self):\n return # osid.grading.Gradebook", "title": "" }, { "docid": "533d669b64048f80beff8c7a74035f71", "score": "0.57512814", "text": "def get_gradebook(self):\n return # osid.grading.Gradebook", "title": "" }, { "docid": "533d669b64048f80beff8c7a74035f71", "score": "0.57512814", "text": "def get_gradebook(self):\n return # osid.grading.Gradebook", "title": "" }, { "docid": "533d669b64048f80beff8c7a74035f71", "score": "0.57512814", "text": "def get_gradebook(self):\n return # osid.grading.Gradebook", "title": "" }, { "docid": "533d669b64048f80beff8c7a74035f71", "score": "0.57512814", "text": "def get_gradebook(self):\n return # osid.grading.Gradebook", "title": "" }, { "docid": 
"533d669b64048f80beff8c7a74035f71", "score": "0.57512814", "text": "def get_gradebook(self):\n return # osid.grading.Gradebook", "title": "" }, { "docid": "533d669b64048f80beff8c7a74035f71", "score": "0.57512814", "text": "def get_gradebook(self):\n return # osid.grading.Gradebook", "title": "" }, { "docid": "533d669b64048f80beff8c7a74035f71", "score": "0.57512814", "text": "def get_gradebook(self):\n return # osid.grading.Gradebook", "title": "" }, { "docid": "c206f07fc977fd877f085a581db27bf2", "score": "0.57271826", "text": "def is_graded(self):\n return hasattr(self, \"graded_work\")", "title": "" }, { "docid": "069f33e975ec0d964df88d39f5519fe5", "score": "0.5716374", "text": "def can_lookup_grade_entries(self):\n return # boolean", "title": "" }, { "docid": "26769f46bdbc6cbc1257456900f5c453", "score": "0.56508", "text": "def badges_enabled(self):\n if \"badgesEnabled\" in self._prop_dict:\n return self._prop_dict[\"badgesEnabled\"]\n else:\n return None", "title": "" }, { "docid": "e3ff5fa8b05c4ee3cf54cd3f76aa8488", "score": "0.55828094", "text": "def by_grade(cls, grade):\n if grade == None or grade > 100:\n return cls.NOT_STARTED\n if grade == 100:\n return cls.SUCCEEDED\n if grade <= 99 and grade >= 1:\n return cls.PART_SUCC\n if grade == 0:\n return cls.FAILED\n if grade > -1:\n return cls.STARTED\n if grade <= -2 and grade >= -100:\n return cls.TEACHER_EXC\n return cls.SANDBOX_EXC", "title": "" }, { "docid": "0a439fe5d8e65d619c992fead33ff338", "score": "0.5567315", "text": "def review_grades(self):\r\n return self._review_grades", "title": "" }, { "docid": "49661d6ad193e96d3748f2aae057d755", "score": "0.5564851", "text": "def clean_grade(self):\n\n grade = self.cleaned_data['grade']\n # map to bool\n grade_vals = {'pass': True, 'fail': False, '': ''}\n\n return grade_vals.get(grade, None)", "title": "" }, { "docid": "1bae47008fded4dd53034e668f922e96", "score": "0.5563756", "text": "def get_boolean(self):", "title": "" }, { "docid": "12e34874826e552a0e44d0920592efb8", "score": "0.5553184", "text": "def fetch_grades(self):\n return self.enrolled_students", "title": "" }, { "docid": "cb8c13bb13c8cbb65df7214c71862ead", "score": "0.55406314", "text": "def _get_isG2(self) -> \"bool\" :\n return _fusion.FilletFeature__get_isG2(self)", "title": "" }, { "docid": "5fa72acbb4f2a081487d66b4c3a53da0", "score": "0.55338603", "text": "def is_on(self) -> bool:\n val = False\n if self._attr in self._data:\n val = self._data[self._attr]\n\n return val", "title": "" }, { "docid": "46036a8387b6760c6f83c210991c2f15", "score": "0.55083734", "text": "def can_lookup_grade_systems(self):\n return # boolean", "title": "" }, { "docid": "c13a5c7f6601f346033adf0bba2229ad", "score": "0.5495199", "text": "def requires_grad(self):\n return self._requires_grad", "title": "" }, { "docid": "c13a5c7f6601f346033adf0bba2229ad", "score": "0.5495199", "text": "def requires_grad(self):\n return self._requires_grad", "title": "" }, { "docid": "4eae4a236ad2e5dd5d83ac9d187e284f", "score": "0.549249", "text": "def Enabled(self):\r\n\t\treturn self._get_attribute('enabled')", "title": "" }, { "docid": "e3d274cb14e2e16579df66b7bcb820e0", "score": "0.54852563", "text": "def get_grade_system(self, grade_system_id):\n return # osid.grading.GradeSystem", "title": "" }, { "docid": "39efe85f48adf5151ea0b34414adfe40", "score": "0.5473113", "text": "def get_grade_entry_query(self):\n return # osid.grading.GradeEntryQuery", "title": "" }, { "docid": "134fe98541fa7b039c43aeb203a66db9", "score": "0.54713374", "text": "def 
get_student(self):\n return self.student", "title": "" }, { "docid": "dde5f91a741f95f95a09fc86b17de1ab", "score": "0.54577273", "text": "def __bool__(self):\n\n return bool(self.attribute)", "title": "" }, { "docid": "dde5f91a741f95f95a09fc86b17de1ab", "score": "0.54577273", "text": "def __bool__(self):\n\n return bool(self.attribute)", "title": "" }, { "docid": "ab5dcbb52dd3e421c70969899a3bb753", "score": "0.54488", "text": "def maybe_enable_grading(self):\n\n if not self._lti_user.is_instructor:\n # Only instructors can grade assignments.\n return\n\n if \"lis_outcome_service_url\" not in self._request.params:\n # Only \"gradeable\" assignments can be graded.\n # Assignments that don't have the lis_outcome_service_url param\n # aren't set as gradeable in the LMS.\n return\n\n if self._context.is_canvas:\n # Don't show our built-in grader in Canvas because it has its own\n # \"SpeedGrader\" and we support that instead.\n return\n\n self._config[\"grading\"] = {\n \"enabled\": True,\n \"courseName\": self._request.params.get(\"context_title\"),\n \"assignmentName\": self._request.params.get(\"resource_link_title\"),\n \"students\": list(self._get_students()),\n }", "title": "" }, { "docid": "a18ab8b3a195891be76070e35887af9a", "score": "0.544498", "text": "def getFields(self, *args):\n\n # try to fetch from survey_record\n if hasattr(self.survey_record, 'grade'):\n grade = self.survey_record.grade\n\n # remap bool to string values as the ChoiceField validates on 'choices'.\n vals_grade = {True: 'pass', False: 'fail'}\n\n self.data['grade'] = vals_grade.get(grade, None) or grade\n\n return super(GradeSurveyRecordForm, self).getFields(*args)", "title": "" }, { "docid": "ca8cb2c658c26f61cabbcae9522f2368", "score": "0.5440322", "text": "def is_graded(self, code: str, name: Optional[str] = None) -> bool:\n if not name:\n name = self.submitter\n # TODO: yikes, 4 dictionary accesses xD. 
is there a better way?\n return (self._grades[name][\"scores\"][code][\"award\"]\n is not None)", "title": "" }, { "docid": "6b932a98f5e1ad276391ce4355730276", "score": "0.5438885", "text": "def __get_education__(self):\n return self.education", "title": "" }, { "docid": "7394ac41882520331f888575c63311d7", "score": "0.5435297", "text": "def testClassPropertyGrades(self):\n\n # data for the unit class grad setter\n unitGradesPass = [None, 5, 51.5, 0, '1.5']\n unitGradesTypeError = [[1, 5]]\n unitGradesValueError = ['testString', -2, 456]\n\n # appropriate values\n for testGrade in unitGradesPass:\n self.testModule.grade = testGrade\n if testGrade is None:\n self.assertEqual(self.testModule.grade, testGrade)\n else:\n testGrade = float(testGrade)\n if testGrade < 1:\n self.assertEqual(self.testModule.grade, 1)\n elif testGrade > 4:\n self.assertEqual(self.testModule.grade, 5)\n else:\n self.assertEqual(self.testModule.grade, testGrade)\n\n # inappropriate values\n with self.assertRaises(TypeError):\n for testGrade in unitGradesTypeError:\n self.testModule.grade = testGrade\n with self.assertRaises(ValueError):\n for testGrade in unitGradesValueError:\n self.testModule.grade = testGrade\n\n # data for the course class grad setter\n courseGrades = [0, 3, None, 78]\n\n # appropriate values\n for testGrade in courseGrades:\n self.testCourse.grade = testGrade\n\n if testGrade is None:\n self.assertFalse(self.testCourse.passed)\n elif testGrade > 4:\n self.assertFalse(self.testCourse.passed)\n else:\n self.assertTrue(self.testCourse.passed)", "title": "" }, { "docid": "486a0b80b3c68bfbe6afc69111c30a2d", "score": "0.5417543", "text": "def _get_isActive(self) -> \"bool\" :\n return _fusion.Occurrence__get_isActive(self)", "title": "" }, { "docid": "963c5b80b0453621e4572b502ea4b1ff", "score": "0.5396134", "text": "def can_create_grade_entries(self):\n return # boolean", "title": "" }, { "docid": "669de770250abf018713444db74d4fb5", "score": "0.53954", "text": "def _get_isUniform(self) -> \"bool\" :\n return _fusion.ScaleFeature__get_isUniform(self)", "title": "" }, { "docid": "ed76b871326cc3e4bddadfb79f33dedc", "score": "0.53860086", "text": "def has_many_grade_levels(self):\n return self.grade_levels.count() > 1", "title": "" }, { "docid": "b0c2104f44b104d93c88a0deea364dfc", "score": "0.5373989", "text": "def get_grade_entry(self, grade_entry_id):\n return # osid.grading.GradeEntry", "title": "" }, { "docid": "7753749ce34c637ede923ad064118e06", "score": "0.53579813", "text": "def can_search_grade_entries(self):\n return # boolean", "title": "" }, { "docid": "7967c1e9ab40557cc943f6c50e10548c", "score": "0.53566635", "text": "def gender(self):\n return self.member.gender", "title": "" }, { "docid": "57f442e835222c6a0ecde946e36042e7", "score": "0.5355678", "text": "def _get_isG2(self) -> \"bool\" :\n return _fusion.FilletFeatureInput__get_isG2(self)", "title": "" }, { "docid": "381b3ad44e4becb44f95c814d67b70e2", "score": "0.53447855", "text": "def gender(self):\n return self._gender", "title": "" }, { "docid": "381b3ad44e4becb44f95c814d67b70e2", "score": "0.53447855", "text": "def gender(self):\n return self._gender", "title": "" }, { "docid": "381b3ad44e4becb44f95c814d67b70e2", "score": "0.53447855", "text": "def gender(self):\n return self._gender", "title": "" }, { "docid": "a8f57247b7e5167cda45537a0d7e6301", "score": "0.5338988", "text": "def get_grade_entry_search(self):\n return # osid.grading.GradeEntrySearch", "title": "" }, { "docid": "34ce66b0f7d38fe54867d81745fa44da", "score": "0.5334409", 
"text": "def checked(self):\n # !!! this should be replaced by another logic that will get the real stat\n # from the DOM\n return self._checked", "title": "" }, { "docid": "69069e8afc1ae49373d093786458fea9", "score": "0.5323901", "text": "def contains_attr(self, gi):\n if gi is None:\n return False\n for gi_obj in self.gradual_items:\n if gi.attribute_col == gi_obj.attribute_col:\n return True\n return False", "title": "" }, { "docid": "501b46aeaded12751c5cbdb546560f7e", "score": "0.53137267", "text": "def can_lookup_gradebooks(self):\n return # boolean", "title": "" }, { "docid": "797026e094fc0d3c5d20c6e6c0f563e9", "score": "0.5297622", "text": "def getRating(self):\n\t\treturn self._rating", "title": "" }, { "docid": "32b90e312e8763287a5dd0bafb9daf91", "score": "0.5296641", "text": "def can_update_grade_systems(self):\n return # boolean", "title": "" }, { "docid": "4d0bf06783ca1edda099c8a5fb829759", "score": "0.5295841", "text": "def state(self):\n return self.attribute_value", "title": "" }, { "docid": "4d0bf06783ca1edda099c8a5fb829759", "score": "0.5295841", "text": "def state(self):\n return self.attribute_value", "title": "" }, { "docid": "1addd6bb6b9711972333a8e7ec389901", "score": "0.5293318", "text": "def _get_isActive(self) -> \"bool\" :\n return _fusion.FusionDocument__get_isActive(self)", "title": "" }, { "docid": "3d712c43e34c5fc77c108914b93e52d4", "score": "0.52853703", "text": "def student_call():\n return face_analyzer.student_attentiveness()", "title": "" }, { "docid": "140d0e23b5e5ca6130fbfac0791de0b4", "score": "0.5284669", "text": "def get_attribs(self):\n return self.attribs", "title": "" }, { "docid": "140d0e23b5e5ca6130fbfac0791de0b4", "score": "0.5284669", "text": "def get_attribs(self):\n return self.attribs", "title": "" }, { "docid": "c644dfcb6faeac973fd770f068dd55ba", "score": "0.52812827", "text": "def can_search_grade_systems(self):\n return # boolean", "title": "" }, { "docid": "c769007f6cc6c4d238b406d2bee69676", "score": "0.52785134", "text": "def can_create_grade_systems(self):\n return # boolean", "title": "" }, { "docid": "0b23e8570ba2cbbfbd66944b0fb0656f", "score": "0.5269091", "text": "def value(self) -> bool:\n ...", "title": "" }, { "docid": "0b23e8570ba2cbbfbd66944b0fb0656f", "score": "0.5269091", "text": "def value(self) -> bool:\n ...", "title": "" }, { "docid": "6e67af65f2341d31869a29bf36b4ba2c", "score": "0.5259277", "text": "def _inSample(self):\n #if hasattr(self._userModel(), \"_d_inSample\"):\n return self._d_inSample\n #else:\n # return False", "title": "" }, { "docid": "26673f7aea374d912e89c2b03c339d5f", "score": "0.52510893", "text": "def can_update_grade_entries(self):\n return # boolean", "title": "" }, { "docid": "c4d4f51f269ac04b1bb9848f7779f8e9", "score": "0.5250907", "text": "def get_gradebook(self, gradebook_id):\n return # osid.grading.Gradebook", "title": "" }, { "docid": "92fd067364305b2141a3c3bc62c543a7", "score": "0.52462745", "text": "def get_prop(self):\n return self.prop", "title": "" }, { "docid": "92fd067364305b2141a3c3bc62c543a7", "score": "0.52462745", "text": "def get_prop(self):\n return self.prop", "title": "" }, { "docid": "f6bdba43265e6c20c6dcfcd7a8e783bb", "score": "0.52400917", "text": "def review():\n context.grader.manual_grade()\n # raise NotImplemented(\"This feature is not currently implemented.\")", "title": "" }, { "docid": "7197c307090e4c811be6d11cadda4354", "score": "0.5238365", "text": "def get_gradebooks(self):\n return # osid.grading.GradebookList", "title": "" }, { "docid": 
"ed07c02854e0f184fb3ca19395b540e6", "score": "0.52303505", "text": "def is_active(self):\n return self.__user['object']['enabled']", "title": "" }, { "docid": "c1c65a6fe41d17f66f29a2c1bac42bfe", "score": "0.5229278", "text": "def get_score(self):\n\n\treturn self.__score", "title": "" }, { "docid": "1dec89c629e9527f502f50db18858774", "score": "0.52241856", "text": "def IsSexuallyActive( self ):\n # assert self.__gender == MALE\n return self.__age >= 16", "title": "" }, { "docid": "5b3fe1fb6fb49a71d7fdaa6a2d0467fa", "score": "0.52211696", "text": "def get_grade_system_query(self):\n return # osid.grading.GradeSystemQuery", "title": "" }, { "docid": "5b3fe1fb6fb49a71d7fdaa6a2d0467fa", "score": "0.52211696", "text": "def get_grade_system_query(self):\n return # osid.grading.GradeSystemQuery", "title": "" }, { "docid": "ce4e2fde558bac42a7538485c22927b5", "score": "0.5216244", "text": "def genders(self):\n\n return ('male', 'female')", "title": "" }, { "docid": "b949910c108d4bcfa102d69ed8cb9d8a", "score": "0.52103674", "text": "def inspect_grade_system_query(self):\n return # osid.grading.GradeSystemQueryInspector", "title": "" }, { "docid": "e88a081963c15b2a2d5084d2a9b8ea8d", "score": "0.5208438", "text": "def __getitem__(self, rubric_subitem) -> Dict[str, Union[bool, str]]:\n return self._grades[self.submitter][\"scores\"][rubric_subitem]", "title": "" }, { "docid": "58d907b6f7815d2824fc11d2d9c55b72", "score": "0.5201994", "text": "def get(self, grading_id):\n adm = ProjectAdministration()\n grad = adm.get_grading_by_id(grading_id)\n print(grad)\n return grad", "title": "" }, { "docid": "59356c14a35fe2683a442717efa6aeea", "score": "0.52018815", "text": "def grade(self):\r\n if self.is_royal_flush():\r\n return self.ROYAL_FLUSH\r\n if self.is_straight_flush():\r\n return self.STRAIGHT_FLUSH\r\n if self.is_flush():\r\n return self.FLUSH\r\n if self.is_four_of_a_kind():\r\n return self.FOUR_OF_A_KIND\r\n if self.is_full_house():\r\n return self.FULL_HOUSE\r\n if self.is_three_of_a_kind():\r\n return self.THREE_OF_A_KIND\r\n if self.is_two_pair():\r\n return self.TWO_PAIR\r\n if self.is_pair():\r\n return self.PAIR\r\n return self.LOW", "title": "" }, { "docid": "caa6b94bd23a5af0a5f4fb83a894ae89", "score": "0.520172", "text": "def get_color(self)->bool:\n state = self.get_state()\n return state.color", "title": "" }, { "docid": "5f683f127053f92bd3aa7da8dc853391", "score": "0.5186911", "text": "def classification(self):\n return self.Classification", "title": "" }, { "docid": "4d4dc49708fc4bfdf807f0d6a6b8fb32", "score": "0.5171784", "text": "def get_grade_entries(self):\n return # osid.grading.GradeEntryList", "title": "" }, { "docid": "42d7f1f37961b22a17fb5bcea776708d", "score": "0.5168761", "text": "def __repr__(self):\n return f'Grade(abbrev={self.abbreviation}, course_number={self.course_number}, ' \\\n f'section_number={self.section_number}, semester={self.semester}, ' \\\n f'year={self.year}, average={self.average})'", "title": "" }, { "docid": "d0ee07d28ecabe5258c42ebb6acddc3b", "score": "0.5161924", "text": "def can_create_gradebooks(self):\n return # boolean", "title": "" }, { "docid": "d25738771d9d3f137223e8b8ab971fe0", "score": "0.5161804", "text": "def available(self):\n return self.attributes is not None", "title": "" }, { "docid": "1a37089cb3259e90c60758b63e5d9933", "score": "0.51573265", "text": "def enrolled(self):\n return self._student_enrolled", "title": "" } ]
f293cb81966d4d558331b83cb872dc75
Ensure that all entries are returned.
[ { "docid": "57a0df6352d166edd9eb96351fc2fc59", "score": "0.6007018", "text": "def test_view_everything_returns_all_records(self):\n data = self.create_test_dates()['test_log_entry_data']\n\n records = self.dbm.view_everything()\n records = [dict(entry) for entry in records]\n\n self.assertEqual(len(data), len(records))\n for datum in data:\n self.assertIn(datum, records)", "title": "" } ]
[ { "docid": "b5acbfbedb9e1524417d1e5e14283e4f", "score": "0.7266865", "text": "def all_entries(self) -> requests.models.Response:", "title": "" }, { "docid": "151603cea417ba3dabcb63e9d0ba8d72", "score": "0.7057368", "text": "def getAllEntries(self):\n try:\n self.data.entryList = self.APP.getAllEntries()\n if not self.__sendAppOperationResult(lambda: self.data.entryList is not None,\n ControllerLogicException('Load entries failed.')):\n return\n except Exception, e:\n self.__sendError(e)", "title": "" }, { "docid": "1140cee3127b7a665943779f6ac9713b", "score": "0.68558085", "text": "def all(self):\n self.scan()\n return self.entries", "title": "" }, { "docid": "2fa33d70eb0c45040ec4be65d0f962f8", "score": "0.6569235", "text": "def get(self):\n return get_all_entries()", "title": "" }, { "docid": "e38b26abe2f00ae481e46756657afb0d", "score": "0.6526549", "text": "def get_entries(self) -> Generator[tuple, None, None]:\n for entry in self.entries_batch:\n yield entry\n\n self.entries_batch = []", "title": "" }, { "docid": "cfc93173820ff5d608d5508be434da0c", "score": "0.6519066", "text": "def get_all_entries(self):\n\n self.refresh()\n while True:\n for entry in self.entries:\n yield entry\n\n # Navigate to next page - HTTP call\n if self.next() is None:\n break", "title": "" }, { "docid": "508efee73a1728fda5a7d7bbc053e1e9", "score": "0.6487228", "text": "def get_all_entries():\n entries = Entry.objects.all()\n return entries", "title": "" }, { "docid": "b7c2253ebdf3305ee8361aed2414aab8", "score": "0.6355247", "text": "def all(self):\n self.scan()\n return list(self.entries)", "title": "" }, { "docid": "10d3aa0e5219be23c38d70d2faa7a9e9", "score": "0.6343515", "text": "def get_all(self):\n pass", "title": "" }, { "docid": "e144f30655b64b59c5dd1815d6186ccf", "score": "0.63129425", "text": "def get_all_items(self):", "title": "" }, { "docid": "a09470d44b62e478a3fa35fc04d187fb", "score": "0.62576747", "text": "def get_all(self):\n self.get_new_entries()\n self.get_changed_entries()\n self.get_removed_entries()", "title": "" }, { "docid": "2e12436e4249ed88b5e4636ace39fbd1", "score": "0.6249732", "text": "def get_all(self):\n raise NotImplemented()", "title": "" }, { "docid": "92b8360a16f7ca49f57c2bb2ad5a6581", "score": "0.6216486", "text": "def test_get_all_entries(self):\n\n response = self.client.get('/api/v1/entries',\n content_type='application/json',\n data=json.dumps(entry1))\n self.assertEqual(response.status_code, 200)\n self.assertIn('Technical Leader', str(response.data))", "title": "" }, { "docid": "acefbb95634855a960aa2b06fe50733a", "score": "0.6166555", "text": "def get_all_records(self):\n pass", "title": "" }, { "docid": "26d2f055d02402577a612db3cbc6caec", "score": "0.61484027", "text": "def test_get_all_entries(self):\n\n testing_user = app.test_client(self)\n\n # Add first entry\n res = testing_user.post('/api/v1/entries',\n data=json.dumps(self.my_entries[0]),\n content_type='application/json')\n self.assertEqual(res.status_code, 201)\n self.assertIn('Your memory entitled ' + self.my_entries[0]['title'] +\n ' has been saved', str(res.data))\n\n # Add second entry\n response = testing_user.post('/api/v1/entries',\n data=json.dumps(self.my_entries[1]),\n content_type='application/json')\n self.assertEqual(res.status_code, 201)\n self.assertIn('Your memory entitled ' + self.my_entries[1]['title'] +\n ' has been saved', str(response.data))\n\n # Get all entries\n final_response = testing_user.get('/api/v1/entries')\n self.assertEqual(final_response.status_code, 200)\n 
self.assertIn('Learning Flask', str(final_response.data))\n self.assertIn('My first Flask API', str(final_response.data))", "title": "" }, { "docid": "1c88e6c9bffa2edf8ce8ddc6b4d2e7e1", "score": "0.6137212", "text": "def all(self):\n return self._perform_query()", "title": "" }, { "docid": "f3477b6370bd0255f3ad55937aba438c", "score": "0.6130469", "text": "def all(self):\n return self.many()", "title": "" }, { "docid": "0c0a297d51ee3bdd7a06ecf8b68d7923", "score": "0.612449", "text": "def _fetch_all(self):\n if self._result_cache is None:\n self._result_cache = list(self.iterator())\n if not isinstance(self, ValuesListQuerySet):\n for x in self._result_cache:\n self._set_query_time(x)\n if self._prefetch_related_lookups and not self._prefetch_done:\n self._prefetch_related_objects()", "title": "" }, { "docid": "cd7e9c236d0c0319a066e981e180ed47", "score": "0.61147547", "text": "def GetAll(self):\n return self.ok + self.GetAllBroken()", "title": "" }, { "docid": "cd926c005ff344fbee36cfbf72cb4cd0", "score": "0.6110133", "text": "def _fetch_all(self):\n if self._result_cache is None:\n self._result_cache, self._pagination_item = self.model.get(self.request_options)", "title": "" }, { "docid": "55dec3a91ce2bbebac8afe451f850693", "score": "0.6080038", "text": "def test_read_all(self):\n with patch.object(LogEntries, \"read\",\n side_effect=self._get_entries):\n # assert returning all sorted entries\n entries = LogEntries.read_all(list(self._get_entries_dict().keys()),\n None, None, None)\n self.assertEqual(len(entries), 9)\n for i in range(len(entries) - 1):\n self.assertLessEqual(entries[i][\"time\"],\n entries[i + 1][\"time\"])\n\n # assert returning last 3 sorted entries\n entries = LogEntries.read_all(list(self._get_entries_dict().keys()),\n 3, None, None)\n self.assertEqual(len(entries), 3)\n expected_entries = [200, 600, 1000]\n for i in range(len(entries) - 1):\n # assert ordering\n self.assertLessEqual(entries[i][\"time\"],\n entries[i + 1][\"time\"])\n # assert values\n self.assertEqual(entries[i][\"time\"], expected_entries[i])\n self.assertEqual(entries[i+1][\"time\"], expected_entries[i+1])", "title": "" }, { "docid": "70d00effe8da0d6965aa9b0aee15b104", "score": "0.60755086", "text": "def get_entries():\n data = Diary.all_entries()\n return data", "title": "" }, { "docid": "3adc7b3b70576031e93ca35999f393da", "score": "0.6066714", "text": "def get_all(self):\n raise NotImplementedError", "title": "" }, { "docid": "3adc7b3b70576031e93ca35999f393da", "score": "0.6066714", "text": "def get_all(self):\n raise NotImplementedError", "title": "" }, { "docid": "3adc7b3b70576031e93ca35999f393da", "score": "0.6066714", "text": "def get_all(self):\n raise NotImplementedError", "title": "" }, { "docid": "b1ff18e18ca7e02635504b6d58f2295f", "score": "0.60474014", "text": "def all(self):\n with self._lock:\n self.update()\n\n return list(self.entries)", "title": "" }, { "docid": "cf62aef74ebace865132164fc86a403a", "score": "0.6045557", "text": "def has_all_entries(self):\n for entry in self.entries:\n self.related_entry_ids.update(entry.get_related_entries())\n self.related_entry_ids.add(entry.id)\n if set(self.related_entry_ids) == set([entry.id for entry in self.entries]):\n return True\n else:\n print 'Study', id, 'does not have all entries', set(self.related_entry_ids), set([entry.id for entry in self.entries])\n return False", "title": "" }, { "docid": "e0d951e59a0e7292b0e921618ed26f62", "score": "0.601289", "text": "def fetch(self) -> list[Entry]:", "title": "" }, { "docid": 
"8f5653134c7333ac74551e33c9740853", "score": "0.5974942", "text": "def all() -> dict:\n ...", "title": "" }, { "docid": "8ef3c2ecbbb8cb7eb306396200d95024", "score": "0.5950022", "text": "def get_entries(self) -> Generator[str, None, None]:\n return (entry for entry in self.entries)", "title": "" }, { "docid": "54973ce35943e3c38300e700dad39c91", "score": "0.5945292", "text": "def fetchall(self):\n raise NotImplementedError('Method not implemented.')", "title": "" }, { "docid": "9c3b2eca379418af3e13aa23352b4a94", "score": "0.59449697", "text": "def _get_results(self):\n return []", "title": "" }, { "docid": "65d39b400093177d6242184752bd95f1", "score": "0.59397185", "text": "def test_all_empty(self):\n\n nutrition = MemoryNutritionStore()\n self.assertEqual({ }, nutrition.all())", "title": "" }, { "docid": "c025e6597f6840dce3a0a5fa39737957", "score": "0.59388345", "text": "def all(self) -> dict:\n ...", "title": "" }, { "docid": "4ac57927482ae5ff1d0a99476e2f865a", "score": "0.5933919", "text": "def get_entries(self):\n return self.entries", "title": "" }, { "docid": "37c3254db7dfc91a82e6fe9467b0dcdb", "score": "0.5930503", "text": "def fetchall(self):\n raise NotImplementedError", "title": "" }, { "docid": "52f07dd693782254758243910bf44548", "score": "0.5916642", "text": "def test_entries(self):\n self.assertEqual(len(self.entries), 25)", "title": "" }, { "docid": "8db8e7df3a91186728da2daa09586a5d", "score": "0.5894018", "text": "def get_all_library_entries():\n key = 'library_entries'\n data = cache.get(key)\n if data is None:\n try:\n data = models.ApplicationLibraryEntry.objects.filter(listing__is_deleted=False).all()\n cache.set(key, data)\n return data\n except ObjectDoesNotExist:\n return None\n else:\n return data", "title": "" }, { "docid": "2653d04dc013f02a87b9a94af4dcf7e2", "score": "0.5882078", "text": "def entries(self):\n return self._entries", "title": "" }, { "docid": "635397129a843d61780e474acebd5a07", "score": "0.5880765", "text": "def get_all_data(self):\n pass", "title": "" }, { "docid": "21ec344036edd163caaf2fa6862ccf84", "score": "0.58756435", "text": "def test_getall(self):\n pass", "title": "" }, { "docid": "1b1103d01b49838bd90680bfa2fa702e", "score": "0.5836943", "text": "def get_all(self):\n\n raise NotImplementedError()", "title": "" }, { "docid": "1b1103d01b49838bd90680bfa2fa702e", "score": "0.5836943", "text": "def get_all(self):\n\n raise NotImplementedError()", "title": "" }, { "docid": "0d55610edf56149e4f189fb9a0420ce6", "score": "0.5823098", "text": "def test_find_all_return_empty_list():\n all_entries = rldb.find_all({ 'env-title': 'There is no env with this title.' 
})\n\n assert all_entries == []", "title": "" }, { "docid": "6c953ca53d9e516e498bbd168b9f7285", "score": "0.58151126", "text": "def get_all_for_all(self):\n\n for name, rs in self.result_sets.items():\n print(\"Retrieving {}s for result set {}...\".format(self.item, name))\n self.get_all_items(name)", "title": "" }, { "docid": "9c08bc29b8130efab99fe58edf1975d4", "score": "0.5813334", "text": "def _get_entries(self, query):\n # @see http://www.solrtutorial.com/solr-query-syntax.html\n return self._kibana.get_rows(match={\"@exception.class\": \"Wikia\\\\Util\\\\AssertionException\"}, limit=self.LIMIT)", "title": "" }, { "docid": "f81be40393f2c4d6f97b57f2def8730f", "score": "0.58030194", "text": "def fetchall(self):\n out = self._results[:]\n self._results = []\n return out", "title": "" }, { "docid": "2c15db1d5b5d20654669e10701db451f", "score": "0.57954216", "text": "def all(self) -> Iterable[\"Row\"]:", "title": "" }, { "docid": "147940dd0eb713d6564cc769d0fea4ea", "score": "0.57885486", "text": "def all(self):\n return self.multi()", "title": "" }, { "docid": "22fed8c1386721bb5acd9c28b742684b", "score": "0.5781892", "text": "def get_all(self):\n return imap(self.get_one, ifilter(None, self.list()))", "title": "" }, { "docid": "71c2424c45edc9bef93928b9bd78e287", "score": "0.57688826", "text": "def all(self):\n return [record for record in iter(self)]", "title": "" }, { "docid": "e4dd194f2629aeb1d13ab85de93a4443", "score": "0.57681453", "text": "def get_all_entries(self, slug: str, batch_size: int = 10) -> Iterable[Entry]:\n offset = 0\n entries_batch = self.get_entries(slug, limit=batch_size, offset=offset)\n while entries_batch:\n yield from entries_batch\n offset += batch_size\n entries_batch = self.get_entries(slug, limit=batch_size, offset=offset)", "title": "" }, { "docid": "8a6c99a887b406c94c789281f8170476", "score": "0.5756834", "text": "def get_entries(self):\n\n\t\treturn self.__entries", "title": "" }, { "docid": "fc32de4e88ee73fcde536b94daf09ea4", "score": "0.57488054", "text": "def validateAllDataItems(self):\n return self.dataStore.validateAllDataItems()", "title": "" }, { "docid": "50b2a92a3568ccc67ec1db535189e61d", "score": "0.5737981", "text": "def iterentries(self):\n raise NotImplementedError(self.iterentries)", "title": "" }, { "docid": "65e723eb7b341aa7f70d3fe77fd3e9e7", "score": "0.57354146", "text": "def all_entries():\n s = Session()\n\n try:\n entries_list = s.query(Entry).all()\n s.close()\n return render_template(\"entries/entries_list.html\", entries=entries_list)\n except Exception as e:\n return str(e)", "title": "" }, { "docid": "1932615a364c14bf335b501c2f8f50a8", "score": "0.5729247", "text": "def getAllOrNone(self):\n return self.__allOrNone", "title": "" }, { "docid": "1375a1c6a839613dfa00215f808fe747", "score": "0.5728215", "text": "def get_entries(self):\n return self.__entries", "title": "" }, { "docid": "1375a1c6a839613dfa00215f808fe747", "score": "0.5728215", "text": "def get_entries(self):\n return self.__entries", "title": "" }, { "docid": "c280454ea6baedd522a64a308cf32a91", "score": "0.5722788", "text": "def get_all():\n res = get_aps()\n if res is None:\n return flask.Response('Organization not found', 404)\n return res", "title": "" }, { "docid": "e69d8f59b3aeefeffbeb49475cc395ad", "score": "0.5720823", "text": "def test_all(self):\n self.objs = self.storage.all()\n self.assertIsNotNone(self.objs)\n self.assertEqual(type(self.objs), dict)", "title": "" }, { "docid": "e69d8f59b3aeefeffbeb49475cc395ad", "score": "0.5720823", "text": "def 
test_all(self):\n self.objs = self.storage.all()\n self.assertIsNotNone(self.objs)\n self.assertEqual(type(self.objs), dict)", "title": "" }, { "docid": "cd36a8943e7c0bfb9389eeb133cd7d78", "score": "0.5720548", "text": "def all(self):\n self.query()\n return self.list", "title": "" }, { "docid": "b5ee4eab50f885f6bf909fc5e1c9692b", "score": "0.57047886", "text": "def _all(self):\n pass", "title": "" }, { "docid": "c09f395ca324c4b1a107106e058810ae", "score": "0.57030624", "text": "def all():\n return list()", "title": "" }, { "docid": "80758f8a539f3723cb7d10823a5ba77b", "score": "0.56985116", "text": "def all(self) -> dict[str, _t.Any]: # pragma: no cover\n return self.get_all()", "title": "" }, { "docid": "40ad81d997f1200865f8b5b6b8bfc98f", "score": "0.56823474", "text": "def fetchall(self):\n return []", "title": "" }, { "docid": "40536cef1a989823dd85581f29555f9b", "score": "0.56790763", "text": "def get_all_records(self) -> Tuple[Optional[List[Records]], Optional[Exception]]:\n return self.get_all_objects(Records)", "title": "" }, { "docid": "de76411e01d9d399573017b03d6dff2e", "score": "0.56751245", "text": "def _validate_market_data_entries(entries):\n if entries is None:\n entries = [entry for entry in MarketDataEntry]\n else:\n for entry in entries:\n if not isinstance(entry, MarketDataEntry):\n print(\"WARNING: Market Data Entry not defined: \" + str(entry))\n\n return entries", "title": "" }, { "docid": "bdd72b8bdf466ba1b4c1db1ac658bb1c", "score": "0.5673406", "text": "def _collect_results(self, items):\r\n for item in items:\r\n if not utils.is_url(item[\"link\"]):\r\n continue\r\n if item in self.results:\r\n continue\r\n if self.ignore_duplicate_urls and item[\"link\"] in self.results.links():\r\n continue\r\n if self.ignore_duplicate_domains and item[\"host\"] in self.results.hosts():\r\n continue\r\n self.results.append(item)", "title": "" }, { "docid": "48f8674942c023df087b892ef6cfcc2a", "score": "0.5658856", "text": "def get_all_ids(self):\n while True:\n results = self.get_ids()\n ids = results[\"id_set\"]\n if not ids - self.article_ids:\n break\n self.article_ids = self.article_ids | ids\n if len(self.article_ids) >= results[\"result_count\"]:\n break", "title": "" }, { "docid": "d23483c5246ae5acba008f0d1938dba1", "score": "0.5650488", "text": "def retrieve_all(self, key):\n raise NotImplementedError", "title": "" }, { "docid": "84e288d3572c28a5b2bc4622b49218c9", "score": "0.5641535", "text": "def get_all_items(self) -> Iterable[\"Item_Type\"]:\n if self.assert_conforms_to(ConformanceClasses.ITEM_SEARCH):\n yield from self.get_items()\n else:\n yield from super().get_items()", "title": "" }, { "docid": "f344706ceae6dda9e6b5e17112715621", "score": "0.5628937", "text": "def get_entries(self):\n return filter(\n lambda e: not e.name.endswith(self.empty_index),\n self.entries\n )", "title": "" }, { "docid": "a1dbcc52b323a4375656f71e5af724f1", "score": "0.56237346", "text": "def All(self) -> List[dict]:\r\n # Default return if nothing matches the query\r\n if len(self.results) < 1:\r\n return []\r\n\r\n return self.__get_unindexed_all_results()", "title": "" }, { "docid": "acb8aca500c6135ce12baf6b6c5c8665", "score": "0.5622763", "text": "def get_all(self) -> dict[str, _t.Any]:", "title": "" }, { "docid": "e028b389d835bc29da31f863604d9cdd", "score": "0.5619233", "text": "def entries():", "title": "" }, { "docid": "e028b389d835bc29da31f863604d9cdd", "score": "0.5619233", "text": "def entries():", "title": "" }, { "docid": "e30e1a2ba33f9419a8e383333e8538cd", "score": "0.56087714", 
"text": "def test_all_items(self):\n logging.info(\"Start test case: get all items\")\n get_items = get_all_items()\n assert get_items.status_code == 200, f\"The status_code is {get_items.status_code}\"", "title": "" }, { "docid": "919a2bb51208826d11baeef10a327bff", "score": "0.5602807", "text": "def query_all(self) -> List[Any]:\n return [v for v in self.batch_query(self.query_keys()) if v is not None]", "title": "" }, { "docid": "6611cbc29a55f641d06fdb7fbff46c4f", "score": "0.559653", "text": "def __iter__(self):\n return self.all()", "title": "" }, { "docid": "f3cd3ae4bc4102406c84db58261e60f1", "score": "0.5594151", "text": "def all(self):\n return [(key, self[key]) for key in self.map.keys() \\\n if self[key] is not None and len(self[key])]", "title": "" }, { "docid": "cd229cfe9c7a9e21557899d22e256c87", "score": "0.5584198", "text": "def getAll(self):", "title": "" }, { "docid": "580c1d7e78593508cb7a235f0243173c", "score": "0.55834055", "text": "def get_all_data(self) -> Optional[list[dict]]:\n data = []\n\n try:\n entries = self.db.collection(constants.COLLECTION).get()\n\n for entry in entries:\n if entry.exists:\n data.append(entry.to_dict())\n\n except FirebaseError:\n raise DataDidNotLoadError\n\n return data", "title": "" }, { "docid": "0d5bf396d3a4d29da148574892acdead", "score": "0.55720246", "text": "def test_get_entries():\n entries = get_entries()\n assert len(entries) == 1\n assert entries[0] == {\n 'accelerated_justification': 'Closing Time',\n 'additional_text': 'Additional Text',\n 'attachments': 'Attachments',\n 'awarded_date': 'Awarded Value',\n 'awarded_value': 'Supplier [Name|Address|Ref type|Ref Number|Is SME|Is VCSE]',\n 'closing_date': 'Closing Date',\n 'closing_time': 'Is sub-contract',\n 'contact_address_1': 'Contact Address 1',\n 'contact_address_2': 'Contact Address 2',\n 'contact_country': 'Contact Country',\n 'contact_email': 'Contact Email',\n 'contact_name': 'Contact Name',\n 'contact_postcode': 'Contact Postcode',\n 'contact_telephone': 'Contact Telephone',\n 'contact_town': 'Contact Town',\n 'contact_website': 'Contact Website',\n 'contract_end_date': 'OJEU Procedure Type',\n 'contract_start_date': 'Contract end date',\n 'cpv_codes': 'Cpv Codes',\n 'description': 'Description',\n 'end_date': 'End Date',\n 'is_sub_contract': 'Parent Reference',\n 'links': 'Links',\n 'nationwide': 'Nationwide',\n 'notice_identifier': 'Notice Identifier',\n 'notice_type': 'Notice Type',\n 'ojeu_contract_type': 'Value Low',\n 'ojeu_procedure_type': 'Accelerated Justification',\n 'organisation_name': 'Organisation Name',\n 'parent_reference': 'Suitable for SME',\n 'postcode': 'Postcode',\n 'published_date': 'Published Date',\n 'region': 'Region',\n 'start_date': 'Start Date',\n 'status': 'Status',\n 'suitable_for_sme': 'Suitable for VCO',\n 'suitable_for_vco': 'Supply Chain',\n 'supplier_contact_name': 'Contract start date',\n 'supplier_info': \"Supplier's contact name\",\n 'supply_chain': 'OJEU Contract Type',\n 'title': 'Title',\n 'value_high': 'Awarded Date',\n 'value_low': 'Value High',\n }", "title": "" }, { "docid": "36d9e826a8effd4f93a35f7386b37371", "score": "0.5565586", "text": "def getall(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "36d9e826a8effd4f93a35f7386b37371", "score": "0.5565586", "text": "def getall(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "26a710f8c06e4f042143d2650ee4890d", "score": "0.55651474", "text": "def _validate_results(self):\n pass", "title": "" }, { "docid": 
"c556656a02ebe5f64396ac2ca36dafe1", "score": "0.5555562", "text": "def CheckAll(self):\r\n for ii in xrange(self.GetItemCount()):\r\n self.CheckItem(ii,True)", "title": "" }, { "docid": "102d37a700710c79040530ac57e320ae", "score": "0.55473876", "text": "def requestAll():\n pass", "title": "" }, { "docid": "bbd4ff7dd6b17bc3a5341aca549565ac", "score": "0.55456877", "text": "def _emptyRawResults(self):\n self._rawresults = {}", "title": "" }, { "docid": "21b9e57d8e6dd3c1751f469703756d4e", "score": "0.553741", "text": "def get_allresults(self):\r\n where = \"\"\r\n values = ()\r\n return Caremanagerzip.select_many(where, values)", "title": "" }, { "docid": "ebe79b1f6ec5b8b583c12aec2227bb8c", "score": "0.5533506", "text": "def check_results(self):\n pass", "title": "" }, { "docid": "ebe79b1f6ec5b8b583c12aec2227bb8c", "score": "0.5533506", "text": "def check_results(self):\n pass", "title": "" }, { "docid": "92e0d60b27be875ef157edb649f13846", "score": "0.55325264", "text": "def test_all(self):\n all = storage.all()\n self.assertIsInstance(all, dict)", "title": "" }, { "docid": "0428238d7b608a976582b176a47a9f87", "score": "0.5532113", "text": "def _get_missing_data(self, c, db_data, entries_required):\n parameters = self._requests_parameters(entries_required,\n settings.ROWS_LIMIT)\n reject_values = [to_string(day) for day in\n list_values(db_data, self.__column)]\n url = self.__api_url.safe_substitute(coin=self.__currency)\n worker = ApiWorker(\n self.__db, url, parameters, self.__modifications, self.__table,\n {'currency': c}, self.__column, reject_values\n )\n new_data, error = worker.data_one_to_many_or_no_relationship()\n return new_data, error", "title": "" }, { "docid": "6ae61d3df7046f36f3d7ec52255684a7", "score": "0.5526723", "text": "def all(self):\n return self._items", "title": "" }, { "docid": "fd585de38f8722a2dce2cda02eee3298", "score": "0.55102766", "text": "def on_get(self, req, resp):\n rv = self._service.all()\n resp.body = self._json_or_404(rv)", "title": "" }, { "docid": "0cf25196892c1aa51e025356409a0615", "score": "0.55056417", "text": "def test_set_many_returns_empty_list_on_success(self):\n failing_keys = cache.set_many({\"key1\": \"spam\", \"key2\": \"eggs\"})\n self.assertEqual(failing_keys, [])", "title": "" }, { "docid": "293975a783fbdf17d56480e30dcfee56", "score": "0.55036354", "text": "def test_fetch_all_items(self):\n event = {\"body\": None}\n result = todo_handler.fetch_items(event, None)\n self.assertEqual(result[\"statusCode\"], 200)", "title": "" } ]
d5d1ba5f2a340e997fd179d7fbbeb007
The rsquared value (a.k.a. coefficient of determination)
[ { "docid": "ac093f765728612363ed6781c28d505b", "score": "0.0", "text": "def r_squared(A, b):\n ssr = sum_of_squared_residuals(A, b)\n sst = total_sum_of_squares(A, b)\n return 1.0 - ssr / sst", "title": "" } ]
[ { "docid": "72fac2c5ab96a955bb8597aa092c64df", "score": "0.7222706", "text": "def rsq(self):\n return metrics.rsq(self.model, self.x, self.y)", "title": "" }, { "docid": "47d63c0cfd2b0557ff484c91495f6744", "score": "0.70095164", "text": "def get_res_rsquare(self, res_type: str) -> float:\n return self.config['resistor']['info'][res_type]['rsq']", "title": "" }, { "docid": "21855c2964f2efb00acc5b223af76ac0", "score": "0.68036026", "text": "def rsquared(predictions, actual):\n ybar = 1/len(actual)*actual.sum()\n\n var_yhat = predictions.var()\n var_y = actual.var()\n return var_yhat/var_y", "title": "" }, { "docid": "329213ae4abfed64e7373564dc738bb8", "score": "0.66765714", "text": "def residual(self):\n if self.expr.value is None:\n return None\n return np.abs(self.expr.value)", "title": "" }, { "docid": "329213ae4abfed64e7373564dc738bb8", "score": "0.66765714", "text": "def residual(self):\n if self.expr.value is None:\n return None\n return np.abs(self.expr.value)", "title": "" }, { "docid": "797fa1000f6c7b0c76177c2d73b70241", "score": "0.6674307", "text": "def dQR(self):\n return math.sqrt(self.d2(self.q, self.r))", "title": "" }, { "docid": "c123fd3a2e65923d1fedaa96d35215e4", "score": "0.6543117", "text": "def getResidual(self):\n\n return self.eval()", "title": "" }, { "docid": "767ae89be29b482fe10ca0c0f10d1a32", "score": "0.6506872", "text": "def compute_Rsquared(y1,y2):\n sum_var = np.sum( (y1-np.mean(y1))**2 )\n sum_res = np.sum( (y1-y2)**2 ) \n return float(1)-float(sum_res)/sum_var", "title": "" }, { "docid": "adc204316eb500a0c936f855aaa53635", "score": "0.65018874", "text": "def calculate_r(self):\n\n return np.exp(-(self.mc.x-self.centers_x)**2/(self.sigmax**2) - (self.mc.x_d-self.centers_xd)**2/(self.sigmay**2))", "title": "" }, { "docid": "36ff3d005be0c85ac09ce385ee238bc2", "score": "0.6469571", "text": "def compute_rsquared(self):\n # First read the total sum of squares for each run\n total_sumsquares = self.sum_squares(\"sstot\")\n\n for part in [\"full\", \"main\"]:\n\n # Read in the residual sum of squares and take grand sum\n res_sumsquares = self.sum_squares(\"ssres\", part)\n\n # Calculate the full model R2\n r2 = 1 - res_sumsquares / total_sumsquares\n\n # Save an image with these data\n self.save_image(r2, \"r2_\" + part)", "title": "" }, { "docid": "b6a62136e06437f44ca2627da1a02126", "score": "0.64525515", "text": "def rmse(self):\n return math.sqrt(self.mse())", "title": "" }, { "docid": "91bed44d2e771daa15a03eaa3ca6e77b", "score": "0.6398252", "text": "def _Rr(self, r):\r\n return np.sqrt(r / (self.M * self.N *\r\n (self.Tmn_meas * self._weights).max()**2))", "title": "" }, { "docid": "bbfbee56f58828a92a3382f34a1a28f4", "score": "0.6378755", "text": "def residual(self):\n if self.expr.value is None:\n return None\n return np.maximum(self.expr.value, 0)", "title": "" }, { "docid": "bbfbee56f58828a92a3382f34a1a28f4", "score": "0.6378755", "text": "def residual(self):\n if self.expr.value is None:\n return None\n return np.maximum(self.expr.value, 0)", "title": "" }, { "docid": "2a7008e723a7d6e8d2251a2a98d94121", "score": "0.6356604", "text": "def rmse(self):\n return self.metrics.rootMeanSquaredError", "title": "" }, { "docid": "117ce2876a1613428b7e089b9bb985dc", "score": "0.6264112", "text": "def calc_ri(self):\n\n # Function call.\n chi2 = self.func_mf(self.params)\n\n # Return the single value.\n return self.data[0].ri[0]", "title": "" }, { "docid": "a388f5fc351b9557d2df2d321d90cda5", "score": "0.62585545", "text": "def get_rsquared(x, y):\n results = {}\n\n 
coeffs = np.polyfit(x, y, 1)\n\n # Polynomial Coefficients\n results['polynomial'] = coeffs.tolist()\n\n # r-squared\n p = np.poly1d(coeffs)\n # fit values, and mean\n yhat = p(x) # or [p(z) for z in x]\n ybar = np.sum(y)/len(y) # or sum(y)/len(y)\n ssreg = np.sum((yhat-ybar)**2) # or sum([ (yihat - ybar)**2 for yihat in yhat])\n sstot = np.sum((y - ybar)**2) # or sum([ (yi - ybar)**2 for yi in y])\n results['determination'] = ssreg / sstot\n rsquared=(ssreg/sstot)\n\n return rsquared", "title": "" }, { "docid": "fe84229489d82dd645678eade051702b", "score": "0.625804", "text": "def rsq(model, x, y):\n yhat = model.predict(x)\n mse = keras.metrics.MeanSquaredError()\n mse(y, np.mean(y))\n sst = mse.result().numpy()\n mse = keras.metrics.MeanSquaredError()\n mse(y, yhat)\n sse = mse.result().numpy()\n return 1 - sse/sst", "title": "" }, { "docid": "0eb22a51d22073aad9eb8b03840c307a", "score": "0.6234662", "text": "def r_squared(x, y):\n return (covariance(x,y))**2 / float(variance(x)*variance(y))", "title": "" }, { "docid": "24c5ad95c02666f6c709161d954cef92", "score": "0.62283397", "text": "def get_rmse(self):\n try:\n rmse = ((2 * self.get_tsse()) / \n (len(self.training_vectors) * len(self.outputs)))\n except TypeError:\n # tsse returned None...\n return None\n return math.sqrt(rmse)", "title": "" }, { "docid": "c2232b5e48f8c11e336a131c49080f1d", "score": "0.6228258", "text": "def r_squared(y, estimated):\n assert isinstance(y, pylab.ndarray)\n assert isinstance(estimated, pylab.ndarray)\n assert len(y) == len(estimated)\n\n mean_y = y.mean()\n res_ss = ((y - estimated) ** 2).sum()\n tot_ss = ((y - mean_y) ** 2).sum()\n r2 = 1 - (res_ss / tot_ss)\n\n return r2", "title": "" }, { "docid": "b456a2ea4c88c25b77d1d66a84d7c356", "score": "0.62168825", "text": "def value(self) -> float:\n if self.norm:\n return math.sqrt(self.mean_square_error / self.norm)\n else:\n return None", "title": "" }, { "docid": "b426b0de9ff6aae7a207ec2f6c8122f0", "score": "0.62139463", "text": "def residual(self):\n if self.expr.value is None:\n return None\n return np.abs(np.minimum(self.expr.value, 0))", "title": "" }, { "docid": "524bef68be283e14e46262f2b3b37e6c", "score": "0.62038237", "text": "def rsqrt(self):\n return self.no_params_func(\"rsqrt\", return_response=True)", "title": "" }, { "docid": "524bef68be283e14e46262f2b3b37e6c", "score": "0.62038237", "text": "def rsqrt(self):\n return self.no_params_func(\"rsqrt\", return_response=True)", "title": "" }, { "docid": "3cd276773f9ef1afa8f35d3cb32d4cae", "score": "0.61621875", "text": "def rsq_adj(self):\n return metrics.rsq_adj(self.model, self.x, self.y)", "title": "" }, { "docid": "7a551d4a4b728b3d36361cc2f5d799ef", "score": "0.615304", "text": "def r2_calculation(y_true, y_predicted):\n y_true = y_true.reshape(y_true.shape[0], 1)\n y_predicted = y_predicted.reshape(y_predicted.shape[0], 1)\n input_y_mean = np.mean(y_true, axis=0)\n ss_total = np.sum((y_true - input_y_mean) ** 2)\n ss_residual = np.sum((y_predicted - y_true) ** 2)\n r_square = 1 - (ss_residual / ss_total)\n return r_square", "title": "" }, { "docid": "494a68bd2dc58895ae88b9ec718cfdb6", "score": "0.6138141", "text": "def crl(self, **kwargs):\n try:\n out = np.corrcoef(self.n, self.n+self.residual)[0,1]\n except:\n out = 0.\n\n if np.isfinite(out) is False or self.success is False:\n out = 0.\n return out", "title": "" }, { "docid": "626ac8650d4001a2ac8e2096e81915de", "score": "0.6123889", "text": "def dPR(self):\n return math.sqrt(self.d2(self.p, self.r))", "title": "" }, { "docid": 
"ec9c9d7bc2370b533feec7e66d875cb0", "score": "0.61001736", "text": "def rsq_cv(self, folds = 10):\n return metrics.rsq_cv(self.model, self.x, self.y, folds)", "title": "" }, { "docid": "109128bb07992740166c1265c1ff8a62", "score": "0.6097402", "text": "def r_squared(output, trait, p, dataset):\n\n R2 = 1 - output.redchi / np.var(trait) # Convert reduced chi to R2\n n = len(dataset)\n\n adj_R2 = 1 - (1 - R2) * ((n - 1) / (n - p - 1))\n\n return adj_R2", "title": "" }, { "docid": "02e52af532a14198b532483d61d11439", "score": "0.6087016", "text": "def mse(self):\n\t\txs, ys = self.R.nonzero()\n\t\tpredicted = self.full_matrix()\n\t\terror = 0\n\t\tfor x, y in zip(xs, ys):\n\t\t\terror += pow(self.R[x, y] - predicted[x, y], 2)\n\t\treturn np.sqrt(error)", "title": "" }, { "docid": "f00001f748fff63970a43737489cf7a4", "score": "0.6086425", "text": "def __abs__(self) -> float:\r\n square_sum = sum([val**2.0 for val in self.values])\r\n return sqrt(square_sum)", "title": "" }, { "docid": "3f828529d52da2be95b5a929a524822b", "score": "0.60673237", "text": "def _sklR2(self) :\n if self.yHat is None :\n self._sklPredict()\n self._R2 = metrics.r2_score(self.y, self.yHat)\n return self._R2", "title": "" }, { "docid": "a72f513618d6c908cf2ad3c4a3de537f", "score": "0.6065664", "text": "def get_coefficient(self):\n self.get_header()\n return self.coefficient", "title": "" }, { "docid": "9bb44bddb01dca3b9bd08ad26743d925", "score": "0.6064083", "text": "def get_rsquared_acv(cov, nsample_ratios, get_discrepancy_covariances):\n CF, cf = get_discrepancy_covariances(cov, nsample_ratios)\n if type(cov) == np.ndarray:\n try:\n rsquared = np.dot(cf, np.linalg.solve(CF, cf))/cov[0, 0]\n except:\n return np.array([0.0])*nsample_ratios[0]\n else:\n try:\n rsquared = torch.dot(cf, torch.mv(torch.inverse(CF), cf))/cov[0, 0]\n except:\n #print(\"Error computing inverse of CF\")\n return torch.tensor([0.0], dtype=torch.double)*nsample_ratios[0]\n return rsquared", "title": "" }, { "docid": "826f0db44d70495078d80f6990691465", "score": "0.60559535", "text": "def r_squared(y, estimated):\r\n # TODO\r\n y, estimated = np.array(y), np.array(estimated)\r\n SEE = ((estimated - y)**2).sum()\r\n mMean = y.sum()/float(len(y))\r\n MV = ((mMean - y)**2).sum()\r\n return 1 - SEE/MV", "title": "" }, { "docid": "b905af14728f9741979d0a6981efd778", "score": "0.60468847", "text": "def R(self) -> float:\n\n return self._R", "title": "" }, { "docid": "06d7df0bd13c6b6a80ec8fab1a2fd334", "score": "0.6036403", "text": "def res(self):\n return self._read_cached_property('RES', float)", "title": "" }, { "docid": "7c923b1bae75e2c93451a734c27dc28e", "score": "0.6034592", "text": "def r_squared(data: NDArray, data_p: NDArray) -> Number:\n\n return 1 - residual_sum_of_squares(data, data_p) / total_sum_of_squares(data)", "title": "" }, { "docid": "719a1e9651275db0a6d3eb97e5e93674", "score": "0.6023582", "text": "def calc_r2(trueVal,predVal):\n if len(trueVal) == len(predVal):\n if np.nansum(trueVal) > 0:\n #trueVal_a = np.array(trueVal).ravel()\n #predVal_a = np.array(predVal).ravel()\n #matVal = np.array([trueVal_a,predVal_a]).transpose()\n #matVal = matVal[~np.isnan(matVal).any(1)]\n #matVal = matVal[~np.isnan(matVal).any(1)]\n #trueVal_f = matVal[:,0]\n #predVal_f = matVal[:,1]\n #r2=r2_score(trueVal_f, predVal_f)\n SSR = np.nansum((np.array(trueVal) - np.array(predVal))**2)\n #ybar = np.nanmean(np.array(trueVal))\n #SST = np.nansum((np.array(trueVal)-ybar)**2)\n SST = np.nansum(np.array(trueVal)**2)\n r2 = 1-(SSR/SST)\n else:\n return np.NaN\n 
else:\n print(\"ERROR: number of predicted and measured values must be equal; \\n n(predicted):\"+str(len(predVal))+\"\\n n(measured):\"+str(len(trueVal)))\n sys.exit(-1)\n return r2", "title": "" }, { "docid": "777c882f8bf68b4d694727a27228b07b", "score": "0.6015555", "text": "def get_RMSE(x, y):\n return np.sqrt(get_MSE(x = x, y = y))", "title": "" }, { "docid": "adcc3adc89dfb1c497a15e7d91801e27", "score": "0.5998813", "text": "def Rsquared(y1,y2):\n sum_var = np.sum( (y1-np.mean(y1))**2 )\n sum_res = np.sum( (y1-y2)**2 ) \n return float(1)-float(sum_res)/sum_var", "title": "" }, { "docid": "3cd79e4dec4cecae7783b43aa3a54911", "score": "0.5991861", "text": "def calculate_score_1(self):\n chi2 = 0.\n residual_dicts = self.extrapolate.residual_dicts\n for detector in [self.residuals_source]:\n for axis in self.residuals_variables:\n residual = residual_dicts[detector][axis]\n for value in residual:\n chi2 += value**2/len(residual)\n self.best_guess[\"score\"] = {\"score\":chi2}\n return chi2", "title": "" }, { "docid": "9d3ee0b72c6bc36a3a7deeec3d2731d4", "score": "0.5989096", "text": "def rmse(self):\r\n return np.round(self.mse() ** 0.5, 2)", "title": "" }, { "docid": "9dc17e0f647a491cfc1a5bf5fe025b53", "score": "0.59805596", "text": "def get_regressor_r_score(self):\n return self.regressor_r_score", "title": "" }, { "docid": "ee98ab7ef2bca3fc20e178b0a3672581", "score": "0.59724665", "text": "def get_r_squared(t, dof):\r\n\r\n\tt_squared = t * t\r\n\treturn t_squared / (t_squared + dof)", "title": "" }, { "docid": "ecef1a22303389bc2d5ddf08f61bf471", "score": "0.5969", "text": "def R(self):\n return 287.058", "title": "" }, { "docid": "7c159535025cd33f823e276f2668fec5", "score": "0.5957302", "text": "def sqrt(self):\n return self.__pow__(0.5)", "title": "" }, { "docid": "61dcd90bf97a18e055ae189701f78fce", "score": "0.595717", "text": "def calculateRmse(self, actualData, predictedData):\n\n mse = mean_squared_error(actualData, predictedData)\n return sqrt(mse)", "title": "" }, { "docid": "a7c08e4e4e3fbb815b6e55a4341b31dd", "score": "0.59528166", "text": "def mse(self):\n xs, ys = self.R.nonzero()\n predicted = self.full_matrix()\n error = 0\n for x, y in zip(xs, ys):\n error += pow(self.R[x, y] - predicted[x, y], 2)\n return np.sqrt(error)", "title": "" }, { "docid": "a7c08e4e4e3fbb815b6e55a4341b31dd", "score": "0.59528166", "text": "def mse(self):\n xs, ys = self.R.nonzero()\n predicted = self.full_matrix()\n error = 0\n for x, y in zip(xs, ys):\n error += pow(self.R[x, y] - predicted[x, y], 2)\n return np.sqrt(error)", "title": "" }, { "docid": "3cac7c33e810869c3e8d9c21c394497e", "score": "0.5952339", "text": "def fric_coeff(self):\n rn = self.reynolds_no()\n cf = 0.075/((math.log10(rn) - 2)**2)\n print(\"rn : %0.3f, cf : %0.5f\"%(rn, cf))\n return cf", "title": "" }, { "docid": "36081732ecdca0f90e14dd0be20999d4", "score": "0.59481925", "text": "def value(self):\n nd1 = self.nd1()\n nd2 = self.nd2()\n f1 = nd1 * self.s \n f2 = nd2 * self.k * math.e ** (-self.r * self.t)\n return f1 - f2", "title": "" }, { "docid": "1d56f0170a72d65338c32f2b09b1628d", "score": "0.5946827", "text": "def celly(self) -> float:\n return self._obj.res[1]", "title": "" }, { "docid": "313f271c189ca148cdfdb5059fd68a6b", "score": "0.59432983", "text": "def score(self, x_test, y_test):\n\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n\n return self.mse_r2_var(x_test, y_test)[0]\n\n 
#######################################################################\n # ** END OF YOUR CODE **\n #######################################################################", "title": "" }, { "docid": "17fd11e49da5b65aa23b45d974467663", "score": "0.5939473", "text": "def _eval_rmse(self, mse: float) -> float:\n return np.sqrt(mse)", "title": "" }, { "docid": "9db9a6b0886cb9df3f9323a1380755b4", "score": "0.5928769", "text": "def compute(self):\n return self.matthews_corr_coef / self.total", "title": "" }, { "docid": "26f3d876fede6c4846209d5b05b45898", "score": "0.5911921", "text": "def resnik(self) -> float:\n return self._resnik_mica[0]", "title": "" }, { "docid": "aa9ed7c890e9eb76ddb9493050cdc89f", "score": "0.58806205", "text": "def pseudo_r2(self):\n y_reg = self.yvals['model'][self.start_ind:]\n y_dat = self.y[self.start_ind:]\n \n SSres = ((y_dat - y_reg)**2).sum()\n SStot = ((y_dat - y_dat.mean())**2).sum()\n return 1 - SSres/SStot", "title": "" }, { "docid": "ca485874773a91f1db3a768cd4bcb8c5", "score": "0.5876993", "text": "def getvalue(self):\n ce = self.solver.getValue(self.cvc_expr)\n rational = ce.getConstRational()\n numerator = int(rational.getNumerator().toString())\n denominator = int(rational.getDenominator().toString())\n if rational.isIntegral():\n return numerator // denominator\n else:\n return numerator / denominator", "title": "" }, { "docid": "0488a2f3e858d856f51ce63eaef6318f", "score": "0.5868227", "text": "def residual(self):\n return self.product.length - self.sumCutLength()", "title": "" }, { "docid": "3c00867b058eddd432fbdfd807738401", "score": "0.5863317", "text": "def get_length_sqrd(self) -> float:\r\n return self.x ** 2 + self.y ** 2", "title": "" }, { "docid": "6c69fa90b57b74aa35702e4e37e5355b", "score": "0.5854565", "text": "def var(self):\n return self._sd ** 2", "title": "" }, { "docid": "f3b70c55c0923728b745c9897a3dcbd5", "score": "0.58493006", "text": "def determinant(self):\n return float()", "title": "" }, { "docid": "f3b70c55c0923728b745c9897a3dcbd5", "score": "0.58493006", "text": "def determinant(self):\n return float()", "title": "" }, { "docid": "f63e51efcc05eae56a956c43d59dfb3b", "score": "0.5837071", "text": "def value(self) -> float:\n if self.random_mean_square:\n return (self.data_mean - self.random_mean) / math.sqrt(self.random_mean_square)\n else:\n return None", "title": "" }, { "docid": "cae342d34afaa6e36b104e7176b71ee8", "score": "0.5828752", "text": "def evaluate_predicated(test, predict):\n rmse = math.sqrt(((test - predict) ** 2).sum()/test.shape[0])\n corr = np.corrcoef(predict, y=test)\n return rmse, corr[0, 1]", "title": "" }, { "docid": "b36ce0f0d0cd1cb57a95c9d74ae6cfd5", "score": "0.5828699", "text": "def residual_risk(self, benchmark, rf_tick=\"$TNX\", tick=None):\r\n return np.std(self.residual_return(benchmark=benchmark, rf_tick=rf_tick, tick=tick))", "title": "" }, { "docid": "ef2af29a8344c05730117cd04063d73b", "score": "0.58236355", "text": "def r2score(self, X, y):\n y_predict = self.predict(X)\n r2score = 1 - ((np.sum((y - y_predict)**2))/(np.sum((y - np.mean(y))**2)))\n return r2score", "title": "" }, { "docid": "e3cdade95861a35c93c36e79715dadad", "score": "0.58210474", "text": "def reciprocal(self):\n return self.no_params_func(\"reciprocal\", return_response=True)", "title": "" }, { "docid": "e3cdade95861a35c93c36e79715dadad", "score": "0.58210474", "text": "def reciprocal(self):\n return self.no_params_func(\"reciprocal\", return_response=True)", "title": "" }, { "docid": "bdb8f34920b7d6fc4502657c8a74dc56", "score": 
"0.58178693", "text": "def evaluate(self) -> float:\n # TODO: finish\n return self.evaluate_by_material()", "title": "" }, { "docid": "8cdc7bedf7ab13e5e1b9a5253eec9f3a", "score": "0.5817181", "text": "def squared_error(actual: np.array, predicted: np.array) -> np.array:\n\n return (actual - predicted)**2", "title": "" }, { "docid": "716dd69a16337cf6a6a1312f095a1860", "score": "0.5813695", "text": "def p_R(q):\n return float(q[0] * q[4]**(-1))", "title": "" }, { "docid": "716dd69a16337cf6a6a1312f095a1860", "score": "0.5813695", "text": "def p_R(q):\n return float(q[0] * q[4]**(-1))", "title": "" }, { "docid": "b3d1b811a585c6a6e55d5b65e05d86d9", "score": "0.5789468", "text": "def residuals(self,Y): \n\n return (Y-np.dot(self._create_B(Y),self._create_Z(Y)))", "title": "" }, { "docid": "1bd17615ca11e54e77f554cf652f1124", "score": "0.5788133", "text": "def result(self):\r\n return self.best.get() + (self.counteval, int(self.counteval/self.lam), self.xmean)", "title": "" }, { "docid": "261f8adc854ac8b58567f44b4671a1ed", "score": "0.57801414", "text": "def rrse(actual: np.ndarray, predicted: np.ndarray):\n return np.sqrt(np.sum(np.square(actual - predicted)) / np.sum(np.square(actual - np.mean(actual))))", "title": "" }, { "docid": "6209fb18bf8bc817b044392b8f96df57", "score": "0.57765824", "text": "def rmse(self,output):\r\n y_pred, y = output\r\n squared_errors = torch.pow(torch.abs(y_pred - y.view_as(y_pred)), 2)\r\n self._sum_of_errors_rmse += torch.sum(squared_errors).item()\r\n self._num_examples_rmse += y.shape[0]\r\n\r\n if self._num_examples_rmse == 0:\r\n raise NotComputableError(\"RootMeanSquaredError must have at least one example before it can be computed.\")\r\n return round(math.sqrt(self._sum_of_errors_rmse / self._num_examples_rmse), 3)", "title": "" }, { "docid": "847ad5512ca77c7387cafad39c12e77f", "score": "0.57648116", "text": "def getResidual(self):\n res = self.u - self.u_Dt\n return res", "title": "" }, { "docid": "8e1d4a2121f9a02e22521424adf03839", "score": "0.5759966", "text": "def norm_squared(self):\n absolutes = Array(self.length, self.value_type.value_type)\n\n if self.value_type.value_type == sbitfix:\n @for_range(absolutes.length)\n def _(i):\n absolutes[i] = self[i].square_abs()\n else:\n @for_range_parallel(absolutes.length, absolutes.length)\n def _(i):\n # absolutes[regint(i)] = self[regint(i)].square_abs()\n absolutes[i] = self[i].square_abs()\n norm_squared = sum(absolutes)\n return norm_squared", "title": "" }, { "docid": "60629db67c0b2148d63acfd33d9b3eda", "score": "0.57557845", "text": "def rsqrt_(self):\n return self.no_params_func(\"rsqrt_\")", "title": "" }, { "docid": "917a33ed003ead3eba237bb949d75f32", "score": "0.5755451", "text": "def get_regressor_r2_score(self):\n return self.regressor_r2_score", "title": "" }, { "docid": "aff01260d5ef413bc9f751ed7f636f06", "score": "0.57535493", "text": "def var(self):\n if self.n <= 1:\n return np.zeros(self.shape)\n\n val = np.array((self._Ex2 - (self._Ex*self._Ex)/np.float64(self.n)) / np.float64(self.n-1.))\n\n return val", "title": "" }, { "docid": "56b0ae522b33dd52921987f8ba7c8fe5", "score": "0.57487357", "text": "def c(self):\n return self._scalar", "title": "" }, { "docid": "83147ffa4db817baf16f1d1ce43cbfbc", "score": "0.5747919", "text": "def result(self):\n return self.best.get() + (self.counteval,\n int(self.counteval / self.lam),\n self.xmean)", "title": "" }, { "docid": "bf486d9522d6b294b4e44de6ab89f51e", "score": "0.5743605", "text": "def __call__(self):\n x, _ = self.y()\n error = self.r() - x\n 
return (error,)", "title": "" }, { "docid": "7d927b3b8c6f4bebfae631184b58aa36", "score": "0.57410794", "text": "def _manualR2(self) :\n if self.yHat is None :\n self._manualPredict()\n yMean = (1.0 / self.y.size) * np.sum(self.y)\n self._R2 = 1.0 - np.sum((self.yHat - self.y)**2) / np.sum((self.y - yMean)**2)\n return self._R2", "title": "" }, { "docid": "2d901eee5034fe3823a869ad6ac3099a", "score": "0.5732048", "text": "def r2(self):\n return norm(self.p2 - self.C)", "title": "" }, { "docid": "5181a7366d2e9dbd030365a2fcfa671f", "score": "0.5729349", "text": "def vc(self,r):\n return numpy.sqrt(self.vc2(r))", "title": "" }, { "docid": "bd05aa7058e5f4f0e55db99947635e10", "score": "0.5723851", "text": "def frobenius_norm(self):\n # check if A S and Y exist\n if hasattr(self,'A') and hasattr(self,'S') and hasattr(self,'Y'):\n if scipy.sparse.issparse(self.data):\n tmp = self.data[:,:] - ((self.A * self.S) * self.Y)\n tmp = tmp.multiply(tmp).sum()\n #err = tmp\n err = np.sqrt(tmp)\n else:\n #err = np.sum((self.data[:,:] - np.dot(np.dot(self.A, self.S), self.Y))**2 )\n err = np.sqrt( np.sum((self.data[:,:] - np.dot(np.dot(self.A, self.S), self.Y))**2 ))\n else:\n err = None\n\n return err", "title": "" }, { "docid": "5d4196a0a931ae5e46f7eb5a1714dce7", "score": "0.5720659", "text": "def evaluate(self, X_test, y_test):\n y_pred = self.run().predict(X_test)\n rmse = compute_rmse(y_pred, y_test)\n # self.mlflow_log_param('model', \"LinearRegression\")\n # self.mlflow_log_param('rmse', rmse)\n return rmse", "title": "" }, { "docid": "eaed03f90f4b886d3282fe514d521ce0", "score": "0.57184035", "text": "def risk(self):\n return 1 - self.accuracy()", "title": "" }, { "docid": "a41d5423ccc1a20ec70e824f5a9d78f1", "score": "0.57113546", "text": "def calc_R(xc, yc):\n return sqrt((x-xc)**2 + (y-yc)**2)", "title": "" }, { "docid": "95de0220335f98921c8fe0faec44a817", "score": "0.56980366", "text": "def r1(self):\n return norm(self.p1 - self.C)", "title": "" }, { "docid": "df2f598d1654e392a8aa43f1c68952c7", "score": "0.5687673", "text": "def rmse(y_true, y_pred):\n res_sq = (y_pred - y_true) ** 2\n score = np.sqrt(sum(res_sq/len(y_true)))\n return score", "title": "" }, { "docid": "ec641346c92c4108ec1e747da8264b6e", "score": "0.56868243", "text": "def __abs__(self):\n return math.sqrt(self*self)", "title": "" }, { "docid": "d49c8293ce8ff24a5b5583f8b5b79b40", "score": "0.56856936", "text": "def CoefDetermination(ys, res):\n ybar, vary = thinkstats.MeanVar(ys)\n resbar, varres = thinkstats.MeanVar(res)\n return 1 - varres / vary", "title": "" }, { "docid": "5d83586749c3c8b4ed56ccfe9033d895", "score": "0.56839424", "text": "def dGm_correction(self):\n return settings.RT * self._GetSumCoeff() * log(1e-3)", "title": "" }, { "docid": "0b7408c9c70b90166ab20cd99212040d", "score": "0.5680366", "text": "def residuals(params,f_model,x_values,y_values):\n\n d_values = y_values - f_model(x_values,params)\n return d_values", "title": "" } ]
2e27c39c5690eadd8bfa2d8d5fe35f2e
Given a username, sign in the user.
[ { "docid": "adaa9822983a2910836001f830cdc2cc", "score": "0.77780646", "text": "def sign_in(self, username):\n if self.url == 'about:blank':\n # We need a page loaded in order to set an authentication cookie.\n self.visit('/')\n # This is duplicated from User.sign_in to work with Splinter's cookie API.\n token = user.uuid.uuid4().hex\n expires = user.utcnow() + user.SESSION_TIMEOUT\n P(username).update_session(token, expires)\n self.cookies.add({user.SESSION: token})", "title": "" } ]
[ { "docid": "386a8dc8087d28e7989fe62d2c220f31", "score": "0.7601162", "text": "def login(self, username, password):\n self._set_username(username=username)\n self._set_password(password=password)\n self._sign_in()", "title": "" }, { "docid": "c5fae1aed819e10ecaea99aa83104ab4", "score": "0.7356293", "text": "def login_as(self, username):\n self.assert_(self.client.login(username=username, password=username),\n \"Login as \" + username + \" failed\")", "title": "" }, { "docid": "b0238089df44d328aea5b9b98980a495", "score": "0.7245241", "text": "def login(username, password):\n auth.login(username, password)", "title": "" }, { "docid": "bdbe0c04ee258efaf1d762daf6ca6807", "score": "0.70926285", "text": "def signin():\r\n if current_user.is_authenticated:\r\n return \"You need to logout first\"\r\n args = request.args\r\n password = args.get(\"password\")\r\n username = args.get(\"username\")\r\n try:\r\n hashed_password = bcrypt.generate_password_hash(password).decode('utf8')\r\n user = User(username=username, password=hashed_password)\r\n user_check = User.query.filter_by(username=username).first()\r\n if user_check:\r\n return \"Existing username, please choose a different one.\"\r\n else:\r\n db.session.add(user)\r\n db.session.commit()\r\n return \"Success sign in!\"\r\n except:\r\n return \"Please sign in\"", "title": "" }, { "docid": "ec950f2e8cf5f5bf7a70c99b882d0db3", "score": "0.6877121", "text": "def do_login(username, password):\n user = get_user(username=username)\n if user is None: raise ValueError\n if not check_password_hash(user[\"password\"], password):\n raise ValueError\n if user[\"status\"] != constants.ENABLED:\n raise ValueError\n flask.session[\"username\"] = user[\"username\"]\n flask.session.permanent = True\n utils.get_logger().info(f\"logged in {user['username']}\")", "title": "" }, { "docid": "27029794cbd6cdb8e52e96f6b30e3b46", "score": "0.68768674", "text": "def login_the_user(self, username='testusername', password='testpassword'):\n self.go_to_url_name('authentication:login')\n self.write_in_id('id_username', username)\n self.write_in_id('id_password', password)\n self.click_on_id('login_form_btn')", "title": "" }, { "docid": "ba53b49763a312fabcaf8eae3e17bc20", "score": "0.68750507", "text": "def sign_in(self, username, password, exit_on_failure=True):\n sel = self.selenium\n login_url = urljoin(self.home_url, 'user')\n sel.go_to(login_url)\n sel.input_text('id=edit-name', username)\n sel.input_password('id=edit-pass', password)\n sel.click_button('id=edit-submit')\n\n # Checking...\n with FailureManager(exit_on_failure, \"Authentication failed\"):\n sel.page_should_contain_link(\"xpath=//a[contains(@href, '/user/logout')]\")", "title": "" }, { "docid": "e2c6f0ba18fe2fc71575a2e874d4b392", "score": "0.68492544", "text": "def loginUser(self, username, password):\n self.driver.get(self.base_url + '/example3/login/')\n self.find(id='username').send_keys(username)\n self.find(id='password').send_keys(password)\n self.find(id='submit').click()", "title": "" }, { "docid": "8e75286fae8caedc540eb0ade2d4d17e", "score": "0.6846441", "text": "def login(self, username, password, optional=None):\n self.http.authenticate(username, password, optional)", "title": "" }, { "docid": "8e75286fae8caedc540eb0ade2d4d17e", "score": "0.6846441", "text": "def login(self, username, password, optional=None):\n self.http.authenticate(username, password, optional)", "title": "" }, { "docid": "0bfae1194156ddd139471064260ee26f", "score": "0.6827666", "text": "def login(self, username: str, 
password: str):\n self.service.login(username=username, password=password)", "title": "" }, { "docid": "108fba5d87094d89eaa30f558f0c31d9", "score": "0.6827072", "text": "def login_user(username, password): # noqa: E501\n return 'do some magic!'", "title": "" }, { "docid": "03b8ead030bc62a96764a6375bd563d3", "score": "0.6822801", "text": "def login_user(self, username, password):\n login = {\n \"username\": username,\n \"password\": password\n }\n return self.client.post(\n '/api/v1/auth/login',\n content_type=\"application/json\",\n data=json.dumps(login)\n )", "title": "" }, { "docid": "48c180f75858027e2448e2e4a458a145", "score": "0.6773298", "text": "async def login_user(self, request):\n data = await request.post()\n username = data.get('username')\n password = data.get('password')\n if self.ldap_config:\n verified = await self._ldap_login(username, password)\n else:\n verified = await self._check_credentials(request.app.user_map, username, password)\n\n if verified:\n self.log.debug('%s logging in:' % username)\n response = web.HTTPFound('/')\n await remember(request, response, username)\n raise response\n self.log.debug('%s failed login attempt: ' % username)\n raise web.HTTPFound('/login')", "title": "" }, { "docid": "198b210d53109878f5acad8c2f66ab8c", "score": "0.6741499", "text": "def submit_login():\n username_test = request.form.get('username', '').lower()\n password_test = request.form.get('password', '')\n user_to_login = User(username=username_test)\n if user_to_login.authenticate(username_test, password_test):\n session['username'] = user_to_login.username\n return redirect(url_for('home'))\n flash(\"Incorrect username or password\")\n return redirect(url_for('login'))", "title": "" }, { "docid": "2617cf4ab6a1aec5737e98320f16ca9a", "score": "0.6728161", "text": "def username(self, username: str):\n self._invalid_credentials = False\n self.__credentials[\"username\"] = username", "title": "" }, { "docid": "48bc3c530e1370d055112e395d25eafc", "score": "0.6708446", "text": "def signin():\n error = None\n form_data = None\n # get request.form data\n try:\n required_keys = ('email', 'password')\n form_data = controller.process_form_data(dict(request.form), *required_keys)\n except (AttributeError, ValueError):\n error = \"Invalid form input\"\n \n if form_data:\n try:\n user = db.get_user_by_email(form_data['email'])\n if user is None:\n raise KeyError('User non-existent')\n except KeyError:\n error = \"User does not exist\"\n else:\n # if user exists, check against the saved password\n if user.password == form_data['password']:\n # if it is the same, save username to session\n controller.add_user_to_session(user.key)\n flash('Login successful')\n return redirect(url_for('orders_list',\n user_key=user.key))\n else:\n error = \"Invalid password or username\"\n if error:\n flash(error)\n return redirect(url_for('index'))", "title": "" }, { "docid": "b0638b4fe91404b89ace2722b05c9eb4", "score": "0.6697413", "text": "def login(self):\r\n if not hasattr(self, 'username'):\r\n self.username = 'fred'\r\n if not hasattr(self, 'password'):\r\n self.password = 'bob'\r\n if not User.objects.filter(username=self.username).exists():\r\n User.objects.create_user(self.username, password=self.password)\r\n self.user = User.objects.get(username=self.username)\r\n logged_in = self.client.login(username=self.username,\r\n password=self.password)\r\n if not logged_in:\r\n self.fail(\"LOGIN failed\")", "title": "" }, { "docid": "3501d6c88fd16bea5daaac24292389fe", "score": "0.6681337", "text": "def 
log_in(self, username, password):\n username_field = self.driver.find_element_by_id(self.USERNAME_FIELD_ID)\n password_field = self.driver.find_element_by_id(self.PASSWORD_FIELD_ID)\n log_in_button = self.driver.find_element_by_id(self.LOG_IN_BUTTON_ID)\n\n username_field.click()\n sleep(self.KEYBOARD_ANIMATION_DELAY)\n username_field.send_keys(username)\n\n password_field.click()\n sleep(self.KEYBOARD_ANIMATION_DELAY)\n password_field.send_keys(password)\n\n log_in_button.click()", "title": "" }, { "docid": "f4ea3f0398aa88c1edb22ebbb9329b38", "score": "0.6673321", "text": "def login(self, username, password):\n return self.app.post(\"/login\", data={\"username\": username, \"password\": password})", "title": "" }, { "docid": "f4ea3f0398aa88c1edb22ebbb9329b38", "score": "0.6673321", "text": "def login(self, username, password):\n return self.app.post(\"/login\", data={\"username\": username, \"password\": password})", "title": "" }, { "docid": "8de78ba8d6b9207edf6fdc3058b52780", "score": "0.667124", "text": "def login(self, username):\n if not self.browser:\n assert RuntimeError(\"Cant use browser methods \"\n \"without specifying a site type\")\n return self.automation.login(username, self.keys[username].key)", "title": "" }, { "docid": "e9d513884850610ccabf17a7fe33c99b", "score": "0.66253185", "text": "def login(self, username, password):\n return self.app.post('/login', data=dict(\n username=username,\n password=password\n ), follow_redirects=True)", "title": "" }, { "docid": "3740dd5b77c24810fe6aaf86465c74ce", "score": "0.66232705", "text": "def login(self, username, password):\n resp = self.client.post('/auth/login/?',\n {'username': username, 'password': password})\n if 'error' in resp['authentication']:\n raise AuthenticationFailed('username or password is incorrect')\n # the API returns what headers should be set\n self.client.add_headers(resp['authentication']['parameters']['header'])\n self.logged_in = True", "title": "" }, { "docid": "0cd2a8160a146ad9a2f14bd952716937", "score": "0.659948", "text": "def login(self, username=None, password=None):\n if username is None:\n username = self.read_username_from_user_input()\n if password is None:\n password = self.read_password_from_user_input()\n\n # TODO: implement timeout\n self._browser.open(self._url_login)\n\n # Select the signup form\n self._browser.select_form('form[action=\"/' + self._name + '/profil/loginaction\"]')\n\n # Fill it out and submit\n self._browser['kennung'] = username\n self._browser['passwort'] = password\n self._browser.submit_selected()\n\n if self._browser.get_url() == self._url: # redirection to group page successful?\n return True\n else:\n return False", "title": "" }, { "docid": "3daf82221f1f85e92234750bc4162447", "score": "0.6591645", "text": "def sign_in_standard(self, username, password):\n body = self.SIGNIN_STANDARD_BODY % (username, password)\n response = requests.post(self.signin_url, data=body)\n return self._process_sign_in_result(response)", "title": "" }, { "docid": "3ffa57fde91017387aa48dbd85fd46b7", "score": "0.65669054", "text": "def login(self, user, password):\n pass", "title": "" }, { "docid": "c8a237349e17f370c9be05f8193b732a", "score": "0.6522416", "text": "def login(self, username, password):\n if self.authenticate_user(username, password):\n return \"Login successful\"\n else:\n return \"Login unsuccessful\"", "title": "" }, { "docid": "083ff04cb12d5b8df872bf63919ce70b", "score": "0.65044355", "text": "def user_sign_in(connection: ServerConnection, user_name: str) -> None:\n 
_write_line(connection, \"I32CFSP_HELLO\" + \" \" + user_name)\n _outcome_(connection, \"WELCOME\" + \" \" + user_name)", "title": "" }, { "docid": "4c252c54001ec6a5d013b028e37d2beb", "score": "0.6500778", "text": "def enter_username(self, username):\n self.se2lib.input_text(self.locator.username, username)", "title": "" }, { "docid": "85880a8c38618d1c9d94852a7077daca", "score": "0.6483652", "text": "def login(self, username, password):\n self._unset_auth()\n r = self._request('login', {\n 'username': username,\n 'password': password\n })\n result = r.json()\n if 'auth_token' in result:\n self.auth_token = result['auth_token']\n if 'username' in result:\n self.username = username\n return result", "title": "" }, { "docid": "0cf82748c3a2aca9d00b66ba37891d99", "score": "0.64766437", "text": "def login_user(self):\n self.client.logout()\n self.client.login(username='testuser', password='12345')", "title": "" }, { "docid": "101c43a30bcf86ab4c82bd6a24a663c6", "score": "0.64733917", "text": "def sign_in(username, password):\n # If already logged in, don't log in again\n global global_session\n if global_session is not None:\n return True\n # Create Non-JS browser\n browser = RoboBrowser(parser='html.parser')\n # Open login page\n browser.open('https://doh.arcabc.ca/user/login')\n # Get the login form\n form = browser.get_form(id='user-login')\n # Set the username & password\n form['name'].value = username\n form['pass'].value = password\n # Submit the form\n browser.submit_form(form)\n # If successfully signed in\n h1 = browser.find(class_='page__title')\n if h1.text == username:\n # Set the global session\n global_session = browser.session\n return True\n else:\n return False", "title": "" }, { "docid": "26921c3f46860e86c3606bcb1011f33c", "score": "0.6456115", "text": "def login(self):\n get_username = self.username_entry_var.get()\n get_password = self.password_entry_var.get()\n\n self.login_for_use(get_username, get_password)", "title": "" }, { "docid": "8c92e5d75568f88e33abe4aba943847d", "score": "0.6443906", "text": "def login_user(username, password):\n\n check_user = Attributes.authenticate_user(username, password)\n return check_user", "title": "" }, { "docid": "1b8afe97d6a50dbf7d1d39879c25de32", "score": "0.64202285", "text": "def login():\n user = User.get_by_email_or_username(r.json['username'].lower())\n if user is not None and user.check_password(r.json['password']):\n login_user(user)\n return jsonify(marshal_current_user(user))\n return form_error('Invalid username/password.')", "title": "" }, { "docid": "40c8f0111e3de13308e5ec4810ea2e3d", "score": "0.64131665", "text": "def login_account():\n username = request.form.get('name')\n password = request.form.get('password')\n\n if username and password:\n user = db.execute('SELECT * FROM users WHERE username = :username AND password = :password', {'username': username, 'password': password}).fetchone()\n if user:\n session['user_id'] = user[0]\n return render_template('success.html', username=username)\n return render_template('error_login.html', message='username or password is incorrect.')\n else:\n return render_template('error_login.html', message='Enter a username and password.')", "title": "" }, { "docid": "8133ff8757a2a83a3888e3a178da78f2", "score": "0.641015", "text": "def login(client, username, password):\n data = {\n 'username': username,\n 'password': password\n }\n return client.post('/login', data=data, follow_redirects=True)", "title": "" }, { "docid": "7894d83f330e1bb8ec6f38623d14763a", "score": "0.64075035", "text": 
"def process_sign_in(request):\n uname = request.POST['username']\n passwd = request.POST['password']\n user = auth.authenticate(username=uname, password=passwd)\n if user is not None and user.is_active:\n auth.login(request, user)\n s = Session()\n s.user = user\n s.login_timestamp = datetime.datetime.now()\n s.save() \n next = \"/account/\"\n log.info(\"%s logged in succesfully.\"%(user)) \n return HttpResponseRedirect(next) \n else:\n messages.error(request, \"Incorrect username or password!!!\")\n log.error(\"Login failed for user: %s.\"%(uname)) \n return HttpResponseRedirect(reverse('home'))", "title": "" }, { "docid": "7aac08a8dff4c8fafe09cff3eeece814", "score": "0.64017224", "text": "def signin():\n # request form data\n form = SignInForm(request.form)\n # validate form\n if request.method == \"POST\" and form.validate():\n # check if username exists in database\n user_name = request.form.get(\"username\").lower()\n existing_user = user_get(user_name)\n if existing_user:\n entered_password = form.password.data\n if user_password_check(user_name, entered_password, existing_user):\n return redirect(url_for(\"home\"))\n else:\n return redirect(url_for(\"userauth.signin\"))\n else:\n return redirect(url_for(\"userauth.signin\"))\n\n return render_template(\"signin.html\", form=form)", "title": "" }, { "docid": "a26e6e7922b882db3d984a353dd6ae72", "score": "0.6393919", "text": "def log_in(self, user=None):\n return self.client.login(\n username=(user or self.student).username,\n password=self.password,\n )", "title": "" }, { "docid": "bff1e032dcd4dc841197a89799571533", "score": "0.63717514", "text": "def login():\n\n if \"username\" in session:\n return redirect(f'/users/{session[\"username\"]}')\n\n form = LoginUserForm()\n if form.validate_on_submit():\n data = {k: v for k, v in form.data.items() if k != \"csrf_token\"}\n user = User.authenticate(**data)\n\n if user:\n flash(f\"Welcome back {user.username}!\", \"info\")\n session[\"username\"] = user.username\n return redirect(f\"/users/{user.username}\")\n else:\n form.username.errors = [\"Invalid username or password\"]\n\n return render_template(\"login.html\", form=form)", "title": "" }, { "docid": "47d69fc0876a0c48b4f6761e4099a2cd", "score": "0.63702494", "text": "def login(self):\n self._username = input(\"Username:\")\n self._password = getpass.getpass(\"Password:\")", "title": "" }, { "docid": "a03008e5b9665f3b55235d1ba516affb", "score": "0.6369439", "text": "def ncsa_login():\n import getpass\n output(\"Using %s for username.\" % username)\n username = getpass.getuser()\n if password is None:\n\tpassword = getpass.getpass()\n ncsa_wireless_login(username, password)", "title": "" }, { "docid": "5bb7284462d49e1a428ef0fadea3030e", "score": "0.63649523", "text": "def login(args):\n _check_credentials(args)\n _save_credentials(args.edi, args.token)\n\n LOG.info('Success! 
Credentials were saved.')", "title": "" }, { "docid": "0b408ecd709600939914c2a5909afa83", "score": "0.6359406", "text": "def login():\n\tform = LoginForm()\n\tif form.validate_on_submit():\n\t\tuser = User.query.filter_by(username=form.username.data).first()\n\t\t\n\t\tlogin_user(user, remember=form.remember.data)\n\t\treturn redirect(url_for('user.profile', some_name=user.username))\n\t\t\n\treturn render_template('user/login.html', form = form)", "title": "" }, { "docid": "fa71cf235d48adf3623db2cc60e161f9", "score": "0.6352569", "text": "def login_user(client, create_user):\n user = create_user\n\n response = client.post(\n reverse(\"log_in\"), data={\"email\": user.email, \"password\": \"pAssw0rd!\"}\n )\n\n client.login(email=user.email, password=\"pAssw0rd!\")\n\n return response", "title": "" }, { "docid": "780f8de7501b3c6cf32b91b1b2b80903", "score": "0.6346898", "text": "def logging_in():\n print('Logging in...')\n while True:\n username = input('username: ')\n password = getpass.getpass('password: ')\n try:\n authenticator.login(username, password)\n except (InvalidUsername, InvalidPassword) as er:\n print(er)\n login = input('maybe you don\\'t have an account\\n'\n 'Do you want to sign up now?\\n'\n '(enter yes if you want, leave field empty '\n 'otherwise)\\n')\n if login == 'yes':\n create_user()\n continue\n else:\n break", "title": "" }, { "docid": "79f30c2b345af327df64441c8d02b9ea", "score": "0.63243645", "text": "def login():\n if g.user:\n return redirect(url_for('timeline'))\n error = None\n if request.method == 'POST':\n user = query_db('''select * from user where\n username = ?''', [request.form['username']], one=True)\n if user is None:\n error = 'Invalid username'\n elif not check_password_hash(user['pw_hash'],\n request.form['password']):\n error = 'Invalid password'\n else:\n flash('You were logged in')\n session['user_id'] = user['user_id']\n return redirect(url_for('timeline'))\n return render_template('login.html', error=error)", "title": "" }, { "docid": "8e5368fe3065d146f1a057f0d1cbb31c", "score": "0.63217384", "text": "def login():\n if \"username\" in session:\n return redirect(f\"/users/{session['username']}\")\n\n form = LoginForm()\n\n if form.validate_on_submit():\n username = form.username.data\n password = form.password.data\n\n user = User.authenticate(username, password)\n if user:\n session['username'] = user.username\n return redirect(f\"/users/{user.username}\")\n form.username.errors = [\"Invalid Input. 
Try Again\"]\n return render_template(\"users/login.html\", form=form)\n\n return render_template(\"users/login.html\", form=form)", "title": "" }, { "docid": "ad786ca25e50228037212ced42084b7f", "score": "0.6318838", "text": "def login(username, password, cmd_args=None):\n r = Result(\"login\")\n r.add_action(oc_action(cur_context(), \"login\", cmd_args=['-u', username, '-p', password, cmd_args]))\n r.fail_if('Error when trying to login')\n return True", "title": "" }, { "docid": "8d2fe9abb727d6c215bd5ac37dc37ae6", "score": "0.6314192", "text": "def test_login_with_username(client, user_A):\n data = {\"username\": user_A.username, \"password\": user_A.password}\n\n response = client.post(\"/account/login/\", data)\n\n assert response.status_code == 200", "title": "" }, { "docid": "0a3e898ece2297d95d1d3ae3a5bbe130", "score": "0.631078", "text": "def login(self, user):\n token = os.urandom(16).encode('hex')\n self.redis.set('session:%s' % token, json.dumps({\n 'id': user[0],\n 'first_name': user[1],\n 'last_name': user[2],\n 'email': user[3]\n }), Account.SESSION_EXPIRE)\n self.set_secure_cookie('session', token)", "title": "" }, { "docid": "71f0a408ae2be37b7e53758a25afe97e", "score": "0.6308988", "text": "def login(self, username, password):\n\n self.session = requests.Session()\n\n if not username or not password:\n raise Exception(\"Cannot log in, need both username and password\")\n\n # Get CSRF token for login\n # [todo] Find this properly with beautifulsoup or something\n resp = self.session.get(self.base_url + '/signin', headers=self.headers)\n match = re.search(r'input value=\"(.+?)\" name=\"_csrf\"', resp.text)\n if match:\n csrf_token = match.group(1)\n else:\n raise Exception(\"Couldn't get csrf token\")\n\n auth = {\n 'username': username,\n 'password': password,\n '_csrf': csrf_token,\n }\n\n # log in\n resp = self.session.post('https://bank.simple.com/signin',\n headers=self.headers,\n data=auth)\n\n if resp.status_code not in range(200, 400) or 'form id=\"login\"' in resp.text:\n self.session = None # log in failed, clobber session\n\n return self.session is not None", "title": "" }, { "docid": "99bee85d616cac9b08da888b556d01ec", "score": "0.6300775", "text": "def login(username, password, registry):\n click.echo(login_client_func(username, password, registry))", "title": "" }, { "docid": "6c0cc48b77c80fca3aac2e558c735dca", "score": "0.6297087", "text": "def login():\n form = LoginForm()\n\n if form.validate_on_submit():\n user = User.authenticate(form.username.data,\n form.password.data)\n\n if user:\n do_login(user)\n flash(f\"Hello, {user.username}!\", \"success\")\n return redirect(\"/q\")\n\n flash(\"Invalid credentials.\", 'danger')\n\n return render_template('userLoginSignupForm/login.html', form=form)", "title": "" }, { "docid": "cdda07abeeb4e17c84c1a65a011e70f7", "score": "0.62922907", "text": "def login(self, username, password):\n return self._command_and_check('login', username, password, unpack=True)", "title": "" }, { "docid": "0e63c137801504eb31426df460c29a58", "score": "0.6283022", "text": "def login(self, username, password):\n with FlaskClient(self.app) as client:\n return client.post('/login', data=dict(\n username=username,\n password=password\n ), follow_redirects=True)", "title": "" }, { "docid": "ed1974735a364e12f81e19fd375d93d7", "score": "0.62815565", "text": "def login(self, username, password, session):\n response = SHARED_SERVER.get_validate_client(username, password)\n if response.status_code == 200:\n session['username'] = username\n return 
response", "title": "" }, { "docid": "43d940a1bee85da65300816843589701", "score": "0.6281021", "text": "def _loginAsUser(self, username):\n uf = self.getPortal().acl_users\n user = uf.getUserById(username)\n self.assertNotEquals(user, None, 'No user %s' % username)\n newSecurityManager(None, user.__of__(uf))", "title": "" }, { "docid": "22d7e2e9ef5538e97d17b299cc70981c", "score": "0.62752986", "text": "def user(username):\n env.user = username", "title": "" }, { "docid": "9733884fdb73a367568875288fe72c35", "score": "0.6263563", "text": "def login(username, password):\n error = list()\n if len(username) == 0 or len(password) == 0:\n error.append('Please fill up the login form')\n return error\n username = escape_string(username)\n password = sha256_crypt.encrypt(escape_string(password))\n conn, c = connect()\n query = c.execute(\"SELECT * FROM users WHERE username=(%s) AND password=(%s)\",\n (username, password))\n c.close()\n conn.close()\n if int(query) > 0:\n session['logged_in'] = True\n session['username'] = username\n gc.collect()\n return SUCCESS\n error.append('Invalid login credentials')\n return error", "title": "" }, { "docid": "ecaaeaf81a2faef0623d68d3c6d5b324", "score": "0.62588984", "text": "def login(session, username, password):\n auth = dict(username=username, password=password)\n return session.post(path + \"/users/login\", auth).json()", "title": "" }, { "docid": "03c31d0426e5190f7067fd31ddf144a9", "score": "0.6253474", "text": "def login():\r\n if current_user.is_authenticated:\r\n return \"You need to logout first\"\r\n args = request.args\r\n password = args.get(\"password\")\r\n username = args.get(\"username\")\r\n\r\n user = User.query.filter_by(username=username).first()\r\n if user and bcrypt.check_password_hash(user.password, password):\r\n login_user(user)\r\n return \"Success log in!\"\r\n else:\r\n return \"Wrong username or password, try again.\"", "title": "" }, { "docid": "4c94acdd766f282dda5631e83bd7682a", "score": "0.6250542", "text": "def login(self, username, password):\n try:\n self.__logged_in = True\n response = self.server.user.login(username, password)\n self.trans.add_csrf(response[\"token\"])\n return response\n except xmlrpc.client.Fault as e:\n raise BoaException(e).with_traceback(e.__traceback__)", "title": "" }, { "docid": "3ab8426c0810367cb0225754898b791a", "score": "0.6247292", "text": "def login(self, username, password):\n self.session.post(\n '{base_url}/rest_login'.format(base_url=self.base_url),\n data={'username': username, 'password': password},\n )", "title": "" }, { "docid": "50739c40685ef224474680a1292e8cc6", "score": "0.6239849", "text": "def login_user(self, request, user, user_status):\n login(\n request,\n user,\n backend=\"tdpservice.users.authentication.CustomAuthentication\",\n )\n logger.info(\"%s: %s on %s\", user_status, user.username, timezone.now)", "title": "" }, { "docid": "6f7fe50ca24074a55ed5513212aec214", "score": "0.6236192", "text": "def login():\n return _authn('login-action')", "title": "" }, { "docid": "949ef0f68aae049d8f127973336dfea0", "score": "0.62256086", "text": "def _signIn(self):\n login_url='https://www.glassdoor.com/profile/login_input.htm'\n self.getURL(login_url)\n \n if self._isLoginRequired():\n try:\n self.driver.find_element_by_name('username').send_keys(self.email)\n self.driver.find_element_by_name('password').send_keys(self.password)\n self.driver.find_element_by_xpath('//button[@type=\"submit\"]').click()\n print('Login was successful.')\n except:\n print('Login was NOT successful.')\n 
else:\n pass", "title": "" }, { "docid": "c9ac967a39a65f44524bf493d04b4dea", "score": "0.62111574", "text": "def post(self):\n\n\t\tusername = self.request.get('username')\n\t\tpassword = self.request.get('password')\n\n\t\ttry:\n\t\t\tuser_id = User.authenticate(username, password)\n\t\t\tself.set_secure_cookie('user', str(user_id))\n\t\t\tself.redirect('/')\n\t\t\n\t\texcept Exception, e:\n\t\t\tself.render(\"login.html\", user=self.user, error = e)", "title": "" }, { "docid": "cc5b2e76f2ffea81aed21925a9acc405", "score": "0.62042177", "text": "def _sign_in(self):\n\n sign_button = self.find_locator_by_element(locator=LoginPageLocators.LOGIN_BUTTON)\n sign_button.click()", "title": "" }, { "docid": "2fb0060432308b724db02664fd389a1e", "score": "0.61974937", "text": "def _login(self, username: str, password: str) -> requests.models.Response:\n url = \"v1/auth/login/\"\n auth = {\"username\": username, \"password\": password}\n response = self.post(url, auth)\n self._set_csrf_header()\n return response", "title": "" }, { "docid": "cbd5e486ca675cf352a7956f7eaa0291", "score": "0.6195577", "text": "def login(self, username, userid, remember):\n session = web.ctx.session\n session.username = username\n session.userid = userid\n if remember:\n rememberme = self.rememberme()\n web.setcookie('remember', rememberme , 300000000)\n\n models.user.update_login_info_by_username(username)", "title": "" }, { "docid": "203badebdd5fb77a9f366eeb735fe917", "score": "0.6193408", "text": "def login(self):\n # collect username via CLI\n username = input('Username: ')\n # use getpass to not expose password\n password = getpass.getpass('Password: ')\n\n self.__login(username, password)\n self.__bypass_wall()", "title": "" }, { "docid": "55f75c7212613a5752a840df4f1ce21d", "score": "0.6192581", "text": "def login(self, username, password):\n form = \"username=\" + username + \"&password=\" + password\n try:\n self.curl.get(Crawler.LOG_IN_PAGE)\n except:\n raise Exception(\"Cannot access log in page\")\n try:\n csrf_token = self.curl.get_cookie('csrftoken')\n except:\n raise Exception(\"No csrf_token in the header of response from login page\")\n form += (\"&csrfmiddlewaretoken=\" + csrf_token)\n headers = {}\n try:\n loginResponse = self.curl.post(Crawler.LOG_IN_PAGE, headers, str(form))\n except:\n raise Exception(\"Cannot make post to log in page\")\n if loginResponse.status_code != HTTP_STATUS.FOUND:\n raise Exception(\"fail to login\")\n self.response_processor(Crawler.LOG_IN_PAGE, loginResponse)", "title": "" }, { "docid": "ce829ac22ee62df0c56ccdb6ec297be2", "score": "0.61877537", "text": "def login(self, username, password):\n\n # send the username to the server\n self.send(\"AUTHINFO USER \" + username)\n\n # get code 381 if a password is required\n if self.code != '381':\n return False\n\n # send the password to the server\n self.send(\"AUTHINFO PASS \" + password)\n\n # get code 281 if successfully logged in\n if self.code != '281':\n return False\n\n # all went well, return true\n return True", "title": "" }, { "docid": "f07d6b6e91b0759b17c7c974d19be7f5", "score": "0.6171236", "text": "def login(self, username, password):\n token = username + password\n data = dict(username=username, password=password, token=token)\n\n result = self.app.post('/login', data=data)\n assert result.status_code == 200\n\n return token", "title": "" }, { "docid": "3bcb516c8f6a11738f95ca685df8ca8c", "score": "0.61581534", "text": "def login(self, username, password):\n\n # check username and password\n if 
self.users.has_key(username):\n if self.users[username] == password:\n # generate session id and save it\n session_id = self._generate_session_id(username)\n self.sessions[session_id] = {\"username\" : username,\n \"session_id\": session_id,\n \"password\" : password,\n \"last_visit\": get_timestamp()}\n\n return session_id\n\n raise Fault(\"unknown username or password\", \"Please check your username and password\")", "title": "" }, { "docid": "478d1d3752178c6cfd75f59aa6cf0632", "score": "0.6156539", "text": "def login(self, user):\n self.set_secure_cookie('user_id', str(user.key().id()))", "title": "" }, { "docid": "478d1d3752178c6cfd75f59aa6cf0632", "score": "0.6156539", "text": "def login(self, user):\n self.set_secure_cookie('user_id', str(user.key().id()))", "title": "" }, { "docid": "478d1d3752178c6cfd75f59aa6cf0632", "score": "0.6156539", "text": "def login(self, user):\n self.set_secure_cookie('user_id', str(user.key().id()))", "title": "" }, { "docid": "487a755a09d8b8af40ff227ab00f7f8e", "score": "0.6156364", "text": "def login(self, user, password):\n self._query.login(user, password)", "title": "" }, { "docid": "ba1d3ab64a144c59986f919c6ab77dd3", "score": "0.6152977", "text": "def login():\n try:\n return user.login()\n except errors.ServerError as e:\n abort(e.status)", "title": "" }, { "docid": "7a71fb61d912cd63a481618fb062b838", "score": "0.6151953", "text": "def login():\n if g.user:\n return redirect(url_for('index'))\n error = None\n if request.method == 'POST':\n rv = mongo.db.users.find({\"username\": request.form['username']})\n user = rv[0] if rv.count() != 0 else None\n if user is None:\n error = 'Invalid username'\n elif not check_password_hash(user['pw_hash'],\n request.form['password']):\n error = 'Invalid password'\n else:\n flash('You were logged in')\n session['user_id'] = user['user_id']\n return redirect(url_for('index'))\n return render_template('login.html', error=error)", "title": "" }, { "docid": "91d454cb161039587491fdd393c446af", "score": "0.6149912", "text": "def enter_username(self, username):\n WebDriverWait(self.driver, 25).until(EC.visibility_of_element_located((By.CSS_SELECTOR, 'button.login-to-checkout-btn')))\n # WebDriverWait(self.driver, 3).until(EC.invisibility_of_element_located((By.CSS_SELECTOR, self.CSS_LOADER)))\n time.sleep(2)\n login_to_checkout=self.driver.find_element_by_css_selector(\"button.login-to-checkout-btn\")\n action = TouchActions(self.driver)\n action.tap(login_to_checkout).perform()\n WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, self.CSS_LOGINVIAPASSWORD)))\n self.driver.find_element_by_css_selector(self.CSS_LOGINVIAPASSWORD).click()\n self.driver.implicitly_wait(2)\n element = self.driver.find_element_by_css_selector(self.CSS_USERNAME)\n element.send_keys(username)", "title": "" }, { "docid": "e00367771f84dcbdc866c1e41165f707", "score": "0.61398", "text": "def login(self, username=None, password=None):\n if not username:\n username = self._username\n\n if not password:\n password = self._password\n\n body_node, doc = generate_envelope('Login')\n append_text_node_to('USERNAME', username, body_node)\n append_text_node_to('PASSWORD', password, body_node)\n\n (success, tree, self.error) = self.get(doc, False)\n if success:\n session_id = tree.find(\"Body/RESULT/SESSIONID\").text\n self._session = Session(session_id)\n\n return success", "title": "" }, { "docid": "4117b5c4b50be8a3599cc500306a3731", "score": "0.61172205", "text": "def login(self, username=None, password=None):\n if 
password and not username:\n raise Exception('Username must be provided when password is.')\n\n user = username or self.config.user or raw_input('Username: ')\n if username and username == self.config.user:\n pswd = password or self.config.pswd\n elif not username and self.config.user:\n pswd = self.config.pswd\n else:\n import getpass\n pswd = password or getpass.getpass('Password for %s: ' % user)\n\n params = {'api_type': 'json',\n 'passwd': pswd,\n 'user': user}\n response = self.request_json(self.config['login'] % user, params)\n self.modhash = response['data']['modhash']\n self.user = self.get_redditor(user)\n self.user.__class__ = reddit.objects.LoggedInRedditor", "title": "" }, { "docid": "c735d467136416fce74fe728fdf35aa8", "score": "0.61100256", "text": "def signin(request):\n form = handle_signin(request)\n if request.user.is_authenticated():\n redirect_url = get_next_url(request) or reverse('innovate_splash')\n return HttpResponseRedirect(redirect_url)\n return jingo.render(request, 'users/signin.html', {\n 'form': form,\n })", "title": "" }, { "docid": "b1b60457a40889cb77523b76270b7dd8", "score": "0.61078954", "text": "def login(ctx, email, password):\n save_session(\n email=email,\n session_id=get_client().user.login(\n email=email,\n password=password\n )\n )\n click.echo('Successfully logged in!')", "title": "" }, { "docid": "2338ee8620391330caa6b1a08a9856fc", "score": "0.60997355", "text": "def user_login(user):\n session['username'] = user.username\n session[\"Passes\"] = {}", "title": "" }, { "docid": "93969058159cb5e86d883373395a80d1", "score": "0.6094007", "text": "def login():\n if hasattr(g, 'user') and g.user.is_authenticated():\n return redirect(url_for('snaps.listing'))\n\n form = LoginForm()\n\n if form.validate_on_submit():\n\n user = User.query.filter_by(username=form.username.data).first()\n\n if not user or not flask_crypt.check_password_hash(user.password, form.password.data):\n flash(\"No such user exists.\")\n return render_template('users/login.html', form=form)\n login_user(user, remember=True)\n return redirect(url_for('snaps.listing'))\n return render_template('users/login.html', form=form)", "title": "" }, { "docid": "85f157a2e3ce56373db3fa3d75a163ce", "score": "0.60905105", "text": "def login():\n if request.method == 'POST':\n # check if username exists in db\n existing_user = mongo.db.users.find_one(\n {'username': request.form.get('username').lower()})\n\n if existing_user:\n # ensure hashed password matches user input\n if check_password_hash(\n existing_user['password'], request.form.get('password')):\n session['user'] = request.form.get('username').lower()\n return redirect(url_for('profile', username=session['user']))\n else:\n # invalid password match\n flash('Incorrect Username and/or Password', 'login')\n return redirect(url_for('login'))\n\n else:\n # username doesn't exist\n flash('Incorrect Username and/or Password', 'login')\n return redirect(url_for('login'))\n\n return render_template('login.html')", "title": "" }, { "docid": "c1c7af1be28a1e27a8837b34c0c0a8eb", "score": "0.6085751", "text": "def _log_in(self):\n # Pass cookies.\n self.driver.find_element_by_xpath(\n '/html/body/div[2]/div/div/div/div[2]/button[1]').click()\n # Insert login and password.\n self.driver.find_element_by_xpath(\n '//input[@name=\\\"username\\\"]').send_keys(self.username)\n self.driver.find_element_by_xpath(\n '//input[@name=\\\"password\\\"]').send_keys(self.password)\n # Click login.\n self.driver.find_element_by_xpath(\n 
'//*[@id=\"loginForm\"]/div/div[3]').click()\n # Pass remember login popoup.", "title": "" }, { "docid": "4e938c9f48516edda8971b2cb5bc4b29", "score": "0.6084318", "text": "def user_auth():\n form = request.form.to_dict()\n existing_user = user_collection.find_one({\"username\": form[\"username\"]})\n # Checks if the user is in db\n if existing_user:\n # If passwords match (hashed / real)\n if check_password_hash(\n (existing_user[\"password\"]),\n form[\"user_password\"]):\n # Log in the user\n session[\"user\"] = form[\"username\"]\n # If user is admin redirect to admin page\n if session[\"user\"] == \"admin\":\n return redirect(url_for(\"admin\"))\n\n flash(\"You are now logged in!\")\n return redirect(url_for(\n \"profile\", user=existing_user[\"username\"]))\n\n flash(\"Wrong username and/or password\")\n return redirect(url_for(\"login\"))\n\n flash(\"You must be registered first before you can log in.\")\n return redirect(url_for(\"pages/signup\"))", "title": "" }, { "docid": "408fdd4d2de07806b026da8e50e75805", "score": "0.6081615", "text": "def login(self):\n username = self.cleaned_data.get('username')\n password = self.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n\n return user", "title": "" }, { "docid": "f41ae63048ef9f10f44465607c95a873", "score": "0.6080548", "text": "def login(self, username, password):\n driver = self.driver\n driver.find_element_by_id(\"topbar-login\").click()\n driver.find_element_by_id(\"username\").clear()\n driver.find_element_by_id(\"username\").send_keys(username)\n driver.find_element_by_id(\"password\").clear()\n driver.find_element_by_id(\"password\").send_keys(password)\n driver.find_element_by_id(\"login\").click()\n time.sleep(1)", "title": "" }, { "docid": "fd84b219c040708407f670203c44bdc3", "score": "0.6076396", "text": "def login(self, controller):\n \n username = self.entries[self.fieldnames[0]].get()\n \n if not username:\n showinfo(title=\"Pop-up\", message=\"Please enter a username.\")\n return\n\n password = self.entries[self.fieldnames[1]].get()\n \n if not password:\n showinfo(title=\"Pop-up\", message=\"Please enter the password.\")\n return\n\n if not self.verifyUser(username, password):\n showinfo(title=\"Pop-up\", message=\"Invalid username or password.\")\n return\n\n controller.showFrame(PageOne)", "title": "" }, { "docid": "b3d50a31e394df5c0aba0a60e3f2e4f1", "score": "0.6071949", "text": "def login(self, username, password):\r\n\t\tself.username = username\r\n\t\tself.password = password\r\n\t\t\r\n\t\tcreds = {\"username\":username,\"password\":password}\r\n\t\t\r\n\t\tr = requests.post(self.url+\"/session\", data=creds, verify=False)\r\n\t\tif r.status_code != 200: \r\n\t\t\traise ValueError(\"Login failed with error code: \"+str(r.status_code)+\"\\n\"+r.text)\r\n\t\t\t\r\n\t\tself.token = json.loads(r.text)[\"token\"]\r\n\t\tself.getKeys()", "title": "" }, { "docid": "db58246792872edea06234cfa1d5acb1", "score": "0.6068966", "text": "def logon(self, username, password):\n if self._token:\n self.logoff()\n try:\n response = self.__makerequest('logon', email=username, password=password)\n except FogBugzAPIError, e:\n raise FogBugzLogonError(e)\n \n self._token = response.token.string\n if type(self._token) == CData:\n self._token = self._token.encode('utf-8')", "title": "" }, { "docid": "a14a7bca5d6c1a785a6d6c7855f60eef", "score": "0.6068324", "text": "def login():\n login_form = classes.LogInForm()\n if login_form.validate_on_submit():\n username = login_form.username.data\n password 
= login_form.password.data\n # Look for it in the database.\n user = classes.User.query.filter_by(username=username).first()\n\n # Login and validate the user.\n if user is not None and user.check_password(password):\n login_user(user)\n return redirect(url_for('upload'))\n # return(\"<h1> Welcome {}!</h1>\".format(username))\n\n return render_template('login.html', form=login_form)", "title": "" } ]
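The passages in the row above all revolve around the same username/password login pattern (submit credentials, check them, then persist an authenticated session). A minimal sketch of that pattern using the requests library follows; it is not taken from any passage in this row, and the URL path and form field names are assumptions for illustration only.

    # Hypothetical illustration of the credential-post / session-reuse pattern.
    # The "/login" path and the "username"/"password" field names are assumed,
    # not taken from any of the quoted passages.
    import requests

    def login(base_url, username, password):
        session = requests.Session()
        response = session.post(
            f"{base_url}/login",
            data={"username": username, "password": password},
        )
        response.raise_for_status()  # fail loudly on a 4xx/5xx instead of logging in silently
        return session  # the session now carries the auth cookie for later requests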
404671f607e8e6fefe302d3dceab2092
Sets the access control policy on the specified resource. Replaces any existing policy.
[ { "docid": "9cd543d8f92bc3bf059f6ebd6f1d2a6b", "score": "0.50599295", "text": "def SetIamPolicy(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" } ]
[ { "docid": "43c26021a7a50ef9c9a7379a2cefa46d", "score": "0.7376781", "text": "def set_resource_policy(self, resource, policy):\n\n @lockutils.synchronized(self.lock_name)\n def _set_resource_policy():\n policy_id = self.resource_2_qos_policies.get(resource)\n old_policy = self.known_policies.get(policy_id)\n self.known_policies[policy.id] = policy\n self.resource_2_qos_policies[resource] = policy.id\n self.qos_policy_2_resources[policy.id].add(resource)\n if old_policy and old_policy.id != policy.id:\n self.qos_policy_2_resources[old_policy.id].remove(resource)\n\n _set_resource_policy()", "title": "" }, { "docid": "db34fd03f6a13178c904eb26f447d294", "score": "0.67079204", "text": "def set_iam_policy(\n self,\n resource,\n policy,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n # Wrap the transport method to add retry and timeout logic.\n if \"set_iam_policy\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"set_iam_policy\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.set_iam_policy,\n default_retry=self._method_configs[\"SetIamPolicy\"].retry,\n default_timeout=self._method_configs[\"SetIamPolicy\"].timeout,\n client_info=self._client_info,\n )\n\n request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy)\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"resource\", resource)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"set_iam_policy\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )", "title": "" }, { "docid": "db34fd03f6a13178c904eb26f447d294", "score": "0.67079204", "text": "def set_iam_policy(\n self,\n resource,\n policy,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n # Wrap the transport method to add retry and timeout logic.\n if \"set_iam_policy\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"set_iam_policy\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.set_iam_policy,\n default_retry=self._method_configs[\"SetIamPolicy\"].retry,\n default_timeout=self._method_configs[\"SetIamPolicy\"].timeout,\n client_info=self._client_info,\n )\n\n request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy)\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"resource\", resource)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"set_iam_policy\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )", "title": "" }, { "docid": "3ba4d69be746325e853980f9a6f4ee3d", "score": "0.6511385", "text": "def policy(self, policy):\n\n self._policy = policy", "title": "" }, { "docid": "e6c5bfefe93820638a51a3a234b42790", "score": "0.64612085", "text": "def set_iam_policy(self, policy):\n table_client = self._instance._client.table_admin_client\n resp = table_client.set_iam_policy(\n request={\"resource\": self.name, \"policy\": policy.to_pb()}\n )\n return Policy.from_pb(resp)", "title": "" }, { "docid": "3678a0df4233c2bcbda47d811913151c", "score": "0.63890755", "text": "def set_iam_policy(self, policy):\n 
instance_admin_client = self._client.instance_admin_client\n resp = instance_admin_client.set_iam_policy(\n request={\"resource\": self.name, \"policy\": policy.to_pb()}\n )\n return Policy.from_pb(resp)", "title": "" }, { "docid": "4a89ec0988bff1b9e978a9fea14f991c", "score": "0.6303304", "text": "def put_policy(self, policy):\n try:\n self.bucket.Policy().put(Policy=json.dumps(policy))\n logger.info(\"Put policy %s for bucket '%s'.\", policy, self.bucket.name)\n except ClientError:\n logger.exception(\"Couldn't apply policy to bucket '%s'.\", self.bucket.name)\n raise", "title": "" }, { "docid": "6cb567bb19d9e41e66d792e8a8539ccf", "score": "0.61875004", "text": "def set_policy(cmd, client, resource_group_name, vault_name,\n object_id=None, application_id=None, spn=None, upn=None, key_permissions=None, secret_permissions=None,\n certificate_permissions=None, storage_permissions=None, no_wait=False):\n\n VaultCreateOrUpdateParameters = cmd.get_models('VaultCreateOrUpdateParameters',\n resource_type=ResourceType.MGMT_KEYVAULT)\n AccessPolicyEntry = cmd.get_models('AccessPolicyEntry', resource_type=ResourceType.MGMT_KEYVAULT)\n Permissions = cmd.get_models('Permissions', resource_type=ResourceType.MGMT_KEYVAULT)\n object_id = _object_id_args_helper(cmd.cli_ctx, object_id, spn, upn)\n vault = client.get(resource_group_name=resource_group_name,\n vault_name=vault_name)\n\n key_permissions = _permissions_distinct(key_permissions)\n secret_permissions = _permissions_distinct(secret_permissions)\n certificate_permissions = _permissions_distinct(certificate_permissions)\n storage_permissions = _permissions_distinct(storage_permissions)\n\n try:\n enable_rbac_authorization = getattr(vault.properties, 'enable_rbac_authorization')\n except: # pylint: disable=bare-except\n pass\n else:\n if enable_rbac_authorization:\n raise CLIError('Cannot set policies to a vault with \\'--enable-rbac-authorization\\' specified')\n\n # Find the existing policy to set\n policy = next((p for p in vault.properties.access_policies\n if object_id.lower() == p.object_id.lower() and\n (application_id or '').lower() == (p.application_id or '').lower() and\n vault.properties.tenant_id.lower() == p.tenant_id.lower()), None)\n if not policy:\n # Add new policy as none found\n vault.properties.access_policies.append(AccessPolicyEntry(\n tenant_id=vault.properties.tenant_id,\n object_id=object_id,\n application_id=application_id,\n permissions=Permissions(keys=key_permissions,\n secrets=secret_permissions,\n certificates=certificate_permissions,\n storage=storage_permissions)))\n else:\n # Modify existing policy.\n # If key_permissions is not set, use prev. 
value (similarly with secret_permissions).\n keys = policy.permissions.keys if key_permissions is None else key_permissions\n secrets = policy.permissions.secrets if secret_permissions is None else secret_permissions\n certs = policy.permissions.certificates \\\n if certificate_permissions is None else certificate_permissions\n storage = policy.permissions.storage if storage_permissions is None else storage_permissions\n policy.permissions = Permissions(keys=keys, secrets=secrets, certificates=certs, storage=storage)\n\n return _azure_stack_wrapper(cmd, client, 'create_or_update',\n resource_type=ResourceType.MGMT_KEYVAULT,\n min_api_version='2018-02-14',\n resource_group_name=resource_group_name,\n vault_name=vault_name,\n parameters=VaultCreateOrUpdateParameters(\n location=vault.location,\n tags=vault.tags,\n properties=vault.properties),\n no_wait=no_wait)", "title": "" }, { "docid": "44d0aa117a57e5df63889bbcd878b942", "score": "0.6023322", "text": "def set_policy(self, policy_name):\n assert (type(policy_name) == str)\n\n params = bytes(policy_name, 'ascii')\n if len(params) > 8:\n raise RuntimeError(\"Policy name is too long\")\n params += bytes(8 - len(params))\n\n self._send_and_check(api.Request(api.MKE_REQUEST_SET_POLICY,\n params=params),\n [api.MKE_REPLY_OK])", "title": "" }, { "docid": "ecce04a3d65d59dac5bbe208c5448537", "score": "0.59802914", "text": "def set_bucket_policy(self, bucket_name, policy):\n check_bucket_name(bucket_name)\n is_valid_policy_type(policy)\n self._execute(\n \"PUT\",\n bucket_name,\n body=policy,\n headers={\"Content-MD5\": md5sum_hash(policy)},\n query_params={\"policy\": \"\"},\n )", "title": "" }, { "docid": "3c197c17468bf8a78cf54e44db635b85", "score": "0.596857", "text": "def rewritepolicy(self, rewritepolicy) :\n\t\ttry :\n\t\t\tself._rewritepolicy = rewritepolicy\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "4ac3fa6bf03d6ca53082112e6634c986", "score": "0.5948425", "text": "def _update_policy(self):\n pass", "title": "" }, { "docid": "d07035cb31e53823209b7b5e64cbc728", "score": "0.59340125", "text": "def set_policy(self, name, rules):\n if isinstance(rules, dict):\n rules = json.dumps(rules)\n params = {\n 'rules': rules,\n }\n api_path = '/v1/sys/policy/{name}'.format(\n name=name,\n )\n self._adapter.put(api_path, json=params)", "title": "" }, { "docid": "beea47a3a1adf91defe3bc1bd359b10f", "score": "0.5890776", "text": "def _set_iam_policies(self, file_name, role, policy_name):\n\n with open(file_name, 'r') as policy_file:\n policy_string = policy_file.read()\n\n deploy_template = jinja2.Template(policy_string)\n policy_string = deploy_template.render(cluster_name=self.cluster_name,\n aws_account=self.cluster_config['aws_account'],\n s3_bucket=self.cluster_config['s3_bucket'],\n dns_zone=self.cluster_config['dns_zone_id'])\n\n # Loop through inline policies for this role and update the one with the right name\n for policy in role.policies.all():\n if policy.policy_name == policy_name:\n logging.info(\"Updating policy %s\", policy_name)\n policy.put(PolicyDocument=policy_string)\n return\n\n raise SystemError('Policy not found')", "title": "" }, { "docid": "79c9ea07102eeaa93625b53cb9dab8e7", "score": "0.5769831", "text": "def set_project_policy(service, project, policy):\n policy = service.projects().setIamPolicy(\n resource=project, body={\n 'policy': policy\n }).execute()\n return policy", "title": "" }, { "docid": "a02e347c845aca3289ebbc10a87b053e", "score": "0.5739763", "text": "def enablePolicy(self, policy):\n 
self.log.debug(\"Enabling policy %s\", str(policy))\n policyInfo = self.policyDatabase.getPolicyInfo(policy.id)\n if policyInfo is None:\n self.log.error(\"Unable to get policy from the database: %s\", str(policy))\n return\n # enable the policy in memory and in the database\n if not policy.isEnabled():\n policy.state = States.ENABLED\n self.policyDatabase.enablePolicy(policy.id)\n else:\n self.log.info(\"Policy is already enabled %s\", str(policy))", "title": "" }, { "docid": "1035aa37685e605ba17fc5a77e6d7291", "score": "0.5714539", "text": "def update_policy(self,\n id,\n name,\n datacenters=None,\n description=None,\n rules=None):\n return self._put_response_body([\"policy\", id], {},\n dict(\n model.ACLPolicy(\n name=name,\n datacenters=datacenters,\n description=description,\n rules=rules)))", "title": "" }, { "docid": "ae527c3afb0fa7dd40eea5f74298504a", "score": "0.5702328", "text": "def policy_uri(self, policy_uri):\n\n self._policy_uri = policy_uri", "title": "" }, { "docid": "6f36d1a285b1fc0ddfba8b08ff333ea8", "score": "0.563909", "text": "def update_policy():\n self.policy = self.inner + (parallel(rps) & self.query)", "title": "" }, { "docid": "403c697b840a3186db5363ed67e9c797", "score": "0.56348604", "text": "def policies(self, policies):\n\n self._policies = policies", "title": "" }, { "docid": "73a3a726c7ae1098e33fd299db16e628", "score": "0.5606487", "text": "def set_forecast_permissions(self, name, data_bucket_name_resource: CfnResource):\n function = self.functions[name]\n function.role.attach_inline_policy(\n self.policies.forecast_read_write_policy(name)\n )\n function.role.attach_inline_policy(\n self.policies.s3_bucket_read_policy(name, data_bucket_name_resource)\n )\n function.role.attach_inline_policy(self.policies.forecast_kms_read_policy)", "title": "" }, { "docid": "da8fe4e55be1c74cfd9345477cd4ca4b", "score": "0.55380553", "text": "def policy_rules(self, policy_rules):\n\n self._policy_rules = policy_rules", "title": "" }, { "docid": "ba052a5cc26f1cbda6be5dc65bc5cae2", "score": "0.5526146", "text": "def dns_policy(self, dns_policy):\n\n self._dns_policy = dns_policy", "title": "" }, { "docid": "cd5b79afbf6ceaa1dd55b156e95bf4ff", "score": "0.550821", "text": "def __attach_inline_policy(self, username, policy_document):\n client = self.client\n\n response = client.put_user_policy(\n UserName=username,\n PolicyName=\"threatresponse-temporal-key-revocation\",\n PolicyDocument=policy_document\n )\n return response", "title": "" }, { "docid": "dbb8102bae8047e236675c6b8a29d725", "score": "0.5502422", "text": "def cachepolicy(self, cachepolicy) :\n\t\ttry :\n\t\t\tself._cachepolicy = cachepolicy\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "b94b11b9fdee7f235df0368f83ce8668", "score": "0.54893875", "text": "def AddPolicy (self, policy):\n self.policies.append(policy)", "title": "" }, { "docid": "e27165074db779b7f4642f89bf61da32", "score": "0.54777825", "text": "def responderpolicy(self, responderpolicy) :\n\t\ttry :\n\t\t\tself._responderpolicy = responderpolicy\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "8f5e04253171804d0194a67ecdc9ed6e", "score": "0.5467731", "text": "def update_policy():\n self.policy = self.forward + self.query", "title": "" }, { "docid": "956f02f04a7094a429188784fbbbb65e", "score": "0.5461217", "text": "def full_policy(self, resource: str) -> dict:\n return dict(Version=\"2012-10-17\", Statement=[dict(Effect=\"Allow\", Resource=\"*\", Action=f\"{resource}:*\")])", "title": "" }, { "docid": 
"65682b03d6ce1132e316f3c1a4549862", "score": "0.5456855", "text": "def block_policy(self, block_policy):\n\n self._block_policy = block_policy", "title": "" }, { "docid": "6630aa59baa2291d15952368224f9941", "score": "0.5426399", "text": "def SetIamPolicy(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "title": "" }, { "docid": "f086e1ddaeb8d88f0d539bd28394a5ab", "score": "0.5418963", "text": "def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None,\n generation=None, if_generation=None, if_metageneration=None):\n if isinstance(acl_or_str, Policy):\n raise InvalidAclError('Attempt to set S3 Policy on GS ACL')\n elif isinstance(acl_or_str, ACL):\n self.set_xml_acl(acl_or_str.to_xml(), key_name, headers=headers,\n generation=generation,\n if_generation=if_generation,\n if_metageneration=if_metageneration)\n else:\n self.set_canned_acl(acl_or_str, key_name, headers=headers,\n generation=generation,\n if_generation=if_generation,\n if_metageneration=if_metageneration)", "title": "" }, { "docid": "aa5e264e01e6d5771bddf7dbe842151e", "score": "0.54021513", "text": "def set_access_policy( # pylint: disable=inconsistent-return-statements\n self,\n timeout: Optional[int] = None,\n request_id_parameter: Optional[str] = None,\n queue_acl: Optional[List[_models.SignedIdentifier]] = None,\n **kwargs: Any\n ) -> None:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = case_insensitive_dict(kwargs.pop(\"headers\", {}) or {})\n _params = case_insensitive_dict(kwargs.pop(\"params\", {}) or {})\n\n comp: Literal[\"acl\"] = kwargs.pop(\"comp\", _params.pop(\"comp\", \"acl\"))\n content_type: str = kwargs.pop(\"content_type\", _headers.pop(\"Content-Type\", \"application/xml\"))\n cls: ClsType[None] = kwargs.pop(\"cls\", None)\n\n serialization_ctxt = {\"xml\": {\"name\": \"SignedIdentifiers\", \"wrapped\": True}}\n if queue_acl is not None:\n _content = self._serialize.body(\n queue_acl, \"[SignedIdentifier]\", is_xml=True, serialization_ctxt=serialization_ctxt\n )\n else:\n _content = None\n\n request = build_set_access_policy_request(\n url=self._config.url,\n timeout=timeout,\n request_id_parameter=request_id_parameter,\n comp=comp,\n content_type=content_type,\n version=self._config.version,\n content=_content,\n template_url=self.set_access_policy.metadata[\"url\"],\n headers=_headers,\n params=_params,\n )\n request = _convert_request(request)\n request.url = self._client.format_url(request.url)\n\n _stream = False\n pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [204]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n\n response_headers = {}\n response_headers[\"x-ms-request-id\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-request-id\"))\n response_headers[\"x-ms-version\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-version\"))\n response_headers[\"Date\"] = self._deserialize(\"rfc-1123\", response.headers.get(\"Date\"))\n\n if cls:\n return cls(pipeline_response, None, response_headers)", "title": "" }, { 
"docid": "1ef50ce64babd6c77da321acf1c7b947", "score": "0.53903216", "text": "def billing_policy(self, billing_policy):\n\n\n self._billing_policy = billing_policy", "title": "" }, { "docid": "64a0deeba01bbab8e768b9a32576ebf2", "score": "0.53304446", "text": "def set_policy_state(self, policy_state: dict) -> None:\n self._policy.set_state(policy_state)", "title": "" }, { "docid": "24034104d470a702a5afa6117f78f7ca", "score": "0.53252965", "text": "def SetIamPolicy(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "title": "" }, { "docid": "25fd10c66eab40c25b387a4db764d2dc", "score": "0.5317774", "text": "def authenticationpolicy(self, authenticationpolicy) :\n\t\ttry :\n\t\t\tself._authenticationpolicy = authenticationpolicy\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "752d0d2962249fc3c4561aa78d3fc7c2", "score": "0.52665293", "text": "async def set_iam_policy(\n self,\n request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None,\n *,\n resource: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> policy_pb2.Policy:\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([resource])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n # The request isn't a proto-plus wrapped type,\n # so it must be constructed via keyword expansion.\n if isinstance(request, dict):\n request = iam_policy_pb2.SetIamPolicyRequest(**request)\n elif not request:\n request = iam_policy_pb2.SetIamPolicyRequest(\n resource=resource,\n )\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.set_iam_policy,\n default_timeout=60.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"resource\", request.resource),)),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response", "title": "" }, { "docid": "ec5cd7d508e01fca5f6bf530e476fa58", "score": "0.52648395", "text": "def set_bucket_policy(self,bucket):\n policy = \"\"\"\n {\n \"Version\":\"2012-10-17\",\n \"Statement\":[\n {\n \"Sid\":\"PublicRead\",\n \"Effect\":\"Allow\",\n \"Principal\": \"*\",\n \"Action\":[\"s3:GetObject\"],\n \"Resource\":[\"arn:aws:s3:::%s/*\"]\n }\n ]\n }\n \"\"\" % bucket.name\n #remove any space\n policy = policy.strip()\n pol = bucket.Policy()\n pol.put(Policy=policy)", "title": "" }, { "docid": "1fa922c1920f39af328c7e0e3de71f97", "score": "0.5261521", "text": "def set_acl(self, path, version, acl):\n pc = utils.StatePipeCondition()\n ok = zookeeper.aset_acl(self._zhandle, path, version, acl,\n functools.partial(_generic_completion,\n pc))\n assert ok == zookeeper.OK\n results = pc.wait_and_get()\n pc.close()\n #unpack result as void_completion\n handle, rc = results\n assert handle == self._zhandle\n if rc == zookeeper.OK:\n return rc\n self._raise_exception(rc)", "title": "" }, { "docid": 
"1e4f1be8c19ba518c85957cc311aa6fd", "score": "0.52539706", "text": "def SetIamPolicy(self, request, global_params=None):\n config = self.GetMethodConfig('SetIamPolicy')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "1e4f1be8c19ba518c85957cc311aa6fd", "score": "0.52539706", "text": "def SetIamPolicy(self, request, global_params=None):\n config = self.GetMethodConfig('SetIamPolicy')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "1e4f1be8c19ba518c85957cc311aa6fd", "score": "0.52539706", "text": "def SetIamPolicy(self, request, global_params=None):\n config = self.GetMethodConfig('SetIamPolicy')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "1e4f1be8c19ba518c85957cc311aa6fd", "score": "0.52539706", "text": "def SetIamPolicy(self, request, global_params=None):\n config = self.GetMethodConfig('SetIamPolicy')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "1e4f1be8c19ba518c85957cc311aa6fd", "score": "0.52539706", "text": "def SetIamPolicy(self, request, global_params=None):\n config = self.GetMethodConfig('SetIamPolicy')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "1e4f1be8c19ba518c85957cc311aa6fd", "score": "0.52539706", "text": "def SetIamPolicy(self, request, global_params=None):\n config = self.GetMethodConfig('SetIamPolicy')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "1e4f1be8c19ba518c85957cc311aa6fd", "score": "0.52539706", "text": "def SetIamPolicy(self, request, global_params=None):\n config = self.GetMethodConfig('SetIamPolicy')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "1e4f1be8c19ba518c85957cc311aa6fd", "score": "0.52539706", "text": "def SetIamPolicy(self, request, global_params=None):\n config = self.GetMethodConfig('SetIamPolicy')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "1e4f1be8c19ba518c85957cc311aa6fd", "score": "0.52539706", "text": "def SetIamPolicy(self, request, global_params=None):\n config = self.GetMethodConfig('SetIamPolicy')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "1e4f1be8c19ba518c85957cc311aa6fd", "score": "0.52539706", "text": "def SetIamPolicy(self, request, global_params=None):\n config = self.GetMethodConfig('SetIamPolicy')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "8271b1ed755415fd8ba7b1f9f79a7945", "score": "0.5248899", "text": "def enable_fpolicy_policy(self, share_name, policy_name, sequence_number):\n volume = self._get_volume_by_args(vol_name=share_name)\n svm_uuid = volume['svm']['uuid']\n body = {\n 'priority': sequence_number,\n }\n\n self.send_request(\n f'/protocols/fpolicy/{svm_uuid}/policies/{policy_name}', 'patch',\n body=body)", "title": "" }, { "docid": "335e516e2b1058f7980eace0556dbc89", "score": "0.52447236", "text": "def SetIamPolicy(self, request, global_params=None):\n config = self.GetMethodConfig('SetIamPolicy')\n return self._RunMethod(config, request, global_params=global_params)", "title": "" }, { "docid": "335e516e2b1058f7980eace0556dbc89", "score": "0.52447236", "text": "def SetIamPolicy(self, request, global_params=None):\n config = 
self.GetMethodConfig('SetIamPolicy')\n return self._RunMethod(config, request, global_params=global_params)", "title": "" }, { "docid": "16e49b25fc9f6034915d647bc4d9a9c9", "score": "0.52340275", "text": "def _confirm_and_lock_retention_policy(self, api_client, bucket_resource,\n request_config):\n lock_prompt = (\n 'This will permanently set the retention policy on \"{}\" to the'\n ' following:\\n\\n{}\\n\\nThis setting cannot be reverted. Continue? '\n ).format(self._bucket_resource, bucket_resource.retention_policy)\n\n if not bucket_resource.retention_policy:\n raise command_errors.Error(\n 'Bucket \"{}\" does not have a retention policy.'.format(\n self._bucket_resource))\n elif bucket_resource.retention_policy_is_locked:\n log.error('Retention policy on \"{}\" is already locked.'.format(\n self._bucket_resource))\n elif console_io.PromptContinue(message=lock_prompt, default=False):\n log.status.Print('Locking retention policy on {}...'.format(\n self._bucket_resource))\n api_client.lock_bucket_retention_policy(bucket_resource, request_config)\n else:\n # Gsutil does not update the exit code here, so we cannot use\n # cancel_or_no with PromptContinue.\n log.error('Abort locking retention policy on \"{}\".'.format(\n self._bucket_resource))", "title": "" }, { "docid": "8517a81a6411b783dfc84575658ab955", "score": "0.5215342", "text": "def device_policy(policy):\n ctx = context()\n old_policy = ctx.device_policy\n try:\n ctx.device_policy = policy\n yield\n finally:\n ctx.device_policy = old_policy", "title": "" }, { "docid": "cefb29a09235947182c721b3dd09d28e", "score": "0.5215194", "text": "def set_enforcement(self, result, match_code='550',\n match_message='5.7.1 Access denied'):\n if result.lower() not in ['pass', 'permerror', 'fail', 'temperror',\n 'softfail', 'none', 'neutral']:\n raise ValueError(result)\n self.policies[result.lower()] = (match_code, match_message)", "title": "" }, { "docid": "f9c88c672c5fd709f096b8c54b12f111", "score": "0.521288", "text": "def set_def_acl(self, acl_or_str, headers=None):\n if isinstance(acl_or_str, Policy):\n raise InvalidAclError('Attempt to set S3 Policy on GS ACL')\n elif isinstance(acl_or_str, ACL):\n self.set_def_xml_acl(acl_or_str.to_xml(), headers=headers)\n else:\n self.set_def_canned_acl(acl_or_str, headers=headers)", "title": "" }, { "docid": "2e7ff2c4eb05c9a7534d8245854c0fd8", "score": "0.5203805", "text": "def icapolicy(self, icapolicy) :\n\t\ttry :\n\t\t\tself._icapolicy = icapolicy\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "8bc7fe5176cd4d2f2eff8f2652979e61", "score": "0.51957875", "text": "def put(self, PolicyDocument: str):\n pass", "title": "" }, { "docid": "8bc7fe5176cd4d2f2eff8f2652979e61", "score": "0.51957875", "text": "def put(self, PolicyDocument: str):\n pass", "title": "" }, { "docid": "8bc7fe5176cd4d2f2eff8f2652979e61", "score": "0.51957875", "text": "def put(self, PolicyDocument: str):\n pass", "title": "" }, { "docid": "48d3f6b4a700a1f4de34904b41004711", "score": "0.5192792", "text": "def policy_scope(policy):\n old_policy = _global_policy\n try:\n set_global_policy(policy)\n yield\n finally:\n set_global_policy(old_policy)", "title": "" }, { "docid": "d3092c317db587e5d3c713f3a250b8e9", "score": "0.5181993", "text": "def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):\n error_map = kwargs.pop('error_map', None)\n lease_id = None\n if 
lease_access_conditions is not None:\n lease_id = lease_access_conditions.lease_id\n if_match = None\n if modified_access_conditions is not None:\n if_match = modified_access_conditions.if_match\n if_none_match = None\n if modified_access_conditions is not None:\n if_none_match = modified_access_conditions.if_none_match\n if_modified_since = None\n if modified_access_conditions is not None:\n if_modified_since = modified_access_conditions.if_modified_since\n if_unmodified_since = None\n if modified_access_conditions is not None:\n if_unmodified_since = modified_access_conditions.if_unmodified_since\n\n action = \"setAccessControl\"\n\n # Construct URL\n url = self.set_access_control.metadata['url']\n path_format_arguments = {\n 'url': self._serialize.url(\"self._config.url\", self._config.url, 'str', skip_quote=True)\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n if timeout is not None:\n query_parameters['timeout'] = self._serialize.query(\"timeout\", timeout, 'int', minimum=0)\n query_parameters['action'] = self._serialize.query(\"action\", action, 'str')\n\n # Construct headers\n header_parameters = {}\n if owner is not None:\n header_parameters['x-ms-owner'] = self._serialize.header(\"owner\", owner, 'str')\n if group is not None:\n header_parameters['x-ms-group'] = self._serialize.header(\"group\", group, 'str')\n if posix_permissions is not None:\n header_parameters['x-ms-permissions'] = self._serialize.header(\"posix_permissions\", posix_permissions, 'str')\n if posix_acl is not None:\n header_parameters['x-ms-acl'] = self._serialize.header(\"posix_acl\", posix_acl, 'str')\n if request_id is not None:\n header_parameters['x-ms-client-request-id'] = self._serialize.header(\"request_id\", request_id, 'str')\n header_parameters['x-ms-version'] = self._serialize.header(\"self._config.version\", self._config.version, 'str')\n if lease_id is not None:\n header_parameters['x-ms-lease-id'] = self._serialize.header(\"lease_id\", lease_id, 'str')\n if if_match is not None:\n header_parameters['If-Match'] = self._serialize.header(\"if_match\", if_match, 'str')\n if if_none_match is not None:\n header_parameters['If-None-Match'] = self._serialize.header(\"if_none_match\", if_none_match, 'str')\n if if_modified_since is not None:\n header_parameters['If-Modified-Since'] = self._serialize.header(\"if_modified_since\", if_modified_since, 'rfc-1123')\n if if_unmodified_since is not None:\n header_parameters['If-Unmodified-Since'] = self._serialize.header(\"if_unmodified_since\", if_unmodified_since, 'rfc-1123')\n\n # Construct and send request\n request = self._client.patch(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise models.DataLakeStorageErrorException(response, self._deserialize)\n\n if cls:\n response_headers = {\n 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),\n 'ETag': self._deserialize('str', response.headers.get('ETag')),\n 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),\n 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),\n 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),\n 'x-ms-client-request-id': self._deserialize('str', 
response.headers.get('x-ms-client-request-id')),\n }\n return cls(response, None, response_headers)", "title": "" }, { "docid": "dfc77def82982f7cdf06d605f562ee76", "score": "0.51793885", "text": "def policyname(self, policyname) :\n\t\ttry :\n\t\t\tself._policyname = policyname\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "65f08fc466375e3cbc2e2dc0d8bd1624", "score": "0.51673204", "text": "def set_locking_policy(self, locking_policy):\n\n self._notnull()\n error = error_details_t()\n result = lib.set_locking_policy(self, locking_policy, byref(error))\n error.raise_on_error()\n return result", "title": "" }, { "docid": "7c395d5d6fd2c351cc9ef48785401dd0", "score": "0.515749", "text": "def _add_s3_access_policies_to_role(\n self, node: Union[HeadNode, BaseQueue, LoginNodesPool], role_ref: str, name: str\n ):\n read_only_s3_resources = []\n read_write_s3_resources = []\n for s3_access in node.iam.s3_access:\n for resource in s3_access.resource_regex:\n arn = self._format_arn(service=\"s3\", resource=resource, region=\"\", account=\"\")\n if s3_access.enable_write_access:\n read_write_s3_resources.append(arn)\n else:\n read_only_s3_resources.append(arn)\n _, policy_name = add_cluster_iam_resource_prefix(\n self._config.cluster_name, self._config, \"S3Access\", iam_type=\"AWS::IAM::Policy\"\n )\n\n s3_access_policy = iam.CfnPolicy(\n Stack.of(self),\n name,\n policy_document=iam.PolicyDocument(statements=[]),\n roles=[role_ref],\n policy_name=policy_name or \"S3Access\",\n )\n\n if read_only_s3_resources:\n s3_access_policy.policy_document.add_statements(\n iam.PolicyStatement(\n sid=\"S3Read\",\n effect=iam.Effect.ALLOW,\n actions=[\"s3:Get*\", \"s3:List*\"],\n resources=read_only_s3_resources,\n )\n )\n\n if read_write_s3_resources:\n s3_access_policy.policy_document.add_statements(\n iam.PolicyStatement(\n sid=\"S3ReadWrite\", effect=iam.Effect.ALLOW, actions=[\"s3:*\"], resources=read_write_s3_resources\n )\n )", "title": "" }, { "docid": "8dd5ddb75cac9857f880b02a066efe93", "score": "0.5149983", "text": "def update_policy(self,policy):\n grant = self.cleaned_data['grant']\n if grant != 'no':\n policy.default_grant = True\n policy.minimum_grade = grant\n else:\n policy.default_grant = False\n policy.minimum_grade = None\n policy.save()", "title": "" }, { "docid": "3f54e743097f8a34ebc704bf25493668", "score": "0.51313686", "text": "def sync_policy(self, sync_policy):\n\n self._sync_policy = sync_policy", "title": "" }, { "docid": "70f97c4e37956c1d1b3cc01d6a33f4c9", "score": "0.51287806", "text": "async def set_policy_aad_secured(self):\n\n write_banner(\"set_policy_aad_secured\")\n print(\"Set Secured Policy on an AAD mode attestation instance.\")\n async with DefaultAzureCredential() as credential, AttestationAdministrationClient(\n os.environ.get(\"ATTESTATION_AAD_URL\"), credential\n ) as admin_client:\n\n # [START set_secured_policy]\n # Create an RSA Key and wrap an X.509 certificate around\n # the public key for that certificate.\n rsa_key = create_rsa_key()\n cert = create_x509_certificate(rsa_key, u\"TestCertificate\")\n\n # Set a minimal policy.\n set_result, _ = await admin_client.set_policy(\n AttestationType.SGX_ENCLAVE,\n \"\"\"version= 1.0;authorizationrules{=> permit();};issuancerules {};\"\"\",\n signing_key=rsa_key,\n signing_certificate=cert,\n )\n print(\"Policy Set Resolution: \", set_result.policy_resolution)\n print(\"Resulting policy signer should match the input certificate:\")\n print(\"Policy Signer: \", 
set_result.policy_signer.certificates[0])\n print(\"Certificate: \", cert)\n # [END set_secured_policy]\n\n # Reset the policy now that we're done.\n await admin_client.reset_policy(AttestationType.SGX_ENCLAVE)", "title": "" }, { "docid": "f9c3888a648acf8a6b3be088250f8678", "score": "0.5115968", "text": "def _enforce(self, req, action, target=None):\n if target is None:\n target = {}\n try:\n self.policy.enforce(req.context, action, target)\n except exception.Forbidden:\n raise HTTPForbidden()", "title": "" }, { "docid": "328e0272a235290e256ce45b1f2819fa", "score": "0.5099834", "text": "def update_policy():\n self.policy = self.forward + (wp & self.query)", "title": "" }, { "docid": "f34a5087c07f0417df6451fde75a138f", "score": "0.50747025", "text": "def test_set_bucket_policy_readonly(log_entry):\n\n # Get a unique bucket_name\n bucket_name = _gen_bucket_name()\n log_entry[\"args\"] = {\n \"bucket_name\": bucket_name,\n }\n\n _CLIENT.make_bucket(bucket_name)\n try:\n # read-only policy\n policy = {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"\",\n \"Effect\": \"Allow\",\n \"Principal\": {\"AWS\": \"*\"},\n \"Action\": \"s3:GetBucketLocation\",\n \"Resource\": \"arn:aws:s3:::\" + bucket_name,\n },\n {\n \"Sid\": \"\",\n \"Effect\": \"Allow\",\n \"Principal\": {\"AWS\": \"*\"},\n \"Action\": \"s3:ListBucket\",\n \"Resource\": \"arn:aws:s3:::\" + bucket_name,\n },\n {\n \"Sid\": \"\",\n \"Effect\": \"Allow\",\n \"Principal\": {\"AWS\": \"*\"},\n \"Action\": \"s3:GetObject\",\n \"Resource\": f\"arn:aws:s3:::{bucket_name}/*\",\n },\n ],\n }\n # Set read-only policy\n _CLIENT.set_bucket_policy(bucket_name, json.dumps(policy))\n # Validate if the policy is set correctly\n if not _validate_policy(bucket_name, policy):\n raise ValueError(\"Failed to set ReadOnly bucket policy\")\n finally:\n _CLIENT.remove_bucket(bucket_name)", "title": "" }, { "docid": "6ad3ba9f00da842160108d2ffa80291b", "score": "0.50653607", "text": "def UpdateConsumerPolicy(self, request, global_params=None):\n config = self.GetMethodConfig('UpdateConsumerPolicy')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "7b0c779d600eb5a79609c0f19fe950fe", "score": "0.50534487", "text": "def update(self, id, data):\n\t\tassert isinstance(id, str), 'The ID must be a string'\n\t\tassert id, 'The ID must not be an empty string'\n\t\tassert isinstance(data, dict), 'The data type must be a dictionary'\n\t\tassert data, 'Policy data must not be an empty dictionary'\n\n\t\turl = f'{self.root.url}/api/v1/policies/{id}'\n\t\tdata = json.dumps(data)\n\t\treturn self.root.r('PUT', url, data, headers=None, verify=self.root.verify)", "title": "" }, { "docid": "a042131a7d15399ddc48652b362f1071", "score": "0.5046923", "text": "def load_policy(self, policy):\n pass", "title": "" }, { "docid": "aaeee4f6f929a226380cbe405d533a97", "score": "0.50342745", "text": "def set_iam_policy(\n self,\n request: iam_policy_pb2.SetIamPolicyRequest = None,\n *,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> policy_pb2.Policy:\n # Create or coerce a protobuf request object.\n if isinstance(request, dict):\n # The request isn't a proto-plus wrapped type,\n # so it must be constructed via keyword expansion.\n request = iam_policy_pb2.SetIamPolicyRequest(**request)\n elif not request:\n # Null request, just make one.\n request = iam_policy_pb2.SetIamPolicyRequest()\n\n # Wrap the RPC method; this adds retry and timeout 
information,\n # and friendly error handling.\n rpc = self._transport._wrapped_methods[self._transport.set_iam_policy]\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"resource\", request.resource),)),\n )\n\n # Send the request.\n response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)\n\n # Done; return the response.\n return response", "title": "" }, { "docid": "2bda2eaa957476ec7f8a50ac78eb1019", "score": "0.50315076", "text": "def grant_put_permission_to(self, role: aws.iam.Role) -> None:\n aws.iam.RolePolicy(\n f\"{role._name}-writes-objects-to-{self._name}\",\n role=role.name,\n policy=self.arn.apply(\n lambda bucket_arn: json.dumps(\n {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": \"s3:PutObject\",\n \"Resource\": f\"{bucket_arn}/*\",\n }\n ],\n }\n )\n ),\n opts=pulumi.ResourceOptions(parent=role),\n )", "title": "" }, { "docid": "254d847f3a7cda610c0dc88fda1f14eb", "score": "0.50277835", "text": "def failure_policy(self, failure_policy):\n\n self._failure_policy = failure_policy", "title": "" }, { "docid": "1c37234e2076d91b456c2d277365494d", "score": "0.49952072", "text": "def create_policy(name, description, actions, resource_arn):\n policy_doc = {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": actions,\n \"Resource\": resource_arn\n }\n ]\n }\n try:\n policy = iam.create_policy(\n PolicyName=name, Description=description,\n PolicyDocument=json.dumps(policy_doc))\n logger.info(\"Created policy %s.\", policy.arn)\n except ClientError:\n logger.exception(\"Couldn't create policy %s.\", name)\n raise\n else:\n return policy", "title": "" }, { "docid": "ad937215f521d3b92e43c7a2e7b82440", "score": "0.49939954", "text": "def set_iam_policy(\n self,\n request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> policy_pb2.Policy:\n # Create or coerce a protobuf request object.\n\n # The request isn't a proto-plus wrapped type,\n # so it must be constructed via keyword expansion.\n if isinstance(request, dict):\n request = iam_policy_pb2.SetIamPolicyRequest(**request)\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method.wrap_method(\n self._transport.set_iam_policy,\n default_timeout=None,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"resource\", request.resource),)),\n )\n\n # Send the request.\n response = rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response", "title": "" }, { "docid": "1e7970143f88e6ec22ca04846093a850", "score": "0.49908513", "text": "def set_device_policy(device_policy):\n if device_policy == 'silent':\n context.context().device_policy = context.DEVICE_PLACEMENT_SILENT\n elif device_policy == 'silent_for_int32':\n context.context().device_policy = context.DEVICE_PLACEMENT_SILENT_FOR_INT32\n elif device_policy == 'warn':\n context.context().device_policy = context.DEVICE_PLACEMENT_WARN\n elif device_policy == 'explicit':\n context.context().device_policy = 
context.DEVICE_PLACEMENT_EXPLICIT\n elif device_policy is None:\n context.context().device_policy = None\n else:\n raise ValueError('Not a valid device policy: %r' % device_policy)", "title": "" }, { "docid": "3813ad93b73de96e9f5dd4874ab293af", "score": "0.4988412", "text": "def _enforce(self, req, action):\n try:\n self.policy.enforce(req.context, action)\n except heat_exception.Forbidden:\n msg = _(\"Action %s not allowed for user\") % action\n raise exception.HeatAccessDeniedError(msg)\n except Exception:\n # We expect policy.enforce to either pass or raise Forbidden\n # however, if anything else happens, we want to raise\n # HeatInternalFailureError, failure to do this results in\n # the user getting a big stacktrace spew as an API response\n msg = _(\"Error authorizing action %s\") % action\n raise exception.HeatInternalFailureError(msg)", "title": "" }, { "docid": "e07e9e7fc3a972d5188cde37223380e9", "score": "0.49798617", "text": "def object_change_policy(self, account, container, obj, policy, **kwargs):\n meta, stream = self.object_fetch(account, container, obj, **kwargs)\n # Before we started generating predictable chunk IDs, it was possible\n # to change to the same policy: it just renewed all chunks and updated\n # the modification time.\n # Now that we generate predictable chunk IDs, we must change something\n # in the object description in order to get a different set of chunks\n # (we don't want to change the object version).\n if meta[\"policy\"] == policy:\n del stream\n raise exc.Conflict(\n \"The object is already using the %s storage policy\" % policy\n )\n kwargs[\"version\"] = meta[\"version\"]\n return self.object_create_ext(\n account,\n container,\n obj_name=meta[\"name\"],\n data=stream,\n policy=policy,\n change_policy=True,\n **kwargs\n )", "title": "" }, { "docid": "28c8a95d2ffa623c7321a82f5a6f56a2", "score": "0.49760813", "text": "def auto_scaling_policy(self, auto_scaling_policy):\n self._auto_scaling_policy = auto_scaling_policy", "title": "" }, { "docid": "a7028222209980f7233b6e397b41d8a0", "score": "0.49747008", "text": "def add_grading_policy(self, grading_policy):\n\n self.course.grading_policy = grading_policy\n self.update_course(self.course, self.student_user.id)\n self.refresh_course()", "title": "" }, { "docid": "2bb7dc80fdb4824c9890ccb31c154e88", "score": "0.4970173", "text": "def scoring_policy(self, value):\r\n self.logger.warn(\"Setting values on scoring_policy will NOT update the remote Canvas instance.\")\r\n self._scoring_policy = value", "title": "" }, { "docid": "945e3850adfd2891919e63ca98e443c4", "score": "0.4963867", "text": "def SetIamPolicy(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "title": "" }, { "docid": "945e3850adfd2891919e63ca98e443c4", "score": "0.4963867", "text": "def SetIamPolicy(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "title": "" }, { "docid": "99a3adcdbb764c25e9e0196c45837347", "score": "0.49574706", "text": "async def set_iam_policy(\n self,\n request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> policy_pb2.Policy:\n # Create or coerce a protobuf request object.\n\n # The 
request isn't a proto-plus wrapped type,\n # so it must be constructed via keyword expansion.\n if isinstance(request, dict):\n request = iam_policy_pb2.SetIamPolicyRequest(**request)\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method.wrap_method(\n self._client._transport.set_iam_policy,\n default_timeout=None,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"resource\", request.resource),)),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response", "title": "" }, { "docid": "3cda5d6c89eda901cbfa7b37c0667f53", "score": "0.49320412", "text": "def SetIamPolicy(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "title": "" }, { "docid": "45d78d1891e4c45a16941dadc0a0ccb0", "score": "0.49171555", "text": "async def set_policy_aad_unsecured(self):\n\n write_banner(\"set_policy_aad_unsecured\")\n print(\"Set an unsecured Policy on an AAD mode attestation instance.\")\n # [BEGIN set_policy_unsecured]\n async with DefaultAzureCredential() as credential, AttestationAdministrationClient(\n os.environ.get(\"ATTESTATION_AAD_URL\"), credential\n ) as admin_client:\n new_policy = \"\"\"\nversion= 1.0;\nauthorizationrules\n{\n [ type==\"x-ms-sgx-is-debuggable\", value==false ] &&\n [ type==\"x-ms-sgx-product-id\", value==1 ] &&\n [ type==\"x-ms-sgx-svn\", value>= 0 ] &&\n [ type==\"x-ms-sgx-mrsigner\", value==\"2c1a44952ae8207135c6c29b75b8c029372ee94b677e15c20bd42340f10d41aa\"]\n => permit();\n};\nissuancerules {\n c:[type==\"x-ms-sgx-mrsigner\"] => issue(type=\"My-MrSigner\", value=c.value);\n};\n \"\"\"\n\n set_result, _ = await admin_client.set_policy(\n AttestationType.OPEN_ENCLAVE, new_policy\n )\n print(\"Policy Set result: \", set_result.policy_resolution)\n # [END set_policy_unsecured]\n\n get_policy, _ = await admin_client.get_policy(AttestationType.OPEN_ENCLAVE)\n if new_policy != get_policy:\n print(\"Policy does not match set policy.\")\n # Attest an OpenEnclave using the new policy.\n await self._attest_open_enclave(os.environ.get(\"ATTESTATION_AAD_URL\"))", "title": "" }, { "docid": "2a83314bb8063f8505e4f49ca8ab1a98", "score": "0.49021062", "text": "def SetIamPolicy(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "title": "" }, { "docid": "0128bc9b4702a144822bc2ac77aca7dd", "score": "0.49009973", "text": "def set_interactive_policy(*, locals=None, banner=None, serve=None,\n prompt_control=None):\n policy = InteractiveEventLoopPolicy(\n locals=locals,\n banner=banner,\n serve=serve,\n prompt_control=prompt_control)\n asyncio.set_event_loop_policy(policy)", "title": "" }, { "docid": "a9866e188a7924529fc03cc3fa1de42f", "score": "0.48997706", "text": "def SetIamPolicy(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "f2121e22c850a7d268c5cc40db9134ea", "score": "0.4899416", "text": "def istio_authorization_policies(self, istio_authorization_policies):\n\n self._istio_authorization_policies = istio_authorization_policies", "title": "" }, { "docid": 
"3cca192a7d2b3e83c1093c89dba3c496", "score": "0.48831755", "text": "def restart_policy(self, restart_policy):\n\n self._restart_policy = restart_policy", "title": "" }, { "docid": "50de3157bcfcb639b479f8ce6278f09a", "score": "0.48806158", "text": "def update_policy(name, short_term_storage, long_term_storage, versioning, backup_duration):\n if not backup_duration and versioning:\n backup_duration = click.prompt(\n \"How many days backups of the datastorage will be stored? (Empty means deletion of the current rule)\",\n default=\"\")\n DataStorageOperations.policy(name, short_term_storage, long_term_storage, backup_duration, versioning)", "title": "" }, { "docid": "35fae65c3bec60e40e6f8bea107dd669", "score": "0.48776403", "text": "def external_traffic_policy(self, external_traffic_policy):\n\n self._external_traffic_policy = external_traffic_policy", "title": "" }, { "docid": "17865c02f427f4fe755fc7f9121ad54e", "score": "0.48763523", "text": "def write_authorize(cls, user, obj):\n if not obj.period.can_save(user):\n raise PermissionDenied()", "title": "" }, { "docid": "f5ec5c42c5a13498297e0ca9224103da", "score": "0.4874774", "text": "def setCheckingPolicy(self, checkingPolicy: cern.rbac.common.authorization.CheckingPolicy) -> 'AccessCheckerRequestBuilder':\n ...", "title": "" } ]
3ac87c776c8aadc9761f4a9bee3738c6
Generates a link allowing the data in a given pandas dataframe to be downloaded
[ { "docid": "5f40d00bea1eb279ad6117ffc5b495d6", "score": "0.7543758", "text": "def get_table_download_link(df):\n csv = df.to_csv(index=False)\n b64 = base64.b64encode(\n csv.encode()\n ).decode() # some strings <-> bytes conversions necessary here\n return f'<a href=\"data:file/csv;base64,{b64}\" download=\"lineup_template.csv\">Download Template as csv file</a>'", "title": "" } ]
[ { "docid": "652badd8b29f86288a249339bc14ca84", "score": "0.7853004", "text": "def get_table_download_link(dataframe: pd.DataFrame):\n val = to_excel(dataframe)\n b64 = base64.b64encode(val)\n # TODO: Create a random number as filename, check generate_xlsx\n return ('<p style=\"text-align:center;\">'\n f'<a href=\"data:application/octet-stream;base64,{b64.decode()}\" '\n 'download=\"mann_kendall.xlsx\">Download Excel file</a></p>')\n # decode b'abc' => abc)", "title": "" }, { "docid": "b7ccfae5069af110c88e109ae4e718f8", "score": "0.7717478", "text": "def get_table_download_link(df):\r\n csv = df.to_csv(index=False)\r\n b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here\r\n href = f'<a href=\"data:file/csv;base64,{b64}\">Download csv file</a>'\r\n return href", "title": "" }, { "docid": "fd1e52f4bfee19580b16aabb680bca81", "score": "0.77031136", "text": "def get_table_download_link(df):\r\n csv = df.to_csv(index=False)\r\n b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here\r\n href = f'<a href=\"data:file/csv;base64,{b64}\">Download csv</a>'\r\n return href", "title": "" }, { "docid": "39fdbbc659b5b85ef19ee65c812a3d1e", "score": "0.76984584", "text": "def get_table_download_link(df):\n csv = df.to_csv(index=False)\n b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here\n href = f'<a href=\"data:file/csv;base64,{b64}\">Download csv file</a>'\n return href", "title": "" }, { "docid": "39fdbbc659b5b85ef19ee65c812a3d1e", "score": "0.76984584", "text": "def get_table_download_link(df):\n csv = df.to_csv(index=False)\n b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here\n href = f'<a href=\"data:file/csv;base64,{b64}\">Download csv file</a>'\n return href", "title": "" }, { "docid": "0b1bd2ccce3a783132c2ded38d50fffd", "score": "0.76605654", "text": "def get_table_download_link(df):\n csv = df.to_csv(index=False)\n b64 = base64.b64encode(\n csv.encode()\n ).decode() # some strings <-> bytes conversions necessary here\n return f'<a href=\"data:file/csv;base64,{b64}\" download=\"dataset.csv\">Download CSV file</a>'", "title": "" }, { "docid": "f3f9e9dc4d320916717e52208944911c", "score": "0.7629094", "text": "def get_table_download_link(df):\r\n csv = df.to_csv(index=False)\r\n b64 = pybase64.b64encode(\r\n csv.encode()\r\n ).decode() # some strings <-> bytes conversions necessary here\r\n return f'<a href=\"data:file/csv;base64,{b64}\" download=\"Prediction.csv\">⬇️ Download output CSV File</a>'", "title": "" }, { "docid": "30861d6c1da187b69b2626884f8cb8e9", "score": "0.756977", "text": "def get_table_download_link(df, out_file, label):\n csv = df.to_csv(index=False)\n b64 = base64.b64encode(\n csv.encode()\n ).decode() # some strings <-> bytes conversions necessary here\n out = f'<a href=\"data:file/csv;base64,{b64}\" download=\"%s\">%s</a>' % (out_file, label)\n return out", "title": "" }, { "docid": "ab186da2d19282b7f4186eecc2c8811e", "score": "0.7558905", "text": "def get_table_download_link(df):\r\n val = to_excel(df)\r\n b64 = base64.b64encode(val) # val looks like b'...'\r\n return f'<a href=\"data:application/octet-stream;base64,{b64.decode()}\" download=\"classified_data.xlsx\">Download file</a>' # decode b'abc' => abc\r", "title": "" }, { "docid": "59f5c6bd8e95d7a9f0efec004885c1b0", "score": "0.7536353", "text": "def get_table_download_link(df):\n csv = df.to_csv(index=False)\n b64 = 
base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here\n href = f'<a href=\"data:file/csv;base64,{b64}\">Download: Right click here and save as CSV</a>'\n return href", "title": "" }, { "docid": "13cd219f2f8562ed90eb577cddeff28d", "score": "0.7458118", "text": "def df_download_link(df: pd.DataFrame, text: str, filename: str):\n csv_bytes = df.to_csv(index=False).encode()\n b64_str = base64.b64encode(csv_bytes).decode()\n html_str = f'<a download=\"{filename}.csv\" href=\"data:file/csv;name={filename}.csv;base64,{b64_str}\">{text}</a>'\n st.markdown(html_str, unsafe_allow_html=True)", "title": "" }, { "docid": "338175ccbf76d0baae50a6d67562b6ec", "score": "0.7433467", "text": "def get_table_download_link(df):\n val = to_excel(df)\n b64 = base64.b64encode(val) # val looks like b'...'\n return f'<a href=\"data:application/octet-stream;base64,{b64.decode()}\" download=\"DataNews.xlsx\">Download Excel file</a>' # decode b'abc' => abc", "title": "" }, { "docid": "c722d25f2061bdb349f8131ba8eb8306", "score": "0.7415955", "text": "def get_table_download_link(df):\r\n val = to_excel(df)\r\n b64 = base64.b64encode(val) # val looks like b'...'\r\n return f'<a href=\"data:application/octet-stream;base64,{b64.decode()}\" download=\"Summary.xlsx\">Export to Excel</a>' # decode b'abc' => abc\r", "title": "" }, { "docid": "44c6033575b1e89a0c662e9a3936a73d", "score": "0.73187155", "text": "def get_table_download_link(df, text='Click here to download table'):\r\n csv = df.to_csv(index=False)\r\n b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here\r\n # href = f'<a href=\"data:file/csv;base64,{b64}\">Download csv file</a>'\r\n href = f'<a href=\"data:file/csv;base64,{b64}\" download=\"table.csv\">{text}</a>'\r\n return href", "title": "" }, { "docid": "0286fb87ce7f72036e960f4d95ae8053", "score": "0.7140357", "text": "def get_table_download_link_csv(df):\r\n csv = df.to_csv(index=False)\r\n b64 = base64.b64encode(\r\n csv.encode()\r\n ).decode() # some strings <-> bytes conversions necessary here\r\n return f'<a href=\"data:file/csv;base64,{b64}\" download=\"download.csv\">Download csv file</a>'", "title": "" }, { "docid": "97d8bc5f47d5a4d820f67967ca8a3fde", "score": "0.70307845", "text": "def get_table_download_link_xlsx(df):\r\n val = to_excel(df)\r\n b64 = base64.b64encode(val) # val looks like b'...'\r\n return f'<a href=\"data:application/octet-stream;base64,{b64.decode()}\" download=\"download.xlsx\">Download xlsx file</a>' # decode b'abc' => abc\r", "title": "" }, { "docid": "92bd98737850ed07411b4ab075a84eeb", "score": "0.6968001", "text": "def get_table_download_link(dafr):\r\n csv = dafr.to_csv(index=False)\r\n b64 = base64.b64encode(\r\n csv.encode()\r\n ).decode() # some strings <-> bytes conversions necessary here\r\n return f'<a href=\"data:file/csv;base64,{b64}\" download=\"Results.csv\">Download Result (CSV)</a>'", "title": "" }, { "docid": "b6bf54f464b06c3407b9706382237a5e", "score": "0.65727204", "text": "def download_link(object_to_download, download_filename, download_link_text):\n if isinstance(object_to_download,pd.DataFrame):\n object_to_download = object_to_download.to_csv(index=False)\n\n # some strings <-> bytes conversions necessary here\n b64 = base64.b64encode(object_to_download.encode()).decode()\n\n return f'<a href=\"data:file/txt;base64,{b64}\" download=\"{download_filename}\">{download_link_text}</a>'", "title": "" }, { "docid": "b6bf54f464b06c3407b9706382237a5e", "score": "0.65727204", "text": 
"def download_link(object_to_download, download_filename, download_link_text):\n if isinstance(object_to_download,pd.DataFrame):\n object_to_download = object_to_download.to_csv(index=False)\n\n # some strings <-> bytes conversions necessary here\n b64 = base64.b64encode(object_to_download.encode()).decode()\n\n return f'<a href=\"data:file/txt;base64,{b64}\" download=\"{download_filename}\">{download_link_text}</a>'", "title": "" }, { "docid": "2a1ab01ed05305edeab81026c97c94c3", "score": "0.65723854", "text": "def csv_downloader(data):\n time_stamp = time.strftime(\"%Y%m%d_%H%M%S\")\n \n csvfile = data.to_csv()\n \n b64 = base64.b64encode(csvfile.encode()).decode()\n \n new_filename = f\"fit_results{time_stamp}.csv\"\n href = f'** Download DataFrame as .csv: ** \\\n <a href=\"data:file/csv;base64,{b64}\" \\\n download=\"{new_filename}\">Click Here</a>'\n st.markdown(href, unsafe_allow_html=True)", "title": "" }, { "docid": "9285f24031cf527126dd828c71629438", "score": "0.6354674", "text": "def download_dataframe_as_csv(dataframe, filename):\n dataframe.to_csv(filename)\n files.download(filename)", "title": "" }, { "docid": "3185fb6527d6f3466514db45089a7fee", "score": "0.6178509", "text": "def url_string(it):\n \n b_link =\"https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni-\"\n \n return b_link + it + \".csv\"", "title": "" }, { "docid": "c07d89cb8a13d195b0fed1cfb2b1122d", "score": "0.61095786", "text": "def format_url(df):\n\n ix_col = df.index.name\n df = df.reset_index()\n df[ix_col] = df.apply(\n lambda x: f'<a href=\"{x[\"URL\"]}\">{x[ix_col]}</a>', axis=1\n )\n df = df.drop(\"URL\", axis=1)\n\n df = df.set_index(ix_col)\n\n return df", "title": "" }, { "docid": "c3bb5283123cd397d820dc67364a0dba", "score": "0.6088772", "text": "def download_on_dataframe(self, **kwargs):\n\n if self.blob is None:\n raise ValueError(\"No file selected. 
Set it with select_file method first.\")\n\n logger.debug(f\"gs path: {self.uri}\")\n return pd.read_csv(self.uri, **kwargs)", "title": "" }, { "docid": "cea8b8fc72a932b2bf26a80d88bf3e0c", "score": "0.6074464", "text": "def download():\r\n return response.download(request, db2)", "title": "" }, { "docid": "ce8da0ce41714656c57e002c29a67771", "score": "0.6072765", "text": "def download_csv(data):\n csv = data.to_csv().encode()\n b64 = base64.b64encode(csv).decode()\n href = f'<a href=\"data:file/csv;base64,{b64}\" download=\"result.csv\" target=\"_blank\">Download CSV</a>'\n\n return href", "title": "" }, { "docid": "775f4d5c7409fbeefe4e2d1d300dd7fa", "score": "0.60203224", "text": "def download_data_from_table(self, mydb, db_name, table_name, selection_col, file_name_for_download,file_download_location):\r\n try:\r\n query_download = f\"\"\"select * from {db_name}.{table_name} {selection_col};\"\"\"\r\n data = pd.read_sql(query_download, mydb)\r\n\r\n data.to_csv(f\"\"\"{file_download_location}\\\\{file_name_for_download}.csv\"\"\", index=False)\r\n logging.debug(f\"\"\" All records successfully downloaded at {file_download_location}\\\\{file_name_for_download}.csv\"\"\")\r\n\r\n return f\"\"\"All records successfully downloaded at {file_download_location}\\\\{file_name_for_download}.csv\"\"\"\r\n\r\n except Exception as e:\r\n logging.error(f\"\"\"ERROR IN : download_data_from_table(), ERROR : {str(e)}\"\"\")\r\n return f\"\"\"ERROR IN : download_data_from_table(), ERROR : {str(e)}\"\"\"", "title": "" }, { "docid": "681ec8a506d95ab2ebbb21031867317a", "score": "0.59827954", "text": "def toggle_download_data_link(_, dl_data_choice, stored_data):\n fname = \"%s_data.json\" % dl_data_choice\n fpath = \"%s%s\" % (stored_data['info']['dl_data_path'], fname)\n return fpath", "title": "" }, { "docid": "53c5d10f2d16687398ba6e396b736924", "score": "0.5969805", "text": "def export_dataframe_html(dataframe):\n global variables, result, resultMetadata\n import pandas as pd\n\n limit_output_view = variables.get(\"LIMIT_OUTPUT_VIEW\")\n limit_output_view = 5 if limit_output_view is None else int(limit_output_view)\n if limit_output_view > 0:\n info = \"The task preview is limited to \" + str(limit_output_view) + \" rows\"\n dataframe = dataframe.head(limit_output_view).copy()\n else:\n nrows = len(dataframe.index)\n info = \"Total of \" + str(nrows) + \" rows\"\n print(info)\n with pd.option_context('display.max_colwidth', -1):\n result = dataframe.to_html(escape=False, classes='table table-bordered table-striped', justify='center')\n result = \"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <meta charset=\"UTF-8\">\n <title>Machine Learning Preview</title>\n <link rel=\"stylesheet\" href=\"https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\" \n integrity=\"sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T\" crossorigin=\"anonymous\">\n </head>\n <body>\n <h1 class=\"text-center my-4\" style=\"color:#003050;\">Data Preview</h1>\n <p align=\"center\">{0}</p>\n {1}\n </body></html>\"\"\".format(info, result)\n result = result.encode('utf-8')\n resultMetadata.put(\"file.extension\", \".html\")\n resultMetadata.put(\"file.name\", \"output.html\")\n resultMetadata.put(\"content.type\", \"text/html\")", "title": "" }, { "docid": "cb697b6a3a1b84b2066f131d094c3b9f", "score": "0.5959588", "text": "def link_to_pub(records):\n if 'metaknowledge' in str(type(records)).lower():\n recs = records.forNLP(extraColumns=[\"AU\", \"SO\", \"DE\", 'DOI'], lower=False, 
removeNonWords=False)\n df = pd.DataFrame(recs)\n elif 'dataframe' in str(type(records)).lower():\n df = records.copy()\n # End if\n\n df.loc[df['DOI'] != '', 'DOI link'] = \"https://dx.doi.org/\" + df.loc[df['DOI'] != '', 'DOI'].astype(str)\n\n return df", "title": "" }, { "docid": "56fb28cfd437b853155f7add3e263fe7", "score": "0.59426004", "text": "def get_download_link(self):\n raise NotImplementedError()", "title": "" }, { "docid": "fe5d9bf254dbc00d739a521a4dd728fb", "score": "0.591364", "text": "def download_link(object_to_download, download_filename, download_link_text):\n\n\n # some strings <-> bytes conversions necessary here\n b64 = base64.b64encode(object_to_download.encode()).decode()\n\n return f'<a href=\"data:file/txt;base64,{b64}\" download=\"{download_filename}\">{download_link_text}</a>'", "title": "" }, { "docid": "dd54626d8d88f05bcb411c40146284a3", "score": "0.58500165", "text": "def download():\n return response.download(request,db)", "title": "" }, { "docid": "dd54626d8d88f05bcb411c40146284a3", "score": "0.58500165", "text": "def download():\n return response.download(request,db)", "title": "" }, { "docid": "dd54626d8d88f05bcb411c40146284a3", "score": "0.58500165", "text": "def download():\n return response.download(request,db)", "title": "" }, { "docid": "dd54626d8d88f05bcb411c40146284a3", "score": "0.58500165", "text": "def download():\n return response.download(request,db)", "title": "" }, { "docid": "c2e0c541bf13a84c2d4711914076eeae", "score": "0.5832997", "text": "def download(self):", "title": "" }, { "docid": "9a42985872da94830e9c189c7cd1f035", "score": "0.5823003", "text": "def getData(startdt, savepath):\n \n #create filename url list (summer 2015 start date is 15,7,4)\n startdt = date.date(startdt[0],startdt[1],startdt[2])\n uri = \"http://web.mta.info/developers/data/nyct/turnstile/turnstile_\"\n urllist = [re.sub(\"-\",\"\",uri+str(startdt + date.timedelta(days=i))[2:]+str('.txt')) for i in np.arange(0,90,7)]\n \n #urllist = [\"http://web.mta.info/developers/data/nyct/turnstile/turnstile_150704.txt\",\n # \"http://web.mta.info/developers/data/nyct/turnstile/turnstile_150711.txt\",\n # \"http://web.mta.info/developers/data/nyct/turnstile/turnstile_150718.txt\"]\n \n \n #download from first url with header\n response = ulib.urlopen(urllist[0])\n df = pd.read_csv(response, \n header=0)\n #dfbig = pd.DataFrame({})\n response.close()\n \n #download from remaining urls and concatenate without header to dataframe\n for url in urllist[1:]:\n response = ulib.urlopen(url)\n df = pd.concat([df,pd.read_csv(response, \n skip_blank_lines=True)])\n response.close()\n \n df.drop(df.columns[[0,1,2,3,4,5,6,7,8,9,10]],axis=1)\n\n #df = pd.concat([df,dfbig], axis=0)\n #'/Users/ash/ds/projects/data/mta/mta-turnstile-summer2015data.csv'\n pd.DataFrame.to_csv(df, savepath)", "title": "" }, { "docid": "b6f3542c637ad94eb9d886dfb88eb380", "score": "0.5820612", "text": "def generate_urls():\n yield base_url.format(\"ProductionAndCurtailmentsData-May1_2014-May31_2017.xlsx\")\n yield base_url.format(\"ProductionAndCurtailmentsData-Jun1_2017-Dec31_2017.xlsx\")\n for year in range(2018, 2021):\n yield base_url.format(f\"ProductionAndCurtailmentsData_{year}.xlsx\")", "title": "" }, { "docid": "f5c0762107632a7cd8b72fd3f41afa96", "score": "0.5785434", "text": "def return_csv(new_df, name):\n redirect('http://localhost:5000')\n new_df.to_csv('./' + name + '.csv')\n return send_file('./' + name + '.csv',\n attachment_filename='./' + name + '.csv',\n as_attachment=True)", "title": "" }, { 
"docid": "db55dffdff914f0995e3eb4d9d381ece", "score": "0.57851064", "text": "def download_urls(self, df, start = 0, end = 10, headers = headers):\n all_download_urls = []\n urls = list(df['Album hrefs'])\n tracks = list(df['Track'])\n\n for i in range(start, end):\n #Print the current time for every 50 urls obtained\n if i%50 ==0:\n now = datetime.now()\n current_time = str(now.strftime(\"%H:%M:%S\"))\n print(\"{} Current Time = {}\".format(i, current_time))\n payload = {}\n headers = headers\n url = urls[i]\n track = tracks[i]\n\n \n try:\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n if response.status_code == 200:\n all_download_urls.append(self.get_url_download_(response, str(track)))\n else:\n all_download_urls.append('None')\n except:\n all_download_urls.append('None')\n\n return all_download_urls", "title": "" }, { "docid": "a40b56a37f1e2d4218eaaf96d567ad8e", "score": "0.5767065", "text": "def download_dataset(url: str) -> pd.DataFrame:\n print(f'downloading training data from {DATA_URL}')\n data_file = urlopen(url)\n # app_train = pd.read_excel('final data sales details.xlsx', index_col = 'Client Account No.')\n return pd.read_excel(data_file, index_col = 'Client Account No.')", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": 
"cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "cd30c2e3a4426e0af0135a664167e528", "score": "0.57450867", "text": "def download():\n return response.download(request, db)", "title": "" }, { "docid": "7c66ccaa44b5c533fc58ec1e61bbfd83", "score": "0.5743465", "text": "def download():\n\treturn response.download(request, db)", "title": "" }, { "docid": "340de45ce944ffe89fa16bdcc70aaa8c", "score": "0.57317215", "text": "def file_download_link(filename):\n location = \"./download/{}\".format(urlquote(filename))\n return html.A(filename, href=location)", "title": "" }, { "docid": "629f5567f6f66a8977ca26bf08e3c332", "score": "0.573103", "text": "def data_frame(list_of_dicts):\n df = pd.DataFrame(list_of_dicts)\n index_df = df.set_index('url')\n index_df.to_csv('result.csv')", "title": "" }, { "docid": "653480b3869fa8a43b8475173d5c2516", "score": "0.5708914", "text": "def append_download_urls_save_df(self, df, download_urls, filename):\n df['Download URLs'] = download_urls\n df.to_csv(filename, index=False)\n return", "title": "" }, { "docid": "5ab7d9f24c9712346d9a8eff654b65b1", "score": "0.5701151", "text": "def format_metadata_url(api_key, table_name):\n query_params = [('api_key', api_key), ('qopts.export', 'true')]\n\n return (\n QUANDL_DATA_URL + table_name + \".csv?\" + urlencode(query_params)\n )", "title": "" }, { "docid": "7c8fc4399952b787d6afb3b3ec916311", "score": "0.5682233", "text": "def file_download_link(filename):\n location = \"/download/{}\".format(urlquote(filename))\n return html.A(filename, href=location)", "title": "" }, { "docid": "4a7ee823b2ec283d9c83f8a33c5d9f22", "score": "0.5651486", "text": "def generate_link(df_drugs: pd.DataFrame, df_reference: pd.DataFrame) -> {}:\n\n drug_dict = dict()\n # define a list of drugs. 
Faster to loop on a list than on a pandas df.\n drugs = df_drugs['drug'].to_list()\n # print(\"drugs\" ,+ drugs )\n\n for drug in drugs:\n drug_dict[drug] = dict()\n\n # pubMed\n drug_dict[drug][\"pubMed\"] = finding_mention(drug, df_reference, \"title\")[\"title\"].to_list()\n drug_dict[drug][\"pubMedJournal\"] = finding_mention(drug, df_reference, \"title\")[\"journal\"].to_list()\n drug_dict[drug][\"pubMedDate\"] = finding_mention(drug, df_reference, \"title\")[\"date\"].to_list()\n\n # clinical_trials\n drug_dict[drug][\"clinicalTrial\"] = finding_mention(drug, df_reference, \"title\")[\"title\"].to_list()\n drug_dict[drug][\"clinicalTrialJournal\"] = finding_mention(drug, df_reference, \"title\")[\"journal\"].to_list()\n drug_dict[drug][\"clinicalTrialDate\"] = finding_mention(drug, df_reference, \"title\")[\"date\"].to_list()\n return drug_dict", "title": "" }, { "docid": "6494098b252ffd478e8c0fb4f2d53d70", "score": "0.56379694", "text": "def download_csv(readings, filename_base=\"results\"):\n df = pd.DataFrame(readings)\n output_buffer = io.BytesIO()\n df.to_csv(output_buffer)\n output_buffer.seek(0)\n filename = (\n filename_base + \"_\" + datetime.now().strftime(\"%d-%m-%Y_%H-%M-%S\") + \".csv\"\n )\n return send_file(\n output_buffer, download_name=filename, mimetype=\"text/csv\", as_attachment=True\n )", "title": "" }, { "docid": "fffafd15f6e6ef701e17d79348e9bde3", "score": "0.56352186", "text": "def downloadSubmissions(challenge, showMissing):\n\n # generator depends on challenge type and sometimes mode (2019 vs 2020) active\n if challenge == 'cea':\n if config.OUTPUT_2019_FORMAT:\n def generate():\n \"\"\"2019 format: col_id before row_id\"\"\"\n for row in db.getAllTargets('cea', not showMissing):\n yield '{},{},{},{}{}\\n'.format(\n row['table_name'],\n row['col_id'],\n row['row_id'],\n config.URL_PREFIX if config.OUTPUT_ADD_PREFIX else '',\n row['mapped']\n )\n else:\n def generate():\n \"\"\"2020 format: row_id before col_id\"\"\"\n for row in db.getAllTargets('cea', not showMissing):\n yield '{},{},{},{}{}\\n'.format(\n row['table_name'],\n row['row_id'],\n row['col_id'],\n config.URL_PREFIX if config.OUTPUT_ADD_PREFIX else '',\n row['mapped']\n )\n elif challenge == 'cpa':\n def generate():\n for row in db.getAllTargets('cpa', not showMissing):\n yield '{},{},{},{}{}\\n'.format(\n row['table_name'],\n row['sub_id'],\n row['obj_id'],\n config.URL_PREFIX if config.OUTPUT_ADD_PREFIX else '',\n row['mapped']\n )\n elif challenge == 'cta':\n def generate():\n for row in db.getAllTargets('cta', not showMissing):\n yield '{},{},{}{}\\n'.format(\n row['table_name'],\n row['col_id'],\n config.URL_PREFIX if config.OUTPUT_ADD_PREFIX else '',\n row['mapped']\n )\n else:\n raise Exception('Unknown challenge: {}'.format(challenge))\n\n # stream response\n filename = '{}{}.csv'.format(challenge, 'Missing' if showMissing else '')\n return flask.Response(\n generate(),\n mimetype='text/csv',\n headers={\n \"Content-disposition\": \"attachment; filename={}\".format(filename)\n }\n )", "title": "" }, { "docid": "96a50c95e27e648dee3cc8794bddfff5", "score": "0.56314445", "text": "def _get_data_url(self, **form_data):\n # For old data links of Google Spreadsheets\n if 'ccc?key' in form_data['googledocs_url']:\n return ''.join([form_data['googledocs_url'], '&output=csv'])\n # New data format for Google Drive import is like this:\n # https://docs.google.com/spreadsheets/d/key/edit?usp=sharing\n else:\n return ''.join([form_data['googledocs_url'].split('edit')[0],\n 'export?format=csv'])", 
"title": "" }, { "docid": "c422055b33969fb5a2700303d5edc175", "score": "0.5620762", "text": "def Download_Reg(self, StDt, EndDt, Days, db):\r\n t0 = datetime.now()\r\n print('Start time: ', t0)\r\n self.StDt = datetime.strptime(str(StDt), '%d.%m.%Y').date()\r\n\r\n if EndDt != 0:\r\n self.EndDt = datetime.strptime(str(EndDt), '%d.%m.%Y').date()\r\n self.Days_between = self.StDt - self.EndDt\r\n self.Days_to_Down = self.Days_between.days\r\n elif Days != 0:\r\n self.Days_to_Down = int(Days)\r\n self.EndDt = self.StDt - timedelta(days = Days)\r\n self.conn = sqlite3.connect(db)\r\n self.cur = self.conn.cursor()\r\n self.cur.execute(\"SELECT * from links_reg_match\")\r\n Dates = [tup[1] for tup in self.cur.fetchall()]\r\n self.cur.execute(\"SELECT * from links_reg_match\")\r\n links = [tup[0] for tup in self.cur.fetchall()] \r\n df = pd.DataFrame({'Dates':Dates, 'Link':links})\r\n df['Dates'] = pd.to_datetime(df['Dates'], format='%d.%m.%Y')\r\n l = []\r\n for i in range(0, len(df['Dates'])):\r\n l.append(df.at[i, 'Dates'].date())\r\n df['Dates'] = l\r\n df.sort_values('Dates', inplace=True, axis=0)\r\n df.set_index('Dates', inplace = True)\r\n df = df.loc[self.EndDt:self.StDt]\r\n df.reset_index(inplace = True)\r\n time_to_download = 0.30\r\n print('Downloading from ',self.EndDt,' to ',self.StDt)\r\n print('Matches to download: ',len(df['Link']))\r\n print('Estimated finish time: ',(t0 + timedelta(seconds=(time_to_download*len(df['Link'])))))\r\n for f in range(0,len(df['Link'])): \r\n if f == 0:\r\n print('Initiating the reg download...')\r\n #Progress\r\n if f%1000 == 0:\r\n print('Progress: ', round(f/len(df['Link']) * 100, 2), '%')\r\n page = requests.get(df.at[f, 'Link'])\r\n html_soup = soup(page.text,'html.parser')\r\n if html_soup.find('tr', class_='tour_head unpair') is not None:\r\n df.at[f, 'Date'] = html_soup.find('tr', class_='tour_head unpair').find_all('td')[0].text\r\n df.at[f, 'Date'] = df.at[i, 'Date'][:5] + '.20' + df.at[i, 'Date'][6:8]\r\n df.at[f, 'Round'] = html_soup.find('tr', class_='tour_head unpair').find_all('td')[1].text\r\n df.at[f, 'Player1'] = html_soup.find('tr', class_='tour_head unpair').find_all('td')[2].a.get('title')\r\n df.at[f, 'Player2'] = html_soup.find('tr', class_='tour_head unpair').find_all('td')[3].a.get('title')\r\n df.at[f, 'Score'] = html_soup.find('tr', class_='tour_head unpair').find_all('td')[4].select_one(\"span[id=score]\").text\r\n df.at[f, 'Location'] = html_soup.find('tr', class_='tour_head unpair').find_all('td')[5].a.get('title')\r\n df.at[f, 'Surface'] = html_soup.find('tr', class_='tour_head unpair').find_all('td')[6].text\r\n if html_soup.find('table', class_='table_stats_match') is not None:\r\n for r in range(1,len(html_soup.find('table', class_='table_stats_match').find_all('tr'))):\r\n if html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[0].text == '1st SERVE %':\r\n df.at[f, '1st Serve % P1'] = html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[1].text\r\n df.at[f, '1st Serve % P2'] = html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[2].text\r\n elif html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[0].text == '1st SERVE POINTS WON':\r\n df.at[f, '1st Serve Points Won P1'] = html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[1].text\r\n df.at[f, '1st Serve Points Won P2'] = html_soup.find('table', 
class_='table_stats_match').find_all('tr')[r].find_all('td')[2].text\r\n elif html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[0].text == '2nd SERVE POINTS WON':\r\n df.at[f, '2nd Serve Points Won P1'] = html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[1].text\r\n df.at[f, '2nd Serve Points Won P2'] = html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[2].text\r\n elif html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[0].text == 'BREAK POINTS WON':\r\n df.at[f, 'BREAK POINTS WON P1'] = html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[1].text\r\n df.at[f, 'BREAK POINTS WON P2'] = html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[2].text\r\n elif html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[0].text == 'TOTAL RETURN POINTS WON':\r\n df.at[f, 'TOTAL RETURN POINTS WON P1'] = html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[1].text\r\n df.at[f, 'TOTAL RETURN POINTS WON P2'] = html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[2].text\r\n elif html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[0].text == 'TOTAL POINTS WON':\r\n df.at[f, 'TOTAL POINTS WON P1'] = html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[1].text\r\n df.at[f, 'TOTAL POINTS WON P2'] = html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[2].text\r\n elif html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[0].text == 'DOUBLE FAULTS':\r\n df.at[f, 'DOUBLE FAULTS P1'] = html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[1].text\r\n df.at[f, 'DOUBLE FAULTS P2'] = html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[2].text\r\n elif html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[0].text == 'ACES':\r\n df.at[f, 'ACES P1'] = html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[1].text\r\n df.at[f, 'ACES P2'] = html_soup.find('table', class_='table_stats_match').find_all('tr')[r].find_all('td')[2].text\r\n \r\n #exclude doubles \r\n df = df[~df[\"Player1\"].str.contains(\"/\")]\r\n \r\n for i in range(0,len(df['Link'])):\r\n try:\r\n self.cur.execute(\"INSERT INTO reg_master VALUES (?, ?, NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", \r\n (df.at[i, 'Link'], df.at[i, 'Date'], df.at[i, 'Player1'], df.at[i, 'Player2'], df.at[i, 'Round'], df.at[i, 'Score'], \r\n df.at[i, 'Location'], df.at[i, 'Surface'], df.at[i, '1st Serve % P1'], df.at[i, '1st Serve Points Won P1'], \r\n df.at[i, '2nd Serve Points Won P1'], df.at[i, 'BREAK POINTS WON P1'], df.at[i, 'TOTAL RETURN POINTS WON P1'], \r\n df.at[i, 'TOTAL POINTS WON P1'], df.at[i, 'DOUBLE FAULTS P1'], df.at[i, 'ACES P1'], df.at[i, '1st Serve % P2'], \r\n df.at[i, '1st Serve Points Won P2'], df.at[i, '2nd Serve Points Won P2'], df.at[i, 'BREAK POINTS WON P2'], \r\n df.at[i, 'TOTAL RETURN POINTS WON P2'], df.at[i, 'TOTAL POINTS WON P2'], df.at[i, 'DOUBLE FAULTS P2'], df.at[i, 'ACES P2']))\r\n self.conn.commit()\r\n except:\r\n pass\r\n\r\n t1 = datetime.now()\r\n print(t1)\r\n print(\"It took \",(t1-t0),\" seconds.\")", "title": "" }, { "docid": "50e18eeb206cde9cf7002b24c5ec6eaa", 
"score": "0.56139016", "text": "def export_dataframe_tableau(dataframe, filename=\"dataframe.hyper\"):\n global result\n import pantab\n # TODO: Fix ModuleNotFoundError: No module named 'tableauhyperapi'\n result = dataframe.to_json(orient='split')\n pantab.frame_to_hyper(dataframe, filename)\n with open(filename, \"rb\") as binary_file:\n file_bin = binary_file.read()\n assert file_bin is not None\n result = file_bin\n resultMetadata.put(\"file.extension\", \".hyper\")\n resultMetadata.put(\"file.name\", filename)\n resultMetadata.put(\"content.type\", \"application/octet-stream\")", "title": "" }, { "docid": "dfb43b88dac405007d8a2b700c53b453", "score": "0.5594382", "text": "def get_link(df_list, files):\n pats = [re.compile(s) for s in ['http://','https://','www.']]\n link_df = []\n for i in range(len(df_list)):\n df = df_list[i]\n link = df[df.iloc[:,2].str.contains('link')].iloc[:,[1,3,7]]\n link.columns = ['time','origin','content']\n idx = pd.isnull(link.origin) != True\n link = link.loc[idx,['time','content']]\n for ii in range(len(link.content)):\n s = link.content.iloc[ii]\n for pat in pats:\n s = pat.sub('',s)\n link.content.iloc[ii] = s\n filenumber = int(re.findall(r'\\d+', files[i])[0])\n link['id'] = pd.Series((np.repeat(filenumber,np.shape(link)[0])), index = link.index)\n cols = link.columns.tolist()\n cols = cols[-1:] + cols[:-1]\n link_df.append(link[cols])\n link_df = pd.concat(link_df)\n link_df = link_df.reset_index()\n return link_df.drop('index',axis = 1)", "title": "" }, { "docid": "b2f53471845cbf3169c1bb51965fe49d", "score": "0.5583656", "text": "def download_url(self):\n return", "title": "" }, { "docid": "234cac62679fe769c31516daf3e9d5dc", "score": "0.5575599", "text": "def download_csv(data, filename):\n\theader = data[0].keys() #Data must have at least one record.\n\twith open('downloads/' + filename, \"w+\") as f:\n\t\twriter = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\t\twriter.writerow(header)\n\t\tfor row in data:\n\t\t\twriter.writerow(list(row.values()))\n\n\t#Push the file to the view\n\treturn send_file('downloads/' + filename,\n\t\t\t\t mimetype='text/csv',\n\t\t\t\t attachment_filename=filename,\n\t\t\t\t as_attachment=True)", "title": "" }, { "docid": "2fbb5e324590101c410ae1f35cc03c31", "score": "0.5571424", "text": "def csv_dl(links, start):\n for link in links:\n if link.string[:3] == 'Jan':\n start -= 1\n path = base + link['href']\n #2 types of links: a short one that leads to a page with the dl (else statemtent), and direct links to the file\n if len(link['href']) > 11:\n pass\n else:\n s2 = BeautifulSoup(requests.get(path).content)\n path = s2.find('a', {'href': re.compile(r'/files*')})['href']\n\n resp = requests.get(path)\n with open('data/' + str(start) + link.string[:3] + '.xlsx', 'wb') as file:\n file.write(resp.content)\n return start", "title": "" }, { "docid": "2a4dc1c63efe5bdd5cdb0ba77297e118", "score": "0.55572283", "text": "def export_data() -> None:\n connection = connect()\n curr = connection.cursor()\n curr.execute('''\n SELECT keyword, query_timestamp, title, price, rating, reviews_count, item_url, image_url\n FROM keywords\n INNER JOIN keywords_data ON keywords_data.keyword_id = keywords.id;''')\n rows = curr.fetchall()\n df = pd.DataFrame(rows, columns=[x[0] for x in curr.description])\n df.to_csv('data_export.csv', index=False)\n print('Data was exported successfully to data_export.csv')", "title": "" }, { "docid": "457a78255302d30511514d8f69d0f1e2", "score": "0.5556854", "text": "def 
downloads(self, result_loc_, date_):\n end_date = date_ + timedelta(days=1)\n query = Template(content_downloads.init())\n query = query.substitute(app=self.config['context']['pdata']['id']['app'],\n start_date=datetime.strftime(date_, '%Y-%m-%dT00:00:00+00:00'),\n end_date=datetime.strftime(end_date, '%Y-%m-%dT00:00:00+00:00'))\n\n headers = {\n 'Content-Type': \"application/json\"\n }\n url = \"{}druid/v2/\".format(self.druid_hostname)\n response = requests.request(\"POST\", url, data=query, headers=headers)\n records = [events['event'] for events in response.json()]\n data = pd.DataFrame(records)\n content = pd.read_csv(str(result_loc_.parent.joinpath('tb_metadata', date_.strftime('%Y-%m-%d'), 'textbook_snapshot.csv')))\n content = content[content['contentType']=='Resource']\n content = content[['identifier', 'channel']]\n content.drop_duplicates(inplace=True)\n content.rename(columns={'identifier': 'object_id'}, inplace=True)\n data = data.merge(content, on=\"object_id\", how=\"left\")\n data = data[data['channel'].notnull()]\n download_counts = data.groupby('channel').sum()\n download_counts.reset_index(inplace=True)\n\n result_loc_.joinpath(date_.strftime('%Y-%m-%d')).mkdir(exist_ok=True)\n download_counts.to_csv(result_loc_.joinpath(date_.strftime('%Y-%m-%d'), 'downloads.csv'), index=False)\n post_data_to_blob(result_loc_.joinpath(date_.strftime('%Y-%m-%d'), 'downloads.csv'), backup=True)", "title": "" }, { "docid": "675994d5ffa91308b8eea838a9d1afcb", "score": "0.5554471", "text": "def result_download_button(app, df):\n df_ser = encode_df(df)\n return html.Form([\n dcc.Input(\n value=df_ser,\n name='result',\n type='text',\n style={'display': 'none'}),\n html.Button([\n html.Img(src=app.get_asset_url('download.svg')),\n html.Span('Download results')\n ],\n type='submit',\n className='div-with-image small-image'\n )\n ],\n method='POST',\n action='/download_df/'\n )", "title": "" }, { "docid": "b8a30e726636ded875fbe4f249cc62d9", "score": "0.55536354", "text": "def file_download_link(filename):\n location = \"/downloads/{}\".format(urlquote(filename))\n return html.A(filename, href=location)", "title": "" }, { "docid": "021b6619ef24e0e66b8fabbb2787b8eb", "score": "0.555286", "text": "def download_dat_files():\r\n url = 'https://raw.githubusercontent.com/jpatokal/openflights/master/data/airports.dat'\r\n wget.download(url, 'C:\\sqlite3\\Python Database files')\r\n\r\n url2 = 'https://raw.githubusercontent.com/jpatokal/openflights/master/data/airlines.dat'\r\n wget.download(url2, 'C:\\sqlite3\\Python Database files')\r\n\r\n url3 = 'https://raw.githubusercontent.com/jpatokal/openflights/master/data/routes.dat'\r\n wget.download(url3, 'C:\\sqlite3\\Python Database files')", "title": "" }, { "docid": "05897914db6b7477d2432b34d9cd3fa6", "score": "0.55320585", "text": "def report_href(**kwargs):\n params = args.copy()\n if sort_col:\n params['sort'] = sort_col\n params['page'] = page\n if max:\n params['max'] = max\n params.update(kwargs)\n params['asc'] = '1' if params.get('asc', asc) else '0'\n return req.href.report(id, params)", "title": "" }, { "docid": "d3b4bd7fbb11900f01b0567c24099a08", "score": "0.5530999", "text": "def download(self):\n pass", "title": "" }, { "docid": "d3b4bd7fbb11900f01b0567c24099a08", "score": "0.5530999", "text": "def download(self):\n pass", "title": "" }, { "docid": "dd7c00a5d8646e39615e71a76a52dc61", "score": "0.5512493", "text": "def get_data(year,league=\"E0\",base_link=base_link):\n\n if not os.path.isdir(\"../Data/\"):\n os.mkdir(\"../Data/\")\n\n 
final_link = base_link + year + \"/\" + league\n filename = \"../Data/\" + year + \"_\" + league + \".csv\"\n\n datafile = urllib2.urlopen(final_link)\n output = open(filename,'wb')\n output.write(datafile.read())\n output.close()\n return pd.read_csv(filename)", "title": "" }, { "docid": "f6d73064848724cc1a96a46127c34fdc", "score": "0.55080026", "text": "def download(self, destination: str) -> str:\n data = self.materialize()\n try:\n data.to_csv(destination, index=False)\n return \"Success\"\n except Exception as e:\n return \"Failed: %s\" % str(e)", "title": "" }, { "docid": "f48eb01602359981bdcf48cd97ffa7db", "score": "0.5486758", "text": "def enhanced_generate_link(df_drugs: pd.DataFrame, df_reference: pd.DataFrame) -> {}:\n\n drug_dict = dict()\n # define a list of drugs. Faster to loop on a list than on a pandas df.\n drugs = df_drugs['drug'].to_list()\n\n for drug in drugs:\n drug_dict[drug] = dict()\n\n # pubMed\n drug_dict[drug][\"pubMed\"] = finding_pubmed_mention(drug, df_reference, \"title\")[\"title\"].to_list()\n drug_dict[drug][\"pubMedJournal\"] = finding_pubmed_mention(drug, df_reference, \"title\")[\"journal\"].to_list()\n drug_dict[drug][\"pubMedDate\"] = finding_pubmed_mention(drug, df_reference, \"title\")[\"date\"].to_list()\n\n # clinical_trials\n drug_dict[drug][\"clinicalTrial\"] = finding_clinical_mention(drug, df_reference, \"title\")[\"title\"].to_list()\n drug_dict[drug][\"clinicalTrialJournal\"] = finding_clinical_mention(drug, df_reference, \"title\")[\n \"journal\"].to_list()\n drug_dict[drug][\"clinicalTrialDate\"] = finding_clinical_mention(drug, df_reference, \"title\")[\"date\"].to_list()\n return drug_dict", "title": "" }, { "docid": "f5a01135a95d6d8def4f8a86867b1bff", "score": "0.54821986", "text": "def download_data(model_dir, season):\n file_name = model_dir + \"/\" + season + \".csv\"\n if (not skip_download): \n # urllib.request.urlretrieve(\n # \"http://217.160.223.109/mmz4281/\"+season+\"/D1.csv\",\n # file_name) # pylint: disable=line-too-long\n \n url = \"http://www.football-data.co.uk/mmz4281/\"+season+\"/D1.csv\"\n print(\"Downloading %s\" % url)\n urllib.request.urlretrieve(\n url,\n file_name) # pylint: disable=line-too-long\n print(\"Data is downloaded to %s\" % file_name)\n data = pd.read_csv(\n tf.gfile.Open(file_name),\n skipinitialspace=True,\n engine=\"python\",\n skiprows=0)\n data[\"Season\"]= season\n return data", "title": "" }, { "docid": "29fbabc6fc60102a26c4472381f6e06f", "score": "0.547121", "text": "def make_download_url(pano_id: str, zoom: int, x: int, y: int) -> str:\n return (\n \"https://cbk0.google.com/cbk\"\n f\"?output=tile&panoid={pano_id}&zoom={zoom}&x={x}&y={y}\"\n )", "title": "" }, { "docid": "56d1103bd7d2988abfcef6e378b6c655", "score": "0.54653656", "text": "def full_data_frame_extract(webdriver_path,base_url,numbers,folder_path):\n\n driver = webdriver.Chrome(webdriver_path)\n wait = WebDriverWait(driver,15)\n driver.get(base_url)\n \n\n Df = pd.DataFrame()\n Df['date_time'] = np.NaN\n Df['player'] = np.NaN\n Df['position_and_team'] = np.NaN\n Df['title'] = np.NaN\n Df['summary'] = np.NaN\n Df['social_headline'] = np.NaN\n Df['players_related'] = np.NaN\n Df['source'] = np.NaN\n \n \n \n for num in range(1,numbers+1):\n wait.until(EC.visibility_of_element_located((By.XPATH,\"//*[@id='player-news-page-wrapper']/div/div/div[3]/ul/li[1]/article/div[1]\")))\n content = driver.page_source\n full_df = source_code_to_df(content)\n Df = Df.append(full_df)\n \n click_next = 
wait.until(EC.element_to_be_clickable((By.XPATH,\"//*[@id='player-news-page-wrapper']/div/div/div[2]/a[2]\")))\n click_next.click()\n \n if(num%1000 == 0):\n path = folder_path + 'injuries' + str(num/1000) +'.csv'\n Df.to_csv(path, index = False)\n print(\"number of reports is:\", num)\n \n driver.quit()\n return Df", "title": "" } ]
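Every passage in this group, positive and negative alike, builds the download link the same way: serialize the DataFrame (to CSV or Excel), base64-encode the bytes, and embed the result in a data-URI anchor tag. A minimal self-contained sketch of that shared pattern follows; the function name, defaults, and label are illustrative choices, not taken from any passage above.

import base64
import pandas as pd

def csv_download_link(df: pd.DataFrame,
                      filename: str = "data.csv",
                      label: str = "Download CSV") -> str:
    # Serialize to CSV, then base64-encode so the payload can sit inside a data URI.
    csv_bytes = df.to_csv(index=False).encode()
    b64 = base64.b64encode(csv_bytes).decode()
    # The download attribute tells the browser to save the payload under `filename`.
    return f'<a href="data:file/csv;base64,{b64}" download="{filename}">{label}</a>'

# Usage example: csv_download_link(pd.DataFrame({"a": [1, 2]}), "a.csv")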
13bc9786d72c182ae5556b7203767daf
Convert topology object into an ns2 Tcl script that deploys that topology into ns2.
[ { "docid": "602ccfd7732aa6a1f9642f34693d5188", "score": "0.55723596", "text": "def to_ns2(topology, path, stacks=True):\n try:\n from mako.template import Template\n except ImportError:\n raise ImportError('Cannot import mako.template module. '\n 'Make sure mako is installed on this machine.')\n set_buffers = True\n set_delays = True\n # if all links are annotated with weights, then set weights\n set_weights = all('weight' in topology.adj[u][v]\n for u, v in topology.edges())\n\n if not 'capacity_unit' in topology.graph:\n raise ValueError('The given topology does not have capacity data.')\n if not topology.graph['capacity_unit'] in capacity_units:\n raise ValueError('The given topology does not have a valid capacity unit')\n if not 'buffer_unit' in topology.graph:\n warn('Missing buffer size unit attribute in the topology. '\\\n 'Output file will be generated without buffer assignments.')\n set_buffers = False\n elif not topology.graph['buffer_unit'] == 'packets':\n warn('The buffer size unit of the topology is %s. The only supported '\n 'unit is packets. Output file will be generated without buffer '\n 'assignments' % topology.graph['buffer_unit'])\n set_buffers = False\n if not 'delay_unit' in topology.graph or not topology.graph['delay_unit'] in time_units:\n warn('Missing or invalid delay unit attribute in the topology. The '\n 'output file will be generated with all link delays set to 0.')\n set_delays = False\n if stacks:\n if not validate_ns2_stacks(topology):\n warn('Some application stacks cannot be parsed correctly. The '\n 'output file will be generated without stack assignments.')\n stacks = False\n if not any('stack' in topology.node[v] for v in topology.nodes()):\n stacks = False\n template = Template(__TEMPLATE)\n variables = {\n 'topology': topology,\n 'deploy_stacks': stacks,\n 'set_delays': set_delays,\n 'set_buffers': set_buffers,\n 'set_weights': set_weights\n }\n with open(path, \"w\") as out:\n out.write(template.render(**variables))", "title": "" } ]
[ { "docid": "2b5d43ca9342feb75ca534ffe1a11c9d", "score": "0.54831195", "text": "def topology(args):\n info(\"*** Creating network\\n\")\n topology_data = load_topology() \n \n add_hmds()\n add_base_stations(topology_data)\n sdn_controller = get_sdn_controller()\n\n info(\"*** Configuring propagation model\\n\")\n NET.setPropagationModel(model=\"logDistance\", exp=5)\n\n info(\"*** Configuring wifi nodes\\n\")\n NET.configureWifiNodes()\n\n create_bs_links(topology_data)\n \n info(\"*** Configuring mobility model\\n\")\n NET.setMobilityModel(time=0, model='RandomDirection', seed=20, AC='ssf')\n\n #info(\"\"\"*** Starting the graph interface\\n\"\"\")\n #NET.plotGraph(max_x=NET_GRAPH_DIMENSION, max_y=NET_GRAPH_DIMENSION) \n \n info(\"*** Starting network\\n\")\n NET.build()\n sdn_controller.start()\n \n config_ovs_switches(sdn_controller)\n\n info(\"*** Running CLI\\n\")\n CLI_THREAD = threading.Thread(target=lambda: CLI(NET))\n CLI_THREAD.daemon = True\n \n set_hmd_range_color(HMDS_SET)\n ping_nodes()\n\n #CLI(net)", "title": "" }, { "docid": "80bbf92dab4f27d633154a3b8cbaec89", "score": "0.5285671", "text": "def build_topology(self):\n self.dend.connect(self.soma(1))", "title": "" }, { "docid": "87102fc05b76af0cfd2afadcb4671032", "score": "0.5043602", "text": "def do_topo(self, args):\n\n self.spp_topo.run(args, self.get_sec_ids('nfv'))", "title": "" }, { "docid": "800183f4cb488876c0061e704366228e", "score": "0.5004334", "text": "def gen_tcl(self):\n s = ''\n s += '\\n'.join(self.bd_tcl_cmds['place_bd_ip'])\n s += '\\n'\n s += '\\n'.join(self.bd_tcl_cmds['build_bd'])\n s += '\\n'\n s += '\\n'.join(self.bd_tcl_cmds['assign_bd_addrs'])\n return s", "title": "" }, { "docid": "cde366ecb10bf14e7b028bb5eeb70a26", "score": "0.49921468", "text": "def produce_layout(eoi, graph):\n\n subprocess.run((\"/usr/bin/dot\", \"{}\".format(graph.filename), \"-o\", \n \"{}/layout.dot\".format(eoi)))", "title": "" }, { "docid": "f060bd93777451d23b0870552bfeda9d", "score": "0.49778432", "text": "def get_pipeline_script_cmdcall_snli_converter():\n # io snli files into jack format\n cmd = \"python3 jack/io/SNLI2jtr.py\"\n return cmd", "title": "" }, { "docid": "1366f6b2c834128797b0f73ad1884dda", "score": "0.4976361", "text": "def generate_ot2_script(ot2_script_path, template_path, **kwargs):\n with open(ot2_script_path, 'w') as wf:\n with open(template_path, 'r') as rf:\n for index, line in enumerate(rf):\n if line[:3] == 'def':\n function_start = index\n break\n else:\n wf.write(line)\n for key, value in kwargs.items():\n wf.write('{}='.format(key))\n if type(value) == dict:\n wf.write(json.dumps(value))\n elif type(value) == str:\n wf.write(\"'{}'\".format(value))\n else:\n wf.write(str(value))\n wf.write('\\n')\n wf.write('\\n')\n with open(template_path, 'r') as rf:\n for index, line in enumerate(rf):\n if index >= function_start - 1:\n wf.write(line)", "title": "" }, { "docid": "61ed9f3cab2b6c40ef8ec198e1044736", "score": "0.49728763", "text": "def py2tcl(pystr):\n # new = '\\n'.join(pystr.split()[1:]) # removes the import line\n new = pystr.replace('(', ' ')\n new = new.replace(')', ' ')\n new = new.replace('opy.', '')\n new = new.replace('ops.', '')\n new = new.replace(',', '')\n new = new.replace(\"'\", '')\n new = new.replace('\"', '')\n # lines = new.splitlines()\n # for i in range(len(lines)):\n # if 'for i in range(' in lines[i]:\n # line = lines.replace('for i in range(', 'for {set i 1} {$i <= num} {incr i 1} {')\n return new", "title": "" }, { "docid": "d3429b3bb8b494948e78f7a129fd8f81", "score": 
"0.49418485", "text": "def create_scionlab_as_local_gen(args, tp):\n new_ia = TopoID(args.joining_ia)\n core_ia = ISD_AS(args.core_ia)\n local_gen_path = os.path.join(args.package_path, args.user_id)\n try:\n os.makedirs(local_gen_path)\n except:\n pass\n os.chdir(local_gen_path) # functions from $SC/python/topology use relative paths\n local_gen_path = os.path.join(local_gen_path, 'gen')\n try:\n shutil.rmtree(local_gen_path)\n except:\n pass\n as_obj = generate_certificate(\n new_ia, core_ia, args.core_sign_priv_key_file, args.core_cert_file, args.trc_file)\n write_dispatcher_config(local_gen_path)\n write_toml_files(tp, new_ia)\n for service_type, type_key in TYPES_TO_KEYS.items():\n executable_name = TYPES_TO_EXECUTABLES[service_type]\n instances = tp[type_key].keys()\n for instance_name in instances:\n config = prep_supervisord_conf(tp[type_key][instance_name], executable_name,\n service_type, instance_name, new_ia)\n instance_path = get_elem_dir(local_gen_path, new_ia, instance_name)\n write_certs_trc_keys(new_ia, as_obj, instance_path)\n write_as_conf_and_path_policy(new_ia, as_obj, instance_path)\n write_supervisord_config(config, instance_path)\n write_topology_file(tp, type_key, instance_path)\n write_zlog_file(service_type, instance_name, instance_path)\n generate_sciond_config(TopoID(args.joining_ia), as_obj, tp, local_gen_path)\n write_overlay_config(local_gen_path)\n if not args.no_prometheus:\n generate_prom_config(new_ia, tp, local_gen_path)", "title": "" }, { "docid": "a9bc3bce81c646c858b8bedd15488fb3", "score": "0.49216047", "text": "def main(args):\n ic = lanenet_processer()\n rospy.init_node('template_node', anonymous=True)\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print \"Shutting down ROS node\"", "title": "" }, { "docid": "64d75f535c716eed9d8ddc7d3e360263", "score": "0.4887493", "text": "def define_imine_topology(bb1, bb2, topology, property):\n # Define the topologies.\n topo_func = None\n bb_vect = None\n if property == 'stk_func':\n if topology == '2p3':\n topo_func = stk.cage.TwoPlusThree()\n bb_vect = None\n elif topology == '4p6':\n topo_func = stk.cage.FourPlusSix()\n bb_vect = None\n elif topology == '4p62':\n topo_func = stk.cage.FourPlusSix2()\n bb_vect = None\n elif topology == '6p9':\n topo_func = stk.cage.SixPlusNine()\n bb_vect = None\n elif topology == '1p1':\n topo_func = stk.cage.OnePlusOne()\n bb_vect = {\n bb1: [topo_func.vertices[0]],\n bb2: [topo_func.vertices[1]],\n }\n elif topology == '4p4':\n topo_func = stk.cage.FourPlusFour()\n bb_vect = {\n bb1: topo_func.vertices[0, 2, 4, 6],\n bb2: topo_func.vertices[1, 3, 5, 7],\n }\n\n return topo_func, bb_vect", "title": "" }, { "docid": "4acc506885c104ee134d68980939825e", "score": "0.4881019", "text": "def maketopo():\n nxpoints=200\n nypoints=200\n xupper=2.e0\n yupper=2.e0\n xlower = -2.e0\n ylower = -2.e0\n outfile= \"bowl.topotype2\"\n topo2writer(outfile,topo,xlower,xupper,ylower,yupper,nxpoints,nypoints)\n\n # convert to netcdf form:\n topo4 = topotools.Topography()\n topo4.read(outfile,2)\n topo4.write(\"bowl.nc\",topo_type=4)", "title": "" }, { "docid": "6598b14f3db27127fa4b0eb7492095d3", "score": "0.4849923", "text": "def insert_topologytemplate(description, rtable, readme, user, template_name, visibility=db.TopologyTemplate.PROTECTED):\n\n # Check we don't already have a template by this name\n # TODO: change so that django doesn't require a unique name, so we don't\n # have to do this check which allows any user who can create a template to\n # see the names already in use\n 
duplicate = True\n try:\n _ = db.TopologyTemplate.objects.get(name = template_name)\n except db.TopologyTemplate.DoesNotExist:\n duplicate = False\n \n if duplicate:\n raise TopologyNameError(\"This topology name is already in use\")\n \n # Dictionaries for the nodes, ports and links so we don't start putting\n # stuff in the DB and then come across an error\n\n # nodes is a dictionary mapping a node name to a type\n nodes = {}\n\n # paths maps names of webservers to paths\n paths = {}\n\n # ip_offsets maps node names to ip offsets\n ip_offsets = {}\n\n # ports is a dictionary mapping a node name to a list of interfaces\n ports = {}\n\n # links maps (name, iface) pairs together, and should be symmetric\n links = {}\n\n line_no = 0\n\n # List of node types that are grouped in Node\n # WebServer is a class of its own, since it has extra attributes, so is not\n # included here\n node_types = {\"gateway\":db.Node.GATEWAY_ID,\n \"hub\":db.Node.HUB_ID,\n \"blackhole\":db.Node.BLACK_HOLE_ID,\n \"virtual\":db.Node.VIRTUAL_NODE_ID}\n\n \n def check_valid_name(name):\n \"\"\"Check that a potential name is valid, i.e. starts wth a letter and\n contains only letters and numbers\"\"\"\n if name == \"\" or not name[0].isalpha() or not name.isalnum():\n raise TopologyNameError(\"Line %d: invalid name for node or interface; names must begin with a letter and contain only alphanumeric characters\" % line_no)\n\n def create_node(name, node_type, ip_offset):\n check_valid_name(name)\n try:\n ip_offset = int(ip_offset)\n except ValueError:\n raise TopologyArgumentError(\"Line %d: IP offset must be an integer\" % line_no)\n if name in nodes:\n raise TopologyNameError(\"Line %d: Duplicate node name\" % line_no)\n nodes[name] = node_type\n ip_offsets[name] = ip_offset\n\n def create_link(line):\n \n # We expect a line of the form \"name1.iface1=name2.iface2\"\n parts = line.split('=')\n if len(parts) != 2:\n raise TopologySyntaxError(\"Line %d: syntax error\" % line_no)\n \n ni1 = parts[0].split('.')\n if len(ni1) != 2:\n raise TopologySyntaxError(\"Line %d: syntax error\" % line_no)\n\n ni2 = parts[1].split('.')\n if len(ni2) != 2:\n raise TopologySyntaxError(\"Line %d: syntax error\" % line_no)\n\n name1 = ni1[0].strip()\n iface1 = ni1[1].strip()\n name2 = ni2[0].strip()\n iface2 = ni2[1].strip()\n\n check_valid_name (name1);\n check_valid_name (name2);\n check_valid_name (iface1);\n check_valid_name (iface2);\n\n # Check the nodes exist\n if name1 not in nodes:\n raise TopologyNameError(\"Line %d: node name '%s' does not exist\" % (line_no, name1))\n if name2 not in nodes:\n raise TopologyNameError(\"Line %d: node name '%s' does not exist\" % (line_no, name2))\n\n # Check the ports don't already exist\n if (name1, iface1) in links:\n (name3, iface3) = links[(name1, iface1)]\n raise TopologySyntaxError(\"Line %d: %s.%s is already connected to %s.%s\" % (line_no, name1, iface1, name3, iface3))\n if (name2, iface2) in links:\n (name3, iface3) = links[(name1, iface1)]\n raise TopologySyntaxError(\"Line %d: %s.%s is already connected to %s.%s\" % (line_no, name2, iface2, name3, iface3))\n \n # If we've got this far, it's safe to insert the link\n if name1 not in ports:\n ports[name1] = [iface1]\n else:\n if iface1 not in (ports[name1]):\n ports[name1].append(iface1)\n if name2 not in ports:\n ports[name2] = [iface2]\n else:\n if iface2 not in (ports[name2]):\n ports[name2].append(iface2)\n \n links[(name1, iface1)] = (name2, iface2)\n links[(name2, iface2)] = (name1, iface1)\n\n \n lines = 
description.splitlines()\n for l in lines:\n\n line_no += 1\n \n if l == \"\":\n # Ignore blank lines\n continue\n \n toks = l.split()\n if len(toks) < 1:\n continue\n \n if toks[0] == \"webserver\":\n # Create a new webserver node\n if len(toks) == 3:\n # We don't have to assign it a particular IP\n name = toks[1]\n path = toks[2]\n create_node(name, \"webserver\", -2)\n paths[name] = path\n elif len(toks) == 4:\n # We do have to assign it a particular IP\n create_node(toks[1], \"webserver\", toks[3])\n paths[toks[1]] = toks[2]\n else:\n raise TopologyArgumentError(\"Line %d: Wrong number of arguments - expect webserver <name> <path> [ip_offset]\" % line_no)\n\n elif toks[0] in node_types:\n # Create a new node of a non-webserver type\n if len(toks) == 2:\n # No IP offset\n create_node(toks[1], toks[0], -2)\n elif len(toks) == 3:\n # IP offset is present\n create_node(toks[1], toks[0], toks[2])\n else:\n raise TopologyArgumentError(\"Line %d: Wrong number of erguments - expect %s <name> [ip_offset]\" % (line_no, toks[0]))\n else:\n # It doesn't start with a node type; assume it's inserting a link \n create_link(l)\n\n # Add everything to the DB\n tt = db.TopologyTemplate()\n tt.name = template_name\n tt.owner = user\n tt.org = user.get_profile().org\n tt.visibility = visibility\n tt.readme = readme\n tt.rtable = rtable\n tt.specification = description\n tt.save()\n\n dbnodes = {}\n dbports = {}\n links_inserted = {}\n\n # Insert nodes into the database\n for (node_name, node_type) in nodes.iteritems():\n n = None\n if node_type == \"webserver\":\n # A webserver is a different class, as it has a path to serve\n p = db.WebServerPath()\n p.path = paths[node_name]\n p.save()\n n = db.WebServer()\n n.path_to_serve=p\n n.type=db.Node.WEB_SERVER_ID\n n.name=node_name\n n.template=tt\n else:\n n = db.Node()\n n.type=node_types[node_type]\n n.name=node_name\n n.template=tt\n n.save()\n dbnodes[node_name] = n\n\n # Add the ports to the database\n for (node_name, ifaces) in ports.iteritems():\n offset_2 = 0\n for iface in ifaces:\n\n # If we have an IP offset for the node, assign consecutive\n # IP offsets for each interface\n if ip_offsets[node_name] >= 0:\n ip = ip_offsets[node_name] + offset_2\n offset_2 += 1\n else:\n ip = ip_offsets[node_name]\n p = db.Port()\n p.node=dbnodes[node_name]\n p.name=iface\n p.ip_offset=ip\n dbports[(node_name, iface)] = p\n p.save()\n\n for (i1, i2) in links.iteritems():\n # links contains two entries, one in each direction, for each link, so\n # exclude duplicates\n if i1 not in links_inserted and i2 not in links_inserted:\n l = db.Link()\n l.port1=dbports[i1]\n l.port2=dbports[i2]\n l.lossiness=0.0\n links_inserted[i1] = True\n links_inserted[i2] = True\n l.save()", "title": "" }, { "docid": "7b79f779f4b45740c1a6527ee727b6ff", "score": "0.479371", "text": "def to_executables(nodes, label, context):\n raise NotImplementedError()", "title": "" }, { "docid": "42820c54e35a35645ac471c2d85cae6e", "score": "0.47915757", "text": "def convert(topology, backend, device, extra_config={}):\n assert topology is not None, \"Cannot convert a Topology object of type None.\"\n assert backend is not None, \"Cannot convert a Topology object into backend None.\"\n assert device is not None, \"Cannot convert a Topology object into device None.\"\n\n operator_map = {}\n\n for operator in topology.topological_operator_iterator():\n try:\n converter = get_converter(operator.type)\n\n if backend == onnx.__name__:\n # vers = LooseVersion(torch.__version__)\n # allowed_min = 
LooseVersion(\"1.6.0\")\n # Pytorch <= 1.6.0 has a bug with exporting GEMM into ONNX.\n # For the moment only tree_trav is enabled for pytorch <= 1.6.0\n # if vers < allowed_min:\n extra_config[constants.TREE_IMPLEMENTATION] = \"tree_trav\"\n\n operator_map[operator.full_name] = converter(operator, device, extra_config)\n except ValueError:\n raise MissingConverter(\n \"Unable to find converter for {} type {} with extra config: {}.\".format(\n operator.type, type(getattr(operator, \"raw_model\", None)), extra_config\n )\n )\n except Exception as e:\n raise e\n\n operators = list(topology.topological_operator_iterator())\n torch_model = PyTorchBackendModel(\n topology.raw_model.input_names, topology.raw_model.output_names, operator_map, operators, extra_config\n ).eval()\n\n if backend == onnx.__name__:\n onnx_model_name = output_model_name = None\n target_opset = 11\n\n # Set optional configuration options for ONNX if any.\n if constants.ONNX_OUTPUT_MODEL_NAME in extra_config:\n onnx_model_name = extra_config[constants.ONNX_OUTPUT_MODEL_NAME]\n output_model_name = onnx_model_name + \".onnx\"\n if constants.ONNX_TARGET_OPSET in extra_config:\n target_opset = extra_config[constants.ONNX_TARGET_OPSET]\n if output_model_name is None:\n output_model_name = str(uuid4().hex) + \".onnx\"\n\n # Put the tracing test input into the right format.\n trace_input = _get_trace_input_from_test_input(extra_config[constants.TEST_INPUT])\n\n # Generate the ONNX models\n torch.onnx.export(\n torch_model,\n trace_input,\n output_model_name,\n input_names=topology.raw_model.input_names,\n output_names=topology.raw_model.output_names,\n keep_initializers_as_inputs=False,\n opset_version=target_opset,\n do_constant_folding=True,\n )\n hb_model = onnx.load(output_model_name)\n os.remove(output_model_name)\n\n # Set the ONNX model name if any.\n if onnx_model_name is not None:\n hb_model.graph.name = onnx_model_name\n\n # Fix the model to use arbitrary batch dimensions\n def fix_dim(dim):\n updated = False\n if dim.HasField(\"dim_value\"):\n dim.Clear()\n updated = True\n dim.dim_param = \"sym\"\n\n return updated\n\n def fix_value_info(value):\n num_fixed = 0\n if value.type.HasField(\"tensor_type\"):\n shape = value.type.tensor_type.shape\n if shape:\n dim = shape.dim[0]\n if fix_dim(dim):\n num_fixed += 1\n\n return num_fixed\n\n def fix_graph(graph):\n num_fixed = 0\n for input in graph.input:\n num_fixed += fix_value_info(input)\n\n for output in graph.output:\n num_fixed += fix_value_info(output)\n\n for node in graph.node:\n for attr in node.attribute:\n if attr.HasField(\"g\"):\n num_fixed += fix_graph(attr.g)\n\n return num_fixed\n\n fix_graph(hb_model.graph)\n else:\n # Set the device for the model.\n if device != \"cpu\":\n if backend == torch.__name__ or torch.jit.__name__:\n torch_model = torch_model.to(device)\n\n # If the backend is tochscript, jit the model.\n if backend == torch.jit.__name__:\n trace_input = _get_trace_input_from_test_input(extra_config[constants.TEST_INPUT])\n if device != \"cpu\":\n trace_input.to(device)\n torch_model = torch.jit.trace(torch_model, trace_input).eval()\n torch.jit.optimized_execution(torch_model)\n hb_model = torch_model\n\n # Return if the container is not needed.\n if constants.CONTAINER in extra_config and not extra_config[constants.CONTAINER]:\n return hb_model\n\n # We scan the operators backwards until we find an operator with a defined type\n # This is necessary because ONNX models can have arbitrary operators doing casting, reshaping etc.\n idx = 
len(operators) - 1\n while (\n idx >= 0\n and not operator_map[operators[idx].full_name].regression\n and not operator_map[operators[idx].full_name].classification\n and not operator_map[operators[idx].full_name].anomaly_detection\n and not operator_map[operators[idx].full_name].transformer\n ):\n idx -= 1\n\n assert idx >= 0, \"Cannot detect container type. Please fill an issue at https://github.com/microsoft/hummingbird.\"\n\n # If is a transformer, we need to check whether there is another operator type before.\n # E.g., normalization after classification.\n tmp_idx = idx\n if operator_map[operators[idx].full_name].transformer:\n while (\n idx >= 0\n and not operator_map[operators[idx].full_name].regression\n and not operator_map[operators[idx].full_name].classification\n and not operator_map[operators[idx].full_name].anomaly_detection\n ):\n idx -= 1\n if idx < 0:\n idx = tmp_idx\n\n if operator_map[operators[idx].full_name].regression:\n # We are doing a regression task.\n if backend == torch.jit.__name__:\n container = TorchScriptSklearnContainerRegression\n elif backend == onnx.__name__:\n container = ONNXSklearnContainerRegression\n else:\n container = PyTorchSklearnContainerRegression\n elif operator_map[operators[idx].full_name].anomaly_detection:\n # We are doing anomaly detection.\n if backend == torch.jit.__name__:\n container = TorchScriptSklearnContainerAnomalyDetection\n elif backend == onnx.__name__:\n container = ONNXSklearnContainerAnomalyDetection\n else:\n container = PyTorchSklearnContainerAnomalyDetection\n elif operator_map[operators[idx].full_name].transformer:\n # We are just transforming the input data.\n if backend == torch.jit.__name__:\n container = TorchScriptSklearnContainerTransformer\n elif backend == onnx.__name__:\n container = ONNXSklearnContainerTransformer\n else:\n container = PyTorchSklearnContainerTransformer\n else:\n # We are doing a classification task.\n if backend == torch.jit.__name__:\n container = TorchScriptSklearnContainerClassification\n elif backend == onnx.__name__:\n container = ONNXSklearnContainerClassification\n else:\n container = PyTorchSklearnContainerClassification\n\n hb_model = container(hb_model, extra_config)\n\n return hb_model", "title": "" }, { "docid": "0ee2732725a729e31ad492835f691992", "score": "0.47445887", "text": "def topologytemplate_view(request, template):\n tn = 'vns/topologytemplate_view.html'\n\n return direct_to_template(request, tn, {\"t\":template})", "title": "" }, { "docid": "17a6a2297e34a2f5f7746a2d4e55d8b7", "score": "0.47296414", "text": "def write_cytoscape_script():\n\t\n\tprint \"Writing cytoscape script...\"\n\n\tf = open(file_cytoscape_script, 'w')\n\n\t### Import network\n\tf.write(\"\"\"network import file file=\"{file}\" firstRowAsColumnNames=true startLoadRow=1 indexColumnSourceInteraction=1 indexColumnTargetInteraction=2\"\"\".format(file=file_network_table)+\"\\n\")\n\n\t### Import node attributes\n\t# REF: http://wiki.cytoscape.org/Cytoscape_3/UserManual/Attributes\n\t# *OBS*: tab seperated\n\tf.write(r\"\"\"table import file file=\"{file}\" firstRowAsColumnNames=true startLoadRow=1 keyColumnIndex=1 delimiters=\"\\t\" \"\"\".format(file=file_node_attribute)+\"\\n\")\n\t\t### ^ use raw string to inhibit python from setting a \"\t\" character\n\n\t### Set layout\n\tf.write(\"\"\"layout kamada-kawai EdgeAttribute=Pearson_correlation unweighted=false\"\"\"+\"\\n\")\n\t\t# Kamada and Kawai (1988): same as \"Spring-Embedded Layout\"\n\t\t# Network nodes are treated like physical objects that 
repel each other, such as electrons. \n\t\t# The layout algorithm sets the positions of the nodes in a way that minimizes the sum of forces in the network\n\t# 1) [yWorks - not available through command line] yFiles Organic Layout: organic layout algorithm is a kind of *spring-embedded algorithm*\n\t# 2) layout force-directed EdgeAttribute=Pearson_correlation unweighted=false\n\t# ---> NOTE: the applying a WEIGHTED layout algorithm may fail if the network is small or has few edges. This is tricky...\n\n\t### Load and apply visual style\n\t# *OBS*: KEEP STYLE NAME UPDATED - must match name in .xml file\n\tf.write(\"\"\"vizmap load file file=\"{file}\" \"\"\".format(file=cytoscape_style)+\"\\n\")\n\tf.write(\"\"\"vizmap apply styles=\"DEPICT-style-v1\" \"\"\"+\"\\n\")\n\t\n\t### Set view to fit display\n\tf.write(\"\"\"view fit content\"\"\"+\"\\n\")\n\n\t### Export\n\tf.write(\"\"\"view export OutputFile=\"{file_out}\" options=PDF\"\"\".format(file_out=file_cytoscape_graphics)+\"\\n\")\n\tf.write(\"\"\"view export OutputFile=\"{file_out}\" options=PNG\"\"\".format(file_out=file_cytoscape_graphics)+\"\\n\")\n\n\tf.write(\"\"\"session save as file=\"{file_out}\" \"\"\".format(file_out=file_cytoscape_session)+\"\\n\")\n\n\t\n\n\t### OPTIONAL: exit script when done.\n\tif not interactive_cytoscape_session:\n\t\tf.write(\"\"\"command quit\"\"\"+\"\\n\")\n\t\n\n\tf.close()", "title": "" }, { "docid": "38d33016f8da8f7b624d3aea39ccb4b1", "score": "0.46605805", "text": "def main(args):\n create_output = []\n startup_output = []\n stop_output = []\n delete_output = []\n delete_net_output = []\n topo_yaml = openTopo(args.topo)\n create_startup = args.startup\n ceos_image = topo_yaml['images']['ceos']\n host_image = topo_yaml['images']['host']\n _tag = topo_yaml['topology']['name']\n nodes = topo_yaml['nodes']\n hosts = topo_yaml['hosts']\n _ceos_script_location = \"{0}/{1}\".format(CEOS_SCRIPTS, _tag)\n _tfa_version = 1\n BASE_TERMINATTR = \"\"\"\ndaemon TerminAttr\n exec /usr/bin/TerminAttr -cvaddr={0}:9910 -taillogs -cvcompression=gzip -cvauth=key,{1} -smashexcludes=ale,flexCounter,hardware,kni,pulse,strata -ingestexclude=/Sysdb/cell/1/agent,/Sysdb/cell/2/agent\n no shutdown\n!\n \"\"\"\n BASE_MGMT = \"\"\"\ninterface Management0\n description Management\n ip address {0}/24\n!\nip routing\n!\nip route 0.0.0.0/0 {1}\n!\nmanagement api http-commands\n no shutdown\n!\n \"\"\"\n BASE_STARTUP = \"\"\"\nhostname {0}\n!\n \"\"\"\n if create_startup:\n pS(\"INFO\", \"Bare Startup config will be created for all cEOS nodes\")\n # Check for mgmt Bridge\n try:\n mgmt_network = topo_yaml['infra']['bridge']\n if not mgmt_network:\n pS(\"INFO\", \"No mgmt bridge specified. Creating isolated network.\")\n except KeyError:\n pS(\"INFO\", \"No mgmt bridge specified. 
Creating isolated network.\")\n mgmt_network = False\n # Check for TFA Version\n try:\n _tfa_version = topo_yaml['topology']['vforward']\n if _tfa_version > 1:\n pS(\"INFO\", \"Leveraging ArFA forwarding agent\")\n else:\n pS(\"INFO\", \"Leveraging default dataplane\")\n except:\n pS(\"INFO\", \"Leveraging default dataplane\")\n\n # Load cEOS nodes specific information\n for _node in nodes:\n try:\n _node_ip = nodes[_node]['ipaddress']\n except KeyError:\n _node_ip = \"\"\n CEOS[_node] = CEOS_NODE(_node, _node_ip, nodes[_node]['mac'], nodes[_node]['neighbors'], _tag, ceos_image)\n # Load Host nodes specific information\n if hosts:\n for _host in hosts:\n _tmp_host = hosts[_host]\n HOSTS[_host] = HOST_NODE(_host, _tmp_host['ipaddress'], _tmp_host['mask'], _tmp_host['gateway'], _tmp_host['neighbors'], _tag, host_image)\n # Check for output directory\n if checkDir(_ceos_script_location):\n pS(\"OK\", \"Directory is present now.\")\n else:\n pS(\"iBerg\", \"Error creating directory.\")\n create_output.append(\"#!/bin/bash\\n\")\n create_output.append(NOTIFY_ADJUST)\n # Check for container images are present\n create_output.append(\"if [ \\\"$(docker image ls | grep ceosimage | grep -c {0})\\\" == 0 ]\\n\".format(ceos_image))\n create_output.append(\"then\\n echo \\\"Docker image not found for ceosimage:{0}, please build it first.\\\"\\n exit\\nfi\\n\".format(ceos_image))\n create_output.append(\"if [ \\\"$(docker image ls | grep chost | grep -c {0})\\\" == 0 ]\\n\".format(host_image))\n create_output.append(\"then\\n echo \\\"Docker image not found for chost:{0}, please build it first.\\\"\\n exit\\nfi\\n\".format(host_image))\n create_output.append(\"sudo ip netns add {0}\\n\".format(_tag))\n startup_output.append(\"#!/bin/bash\\n\")\n startup_output.append(NOTIFY_ADJUST)\n stop_output.append(\"#!/bin/bash\\n\")\n delete_output.append(\"#!/bin/bash\\n\")\n startup_output.append(\"sudo ip netns add {0}\\n\".format(_tag))\n delete_net_output.append(\"sudo ip netns delete {0}\\n\".format(_tag))\n # Get the veths created\n create_output.append(\"# Creating veths\\n\")\n for _veth in VETH_PAIRS:\n _v1, _v2 = _veth.split(\"-\")\n create_output.append(\"sudo ip link add {0} type veth peer name {1}\\n\".format(_v1, _v2))\n startup_output.append(\"sudo ip link add {0} type veth peer name {1}\\n\".format(_v1, _v2))\n delete_output.append(\"sudo ip link delete {0} type veth peer name {1}\\n\".format(_v1, _v2))\n create_output.append(\"#\\n#\\n# Creating anchor containers\\n#\\n\")\n # Create initial cEOS anchor containers\n create_output.append(\"# Checking to make sure topo config directory exists\\n\")\n create_output.append('if ! [ -d \"{0}/{1}\" ]; then mkdir {0}/{1}; fi\\n'.format(CONFIGS, _tag))\n for _node in CEOS:\n # Add in code to perform check in configs directory and create a basis for ceos-config\n create_output.append(\"# Checking for configs directory for each cEOS node\\n\")\n create_output.append('if ! 
[ -d \"{0}/{1}/{2}\" ]; then mkdir {0}/{1}/{2}; fi\\n'.format(CONFIGS, _tag, _node))\n create_output.append(\"# Creating the ceos-config file.\\n\")\n create_output.append('echo \"SERIALNUMBER={0}\" > {1}/{2}/{3}/ceos-config\\n'.format(CEOS[_node].ceos_name, CONFIGS, _tag, _node))\n create_output.append('echo \"SYSTEMMACADDR={0}\" >> {1}/{2}/{3}/ceos-config\\n'.format(CEOS[_node].system_mac, CONFIGS, _tag, _node))\n if _tfa_version > 1:\n create_output.append('echo \"TFA_VERSION={0}\" >> {1}/{2}/{3}/ceos-config\\n'.format(_tfa_version, CONFIGS, _tag, _node))\n # Perform check to see if a bare startup-config needs to be created\n if create_startup:\n create_output.append(\"# Creating a bare startup configuration for {0}\\n\".format(_node))\n _tmp_startup = []\n _tmp_startup.append(BASE_STARTUP.format(CEOS[_node].ceos_name))\n if mgmt_network:\n _tmp_startup.append(BASE_MGMT.format(CEOS[_node].ip, topo_yaml['infra']['gateway']))\n if 'cvpaddress' and 'cvp-key' in topo_yaml['topology']:\n _tmp_startup.append(BASE_TERMINATTR.format(topo_yaml['topology']['cvpaddress'], topo_yaml['topology']['cvp-key']))\n create_output.append('echo \"{0}\" > {1}/{2}/{3}/startup-config\\n'.format(''.join(_tmp_startup), CONFIGS, _tag, _node))\n # Creating anchor containers\n create_output.append(\"# Getting {0} nodes plumbing\\n\".format(_node))\n create_output.append(\"docker run -d --restart=always --log-opt max-size=10k --name={0}-net --net=none busybox /bin/init\\n\".format(CEOS[_node].ceos_name))\n startup_output.append(\"docker start {0}-net\\n\".format(CEOS[_node].ceos_name))\n create_output.append(\"{0}pid=$(docker inspect --format '{{{{.State.Pid}}}}' {0}-net)\\n\".format(CEOS[_node].ceos_name))\n create_output.append(\"sudo ln -sf /proc/${{{0}pid}}/ns/net /var/run/netns/{0}\\n\".format(CEOS[_node].ceos_name))\n # Stop cEOS containers\n startup_output.append(\"docker stop {0}\\n\".format(CEOS[_node].ceos_name))\n stop_output.append(\"docker stop {0}\\n\".format(CEOS[_node].ceos_name))\n stop_output.append(\"docker stop {0}-net\\n\".format(CEOS[_node].ceos_name))\n delete_output.append(\"docker stop {0}\\n\".format(CEOS[_node].ceos_name))\n delete_output.append(\"docker stop {0}-net\\n\".format(CEOS[_node].ceos_name))\n # Remove cEOS containers\n startup_output.append(\"docker rm {0}\\n\".format(CEOS[_node].ceos_name))\n delete_output.append(\"docker rm {0}\\n\".format(CEOS[_node].ceos_name))\n delete_output.append(\"docker rm {0}-net\\n\".format(CEOS[_node].ceos_name))\n delete_net_output.append(\"sudo rm -rf /var/run/netns/{0}\\n\".format(CEOS[_node].ceos_name))\n startup_output.append(\"{0}pid=$(docker inspect --format '{{{{.State.Pid}}}}' {0}-net)\\n\".format(CEOS[_node].ceos_name))\n startup_output.append(\"sudo ln -sf /proc/${{{0}pid}}/ns/net /var/run/netns/{0}\\n\".format(CEOS[_node].ceos_name))\n create_output.append(\"# Connecting cEOS containers together\\n\")\n # Output veth commands\n for _intf in CEOS[_node].intfs:\n _tmp_intf = CEOS[_node].intfs[_intf]\n if _node in _tmp_intf['veth'].split('-')[0]:\n create_output.append(\"sudo ip link set {0} netns {1} name {2} up\\n\".format(_tmp_intf['veth'].split('-')[0], CEOS[_node].ceos_name, _tmp_intf['port']))\n startup_output.append(\"sudo ip link set {0} netns {1} name {2} up\\n\".format(_tmp_intf['veth'].split('-')[0], CEOS[_node].ceos_name, _tmp_intf['port']))\n else:\n create_output.append(\"sudo ip link set {0} netns {1} name {2} up\\n\".format(_tmp_intf['veth'].split('-')[1], CEOS[_node].ceos_name, _tmp_intf['port']))\n 
startup_output.append(\"sudo ip link set {0} netns {1} name {2} up\\n\".format(_tmp_intf['veth'].split('-')[1], CEOS[_node].ceos_name, _tmp_intf['port']))\n # Perform check if mgmt network is available\n if mgmt_network:\n # Get MGMT VETHS\n create_output.append(\"sudo ip link add {0}-eth0 type veth peer name {0}-mgmt\\n\".format(CEOS[_node].ceos_name))\n create_output.append(\"sudo brctl addif {0} {1}-mgmt\\n\".format(mgmt_network, CEOS[_node].ceos_name))\n create_output.append(\"sudo ip link set {0}-eth0 netns {0} name eth0 up\\n\".format(CEOS[_node].ceos_name))\n create_output.append(\"sudo ip link set {0}-mgmt up\\n\".format(CEOS[_node].ceos_name))\n create_output.append(\"sleep 1\\n\")\n create_output.append(\"docker run -d --name={0} --log-opt max-size=1m --net=container:{0}-net --ip {1} --privileged -v /etc/sysctl.d/99-zceos.conf:/etc/sysctl.d/99-zceos.conf:ro -v {2}/{4}/{5}:/mnt/flash:Z -e INTFTYPE=et -e MGMT_INTF=eth0 -e ETBA=1 -e SKIP_ZEROTOUCH_BARRIER_IN_SYSDBINIT=1 -e CEOS=1 -e EOS_PLATFORM=ceoslab -e container=docker -i -t ceosimage:{3} /sbin/init systemd.setenv=INTFTYPE=et systemd.setenv=MGMT_INTF=eth0 systemd.setenv=ETBA=1 systemd.setenv=SKIP_ZEROTOUCH_BARRIER_IN_SYSDBINIT=1 systemd.setenv=CEOS=1 systemd.setenv=EOS_PLATFORM=ceoslab systemd.setenv=container=docker\\n\".format(CEOS[_node].ceos_name, CEOS[_node].ip, CONFIGS, CEOS[_node].image, _tag, _node))\n startup_output.append(\"sudo ip link add {0}-eth0 type veth peer name {0}-mgmt\\n\".format(CEOS[_node].ceos_name))\n startup_output.append(\"sudo brctl addif {0} {1}-mgmt\\n\".format(mgmt_network, CEOS[_node].ceos_name))\n startup_output.append(\"sudo ip link set {0}-eth0 netns {0} name eth0 up\\n\".format(CEOS[_node].ceos_name))\n startup_output.append(\"sudo ip link set {0}-mgmt up\\n\".format(CEOS[_node].ceos_name))\n startup_output.append(\"sleep 1\\n\")\n startup_output.append(\"docker run -d --name={0} --log-opt max-size=1m --net=container:{0}-net --ip {1} --privileged -v /etc/sysctl.d/99-zceos.conf:/etc/sysctl.d/99-zceos.conf:ro -v {2}/{4}/{5}:/mnt/flash:Z -e INTFTYPE=et -e MGMT_INTF=eth0 -e ETBA=1 -e SKIP_ZEROTOUCH_BARRIER_IN_SYSDBINIT=1 -e CEOS=1 -e EOS_PLATFORM=ceoslab -e container=docker -i -t ceosimage:{3} /sbin/init systemd.setenv=INTFTYPE=et systemd.setenv=MGMT_INTF=eth0 systemd.setenv=ETBA=1 systemd.setenv=SKIP_ZEROTOUCH_BARRIER_IN_SYSDBINIT=1 systemd.setenv=CEOS=1 systemd.setenv=EOS_PLATFORM=ceoslab systemd.setenv=container=docker\\n\".format(CEOS[_node].ceos_name, CEOS[_node].ip, CONFIGS, CEOS[_node].image, _tag, _node))\n else:\n create_output.append(\"sleep 1\\n\")\n create_output.append(\"docker run -d --name={0} --log-opt max-size=1m --net=container:{0}-net --privileged -v /etc/sysctl.d/99-zceos.conf:/etc/sysctl.d/99-zceos.conf:ro -v {1}/{3}/{4}:/mnt/flash:Z -e INTFTYPE=et -e MGMT_INTF=eth0 -e ETBA=1 -e SKIP_ZEROTOUCH_BARRIER_IN_SYSDBINIT=1 -e CEOS=1 -e EOS_PLATFORM=ceoslab -e container=docker -i -t ceosimage:{2} /sbin/init systemd.setenv=INTFTYPE=et systemd.setenv=MGMT_INTF=eth0 systemd.setenv=ETBA=1 systemd.setenv=SKIP_ZEROTOUCH_BARRIER_IN_SYSDBINIT=1 systemd.setenv=CEOS=1 systemd.setenv=EOS_PLATFORM=ceoslab systemd.setenv=container=docker\\n\".format(CEOS[_node].ceos_name, CONFIGS, CEOS[_node].image, _tag, _node))\n startup_output.append(\"sleep 1\\n\")\n startup_output.append(\"docker run -d --name={0} --log-opt max-size=1m --net=container:{0}-net --privileged -v /etc/sysctl.d/99-zceos.conf:/etc/sysctl.d/99-zceos.conf:ro -v {1}/{3}/{4}:/mnt/flash:Z -e INTFTYPE=et -e MGMT_INTF=eth0 -e ETBA=1 -e 
SKIP_ZEROTOUCH_BARRIER_IN_SYSDBINIT=1 -e CEOS=1 -e EOS_PLATFORM=ceoslab -e container=docker -i -t ceosimage:{2} /sbin/init systemd.setenv=INTFTYPE=et systemd.setenv=MGMT_INTF=eth0 systemd.setenv=ETBA=1 systemd.setenv=SKIP_ZEROTOUCH_BARRIER_IN_SYSDBINIT=1 systemd.setenv=CEOS=1 systemd.setenv=EOS_PLATFORM=ceoslab systemd.setenv=container=docker\\n\".format(CEOS[_node].ceos_name, CONFIGS, CEOS[_node].image, _tag, _node))\n # Create initial host anchor containers\n for _host in HOSTS:\n create_output.append(\"# Getting {0} nodes plumbing\\n\".format(_host))\n create_output.append(\"docker run -d --restart=always --log-opt max-size=10k --name={0}-net --net=none busybox /bin/init\\n\".format(HOSTS[_host].c_name))\n startup_output.append(\"docker start {0}-net\\n\".format(HOSTS[_host].c_name))\n create_output.append(\"{0}pid=$(docker inspect --format '{{{{.State.Pid}}}}' {0}-net)\\n\".format(HOSTS[_host].c_name))\n create_output.append(\"sudo ln -sf /proc/${{{0}pid}}/ns/net /var/run/netns/{0}\\n\".format(HOSTS[_host].c_name))\n # Stop host containers\n startup_output.append(\"docker stop {0}\\n\".format(HOSTS[_host].c_name))\n stop_output.append(\"docker stop {0}\\n\".format(HOSTS[_host].c_name))\n stop_output.append(\"docker stop {0}-net\\n\".format(HOSTS[_host].c_name))\n delete_output.append(\"docker stop {0}\\n\".format(HOSTS[_host].c_name))\n delete_output.append(\"docker stop {0}-net\\n\".format(HOSTS[_host].c_name))\n # Remove host containers\n startup_output.append(\"docker rm {0}\\n\".format(HOSTS[_host].c_name))\n delete_output.append(\"docker rm {0}\\n\".format(HOSTS[_host].c_name))\n delete_output.append(\"docker rm {0}-net\\n\".format(HOSTS[_host].c_name))\n startup_output.append(\"{0}pid=$(docker inspect --format '{{{{.State.Pid}}}}' {0}-net)\\n\".format(HOSTS[_host].c_name))\n startup_output.append(\"sudo ln -sf /proc/${{{0}pid}}/ns/net /var/run/netns/{0}\\n\".format(HOSTS[_host].c_name))\n create_output.append(\"# Connecting host containers together\\n\")\n # Output veth commands\n for _intf in HOSTS[_host].intfs:\n _tmp_intf = HOSTS[_host].intfs[_intf]\n if _host in _tmp_intf['veth'].split('-')[0]:\n create_output.append(\"sudo ip link set {0} netns {1} name {2} up\\n\".format(_tmp_intf['veth'].split('-')[0], HOSTS[_host].c_name, _tmp_intf['port']))\n startup_output.append(\"sudo ip link set {0} netns {1} name {2} up\\n\".format(_tmp_intf['veth'].split('-')[0], HOSTS[_host].c_name, _tmp_intf['port']))\n else:\n create_output.append(\"sudo ip link set {0} netns {1} name {2} up\\n\".format(_tmp_intf['veth'].split('-')[1], HOSTS[_host].c_name, _tmp_intf['port']))\n startup_output.append(\"sudo ip link set {0} netns {1} name {2} up\\n\".format(_tmp_intf['veth'].split('-')[1], HOSTS[_host].c_name, _tmp_intf['port']))\n create_output.append(\"sleep 1\\n\")\n create_output.append(\"docker run -d --name={0} --privileged --log-opt max-size=1m --net=container:{0}-net -e HOSTNAME={0} -e HOST_IP={1} -e HOST_MASK={3} -e HOST_GW={4} chost:{2} ipnet\\n\".format(HOSTS[_host].c_name, HOSTS[_host].ip, HOSTS[_host].image, HOSTS[_host].mask, HOSTS[_host].gw))\n startup_output.append(\"sleep 1\\n\")\n startup_output.append(\"docker run -d --name={0} --privileged --log-opt max-size=1m --net=container:{0}-net -e HOSTNAME={0} -e HOST_IP={1} -e HOST_MASK={3} -e HOST_GW={4} chost:{2} ipnet\\n\".format(HOSTS[_host].c_name, HOSTS[_host].ip, HOSTS[_host].image, HOSTS[_host].mask, HOSTS[_host].gw))\n # Check for iPerf3 commands\n if topo_yaml['iperf']:\n _iperf = topo_yaml['iperf']\n _port = _iperf['port']\n 
_brate = _iperf['brate']\n for _server in _iperf['servers']:\n create_output.append(\"docker exec -d {0} iperf3 -s -p {1}\\n\".format(HOSTS[_server].c_name, _port))\n startup_output.append(\"docker exec -d {0} iperf3 -s -p {1}\\n\".format(HOSTS[_server].c_name, _port))\n for _client in _iperf['clients']:\n _target = topo_yaml['hosts'][_client['target']]['ipaddress']\n create_output.append(\"docker exec -d {0} iperf3client {1} {2} {3}\\n\".format(HOSTS[_client['client']].c_name, _target, _port, _brate))\n startup_output.append(\"docker exec -d {0} iperf3client {1} {2} {3}\\n\".format(HOSTS[_client['client']].c_name, _target, _port, _brate))\n # Create the initial deployment files\n with open(CEOS_SCRIPTS + '/{0}/Create.sh'.format(_tag), 'w') as cout:\n for _create in create_output:\n cout.write(_create)\n with open(CEOS_SCRIPTS + '/{0}/Startup.sh'.format(_tag), 'w') as cout:\n for _start in startup_output:\n cout.write(_start)\n with open(CEOS_SCRIPTS + '/{0}/Stop.sh'.format(_tag), 'w') as cout:\n for _stop in stop_output:\n cout.write(_stop)\n with open(CEOS_SCRIPTS + '/{0}/Delete.sh'.format(_tag), 'w') as cout:\n for _delete in delete_output:\n cout.write(_delete)\n for _delete in delete_net_output:\n cout.write(_delete)", "title": "" }, { "docid": "5e525c13c9e792024a5a418c70244fcc", "score": "0.4641946", "text": "def topology (self):\n self.log.info(\"Call %s function: topology\" % self.LOGGER_NAME)\n # Forward call to main layer class\n topology = self._proceed_API_call('api_sas_get_topology')\n if topology is None:\n self.send_error(404, message=\"Resource info is missing!\")\n return\n # Setup OK status for HTTP response\n self.send_response(200)\n if topology is False:\n self.log.info(\n \"Requested resource has not changed! Respond with cached topology...\")\n if self.virtualizer_format_enabled:\n data = self.server.last_response.xml()\n else:\n data = self.server.last_response.dump()\n else:\n if self.virtualizer_format_enabled:\n self.log.debug(\"Convert internal NFFG to Virtualizer...\")\n converter = NFFGConverter(domain=None, logger=log)\n # Dump to plain text format\n v_topology = converter.dump_to_Virtualizer(nffg=topology)\n # Cache converted data for edit-config patching\n self.log.debug(\"Cache converted topology...\")\n self.server.last_response = v_topology\n # Dump to plain text format\n data = v_topology.xml()\n # Setup HTTP response format\n else:\n self.log.debug(\"Cache converted topology...\")\n self.server.last_response = topology\n data = topology.dump()\n if self.virtualizer_format_enabled:\n self.send_header('Content-Type', 'application/xml')\n else:\n self.send_header('Content-Type', 'application/json')\n self.log.log(VERBOSE, \"Responded topology for 'get-config':\\n%s\" % data)\n # Setup length for HTTP response\n self.send_header('Content-Length', len(data))\n self.end_headers()\n self.log.info(\"Send back topology description...\")\n self.wfile.write(data)\n self.log.debug(\"%s function: get-config ended!\" % self.LOGGER_NAME)", "title": "" }, { "docid": "a856608789fa792161e2084c7a8d3777", "score": "0.46241966", "text": "def maketopo():\n nxpoints=200\n nypoints=200\n xupper=2.e0\n yupper=2.e0\n xlower = -2.e0\n ylower = -2.e0\n outfile= \"bowl.topotype2\"\n\n topography = Topography(topo_func=topo)\n topography.x = linspace(xlower,xupper,nxpoints)\n topography.y = linspace(ylower,yupper,nypoints)\n topography.write(outfile, topo_type=2, Z_format=\"%22.15e\")", "title": "" }, { "docid": "a0c30869cfa1d840294e99235270f209", "score": "0.4614379", "text": "def 
_populate_bin(self, mount_point):\n for script_filename, tensor_to_tensors in [('inputs', self._create_tensor_to_inputs),\n ('outputs', self._create_tensor_to_outputs)]:\n script = _read_template_script('print_tensor_dependencies.py',\n MOUNT_POINT_PLACEHOLDER=\"'{}/'\".format(mount_point),\n TENSOR_TO_TENSORS_PLACEHOLDER=str(tensor_to_tensors()))\n self._bin_scripts['/bin/' + script_filename] = script\n self._files['/bin/' + script_filename] = _create_script_file(script)", "title": "" }, { "docid": "c1f240d5fc2079d31d48d2bae9e65276", "score": "0.46109778", "text": "def generate_topology(self, name='topol.top', xlinked=False, restrained=False):\n\n if restrained:\n r = restrain.RestrainedTopology(self.gro_name, self.restraint_residue, self.build_restraints, com=False,\n xlink=xlinked, vparams=None)\n r.add_position_restraints('xyz', self.restraints)\n r.write_topology()\n\n self.topology = SystemTopology(self.gro_name, xlink=xlinked, restraints=[self.restraint_residue])\n self.topology.write_top(name=name)", "title": "" }, { "docid": "0b1a402ee04ff032ed1ac469bfad8db8", "score": "0.4560311", "text": "def make_object(self, data):\n return OperationScript(**data)", "title": "" }, { "docid": "0b1a402ee04ff032ed1ac469bfad8db8", "score": "0.4560311", "text": "def make_object(self, data):\n return OperationScript(**data)", "title": "" }, { "docid": "0b1a402ee04ff032ed1ac469bfad8db8", "score": "0.4560311", "text": "def make_object(self, data):\n return OperationScript(**data)", "title": "" }, { "docid": "918281b18af420ed221fcbab10221621", "score": "0.45487487", "text": "def configure_junosphere(self):\n LOG.debug(\"Configuring Junosphere\") \n vmm_template = lookup.get_template(\"junos/topology_vmm.mako\")\n topology_data = {}\n # Generator for private0, private1, etc\n collision_to_bridge_mapping = {}\n private_bridges = []\n junosphere_predefined_bridge_count = 124 # have to explicitly create bridges past 124\n\n image_tuple = namedtuple('image', \"alias, basedisk\")\n\n if self.junosphere_olive:\n image = image_tuple(\"MY_DISK\", config.settings['Junosphere']['basedisk'])\n else:\n image = image_tuple(\"VJX1000_LATEST\", None)\n\n\n bridge_id_generator = (i for i in itertools.count(0))\n def next_bridge_id():\n bridge_id = bridge_id_generator.next()\n retval = \"private%s\" % bridge_id\n if bridge_id > junosphere_predefined_bridge_count:\n private_bridges.append(retval)\n return retval\n\n for device in sorted(self.network.devices(), key = lambda x: x.fqdn):\n hostname = device.hostname\n topology_data[hostname] = {\n 'image': image.alias,\n 'config': router_conf_file(self.network, device),\n 'interfaces': [],\n }\n for src, dst, data in sorted(self.network.graph.edges(device, data=True), key = lambda (s,t,d): t.fqdn):\n subnet = data['sn']\n description = 'Interface %s -> %s' % (\n ank.fqdn(self.network, src), \n ank.fqdn(self.network, dst))\n# Bridge information for topology config\n if subnet in collision_to_bridge_mapping:\n# Use bridge allocated for this subnet\n bridge_id = collision_to_bridge_mapping[subnet]\n else:\n# Allocate a bridge for this subnet\n bridge_id = next_bridge_id()\n collision_to_bridge_mapping[subnet] = bridge_id\n\n if not self.junosphere_olive:\n description += \"(%s)\" % self.int_id(data['id']) \n\n topology_data[hostname]['interfaces'].append({\n 'description': description,\n 'id': self.int_id_em(data['id']),\n 'id_ge': self.int_id(data['id']),\n 'bridge_id': bridge_id,\n })\n\n if self.junosphere_olive:\n# em2 is dead on Olive Junosphere platform\n 
topology_data[hostname]['interfaces'].append({\n 'description': \"dead interface\",\n 'id': \"em2\",\n 'bridge_id': \"dead\",\n })\n \n vmm_file = os.path.join(lab_dir(), \"topology.vmm\")\n with open( vmm_file, 'wb') as f_vmm:\n f_vmm.write( vmm_template.render(\n topology_data = topology_data,\n private_bridges = private_bridges,\n image = image,\n olive_based = self.junosphere_olive,\n ))", "title": "" }, { "docid": "80f381beed0799261fc2b6bc390b1329", "score": "0.4512595", "text": "def Run(args):\n\n args = args[list(args.keys())[0]]\n ###print args #debug\n \n\n oobj = Syntax([\n Template(\"YVARS\", subc=\"\", ktype=\"existingvarlist\", var=\"yvars\", islist=True),\n Template(\"XVARS\", subc=\"\", ktype=\"existingvarlist\", var=\"xvars\", islist=True),\n Template(\"COLOR\", subc=\"\", ktype=\"existingvarlist\", var=\"color\"),\n Template(\"SIZE\", subc=\"\", ktype=\"existingvarlist\", var=\"size\"),\n Template(\"SHAPE\", subc=\"\", ktype=\"existingvarlist\", var=\"shape\"),\n Template(\"LABEL\", subc=\"\", ktype=\"existingvarlist\", var=\"label\"),\n Template(\"LINEAR\", subc=\"FITLINES\", ktype=\"bool\", var=\"linear\"),\n Template(\"QUADRATIC\", subc=\"FITLINES\", ktype=\"bool\", var=\"quadratic\"),\n Template(\"CUBIC\", subc=\"FITLINES\", ktype=\"bool\", var=\"cubic\"),\n Template(\"LOESS\", subc=\"FITLINES\", ktype=\"bool\", var=\"loess\"),\n Template(\"IGNORE\", subc=\"FITLINES\", ktype=\"str\", var=\"ignore\"),\n Template(\"APPLYTO\", subc=\"FITLINES\", ktype=\"str\", var=\"applyfitto\", vallist=[\"total\", \"group\"]),\n Template(\"CATEGORICAL\", subc=\"OPTIONS\", ktype=\"str\", var=\"categorical\", vallist=[\"bars\", \"lines\", \"boxplot\"]),\n Template(\"GROUP\", subc=\"OPTIONS\", ktype=\"int\", var=\"group\"),\n Template(\"BOXPLOTS\", subc=\"OPTIONS\", ktype=\"bool\", var=\"boxplots\"),\n Template(\"HEXBIN\", subc=\"OPTIONS\", ktype=\"bool\", var=\"hexbin\"),\n Template(\"TITLE\", subc=\"OPTIONS\", ktype=\"literal\", var=\"title\"),\n Template(\"INDENT\", subc=\"OPTIONS\", ktype=\"int\", var=\"indent\", vallist=[0, 50]),\n Template(\"YSCALE\", subc=\"OPTIONS\", ktype=\"int\", var=\"yscale\", vallist=[50, 100]),\n Template(\"PAGEX\", subc=\"OPTIONS\", ktype=\"float\", var=\"pagex\", vallist=[1]),\n Template(\"PAGEY\", subc=\"OPTIONS\", ktype=\"float\", var=\"pagey\", vallist=[1]),\n Template(\"HELP\", subc=\"\", ktype=\"bool\")])\n \n # ensure localization function is defined\n global _\n try:\n _(\"---\")\n except:\n def _(msg):\n return msg\n\n # A HELP subcommand overrides all else\n if \"HELP\" in args:\n #print helptext\n helper()\n else:\n processcmd(oobj, args, plots, vardict=spssaux.VariableDict())", "title": "" }, { "docid": "ca9d9c073a8aa850c9b942946fbe7096", "score": "0.4506192", "text": "def get_score_tcl(coord_path, struct_path):\n tcl = \"\"\"coordinates %s\nstructure %s\nparaTypeCharmm on\nparameters %s\ntemperature 300.0\ngbis on\nalphaCutoff 15.0\nionConcentration 1.0\nGBISDelta 0.8\nGBISBeta 0.0\nGBISGamma 2.90912\nexclude scaled1-4\n1-4scaling 1.0\ncutoff 16.0\nswitching off\npairlistdist 24\nPME no\nfixedAtoms on\noutputName not_used\nbinaryoutput no\nrun 0\n\"\"\" % (coord_path, struct_path, FORCE_FIELD_PATH)\n return tcl", "title": "" }, { "docid": "26fd0556101ec62f6e3aec42499326f3", "score": "0.4505297", "text": "def setup_host(bridge='br0', int_port='p0', int_ip=None, ip='', \n contr_addr=None, connections=None):\n print \"Setting up ovs for {}\".format(ip)\n #read the template file\n if contr_addr is None:\n template_name = 
'config_ovs_standalone.template.sh'\n else:\n template_name = 'config_ovs.template.sh'\n with open(template_name) as template_file:\n template = Template(template_file.read())\n\n #instantiate the template\n #create ovs-setup script for this host\n script_text = template.render(bridge=bridge, contr_addr=contr_addr, \n int_port=int_port, int_ip=int_ip, connections=connections)\n \n #write the script to an actual file\n script_name = \"config_ovs.sh\"\n with open(script_name, \"w\") as script:\n script.write(script_text) \n\n #call ansible\n results = ansible_wrapper.playbook(playbook='ovs_config_play.yaml', hosts=[ip])\n #print results\n print results", "title": "" }, { "docid": "b59486e672066b07fe47188f590e50d9", "score": "0.44782212", "text": "def __call__(self, taskObject):\n if taskObject['Type'] != \"StageOut\":\n return\n # //\n # // Lists of pre stage out and post stage out shell commands\n #// for setting up stage out env\n # //Should be bash shell commands as strings\n taskObject['PreStageOutCommands'] = []\n taskObject['PostStageOutCommands'] = []\n\n # //\n # // Determine what is being staged out from the parent node\n #// (This should be a CMSSW node)\n parent = taskObject.parent\n if parent == None:\n # no parent => dont know what to stage out\n return\n\n if parent['Type'] not in (\"CMSSW\", \"SVSuite\"):\n # parent isnt a CMSSW node, dont know what it does...\n return\n stageOutFor = parent['Name']\n taskObject['StageOutFor'] = taskObject['PayloadNode'].configuration\n return", "title": "" }, { "docid": "b080d63eaec2a94da81577af536fd5ad", "score": "0.44656765", "text": "def _get_master_script_template(self):\n \n commands=\"\"\"#!/bin/bash\n\nfunction go {\n hostname\n\n ls /opt/astro/SL53/bin/setup.astro.sh\n\n gmvers=\"%(version)s\"\n module unload gmix_image && module load gmix_image/work\n module unload psfex && module load psfex/work\n module unload meds && module load meds/work\n module unload gmix_meds && module load gmix_meds/$gmvers\n\n\n confname=\"%(config)s.yaml\"\n conf=\"$GMIX_MEDS_DIR/share/config/$confname\"\n\n python -u $GMIX_MEDS_DIR/bin/gmix-fit-meds \\\\\n --obj-range $start,$end \\\\\n --work-dir $tmpdir \\\\\n $conf $meds_file $out_file\n \n exit_status=$?\n\n}\n\n#nsetup_ess\nsource ~/.bashrc\n\nif [ $# -lt 5 ]; then\n echo \"error: meds_file start end out_file\"\n exit 1\nfi\n\n# this can be a list\nmeds_file=\"$1\"\nstart=\"$2\"\nend=\"$3\"\nout_file=\"$4\"\nlog_file=\"$5\"\n\nif [[ -n $_CONDOR_SCRATCH_DIR ]]; then\n tmpdir=$_CONDOR_SCRATCH_DIR\nelse\n tmpdir=$TMPDIR\nfi\n\noutdir=$(dirname $out_file)\nmkdir -p $outdir\n\nlbase=$(basename $log_file)\ntmplog=\"$tmpdir/$lbase\"\n\ngo &> \"$tmplog\"\ncp \"$tmplog\" \"$log_file\"\n\nexit $exit_status\\n\"\"\"\n\n return commands", "title": "" }, { "docid": "48d76382f1dc72fa140deeb279e9eeea", "score": "0.44400838", "text": "def cmd_from_template(node, vat_template_file, json_param=True, **vat_args):\n with VatTerminal(node, json_param=json_param) as vat:\n return vat.vat_terminal_exec_cmd_from_template(\n vat_template_file, **vat_args\n )", "title": "" }, { "docid": "387b444e71bb225d1039d41c2da20704", "score": "0.4438441", "text": "def write_tcl_one(options, A, tag):\n with open(\"POSCAR_A.%s\" % tag, \"w\") as f: pcwrite.poscar(A, f, vasp5=True)\n write_xyz(options, A, \"A.%s\" % tag,options.output_tiles)\n\n fout = file(\"struct.%s.tcl\" % tag, \"w\")\n center = False\n write_struct(fout, A, \"A.%s.xyz\" % tag, 0, center)\n fout.close()", "title": "" }, { "docid": "bbefe8e0b5e2c5b9ab19957df3d5d2e8", 
"score": "0.44263136", "text": "def acStartAutoscript():\n\n dctData = ImmutableMultiDict(request.values)\n dctData = ImmutableMultiDict(dctData).to_dict()\n acConProtocol = dctData['protocol'] \n acConIp = dctData['conIp']\n acConPort = dctData['conPort']\n\n try:\n objAutoScriptObject = clsMessageObjects()\n for acKeyName, objName in _Protocol_class.G_dctAutoScriptOnlyZMQ_Objects.items():\n objAutoScriptObject.vUnbindPubSoc(objName, acKeyName)\n\n except Exception as E:\n logging.error(\"can not unbind from this IP, error -> %s\", E)\n\n acAutoScriptingPath = os.path.join(os.path.dirname(__file__), \"./scriptingFiles/\")\n acAutoScriptFiles = os.listdir(acAutoScriptingPath)\n for iIndex, acItem in enumerate(acAutoScriptFiles):\n if acItem[-5:] == \".json\":\n acScriptFileName = acItem[:-5]\n try:\n with open(os.path.join(os.path.dirname(__file__), \"./scriptingFiles/\" + acItem), 'r') as f:\n dctFileData = json.load(f)\n\n \n for messageKey, messageContent in dctFileData.items():\n if acConProtocol == \"MQTT\":\n dctFileData[messageKey][\"Connection\"].update({\"Protocol\": acConProtocol})\n dctFileData[messageKey].update({\"Individual_Message_Protocol\": acConProtocol})\n dctFileData[messageKey][\"Connection\"].update({\"MQTTRemoteIP\": acConIp})\n dctFileData[messageKey][\"Connection\"].update({\"MQTTPort\": acConPort})\n elif acConProtocol == \"ZMQ\":\n dctFileData[messageKey][\"Connection\"].update({\"Protocol\": acConProtocol})\n dctFileData[messageKey].update({\"Individual_Message_Protocol\": acConProtocol})\n dctFileData[messageKey][\"Connection\"].update({\"ZeroMQRemoteIPPub\": acConIp})\n dctFileData[messageKey][\"Connection\"].update({\"ZeroMQPortPub\": acConPort})\n elif acConProtocol == \"previous\":\n pass\n\n _Protocol_class.G_dctJsonAutoScriptFile.update({acScriptFileName: dctFileData})\n f.close()\n\n except Exception as E:\n logging.error(\"unable to write to file, error -> %s\", E)\n return 'error'\n\n try:\n with open(os.path.join(os.path.dirname(__file__),\"./scriptingFiles/\"+acScriptFileName+\".json\"), 'w') as f:\n f.write(json.dumps(_Protocol_class.G_dctJsonAutoScriptFile[acScriptFileName]))\n f.close()\n except Exception as E:\n logging.error(\"unable to write to file, error -> %s\", E)\n return 'error'\n\n objNewAutoScriptObject = clsMessageObjects()\n acConnectionMessage = objNewAutoScriptObject.vMakeConnection()\n\n if acConnectionMessage == \"error\":\n return \"error\"\n return 'Auto Script started'", "title": "" }, { "docid": "aca718bc40db834783a24d1c632e52c5", "score": "0.44044036", "text": "def test_write_xml_from_topology(self):\n pass", "title": "" }, { "docid": "b93ccd2255198742a3d087a721caa757", "score": "0.44002232", "text": "def create_simulation_script(case_name, n_tasks, tasks_per_node, threads_per_core, partition, modules, scripts):\r\n\r\n header = create_script_header(n_tasks, tasks_per_node, threads_per_core, partition, \"{0}_sim\".format(case_name))\r\n\r\n module_string = create_script_modules(modules, scripts)\r\n\r\n if n_tasks > 1:\r\n commands = \"\"\"decomposePar\r\nmpirun -np {0} simpleFoam -parallel | tee simpleFoam.log\r\nreconstructPar\r\nrm -rf processor*\\n\"\"\".format(n_tasks)\r\n else:\r\n commands = \"simpleFoam | tee simpleFoam.log\\n\"\r\n\r\n commands += \"foamToVTK -latestTime -ascii\\n\"\r\n\r\n script = open(\"runSimulations.sh\", \"w\")\r\n script.write(header)\r\n script.write(module_string)\r\n script.write(commands)\r\n script.close()", "title": "" }, { "docid": "b81fab7e7f133435c3c3617646de9b7d", "score": 
"0.43991858", "text": "def __call__(self, taskObject):\n if taskObject['Type'] != \"StageOut\":\n return\n\n\n # //\n # // Pre and Post Stage out commands\n #//\n precomms = taskObject.get(\"PreStageOutCommands\", [])\n postcomms = taskObject.get(\"PostStageOutCommands\", [])\n\n\n # //\n # // Install the main script\n #//\n srcfile = inspect.getsourcefile(NewRuntimeStageOut)\n if not os.access(srcfile, os.X_OK):\n os.system(\"chmod +x %s\" % srcfile)\n taskObject.attachFile(srcfile)\n\n # //\n # // Failure script\n #//\n fsrcfile = inspect.getsourcefile(RuntimeStageOutFailure)\n if not os.access(fsrcfile, os.X_OK):\n os.system(\"chmod +x %s\" % fsrcfile)\n taskObject.attachFile(fsrcfile)\n\n exeScript = taskObject[taskObject['Executable']]\n\n envScript = taskObject[taskObject[\"BashEnvironment\"]]\n envCommand = \"%s %s\" % (envScript.interpreter, envScript.name)\n exeScript.append(envCommand)\n\n for precomm in precomms:\n exeScript.append(str(precomm))\n exeScript.append(\"chmod +x ./RuntimeStageOut.py\")\n exeScript.append(\"./RuntimeStageOut.py\")\n exeScript.append(_StageOutFailureScript )\n for postcomm in postcomms:\n exeScript.append(str(postcomm))\n\n\n\n # //\n # // Insert End Control Point check on exit status\n #//\n controlP = taskObject['ShREEKTask'].endControlPoint\n exitCheck = CheckExitCode()\n exitCheck.attrs['OnFail'] = \"skipToLog\"\n exitAction = SetNextTask(\"skipToLog\")\n exitAction.content = \"logArchive\"\n controlP.addConditional(exitCheck)\n controlP.addAction(exitAction)\n\n return", "title": "" }, { "docid": "3f254dcb606cff1eff9aeb30cb64062b", "score": "0.43946475", "text": "def transformation_results_yaml(topology,ignore=('addressing','defaults','nodes_map','includes')):\n for k in ignore:\n del topology[k]\n\n\n \"\"\"\n If we're using a dictionary extension that has to_yaml method use that,\n otherwise use pyyaml (hoping it won't generate extraneous attributes)\n \"\"\"\n if callable(getattr(topology,\"to_yaml\",None)):\n return topology.to_yaml()\n else:\n return yaml.dump(topology)", "title": "" }, { "docid": "ab9e26bce65abfc2b7d5804924974c10", "score": "0.4391522", "text": "def main():\n # TODO(mlegner): Add option specifying already existing keys and certificates\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--joining_ia\",\n help='ISD-AS for which the configuration is generated.')\n parser.add_argument(\"--core_ia\",\n help='Signing Core ISD-AS',\n default='1-1')\n parser.add_argument(\"--core_sign_priv_key_file\",\n help='Signing private key of the core AS',\n default=DEFAULT_CORE_SIG_KEY)\n parser.add_argument(\"--core_cert_file\",\n help='Certificate file of the signing core AS',\n default=DEFAULT_CORE_CERT_FILE)\n parser.add_argument(\"--trc_file\",\n help='Trusted Root Configuration file',\n default=DEFAULT_TRC_FILE)\n parser.add_argument(\"--topo_file\",\n help='Topology file to be used for config generation.')\n parser.add_argument(\"--package_path\",\n help='Path to generate and store AS configurations.',\n default=DEFAULT_PACKAGE_PATH)\n parser.add_argument(\"--user_id\",\n help='User Identifier (email + IA)')\n parser.add_argument(\"--no-prometheus\",\n help='Don\\'t generate prometheus configuration',\n action='store_true',)\n args = parser.parse_args()\n\n def jsonhook(d):\n ret = {}\n for k,v in d.items():\n try:\n k = int(k)\n except:\n pass\n ret[k] = v\n return ret\n\n with open(args.topo_file) as json_data:\n topo_dict = json.load(json_data, object_hook=jsonhook)\n create_scionlab_as_local_gen(args, topo_dict)", 
"title": "" }, { "docid": "ee930de7751ccaec4231e7f9b9f96dc9", "score": "0.43838167", "text": "def __init__(self, n=1, cpu=.1, bw=10, delay=None,\n max_queue_size=None, **params):\n\n # Initialize topo\n Topo.__init__(self, **params)\n\n # Host and link configuration\n hconfig = {'cpu': cpu}\n lconfig = {'bw': bw, 'delay': delay,\n 'max_queue_size': max_queue_size }\n\n # Create the actual topology\n receiver = self.addHost('receiver')\n\n # Switch ports 1:uplink 2:hostlink 3:downlink\n uplink, hostlink, downlink = 1, 2, 3\n\n # The following template code creates a parking lot topology\n # for N = 1\n # TODO: Replace the template code to create a parking lot topology for any arbitrary N (>= 1)\n # Begin: Template code\n s1 = self.addSwitch('s1')\n h1 = self.addHost('h1', mac='00:00:00:00:00:01', **hconfig)\n switches = [s1]\n\n\n # create Linear topology\n for i in range(1, n):\n switch = self.addSwitch('s' + str(i+1))\n host = self.addHost('h' + str(i+1), mac='00:00:00:00:00:0'+str(i+1), **hconfig)\n self.addLink(switch, switches[i-1], port1=uplink, port2=downlink, loss=20, **lconfig)\n self.addLink(host, switch, port1=0, port2=hostlink, loss=20, **lconfig)\n switches.append(switch)\n\n # for i in range(1, n):\n # switch = self.addSwitch('s' + str(i+1))\n # host = self.addHost('h' + str(i+1), mac='00:00:00:00:00:0'+str(i+1), **hconfig)\n # self.addLink(host, switch, port1=0, port2=hostlink, **lconfig)\n # switches.append(switch)\n\n # for i in range(0, len(switches)):\n # for j in range(i+1, len(switches)):\n # self.addLink(switches[i], switches[j])\n \n\n # Wire up receiver\n self.addLink(receiver, s1,\n port1=0, port2=uplink, **lconfig)\n\n # Wire up clients:\n self.addLink(h1, s1,\n port1=0, port2=hostlink, **lconfig)\n\n # add link between the first switch and last switch to create the ring topology\n # self.addLink(s1, switches[-1], port1=4, port2=downlink, **lconfig)\n\n # Uncomment the next 8 lines to create a N = 3 parking lot topology\n #s2 = self.addSwitch('s2')\n #h2 = self.addHost('h2', **hconfig)\n #self.addLink(s1, s2,\n # port1=downlink, port2=uplink, **lconfig)\n #self.addLink(h2, s2,\n # port1=0, port2=hostlink, **lconfig)\n #s3 = self.addSwitch('s3')\n #h3 = self.addHost('h3', **hconfig)\n #self.addLink(s2, s3,\n # port1=downlink, port2=uplink, **lconfig)\n #self.addLink(h3, s3,\n # port1=0, port2=hostlink, **lconfig)\n\n # End: Template code", "title": "" }, { "docid": "03c8bf6b31a70635e1ee7567503c9881", "score": "0.43728969", "text": "def condor_bash_template():\n bash_template = \"\"\"#!/bin/tcsh\necho \" > Starting CONDOR job at `date` on `hostname`\"\n\n\n## Copy CMSSW tarball from eos, untar it, and remove the tarball\nxrdcp -s root://cmseos.fnal.gov/%(eos_path_to_tarball)s/%(cmsRelease)s.tgz .\ntar -xf %(cmsRelease)s.tgz\nrm %(cmsRelease)s.tgz\n\n## copy transferred data in batch directory (only need 3 files)\nif (! 
-d %(cmsRelease)s/src/Analysis/CyMiniAna/%(unique_id_batch_path)s/ ) then\n mkdir -p %(cmsRelease)s/src/Analysis/CyMiniAna/%(unique_id_batch_path)s/\nendif\n\nmv cmaConfig.txt %(cmsRelease)s/src/Analysis/CyMiniAna/%(unique_id_batch_path)s/\nmv listOfFiles.txt %(cmsRelease)s/src/Analysis/CyMiniAna/%(unique_id_batch_path)s/\nmv run_condor.sh %(cmsRelease)s/src/Analysis/CyMiniAna/%(unique_id_batch_path)s/\n\n## Setup CMSSW environment\nsetenv SCRAM_ARCH slc6_amd64_gcc530\ncd %(cmsRelease)s/src/\nscramv1 b ProjectRename\neval `scramv1 runtime -csh` # cmsenv is an alias not on the workers\nscram b -j8\n\n\n## Execute CyMiniAna\ncd Analysis/CyMiniAna\n\necho \" > Run the program \"\n%(executable)s %(cfg_filename)s\n\n\n## Move output file to EOS (make new directory if necessary)\necho \" > Finishing executing \"\necho \" > Move output file to EOS\"\n\n# -- Only copy files if they exist\nset outfiles = `ls %(local_output_path)s/`\nif ( \"$outfiles\" != \"\" ) then\n echo \" Output file exists, copy to EOS \"\n xrdcp %(local_output_path)s/*.root %(outputDir)s/%(local_output_path)s\nendif\n\n\n## Cleanup\nrm %(local_output_path)s/*.root # delete output files\ncd ${_CONDOR_SCRATCH_DIR} # delete working directory\nrm -rf %(cmsRelease)s\n\necho \" > Ended at `date` on `hostname`\"\nls -alh\nexit 0\n\"\"\"\n\n return bash_template", "title": "" }, { "docid": "939e928fe224db22921dd4e0c7562b06", "score": "0.43652397", "text": "def handleScriptTaskObject(self, taskObject):\n exeScript = taskObject[taskObject['Executable']]\n jobSpec = taskObject['JobSpecNode']\n exeCommand = jobSpec.application['Executable']\n exeScript.append(exeCommand)\n return", "title": "" }, { "docid": "ab7411ad677c7f55fc496f0e1baa5957", "score": "0.4357492", "text": "def condor_script_template():\n condor_template = \"\"\"\nUniverse = vanilla\nExecutable = %(condorExec)s\n\nShould_Transfer_Files = YES\nWhenToTransferOutput = ON_EXIT\nTransfer_Input_Files = %(unique_id_batch_path)s/cmaConfig.txt,%(unique_id_batch_path)s/listOfFiles.txt,%(unique_id_batch_path)s/run_condor.sh\nnotify_user = ${LOGNAME}@FNAL.GOV\nx509userproxy = $ENV(X509_USER_PROXY)\n\nLog = %(unique_id_batch_path)s/condor_$(Cluster)_$(Process).log\nError\t = %(unique_id_batch_path)s/condor_$(Cluster)_$(Process).error\nOutput = %(unique_id_batch_path)s/condor_$(Cluster)_$(Process).out\n\nQueue 1\n\"\"\"\n\n return condor_template", "title": "" }, { "docid": "00521d4ae95de8a5845136da236e5093", "score": "0.43561456", "text": "def main():\n args = parse_args()\n psyml = PSyml(args.file)\n getattr(psyml, args.command)()", "title": "" }, { "docid": "999147ffcf203b56fc6739d942246b29", "score": "0.43453547", "text": "def parse(self, data):\n graph = networkx.Graph()\n if 'topology' not in data:\n raise ParserError('Parse error, \"topology\" key not found')\n # loop over topology section and create networkx graph\n for link in data[\"topology\"]:\n try:\n source = link[\"lastHopIP\"]\n dest = link[\"destinationIP\"]\n cost = link[\"tcEdgeCost\"]\n except KeyError as e:\n raise ParserError('Parse error, \"%s\" key not found' % e)\n # original olsrd cost (jsoninfo multiplies by 1024)\n cost = float(cost) / 1024.0\n # add link to Graph\n graph.add_edge(source, dest, weight=cost)\n self.graph = graph\n # determine version and revision\n if 'config' in data:\n version_info = data['config']['olsrdVersion'].replace(' ', '').split('-')\n self.version = version_info[1]\n # try to get only the git hash\n if 'hash_' in version_info[-1]:\n version_info[-1] = 
version_info[-1].split('hash_')[-1]\n self.revision = version_info[-1]", "title": "" }, { "docid": "026fd333e1bd36c6b52b682cac784105", "score": "0.43439808", "text": "def convert(self, input, filename=None):\n if python2:\n if self.encoding and isinstance(input, str):\n input = input.decode(self.encoding)\n self._reset(input, filename)\n buf = []\n self.before_convert(buf)\n self.parse_stmts(buf, input)\n self.after_convert(buf)\n script = ''.join(buf)\n self.script = script\n return script", "title": "" }, { "docid": "54dec25a3217aa4d1d73341d52250ce5", "score": "0.43172985", "text": "def _get_topology_from_file(topology_file):\n topology_dir, mod_name = os.path.split(topology_file)\n # Remove .py extension before trying to import\n mod_name = mod_name[:-3]\n sys.path.append(os.path.join(topology_dir, '..', 'src'))\n sys.path.append(topology_dir)\n mod = importlib.import_module(mod_name)\n for attr in mod.__dict__.values():\n if isinstance(attr, TopologyType) and attr != Topology:\n topology_class = attr\n break\n else:\n raise ValueError('Could not find topology subclass in topology module.')\n return topology_class", "title": "" }, { "docid": "fb14debd4bfe39a60ef458002d3d4f75", "score": "0.43159938", "text": "def osm_prep_schema_task(self, result={}, task_uid=None, stage_dir=None, job_name=None):\n\n self.update_task_state(result=result, task_uid=task_uid)\n osm = os.path.join(stage_dir, '{0}.pbf'.format(job_name))\n gpkg = os.path.join(stage_dir, '{0}.gpkg'.format(job_name))\n osmconf_ini = os.path.join(stage_dir, '{0}.ini'.format(job_name))\n osmparser = osmparse.OSMParser(osm=osm, gpkg=gpkg, osmconf=osmconf_ini, task_uid=task_uid)\n osmparser.create_geopackage()\n osmparser.create_default_schema_gpkg()\n osmparser.update_zindexes()\n result['result'] = gpkg\n result['geopackage'] = gpkg\n return result", "title": "" }, { "docid": "39f5193f8d782a9cc400e0f88802ec45", "score": "0.43153542", "text": "def main():\n parser = argparse.ArgumentParser(prog=\"clustergen\")\n parser.add_argument(\"--clustercfg\",\n \"-c\",\n metavar=\"file\",\n type=argparse.FileType('r'),\n required=True,\n help=\"A cluster configuration file\")\n parser.add_argument(\"--outdir\",\n \"-o\",\n type=pathlib.Path,\n required=True,\n help=\"Target directory.\")\n parser.add_argument(\"--wrapper\",\n action=\"store_true\",\n help=\"Generate Snitch cluster wrapper\")\n parser.add_argument(\"--linker\",\n action=\"store_true\",\n help=\"Generate linker script\")\n parser.add_argument(\"--bootdata\",\n action=\"store_true\",\n help=\"Generate bootdata\")\n parser.add_argument(\"--memories\",\n action=\"store_true\",\n help=\"Generate memories\")\n parser.add_argument(\"--template\",\n metavar=\"template\",\n help=\"Name of the template file\")\n\n args = parser.parse_args()\n\n # Read HJSON description\n with args.clustercfg as file:\n try:\n srcfull = file.read()\n obj = hjson.loads(srcfull, use_decimal=True)\n obj = JsonRef.replace_refs(obj)\n except ValueError:\n raise SystemExit(sys.exc_info()[1])\n\n cluster_tb = SnitchClusterTB(obj)\n\n if not args.outdir.is_dir():\n exit(\"Out directory is not a valid path.\")\n\n outdir = args.outdir\n outdir.mkdir(parents=True, exist_ok=True)\n\n ##############\n # Misc files #\n ##############\n\n if args.wrapper:\n with open(outdir / \"snitch_cluster_wrapper.sv\", \"w\") as f:\n f.write(cluster_tb.render_wrapper())\n\n if args.linker:\n with open(outdir / \"link.ld\", \"w\") as f:\n f.write(cluster_tb.render_linker_script())\n\n if args.bootdata:\n with open(outdir / 
\"bootdata.cc\", \"w\") as f:\n f.write(cluster_tb.render_bootdata())\n\n if args.memories:\n with open(outdir / \"memories.json\", \"w\") as f:\n f.write(cluster_tb.cluster.memory_cfg())\n\n ####################\n # Generic template #\n ####################\n\n kwargs = {\n \"cfg\": cluster_tb.cfg,\n }\n\n if args.template:\n write_template(args.template, outdir, **kwargs)", "title": "" }, { "docid": "fe7bbba84772305469d9124b8945a4d7", "score": "0.43111256", "text": "def toTopoFile(topo, out, excludeHosts=set()):\n out.write(SECTION_NODES + '\\n')\n for host in topo.nodes(False):\n if host in excludeHosts:\n continue\n out.write('%s: _' % host)\n attributes = topo.nodeInfo(host).get('mnndn', dict())\n for kv in attributes.iteritems():\n out.write(' %s=%s' % kv)\n out.write('\\n')\n\n out.write(SECTION_LINKS + '\\n')\n for (src, dst) in topo.iterLinks():\n if src in excludeHosts or dst in excludeHosts:\n continue\n out.write('%s:%s' % (src, dst))\n attributes = topo.linkInfo(src, dst).get('mnndn', dict())\n for kv in attributes.iteritems():\n out.write(' %s=%s' % kv)\n out.write('\\n')", "title": "" }, { "docid": "f7c0ebbb5a797f27b90fb951a1feb58a", "score": "0.4310493", "text": "def main():\n\n from argparse import ArgumentParser, RawDescriptionHelpFormatter\n from textwrap import dedent\n parser = ArgumentParser(description=dedent(main.__doc__),\n formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument('--version', action='version', version='%(prog)s 1.1')\n parser.add_argument('-a', '--atomtypes', help='The atom type numbers you want converted.'\n ' These should be written in \"# element\" format.', nargs='+')\n parser.add_argument('-n', '--newfile', help='New file name.', nargs='+')\n parser.add_argument('-c', '--convert', help='Convert atom type numbers to element names.',\n action='store_true', default=False)\n parser.add_argument('-t', '--topology', help='Create an MS-EVB topology file from the final '\n 'coordinates of the trajectory.', action='store_true', default=False)\n parser.add_argument('-e', '--evbin', help='The MS-EVB input file containing atom type information.') \n parser.add_argument('-d', '--datafile', help='The data file containing atom ID number information.') \n parser.add_argument('files', help='The files that you want converted.'\n , nargs='+')\n parser.add_argument('-k', '--cp2k', help='Create coordinate blocks for CP2K.',\n action='store_true', default=False)\n parser.add_argument('-s', '--snapshot', help='Chooses the snapshot of the system for CP2K output.',\n default=-1)\n args = parser.parse_args()\n\n # Store command line arguments as convenient variables\n fh = args.files[0]\n table = args.atomtypes\n conv = args.convert\n topology = args.topology\n evbin = args.evbin\n datafile = args.datafile\n cp2k = args.cp2k\n snapshot = int(args.snapshot)\n\n # Check that the user specified either convert or topology. Die here if not.\n if conv is False and topology is False and cp2k is False:\n error = 'Must specify the -c, -t, or -k flag. Use -h flag for information.'\n sys.exit('\\n' + '%'*len(error) + '\\n' + error + '\\n' + '%'*len(error) + '\\n')\n\n # Check if the user is trying to convert the file. Exit here if -c is specified\n # without giving the atom type numbers.\n if conv is True and table is None:\n error = 'Must use -a flag when converting a trajectory file. Use -h flag for information.'\n sys.exit('\\n' + '%'*len(error) + '\\n' + error + '\\n' + '%'*len(error) + '\\n')\n\n # Check if the user is trying to generate a topology file. 
Exit here if\n # -t is specified without giving the evbin, par4amber and data files.\n if topology is True and (evbin is None or datafile is None):\n error = 'Must use -e and -d flags when generating a topology file. Use -h flag for information.'\n sys.exit('\\n' + '%'*len(error) + '\\n' + error + '\\n' + '%'*len(error) + '\\n')\n \n # Convert the table to a dictionary\n if conv or cp2k:\n convert = {}\n for item in table:\n item = item.split()\n convert[item[0]] = item[1]\n\n # Data blocks\n data = ['ITEM: TIMESTEP',\n 'ITEM: NUMBER OF ATOMS',\n 'ITEM: BOX BOUNDS pp pp pp',\n 'ITEM: ATOMS id type x y z fx fy fz',\n 'ITEM: ATOMS id type x y z ix iy iz',\n 'ITEM: ATOMS id mol type x y z ix iy iz',\n 'ITEM: ATOMS id type q x y z', \n 'ITEM: ATOMS id mol type q x y z']\n\n # Collect all data into memory for data retention\n with open(fh) as fl:\n f = tuple([line.rstrip() for line in fl])\n\n # Locate data blocks\n # Need to find:\n # 1. Timestep value\n # 2. Number of atoms\n # 3. Box bounds\n # 4. Coordinates/forces for each timestep\n timestep = []\n natoms = 0\n box_bounds = []\n coord_and_force = []\n coord_and_index = []\n coord_and_charge = []\n coord_molid_and_index = []\n coord_and_molid = []\n for i,line in enumerate(f):\n # Timestep value\n if 'TIMESTEP' in line:\n timestep.append(int(f[i+1]))\n # Number of atoms\n if natoms == 0:\n if 'NUMBER OF ATOMS' in line:\n natoms = int(f[i+1])\n # Box bounds\n #if box_bounds == []:\n if ('BOX BOUNDS pp pp pp' in line or\n 'BOX BOUNDS ff ff ff' in line or\n 'BOX BOUNDS fm fm fm' in line or\n 'BOX BOUNDS mm mm mm' in line):\n tmp1 = f[i+1].split()\n tmp2 = f[i+2].split()\n tmp3 = f[i+3].split()\n tmp4 = []\n box_bounds.append([ [float(tmp1[0]),float(tmp1[1])],\n [float(tmp2[0]),float(tmp2[1])],\n [float(tmp3[0]),float(tmp3[1])] ])\n # Coordinates and forces\n if ('ITEM: ATOMS id type x y z fx fy fz' in line or\n 'ITEM: ATOMS id type xu yu zu fx fy fz' in line):\n # Temporary list\n tmp = []\n for ix in range(i+1,i+1+natoms):\n ln = f[ix] # Grab the current line\n ln = ln.split() # Split on whitespace\n if conv or cp2k:\n ln[1] = convert[ln[1]] # Use the conversion table to convert the label to element\n else:\n ln[1] = int(ln[1])\n ln[2] = float(ln[2])\n ln[3] = float(ln[3])\n ln[4] = float(ln[4])\n ln[5] = float(ln[5])\n ln[6] = float(ln[6])\n ln[7] = float(ln[7])\n tmp.append(ln) # Add the line to tmp\n coord_and_force.append(tmp) # Make coord_and_force a list of lists.\n # Coordinates and box indices\n if 'ITEM: ATOMS id type x y z ix iy iz' in line:\n tmp = []\n for ix in range(i+1,i+1+natoms):\n ln = f[ix] # Grab the current line\n ln = ln.split() # Split on whitespace\n if conv or cp2k:\n ln[1] = convert[ln[1]] # Use the conversion table to convert the label to element\n else:\n ln[1] = int(ln[1])\n ln[2] = float(ln[2])\n ln[3] = float(ln[3])\n ln[4] = float(ln[4])\n ln[5] = int(ln[5])\n ln[6] = int(ln[6])\n ln[7] = int(ln[7])\n tmp.append(ln) # Add the line to tmp\n coord_and_index.append(tmp) # Make coord_and_force a list of lists.\n # Coordinates, charges\n if 'ITEM: ATOMS id type q x y z' in line:\n tmp = []\n for ix in range(i+1,i+1+natoms):\n ln = f[ix] # Grab the current line\n ln = ln.split() # Split on whitespace\n if conv or cp2k:\n ln[1] = convert[ln[1]] # Use the conversion table to convert the label to element\n else:\n ln[1] = int(ln[1])\n ln[2] = float(ln[2])\n ln[3] = float(ln[3])\n ln[4] = float(ln[4])\n ln[5] = float(ln[5])\n tmp.append(ln) # Add the line to tmp\n coord_and_charge.append(tmp) # Make coord_and_force a 
list of lists.\n # Coordinates, mol_id and box indices\n if 'ITEM: ATOMS id mol type x y z ix iy iz' in line:\n tmp = []\n for ix in range(i+1,i+1+natoms):\n ln = f[ix] # Grab the current line\n ln = ln.split() # Split on whitespace\n if conv or cp2k:\n ln[2] = convert[ln[2]] # Use the conversion table to convert the label to element\n else:\n ln[2] = int(ln[2])\n ln[3] = float(ln[3])\n ln[4] = float(ln[4])\n ln[5] = float(ln[5])\n ln[6] = int(ln[6])\n ln[7] = int(ln[7])\n ln[8] = int(ln[8])\n tmp.append(ln) # Add the line to tmp\n coord_molid_and_index.append(tmp) # Make coord_molid_and_force a list of lists.\n # Coordinates and mol_id\n if 'ITEM: ATOMS id mol type q x y z' in line:\n tmp = []\n for ix in range(i+1,i+1+natoms):\n ln = f[ix] # Grab the current line\n ln = ln.split() # Split on whitespace\n if conv or cp2k:\n ln[2] = convert[ln[2]] # Use the conversion table to convert the label to element\n else:\n ln[2] = int(ln[2])\n ln[3] = float(ln[3])\n ln[4] = float(ln[4])\n ln[5] = float(ln[5])\n ln[6] = float(ln[6])\n tmp.append(ln) # Add the line to tmp\n coord_and_molid.append(tmp) # Make coord_molid_and_force a list of lists.\n\n # Close the trajectory file and empty the tuple\n f = () \n\n # Convert atom type numbers to element names.\n if conv:\n # Number of timesteps\n ntstep = len(timestep)\n\n # Print data to a new file.\n nfile = []\n if args.newfile is not None:\n for item in args.newfile:\n nfile.append(args.newfile)\n else:\n nfile.append(fh.split('.')[0] + '_new.lammpstrj')\n\n lstyle1 = '{0:>9.5f} {1:>9.5f}'\n lstyle2 = '{0:>4} {1:>4} {2:>9.5f} {3:>9.5f} {4:>9.5f} {5:>9.5f} {6:>9.5f} {7:>9.5f}'\n lstyle3 = '{0:>4} {1:>4} {2:>9.5f} {3:>9.5f} {4:>9.5f} {5:>4} {6:>4} {7:>4}'\n lstyle4 = '{0:>4} {1:>4} {2:>2} {3:>9.5f} {4:>9.5f} {5:>9.5f} {6:>4} {7:>4} {8:>4}'\n lstyle5 = '{0:>4} {1:>4} {2:>5.2f} {3:>9.5f} {4:>9.5f} {5:>9.5f}'\n lstyle6 = '{0:>4} {1:>4} {2:>2} {3:>5.2f} {4:>9.5f} {5:>9.5f} {6:>9.5f}'\n for item in nfile:\n fl = open(item, 'w')\n for ix in range(ntstep):\n print(data[0], file=fl)\n print(timestep[ix], file=fl)\n print(data[1], file=fl)\n print(natoms, file=fl)\n print(data[2], file=fl)\n print(lstyle1.format(box_bounds[ix][0][0], box_bounds[ix][0][1]), file=fl)\n print(lstyle1.format(box_bounds[ix][1][0], box_bounds[ix][1][1]), file=fl)\n print(lstyle1.format(box_bounds[ix][2][0], box_bounds[ix][2][1]), file=fl)\n if coord_and_force != []:\n print(data[3], file=fl)\n for ln in coord_and_force[ix]:\n print(lstyle2.format(ln[0], ln[1], ln[2], ln[3], ln[4], ln[5], ln[6], ln[7]), file=fl)\n elif coord_and_index != []:\n print(data[4], file=fl)\n for ln in coord_and_index[ix]:\n print(lstyle3.format(ln[0], ln[1], ln[2], ln[3], ln[4], ln[5], ln[6], ln[7]), file=fl)\n elif coord_and_charge != []:\n print(data[6], file=fl)\n for ln in coord_and_charge[ix]:\n print(lstyle5.format(ln[0], ln[1], ln[2], ln[3], ln[4], ln[5]), file=fl)\n elif coord_molid_and_index != []:\n print(data[5], file=fl)\n for ln in coord_molid_and_index[ix]:\n print(lstyle4.format(ln[0], ln[1], ln[2], ln[3], ln[4], ln[5], ln[6], ln[7], ln[8]), file=fl)\n elif coord_and_molid != []:\n print(data[7], file=fl)\n for ln in coord_and_molid[ix]:\n print(lstyle6.format(ln[0], ln[1], ln[2], ln[3], ln[4], ln[5], ln[6]), file=fl)\n # Generate a topology file for RAPTOR.\n elif topology:\n # Trajectory data\n traj_data = coord_and_force[-1]\n\n # /\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\n # Read the input file for the MS-EVB code\n # 
/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\n # This tells us the atom types in RAPTOR related to the LAMMPS atom types\n with open(evbin) as data:\n evb = tuple([line.rstrip() for line in data])\n\n # Read the LAMMPS types and molecule numbers from the MS-EVB input\n lammps_type = {}\n reaction_mol_num = {}\n for i,line in enumerate(evb):\n if 'MODEL' in line:\n s1 = i + 3\n if 'Reaction' in line:\n m1 = i + 1\n if ': atom type' in line:\n s2 = i + 2\n if ': bond type' in line:\n e2 = i - 1\n \n # Store the reaction type(s)\n for data in evb[m1:]:\n if '#define' in data:\n tmp = data.split()\n reaction_mol_num[tmp[1]] = {}\n # Stop the loop if a blank line is encountered\n if data == '':\n break\n \n # Store the molecule names and numbers belonging to a reaction type\n # I assume that the naming convention of reactions has correspondence\n # with the naming of the molecular model\n for i,data in enumerate(evb[s1:m1]):\n # This part is pretty convoluted. At least it works.\n if '#define' in data:\n count = s1 + i + 1\n tmp = data.split()\n # Here we loop through the reaction types we stored above\n for reaction in reaction_mol_num.keys():\n if tmp[1] in reaction.split('_'):\n reaction_mol_num[reaction][tmp[1]] = {}\n nloop = 0\n # Here we look through lines greater and stash the molecule\n # number information. \n for line in evb[count:]:\n mol = line.split()\n if '#define' in mol:\n break\n elif mol == []:\n break\n else:\n reaction_mol_num[reaction][tmp[1]][mol[1]] = int(mol[2])\n\n # Store the LAMMPS types\n for data in evb[s2:e2]:\n if '#define' in data:\n tmp = data.split()\n lammps_type[tmp[1]] = int(tmp[2])\n\n # Clear out the tuple (free memory)\n evb = ()\n\n # /\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\n # Read the data file from the LAMMPS output\n # /\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\n # This tells us the atom topology information\n with open(datafile) as data:\n df = tuple([line.rstrip() for line in data])\n\n # Collect the 'Atoms' information from the data file.\n for i,line in enumerate(df):\n if 'Atoms' in line:\n s = i + 2\n elif 'Velocities' in line:\n e = i - 1\n elif 'Bonds' in line:\n s1 = i + 2\n elif 'Angles' in line:\n e1 = i - 1\n\n # Store the atom number and atom type.\n natoms = len(df[s:e])\n atomdata = {}\n for atom in df[s:e]:\n tmp = atom.split()\n atomdata[tmp[0]] = [int(tmp[2]),[]]\n\n # Also store the bond topology. Note that only only the \"central\"\n # atom in the molecule has bond information.\n for atom in df[s1:e1]:\n tmp = atom.split()\n atomdata[tmp[2]][1].append(int(tmp[3])) \n\n # Clear out the tuple (free memory)\n df = ()\n\n # What we have now\n # 1. lammps_type is a dictionary containing the correspondence between \n # RAPTOR atom types and LAMMPS atom ID numbers\n # 2. reaction_mol_num defines the correspondence between reaction names,\n # molecule names, and molecule numbers (\"kernels\").\n # 3. atomdata defines the atom number, its LAMMPS atom ID, and which atoms\n # it is bonded to (if it is a central atom in the molecule).\n # \n # This information can be used to print the bond topology file. 
The columns\n # in the topology file are:\n # atom number | molecule number | RAPTOR atom type\n\n # Molecule to atom name correspondence\n mol2atom = { 'OW' : 'H2O',\n 'HW' : 'H2O',\n 'OH' : 'H3O', \n 'HO' : 'H3O' }\n\n # Based on the atomtype of the central atom, it is now possible to determine\n # the types of the terminal atoms.\n ltop = [[0,0,0] for row in range(natoms)]\n for key in atomdata.keys():\n ltop[int(key)-1][0] = int(key)\n\n #fi = open('top.tmp', 'w')\n\n # Based on the bonding topology, we can determine the molecule number (\"kernel\").\n for key in atomdata.keys():\n # Identify central atoms. These contain bonding information.\n if atomdata[key][1] != []:\n #print(key, atomdata[key], file=fi)\n # List of bonded partners\n tmp = atomdata[key][1]\n # Type of the central atom\n ctype = atomdata[key][0]\n # Central atom\n # Here, we match ctype with what is contained in lammps_type.\n for key2 in lammps_type.keys():\n if lammps_type[key2] == ctype:\n mol = mol2atom[key2]\n # Subtraction is accounting for Python numbering.\n ltop[int(key)-1][1] = mol\n # Central atom is assumed to always have index 1 for convenience.\n # If this wasn't true, this part would be a lot more complicated.\n ltop[int(key)-1][2] = 1\n # Terminal atoms. Start the counter at 2 since the central atom is 1.\n count = 2\n for item in tmp:\n # Account for Python numbering\n i = item - 1\n # Terminal atom type to be compared with lammps_type\n ttype = atomdata[str(item)][0]\n for key3 in lammps_type.keys():\n if lammps_type[key3] == ttype:\n mol = mol2atom[key3]\n # Temporarily place the molecule name in the list.\n ltop[i][1] = mol\n # Place the counter in the list. This is trivial as\n # long as the terminal atoms are identical. We must\n # be careful in situations where this isn't true.\n ltop[i][2] = count\n count += 1\n\n # Finally we convert molecule strings to molecule numbers\n for i in range(len(ltop)):\n for reaction in reaction_mol_num.keys():\n for key in reaction_mol_num[reaction].keys():\n ltop[i][1] = reaction_mol_num[reaction][key][ltop[i][1]] \n\n # Output data to a file\n stop = 'lammps_msevb.top'\n top = open(stop, 'w')\n \n fmt = '{0:>4} {1:>2} {2:>2}'\n for i in range(len(ltop)):\n print(fmt.format(ltop[i][0], ltop[i][1], ltop[i][2]), file=top)\n elif cp2k:\n # Trajectory data\n traj_data = coord_and_force[snapshot]\n\n # Print out box bounds\n bb = box_bounds[snapshot]\n x = bb[0][1] - bb[0][0]\n y = bb[1][1] - bb[1][0]\n z = bb[2][1] - bb[2][0]\n fmt = ' ABC {0:7.5f} {1:7.5f} {2:7.5f}'\n print('&CELL')\n print(fmt.format(x,y,z))\n print('&END CELL')\n print('#')\n print('&COORD')\n lnstyle = ' {0:2} {1:10.5f} {2:10.5f} {3:10.5f}'\n for ln in traj_data:\n print(lnstyle.format(ln[1],ln[2],ln[3],ln[4]))\n print('&END COORD')", "title": "" }, { "docid": "b8477e9d7f1edbb350c4aa3147a9da9c", "score": "0.4306115", "text": "def create_subtop(self, label_type, label):\n from gmso.parameterization.molecule_utils import (\n molecule_angles,\n molecule_bonds,\n molecule_dihedrals,\n molecule_impropers,\n )\n\n of_group = True if label_type == \"group\" else False\n sites_dict = {\n site: (idx, site.clone())\n for idx, site in enumerate(self.iter_sites(label_type, label))\n }\n bonds_dict = {\n bond: tuple(\n sites_dict[bond.connection_members[i]][0] for i in range(2)\n )\n for bond in molecule_bonds(self, label, of_group)\n }\n\n angles_dict = {\n angle: tuple(\n sites_dict[angle.connection_members[i]][0] for i in range(3)\n )\n for angle in molecule_angles(self, label, of_group)\n }\n\n 
dihedrals_dict = {\n dihedral: tuple(\n sites_dict[dihedral.connection_members[i]][0] for i in range(4)\n )\n for dihedral in molecule_dihedrals(self, label, of_group)\n }\n\n impropers_dict = {\n improper: tuple(\n sites_dict[improper.connection_members[i]][0] for i in range(4)\n )\n for improper in molecule_impropers(self, label, of_group)\n }\n\n new_top = gmso.Topology(\n name=label if isinstance(label, str) else label[0]\n )\n\n for ref_site, new_site in sites_dict.items():\n new_top.add_site(new_site[1])\n for ref_conn, conn_idx in bonds_dict.items():\n bond = gmso.Bond(\n connection_members=[\n new_top.sites[conn_idx[i]] for i in range(2)\n ],\n bond_type=None\n if not ref_conn.connection_type\n else ref_conn.connection_type.clone(),\n )\n new_top.add_connection(bond)\n for ref_conn, conn_idx in angles_dict.items():\n angle = gmso.Angle(\n connection_members=[\n new_top.sites[conn_idx[i]] for i in range(3)\n ],\n angle_type=None\n if not ref_conn.connection_type\n else ref_conn.connection_type.clone(),\n )\n new_top.add_connection(angle)\n for ref_conn, conn_idx in dihedrals_dict.items():\n dihedral = gmso.Dihedral(\n connection_members=[\n new_top.sites[conn_idx[i]] for i in range(4)\n ],\n dihedral_type=None\n if not ref_conn.connection_type\n else ref_conn.connection_type.clone(),\n )\n new_top.add_connection(dihedral)\n for ref_conn, conn_idx in impropers_dict.items():\n improper = gmso.Improper(\n connection_members=[\n new_top.sites[conn_idx[i]] for i in range(4)\n ],\n improper_type=None\n if not ref_conn.connection_type\n else ref_conn.connection_type.clone(),\n )\n new_top.add_connection(improper)\n\n new_top.update_topology()\n return new_top", "title": "" }, { "docid": "861c27a581961250817921baa484d8fb", "score": "0.4301693", "text": "def topologytemplate_spec(request, template):\n return HttpResponse(template.specification, mimetype=\"text/plain\")", "title": "" }, { "docid": "2e5242a8f0213f785b0690639ec44fcf", "score": "0.42926726", "text": "def inject_label_matchers(self, expression, topology) -> str:\n if not topology:\n return expression\n if not self.path:\n logger.debug(\"`cos-tool` unavailable. Leaving expression unchanged: %s\", expression)\n return expression\n args = [str(self.path), \"transform\"]\n args.extend(\n [\"--label-matcher={}={}\".format(key, value) for key, value in topology.items()]\n )\n\n args.extend([\"{}\".format(expression)])\n # noinspection PyBroadException\n try:\n return self._exec(args)\n except subprocess.CalledProcessError as e:\n logger.debug('Applying the expression failed: \"%s\", falling back to the original', e)\n return expression", "title": "" }, { "docid": "555319af7520c988a93c5100170d8cc5", "score": "0.4291545", "text": "def _write_loader_script(path, state_name, package, top):\r\n if state_name.startswith(package):\r\n pkg_arg = ''\r\n else:\r\n pkg_arg = \", package='%s'\" % package\r\n\r\n if top:\r\n top_arg = ''\r\n else:\r\n top_arg = ', top_obj=False'\r\n\r\n out = open(path, 'w')\r\n out.write(\"\"\"\\\r\nimport os\r\nimport sys\r\nif not '.' 
in sys.path:\r\n sys.path.append('.')\r\n\r\ntry:\r\n from openmdao.main.api import Component, SAVE_CPICKLE\r\nexcept ImportError:\r\n print 'No OpenMDAO distribution available.'\r\n if __name__ != '__main__':\r\n print 'You can unzip the egg to access the enclosed files.'\r\n print 'To get OpenMDAO, please visit openmdao.org'\r\n sys.exit(1)\r\n\r\ndef load(**kwargs):\r\n '''Create object(s) from state file.'''\r\n return Component.load('%(name)s', SAVE_CPICKLE%(pkg)s%(top)s, **kwargs)\r\n\r\ndef main():\r\n '''Load state and run.'''\r\n model = load()\r\n model.run()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\"\"\" % {'name': state_name, 'pkg': pkg_arg, 'top': top_arg})\r\n out.close()", "title": "" }, { "docid": "334ad27ce5271dc9a26655e8f23fce5a", "score": "0.4276431", "text": "def main(server=None, input=None):\n if not input:\n input = {}\n\n try:\n # CUSTOM_SCRIPT00019\n #\n # Created by Matthew Misenhimer\n #\n def make_timestamp():\n #Makes a Timestamp for postgres\n import datetime\n now = datetime.datetime.now()\n return now.strftime(\"%Y-%m-%d %H:%M:%S\")\n \n def are_no_hackpipes_preceding(sob, ignore_code):\n #Checks to see if there are any manually inserted/non-pipeline Projects or Work Orders that lead in to the sob that are not completed yet\n #If there are incompleted hackpipe work orders or projs that precede the sob, then this will return false.\n #Ignore code is usually the code of the task just completed. We don't care about looking at it's status in determining this. \n boolio = True\n matcher = '' #This is the type of sob (PROJ or WORK_ORDER) that we care about looking at\n if 'PROJ' in sob.get('code'):\n matcher = 'PROJ'\n elif 'WORK_ORDER' in sob.get('code'):\n matcher = 'WORK_ORDER'\n pre_hacks_expr = \"@SOBJECT(twog/hackpipe_out['out_to','%s'])\" % sob.get('code') #See what hackpipes lead in to this sob\n pre_hacks = server.eval(pre_hacks_expr)\n for ph in pre_hacks:\n if matcher in ph.get('lookup_code') and ignore_code not in ph.get('lookup_code'): #If it's the type we care about and it isn't the main sob\n ph_task = server.eval(\"@SOBJECT(sthpw/task['lookup_code','%s'])\" % ph.get('lookup_code'))\n if ph_task:\n ph_task = ph_task[0]\n if ph_task.get('status') != 'Completed': #If it hasn't been completed, then there is an incomplete hackpipe preceding sob, so return false\n boolio = False\n return boolio\n \n def block_manual_status_adjust_for_inactive_hackup(sob, task):\n #This was created because people used to be able to change the status of inactive work orders\n #I think this is prevented in other ways now, but I am keeping it in just in case it is still needed.\n from pyasm.common import TacticException\n good = True\n if sob.get('creation_type') == 'hackup':\n if task.get('active') not in [True,'true','t',1,'1']:\n raise TacticException('This needs to be active in order to change the status')\n return good\n \n from pyasm.common import TacticException, Environment\n # input and server are assumed variables\n # define some contants here\n #print \"\\n\\nIN KICKOFF\"\n COMPLETE = 'Completed'\n READY = 'Ready'\n PENDING = 'Pending'\n sobj = input.get('sobject')\n this_process = sobj.get('process')\n this_lookup = sobj.get('lookup_code')\n sk = input.get('search_key')\n task_code = sobj.get('code')\n update_data = input.get('update_data') #These are the new values\n prev_data = {}\n old_status = ''\n if 'prev_data' in input.keys():\n prev_data = input.get('prev_data') #These are the old values\n if 'status' in prev_data.keys():\n old_status = 
prev_data.get('status')\n new_status = update_data.get('status')\n login = Environment.get_login()\n user_name = login.get_login()\n assigned_login_group = sobj.get('assigned_login_group')\n parent_obj = None\n title = None\n order = None\n if 'PROJ' in this_lookup:\n parent_obj = server.eval(\"@SOBJECT(twog/proj['code','%s'])\" % this_lookup)[0] #Parent Obj is the Proj attached to the task\n proj = parent_obj\n title = server.eval(\"@SOBJECT(twog/title['code','%s'])\" % parent_obj.get('title_code'))[0]\n order = server.eval(\"@SOBJECT(twog/order['code','%s'])\" % title.get('order_code'))[0] \n elif 'WORK_ORDER' in this_lookup:\n parent_obj = server.eval(\"@SOBJECT(twog/work_order['code','%s'])\" % this_lookup)[0] #Parent Obj is the Work Order attached to the task\n work_order = parent_obj\n proj = server.eval(\"@SOBJECT(twog/proj['code','%s'])\" % work_order.get('proj_code'))[0]\n title = server.eval(\"@SOBJECT(twog/proj['code','%s'].twog/title)\" % parent_obj.get('proj_code'))[0] \n order = server.eval(\"@SOBJECT(twog/order['code','%s'])\" % title.get('order_code'))[0] \n \n if 'PROJ' in this_lookup and title.get('priority_triggers') != 'No':\n #If the new status for this Proj is ready, then grab the priority attached to he proj and give it to the Title\n #This is to control priority order per department\n if new_status == 'Ready':\n server.update(title.get('__search_key__'), {'priority': parent_obj.get('priority')}, triggers=False)\n elif 'WORK_ORDER' in this_lookup:\n t_wo_completed = title.get('wo_completed') #This is for the completion ratio on title\n o_wo_completed = order.get('wo_completed') #This is for the completion ratio on order\n if new_status == COMPLETE:\n title_str = title.get('title') #This is for a potential alert/exception\n if title.get('episode') not in [None,'']:\n title_str = '%s: %s' % (title_str, title.get('episode'))\n #Block QC and Edel from completing their work orders if the TRT or TRT w/Textless are not filled in\n if 'qc' in assigned_login_group or 'edeliveries' in assigned_login_group:\n total_program_runtime = title.get('total_program_runtime')\n total_runtime_w_textless = title.get('total_runtime_w_textless')\n say_str = ''\n say_str2 = ''\n if total_program_runtime in [None,''] or total_runtime_w_textless in [None,'']:\n if total_program_runtime in [None,'']:\n say_str = 'Total Program Runtime has' \n if total_runtime_w_textless in [None,'']:\n if say_str == '': \n say_str = 'Total Runtime With Textless has'\n else:\n say_str = '%s and Total Runtime With Textless have' % (say_str[:-4]) \n say_str2 = \"%s (%s)'s %s not been filled. 
You must enter this data before trying to complete this work order.\" % (title_str, title.get('code'), say_str)\n if 'qc' in assigned_login_group:\n if total_program_runtime in [None,''] or total_runtime_w_textless in [None,'']:\n raise TacticException(say_str2)\n else:\n #They were filled in, so finish completing the task and send a note\n from pyasm.biz import Note\n from pyasm.search import Search\n title_obj2 = Search.get_by_search_key(title.get('__search_key__')) #This is the type of object required for Note creation\n note_text = '%s (%s) has been Passed and Completed by %s in QC' % (sobj.get('process'), this_lookup, user_name)\n note = Note.create(title_obj2, note_text, context='QC Completed', process='QC Completed')\n elif 'edeliveries' in assigned_login_group and (total_program_runtime in [None,''] or total_runtime_w_textless in [None,'']):\n raise TacticException(say_str2)\n #This section is turned off due to logistical problems with it. \n #It intended to block machine room, edit, and compression from completing a work order unless the pulled_blacks had been filled out.\n #if 'machine_room' in assigned_login_group or 'edit' in assigned_login_group or 'compression' in assigned_login_group:\n # pulled_blacks = title.get('pulled_blacks')\n # if pulled_blacks in [None,'','0']:\n # raise TacticException(\"%s (%s)'s pulled_blacks has not been filled, or is still '0'.\" % (title_str, title.get('code'))) \n t_wo_completed = t_wo_completed + 1\n o_wo_completed = o_wo_completed + 1\n #Update the completion ratios attached, since there were no blocking exceptions \n server.update(title.get('__search_key__'), {'wo_completed': t_wo_completed})\n server.update(order.get('__search_key__'), {'wo_completed': o_wo_completed})\n elif old_status == COMPLETE:\n t_wo_completed = t_wo_completed - 1\n o_wo_completed = o_wo_completed - 1\n #Reduce the completion ratio, since it was completed but has now been taken off that status\n server.update(title.get('__search_key__'), {'wo_completed': t_wo_completed})\n server.update(order.get('__search_key__'), {'wo_completed': o_wo_completed})\n #Still doing this, but don't know if it's neccessary anymore \n mmkay = block_manual_status_adjust_for_inactive_hackup(parent_obj, sobj)\n if new_status == COMPLETE and 'PROJ' not in this_lookup:\n #Make sure they have set the assigned person to the work order.\n if sobj.get('assigned') in [None,'']:\n task_assigned_expr = \"@GET(sthpw/task['code','%s'].assigned)\" % sobj.get('code') #MTM: Do I need to retrieve the task again, or can I just use the sobj's \"assigned\"? \n task_assigned = server.eval(task_assigned_expr)\n if task_assigned:\n task_assigned = task_assigned[0]\n if task_assigned in [None,'']:\n raise TacticException('Before completing a work order, someone must be assigned to it.')\n #Make sure they have added work hours. 
If not, error out.\n whs_expr = \"@SOBJECT(sthpw/work_hour['task_code','%s'])\" % task_code\n whs = server.eval(whs_expr)\n sum = 0\n for wh in whs:\n straight_time = wh.get('straight_time')\n if straight_time in [None,'']:\n straight_time = 0\n else:\n straight_time = float(straight_time)\n sum = float(sum) + straight_time\n sum = str(sum)\n if sum in ['0','',0,0.0]:\n raise TacticException('You need to save the hours you worked on this before you can set the status to \"Completed\".')\n \n now_timestamp = make_timestamp() \n #Since there have been no blocking exceptions, record the status change\n server.insert('twog/status_log', {'login': user_name, 'timestamp': now_timestamp, 'from_status': old_status, 'status': new_status, 'task_code': task_code, 'lookup_code': this_lookup, 'order_code': sobj.get('order_code'), 'title_code': sobj.get('title_code'), 'process': this_process})\n if new_status == COMPLETE:\n #Record the completion date on the work order, and take it off the BigBoard\n import datetime\n now = datetime.datetime.now()\n timestamp_str = '%s-%s-%s %s:%s:%s' % (now.year, now.month, now.day, now.hour, now.minute, now.second)\n updict = {'actual_end_date': timestamp_str}\n if 'WORK_ORDER' in this_lookup:\n updict['bigboard'] = False\n server.update(sk, updict)\n elif new_status not in ['Pending','Ready','Completed'] and 'WORK_ORDER' in this_lookup:\n server.update(server.build_search_key('sthpw/task', proj.get('task_code')), {'status': new_status}) #NEW\n if 'PROJ' in sobj.get('lookup_code'):\n #MTM: This annoying section is for passing Proj's their task's status.\n #I don't know if this is needed at all anymore. Will have to check other triggers and reports.\n #The \"tripwire\" stuff was just to keep it from infinitely passing statuses from proj to task, task to proj\n do_it = True\n if 'tripwire' in update_data.keys():\n if update_data.get('tripwire') == 'No Send Back': #?\n do_it = False\n server.update(input.get('search_key'), {'tripwire': ''}, triggers=False) #Empty the tripwire and do nothing\n server.update(proj.get('__search_key__'), {'tripwire': '', 'status': sobj.get('status')}, triggers=False) #?\n if do_it:\n if proj:\n server.update(proj.get('__search_key__'), {'status': new_status})\n if title.get('priority_triggers') != 'No':\n #Update Title Priority for On Hold Status, or having that status removed -- BEGIN\n if sobj.get('status') in ['On_Hold','On Hold']:\n title_priority = title.get('priority')\n server.update(title.get('__search_key__'), {'saved_priority': title_priority, 'priority': 200}, triggers=False) \n else:\n if old_status in ['On_Hold','On Hold']:\n saved_priority = title.get('saved_priority')\n server.update(title.get('__search_key__'), {'priority': saved_priority}, triggers=False) \n #Update Title Priority for On Hold Status, or having that status removed -- END\n \n #Update Title Priority for Client Response Status, or having that status removed -- BEGIN\n if sobj.get('status') == 'Client Response':\n title_priority = title.get('priority')\n crc = title.get('client_response_count')\n crc_num = 0\n if crc not in [None,'']:\n crc_num = int(crc)\n crc_num = crc_num + 1 \n server.update(title.get('__search_key__'), {'saved_priority': title_priority, 'priority': 300, 'client_response_count': crc_num}, triggers=False) \n else:\n if old_status == 'Client Response':\n saved_priority = title.get('saved_priority')\n crc = title.get('client_response_count')\n crc_num = 0\n if crc not in [None,'']:\n crc_num = int(crc)\n if crc_num > 0:\n crc_num = crc_num - 1 
\n server.update(title.get('__search_key__'), {'priority': saved_priority, 'client_response_count': crc_num}, triggers=False) \n #Update Title Priority for Client Response Status, or having that status removed -- END\n \n if sobj.get('status') in ['In_Progress','In Progress','DR In_Progress','DR In Progress', 'Amberfin01_In_Progress', 'Amberfin01 In Progress', 'Amberfin02_In_Progress', 'Amberfin02 In Progress','BATON In_Progress','BATON In Progress','Export In_Progress','Export In Progress','Buddy Check In_Progress','Buddy Check In Progress','Need Buddy Check','Completed'] and old_status not in ['In_Progress','DR In_Progress','DR In Progress','Amberfin01_In_Progress','Amberfin01 In Progress', 'Amberfin02_In_Progress','Amberfin02 In Progress','BATON In_Progress','BATON In Progress','Export In_Progress','Export In Progress','Buddy Check In_Progress','Buddy Check In Progress','Need Buddy Check','In Progress']:\n #Update the actual start date if they just set the status to 'In Progress'\n if sobj.get('actual_start_date') in ['',None]:\n now_timestamp = make_timestamp() \n server.update(sk, {'actual_start_date': now_timestamp})\n if sobj.get('status') in ['Ready','In_Progress','In Progress'] and 'WORK_ORDER' in sobj.get('lookup_code'):\n if title.get('client_status') != 'In Production':\n server.update(title.get('__search_key__'), {'client_status': 'In Production', 'status': 'In Production'})\n elif sobj.get('status') in ['Rejected','Fix Needed'] and 'WORK_ORDER' in sobj.get('lookup_code'):\n from pyasm.biz import Note\n from pyasm.search import Search\n server.insert('twog/production_error', {'error_type': sobj.get('status'), 'process': sobj.get('process'), 'work_order_code': sobj.get('lookup_code'), 'title': title.get('title'), 'episode': title.get('episode'), 'title_code': title.get('code'), 'order_code': order.get('code'), 'order_name': order.get('name'), 'po_number': order.get('po_number'), 'proj_code': proj.get('code'), 'scheduler_login': sobj.get('creator_login'), 'operator_login': user_name, 'login': user_name}) \n if sobj.get('status') == 'Rejected':\n server.update(title.get('__search_key__'), {'client_status': 'QC Rejected'})\n if title.get('priority_triggers') != 'No':\n server.update(title.get('__search_key__'), {'priority': 90}, triggers=False)\n title_obj2 = Search.get_by_search_key(title.get('__search_key__')) #This is the type of object required for Note creation\n note_text = '%s (%s) has been Rejected, as marked by %s' % (sobj.get('process'), this_lookup, user_name)\n note = Note.create(title_obj2, note_text, context='QC Rejected', process='QC Rejected')\n \n if sobj.get('status') == COMPLETE and title.get('status_triggers') != 'No':\n # Now we need to set the next task(s) statuses to 'Ready'\n parent = server.get_parent(sk)\n # Get all process information from the pipeline regarding processes linked to this process in the normal pipeline\n info = server.get_pipeline_processes_info(parent.get('__search_key__'), related_process=this_process)\n input_processes = info.get('input_processes')\n output_processes = info.get('output_processes')\n # this combines all other input_processes and this process\n # including this_process in case this process has more than 1 task\n ready = False\n if input_processes:\n input_processes.append(this_process)\n ready = True\n input_tasks = server.query('sthpw/task', filters = [('search_type', sobj.get('search_type')), ('search_id', sobj.get('search_id')), ('process', input_processes), ('title_code',sobj.get('title_code'))])\n for task in 
input_tasks:\n if task.get('status') != COMPLETE:\n ready = False\n else:\n ready = True\n #Now we need to check the manually entered work orders and projs \n #This section may be replaced with the are_no_hackpipes_preceding function, but for the sake of stability, I won't do it until I test it again\n hack_outs_expr = \"@SOBJECT(twog/hackpipe_out['out_to','%s'])\" % sobj.get('lookup_code')\n hack_outs = server.eval(hack_outs_expr)\n stype = ''\n stype_st = ''\n if 'PROJ' in sobj.get('lookup_code'):\n stype = 'PROJ'\n stype_st = 'twog/proj'\n elif 'WORK_ORDER' in sobj.get('lookup_code'):\n stype = 'WORK_ORDER'\n stype_st = 'twog/work_order'\n for ho in hack_outs:\n lookup_code = ho.get('lookup_code')\n if stype in lookup_code:\n hacktasks = server.eval(\"@SOBJECT(sthpw/task['lookup_code','%s'])\" % lookup_code)\n for hacktask in hacktasks:\n if hacktask.get('status') != COMPLETE:\n ready = False\n \n # make the next process ready\n if ready == True:\n output_tasks = server.query('sthpw/task', filters = [('search_type', sobj.get('search_type')), ('search_id', sobj.get('search_id')), ('process', output_processes), ('title_code',sobj.get('title_code'))])\n update_data = {}\n \n for task in output_tasks:\n if task.get('lookup_code') not in [None,'']:\n if task.get('status') == 'Pending':\n #Need to make sure other tasks leading into this one (an output task for the triggered task) are also complete - both ways, with pipeline and hackpipe - before allowing this status update \n \n out_info = server.get_pipeline_processes_info(parent.get('__search_key__'), related_process=task.get('process'))\n input_to_out = out_info.get('input_processes') \n if this_process in input_to_out:\n prc_idx = input_to_out.index(this_process)\n input_to_out.pop(prc_idx)\n ready2 = False\n if input_to_out:\n #input_to_out.append(task.get('process'))\n ts_st = ''\n if 'PROJ' in task.get('lookup_code'):\n ts_st = 'twog/proj'\n elif 'WORK_ORDER' in task.get('lookup_code'):\n ts_st = 'twog/work_order'\n tsob = server.eval(\"@SOBJECT(%s['code','%s'])\" % (ts_st, task.get('lookup_code')))[0] \n ready2 = are_no_hackpipes_preceding(tsob, sobj.get('lookup_code'))\n into_out_tasks = server.query('sthpw/task', filters = [('search_type', task.get('search_type')), ('search_id', task.get('search_id')), ('process', input_to_out)])\n for iotask in into_out_tasks:\n # If preceding tasks have not yet been set to 'Completed', do not change the next task's status\n if iotask.get('status') != COMPLETE:\n ready2 = False\n else:\n ready2 = True\n hacks_expr = \"@SOBJECT(twog/hackpipe_out['out_to','%s'])\" % task.get('lookup_code')\n hacks = server.eval(hacks_expr)\n for hack in hacks:\n lu_code = hack.get('lookup_code') \n hacktasks = server.eval(\"@SOBJECT(sthpw/task['lookup_code','%s'])\" % lu_code)\n for hacktask in hacktasks:\n if hacktask.get('status') != COMPLETE:\n ready2 = False\n if ready2 == True:\n update_data[task.get('__search_key__')] = { 'status': READY }\n if 'PROJ' in task.get('lookup_code'):\n get_proj_expr = \"@SOBJECT(twog/proj['code','%s'])\" % task.get('lookup_code')\n proj = server.eval(get_proj_expr)[0]\n # FIND ALL NON HACK WOS\n wos = server.eval(\"@SOBJECT(twog/work_order['proj_code','%s'])\" % proj.get('code'))\n for wo in wos: \n if wo.get('creation_type') not in ['hackpipe','hackup']:\n this_processer = wo.get('process')\n info2 = server.get_pipeline_processes_info(proj.get('__search_key__'), related_process=this_processer)\n input_processes2 = info2.get('input_processes')\n okayed = 
are_no_hackpipes_preceding(wo, sobj.get('lookup_code'))\n len_proc = 0\n if input_processes2 not in ['',{},[],None]:\n len_proc = len(input_processes2)\n if len_proc < 1 and okayed:\n task2 = server.eval(\"@SOBJECT(sthpw/task['code','%s'])\" % wo.get('task_code')) \n if task2:\n task2 = task2[0]\n if task2.get('status') == 'Pending':\n server.update(task2.get('__search_key__'), {'status': READY}) #?\n # FIND ALL HACK WOS HERE.........\n hack_dudes = server.eval(\"@SOBJECT(twog/hackpipe_out['lookup_code','%s'])\" % proj.get('code'))\n for ho in hack_dudes:\n ready3 = True\n out_to = ho.get('out_to')\n label = ''\n if 'PROJ' in out_to:\n label = 'twog/proj'\n elif 'WORK_ORDER' in out_to:\n label = 'twog/work_order'\n sob_guy = server.eval(\"@SOBJECT(%s['code','%s'])\" % (label, out_to))\n for sobby in sob_guy:\n ready3 = are_no_hackpipes_preceding(sobby, sobj.get('lookup_code'))\n if ready3:\n htask = server.eval(\"@SOBJECT(sthpw/task['lookup_code','%s'])\" % out_to)\n if htask:\n htask = htask[0]\n if htask.get('status') == 'Pending':\n server.update(htask.get('__search_key__'), {'status': READY})\n \n hackers = server.eval(\"@SOBJECT(twog/hackpipe_out['lookup_code','%s'])\" % sobj.get('lookup_code'))\n for hack in hackers:\n if stype in hack.get('out_to'):\n out_to = hack.get('out_to')\n out_sob = server.eval(\"@SOBJECT(%s['code','%s'])\" % (stype_st, out_to))\n if out_sob:\n out_sob = out_sob[0] \n ready4 = are_no_hackpipes_preceding(out_sob, sobj.get('lookup_code'))\n if ready4:\n tasker = server.eval(\"@SOBJECT(sthpw/task['code','%s'])\" % out_sob.get('task_code'))\n if tasker:\n tasker = tasker[0]\n if tasker.get('status') == 'Pending':\n server.update(tasker.get('__search_key__'), {'status': READY})\n if stype == 'PROJ':\n # Need to do the same thing here, looking at pipeline and hackpipe\n hack_wos = server.eval(\"@SOBJECT(twog/hackpipe_out['lookup_code','%s'])\" % out_to)\n for hos in hack_wos:\n if 'PROJ' not in hos.get('out_to'):\n ho_wo = server.eval(\"@SOBJECT(twog/work_order['code','%s'])\" % hos.get('out_to'))\n if ho_wo:\n ho_wo = ho_wo[0]\n ready5 = are_no_hackpipes_preceding(ho_wo, out_sob.get('code'))\n if ready5:\n ho_wo_task_sk = server.build_search_key('sthpw/task', ho_wo.get('task_code'))\n #7/10/2014 --- WAIT. WTF. THERE IS NO STATUS ON WOS, JUST THE TASK..... 
MTMMTMMTM!!!\n if ho_wo.get('status') == 'Pending':\n server.update(ho_wo_task_sk, {'status': READY})\n # NEED TO LOOK AT PROJ PIPELINE NOW\n proj_sk = server.build_search_key('twog/proj', out_to)\n pipe_wos = server.eval(\"@SOBJECT(twog/work_order['proj_code','%s'])\" % out_to)\n for pwos in pipe_wos:\n if pwos.get('creation_type') not in ['hackpipe','hackup']:\n info2 = server.get_pipeline_processes_info(proj_sk, related_process=pwos.get('process'))\n if 'input_processes' in info2.keys():\n input_processes2 = info2.get('input_processes')\n whack_says = are_no_hackpipes_preceding(pwos, out_to)\n # If there are no input processes, it must be a work order that should also be set to ready - as long as hackpipe says it's ok\n len_proc2 = 0\n if input_processes2 not in ['',{},[],None]:\n len_proc2 = len(input_processes2)\n if len_proc2 < 1 and whack_says:\n wtask_code = pwos.get('task_code')\n # Get the task sobject associated with this work order\n wtask = server.eval(\"@SOBJECT(sthpw/task['code','%s'])\" % wtask_code)\n if wtask:\n wtask = wtask[0]\n # If the task's status has not been touched yet ('Pending') and active is set to true, update the status with 'Ready'\n if wtask.get('status') == 'Pending':\n wdata = {}\n wdata['status'] = 'Ready'\n if wtask.get('status') == 'Pending':\n server.update(wtask.get('__search_key__'), wdata)\n \n \n \n #\n # HERE NEED TO FIND ANY OTHER TASKS THIS GOES 'OUT_TO' and make sure they don't also depend on another task's status (if so, make sure it is completed)\n #\n \n # this is optional, for simplicity, turn off triggers for these updates\n if update_data != {} and title.get('priority_triggers') != 'No':\n #make title priority the proj priority, if proj is becoming \"Ready\"\n for tkey in update_data.keys():\n record = update_data.get(tkey)\n tkcode = tkey.split('code=')[1]\n if 'status' in record.keys():\n if record.get('status') == 'Ready':\n ttt = server.eval(\"@GET(sthpw/task['code','%s'].lookup_code)\" % tkcode)[0]\n if 'PROJ' in ttt:\n pjj = server.eval(\"@SOBJECT(twog/proj['code','%s'])\" % ttt)[0]\n proj_title_code = pjj.get('title_code')\n proj_prio = pjj.get('priority')\n server.update(server.build_search_key('twog/title',proj_title_code), {'priority': proj_prio}, triggers=False)\n server.update_multiple(update_data, triggers=False) #? Should triggers=False?\n \n \n # Now see if all wos under proj or all projs under title are completed. 
If so, make their parent's status completed\n all_wos_completed = False\n all_wos_pending = False\n prj = None\n if new_status in [COMPLETE,PENDING]: \n if 'WORK_ORDER' in this_lookup:\n wo = server.eval(\"@SOBJECT(twog/work_order['code','%s'])\" % sobj.get('lookup_code'))\n wo = wo[0]\n other_wotasks_expr = \"@SOBJECT(twog/proj['code','%s'].twog/work_order.WT:sthpw/task)\" % wo.get('proj_code')\n other_wo_tasks = server.eval(other_wotasks_expr)\n all_wos_completed = True\n all_wos_pending = True\n if new_status == PENDING:\n all_wos_completed = False\n else:\n all_wos_pending = False\n for owt in other_wo_tasks:\n if owt.get('lookup_code') != wo.get('code'):\n if owt.get('status') != COMPLETE:\n all_wos_completed = False\n if owt.get('status') != PENDING:\n all_wos_pending = False\n prj = server.eval(\"@SOBJECT(twog/proj['code','%s'])\" % wo.get('proj_code'))\n if len(prj) > 0:\n prj = prj[0]\n else:\n prj = None\n if (all_wos_completed or all_wos_pending) and prj not in [None,'']:\n if title.get('status_triggers') != 'No' or all_wos_pending == True:\n prj_task = server.eval(\"@SOBJECT(sthpw/task['lookup_code','%s'])\" % prj.get('code'))\n if prj_task:\n prj_task = prj_task[0]\n server.update(prj_task.get('__search_key__'), {'status': new_status})\n elif 'PROJ' in this_lookup:\n prj = server.eval(\"@SOBJECT(twog/proj['code','%s'])\" % this_lookup)\n if prj:\n prj = prj[0]\n else:\n prj = None\n \n all_projs_completed = True\n all_projs_pending = True\n all_titles_completed = False\n all_titles_pending = False\n if prj not in [None,'']:\n title_proj_tasks = server.eval(\"@SOBJECT(twog/title['code','%s'].twog/proj.PT:sthpw/task)\" % prj.get('title_code'))\n for tpt in title_proj_tasks:\n if tpt.get('status') != COMPLETE:\n all_projs_completed = False\n if tpt.get('status') != PENDING:\n all_projs_pending = False\n title_updated = False\n if all_projs_completed:\n title_sk = server.build_search_key('twog/title', prj.get('title_code'))\n if title.get('priority_triggers') != 'No' and title.get('status_triggers') != 'No':\n server.update(title_sk, {'status': COMPLETE, 'bigboard': False, 'priority': 5000})\n titles_completed = order.get('titles_completed')\n title_codes_completed = order.get('title_codes_completed')\n if title.get('code') not in title_codes_completed:\n if titles_completed in [None,'']:\n titles_completed = 0\n else:\n titles_completed = int(titles_completed)\n titles_completed = titles_completed + 1\n if title_codes_completed == '':\n title_codes_completed = title.get('code')\n else:\n title_codes_completed = '%s,%s' % (title_codes_completed, title.get('code'))\n server.update(order.get('__search_key__'), {'titles_completed': titles_completed, 'title_codes_completed': title_codes_completed}) \n title_updated = True\n all_titles_completed = True\n title = server.eval(\"@SOBJECT(twog/title['code','%s'])\" % prj.get('title_code'))\n if title and title_updated:\n title = title[0]\n other_titles = server.eval(\"@SOBJECT(twog/order['code','%s'].twog/title)\" % title.get('order_code'))\n for ot in other_titles:\n if title.get('code') != ot.get('code'):\n if ot.get('status') != COMPLETE:\n all_titles_completed = False\n else:\n all_titles_completed = False\n if all_projs_pending:\n title_sk = server.build_search_key('twog/title', prj.get('title_code'))\n if title.get('priority_triggers') != 'No' and title.get('status_triggers') != 'No':\n server.update(title_sk, {'status': '', 'bigboard': False})\n title_codes_completed = order.get('title_codes_completed')\n if title.get('code') in 
title_codes_completed:\n title_codes_completed = title_codes_completed.replace(',%s' % title.get('code'),'').replace('%s,' % title.get('code'),'').replace('%s' % title.get('code'),'')\n titles_completed = order.get('titles_completed')\n if titles_completed in [None,'']:\n titles_completed = 0\n else:\n titles_completed = int(titles_completed) - 1\n server.update(order.get('__search_key__'), {'titles_completed': titles_completed, 'title_codes_completed': title_codes_completed})\n title_updated = True\n all_titles_pending = True\n title = server.eval(\"@SOBJECT(twog/title['code','%s'])\" % prj.get('title_code'))\n if title and title_updated:\n title = title[0]\n other_titles = server.eval(\"@SOBJECT(twog/order['code','%s'].twog/title)\" % title.get('order_code'))\n for ot in other_titles:\n if title.get('code') != ot.get('code'):\n if ot.get('status') != '':\n all_titles_pending = False\n else:\n all_titles_pending = False\n if all_titles_pending:\n server.update(server.build_search_key('twog/order', title.get('order_code')), {'needs_completion_review': False}) \n if all_titles_completed:\n server.update(server.build_search_key('twog/order', title.get('order_code')), {'needs_completion_review': True}) \n \n #print \"LEAVING KICKOFF\"\n except AttributeError as e:\n traceback.print_exc()\n print str(e) + '\\nMost likely the server object does not exist.'\n raise e\n except KeyError as e:\n traceback.print_exc()\n print str(e) + '\\nMost likely the input dictionary does not exist.'\n raise e\n except Exception as e:\n traceback.print_exc()\n print str(e)\n raise e", "title": "" }, { "docid": "fec6355e967e199b1e27d6e5414498ca", "score": "0.42743507", "text": "def _substitute(self, tpl):\n p = self.params\n # We do not yet know NFS_SERVER_IP_ADDRESS so leave it a template.\n # Same with DB_IDENTIFIER\n return tpl.render(CLUSTERNAME=p['kubernetes_cluster_name'],\n OAUTH_PROVIDER=self.encode_value(\n 'oauth_provider'),\n OAUTH_CLIENT_ID=self.encode_value(\n 'oauth_client_id'),\n OAUTH_SECRET=self.encode_value(\n 'oauth_secret'),\n OAUTH_CALLBACK_URL=self.encode_value(\n 'oauth_callback_url'),\n GITHUB_ORGANIZATION_WHITELIST=self.encode_value(\n 'github_organization_whitelist'),\n CILOGON_GROUP_WHITELIST=self.encode_value(\n 'cilogon_group_whitelist'),\n GITHUB_ORGANIZATION_DENYLIST=self.encode_value(\n 'github_organization_denylist'),\n CILOGON_GROUP_DENYLIST=self.encode_value(\n 'cilogon_group_denylist'),\n SESSION_DB_URL=self.encode_value(\n 'session_db_url'),\n JUPYTERHUB_CRYPTO_KEY=self.encode_value(\n 'crypto_key'),\n CONFIGPROXY_AUTH_TOKEN=self.encode_value(\n 'configproxy_auth_token'),\n CLUSTER_IDENTIFIER=p[\n 'kubernetes_cluster_namespace'],\n SHARED_VOLUME_SIZE=p[\n 'nfs_volume_size'],\n PHYSICAL_SHARED_VOLUME_SIZE=p[\n 'volume_size'],\n ROOT_CHAIN_PEM=self.encode_file('tls_root_chain'),\n DHPARAM_PEM=self.encode_value(\"dhparams\"),\n TLS_CRT=self.encode_file('tls_cert'),\n TLS_KEY=self.encode_file('tls_key'),\n HOSTNAME=p['hostname'],\n FQDN=p['hostname'],\n CA_CERTIFICATE=self.encode_file('beats_ca'),\n BEATS_CERTIFICATE=self.encode_file('beats_cert'),\n BEATS_KEY=self.encode_file('beats_key'),\n SHIPPER_NAME=p['log_shipper_name'],\n RABBITMQ_PAN_PASSWORD=self.encode_value(\n 'rabbitmq_pan_password'),\n RABBITMQ_TARGET_HOST=p['rabbitmq_target_host'],\n RABBITMQ_TARGET_VHOST=p['rabbitmq_target_vhost'],\n DEBUG=p['debug'],\n PREPULLER_IMAGE_LIST=p['prepuller_image_list'],\n PREPULLER_NO_SCAN=p['prepuller_no_scan'],\n PREPULLER_REPO=p['prepuller_repo'],\n 
PREPULLER_OWNER=p['prepuller_owner'],\n PREPULLER_IMAGE_NAME=p['prepuller_image_name'],\n PREPULLER_EXPERIMENTALS=p['prepuller_experimentals'],\n PREPULLER_DAILIES=p['prepuller_dailies'],\n PREPULLER_WEEKLIES=p['prepuller_weeklies'],\n PREPULLER_RELEASES=p['prepuller_releases'],\n PREPULLER_PORT=p['prepuller_port'],\n PREPULLER_SORT_FIELD=p['prepuller_sort_field'],\n PREPULLER_COMMAND=p['prepuller_command'],\n PREPULLER_NAMESPACE=p['prepuller_namespace'],\n PREPULLER_MINUTE=p['prepuller_minute'],\n LAB_REPO_HOST=p['lab_repo_host'],\n LAB_REPO_OWNER=p['lab_repo_owner'],\n LAB_REPO_NAME=p['lab_repo_name'],\n LAB_IMAGE=p['lab_image'],\n LAB_SELECTOR_TITLE=p['lab_selector_title'],\n LAB_IDLE_TIMEOUT=p['lab_idle_timeout'],\n LAB_MEM_LIMIT=p['lab_mem_limit'],\n LAB_CPU_LIMIT=p['lab_cpu_limit'],\n LAB_MEM_GUARANTEE=p['lab_mem_guarantee'],\n LAB_CPU_GUARANTEE=p['lab_cpu_guarantee'],\n LAB_NODEJS_MAX_MEM=p['lab_nodejs_max_mem'],\n TINY_MAX_CPU=p['tiny_max_cpu'],\n MB_PER_CPU=p['mb_per_cpu'],\n LAB_SIZE_RANGE=p['lab_size_range'],\n AUTO_REPO_URLS=p['auto_repo_urls'],\n ALLOW_DASK_SPAWN=p['allow_dask_spawn'],\n MAX_DASK_WORKERS=p['max_dask_workers'],\n SIZE_INDEX=p['size_index'],\n HUB_ROUTE=p['hub_route'],\n FIREFLY_ADMIN_PASSWORD=self.encode_value(\n 'firefly_admin_password'),\n FIREFLY_REPLICAS=p['firefly_replicas'],\n FIREFLY_CONTAINER_MEM_LIMIT=p[\n 'firefly_container_mem_limit'],\n FIREFLY_CONTAINER_CPU_LIMIT=p[\n 'firefly_container_cpu_limit'],\n FIREFLY_MAX_JVM_SIZE=p['firefly_max_jvm_size'],\n FIREFLY_UID=p['firefly_uid'],\n FIREFLY_ROUTE=p['firefly_route'],\n JS9_ROUTE=p['js9_route'],\n API_ROUTE=p['api_route'],\n TAP_ROUTE=p['tap_route'],\n SODA_ROUTE=p['soda_route'],\n EXTERNAL_INSTANCE_URL=p['external_instance_url'],\n EXTERNAL_FIREFLY_URL=p['external_firefly_url'],\n EXTERNAL_JS9_URL=p['external_js9_url'],\n EXTERNAL_API_URL=p['external_api_url'],\n EXTERNAL_TAP_URL=p['external_tap_url'],\n EXTERNAL_SODA_URL=p['external_soda_url'],\n EXTERNAL_URL=p['external_soda_url'],\n RESTRICT_DASK_NODES=p['restrict_dask_nodes'],\n RESTRICT_LAB_NODES=p['restrict_lab_nodes'],\n MAX_HTTP_HEADER_SIZE=p['max_http_header_size'],\n DB_IDENTIFIER='{{DB_IDENTIFIER}}',\n NFS_SERVER_IP_ADDRESS='{{NFS_SERVER_IP_ADDRESS}}',\n )", "title": "" }, { "docid": "6c20f2bb9733fea4788200595a686f03", "score": "0.4263419", "text": "def tree(input_file, output_file):\n\n\ttree=\"muscle -maketree -in \"+input_file+\" -out \"+output_file+ \\\n\t\t\" -cluster neighborjoining \"+ \" 2>/dev/null \"\n\tos.system(tree)", "title": "" }, { "docid": "f3eae7d3f04468a6936ad7e5af7cbbdf", "score": "0.4260095", "text": "def Initialise_plan():\n rp = rospkg.RosPack()\n pkg_path = rp.get_path('eyantra_task')\n arg_file_path = pkg_path + '/config/ur5_2_trajectories/'\n\n saved_plans_to_homes = { 'zero': {'home1': 'zero_to_home1.yaml' , \n 'home3':'zero_to_home3.yaml'},\n 'red' : {'home1': 'red_to_home1.yaml' , \n 'home2': 'red_to_home2.yaml' , \n 'home3' : 'red_to_home3.yaml'} ,\n 'yellow': {'home1': 'yellow_to_home1.yaml' , \n 'home2': 'yellow_to_home2.yaml' , \n 'home3' : 'yellow_to_home3.yaml'},\n 'green': { 'home1': 'green_to_home1.yaml' , \n 'home2': 'green_to_home2.yaml' , \n 'home3' : 'green_to_home3.yaml'}}\n\n saved_homes_to_bin = { 'home1' : { 'red' : 'home1_to_red.yaml' , \n 'yellow': 'home1_to_yellow.yaml' } ,\n 'home2' : { 'red' : 'home2_to_red.yaml' , \n 'yellow': 'home2_to_yellow.yaml' } ,\n 'home3' : { 'green' : 'home3_to_green.yaml' } }\n\n\n loaded_home = { 'zero': { 'home1': ' ' , 'home3': ' '} , \n 'red' : { 'home1': ' ' 
, 'home2': ' ' , 'home3': ' ' } ,\n 'yellow' : { 'home1': ' ' , 'home2': ' ' , 'home3': ' ' } ,\n 'green': { 'home1': ' ' , 'home2': ' ' , 'home3': ' ' } }\n\n loaded_pkg = { 'home1': {'red' : ' ' , 'green': ' ' } , 'home2':{'red':' ' , 'green': ' '} , 'home3': { 'green':' '} }\n\n saved_plans = [ saved_plans_to_homes , saved_homes_to_bin ]\n loaded_plans = [ loaded_home , loaded_pkg ]\n\n for i in range(len(saved_plans)):\n temp_keys = list(saved_plans[i].keys())\n for keys in temp_keys:\n temp_keys2 = list(saved_plans[i][keys].keys())\n for keys2 in temp_keys2:\n file_name = saved_plans[i][keys][keys2]\n file_path = arg_file_path + file_name\n with open(file_path, 'r') as file_open:\n loaded_plan = yaml.load(file_open)\n loaded_plans[i][keys][keys2] = loaded_plan\n\n rospy.loginfo(\"Loaded the plans....\")\n return loaded_plans", "title": "" }, { "docid": "49162f00f04e935c6f5bf0caf8df634f", "score": "0.42598808", "text": "def generate(c):\n check_main_jsonnet_here()\n res = c.run(\"tk eval .\", hide=True)\n data = json.loads(res.stdout)\n for stack_name, files_cfg in data.items():\n for fname, content in files_cfg.items():\n path = DEPLOY_PATH / stack_name / fname\n path.parent.mkdir(parents=True, exist_ok=True)\n path.write_text(content, encoding=\"utf-8\")\n print(path)", "title": "" }, { "docid": "d8e15e15a5bafb64267fd149eb21c64d", "score": "0.42580685", "text": "def _generateScripts(simconfig, curcomp):\n from supercomputer import JobConfig\n from datetime import time\n from os.path import join\n from shutil import copy\n #Configure job\n jconfig = JobConfig()\n jconfig.nodes = simconfig.node_count\n jconfig.mpn = simconfig.mpn\n jconfig.omp = simconfig.omp_thds\n jconfig.label = simconfig.run_label\n jconfig.time = time(4,0,0) \n jconfig.exe = simconfig.exe\n jconfig.inputs = simconfig.inputs\n\n #Make strings\n outpath = join(curcomp.myconfig.projs_base, simconfig.project_label, 'Runs',\n simconfig.run_label, 'run')\n tpath = join(curcomp.myconfig.projs_base, simconfig.project_label, 'Templates',\n 'titan.run.template')\n\n #Generate job script\n curcomp.generateJobScript(outpath, jconfig, tpath)\n\n #Copy process script over\n proc_temp = join(curcomp.myconfig.projs_base, simconfig.project_label, 'Templates',\n 'process.titan.template')\n proc_dst = join(curcomp.myconfig.projs_base, simconfig.project_label, 'Runs',\n simconfig.run_label, 'run', 'process.titan')\n copy(proc_temp, proc_dst)", "title": "" }, { "docid": "b3c8eee520b6fd06d2613bbe4cf74624", "score": "0.42566642", "text": "def convert_from_backup(backup_file, output_file=None):\n if not os.path.exists(backup_file):\n raise ValueError(\"%s does not exists.\" % backup_file)\n\n tree = ET.parse(backup_file)\n root = tree.getroot()\n\n script_output = _generate_query(root)\n imports_str = _generate_imports(import_list)\n boilerplate_start_str = \"\"\"#!/usr/bin/env python\n\nfrom ucsmsdk.ucshandle import UcsHandle\n\nhandle = UcsHandle(ip=\"\", username=\"\", password=\"\")\nhandle.login()\n\n\"\"\"\n\n boilerplate_end_str = \"\"\"handle.logout()\"\"\"\n\n final_str = boilerplate_start_str + imports_str + script_output + boilerplate_end_str\n if output_file:\n _outfile = open(output_file, 'w')\n _outfile.write(final_str)\n _outfile.close()\n else:\n print(final_str)", "title": "" }, { "docid": "24cd93a75cd645c67a51996d385bd54f", "score": "0.42557374", "text": "def main():\n parser = argparse.ArgumentParser(usage=\"python generate_scene_desc.py --pb=<path> --tmpl=<scene-template>\")\n parser.add_argument('--tmpl', type=str, 
help='Scene template json file.')\n parser.add_argument('--pb', type=str, help='Path to the protobuf file.')\n parser.add_argument('--out', type=str, required=True, help='Output directory')\n parser.add_argument('--data-root', type=str, default='./', help='Data root.')\n\n args = parser.parse_args()\n print(args)\n\n protobuf_path = os.path.expanduser(args.pb)\n data_root_path = os.path.expanduser(args.data_root)\n out_dir = os.path.expanduser(args.out)\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n trajectories = sn.Trajectories()\n try:\n with open(protobuf_path, 'rb') as f:\n trajectories.ParseFromString(f.read())\n except IOError:\n print('Scenenet protobuf data not found at location:{0}'.format(data_root_path))\n\n print('Number of trajectories:{0}'.format(len(trajectories.trajectories)))\n\n for traj_no, traj in enumerate(trajectories.trajectories):\n layout_type = sn.SceneLayout.LayoutType.Name(traj.layout.layout_type)\n layout_path = traj.layout.model\n\n print('=' * 20)\n print('Render path:{0}'.format(traj.render_path))\n print('Layout type:{0} path:{1}'.format(layout_type, layout_path))\n print('=' * 20)\n obj_filename = 'trajectory_{}.obj'.format(traj_no)\n print(obj_filename)\n scene_desc = {\"file-format\": {\"type\": \"diffrend\", \"version\": \"0.2\"},\n \"glsl\": {\"vertex\": \"../shaders/phong/vs.glsl\", \"fragment\": \"../shaders/phong/fs.glsl\"},\n \"camera\": {\n \"proj_type\": \"perspective\",\n \"viewport\": [0, 0, 640, 480],\n \"fovy\": 1.04,\n \"focal_length\": 1.0,\n \"eye\": [3.5, 2.0, 3.5, 1.0],\n \"up\": [0.0, 1.0, 0.0, 0.0],\n \"at\": [0.0, 1.0, 0.0, 1.0],\n \"near\": 0.1, \"far\": 1000.0\n },\n \"lights\": {\"pos\": [],\n \"color_idx\": [],\n \"attenuation\": [],\n \"ambient\": [0.01, 0.01, 0.01]\n },\n \"colors\": [],\n \"materials\": {\n \"albedo\": [[0.0, 0.0, 0.0], [0.1, 0.1, 0.1], [0.2, 0.2, 0.2], [0.5, 0.5, 0.5],\n [0.8, 0.8, 0.8], [0.44, 0.55, 0.64]],\n \"coeffs\": [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0],\n [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]]\n },\n \"objects\": {\"obj\": [{\"path\": \"../objs/scenenetrgbd/\" + obj_filename, \"material_idx\": 4,\n \"scale\": [1.0, 1.0, 1.0]\n }\n ]\n },\n \"tonemap\": {\"type\": \"gamma\", \"gamma\": [1.0]}\n }\n for instance in traj.instances:\n if instance.instance_type == sn.Instance.LIGHT_OBJECT:\n light = instance.light_info\n light_type = sn.LightInfo.LightType.Name(light.light_type)\n light_intensity = [light.light_output.r, light.light_output.g, light.light_output.b]\n light_pos = [light.position.x, light.position.y, light.position.z]\n print('=' * 20)\n print('Light type:{0}'.format(light_type))\n print(light)\n print('emission: ', light_intensity)\n print('pos: ', light_pos)\n scene_desc[\"colors\"].append(light_intensity)\n scene_desc['lights'][\"color_idx\"].append(len(scene_desc[\"colors\"]) - 1)\n scene_desc['lights'][\"pos\"].append(light_pos)\n scene_desc['lights'][\"attenuation\"].append([0.0, 0.0, 0.4])\n\n print(scene_desc)\n outfilename = 'scenenet_{:04}.json'.format(traj_no)\n print(outfilename)\n with open(os.path.join(out_dir, outfilename), 'w') as fid:\n json.dump(scene_desc, fid)", "title": "" }, { "docid": "b2bad0ed2603a1d075b46b3e09430b00", "score": "0.42413738", "text": "def write_tcl(options, A, B, pairs, tag=\"\", center=False):\n AA = deepcopy(A)\n BB = deepcopy(B)\n if pairs != None and len(pairs) > 0:\n for p in pairs:\n ia = p[0]\n ib = p[1]\n AA[ia].pos = p[3]\n BB[ib].pos = p[4]\n\n with open(\"POSCAR_A.%s\" % tag, \"w\") as f: 
pcwrite.poscar(AA, f, vasp5=True)\n with open(\"POSCAR_B.%s\" % tag, \"w\") as f: pcwrite.poscar(BB, f, vasp5=True)\n write_xyz(options, AA, \"A.%s\" % tag,options.output_tiles)\n write_xyz(options, BB, \"B.%s\" % tag,options.output_tiles)\n\n fout = file(\"plotpairs.%s.tcl\" % tag, \"w\")\n write_struct(fout, AA, \"A.%s.xyz\" % tag, 0, center)\n write_struct(fout, BB, \"B.%s.xyz\" %tag, 1, center)\n\n linestr = \"draw color green; graphics top line {%f %f %f} {%f %f %f} width 3 style dashed\\n\"\n# sum = 0\n for p in pairs:\n print p\n ia = p[0]\n ib = p[1]\n p1 = AA.scale * AA[ia].pos\n p2 = BB.scale * BB[ib].pos\n## double check! print \"TCL types: %s %s\" % (A[ia].type, B[ib].type)\n# val = npl.norm(np.array(p1)-np.array(p2))\n# sum += val\n# print \"write_tcl pairing: \", p1, p2, val\n fout.write(linestr % (p1[0], p1[1], p1[2], p2[0], p2[1], p2[2]))\n# print \"write_tcl total pairing dist: \", sum\n\n fout.close()", "title": "" }, { "docid": "61d95e842da2c0d243564c422879ccf9", "score": "0.4233163", "text": "def create_topology(nc, num):\n rolls = ['Input', 'Output']\n for i in range(num):\n NEAT.GeneticOperation.add_hidden(nc, rolls[np.random.randint(0, 2)])", "title": "" }, { "docid": "187e0437b755b557a9ee96c680de76da", "score": "0.42321697", "text": "def generate_test_topology():\n config = FBOSSTestTopology(\"rsw1aj.20.snc1\")\n\n \"\"\" This example has two testing hosts \"\"\"\n f154 = TestHost(\"fboss154.20.snc1\")\n f154.add_interface(\"eth0\", \"2401:db00:111:400a::154\")\n config.add_host(f154)\n\n f155 = TestHost(\"fboss155.20.snc1\")\n f155.add_interface(\"eth0\", \"2401:db00:111:400a::155\")\n config.add_host(f155)\n return config", "title": "" }, { "docid": "cd1168a769b7395c168ad1580cb3e005", "score": "0.4226332", "text": "def build_tree(script):\n return ast.parse(script,mode=\"exec\")", "title": "" }, { "docid": "4c3b837db91a3e356985d1b8d94ed16d", "score": "0.4226303", "text": "def notify_topology_update(self):\n name = 'kytos/topology.updated'\n event = KytosEvent(name=name, content={'topology': self.topology})\n self.controller.buffers.app.put(event)", "title": "" }, { "docid": "2c4bdaac2dae6e2288734e103617b789", "score": "0.422377", "text": "def _build_network_definition_using_topology(self):\n log.debug(\"%r: _build_network_definition_using_topology: %s\",\n self._platform_id, self._topology)\n\n def build(platform_id, children):\n \"\"\"\n Returns the root NNode for the given platform_id with its\n children according to the given list.\n \"\"\"\n nnode = NNode(platform_id)\n if self._agent_device_map:\n self._set_attributes_and_ports_from_agent_device_map(nnode)\n\n log.debug('Created NNode for %r', platform_id)\n\n for subplatform_id in children:\n subplatform_children = self._topology.get(subplatform_id, [])\n sub_nnode = build(subplatform_id, subplatform_children)\n nnode.add_subplatform(sub_nnode)\n\n return nnode\n\n children = self._topology.get(self._platform_id, [])\n return build(self._platform_id, children)", "title": "" }, { "docid": "9e8f498922156d0d2c106d55511a4497", "score": "0.4215529", "text": "def do_topo_subgraph(self, args):\n\n # logger.info(\"Topo initialized with sec IDs %s\" % sec_ids)\n\n # delimiter of node in dot file\n delim_node = '_'\n\n args_cleaned = re.sub(r\"\\s+\", ' ', args).strip()\n # Show subgraphs if given no argments\n if (args_cleaned == ''):\n if len(self.spp_topo.subgraphs) == 0:\n print(\"No subgraph.\")\n else:\n for label, subg in self.spp_topo.subgraphs.items():\n print('label: %s\\tsubgraph: \"%s\"' % (label, subg))\n 
else: # add or del\n tokens = args_cleaned.split(' ')\n # Add subgraph\n if tokens[0] == 'add':\n if len(tokens) == 3:\n label = tokens[1]\n subg = tokens[2]\n if ',' in subg:\n subg = re.sub(r'%s' % delim_node, ':', subg)\n subg = re.sub(r\",\", \";\", subg)\n\n # TODO(yasufum) add validation for subgraph\n self.spp_topo.subgraphs[label] = subg\n print(\"Add subgraph '%s'\" % label)\n else:\n print(\"Invalid syntax '%s'!\" % args_cleaned)\n # Delete subgraph\n elif ((tokens[0] == 'del') or\n (tokens[0] == 'delete') or\n (tokens[0] == 'remove')):\n del(self.spp_topo.subgraphs[tokens[1]])\n print(\"Delete subgraph '%s'\" % tokens[1])\n\n else:\n print(\"Ivalid subcommand '%s'!\" % tokens[0])", "title": "" }, { "docid": "d4d7ab3893642a4825fc02c383858933", "score": "0.4205685", "text": "def get_topology(self):\n return json.dumps(self.topology.to_json())", "title": "" }, { "docid": "b139a4ba887e5b907890a7a41d3efc90", "score": "0.4204748", "text": "def main():\n args = prepare_argparser(allowed_formats)\n inp_format, inp_loc = filter_dict(\"input\", args.__dict__)\n out_format, out_loc = filter_dict(\"output\", args.__dict__)\n output_nodes = args.output_nodes.split(',')\n convert(inp_format, inp_loc, out_format, out_loc, output_nodes,\n args.ngbackend)\n print('Converted the model. Exiting now')", "title": "" }, { "docid": "5ff53d6dca043bfefcbf28508ebded68", "score": "0.42006487", "text": "def create_vm(self, name, conn, template):\n xml = \"\"\n tree = ElementTree.parse(template)\n root = tree.getroot()\n tree.find('.//name').text = name\n xml = ElementTree.tostring(root, encoding='utf8', method='xml')\n vm = conn.defineXML(xml)\n return vm", "title": "" }, { "docid": "71eb8d4147fea5ad60498aab0fff6c5c", "score": "0.4198723", "text": "def generate(self, namespace):", "title": "" }, { "docid": "c875bd57eca71458b3ba5dcaa68f9c42", "score": "0.4195698", "text": "def create_root(parser: ConfigParser):\n slsoptions = parser[\"sls\"]\n\n sls_source = slsoptions[\"sls_source\"]\n mount_point = Path(slsoptions[\"mount_point\"])\n disk_path = slsoptions[\"disk_path\"]\n tarfile = slsoptions[\"tarfile\"]\n newfs = str(Path(sls_source, \"tools\", \"newfs_sls\", \"newfs_sls\"))\n\n bashcmd([newfs, disk_path])\n bashcmd([\"mount\", \"-t\", \"slsfs\", disk_path, str(mount_point)])\n bashcmd([\"tar\", \"-xf\", tarfile, \"-C\", mount_point])\n bashcmd([\"mount\", \"-t\", \"devfs\", \"devfs\", str(mount_point / \"dev\")])", "title": "" }, { "docid": "0d4138f4f892dfe00691614705265d02", "score": "0.41942257", "text": "def j2template (self, template_file):\n expansion = {}\n for host in self.hosts():\n expansion[host] = ansible.utils.template.template_from_file (\n None,\n template_file,\n self.hostvars(host)\n )\n return expansion", "title": "" }, { "docid": "c9b4f1364a1f9d99f912b951ca1bfc8f", "score": "0.41930586", "text": "def CASE102( self, main ):\n main.HA.startTopology( main )", "title": "" }, { "docid": "9975a53c433dd8b35f917e1896375275", "score": "0.4192809", "text": "def __init__(self,itpname,groname):\n Topology.__init__(self)\n fid = open(itpname)\n topology = fid.readlines()\n fid.close()\n fid = open(groname)\n grofile = fid.readlines()\n grodata = grofile[2:(len(grofile)-1)]\n box = grofile[len(grofile)-1].split()\n self.box = [float(box[0]),float(box[1]),float(box[2])]\n fid.close()\n flag = 'notes'\n for line in topology:\n if line.strip() == '[ moleculetype ]':\n flag = 'moltype'\n continue\n if line.strip() == '[ atoms ]':\n flag = 'atoms'\n continue\n if line.strip() == '[ bonds ]':\n flag = 
'bonds'\n continue\n if line.strip() == '[ constraints ]':\n flag = 'cons'\n continue\n if line.strip() == '[ angles ]':\n flag = 'angles'\n continue\n if line.strip() == '[ dihedrals ]':\n flag = 'dihs'\n continue\n if flag == 'notes':\n self.title += line\n else:\n if line[0] == ';' or line == '\\n' or line[0] == '#':\n continue\n else:\n spline = line.split()\n if flag == 'moltype':\n self.moltype[0] = spline[0]\n self.moltype[1] = int(spline[1])\n elif flag == 'atoms':\n resNo = int(spline[2])\n resname = spline[3]\n beadname = spline[4]\n beadno = int(spline[0])\n beadtype = spline[1]\n gline = grodata[beadno-1]\n spgline = gline.split()\n pos = np.array([float(spgline[3]),float(spgline[4]),\n float(spgline[5])])\n vel = np.array([0.,0.,0.])\n A = Bead(resNo,resname,beadname,beadno,pos,vel,\n beadtype)\n self.atomlist.append(A)\n elif flag == 'bonds':\n try:\n (beads,params,notes) = self.__bangle__(spline,2)\n except:\n pdb.set_trace()\n B = Bond(beads,params,notes)\n self.bondlist.append(B)\n elif flag == 'cons':\n (beads,params,notes) = self.__bangle__(spline,2)\n C = Bond(beads,params,notes)\n self.conlist.append(C)\n elif flag == 'angles':\n (beads,params,notes) = self.__bangle__(spline,3)\n A = Angle(beads,params,notes)\n self.anglist.append(A)\n elif flag == 'dihs':\n (beads,params,notes) = self.__bangle__(spline,4)\n D = Dihedral(beads,params,notes)\n self.dihlist.append(D)", "title": "" }, { "docid": "61a9f042ff6c709c4f5d9380a9ec8f6a", "score": "0.4184373", "text": "def transform_object(obj_name, t=None, r=None, s=None):\n t = \"\" if t is None else (\"T=(%f %f %f)\" % (t[0], t[1], t[2]))\n r = \"\" if r is None else (\"R=(%f %f %f)\" % (r[0], r[1], r[2]))\n s = \"\" if s is None else (\"S=(%f %f %f)\" % (s[0], s[1], s[2]))\n msg = (\"TransformObject {name} {t} {r} {s}\"\n .format(name=obj_name, t=t, r=r, s=s))\n connection.send_message(msg)", "title": "" }, { "docid": "e9ef855f1ce211e92cd665286f68e7b3", "score": "0.41660273", "text": "def main():\n args = prepare_argparser(allowed_formats)\n inp_format, inp_loc = filter_dict(\"input\", args.__dict__)\n out_format, out_loc = filter_dict(\"output\", args.__dict__)\n output_nodes = args.output_nodes.split(',')\n extra_params = parse_extra_params_string(args.extra_params)\n convert(inp_format, inp_loc, out_format, out_loc, output_nodes,\n args.ng_backend, args.device_id, extra_params)\n print('Converted the model. 
Exiting now')", "title": "" }, { "docid": "c12e7aa9a130cfe410569818f50af000", "score": "0.41633734", "text": "def t2_mapping(filename):\n\n scan_key = filename.split('-')[0]\n mkdir(scan_key)\n command = 'mv {}-* {};' \\\n 'cd {};' \\\n 'fslsplit {}-5T2s echo -t;' \\\n ''.format(scan_key, scan_key, scan_key, scan_key)\n for i in range(1,5):\n command += f'fslmaths echo{i:04} -add 0 echo{(i-1):04} -odt float;'\n command += 'gunzip echo0*;' \\\n 't2_map_mask_float 4 40 60 80 100;' \\\n 'gzip chi2.nii mask.nii t2vol.nii pdw.nii;' \\\n 'bet echo0000 brain -S;' \\\n 'fslmaths brain_mask -fillh26 brain_mask -odt char;' \\\n 'fslmaths t2vol -mas brain_mask -abs t2vol1;' \\\n 'fslmaths t2vol1 -thr 250 -bin mask250;' \\\n 'fslmaths brain_mask -sub mask250 -ero chi2mask -odt char;' \\\n 'rm echo0* brain.nii.gz brain_skull.nii.gz mask250.nii.gz t2vol1.nii.gz;' \\\n 'mv chi2.nii.gz ../../QA/chi2/{}-chi2.nii.gz;' \\\n 'mv mask.nii.gz ../../t2s/masks/{}-headmask.nii.gz;' \\\n 'mv t2vol.nii.gz ../../t2s/raw/{}-rawT2.nii.gz;' \\\n 'mv brain_mask.nii.gz ../../t2s/masks/{}-brainmask.nii.gz;' \\\n 'mv chi2mask.nii.gz ../../QA/chi2/{}-chi2mask.nii.gz;' \\\n 'mv pdw.nii.gz ../../pdw/raw/{}-rawPDw.nii.gz;' \\\n 'fslmaths ../../pdw/raw/{}-rawPDw -mas ../../t2s/masks/{}-headmask ../../pdw/{}-pdw;' \\\n 'fslmaths ../../t2s/raw/{}-rawT2 -mas ../../t2s/masks/{}-headmask -thr 0 ../../t2s/{}-t2;' \\\n 'mv *-5T2s.nii.gz ..;' \\\n 'cd ..;' \\\n 'rmdir {}' \\\n ''.format(scan_key, scan_key, scan_key, scan_key, scan_key,\n scan_key, scan_key, scan_key, scan_key, scan_key,\n scan_key, scan_key, scan_key)\n\n result = run(command, stdout=PIPE, stderr=STDOUT, check=True, shell=True)\n if result.stdout.decode('utf-8') != '':\n with open(f'{scan_key}.log', 'wb') as logfile:\n logfile.write(result.stdout)\n return", "title": "" }, { "docid": "eda91de728bf59c213eaef2fd349e3d3", "score": "0.4162229", "text": "def save_topology(name):\n if not request.is_json:\n return json.dumps('{\"error\": \"gt was not a JSON request\"}'), 400\n topology = request.get_json()\n with open(join(settings.TOPOLOGY_DIR, name + '.json'), 'w') as outfile:\n json.dump(topology, outfile)\n return json.dumps({'response': 'Saved'}), 201", "title": "" }, { "docid": "0ab9b4eee4d0582845eaacf04541b042", "score": "0.41589352", "text": "def write_bd_tcl(tclFileName, force=False):\n params = []\n if force:\n params.append('-force')\n return \"write_bd_tcl %s %s\" % (' '.join(params), tclFileName)", "title": "" }, { "docid": "00e4e791123a8163d4e227767c7283d2", "score": "0.41488856", "text": "def setrun(claw_pkg='geoclaw'):\n#------------------------------\n\n from clawpack.clawutil import data\n\n assert claw_pkg.lower() == 'geoclaw', \"Expected claw_pkg = 'geoclaw'\"\n\n num_dim = 2\n\n rundata = data.ClawRunData(claw_pkg, num_dim)\n\n topofile = 'topos/TetonLarge.topo'\n\n #------------------------------------------------------------------\n # GeoClaw specific parameters:\n #------------------------------------------------------------------\n rundata = setgeo(rundata)\n\n #------------------------------------------------------------------\n # Standard Clawpack parameters to be written to claw.data:\n # (or to amr2ez.data for AMR)\n #------------------------------------------------------------------\n clawdata = rundata.clawdata # initialized when rundata instantiated\n\n\n # Set single grid parameters first.\n # See below for AMR parameters.\n\n\n # ---------------\n # Spatial domain:\n # ---------------\n\n # Number of space dimensions:\n clawdata.num_dim = 
num_dim\n\n m_topo,n_topo,xllcorner,yllcorner,cellsize = tools.read_topo_data(topofile)\n\n\n # Topo info (TetonDamLatLong.topo)\n # m_topo = 4180\n # n_topo = 1464\n # xllcorner = -112.390734400000\n # yllcorner = 43.581746970335\n # cellsize = 0.000277729665\n\n # Derived info from the topo map\n mx_topo = m_topo - 1\n my_topo = n_topo - 1\n xurcorner = xllcorner + cellsize*mx_topo\n yurcorner = yllcorner + cellsize*my_topo\n ll_topo = np.array([xllcorner, yllcorner])\n ur_topo = np.array([xurcorner, yurcorner])\n\n print(\"\")\n print(\"Topo domain\")\n print(\"%-12s (%14.8f, %12.8f)\" % (\"Lower left\",ll_topo[0],ll_topo[1]))\n print(\"%-12s (%14.8f, %12.8f)\" % (\"Upper right\",ur_topo[0],ur_topo[1]))\n print(\"\")\n\n dims_topo = ur_topo - ll_topo\n\n # Try to match aspect ratio of topo map\n clawdata.num_cells[0] = 54\n clawdata.num_cells[1] = 54\n\n dim_topo = ur_topo - ll_topo\n mdpt_topo = ll_topo + 0.5*dim_topo\n\n dim_comp = 0.975*dim_topo # Shrink domain inside of given bathymetry.\n\n clawdata.lower[0] = mdpt_topo[0] - dim_comp[0]/2.0\n clawdata.upper[0] = mdpt_topo[0] + dim_comp[0]/2.0\n\n clawdata.lower[1] = mdpt_topo[1] - dim_comp[1]/2.0\n clawdata.upper[1] = mdpt_topo[1] + dim_comp[1]/2.0\n\n print(\"\")\n print(\"Computational domain\")\n print(\"%-12s (%14.8f, %12.8f)\" % (\"Lower left\",clawdata.lower[0],clawdata.lower[1]))\n print(\"%-12s (%14.8f, %12.8f)\" % (\"Upper right\",clawdata.upper[0],clawdata.upper[1]))\n print(\"\")\n\n print(\"Approximate aspect ratio : {0:16.8f}\".format(float(clawdata.num_cells[0])/clawdata.num_cells[1]))\n print(\"Actual aspect ratio : {0:16.8f}\".format(dims_topo[0]/dims_topo[1]))\n\n # print \"[{0:20.12f},{1:20.12f}]\".format(*clawdata.lower)\n # print \"[{0:20.12f},{1:20.12f}]\".format(*clawdata.upper)\n\n dims_computed = np.array([clawdata.upper[0]-clawdata.lower[0], clawdata.upper[1]-clawdata.lower[1]])\n print(\"Computed aspect ratio : {0:20.12f}\".format(dims_computed[0]/dims_computed[1]))\n\n print(\"\")\n print(\"Details in km : \") \n\n lon = np.array([clawdata.lower[0],clawdata.upper[0]])\n lat = np.array([clawdata.lower[1],clawdata.upper[1]])\n d = tools.compute_distances(lon,lat)\n \n # ---------------\n # Size of system:\n # ---------------\n\n # Number of equations in the system:\n clawdata.num_eqn = 3\n\n # Number of auxiliary variables in the aux array (initialized in setaux)\n clawdata.num_aux = 3\n\n # Index of aux array corresponding to capacity function, if there is one:\n clawdata.capa_index = 2\n\n # -------------\n # Initial time:\n # -------------\n\n clawdata.t0 = 0.0\n\n\n # Restart from checkpoint file of a previous run?\n # Note: If restarting, you must also change the Makefile to set:\n # RESTART = True\n # If restarting, t0 above should be from original run, and the\n # restart_file 'fort.chkNNNNN' specified below should be in\n # the OUTDIR indicated in Makefile.\n\n clawdata.restart = False # True to restart from prior results\n clawdata.restart_file = 'fort.chk00006' # File to use for restart data\n\n # -------------\n # Output times:\n #--------------\n\n # Specify at what times the results should be written to fort.q files.\n # Note that the time integration stops after the final output time.\n # The solution at initial time t0 is always written in addition.\n\n clawdata.output_style = 1\n\n if clawdata.output_style == 1:\n # Output nout frames at equally spaced times up to tfinal:\n n_hours = 2.0\n frames_per_minute = 60.0/5.0 # Frames every 5 seconds\n clawdata.num_output_times = 
int(frames_per_minute*60*n_hours) # Plot every 10 seconds\n clawdata.tfinal = 60*60*n_hours\n clawdata.output_t0 = True # output at initial (or restart) time?\n\n elif clawdata.output_style == 2:\n # Specify a list of output times.\n clawdata.output_times = [0.5, 1.0]\n\n elif clawdata.output_style == 3:\n # Output every iout timesteps with a total of ntot time steps:\n clawdata.output_step_interval = 1\n clawdata.total_steps = 1\n clawdata.output_t0 = True\n\n\n clawdata.output_format = 'ascii' # 'ascii' or 'netcdf'\n\n clawdata.output_q_components = 'all' # could be list such as [True,True]\n clawdata.output_aux_components = 'none' # could be list\n clawdata.output_aux_onlyonce = True # output aux arrays only at t0\n\n\n\n # ---------------------------------------------------\n # Verbosity of messages to screen during integration:\n # ---------------------------------------------------\n\n # The current t, dt, and cfl will be printed every time step\n # at AMR levels <= verbosity. Set verbosity = 0 for no printing.\n # (E.g. verbosity == 2 means print only on levels 1 and 2.)\n clawdata.verbosity = 1\n\n\n\n # --------------\n # Time stepping:\n # --------------\n\n # if dt_variable==1: variable time steps used based on cfl_desired,\n # if dt_variable==0: fixed time steps dt = dt_initial will always be used.\n clawdata.dt_variable = True\n\n # Initial time step for variable dt.\n # If dt_variable==0 then dt=dt_initial for all steps:\n clawdata.dt_initial = 0.0001\n\n # Max time step to be allowed if variable dt used:\n clawdata.dt_max = 1e+99\n\n # Desired Courant number if variable dt used, and max to allow without\n # retaking step with a smaller dt:\n clawdata.cfl_desired = 0.75\n clawdata.cfl_max = 1.0\n\n # Maximum number of time steps to allow between output times:\n clawdata.steps_max = 5000\n\n\n\n\n # ------------------\n # Method to be used:\n # ------------------\n\n # Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters\n clawdata.order = 2\n\n # Use dimensional splitting? 
(not yet available for AMR)\n clawdata.dimensional_split = 'unsplit'\n\n # For unsplit method, transverse_waves can be\n # 0 or 'none' ==> donor cell (only normal solver used)\n # 1 or 'increment' ==> corner transport of waves\n # 2 or 'all' ==> corner transport of 2nd order corrections too\n clawdata.transverse_waves = 2\n\n # Number of waves in the Riemann solution:\n clawdata.num_waves = 3\n\n # List of limiters to use for each wave family:\n # Required: len(limiter) == num_waves\n # Some options:\n # 0 or 'none' ==> no limiter (Lax-Wendroff)\n # 1 or 'minmod' ==> minmod\n # 2 or 'superbee' ==> superbee\n # 3 or 'mc' ==> MC limiter\n # 4 or 'vanleer' ==> van Leer\n clawdata.limiter = ['mc', 'mc', 'mc']\n\n clawdata.use_fwaves = True # True ==> use f-wave version of algorithms\n\n # Source terms splitting:\n # src_split == 0 or 'none' ==> no source term (src routine never called)\n # src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,\n # src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.\n clawdata.source_split = 'godunov'\n\n\n # --------------------\n # Boundary conditions:\n # --------------------\n\n # Number of ghost cells (usually 2)\n clawdata.num_ghost = 2\n\n # Choice of BCs at xlower and xupper:\n # 0 => user specified (must modify bcN.f to use this option)\n # 1 => extrapolation (non-reflecting outflow)\n # 2 => periodic (must specify this at both boundaries)\n # 3 => solid wall for systems where q(2) is normal velocity\n\n clawdata.bc_lower[0] = 'extrap'\n clawdata.bc_upper[0] = 'extrap'\n\n clawdata.bc_lower[1] = 'extrap'\n clawdata.bc_upper[1] = 'extrap'\n\n # Specify when checkpoint files should be created that can be\n # used to restart a computation.\n\n clawdata.checkpt_style = 0\n\n if clawdata.checkpt_style == 0:\n # Do not checkpoint at all\n pass\n\n elif clawdata.checkpt_style == 1:\n # Checkpoint only at tfinal.\n pass\n\n elif clawdata.checkpt_style == 2:\n # Specify a list of checkpoint times.\n clawdata.checkpt_times = [0.1,0.15]\n\n elif clawdata.checkpt_style == 3:\n # Checkpoint every checkpt_interval timesteps (on Level 1)\n # and at the final time.\n clawdata.checkpt_interval = 5\n\n\n # -----------------------------------------------\n # AMR parameters:\n # -----------------------------------------------\n amrdata = rundata.amrdata\n\n maxlevel = 7\n\n amrdata.amr_levels_max = maxlevel # Set to 3 for best results\n amrdata.refinement_ratios_x = [2]*7\n amrdata.refinement_ratios_y = [2]*7\n amrdata.refinement_ratios_t = [2]*7\n # rundata.tol = -1\n # rundata.tolsp = 0.001\n\n # Specify type of each aux variable in amrdata.auxtype.\n # This must be a list of length maux, each element of which is one of:\n # 'center', 'capacity', 'xleft', or 'yleft' (see documentation).\n\n amrdata.aux_type = ['center','capacity','yleft']\n\n\n # Flag using refinement routine flag2refine rather than richardson error\n amrdata.flag_richardson = False # use Richardson?\n amrdata.flag2refine = True\n amrdata.regrid_interval = 3\n amrdata.regrid_buffer_width = 2\n amrdata.clustering_cutoff = 0.700000\n amrdata.verbosity_regrid = 0\n\n # -----------------------------------------------\n # INL Regions\n # Regions to be refined :\n # (1) Refine initial reservoir to level 4\n # (otherwise, we won't resolve valley, and\n # won't fill the reservoir properly)\n # (2) Refine around nuclear power plant (indicated by gauge\n # 100, 101, ..., 115, below)\n # (3) Computational domain, with maxlevel=4\n #\n # To specify regions of refinement 
append lines of the form\n # regions.append([minlevel,maxlevel,t1,t2,x1,x2,y1,y2])\n\n # -----------------------------------------------\n regions = rundata.regiondata.regions\n\n # Region containing initial reservoir\n regions.append([maxlevel,maxlevel, 0, 1.e10,-111.543,-111.24,43.88, 43.965])\n\n # Box containing gauge location locations\n\n# xll = [-111.64, 43.913661] # From email\n# xur = [-111.60, 43.92] # from email\n# region_lower, region_upper,_ = tools.region_coords(xll,xur,\n# clawdata.num_cells,\n# clawdata.lower,\n# clawdata.upper)\n#\n# regions.append([maxlevel,maxlevel,0, 1e10, region_lower[0],region_upper[0],\n# region_lower[1],region_upper[1]])\n\n # Computational domain. With exception of region above, don't go beyond level 4\n regions.append([0,maxlevel-1,0, 1e10, clawdata.lower[0],clawdata.upper[0],\n clawdata.lower[1],clawdata.upper[1]])\n\n # -------------------------------------------------------\n # INL Gauges\n # -- Set gauges at Teton City and Wilford\n # -- Remaining gauges build border around power plant\n #\n # For gauges append lines of the form [gaugeno, x, y, t1, t2]\n # -------------------------------------------------------\n\n # Wilford\n xc,yc = [-111.672222,43.914444]\n rundata.gaugedata.gauges.append([1,xc,yc,0.,clawdata.tfinal]) # Wilford\n\n # Teton City\n xc,yc = [-111.669167,43.887778]\n rundata.gaugedata.gauges.append([2,xc,yc,0.,clawdata.tfinal]) # Teton City\n\n # Power plant, with border constructed of 4*m gauges\n # Start at SW corner; build gauges in counter-clockwise order in a\n # square around the region [xll,xur].\n\n# m = 2 # Gauge spacing along one edge (m=4 --> edge divided into four sections)\n# gauge_counter = 100\n#\n# # South West corner of power plant\n# xll = [-111.623926, 43.913661] # From email\n#\n# # North East corner of power plant\n# xur = [-111.620150, 43.916382] # from email\n#\n# s = np.linspace(0,1.,m+1)\n# for i in range(0,m):\n# x = xll[0] + (xur[0] - xll[0])*s[i]\n# rundata.gaugedata.gauges.append([gauge_counter,x,xll[1],0.,clawdata.tfinal])\n# gauge_counter = gauge_counter + 1\n#\n# for i in range(0,m):\n# y = xll[1] + (xur[1] - xll[1])*s[i]\n# rundata.gaugedata.gauges.append([gauge_counter,xur[0],y,0.,clawdata.tfinal])\n# gauge_counter = gauge_counter + 1\n#\n# for i in range(0,m):\n# x = xur[0] + (xll[0] - xur[0])*s[i]\n# rundata.gaugedata.gauges.append([gauge_counter,x,xur[1],0.,clawdata.tfinal])\n# gauge_counter = gauge_counter + 1\n#\n# for i in range(0,m):\n# y = xur[1] + (xll[1] - xur[1])*s[i]\n# rundata.gaugedata.gauges.append([gauge_counter,xll[0],y,0.,clawdata.tfinal])\n# gauge_counter = gauge_counter + 1\n\n\n # -------------------------------------------------------\n # For developers\n # -- Toggle debugging print statements:\n # -------------------------------------------------------\n amrdata.dprint = False # print domain flags\n amrdata.eprint = False # print err est flags\n amrdata.edebug = False # even more err est flags\n amrdata.gprint = False # grid bisection/clustering\n amrdata.nprint = False # proper nesting output\n amrdata.pprint = False # proj. 
of tagged points\n amrdata.rprint = False # print regridding summary\n amrdata.sprint = False # space/memory output\n amrdata.tprint = True # time step reporting each level\n amrdata.uprint = False # update/upbnd reporting\n\n\n return rundata\n # end of function setrun\n # ----------------------", "title": "" }, { "docid": "0522bf7c5e5b77e0cef1cfb67b4a936a", "score": "0.4146013", "text": "def script(self):\n text = []\n\n # Load waveforms\n if self.waveforms:\n par_string = \" \".join([f\"constants.{wfm.name}={wfm.name}\" for wfm in self.waveforms])\n text.append(\n f\".exec\\n\"\n f\"load {self.wfm_file}\\n\"\n f\"let constants.tm=tm {par_string}\\n\"\n f\".endc\"\n )\n\n # Declare all subcircuits\n for name,subckt in self.subcircuits.items():\n text.append(subckt.script())\n # Declare models\n for name, model in self.models.items():\n line = [\".model \", name, ' ',model[\"type\"],'(']\n pams = []\n for k,v in model[\"params\"].items():\n pams.append(\"%s=%s\" %(str(k),str(v)))\n line.append(','.join(pams))\n line.append(')')\n text.append(''.join(line))\n # Declare components\n for k,comp in self.components.items():\n text.append(comp.script())\n # Extra scripts\n if len(self._extrascript)>0:\n text.append(self._extrascript)\n return \"\\n\".join(text)", "title": "" }, { "docid": "5c5dea2a65f2331c88844a437b30ccfb", "score": "0.4136075", "text": "def tasks_merge_cluster_and_release(self):\n self.show_step(1) # Revert snapshot\n self.env.revert_snapshot('pre_provision_ubuntu_slaves_3')\n graph_type = 'custom_graph'\n\n self.show_step(2) # Create cluster\n cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__)\n rel_id = self.fuel_web.get_cluster_release_id(cluster_id)\n self.move_ubuntu_target_image(rel_id, cluster_id)\n\n self.show_step(3) # Upload 'custom_graph' tasks to release\n rel_tasks_filename = 'release_custom_tasks.yaml'\n local_tasks_file = os.path.join(os.path.dirname(fuelweb_test.__file__),\n 'config_templates',\n rel_tasks_filename)\n with open(local_tasks_file, 'r') as yaml_file:\n release_tasks_yaml_data = yaml.load(yaml_file)\n upload_tasks_path = '/tmp/{}'.format(rel_tasks_filename)\n\n self.ssh_manager.upload_to_remote(\n ip=self.ssh_manager.admin_ip,\n source=local_tasks_file,\n target=upload_tasks_path)\n upload_tasks_cmd = 'fuel2 graph upload -e {cluster_id} -t ' \\\n '{graph_type} -f {path}'.format(\n cluster_id=cluster_id,\n graph_type=graph_type,\n path=upload_tasks_path\n )\n self.ssh_manager.execute_on_remote(\n ip=self.ssh_manager.admin_ip,\n cmd=upload_tasks_cmd)\n\n self.show_step(4) # Upload 'custom_graph' tasks to cluster\n c_tasks_filename = 'custom_graph_tasks.yaml'\n local_tasks_file = os.path.join(os.path.dirname(fuelweb_test.__file__),\n 'config_templates',\n c_tasks_filename)\n with open(local_tasks_file, 'r') as yaml_file:\n cluster_tasks_yaml_data = yaml.load(yaml_file)\n upload_tasks_path = '/tmp/{}'.format(rel_tasks_filename)\n\n self.ssh_manager.upload_to_remote(\n ip=self.ssh_manager.admin_ip,\n source=local_tasks_file,\n target=upload_tasks_path)\n upload_tasks_cmd = 'fuel2 graph upload -e {cluster_id} -t ' \\\n '{graph_type} -f {path}'.format(\n cluster_id=cluster_id,\n graph_type=graph_type,\n path=upload_tasks_path\n )\n self.ssh_manager.execute_on_remote(\n ip=self.ssh_manager.admin_ip,\n cmd=upload_tasks_cmd)\n\n self.show_step(5) # Download 'custom_graph' deployment graph\n custom_tasks = \\\n self.fuel_web.client.get_custom_cluster_deployment_tasks(\n cluster_id,\n graph_type)\n\n self.show_step(6) # 'custom_graph' is a 
merge of release and cluster.\n generated_names = set([t['task_name'] for t in custom_tasks])\n uploaded_names = set(\n [t['id'] for t in release_tasks_yaml_data] +\n [t['id'] for t in cluster_tasks_yaml_data])\n diff = generated_names - uploaded_names\n assert_equal(diff, set([]), 'Tasks are not result of merge!')\n\n self.show_step(7) # Create snapshot 'tasks_diff'\n self.env.make_snapshot('tasks_diff')", "title": "" }, { "docid": "bc5b467462eca59fc358e848492095e1", "score": "0.41351083", "text": "def config_composer(self, model, hostname, junos_on_box_version):\n if (\"SRX3\" in model) or (\"vsrx\" in model):\n template_filename = \"SRX_template.j2\"\n network_parameter_filename = \"SRX_networkParameters.yaml\"\n elif \"EX\" in model:\n template_filename = \"EX_template.j2\"\n network_parameter_filename = \"EX_networkParameters.yaml\"\n complete_path = os.path.join(os.getcwd(), 'Config')\n ENV = jinja2.Environment(loader=jinja2.FileSystemLoader(complete_path))\n template = ENV.get_template(template_filename)\n with open(complete_path + \"/\" + network_parameter_filename) as yamlfile:\n dict = yaml.load(yamlfile) # yaml file is loaded as a dictionary with key value pairs\n addition = {\"hostname\": hostname, \"version\": junos_on_box_version}\n dict.update(addition)\n # print dict\n content = template.render(dict)\n # print content\n target = open(\"Config_History/\" + hostname + \".set\", 'w')\n target.write(content)\n target.close()", "title": "" }, { "docid": "6ce304f3414997de3316712e68a7af5d", "score": "0.41311955", "text": "def main():\n\n #Example usage:\n #rosrun tf2seds.py -b <path_to_tf_bag_files> -o <path_to_seds_bagfile>\n\n\n # rospy gets first crack at sys.argv\n rospy.myargv(argv=sys.argv)\n (options,args) = getopt.getopt(sys.argv[1:], 'b:o:s:t:', ['bags=','ouptut=','source=','target='])\n\n rospy.init_node(\"tf2seds\")\n\n # should be in tf2seds namespace\n source_frameid = rospy.get_param(\"/tf2seds/source_frameid\",\"torso_lift_link\")\n target_frameid = rospy.get_param(\"/tf2seds/target_frameid\",\"r_gripper_tool_frame\")\n path = rospy.get_param(\"/tf2seds/source_directory\", None)\n outfile = rospy.get_param(\"/tf2seds/outputfile\", None)\n\n # or specify any of these options on the command line\n for o,a in options:\n if o in ('-b','--bags'):\n path = a\n elif o in ('-o','--output'):\n outfile = a\n elif o in ('-s','--source'):\n source_frameid = a\n elif o in ('-t','--target'):\n target_frameid = a\n\n if not path or not outfile:\n raise Exception, \"You must specify bagfile input and output locations.\"\n\n bagfiles = glob.glob(os.path.normpath(path + \"/*.bag\"))\n bagfiles.sort() # given the default naming these should be sorted by time\n\n rospy.loginfo(\"dir: %s out: %s sf: %s tf: %s\", path, outfile, source_frameid, target_frameid)\n\n\n process_bags(outfile, bagfiles, source_frameid,target_frameid)", "title": "" }, { "docid": "c524b2440cf453469e52ff6b59e4c599", "score": "0.41297665", "text": "def generate_launch_description():\n\n tensorrt_yolo3_dir = get_package_share_directory('tensorrt_yolo3')\n # Arguments\n prototxt_file = DeclareLaunchArgument(\n 'prototxt_file',\n default_value=os.path.join(tensorrt_yolo3_dir, 'data/yolov3_416_trt.prototxt'),\n description='prototxt file for tensorrt yolo3'\n )\n\n caffemodel_file = DeclareLaunchArgument(\n 'caffemodel_file',\n default_value=os.path.join(tensorrt_yolo3_dir, 'data/yolov3_416.caffemodel'),\n description='caffemodel file for tensorrt yolo3'\n )\n\n input_topic = DeclareLaunchArgument(\n 'input_topic',\n 
default_value='/image_raw',\n description='input topic name'\n )\n\n output_topic = DeclareLaunchArgument(\n 'output_topic',\n default_value='rois',\n description='output topic name'\n )\n\n # Nodes\n tensorrt_yolo3 = Node(\n package='tensorrt_yolo3',\n node_executable='tensorrt_yolo3_node_exe',\n node_name='tensorrt_yolo3_node',\n node_namespace='',\n output='screen',\n parameters=[{'prototxt_file': LaunchConfiguration('prototxt_file'),\n 'caffemodel_file': LaunchConfiguration('caffemodel_file'), }],\n remappings=[\n (\"in_image_base_topic\", LaunchConfiguration('input_topic')),\n (\"out_image_base_topic\", [LaunchConfiguration('output_topic'), '/debug/image']),\n (\"out_objects_topic\", LaunchConfiguration('output_topic'))\n ]\n )\n\n return LaunchDescription([\n prototxt_file,\n caffemodel_file,\n input_topic,\n output_topic,\n tensorrt_yolo3\n ])", "title": "" }, { "docid": "e1d99c5fbd13bad88498da44b3a83fd1", "score": "0.41294044", "text": "def populate_from_topology(graph):\n topology = Topology()\n topology.hosts = graph.find(is_a=Host)\n # TODO(cs): broken: can't set attribute\n topology.switches = graph.find(is_a=SoftwareSwitch)\n topology.access_links = [AccessLink(host, switch[0], switch[1][0], switch[1][1])\n for host in topology.hosts\n for switch in filter(\n lambda n: isinstance(n[1][0], SoftwareSwitch),\n graph.ports_for_node(host).iteritems())]\n return topology", "title": "" }, { "docid": "fd9b69f1582e19e6edcf5af4cdf2a3f9", "score": "0.41284353", "text": "def main(args):\n\n optparser = argparse.ArgumentParser(description=\"Generate OWL from Comments in an ICD-11 file\")\n optparser.add_argument('owlfile', help=\"Input OWL file\")\n optparser.add_argument('-f', '--fullydefined', help=\"Definitions are fully defined\", action=\"store_true\")\n optparser.add_argument('-p', '--port', help=\"SCT Converter gateway port\", type=int)\n optparser.add_argument('-o', '--out', help=\"Output file\", required=True)\n optparser.add_argument('-s', '--shorturi', help=\"Shorten URI's for readability\", action=\"store_true\")\n optparser.add_argument('-r', '--removesctid', help=\"Remove the SCT class declarations\", action=\"store_true\")\n\n opts = optparser.parse_args(args)\n\n gw = SCTConverterGateway(opts.port) if opts.port else SCTConverterGateway()\n\n g = Graph()\n target_graph = add_namespaces(Graph())\n g.parse(opts.owlfile)\n target_graph.parse(opts.owlfile)\n\n # Iterate over the comments with the compositional expressions\n for subj, desc in list(g.subject_objects(icdf_comments)):\n if cgre1.search(desc):\n cgexpr = cgre1.sub(r'\\2', desc)\n elif cgre2.search(desc):\n cgexpr = cgre2.sub(r'\\2', desc)\n else:\n cgexpr = None\n\n if cgexpr:\n if not parse_and_load(gw, subj, not bool(opts.fullydefined), cgexpr, target_graph):\n print(\"Conversion error on %s (%s)\" % (subj, cgexpr), file=sys.stderr)\n else:\n print(\"No conversion available for %s (%s)\" % (subj, desc), file=sys.stderr)\n target = serialize_graph(target_graph, removesctid=opts.removesctid, shorturi=opts.shorturi)\n open(opts.out, 'w').write(target)", "title": "" }, { "docid": "fb387bdc0789d7bb01742e074fd4ce4d", "score": "0.41276404", "text": "def convert_stp_files_to_neutronics_geometry():\n\n os.system(\"trelis -batch -nographics make_faceteted_neutronics_model.py\")\n\n os.system(\"make_watertight dagmc_notwatertight.h5m -o dagmc.h5m\")", "title": "" }, { "docid": "584509ba00369834101cb5dc6af0c7df", "score": "0.41266462", "text": "def create_script(self):\n\n # script prep, headers and variable assignments\n 
self.align_script = self.script_init()\n self.align_script += self.script_set_vars()\n\n # do some actual work\n self.align_script += self.script_results_dir()\n self.align_script += self.script_copy_data()\n if self.uvars.align_centers == 'yes':\n self.align_script += self.script_align_centers()\n self.align_script += self.script_align_datasets()\n\n # add commands ...\n\n if len(self.errors) > 0: return # if any errors so far, give up\n\n return", "title": "" }, { "docid": "6d82916bdbda8b06a443feddd5c899bf", "score": "0.41259828", "text": "def model_to_python(pipeline: Pipeline) -> str:\n imports = []\n for name, step in pipeline.steps:\n imports.append(f\"from {step.__module__} import {step.__class__.__name__}\")\n\n script = (\"from sklearn.pipeline import Pipeline\\n\" +\n '\\n'.join(imports) + '\\n\\n' +\n 'pipeline = ' + str(pipeline) + '\\n')\n\n return script", "title": "" }, { "docid": "d6af484b5dc21648ba50fa9fc7cb4df6", "score": "0.41259146", "text": "def create_exec_stmt(self, script):\n return 'exec \"{0}\"'.format(script)", "title": "" }, { "docid": "4bba512a0742004032fcd93b1c514d82", "score": "0.41226125", "text": "def main(ymlfile):\n\n with ymlfile as f:\n cfg = yaml.load(f, Loader)\n\n kwargs = {}\n cfg['params']['config_file'] = ymlfile.name\n kwargs['params'] = cfg['params']\n root = cfg['params']['data_dir']\n kwargs['data'] = complete_paths(cfg['data'], root)\n kwargs['velocity'] = complete_vel_paths(cfg['velocity'], root)\n kwargs['layers'] = complete_map_paths(cfg['map_layers'], root)\n kwargs['potfields'] = complete_pot_paths(cfg['potfields'], root)\n\n tc = TransectContainer(**kwargs)\n\n tc.plot()", "title": "" }, { "docid": "faab9fa27f38a3099e6625863955b81b", "score": "0.41208908", "text": "def make_dbt_task(self, node_name, dbt_verb):\n\n model_name = node_name.split('.')[-1]\n if dbt_verb == 'test':\n node_name = node_name.replace('model', 'test') # Just a cosmetic renaming of the task\n task_group = self.dbt_test_group\n else:\n task_group = self.dbt_run_group\n\n dbt_task = BashOperator(\n task_id=node_name,\n task_group=task_group,\n bash_command=f'\\\ndbt {self.dbt_global_cli_flags} {dbt_verb} --target {self.dbt_target} --models {model_name} \\\n--profiles-dir {self.dbt_profiles_dir} --project-dir {self.dbt_project_dir}\\\n ',\n dag=self.dag,\n )\n # Keeping the log output, it's convenient to see when testing the python code outside of Airflow\n logging.info('Created task: %s', node_name)\n return dbt_task", "title": "" }, { "docid": "70386385f58597a916719d311b96c081", "score": "0.41149753", "text": "def template_to_binary(self, template):\n\t\tleft = Chem.MolFromSmarts(template.split('>>')[0]).ToBinary()\n\t\tright = Chem.MolFromSmarts(template.split('>>')[1]).ToBinary()\n\t\treturn b'>>'.join([left, right])", "title": "" }, { "docid": "223150bc4d587d947c9d3ce6229401c7", "score": "0.4113279", "text": "def create_tcl_for_talb(base_dir):\n\n binding_residues = ['179', '226', '224', '130', '152', '174', '15', '33', '240', '30', '93']\n base_dir_ligand = ''.join([base_dir, '/TalB/3_MD_post_dock2016/'])\n file_names_list = [''.join([base_dir_ligand, '1_APO/1_S7P_linear/cl101/0_complex_prep/TALB_WT_APO-103_receptor_S7P_linear_docked.pdb']),\n ''.join([base_dir_ligand, '1_APO/1_S7P_linear/cl101_2/0_complex_prep/TALB_WT_APO-515_receptor_S7P_docked.pdb']),\n ''.join([base_dir_ligand, '1_APO/1_S7P_linear/cl102/0_complex_prep/TALB_WT_APO-091_receptor_S7P_linear_docked.pdb']),\n ''.join([base_dir_ligand, 
'1_APO/1_S7P_linear/cl102_2/0_complex_prep/TALB_WT_APO-535_receptor_S7P_docked.pdb']),\n ''.join([base_dir_ligand, '1_APO/1_S7P_linear/cl212/0_complex_prep/TALB_WT_APO-036_receptor_S7P_linear_docked.pdb']),\n ''.join([base_dir_ligand, '1_APO/1_S7P_linear/cl212_2/0_complex_prep/TALB_WT_APO-777_receptor_S7P_docked.pdb']),\n ''.join([base_dir_ligand, '2_HALO_S7P_remS7P/1_S7P_linear/cl101/0_complex_prep/TALB_WT_HALO_S7P_remS7P-237_receptor_S7P_linear_docked.pdb']),\n ''.join([base_dir_ligand, '2_HALO_S7P_remS7P/1_S7P_linear/cl101_2/0_complex_prep/TALB_WT_HALO_S7P_remS7P-260_receptor_S7P_docked.pdb']),\n ''.join([base_dir_ligand, '2_HALO_S7P_remS7P/1_S7P_linear/cl302/0_complex_prep/TALB_WT_HALO_S7P_remS7P-091_receptor_S7P_linear_docked.pdb']),\n ''.join([base_dir_ligand, '2_HALO_S7P_remS7P/1_S7P_linear/cl302_2/0_complex_prep/TALB_WT_HALO_S7P_remS7P-492_receptor_S7P_docked.pdb']),\n ''.join([base_dir_ligand, '2_HALO_S7P_remS7P/1_S7P_linear/cl322/0_complex_prep/TALB_WT_HALO_S7P_remS7P-042_receptor_S7P_linear_docked.pdb']),\n ''.join([base_dir_ligand, '2_HALO_S7P_remS7P/1_S7P_linear/cl322_2/0_complex_prep/TALB_WT_HALO_S7P_remS7P-184_receptor_S7P_docked.pdb']),\n ''.join([base_dir, '/TalB/0_Crystalline_structures/1UCW_wt_S7P_watbox.pdb'])]\n\n file_out = ''.join([base_dir, '/TalB/3_MD_post_dock2016/load_s7p_frames.tcl'])\n ligand_list = ['S7P']\n create_vmd_file(file_names_list, file_out, ligand_list, binding_residues)\n\n\n base_dir_ligand = ''.join([base_dir, '/TalB/3_MD_post_dock2016/'])\n file_names_list = [''.join([base_dir_ligand, '1_APO/2_F6P/cl202/0_complex_prep/TALB_WT_APO-533_receptor_F6P_docked.pdb']),\n ''.join([base_dir_ligand, '1_APO/2_F6P/cl202_2/0_complex_prep/TALB_WT_APO-314_receptor_F6P_docked.pdb']),\n ''.join([base_dir_ligand, '1_APO/2_F6P/cl212/0_complex_prep/TALB_WT_APO-139_receptor_F6P_docked.pdb']),\n ''.join([base_dir_ligand, '1_APO/2_F6P/cl212_2/0_complex_prep/TALB_WT_APO-464_receptor_F6P_docked.pdb']),\n ''.join([base_dir_ligand, '1_APO/2_F6P/cl321/0_complex_prep/TALB_WT_APO-799_receptor_F6P_docked.pdb']),\n ''.join([base_dir_ligand, '1_APO/2_F6P/cl321_2/0_complex_prep/TALB_WT_APO-003_receptor_F6P_docked.pdb']),\n ''.join([base_dir_ligand, '2_HALO_S7P_remS7P/2_F6P/cl101/0_complex_prep/TALB_WT_HALO_S7P_remS7P-492_receptor_F6P_docked.pdb']),\n ''.join([base_dir_ligand, '2_HALO_S7P_remS7P/2_F6P/cl101_2/0_complex_prep/TALB_WT_HALO_S7P_remS7P-056_receptor_F6P_docked.pdb']),\n ''.join([base_dir_ligand, '2_HALO_S7P_remS7P/2_F6P/cl201/0_complex_prep/TALB_WT_HALO_S7P_remS7P-381_receptor_F6P_docked.pdb']),\n ''.join([base_dir_ligand, '2_HALO_S7P_remS7P/2_F6P/cl201_2/0_complex_prep/TALB_WT_HALO_S7P_remS7P-117_receptor_F6P_docked.pdb']),\n ''.join([base_dir_ligand, '2_HALO_S7P_remS7P/2_F6P/cl311/0_complex_prep/TALB_WT_HALO_S7P_remS7P-014_receptor_F6P_docked.pdb']),\n ''.join([base_dir_ligand, '2_HALO_S7P_remS7P/2_F6P/cl311_2/0_complex_prep/TALB_WT_HALO_S7P_remS7P-222_receptor_F6P_docked.pdb']),\n ''.join([base_dir, '/TalB/0_Crystalline_structures/1UCW_wt_S7P_watbox.pdb'])]\n\n\n file_out = ''.join([base_dir, '/TalB/3_MD_post_dock2016/load_f6p_frames.tcl'])\n ligand_list = ['F6P']\n create_vmd_file(file_names_list, file_out, ligand_list, binding_residues)", "title": "" }, { "docid": "54f3c8d20c155c9fb4f0a77e33a5fe8d", "score": "0.4107464", "text": "def topology(self, name):\n if request.method == 'POST':\n return self.save_topology(name)\n return self.get_topology(name)", "title": "" } ]
4cca531af85e252640b38a122ef3ef97
Creates a new archive_file (hdf5) with groups for gpt and distgen. Calls the .archive method of the GPT and Distgen objects to write into these groups.
[ { "docid": "5cf03ca819bee0c064fc670222362c11", "score": "0.76831263", "text": "def archive_gpt_with_distgen(gpt_object,\n distgen_object,\n archive_file=None,\n gpt_group ='gpt',\n distgen_group ='distgen'):\n \n h5 = File(archive_file, 'w')\n \n #fingerprint = tools.fingerprint(astra_object.input.update(distgen.input))\n \n g = h5.create_group(distgen_group)\n distgen_object.archive(g)\n \n g = h5.create_group(gpt_group)\n gpt_object.archive(g)\n \n h5.close()", "title": "" } ]
[ { "docid": "095f84ec623087ed69bf071f02b382e4", "score": "0.604668", "text": "def write_hdf5(out_file, data):\n gmu_proc_file = h5py.File(out_file, 'w')\n print(gmu_proc_file)\n for g in data:\n group = gmu_proc_file.create_group(g)\n for datum in data[g]:\n try:\n group.create_dataset(\"mel_spects_{}\".format(datum[0]),\n data=datum[1])\n except Exception as e:\n print(group.name, datum[0], e)\n\n gmu_proc_file.close()\n exit()", "title": "" }, { "docid": "11d07c00908b6787f7b49c996368fbaf", "score": "0.58894116", "text": "def make_fasta_for_groups(dict_groups, dict_fasta, path_name):\n if os.path.isdir(path_name):\n shutil.rmtree(path_name)\n os.makedirs(path_name)\n for group_name, group in dict_groups.iteritems():\n records = [dict_fasta[member] for member in group.members]\n write_fasta(path_name + group_name, records)", "title": "" }, { "docid": "363821f753ef4460b9f520da6011eed7", "score": "0.58364344", "text": "def save_groups(fname, gtable):\n np.save(fname, table_to_group_list(gtable))", "title": "" }, { "docid": "e809cff4c7d178bfc464bd4130b4d1c4", "score": "0.5739024", "text": "def write_groups(out_file, groupname):\r\n print(\"To create a single group please just enter the main group name i.e. Group Name\")\r\n print('To create a subgroup to an exisitng group, please enter /Group Name/Subgroup Name/etc/etc/')\r\n print() \r\n attributes = {}\r\n print(\"Enter attributes for\", groupname)\r\n meta = input(\"Is there a metadata file? (Y/N): \")\r\n if meta == \"Y\" or meta == \"y\":\r\n metapath = input(\"Enter metadata file path: \")\r\n with open(metapath, 'r') as metafile:\r\n for line in metafile:\r\n line = line.split('\\t')\r\n item = line[0].strip('\\n')\r\n value = line[-1].strip('\\n')\r\n if item in attributes.keys():\r\n attributes[item].append(value)\r\n else:\r\n attributes[item] = [value]\r\n else:\r\n input_attributes = input(\"Enter an attribute followed by a value. i.e. 
Project Name: iknowit, Date: 04-11-2019: \")\r\n for attribute in input_attributes.split(','):\r\n attribute = attribute.split(':')\r\n attributes[attribute[0].strip(' ')] = attribute[1].strip(' ')\r\n data_file = h5py.File(out_file, 'a')\r\n dset = data_file.create_group(groupname)\r\n for k, v in attributes.items():\r\n dset.attrs[k] = v", "title": "" }, { "docid": "0d90e8720ae0c70ea2b513271db63034", "score": "0.57272446", "text": "def write_func(in_files, out_file, groups):\r\n data_file = h5py.File(out_file, 'a')\r\n image_extensions = ['jpg', 'jpeg', 'png', 'bmp', 'tiff']\r\n count = 0\r\n try:\r\n for in_file in in_files:\r\n if in_file.split('.')[-1] not in image_extensions:\r\n try:\r\n with open(in_file) as ocf:\r\n data = ocf.read()\r\n str_type = h5py.special_dtype(vlen=str)\r\n dset = data_file.create_dataset(\r\n groups[count] + in_file.split('/')[-1],\r\n data=data, shape=(1,),\r\n dtype=str_type\r\n )\r\n attributes = generate_attributes_to_add(\r\n groups[count] + in_file.split('/')[-1])\r\n for k, v in attributes.items():\r\n dset.attrs[k] = v\r\n except FileNotFoundError:\r\n print(in_file, \"not found\")\r\n else:\r\n dset = image_to_hdf5(in_file, data_file, groups[count])\r\n attributes = generate_attributes_to_add(\r\n groups[count] + in_file.split('/')[-1])\r\n for k, v in attributes.items():\r\n dset.attrs[k] = v\r\n if len(groups) == 1:\r\n count = 0\r\n else:\r\n count += 1\r\n except RuntimeError:\r\n pass", "title": "" }, { "docid": "d243c65ab03e1df39a736a3ea2592385", "score": "0.5663915", "text": "def to_hdf(self, hdf=None, group_name=None):\n super().to_hdf(hdf=hdf, group_name=group_name)\n self.input.to_hdf(hdf=self.project_hdf5)\n self.potential.to_hdf(hdf=self.project_hdf5)\n self.structures.to_hdf(hdf=self.project_hdf5)\n self.output.to_hdf(hdf=self.project_hdf5)", "title": "" }, { "docid": "5276f57c5b2208e31ba280da31513c61", "score": "0.5663336", "text": "def write_h5_stack(npy_vol, fn, group='stack', compression=None, chunks=None,\n shuffle=None, attrs=None):\n fn = os.path.expanduser(fn)\n fout = h5py.File(fn, 'a')\n if group in fout:\n del fout[group]\n fout.create_dataset(group, data=npy_vol, compression=compression,\n chunks=chunks, shuffle=shuffle)\n if attrs is not None:\n for attr, value in attrs.items():\n fout[group].attrs[attr] = value\n fout.close()", "title": "" }, { "docid": "e0966f0dca7fd6474c392e4b03bc4ab2", "score": "0.56151164", "text": "def export_set(output_dir, name, data, labels, classes):\n\n assert len(data) == len(labels)\n\n # Variable-length datatypes for encoded png streams and label names\n dt_int = h5py.vlen_dtype(np.dtype('uint8'))\n dt_str = h5py.string_dtype(encoding='utf-8')\n\n # Initialize hdf5 file pointer\n f = h5py.File(f\"{output_dir}/{name}_{len(data)}.h5\", \"w\")\n\n # Create group and store data/labels\n x = f.create_dataset(\"data\", (len(data),), dtype=dt_int, data=data)\n y = f.create_dataset(\"label\", data=np.array(labels, dtype=int))\n\n # Store <mapping from (0, 1 ...) 
to class names> as group attribute\n y.attrs.create(\"class_names\", data=np.array(classes, dtype=dt_str))\n\n f.close()", "title": "" }, { "docid": "84d667309fb7e68e1ed0fd66c315d755", "score": "0.55947095", "text": "def writing_to_output_file(mockgal_pd, mockgroup_pd, zz_mock, \n param_dict, proj_dict, output_fmt = 'hdf5', perf_catl=False):\n ## Keys\n gal_key = '/gal_catl'\n group_key = '/group_catl'\n ## Filenames\n if perf_catl:\n ## Perfect Galaxy catalogue\n gal_file = os.path.join(proj_dict['mock_cat_mc_perf'],\n '{0}_cat_{1}_{2}_memb_cat_perf.{3}'.format(\n param_dict['survey'], zz_mock, \n param_dict['cosmo_choice'], output_fmt))\n ## Perfect Group catalogue\n group_file = os.path.join(proj_dict['mock_cat_gc_perf'],\n '{0}_cat_{1}_{2}_group_cat_perf.{3}'.format(\n param_dict['survey'], zz_mock,\n param_dict['cosmo_choice'], output_fmt))\n else:\n ## Normal galaxy catalogue\n gal_file = os.path.join(proj_dict['mock_cat_mc'],\n '{0}_cat_{1}_{2}_memb_cat.{3}'.format(\n param_dict['survey'], zz_mock,\n param_dict['cosmo_choice'], output_fmt))\n ## Normal group catalogue\n group_file = os.path.join(proj_dict['mock_cat_gc'],\n '{0}_cat_{1}_{2}_group_cat.{3}'.format(\n param_dict['survey'], zz_mock,\n param_dict['cosmo_choice'], output_fmt))\n ##\n ## Saving DataFrames to files\n # Member catalogue\n cu.pandas_df_to_hdf5_file(mockgal_pd, gal_file, key=gal_key)\n # Group catalogue\n cu.pandas_df_to_hdf5_file(mockgroup_pd, group_file, key=group_key)\n ##\n ## Checking for file's existence\n cu.File_Exists(gal_file)\n cu.File_Exists(group_file)\n print('{0} gal_file : {1}'.format(param_dict['Prog_msg'], gal_file))\n print('{0} group_file: {1}'.format(param_dict['Prog_msg'], group_file))", "title": "" }, { "docid": "2c4d3780e809abec0808ba417109f9f6", "score": "0.55542684", "text": "def to_hdf(self, hdf, group_name=\"structure\"):\n # import time\n with hdf.open(group_name) as hdf_structure:\n # time_start = time.time()\n hdf_structure[\"TYPE\"] = str(type(self))\n for el in self.species:\n if isinstance(el.tags, dict):\n with hdf_structure.open(\"new_species\") as hdf_species:\n el.to_hdf(hdf_species)\n hdf_structure[\"species\"] = [el.Abbreviation for el in self.species]\n hdf_structure[\"indices\"] = self.indices\n\n with hdf_structure.open(\"tags\") as hdf_tags:\n for tag in self._tag_list.keys():\n tag_value = self._tag_list[tag]\n if isinstance(tag_value, SparseList):\n tag_value.to_hdf(hdf_tags, tag)\n hdf_structure[\"units\"] = self.units\n hdf_structure[\"dimension\"] = self.dimension\n\n if self.cell is not None:\n with hdf_structure.open(\"cell\") as hdf_cell:\n # Convert ASE cell object to numpy array before storing\n hdf_cell[\"cell\"] = np.array(self.cell)\n hdf_cell[\"pbc\"] = self.pbc\n\n # hdf_structure[\"coordinates\"] = self.positions # \"Atomic coordinates\"\n hdf_structure[\"positions\"] = self.positions # \"Atomic coordinates\"\n\n # potentials with explicit bonds (TIP3P, harmonic, etc.)\n if self.bonds is not None:\n hdf_structure[\"explicit_bonds\"] = self.bonds\n\n # print ('time in atoms.to_hdf: ', time.time() - time_start)\n\n if self._high_symmetry_points is not None:\n hdf_structure[\"high_symmetry_points\"] = self._high_symmetry_points\n\n if self._high_symmetry_path is not None:\n hdf_structure[\"high_symmetry_path\"] = self._high_symmetry_path\n\n hdf_structure[\"info\"] = self.info", "title": "" }, { "docid": "1177ebb729269e891d16e45e5ae0a0b3", "score": "0.55385166", "text": "def pack_waveforms_to_hdf5(args):\n\n # Arguments & parameters\n audios_dir = 
args.audios_dir\n csv_path = args.csv_path\n waveforms_hdf5_path = args.waveforms_hdf5_path\n mini_data = args.mini_data\n\n split_dir = args.split_dir\n split_to_test_id = args.split_to_test_id\n\n clip_samples = config.clip_samples\n classes_num = config.classes_num\n sample_rate = config.sample_rate\n id_to_ix = config.id_to_ix\n\n\n\n #id_dict = {'screaming':'/m/03qc9zr','crying_sobbing':'/m/0463cq4','chatter':'/m/07rkbfh','motor_vehicle_(road)':'/m/012f08','emergency_vehicle':'/m/03j1ly','siren':'/m/03kmc9','explosion':'/m/014zdl','gunshot_gunfire':'/m/032s66', 'breaking': '/m/07pc8lb'}\n\n\n\n\n sample_rate = config.sample_rate\n id_to_ix = config.id_to_ix\n\n\n\n id_dict = {'screaming':'/m/03qc9zr','crying_sobbing':'/m/0463cq4','chatter':'/m/07rkbfh','motor_vehicle_(road)':'/m/012f08','emergency_vehicle':'/m/03j1ly','siren':'/m/03kmc9','explosion':'/m/014zdl','gunshot_gunfire':'/m/032s66', 'breaking': '/m/07pc8lb'}\n\n\n\n\n # Paths\n if mini_data:\n prefix = 'mini_'\n waveforms_hdf5_path += '.mini'\n else:\n prefix = ''\n\n create_folder(os.path.dirname(waveforms_hdf5_path))\n\n logs_dir = '_logs/pack_waveforms_to_hdf5/{}{}'.format(prefix, get_filename(csv_path))\n create_folder(logs_dir)\n create_logging(logs_dir, filemode='w')\n logging.info('Write logs to {}'.format(logs_dir))\n\n # Read csv file\n split_file = open(split_dir+\"/fold_\"+str(split_to_test_id)+\".txt\",\"r\")\n #split_file = open(split_dir+\"/fold_filter_\"+str(split_to_test_id)+\".txt\",\"r\")\n lines = split_file.readlines()\n audio_names = []\n target_names = []\n for line in lines:\n audio_names.append(line.split(' ')[0]+'.wav')\n if line.split(' ')[2][0:-1]==\"others\":\n target_names.append(\"others\")\n else:\n target_names.append(id_dict[line.split(' ')[2][0:-1]])\n\n\n\n\n\n meta_dict = read_metadata(csv_path, classes_num, id_to_ix)\n\n '''if mini_data:\n mini_num = 10\n for key in meta_dict.keys():\n meta_dict[key] = meta_dict[key][0 : mini_num]'''\n\n #audios_num = len(meta_dict['audio_name'])\n audios_num = len(lines)\n\n\n\n # Pack waveform to hdf5\n total_time = time.time()\n\n with h5py.File(waveforms_hdf5_path, 'w') as hf:\n hf.create_dataset('audio_name', shape=((audios_num,)), dtype='S20')\n hf.create_dataset('waveform', shape=((audios_num, clip_samples)), dtype=np.int16)\n hf.create_dataset('target', shape=((audios_num, classes_num)), dtype=np.bool)\n hf.attrs.create('sample_rate', data=sample_rate, dtype=np.int32)\n\n targets = np.zeros((audios_num, classes_num), dtype=np.bool)\n\n # Pack waveform & target of several audio clips to a single hdf5 file\n for n in range(audios_num):\n audio_path = os.path.join(audios_dir, audio_names[n])\n\n if os.path.isfile(audio_path):\n logging.info('{} {}'.format(n, audio_path))\n (audio, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)\n audio = pad_or_truncate(audio, clip_samples)\n\n hf['audio_name'][n] = audio_names[n].encode()\n hf['waveform'][n] = float32_to_int16(audio)\n ix = id_to_ix[target_names[n]]\n targets[n, ix] = 1\n #print(targets[n])\n hf['target'][n] = targets[n]\n else:\n logging.info('{} File does not exist! 
{}'.format(n, audio_path))\n\n logging.info('Write to {}'.format(waveforms_hdf5_path))\n logging.info('Pack hdf5 time: {:.3f}'.format(time.time() - total_time))", "title": "" }, { "docid": "5d035cede7bd46397be40298c6d9fd68", "score": "0.55122924", "text": "def image_to_hdf5(filename, f, group):\r\n img = Image.open(filename)\r\n image = np.array(img.getdata()).reshape(img.size[1], img.size[0], -1)\r\n data = np.array(image)\r\n dset = f.create_dataset(group + filename.split('/')\r\n [-1], shape=(4, img.size[1], img.size[0], 4))\r\n dset[0] = data\r\n im = Image.fromarray(dset[0].astype('uint8'))\r\n im.save(filename)\r\n return dset", "title": "" }, { "docid": "5533f5f19d0b74ec6473df1e509a92bd", "score": "0.5512146", "text": "def createGroup(filename, group, subgroups = []):\n\n #deleteGroup(filename, group)\n FILE = h5py.File(filename, \"r+\")\n try:\n GROUP = FILE.create_group(group)\n print(\"[CREATE]: <{:s}> group created.\".format(group))\n\n for sg in subgroups:\n GROUP.create_group(sg)\n except:\n print(\"Group exists, so not creating a new one.\")\n pass\n\n try:\n FILE.close()\n except:\n pass", "title": "" }, { "docid": "0999e38982a0b581097a9cf4ea0ef139", "score": "0.55031323", "text": "def createArchiveGenTask(self):\n archiveTask = author.Task()\n archiveTask.title = 'Generate {} files'.format(self.archiveExt)\n archiveTask.service = self.service\n\n archiveTaskCmd = author.Command()\n archiveTaskCmd.argv = [os.path.join(self.houdiniBinPath, 'hbatch.exe').replace('\\\\', '/')]\n archiveTaskCmd.argv.append('-c')\n # hscriptCmd = 'tcur {}; render -w -i -V 1 -f {} {} {}; quit'.format(\n hscriptCmd = 'render -w -i -V 2 -f {} {} {}; quit'.format(\n self.start,\n self.end,\n self.outputDriverPath\n )\n archiveTaskCmd.argv.append(hscriptCmd)\n archiveTaskCmd.argv.append(self.snapshotScene)\n archiveTaskCmd.retryrc = range(0, 11) # WATCHME : exit codes that will restart the task\n archiveTask.addCommand(archiveTaskCmd)\n\n # Add archive files to be deleted by cleanup task\n archiveFiles = [\n self.archiveOutput.replace('$F4', str(frame).zfill(4))\n for frame in range(self.start, self.end + 1)\n ]\n\n self.toDelete.extend(archiveFiles)\n\n return archiveTask", "title": "" }, { "docid": "1ad02e77510cb35a15e0d2991413f733", "score": "0.546978", "text": "def pack_project_waveforms_to_hdf5(args):\n\n # Arguments & parameters\n audios_dir = args.audios_dir\n csv_path = args.csv_path\n waveforms_hdf5_path = args.waveforms_hdf5_path\n mini_data = args.mini_data\n\n split_dir = args.split_dir\n split_to_test_id = args.split_to_test_id\n\n clip_samples = config.clip_samples\n #classes_num = config.classes_num\n classes_num = 9\n sample_rate = config.sample_rate\n #id_to_ix = config.id_to_ix\n id_to_ix = {'screaming':0,'crying_sobbing':1,'chatter':2,'motor_vehicle_(road)':3,'emergency_vehicle':4,'siren':5,'explosion':6,'gunshot_gunfire':7, 'breaking':8 ,'others': 9}\n\n\n id_dict = {'screaming':'/m/03qc9zr','crying_sobbing':'/m/0463cq4','chatter':'/m/07rkbfh','motor_vehicle_(road)':'/m/012f08','emergency_vehicle':'/m/03j1ly','siren':'/m/03kmc9','explosion':'/m/014zdl','gunshot_gunfire':'/m/032s66', 'breaking': '/m/07pc8lb'} \n\n\n\n\n # Paths\n if mini_data:\n prefix = 'mini_'\n waveforms_hdf5_path += '.mini'\n else:\n prefix = ''\n\n create_folder(os.path.dirname(waveforms_hdf5_path))\n\n logs_dir = '_logs/pack_waveforms_to_hdf5/{}{}'.format(prefix, get_filename(csv_path))\n create_folder(logs_dir)\n create_logging(logs_dir, filemode='w')\n logging.info('Write logs to {}'.format(logs_dir))\n 
\n\t# Read csv file\n \n #split_file = open(split_dir+\"/fold_filter_\"+str(split_to_test_id)+\".txt\",\"r\")\n split_file = open(split_dir+\"/fold_\"+str(split_to_test_id)+\".txt\",\"r\")\n lines = split_file.readlines()\n audio_names = []\n target_names = []\n for line in lines:\n audio_names.append(line.split(' ')[0]+'.wav')\n target_names.append(id_to_ix[line.split(' ')[2][0:-1]])\n\n\n\n\n\n #meta_dict = read_metadata(csv_path, classes_num, id_to_ix)\n\n '''if mini_data:\n mini_num = 10\n for key in meta_dict.keys():\n meta_dict[key] = meta_dict[key][0 : mini_num]'''\n\n #audios_num = len(meta_dict['audio_name'])\n audios_num = len(lines)\n\n\n\n # Pack waveform to hdf5\n total_time = time.time()\n\n with h5py.File(waveforms_hdf5_path, 'w') as hf:\n hf.create_dataset('audio_name', shape=((audios_num,)), dtype='S20')\n hf.create_dataset('waveform', shape=((audios_num, clip_samples)), dtype=np.int16)\n hf.create_dataset('target', shape=((audios_num, classes_num)), dtype=np.bool)\n hf.attrs.create('sample_rate', data=sample_rate, dtype=np.int32)\n\n targets = np.zeros((audios_num, classes_num), dtype=np.bool)\n\n # Pack waveform & target of several audio clips to a single hdf5 file\n for n in range(audios_num):\n audio_path = os.path.join(audios_dir, audio_names[n])\n if os.path.isfile(audio_path):\n logging.info('{} {}'.format(n, audio_path))\n (audio, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)\n audio = pad_or_truncate(audio, clip_samples)\n\n hf['audio_name'][n] = audio_names[n].encode()\n hf['waveform'][n] = float32_to_int16(audio)\n \n ix = target_names[n]\n if ix < classes_num:\n targets[n, ix] = 1\n logging.info('{}'.format(targets[n]))\n hf['target'][n] = targets[n]\n else:\n logging.info('{} File does not exist! {}'.format(n, audio_path))\n\n logging.info('Write to {}'.format(waveforms_hdf5_path))\n logging.info('Pack hdf5 time: {:.3f}'.format(time.time() - total_time))", "title": "" }, { "docid": "3dd42d631e95e3cacfbc4d6c50940ab2", "score": "0.54462826", "text": "def save_metadata(self, hdf5: h5py.File, group_name: str) -> None:\n pass", "title": "" }, { "docid": "3ce0a608a1e3aa0296337311022976b1", "score": "0.5441013", "text": "def createFileGroups(self):\n self.dataset.buildListOfFiles()\n self.dataset_details['FileGroups'] = dict() \n for fileName in self.dataset.files: \n\n name = removeIndex(fileName) \n if name in self.dataset_details['FileGroups']: \n self.dataset_details['FileGroups'][name]['Files'].append(fileName) \n else: \n instanceGroup = {\"SizeInTB\":None, \n \"IsCrab\":None,\n \"Files\":None,\n \"FileEntries\":None,\n \"PrimaryDatasetFraction\":None,\n \"BadFiles\":None,\n \"NumberBadFiles\":None,\n \"MissingFiles\":None,\n \"NumberMissingFiles\":None,\n \"GoodFiles\":None,\n \"NumberGoodFiles\":None,\n \"TotalJobs\":None}\n instanceGroup['Files']=[]\n instanceGroup['Files'].append(fileName) \n isCrab = isCrabFile( fileName ) \n instanceGroup['IsCrab']=isCrab \n self.dataset_details['FileGroups'][name]=instanceGroup \n for fg in self.dataset_details['FileGroups']: \n self.dataset_details['FileGroups'][fg]['Files'].sort(key=lambda x: int(getIndex(x)))", "title": "" }, { "docid": "937ba09b1bcb1b00953fb95c26703f3b", "score": "0.5439032", "text": "def xr_save_by_grouping_along_dimension(ds,filepath, filename_prefix, grouping='chunk',parallel=True,dim='time'):\n \n return", "title": "" }, { "docid": "049219d45a71390f71f7457c993530d3", "score": "0.5436357", "text": "def to_h5(self, out_file, fields=None, group='/', replace=False, 
nocompression=False, attributes={}, fill_value=None, **kwargs):\n kwargs.setdefault('srs_proj4', None)\n kwargs.setdefault('srs_wkt', None)\n kwargs.setdefault('srs_epsg', None)\n kwargs.setdefault('grid_mapping_name', 'crs')\n # check whether overwriting existing files\n # append to existing files as default\n mode = 'w' if replace else 'a'\n\n if fields is None:\n fields=self.fields\n if group[0] != '/':\n group='/'+group\n\n # update fill values in fields\n if fill_value is not None:\n self.replace_invalid(fields=fields, fill_value=fill_value)\n\n # get crs attributes\n self.crs_attributes(**kwargs)\n with h5py.File(out_file,mode) as h5f:\n # try adding file level attributes\n try:\n for att_name,att_val in attributes['ROOT'].items():\n h5f.attrs[att_name] = att_val\n except Exception:\n pass\n\n # try creating the output group\n try:\n h5f.create_group(group)\n except Exception:\n pass\n\n # try adding group attributes\n try:\n for att_name,att_val in attributes[group].items():\n h5f[group].attrs[att_name] = att_val\n except Exception:\n pass\n\n for field in ['x','y','time', 't'] + fields:\n # if field exists, overwrite it\n if field in h5f[group]:\n if hasattr(self, field):\n h5f[group+'/'+field][...] = getattr(self, field)\n else:\n #Otherwise, try to create the dataset\n try:\n if nocompression or field in ['x','y','time']:\n h5f.create_dataset(group+'/'+field, data=getattr(self, field))\n else:\n\n h5f.create_dataset(group+'/'+field, data=getattr(self, field),\n chunks=True, compression=\"gzip\", fillvalue=self.fill_value)\n except Exception:\n pass\n # try adding field attributes\n try:\n for att_name,att_val in attributes[field].items():\n h5f[group+'/'+field].attrs[att_name] = att_val\n except Exception:\n pass\n # add crs attributes if applicable\n if self.crs:\n # add grid mapping attribute to each grid field\n for field in fields:\n h5f[group+'/'+field].attrs['grid_mapping'] = kwargs['grid_mapping_name']\n # add grid mapping variable with projection attributes\n h5crs = h5f.create_dataset(kwargs['grid_mapping_name'], (), dtype=np.byte)\n for att_name,att_val in self.crs.items():\n h5crs.attrs[att_name] = att_val", "title": "" }, { "docid": "9335eba7a943c66b3289267357718608", "score": "0.54333574", "text": "def write_by_file(fpath, *objs_list, **kwargs):\n group_size = kwargs.pop('group_size', len(objs_list))\n mode = 'wb'\n if kwargs.get('gzip', True):\n ofs = gzip.open(fpath, mode)\n else:\n ofs = open(fpath, mode)\n with stream.open(fileobj=ofs, mode=mode, buffer_size=group_size,\n **kwargs) as ostream:\n ostream.write(*objs_list)\n ofs.close()", "title": "" }, { "docid": "c0d5008a7deb52ba1830263c9061b685", "score": "0.5423882", "text": "def produce_bam_custom(kmers_trie, name, label, guides_filename, args,\n offdist, maxoffcount, processes, n, parts):\n guidesfiles = []\n # parts = 256\n tempdir = '%s%s' % (name,'/classifiedfiles/tempfiles')\n\n util.print_log('produce SAM files...')\n samfiles = ['%s/%s.sam' % (tempdir, i) for i in range(parts)]\n # samfiles = [tempfile.NamedTemporaryFile(dir=name, suffix='.sam%s' % i)\n # for i in xrange(parts)]\n # util.print_log('store SAM in these files (gzipped): %s'\n # % (', '.join([basename(f.name) for f in samfiles])))\n\n \n if isinstance(guides_filename, str):\n\n util.print_log('split %s in %s parts...' 
% (guides_filename, parts))\n guidesfiles = [tempfile.NamedTemporaryFile(dir=name,\n suffix='.guides%s' % i)\n for i in range(parts)]\n util.print_log('store guides in these files: %s'\n % (', '.join([basename(f.name) for f in guidesfiles])))\n guidesfile = gzip.open(guides_filename) \\\n if guides_filename.endswith('.gz') \\\n else open(guides_filename)\n index_num = 0\n guidecount = 0\n for line in guidesfile:\n kmer1 = line.split()[0][0:n]\n index_num = guides.get_num(kmer1, n)\n guidesfiles[index_num].write(line)\n \n guidecount += 1\n \n guidesfile.close()\n for f in guidesfiles:\n f.flush()\n util.print_log('%s guideRNAs to process' % guidecount)\n util.print_log('done')\n\n process_list = []\n all_task = Queue()\n for i in range(parts):\n task = (guides_filename[i].name, samfiles[i].name, i)\n all_task.put(task)\n\n for i in range(processes):\n p = Process(target=process_pool, args=(all_task, kmers_trie, args, offdist, maxoffcount, i, n, parts))\n p.start()\n process_list.append(p)\n\n for p in process_list:\n p.join()\n \n for i in range(parts):\n guidesfiles[i].close()\n\n else: \n process_list = []\n all_task = Queue()\n for i in range(parts):\n task = (guides_filename[i], samfiles[i], i)\n all_task.put(task)\n\n for i in range(processes):\n p = Process(target=process_pool, args=(all_task, kmers_trie, args, offdist, maxoffcount, i, n, parts))\n p.start()\n process_list.append(p)\n\n for p in process_list:\n p.join()\n\n util.print_log('produce sorted BAM files...')\n \n bamfiles = ['%s/%s.bam' % (tempdir, i) for i in range(parts)]\n # bamfiles = [tempfile.NamedTemporaryFile(dir=name, suffix='.bam%s' % i)\n # for i in xrange(parts)]\n # util.print_log('store BAM in these files: %s'\n # % (', '.join([basename(f.name) for f in bamfiles])))\n\n pool = Pool(processes)\n util.print_log('poolSize %s...' 
% processes)\n index=False\n for i in range(parts):\n pool.apply_async(sam_to_bam,(samfiles[i], bamfiles[i], index,))\n util.print_log('Waiting for all subprocesses done...')\n pool.close()\n pool.join()\n\n # for i in xrange(parts):\n # samfiles[i].close()\n util.print_log('merge into one BAM file...')\n bamfile = '%s/%s_guides%s.bam' % (name, name,\n '_%s' % label if label else '')\n util.print_log('store in %s' % bamfile)\n util.warn_file_exists(bamfile)\n if parts > 1000:\n mid = parts // 2\n bamfiles_temp = [tempfile.NamedTemporaryFile(dir=name, suffix='.bam%s' % i)\n for i in xrange(2)]\n samtools_command1 = 'samtools merge -f %s %s' \\\n % (bamfiles_temp[0].name, ' '.join(bamfiles[0:mid]))\n os.system(samtools_command1)\n\n samtools_command2 = 'samtools merge -f %s %s' \\\n % (bamfiles_temp[1].name, ' '.join(bamfiles[mid:parts]))\n os.system(samtools_command2)\n\n samtools_command = 'samtools merge -f %s %s' \\\n % (bamfile, ' '.join([f.name for f in bamfiles_temp]))\n os.system(samtools_command)\n\n for f in bamfiles_temp:\n f.close()\n\n else:\n samtools_command = 'samtools merge -f %s %s' \\\n % (bamfile, ' '.join(bamfiles))\n # print samtools_command\n os.system(samtools_command)\n samtools_index_command = 'samtools index %s' % bamfile\n # print samtools_index_command\n os.system(samtools_index_command)\n util.print_log('done')\n # for i in xrange(parts):\n # bamfiles[i].close()\n\n for i in range(parts):\n if(os.path.exists(samfiles[i])):\n os.remove(samfiles[i])\n if(os.path.exists(bamfiles[i])):\n os.remove(bamfiles[i])\n\n util.print_log('samtools version')\n samtools_version_command = 'samtools --version'\n print samtools_version_command\n os.system(samtools_version_command)", "title": "" }, { "docid": "2da00a9f5c41e9c6ab49a32f72eda32f", "score": "0.54021645", "text": "def evaluate_gpt_with_distgen(settings, \n archive_path=None, \n merit_f=None, \n gpt_input_file=None,\n distgen_input_file=None,\n workdir=None, \n use_tempdir=True,\n gpt_bin='$GPT_BIN',\n timeout=2500,\n auto_phase=False,\n verbose=False,\n gpt_verbose=False,\n asci2gdf_bin='$ASCI2GDF_BIN',\n kill_msgs=DEFAULT_KILL_MSGS,\n parse_layout=False):\n \n G = run_gpt_with_distgen(settings=settings,\n gpt_input_file=gpt_input_file,\n distgen_input_file=distgen_input_file,\n workdir=workdir, \n use_tempdir=use_tempdir,\n gpt_bin=gpt_bin,\n timeout=timeout,\n auto_phase=auto_phase,\n verbose=verbose,\n gpt_verbose=gpt_verbose,\n asci2gdf_bin=asci2gdf_bin,\n kill_msgs=kill_msgs,\n parse_layout=parse_layout\n )\n \n if merit_f:\n merit_f = tools.get_function(merit_f)\n output = merit_f(G)\n else:\n output = default_gpt_merit(G)\n \n if output['error']:\n raise ValueError('error occured!')\n \n #Recreate Generator object for fingerprint, proper archiving\n # TODO: make this cleaner\n gen = Generator(G.distgen_input)\n #gen = Generator()\n #gen.input = G.distgen_input \n \n fingerprint = fingerprint_gpt_with_distgen(G, gen)\n output['fingerprint'] = fingerprint \n \n if archive_path:\n path = tools.full_path(archive_path)\n assert os.path.exists(path), f'archive path does not exist: {path}'\n archive_file = os.path.join(path, fingerprint+'.h5')\n output['archive'] = archive_file\n \n # Call the composite archive method\n archive_gpt_with_distgen(G, gen, archive_file=archive_file) \n \n return output", "title": "" }, { "docid": "42c307bd366b33a33a34b314e83be7c0", "score": "0.5375089", "text": "def rewrite_hdf5(g, data_dict):\n for ddkey, dditem in data_dict.iteritems():\n if ddkey not in g.keys():\n 
g.create_group(ddkey)\n for attr in dditem.attrs:\n g[ddkey].attrs[attr]=dditem.attrs[attr]\n append=False\n else:\n append=dditem.attrs.get(\"append\", False)\n g[ddkey].attrs[\"append\"]=append\n if isinstance(dditem, dataset): #dataset\n if not append:\n namestr=\"{0}\".format(len(g[ddkey]))\n g[ddkey].create_dataset(namestr, data=dditem.data, dtype=dditem.datatype, maxshape=dditem.maxshape)#chunks=True)\n #g[ddkey][namestr].attrs[\"index\"]=len(dditem.data)-1\n else:\n namestr=\"{0}\".format(len(g[ddkey])-1)\n dset=g[ddkey][namestr]\n #n=dset.attrs[\"index\"]\n #if n>=len(dset)-1:\n n=len(dset)\n a=n+len(dditem.data)\n dset.resize(a,axis=0)\n dset[n:]=dditem.data\n #n+=len(dditem.data)\n #dset.attrs[\"index\"]=n\n elif isinstance(dditem, group):\n rewrite_hdf5(g[ddkey], dditem)", "title": "" }, { "docid": "f041639fea79d2b81b0fb6861c3e2e3c", "score": "0.5358876", "text": "def save_data(self):\n if not os.path.exists(os.path.dirname(self.filename)):\n os.makedirs(os.path.dirname(self.filename))\n with h5py.File(self.filename, \"w\") as f:\n grid_data = f.create_group('grid')\n self.grid.save_to_h5py(grid_data)\n\n all_species = f.create_group('species')\n for species in self.list_species:\n species_data = all_species.create_group(species.name)\n species.save_to_h5py(species_data)\n\n f.attrs['dt'] = self.dt\n f.attrs['NT'] = self.NT\n f.attrs['run_date'] = self.run_date\n f.attrs['git_version'] = self.git_version\n f.attrs['title'] = self.title\n f.attrs['runtime'] = self.runtime\n f.attrs['considered_large'] = self.considered_large\n print(f\"Saved file to {self.filename}\")\n return self", "title": "" }, { "docid": "4598538961ac7ac35010057b8031a57a", "score": "0.5351646", "text": "def WriteHDF5(self, fname, basedir, tag, m , nrings=9, fix_xco=False):\n modf = h5py.File(fname, 'w')\n\n if m is not None:\n if fix_xco is False:\n X_CO = np.array([m.values['pi0_H2_'+str(i)] for i in range(1,nrings+1)])\n else: \n X_CO = np.ones(nrings)\n #modf = h5py.File(fname, 'w')\n #try:\n # Generate Groups\n # template_group = modf.create_group(\"templates\")\n # fit_group = modf.create_group(\"fit_results\")\n\n\n # Get data dimensions\n tmp = pyfits.open(basedir+'/bremss_healpix_54_'+tag+'.gz')\n energies = tmp[2].data.field(0)\n tShape = (len(energies), tmp[1].data.shape[0])\n print tShape\n del tmp # free memory\n\n pi0 = modf.create_dataset(\"/templates/pi0\", tShape, dtype='float32',compression=\"gzip\")\n pi0_0 = modf.create_dataset(\"/templates/pi0_0\", tShape, dtype='float32',compression=\"gzip\")\n pi0_1 = modf.create_dataset(\"/templates/pi0_1\", tShape, dtype='float32',compression=\"gzip\")\n brem = modf.create_dataset(\"/templates/brem\", tShape, dtype='float32',compression=\"gzip\")\n brem_0 = modf.create_dataset(\"/templates/brem_0\", tShape, dtype='float32',compression=\"gzip\")\n brem_1 = modf.create_dataset(\"/templates/brem_1\", tShape, dtype='float32',compression=\"gzip\")\n ics_opt = modf.create_dataset(\"/templates/ics_opt\", tShape, dtype='float32',compression=\"gzip\")\n ics_fir = modf.create_dataset(\"/templates/ics_fir\", tShape, dtype='float32',compression=\"gzip\")\n ics_cmb = modf.create_dataset(\"/templates/ics_cmb\", tShape, dtype='float32',compression=\"gzip\")\n modf.create_dataset(\"/templates/energies\", data=energies, dtype='float32',compression=\"gzip\")\n # Now fill in the templates one by one.\n # Add fit metadata.\n # Add galdef metadata.\n\n\n #---------------------------------------------------------------\n # Create Diffuse Template from fitting results.\n 
def ReadFits(fname, length):\n d = pyfits.open(fname)[1].data\n return np.array([d.field(i) for i in range(length)])\n\n if m is not None:\n for i_ring in range(1,nrings+1):\n print \"Adding HI/HII ring\", i_ring\n\n pi0[...] += m.values['pi0HIHII']*ReadFits(basedir+'/pi0_decay_HIR_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n pi0[...] += m.values['pi0HIHII']*ReadFits(basedir+'/pi0_decay_HII_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n if fix_xco is False:\n pi0[...] += X_CO[i_ring-1]*ReadFits(basedir+'/pi0_decay_H2R_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n else:\n pi0[...] += m.values['pi0HIHII']*ReadFits(basedir+'/pi0_decay_H2R_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n\n brem[...] += m.values['pi0HIHII']*1.25*ReadFits(basedir+'/bremss_HIR_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n brem[...] += m.values['pi0HIHII']*1.25*ReadFits(basedir+'/bremss_HII_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n if fix_xco is False:\n brem[...] += 1.25*X_CO[i_ring-1]*ReadFits(basedir+'/bremss_H2R_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n else: \n brem[...] += m.values['pi0HIHII']*1.25*X_CO[i_ring-1]*ReadFits(basedir+'/bremss_H2R_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n \n if i_ring == 1 :\n pi0_0[...] += ReadFits(basedir+'/pi0_decay_H2R_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n brem_0[...] += ReadFits(basedir+'/bremss_H2R_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n if i_ring == 2:\n pi0_1[...] += ReadFits(basedir+'/pi0_decay_H2R_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n brem_1[...] += ReadFits(basedir+'/bremss_H2R_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n\n print 'Adding ICS'\n ics_opt[...] += m.values['ics']*ReadFits(basedir+'/ics_isotropic_comp_1_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n ics_fir[...] += m.values['ics']*ReadFits(basedir+'/ics_isotropic_comp_2_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n ics_cmb[...] += m.values['ics']*ReadFits(basedir+'/ics_isotropic_comp_3_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n\n else:\n for i_ring in range(1,nrings+1):\n print \"Adding HI/HII/H2 ring\", i_ring\n\n pi0[...] += ReadFits(basedir+'/pi0_decay_HIR_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n pi0[...] += ReadFits(basedir+'/pi0_decay_HII_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n pi0[...] += ReadFits(basedir+'/pi0_decay_H2R_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n\n brem[...] += ReadFits(basedir+'/bremss_HIR_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n brem[...] += ReadFits(basedir+'/bremss_HII_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n brem[...] += ReadFits(basedir+'/bremss_H2R_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n\n if i_ring == 1 :\n pi0_0[...] += ReadFits(basedir+'/pi0_decay_H2R_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n brem_0[...] += ReadFits(basedir+'/bremss_H2R_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n if i_ring == 2:\n pi0_1[...] += ReadFits(basedir+'/pi0_decay_H2R_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n brem_1[...] 
+= ReadFits(basedir+'/bremss_H2R_ring_'+str(i_ring)+'_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n\n print 'Adding ICS'\n ics_opt[...] += ReadFits(basedir+'/ics_isotropic_comp_1_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n ics_fir[...] += ReadFits(basedir+'/ics_isotropic_comp_2_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n ics_cmb[...] += ReadFits(basedir+'/ics_isotropic_comp_3_healpix_54_'+tag+'.gz', len(energies)).clip(0)\n\n # except:\n # modf.close()\n \n try: \n modf.close()\n print 'Closed HDF5 file.' \n except: \n print 'Failed to close HDF5 file.' \n pass\n return", "title": "" }, { "docid": "edc55435f66927e97fd865b8b5016a59", "score": "0.53244704", "text": "def write_to_hdf5(data_by_test, filename):\n\twith hdf5.File('{}.hdf5'.format(filename), 'w') as file:\n\t\t# foreach test data\n\t\tfor test_index, test_data in enumerate(data_by_test.values()):\n\t\t\t# convert iterator 'test_data' to the list\n\t\t\tfile.create_dataset(\"test_{}\".format(test_index),\n\t\t\t data=list(test_data), compression=\"gzip\")", "title": "" }, { "docid": "adbad349ce105e54e030dbc519f364c4", "score": "0.5301874", "text": "def save(self):\n\n with h5py.File(self.filename, \"r+\") as f:\n\n source_handle = f.create_group(\"source\")\n if self.data.source:\n self.data.source.save(source_handle)\n\n uhecr_handle = f.create_group(\"uhecr\")\n if self.data.uhecr:\n self.data.uhecr.save(uhecr_handle)\n\n detector_handle = f.create_group(\"detector\")\n if self.data.detector:\n self.data.detector.save(detector_handle)\n\n model_handle = f.create_group(\"model\")\n if self.model:\n self.model.save(model_handle)\n\n fit_handle = f.create_group(\"fit\")\n if self.fit:\n\n # fit inputs\n fit_input_handle = fit_handle.create_group(\"input\")\n for key, value in self.fit_input.items():\n fit_input_handle.create_dataset(key, data=value)\n\n # samples\n samples = fit_handle.create_group(\"samples\")\n for key, value in self.chain.items():\n samples.create_dataset(key, data=value)\n\n else:\n plotvars_handle = f.create_group(\"plotvars\")\n\n if self.analysis_type == self.gmf_type:\n for key, value in list(self.defl_plotvars.items()):\n plotvars_handle.create_dataset(key, data=value)", "title": "" }, { "docid": "1e9f3d70ea62c4328ad931aea3ce24a4", "score": "0.52851725", "text": "def save(self):\n \n self.f = hdf.File(\"dbtest3.hdf5\",\"a\")\n dt = hdf.special_dtype(vlen=str)\n name = self.compoundA + \" + \" + self.compoundB + \" @ \" + str(self.temperature)\n subgrp = self.f.create_group(name)\n dset = subgrp.create_dataset(\"Specifics\",(10,),dtype=dt)\n dset[0] = self.compoundA\n dset[1] = self.compoundB\n dset[2] = self.reference\n dset[3] = self.etaA\n dset[4] = self.etaB\n dset[5] = self.rhoA\n dset[6] = self.rhoB\n dset[7] = self.massA\n dset[8] = self.massB\n dset[9] = self.temperature\n \n dset = subgrp.create_dataset(\"Data\",(3,len(self.x1)),dtype=\"f\")\n dset[0] = self.x1\n dset[1] = self.etaSystem\n dset[2] = self.rhoSystem\n print \"Saving data for \" + name\n self.f.close()", "title": "" }, { "docid": "a6516f1de6193780627bfc1ac1a160af", "score": "0.5283196", "text": "def _output(self):\n t0 = time.monotonic()\n self.sendProgress('Waiting for HDF5 writer lock...')\n with self.h5io.writer(f'Samples/{self.headers[0].title}/{self.headers[0].distance[0]:.2f}') as group:\n t1 = time.monotonic()\n self.sendProgress('Writing HDF5 file...')\n # Set attributes of the <dist> group from the averaged header.\n self.h5io.writeHeader(self.averagedHeader, group)\n 
self.h5io.writeExposure(self.averagedExposure, group)\n try:\n del group['badfsns']\n except KeyError:\n pass\n group['badfsns'] = np.array(sorted(self.result.badfsns), dtype=np.int)\n try:\n del group['goodindex']\n except KeyError:\n pass\n group['goodindex'] = self.goodindex\n self.h5io.writeCurve(self.averagedCurve, group, 'curve_averaged')\n self.h5io.writeCurve(self.reintegratedCurve, group, 'curve_reintegrated')\n try:\n del group['curve']\n except KeyError:\n pass\n group['curve'] = h5py.SoftLink('curve_averaged')\n self.h5io.writeOutlierTest(group.name, self.outliertest)\n # save all curves\n for groupname in ['allcurves', 'curves']:\n try:\n del group[groupname]\n except KeyError:\n pass\n curvesgroup = group.create_group('allcurves')\n goodcurvesgroup = group.create_group('curves')\n # we will write outlier results per-curve. Do the common calculations beforehand.\n scoreindex = 0\n for i, (h, isgood) in enumerate(zip(self.headers, self.goodindex)):\n self.h5io.writeCurve(self.curves[:, :, i], curvesgroup, str(h.fsn))\n dataset = curvesgroup[str(h.fsn)]\n self.h5io.writeHeader(h, dataset)\n if isgood:\n dataset.attrs['correlmat_bad'] = int(h.fsn in self.result.badfsns)\n dataset.attrs['correlmat_discrp'] = self.outliertest.score[scoreindex]\n scoreindex += 1\n else:\n dataset.attrs['correlmat_bad'] = -1\n dataset.attrs['correlmat_discrp'] = np.nan\n # make links in the 'curves' group to those fsns which were not bad at the beginning of this procedure\n if (h.fsn not in self.result.badfsns) or (h.fsn in self.result.newbadfsns):\n goodcurvesgroup[str(h.fsn)] = h5py.SoftLink(dataset.name)\n\n self.result.time_output_write = time.monotonic() - t1\n self.result.time_output_waitforlock = t1 - t0\n self.result.time_output = time.monotonic() - t0", "title": "" }, { "docid": "295b6199ac04d36bc7052db8ece4d1f2", "score": "0.5261463", "text": "def ProduceCombinedUnifiedTreeandHaloCatalog(fname,numsnaps,tree,numhalos,halodata,atime,\n\tdescripdata={'Title':'Tree and Halo catalog of sim', 'VELOCIraptor_version':1.15, 'Tree_version':1.1, 'Particle_num_threshold':20, 'Temporal_linking_length':1, 'Flag_gas':False, 'Flag_star':False, 'Flag_bh':False},\n\tcosmodata={'Omega_m':1.0, 'Omega_b':0., 'Omega_Lambda':0., 'Hubble_param':1.0,'BoxSize':1.0, 'Sigma8':1.0},\n\tunitdata={'UnitLength_in_Mpc':1.0, 'UnitVelocity_in_kms':1.0,'UnitMass_in_Msol':1.0, 'Flag_physical_comoving':True,'Flag_hubble_flow':False},\n\tpartdata={'Flag_gas':False, 'Flag_star':False, 'Flag_bh':False},\n\tibuildheadtail=0,ibuildmajormergers=0, TEMPORALHALOIDVAL=1000000000000):\n\n\tif (ibuildheadtail==1):\n\t\tBuildTemporalHeadTail(numsnaps,tree,numhalos,halodata)\n\tif (ibuildmajormergers==1):\n\t\tIdentifyMergers(numsnaps,tree,numhalos,halodata,boxsize,hval,atime)\n\thdffile=h5py.File(fname+\".snap.hdf.data\",'w')\n\theadergrp=hdffile.create_group(\"Header\")\n\t#store useful information such as number of snapshots, halos,\n\t#cosmology (Omega_m,Omega_b,Hubble_param,Omega_Lambda, Box size)\n\t#units (Physical [1/0] for physical/comoving flag, length in Mpc, km/s, solar masses, Gravity\n\t#and TEMPORALHALOIDVAL used to traverse tree information (converting halo ids to haloindex or snapshot), Reverse_order [1/0] for last snap listed first)\n\t#set the attributes of the header\n\theadergrp.attrs[\"NSnaps\"]=numsnaps\n\t#overall description\n\theadergrp.attrs[\"Title\"]=descripdata[\"Title\"]\n\t#simulation box 
size\n\theadergrp.attrs[\"BoxSize\"]=cosmodata[\"BoxSize\"]\n\tfindergrp=headergrp.create_group(\"HaloFinder\")\n\tfindergrp.attrs[\"Name\"]=\"VELOCIraptor\"\n\tfindergrp.attrs[\"Version\"]=descripdata[\"VELOCIraptor_version\"]\n\tfindergrp.attrs[\"Particle_num_threshold\"]=descripdata[\"Particle_num_threshold\"]\n\n\ttreebuildergrp=headergrp.create_group(\"TreeBuilder\")\n\ttreebuildergrp.attrs[\"Name\"]=\"VELOCIraptor-Tree\"\n\ttreebuildergrp.attrs[\"Version\"]=descripdata[\"Tree_version\"]\n\ttreebuildergrp.attrs[\"Temporal_linking_length\"]=descripdata[\"Temporal_linking_length\"]\n\n\t#cosmological params\n\tcosmogrp=headergrp.create_group(\"Cosmology\")\n\tfor key in cosmodata.keys():\n\t\tif (key!='BoxSize'): cosmogrp.attrs[key]=cosmodata[key]\n\t#unit params\n\tunitgrp=headergrp.create_group(\"Units\")\n\tfor key in unitdata.keys():\n\t\tunitgrp.attrs[key]=unitdata[key]\n\t#particle types\n\tpartgrp=headergrp.create_group(\"Parttypes\")\n\tpartgrp.attrs[\"Flag_gas\"]=descripdata[\"Flag_gas\"]\n\tpartgrp.attrs[\"Flag_star\"]=descripdata[\"Flag_star\"]\n\tpartgrp.attrs[\"Flag_bh\"]=descripdata[\"Flag_bh\"]\n\n\t#now have finished with header\n\n\t#now need to create groups for halos and then a group containing tree information\n\tsnapsgrp=hdffile.create_group(\"Snapshots\")\n\t#internal tree keys\n\ttreekeys=[\"RootHead\", \"RootHeadSnap\", \"Head\", \"HeadSnap\", \"Tail\", \"TailSnap\", \"RootTail\", \"RootTailSnap\", \"Num_progen\"]\n\n\tfor i in range(numsnaps):\n\t\t#note that I normally have information in reverse order so that might be something in the units\n\t\tsnapgrp=snapsgrp.create_group(\"Snap_%03d\"%(numsnaps-1-i))\n\t\tsnapgrp.attrs[\"Snapnum\"]=i\n\t\tsnapgrp.attrs[\"NHalos\"]=numhalos[i]\n\t\tsnapgrp.attrs[\"scalefactor\"]=atime[i]\n\t#now close file and use the pytables interface so as to write the table\n\thdffile.close()\n\t#now write tables using pandas interface\n\tfor i in range(numsnaps):\n\t\t#lets see if we can alter the code to write a table\n\t\tkeys=halodata[i].keys()\n\t\t#remove tree keys\n\t\tfor tkey in treekeys: keys.remove(tkey)\n\t\t#make temp dict\n\t\tdictval=dict()\n\t\tfor key in keys:\n\t\t\tdictval[key]=halodata[i][key]\n\t\t#make a pandas DataFrame using halo dictionary\n\t\tdf=pd.DataFrame.from_dict(dictval)\n\t\tdf.to_hdf(fname+\".snap.hdf.data\",\"Snapshots/Snap_%03d/Halos\"%(numsnaps-1-i), format='table', mode='a')\n\n\t#reopen with h5py interface\n\thdffile=h5py.File(fname+\".snap.hdf.data\",'a')\n\t#then write tree information in separate group\n\ttreegrp=hdffile.create_group(\"MergerTree\")\n\t#Tree group should contain\n\t\"\"\"\n\t\tHaloSnapID\n\t\tHaloSnapNum\n\t\tHaloSnapIndex\n\t\tProgenitorIndex\n\t\tProgenitorSnapnum\n\t\tProgenitorID\n\t\tDescendantIndex\n\t\t..\n\t\t..\n\t\tRootProgenitorIndex\n\t\t..\n\t\t..\n\t\tRootDescendantIndex\n\t\"\"\"\n\t#to save on memory, allocate each block separately\n\t#store halo information\n\ttothalos=sum(numhalos)\n\ttdata=np.zeros(tothalos,dtype=halodata[0][\"ID\"].dtype)\n\tcount=0\n\tfor i in range(numsnaps):\n\t\ttdata[count:int(numhalos[i])+count]=halodata[i][\"ID\"]\n\t\tcount+=int(numhalos[i])\n\ttreegrp.create_dataset(\"HaloSnapID\",data=tdata)\n\ttdata=np.zeros(tothalos,dtype=np.uint32)\n\tcount=0\n\tfor i in range(numsnaps):\n\t\ttdata[count:int(numhalos[i])+count]=i\n\t\tcount+=int(numhalos[i])\n\ttreegrp.create_dataset(\"HaloSnapNum\",data=tdata)\n\ttdata=np.zeros(tothalos,dtype=np.uint64)\n\tcount=0\n\tfor i in 
range(numsnaps):\n\t\ttdata[count:int(numhalos[i])+count]=range(int(numhalos[i]))\n\t\tcount+=int(numhalos[i])\n\ttreegrp.create_dataset(\"HaloSnapIndex\",data=tdata)\n\t#store progenitors\n\ttdata=np.zeros(tothalos,dtype=halodata[0][\"Tail\"].dtype)\n\tcount=0\n\tfor i in range(numsnaps):\n\t\ttdata[count:int(numhalos[i])+count]=halodata[i][\"Tail\"]\n\t\tcount+=int(numhalos[i])\n\ttreegrp.create_dataset(\"ProgenitorID\",data=tdata)\n\ttdata=np.zeros(tothalos,dtype=halodata[0][\"TailSnap\"].dtype)\n\tcount=0\n\tfor i in range(numsnaps):\n\t\ttdata[count:int(numhalos[i])+count]=halodata[i][\"TailSnap\"]\n\t\tcount+=int(numhalos[i])\n\ttreegrp.create_dataset(\"ProgenitorSnapnum\",data=tdata)\n\ttdata=np.zeros(tothalos,dtype=np.uint64)\n\tcount=0\n\tfor i in range(numsnaps):\n\t\ttdata[count:int(numhalos[i])+count]=(halodata[i][\"Tail\"]%TEMPORALHALOIDVAL-1)\n\t\tcount+=int(numhalos[i])\n\ttreegrp.create_dataset(\"ProgenitorIndex\",data=tdata)\n\t#store descendants\n\ttdata=np.zeros(tothalos,dtype=halodata[0][\"Head\"].dtype)\n\tcount=0\n\tfor i in range(numsnaps):\n\t\ttdata[count:int(numhalos[i])+count]=halodata[i][\"Head\"]\n\t\tcount+=int(numhalos[i])\n\ttreegrp.create_dataset(\"DescendantID\",data=tdata)\n\ttdata=np.zeros(tothalos,dtype=halodata[0][\"HeadSnap\"].dtype)\n\tcount=0\n\tfor i in range(numsnaps):\n\t\ttdata[count:int(numhalos[i])+count]=halodata[i][\"HeadSnap\"]\n\t\tcount+=int(numhalos[i])\n\ttreegrp.create_dataset(\"DescendantSnapnum\",data=tdata)\n\ttdata=np.zeros(tothalos,dtype=np.uint64)\n\tcount=0\n\tfor i in range(numsnaps):\n\t\ttdata[count:int(numhalos[i])+count]=(halodata[i][\"Head\"]%TEMPORALHALOIDVAL-1)\n\t\tcount+=int(numhalos[i])\n\ttreegrp.create_dataset(\"DescendantIndex\",data=tdata)\n\t#store progenitors\n\ttdata=np.zeros(tothalos,dtype=halodata[0][\"RootTail\"].dtype)\n\tcount=0\n\tfor i in range(numsnaps):\n\t\ttdata[count:int(numhalos[i])+count]=halodata[i][\"RootTail\"]\n\t\tcount+=int(numhalos[i])\n\ttreegrp.create_dataset(\"RootProgenitorID\",data=tdata)\n\ttdata=np.zeros(tothalos,dtype=halodata[0][\"RootTailSnap\"].dtype)\n\tcount=0\n\tfor i in range(numsnaps):\n\t\ttdata[count:int(numhalos[i])+count]=halodata[i][\"RootTailSnap\"]\n\t\tcount+=int(numhalos[i])\n\ttreegrp.create_dataset(\"RootProgenitorSnapnum\",data=tdata)\n\ttdata=np.zeros(tothalos,dtype=np.uint64)\n\tcount=0\n\tfor i in range(numsnaps):\n\t\ttdata[count:int(numhalos[i])+count]=(halodata[i][\"RootTail\"]%TEMPORALHALOIDVAL-1)\n\t\tcount+=int(numhalos[i])\n\ttreegrp.create_dataset(\"RootProgenitorIndex\",data=tdata)\n\t#store descendants\n\ttdata=np.zeros(tothalos,dtype=halodata[0][\"RootHead\"].dtype)\n\tcount=0\n\tfor i in range(numsnaps):\n\t\ttdata[count:int(numhalos[i])+count]=halodata[i][\"RootHead\"]\n\t\tcount+=int(numhalos[i])\n\ttreegrp.create_dataset(\"RootDescendantID\",data=tdata)\n\ttdata=np.zeros(tothalos,dtype=halodata[0][\"RootHeadSnap\"].dtype)\n\tcount=0\n\tfor i in range(numsnaps):\n\t\ttdata[count:int(numhalos[i])+count]=halodata[i][\"RootHeadSnap\"]\n\t\tcount+=int(numhalos[i])\n\ttreegrp.create_dataset(\"RootDescendantSnapnum\",data=tdata)\n\ttdata=np.zeros(tothalos,dtype=np.uint64)\n\tcount=0\n\tfor i in range(numsnaps):\n\t\ttdata[count:int(numhalos[i])+count]=(halodata[i][\"RootHead\"]%TEMPORALHALOIDVAL-1)\n\t\tcount+=int(numhalos[i])\n\ttreegrp.create_dataset(\"RootDescendantIndex\",data=tdata)\n\t#store number of progenitors\n\ttdata=np.zeros(tothalos,dtype=np.uint32)\n\tcount=0\n\tfor i in 
range(numsnaps):\n\t\ttdata[count:int(numhalos[i])+count]=halodata[i][\"Num_progen\"]\n\t\tcount+=int(numhalos[i])\n\ttreegrp.create_dataset(\"NProgen\",data=tdata)\n\n\thdffile.close()", "title": "" }, { "docid": "16445b72f0367edad1d91f339993db9e", "score": "0.5249218", "text": "def createh5(filename, mode=None, **kwargs):\n if mode is None: \n mode = 'a'\n params = {'libver':'latest', 'track_order':True}\n params.update(kwargs)\n attrs = params.pop('attrs') if 'attrs' in params else dict()\n with h5file(filename, mode, **params) as f:\n f.attrs.update(attrs)\n return Group_autoiter(__h5file__(filename, params), '/')", "title": "" }, { "docid": "ec650ccdd21dd4572ccb4eabfaa1a3a5", "score": "0.5233218", "text": "def _save_superstar_grids(self, hs_runner):\n ss_dir = join(self.out_dir, \"superstar_grids\")\n if not exists(ss_dir):\n os.mkdir(ss_dir)\n for s in hs_runner.superstar_grids:\n s.grid.write(join(ss_dir, \"superstar_{}.ccp4\".format(s.identifier)))\n shutil.make_archive(ss_dir, 'zip', ss_dir)\n shutil.rmtree(ss_dir)", "title": "" }, { "docid": "2af67287cd549a5b05ba8c242ce5370a", "score": "0.51985604", "text": "def tarball_create(param_dict, proj_dict, catl_ext='hdf5'):\n Prog_msg = param_dict['Prog_msg' ]\n ## List of Mock catalogues\n catl_path_arr = cu.Index(proj_dict['mock_cat_mc'], catl_ext)\n ## README file\n # Downloading working README file\n readme_file = os.path.join( proj_dict['base_dir'],\n 'references',\n 'README_RTD.pdf')\n cu.File_Download_needed(readme_file, param_dict['readme_url'])\n cu.File_Exists(readme_file)\n # Cartesian coordinates for all mocks\n cart_pos_fig = os.path.join( proj_dict['fig_dir'],\n '{0}_{1}_{2}_xyz_mocks.{3}'.format(\n param_dict['survey'],\n param_dict['halotype'],\n param_dict['cosmo_choice'],\n 'pdf'))\n # Luminosity function\n lum_func_catls = os.path.join( proj_dict['fig_dir'],\n '{0}_{1}_{2}_lum_function_mocks.{3}'.format(\n param_dict['survey'],\n param_dict['halotype'],\n param_dict['cosmo_choice'],\n 'pdf'))\n ## Saving to TAR file\n tar_file_path = os.path.join( proj_dict['tar_dir'],\n '{0}_{1}_catls.tar.gz'.format(\n param_dict['survey_name'],\n param_dict['halotype']))\n # Opening file\n with tarfile.open(tar_file_path, mode='w:gz') as tf:\n # README file\n tf.add(readme_file, arcname=os.path.basename(readme_file))\n # Figures\n tf.add(cart_pos_fig, arcname=os.path.basename(cart_pos_fig))\n tf.add(lum_func_catls, arcname=os.path.basename(lum_func_catls))\n for file_kk in catl_path_arr:\n ## Reading in DataFrame\n gal_pd_kk = cu.read_hdf5_file_to_pandas_DF(file_kk)\n ## DataFrame `without` certain columns\n gal_pd_mod = catl_drop_cols(gal_pd_kk)\n ## Saving modified DataFrame to file\n file_mod_kk = file_kk+'.mod'\n cu.pandas_df_to_hdf5_file(gal_pd_mod, file_mod_kk, key='\\gal_catl')\n cu.File_Exists(file_mod_kk)\n # Saving to Tar-file\n tf.add(file_mod_kk, arcname=os.path.basename(file_kk))\n # Deleting extra file\n os.remove(file_mod_kk)\n tf.close()\n cu.File_Exists(tar_file_path)\n if param_dict['verbose']:\n print('{0} TAR file saved as: {1}'.format(Prog_msg, tar_file_path))", "title": "" }, { "docid": "5c71b274a5e89e859c80d60018925cad", "score": "0.5181026", "text": "def write_partitioned(ratings, output_dir):\n\n dates = pd.to_datetime(ratings[\"timestamp\"], unit=\"s\")\n\n for (year, month), grp in ratings.groupby([dates.dt.year, dates.dt.month]):\n output_path = output_dir / str(year) / f\"{month:02d}.csv\"\n output_path.parent.mkdir(parents=True, exist_ok=True)\n grp.to_csv(output_path, index=False)", "title": "" }, { 
"docid": "0e8c61a1752f9dec28e5223291e3efea", "score": "0.516821", "text": "def create_seg_exports(self):\n try:\n if self.output_dir == None:\n out_dir = \"SEG_Output\"\n os.mkdir(\"SEG_Output\")\n else:\n out_dir = self.output_dir\n os.mkdir(self.output_dir)\n except:\n pass\n\n self.annots_df.apply(self.drop_dupes, axis=1)\n\n for dataset_id in self.annots_df[\"datasetId\"].unique():\n self.dataset_annots = self.annots_df[self.annots_df.datasetId == dataset_id]\n for study_uid in self.studies:\n dicom_hier = self.dicom_hierarchy[study_uid]\n for series_dict in dicom_hier:\n for series_uid in series_dict:\n sops = series_dict[series_uid]\n\n annotations = self.dataset_annots[\n self.dataset_annots.SeriesInstanceUID == series_uid\n ]\n annotations = annotations[annotations[\"scope\"] == \"INSTANCE\"]\n if annotations.empty:\n continue\n\n self.dicom_tags = self.tags_df[\n self.tags_df.SeriesInstanceUID == series_uid\n ].dicomTags.values[0]\n annotators = annotations.createdById.unique()\n instance_uid = pydicom.uid.generate_uid(prefix=None)\n date = datetime.now().strftime(\"%Y%m%d\")\n time = datetime.now().strftime(\"%H%M%S\")\n\n sop_instance_num_map = {}\n for sop in sops:\n sop_dicom_tags = self.tags_df[\n self.tags_df.SOPInstanceUID == sop\n ].dicomTags.values[0]\n if \"InstanceNumber\" in sop_dicom_tags:\n sop_instance_num_map[sop] = sop_dicom_tags[\"InstanceNumber\"]\n else:\n sop_instance_num_map[sop] = \"1\"\n\n def create_instance_number(row):\n return sop_instance_num_map[row[\"SOPInstanceUID\"]]\n\n try:\n annotations[\"instanceNumber\"] = annotations.apply(\n create_instance_number, axis=1\n )\n except:\n continue\n annotations = annotations.sort_values(\n [\"labelGroupName\", \"labelId\", \"instanceNumber\"], ignore_index=True\n ) # sort by label group then annotation then appearance in series\n\n # File meta info data elements\n file_meta = FileMetaDataset()\n file_meta.FileMetaInformationVersion = b\"\\x00\\x01\"\n file_meta.TransferSyntaxUID = \"1.2.840.10008.1.2.1\"\n file_meta.MediaStorageSOPInstanceUID = instance_uid # Create Instance UID # Media Storage SOP Instance UID\n file_meta.ImplementationClassUID = str(\n pydicom.uid.PYDICOM_IMPLEMENTATION_UID\n ) # Implementation Class UID\n file_meta.ImplementationVersionName = str(\n pydicom.__version__\n ) # Implementation Version Name\n file_meta.SourceApplicationEntityTitle = \"POSDA\"\n\n # Main data elements\n ds = Dataset()\n\n ds = self.place_tags(self.dicom_tags, ds, self.dicom_tag_heirarchy, True)\n\n ds.SpecificCharacterSet = \"ISO_IR 192\"\n ds.SOPClassUID = \"1.2.840.10008.5.1.4.1.1.66.4\"\n ds.SOPInstanceUID = instance_uid\n ds.SeriesDate = str(date) # Series Date\n ds.ContentDate = str(date) # Content Date\n ds.SeriesTime = str(time) # Series Time\n ds.ContentTime = str(time) # Series Time\n ds.Manufacturer = \"MDAI\"\n ds.Modality = \"SEG\"\n\n # Referenced Series Sequence\n refd_series_sequence = Sequence()\n ds.ReferencedSeriesSequence = refd_series_sequence\n\n # Referenced Series Sequence: Referenced Series 1\n refd_series1 = Dataset()\n refd_series_sequence.append(refd_series1)\n\n # Referenced Series Sequence: Referenced Series 1\n refd_series1 = Dataset()\n refd_series_sequence.append(refd_series1)\n refd_series1.SeriesInstanceUID = series_uid\n\n # Referenced Instance Sequence\n refd_instance_sequence = Sequence()\n refd_series1.ReferencedInstanceSequence = refd_instance_sequence\n\n ds.SegmentationType = \"BINARY\"\n\n for annotator_id in annotators:\n annotator_annots = 
annotations[annotations.createdById == annotator_id]\n\n if self.combine:\n label_group_sets = [annotator_annots.labelGroupName.unique()]\n else:\n label_group_sets = [\n [group] for group in annotator_annots.labelGroupName.unique()\n ]\n\n ds.SamplesPerPixel = 1\n ds.PhotometricInterpretation = \"MONOCHROME2\"\n ds.BitsAllocated = 1\n ds.BitsStored = 1\n ds.HighBit = 0\n ds.PixelRepresentation = 0\n ds.LossyImageCompression = \"00\"\n\n for label_group_set in label_group_sets:\n label_group_annots = annotator_annots[\n annotator_annots.labelGroupName.isin(label_group_set)\n ]\n\n # Segment Sequence\n segment_sequence = Sequence()\n ds.SegmentSequence = segment_sequence\n\n self.imgs = []\n self.seen_labels = set()\n self.name_number_map = {}\n self.included_sops = []\n self.unique_sops = set()\n self.label_groups = list(annotations.labelGroupName.unique())\n self.prev_annot = None\n label_group_annots.apply(self.img_insert, args=(ds,), axis=1)\n\n ds.NumberOfFrames = len(\n self.imgs\n ) # create during last parts of SEG file (should equal length of annot_df)\n ds.PixelData = pack_bits(np.array(self.imgs))\n\n for sop in self.unique_sops:\n sop_dicom_tags = self.tags_df[\n self.tags_df.SOPInstanceUID == sop\n ].dicomTags.values[0]\n refd_instance1 = Dataset()\n refd_instance_sequence.append(refd_instance1)\n if \"SOPClassUID\" in sop_dicom_tags:\n refd_instance1.ReferencedSOPClassUID = sop_dicom_tags[\n \"SOPClassUID\"\n ]\n refd_instance1.ReferencedSOPInstanceUID = sop_dicom_tags[\n \"SOPInstanceUID\"\n ]\n\n # Leaving it out for now but if nothing works then maybe try to add it back in blank and then with dummy values\n # Edit: added it back in but still unnecessary.\n # -----------------------------------------------------------------------\n # Dimension Index Sequence\n dimension_index_sequence = Sequence()\n ds.DimensionIndexSequence = dimension_index_sequence\n # -----------------------------------------------------------------------\n\n ds.ContentLabel = \"MDAI_SEG\"\n ds.ContentCreatorName = f\"annotator {annotator_id}\"\n\n # Leaving it out for now but if nothing works then maybe try to add it back in blank and then with dummy values\n # Edit: added it back in but still unnecessary.\n # -----------------------------------------------------------------------\n # Shared Functional Groups Sequence\n shared_functional_groups_sequence = Sequence()\n ds.SharedFunctionalGroupsSequence = (\n shared_functional_groups_sequence\n )\n # -----------------------------------------------------------------------\n\n # Per-frame Functional Groups Sequence\n per_frame_functional_groups_sequence = Sequence()\n ds.PerFrameFunctionalGroupsSequence = (\n per_frame_functional_groups_sequence\n )\n\n # Per-frame Functional Groups Sequence\n per_frame_functional_groups_sequence = Sequence()\n ds.PerFrameFunctionalGroupsSequence = (\n per_frame_functional_groups_sequence\n )\n\n # Per-frame Functional Groups Sequence\n per_frame_functional_groups_sequence = []\n\n # Loop through each frame with an annotation and create unique Per Frame Functional Group Sequence\n # ---------------------------------------------------------------------------\n for segment_number, sop in self.included_sops:\n label_names = \", \".join(\n label_group_annots[\"labelName\"].unique()\n )\n ds.SeriesDescription = (\n f\"Segmentation of {label_names} by annotator {annotator_id}\"\n )\n\n sop_dicom_tags = self.tags_df[\n self.tags_df.SOPInstanceUID == sop\n ].dicomTags.values[0]\n\n # Per-frame Functional Groups Sequence: Per-frame 
Functional Groups 1\n per_frame_functional_groups1 = Dataset()\n per_frame_functional_groups_sequence.append(\n per_frame_functional_groups1\n )\n\n # Derivation Image Sequence\n derivation_image_sequence = Sequence()\n per_frame_functional_groups1.DerivationImageSequence = (\n derivation_image_sequence\n )\n\n # Derivation Image Sequence: Derivation Image 1\n derivation_image1 = Dataset()\n derivation_image_sequence.append(derivation_image1)\n\n # Source Image Sequence\n source_image_sequence = Sequence()\n derivation_image1.SourceImageSequence = source_image_sequence\n\n # Source Image Sequence: Source Image 1\n source_image1 = Dataset()\n source_image_sequence.append(source_image1)\n if \"SOPClassUID\" in self.dicom_tags:\n source_image1.ReferencedSOPClassUID = self.dicom_tags[\n \"SOPClassUID\"\n ]\n source_image1.ReferencedSOPInstanceUID = self.dicom_tags[\n \"SOPInstanceUID\"\n ]\n\n # Purpose of Reference Code Sequence\n purpose_of_ref_code_sequence = Sequence()\n source_image1.PurposeOfReferenceCodeSequence = (\n purpose_of_ref_code_sequence\n )\n\n # Purpose of Reference Code Sequence: Purpose of Reference Code 1\n purpose_of_ref_code1 = Dataset()\n purpose_of_ref_code_sequence.append(purpose_of_ref_code1)\n purpose_of_ref_code1.CodeValue = \"121322\"\n purpose_of_ref_code1.CodingSchemeDesignator = \"DCM\"\n purpose_of_ref_code1.CodeMeaning = (\n \"Source image for image processing operation\"\n )\n\n # Derivation Code Sequence\n derivation_code_sequence = Sequence()\n derivation_image1.DerivationCodeSequence = (\n derivation_code_sequence\n )\n\n # Derivation Code Sequence: Derivation Code 1\n derivation_code1 = Dataset()\n derivation_code_sequence.append(derivation_code1)\n derivation_code1.CodeValue = \"113076\"\n derivation_code1.CodingSchemeDesignator = \"DCM\"\n derivation_code1.CodeMeaning = \"Segmentation\"\n\n # Segment Identification Sequence\n segment_id_seq = Dataset()\n per_frame_functional_groups1.SegmentIdentificationSequence = [\n segment_id_seq\n ]\n\n # Segment Number\n segment_id_seq.ReferencedSegmentNumber = segment_number\n\n per_frame_functional_groups1 = self.place_tags(\n sop_dicom_tags,\n per_frame_functional_groups1,\n self.dicom_tag_heirarchy[\n \"PerFrameFunctionalGroupsSequence\"\n ],\n False,\n )\n # -------------------------------------------------------------------------\n\n ds.PerFrameFunctionalGroupsSequence = (\n per_frame_functional_groups_sequence\n )\n\n ds.file_meta = file_meta\n ds.is_implicit_VR = False\n ds.is_little_endian = True\n\n if self.included_sops:\n if self.combine:\n ds.save_as(\n f\"{os.getcwd()}/{out_dir}/DICOM_SEG_{dataset_id}_{series_uid}_annotator_{annotator_id}.dcm\",\n False,\n )\n else:\n ds.save_as(\n f\"{os.getcwd()}/{out_dir}/DICOM_SEG_{dataset_id}_label_group_{label_group_set[0]}_series_{series_uid}_annotator_{annotator_id}.dcm\",\n False,\n )\n print(f\"Successfully exported DICOM SEG files into {out_dir}\")", "title": "" }, { "docid": "d9fb4baa1caf9d2e244af1752f0b023a", "score": "0.5165111", "text": "def compress_all_data():\n\n output_file = 'qbank_data.tar.gz'\n\n output_path = os.path.join(OUTPUT_DIR, 'qbank', output_file)\n\n with tarfile.open(output_path, 'w:gz') as tar:\n\n mongo_dump_dir = os.path.join(OUTPUT_DIR, 'qbank', 'mongo-dump')\n\n activity_dump_dir = os.path.join(OUTPUT_DIR, 'qbank', 'student-activities')\n\n tar.add(mongo_dump_dir, arcname=os.path.basename(mongo_dump_dir))\n\n #tar.add(activity_dump_dir, arcname=os.path.basename(activity_dump_dir))", "title": "" }, { "docid": 
"4d6011c79cbeceab7a098e4821f4bc3c", "score": "0.5148114", "text": "def write_by_name(fpath, *objs_list, **kwargs):\n length = len(objs_list)\n group_size = kwargs.pop('group_size', length)\n with stream.open(fpath, 'wb', **kwargs) as ostream:\n cursor = 0\n while cursor < length:\n ostream.write(*objs_list[cursor:cursor+group_size])\n cursor += group_size", "title": "" }, { "docid": "af681386ee088a4b407d27cbc964433f", "score": "0.5146606", "text": "def save(self, filepath, npf=-1, compression=None, verbose=1, n_jobs=1):\n\n path, name = os.path.split(filepath)\n if name.endswith('.h5'):\n name = '.'.join(name.split('.')[:-1])\n\n start = time.time()\n if npf != -1:\n \n global moddsets\n\n i = begin = end = 0\n args, moddsets = [], []\n while end < len(self.jets_i):\n end = min(end + npf, len(self.jets_i))\n\n arrays = {'jets_i': self.jets_i[begin:end], 'jets_i_cols': self.jets_i_cols,\n 'jets_f': self.jets_f[begin:end], 'jets_f_cols': self.jets_f_cols,\n 'filenames': self.filenames}\n\n if self.store_pfcs:\n arrays['pfcs'] = self.pfcs[begin:end]\n arrays['pfcs_cols'] = self.pfcs_cols\n\n if self.store_gens:\n arrays['gens'] = self.gens[begin:end]\n arrays['gens_cols'] = self.gens_cols\n\n filepath = os.path.join(path, '{}_{}'.format(name, i))\n\n moddset = self.__class__(_dataset=self.dataset, _arrays=arrays)\n\n if n_jobs == 1:\n moddset.save(filepath, compression=compression, verbose=verbose)\n\n else:\n moddsets.append(moddset)\n args.append((i, filepath, compression)) \n\n begin = end\n i += 1\n\n if n_jobs != 1:\n if verbose >= 1:\n l = len(args)\n pf = (l, 's' if l > 1 else '', time.time() - start)\n print('Constructed {} temporary MODDataset{} in {:.3f}s'.format(*pf))\n\n if n_jobs == -1:\n n_jobs = os.cpu_count()\n if n_jobs is None:\n n_jobs = 4\n\n start = time.time()\n with create_pool(processes=min(n_jobs, len(args))) as pool:\n for i,_ in enumerate(pool.imap_unordered(_moddset_save, args, chunksize=1)):\n if verbose >= 1 and ((i+1) % 5 == 0 or i+1 == len(args)):\n pf = (i+1, (i+1)/len(args)*100, time.time() - start)\n print(' Saved {} files, {:.2f}% done in {:.3f}s'.format(*pf))\n\n del moddsets\n \n return\n\n # compression opts\n compression = ({'compression': 'gzip', 'compression_opts': compression} \n if compression is not None else {})\n comp_str = '_compressed' if len(compression) else ''\n\n # ensure directory exists\n if not os.path.exists(path):\n if verbose >= 2:\n print('Creating', path)\n os.mkdir(path)\n\n if verbose >= 2:\n print('Saving to', path)\n\n filename = name + comp_str\n hf = h5py.File(os.path.join(path, filename + '.h5'), 'w')\n\n # jets_i\n jets_i = hf.create_dataset('jets_i', data=self.jets_i, **compression)\n jets_i.attrs.create('cols', np.asarray(self.jets_i_cols, dtype='S'))\n\n # jets_f\n jets_f = hf.create_dataset('jets_f', data=self.jets_f, **compression)\n jets_f.attrs.create('cols', np.asarray(self.jets_f_cols, dtype='S'))\n\n # pfcs\n if self.store_pfcs:\n pfcs = _write_large_object_array_to_h5(hf, 'pfcs', self.pfcs, \n ncols=len(self.pfcs_cols), **compression)\n pfcs.attrs.create('cols', np.asarray(self.pfcs_cols, dtype='S'))\n hf.create_dataset('pfcs_index', data=_make_particles_index(self.pfcs), **compression)\n\n # gens\n if self.store_gens:\n gens = _write_large_object_array_to_h5(hf, 'gens', self.gens, \n ncols=len(self.gens_cols), **compression)\n gens.attrs.create('cols', np.asarray(self.gens_cols, dtype='S'))\n hf.create_dataset('gens_index', data=_make_particles_index(self.gens), **compression)\n\n # filenames\n 
hf.create_dataset('filenames', data=self.filenames.astype('S'), **compression)\n\n # close\n hf.close()\n\n if verbose >= 1:\n args = (filename, len(self.jets_i), time.time() - start)\n print(' Saved {} with {} jets in {:.3f}s'.format(*args))", "title": "" }, { "docid": "c30f42120ef98b16049f68dfdfc9bc72", "score": "0.5138672", "text": "def _archive(self, label, files, delete_originals=True):\n if not os.path.exists(self.archive_store):\n os.mkdir(self.archive_store)\n tf = tarfile.open(label + \".tar.gz\",'w:gz')\n logging.info(\"Archiving data to file %s\" % tf.name)\n # Add data files\n archive_paths = []\n for file_path in files:\n archive_path = os.path.join(label, file_path)\n tf.add(os.path.join(self.root, file_path), archive_path)\n archive_paths.append(archive_path)\n tf.close()\n # Move the archive to self.archive_store\n shutil.copy(tf.name, self.archive_store) # shutil.move() doesn't work as expected if dataroot is a symbolic link\n os.remove(tf.name)\n # Delete original files.\n if delete_originals:\n for file_path in files:\n os.remove(os.path.join(self.root, file_path))\n self._last_label = label # useful for testing\n return archive_paths", "title": "" }, { "docid": "48e2e0577c8e31d6e77e6d744202225c", "score": "0.5128573", "text": "def write_hdf5_file(metadata, out_file, ts_file, tcoh_file, scoh_file, vel_file, mask_file, geom_file):\n ts_obj = timeseries(ts_file)\n ts_obj.open(print_msg=False)\n dateList = ts_obj.dateList\n numDate = len(dateList)\n\n # Open HDF5 File\n f = h5py.File(out_file, 'w')\n print('create HDF5 file: {} with w mode'.format(out_file))\n max_digit = 55\n\n ##### Group - Observation\n gName = 'HDFEOS/GRIDS/timeseries/observation'\n print('create group /{}'.format(gName))\n group = f.create_group(gName)\n\n ## O1 - displacement\n dsName = 'displacement'\n dsShape = (numDate, ts_obj.length, ts_obj.width)\n dsDataType = np.float32\n print(('create dataset /{d:<{w}} of {t:<10} in size of {s}'\n ' with compression={c}').format(d='{}/{}'.format(gName, dsName),\n w=max_digit,\n t='float32',\n s=dsShape,\n c=COMPRESSION))\n dset = group.create_dataset(dsName,\n shape=dsShape,\n maxshape=(None, dsShape[1], dsShape[2]),\n dtype=dsDataType,\n chunks=True,\n compression=COMPRESSION)\n\n print('write data acquition by acquition ...')\n prog_bar = ptime.progressBar(maxValue=numDate)\n for i in range(numDate):\n dset[i, :, :] = readfile.read(ts_file, datasetName=dateList[i])[0]\n prog_bar.update(i+1, suffix='{}/{} {}'.format(i+1, numDate, dateList[i]))\n prog_bar.close()\n\n # attributes\n dset.attrs['Title'] = dsName\n dset.attrs['MissingValue'] = FLOAT_ZERO\n dset.attrs['_FillValue'] = FLOAT_ZERO\n dset.attrs['Units'] = 'meters'\n\n ## O2 - date\n dsName = 'date'\n data = np.array(dateList, dtype=np.string_)\n dset = create_hdf5_dataset(group, dsName, data)\n\n ## O3 - perp baseline\n dsName = 'bperp'\n data = np.array(ts_obj.pbase, dtype=np.float32)\n dset = create_hdf5_dataset(group, dsName, data)\n\n ## O4 - velocity\n dsName = 'velocity'\n data = readfile.read(vel_file)[0]\n dset = create_hdf5_dataset(group, dsName, data)\n # attributes\n dset.attrs['Title'] = dsName\n dset.attrs['MissingValue'] = FLOAT_ZERO\n dset.attrs['_FillValue'] = FLOAT_ZERO\n dset.attrs['Units'] = 'm/yr'\n\n ##### Group - Quality\n gName = 'HDFEOS/GRIDS/timeseries/quality'\n print('create group /{}'.format(gName))\n group = f.create_group(gName)\n\n ## Q1 - temporalCoherence\n dsName = 'temporalCoherence'\n # read\n data = readfile.read(tcoh_file)[0]\n # write\n dset = 
create_hdf5_dataset(group, dsName, data)\n # attributes\n dset.attrs['Title'] = dsName\n dset.attrs['MissingValue'] = FLOAT_ZERO\n dset.attrs['_FillValue'] = FLOAT_ZERO\n dset.attrs['Units'] = '1'\n\n ## Q2 - avgSpatialCoherence\n dsName = 'avgSpatialCoherence'\n # read\n data = readfile.read(scoh_file)[0]\n # write\n dset = create_hdf5_dataset(group, dsName, data)\n # attributes\n dset.attrs['Title'] = dsName\n dset.attrs['MissingValue'] = FLOAT_ZERO\n dset.attrs['_FillValue'] = FLOAT_ZERO\n dset.attrs['Units'] = '1'\n\n ## Q3 - mask\n dsName = 'mask'\n # read\n data = readfile.read(mask_file, datasetName='mask')[0]\n # write\n dset = create_hdf5_dataset(group, dsName, data)\n # attributes\n dset.attrs['Title'] = dsName\n dset.attrs['MissingValue'] = BOOL_ZERO\n dset.attrs['_FillValue'] = BOOL_ZERO\n dset.attrs['Units'] = '1'\n \n\n ##### Group - Write Geometry\n # Required: height, incidenceAngle\n # Optional: rangeCoord, azimuthCoord, azimuthAngle, slantRangeDistance, waterMask, shadowMask\n gName = 'HDFEOS/GRIDS/timeseries/geometry'\n print('create group /{}'.format(gName))\n group = f.create_group(gName)\n\n geom_obj = geometry(geom_file)\n geom_obj.open(print_msg=False)\n for dsName in geom_obj.datasetNames:\n # read\n data = geom_obj.read(datasetName=dsName, print_msg=False)\n # write\n dset = create_hdf5_dataset(group, dsName, data)\n\n # attributes\n dset.attrs['Title'] = dsName\n if dsName in ['height', 'slantRangeDistance', 'bperp']:\n dset.attrs['MissingValue'] = FLOAT_ZERO\n dset.attrs['_FillValue'] = FLOAT_ZERO\n dset.attrs['Units'] = 'meters'\n\n elif dsName in ['incidenceAngle', 'azimuthAngle', 'latitude', 'longitude']:\n dset.attrs['MissingValue'] = FLOAT_ZERO\n dset.attrs['_FillValue'] = FLOAT_ZERO\n dset.attrs['Units'] = 'degrees'\n\n elif dsName in ['rangeCoord', 'azimuthCoord']:\n dset.attrs['MissingValue'] = FLOAT_ZERO\n dset.attrs['_FillValue'] = FLOAT_ZERO\n dset.attrs['Units'] = '1'\n\n elif dsName in ['waterMask', 'shadowMask']:\n dset.attrs['MissingValue'] = BOOL_ZERO\n dset.attrs['_FillValue'] = BOOL_ZERO\n dset.attrs['Units'] = '1'\n\n # Write Attributes to the HDF File\n print('write metadata to root level')\n for key, value in iter(metadata.items()):\n f.attrs[key] = value\n f.close()\n print('finished writing to {}'.format(out_file))", "title": "" }, { "docid": "dbbd598756eb39ad06f90109f086e14b", "score": "0.51182556", "text": "def write_group(pf, tag,mdef):\n tbl=pf.get_tbl(tag)\n filename=tag+\".csv\"\n fh=open(filename,\"w+\")\n fh.write('\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\n' % (\"Key\",\"Type\",\"Mutable\",\"Concept\"))\n for k in tbl:\n t=mdef.type(k)\n tstr=\"undefined\"\n if(t==MDtype.Int64):\n tstr=\"int\"\n elif(t==MDtype.Double):\n tstr=\"double\"\n elif(t==MDtype.String):\n tstr=\"string\"\n elif(t==MDtype.Boolean):\n tstr=\"boolean\"\n writeable=mdef.writeable(k)\n wstr=\"undefined\"\n if(writeable):\n wstr=\"Yes\"\n else:\n wstr=\"No\"\n fh.write('\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\n' % (k,tstr,wstr,mdef.concept(k)))\n fh.close()", "title": "" }, { "docid": "9d1c698f9e5670d4528426641511f69d", "score": "0.5116573", "text": "def create_group(self, name, attrs={}, track_order=True):\n with h5file(self.file.filename, 'a', **self.file.params) as f:\n o = f[self.name]\n g = o.create_group(name, track_order)\n g.attrs.update(attrs)\n return Group_autoiter(self.file, self.name+name+'/')", "title": "" }, { "docid": "464456aa5f2393a86d1b9683afc59df5", "score": "0.5110412", "text": "def create_group_directory(self, campaign_name, 
app_dir, group_name, runs,\n max_nprocs, nodes, launch_mode,\n component_subdirs, walltime, node_exclusive,\n timeout, machine,\n sosd_path=None,\n sos_analysis_path=None,\n tau_profiling=False, tau_tracing=False,\n kill_on_partial_failure=False,\n run_post_process_script=None,\n run_post_process_stop_on_failure=False,\n scheduler_options=None,\n run_dir_setup_script=None):\n script_dir = os.path.join(config.CHEETAH_PATH_SCHEDULER,\n self.scheduler_name, 'group')\n if not os.path.isdir(script_dir):\n raise ValueError(\"scheduler '%s' is not yet supported (path '%s')\"\n % (self.scheduler_name, script_dir))\n if scheduler_options is None:\n scheduler_options = {}\n copytree_to_dir(script_dir, self.output_directory)\n\n fobs_path = os.path.join(self.output_directory, 'fobs.json')\n min_nodes = 1\n\n f = open(fobs_path, 'w')\n fob_list = []\n for i, run in enumerate(runs):\n # TODO: abstract this to higher levels\n os.makedirs(run.run_path, exist_ok=True)\n\n # Create working dir for each component\n for rc in run.run_components:\n os.makedirs(rc.working_dir, exist_ok=True)\n\n if run.sosflow_profiling:\n run.insert_sosflow(sosd_path, sos_analysis_path,\n run.run_path,\n machine.processes_per_node)\n\n # Copy the global input files common to all components\n for input_rpath in run.inputs:\n copy_to_dir(input_rpath, run.run_path)\n\n # Copy input files requested by each component\n # save working dirs for later use\n working_dirs = {} # map component name to path\n for rc in run.run_components:\n working_dirs[rc.name] = rc.working_dir\n\n # if rc has an adios xml file, copy it to working dir\n if rc.adios_xml_file:\n copy_to_dir(rc.adios_xml_file, rc.working_dir)\n\n # now copy other inputs marked under component_inputs\n if rc.component_inputs is not None:\n for input_file in rc.component_inputs:\n dest = os.path.join(rc.working_dir,\n os.path.basename(\n input_file))\n # input type is symlink\n if type(input_file) == SymLink:\n os.symlink(input_file, dest)\n\n # input type is a regular file\n elif os.path.isfile(input_file):\n copy_to_dir(input_file, rc.working_dir)\n\n # Input file is a directory\n elif os.path.isdir(input_file):\n copytree_to_dir(input_file, dest)\n\n else:\n raise exc.CheetahException \\\n (\"Could not copy component input {}\"\n .format(input_file))\n\n # ADIOS XML param support\n adios_xml_params = \\\n run.instance.get_parameter_values_by_type(ParamAdiosXML) or \\\n run.instance.get_parameter_values_by_type(ParamADIOS2XML)\n for pv in adios_xml_params:\n working_dir = working_dirs[pv.target]\n\n # dirty way of getting the adios xml filename of the rc\n # that is represented by pv.target\n rc_adios_xml = self._get_rc_adios_xml_filename(\n run, pv.target)\n xml_filepath = os.path.join(working_dir,\n os.path.basename(rc_adios_xml))\n\n # Check if this is adios1 or adios2\n adios_version = get_adios_version(rc_adios_xml)\n\n if adios_version == 1:\n if pv.param_type == \"adios_transform\":\n adios_params.adios_xml_transform(\n xml_filepath,pv.group_name, pv.var_name, pv.value)\n elif pv.param_type == \"adios_transport\":\n # value could be\n # \"MPI_AGGREGATE:num_aggregators=64;num_osts\"\n # extract the method name and the method options\n method_name = pv.value\n method_opts = \"\"\n if \":\" in pv.value:\n value_tokens = pv.value.split(\":\", 1)\n method_name = value_tokens[0]\n method_opts = value_tokens[1]\n\n adios_params.adios_xml_transport(\n xml_filepath, pv.group_name, method_name,\n method_opts)\n else:\n raise exc.CheetahException(\"Unrecognized adios 
param\")\n\n else: # adios version == 2\n operation_value = list(pv.value.keys())[0]\n if pv.operation_name in ('engine', 'transport'):\n parameters = list(pv.value.values())[0]\n if pv.operation_name == 'engine':\n adios2.set_engine(xml_filepath, pv.io_name,\n operation_value, parameters)\n else:\n adios2.set_transport(xml_filepath, pv.io_name,\n operation_value, parameters)\n else: # operation_name == 'var_operation'\n var_name = list(pv.value.keys())[0]\n var_name_dict = pv.value[var_name]\n var_operation_value = list(var_name_dict.keys())[0]\n var_op_dict = var_name_dict[var_operation_value]\n parameters = var_op_dict\n adios2.set_var_operation(xml_filepath, pv.io_name,\n var_name,\n var_operation_value,\n parameters)\n\n # Calculate the no. of nodes required by this run.\n # This must be done after dataspaces support is added.\n if run.total_nodes > min_nodes:\n min_nodes = run.total_nodes\n\n # Generic config file support. Note: slurps entire\n # config file into memory, requires adding file to\n # campaign 'inputs' option.\n config_params = \\\n run.instance.get_parameter_values_by_type(ParamConfig)\n for pv in config_params:\n working_dir = working_dirs[pv.target]\n src_filepath = relative_or_absolute_path(app_dir,\n pv.config_filename)\n # Allow for relative pathnames in the spec\n src_filename = pv.config_filename\n if pv.config_filename[0] == '/':\n src_filename = os.path.basename(src_filepath)\n config_filepath = os.path.join(working_dir,\n src_filename)\n if not os.path.isfile(config_filepath):\n copy_to_path(src_filepath, config_filepath)\n lines = []\n # read and modify lines\n # hack: handle json files. currently works only on singly\n # nested json files\n if config_filepath.endswith(\".json\"):\n json_config_set_option(config_filepath, pv.match_string,\n pv.value)\n else: # handle other file types\n with open(config_filepath) as config_f:\n for line in config_f:\n line = line.replace(pv.match_string, pv.value)\n lines.append(line)\n # rewrite file with modified lines\n with open(config_filepath, 'w') as config_f:\n config_f.write(\"\".join(lines))\n\n # Key value config file support. Note: slurps entire\n # config file into memory, requires adding file to\n # campaign 'inputs' option.\n kv_params = \\\n run.instance.get_parameter_values_by_type(ParamKeyValue)\n for pv in kv_params:\n working_dir = working_dirs[pv.target]\n src_filepath = relative_or_absolute_path(app_dir,\n pv.config_filename)\n # Allow for relative pathnames in the spec\n src_filename = pv.config_filename\n if pv.config_filename[0] == '/':\n src_filename = os.path.basename(src_filepath)\n kv_filepath = os.path.join(working_dir, src_filename)\n if not os.path.isfile(kv_filepath):\n copy_to_path(src_filepath, kv_filepath)\n lines = []\n # read and modify lines\n key_found = False\n with open(kv_filepath) as kv_f:\n for line in kv_f:\n parts = line.split('=', 1)\n if len(parts) == 2:\n k = parts[0].strip()\n if k == pv.key_name:\n # assume all k=v type formats will\n # support no spaces around equals\n line = k + '=' + str(pv.value)\n # preserve a user comment if it exists\n if '!' 
in parts[1]:\n line = line + \" !\" + \\\n parts[1].strip().split('!')[1]\n line = line + '\\n'\n key_found = True\n lines.append(line)\n assert key_found, \\\n \"Issue parsing a ParamKeyValue: Could not find key {}\"\\\n \" in config file {}\".format(pv.key_name, src_filepath)\n # rewrite file with modified lines\n with open(kv_filepath, 'w') as kv_f:\n kv_f.write(\"\".join(lines))\n\n # Env var parameter values\n kv_params = run.instance.get_parameter_values_by_type(ParamEnvVar)\n for pv in kv_params:\n rc = run._get_rc_by_name(pv.target)\n rc.env[pv.option] = str(pv.value)\n\n # save code commands as text\n params_path_txt = os.path.join(run.run_path,\n self.run_command_name)\n with open(params_path_txt, 'w') as params_f:\n for rc in run.run_components:\n params_f.write(' '.join(map(shlex.quote,\n [rc.exe] + rc.args)))\n params_f.write('\\n')\n\n # save params as JSON for use in post-processing, more\n # useful for post-processing scripts then the command\n # text\n params_path_json = os.path.join(run.run_path,\n self.run_json_name)\n run_data = run.get_app_param_dict()\n with open(params_path_json, 'w') as params_f:\n json.dump(run_data, params_f, indent=2)\n\n fob_runs = []\n for j, rc in enumerate(run.run_components):\n if timeout is not None:\n rc.timeout = parse_timedelta_seconds(timeout)\n\n fob_runs.append(rc.as_fob_data())\n\n fob = dict(id=run.run_id, launch_mode=launch_mode, runs=fob_runs,\n working_dir=run.run_path, apps_dir=app_dir,\n kill_on_partial_failure=kill_on_partial_failure,\n post_process_script=run_post_process_script,\n post_process_stop_on_failure=\n run_post_process_stop_on_failure,\n post_process_args=[params_path_json],\n node_layout=run.node_layout.serialize_to_dict(),\n total_nodes=run.total_nodes,\n machine_name=machine.name,\n tau_profiling=tau_profiling, tau_tracing=tau_tracing)\n fob_list.append(fob)\n\n # write to file run dir\n run_fob_path = os.path.join(run.run_path,\n \"codar.cheetah.fob.json\")\n with open(run_fob_path, \"w\") as runf:\n runf.write(json.dumps(fob, sort_keys=True, indent=4))\n runf.write(\"\\n\")\n\n if run_dir_setup_script is not None:\n self._execute_run_dir_setup_script(run.run_path,\n run_dir_setup_script)\n\n # Get the size of the run dir. This should be the last step\n # in the creation of the run dir.\n self._get_pre_submit_dir_size(run)\n\n # Write fob_list to group-level json file\n f.write(json.dumps(fob_list, sort_keys=True, indent=4))\n f.close()\n\n if nodes is None:\n nodes = min_nodes\n elif nodes < min_nodes:\n raise exc.CheetahException(\n \"nodes for group is too low, need at least %d, got %d\"\n % (min_nodes, nodes))\n\n # TODO: what case does this handle? 
should have a test case for\n # it.\n if machine.node_exclusive:\n group_ppn = machine.processes_per_node\n else:\n group_ppn = math.ceil((max_nprocs) / nodes)\n\n env_path = os.path.join(self.output_directory, 'group-env.sh')\n group_env = templates.GROUP_ENV_TEMPLATE.format(\n walltime=parse_timedelta_seconds(walltime),\n max_procs=max_nprocs,\n processes_per_node=group_ppn,\n nodes=nodes,\n node_exclusive=node_exclusive,\n account=scheduler_options.get('project', ''),\n queue=scheduler_options.get('queue', ''),\n reservation=scheduler_options.get('reservation', ''),\n custom=scheduler_options.get('custom', ''),\n # TODO: require name be valid for all schedulers\n campaign_name='codar.cheetah.'+campaign_name,\n group_name=group_name,\n constraint=scheduler_options.get('constraint', ''),\n license=scheduler_options.get('license', ''),\n machine_name=machine.name\n )\n with open(env_path, 'w') as f:\n f.write(group_env)\n\n return nodes", "title": "" }, { "docid": "c0d0b11610732aabe5234e6c18d97dcc", "score": "0.51037985", "text": "def write_distributional_spf_dataset(data: Dict[str, np.array], full_path: str) -> None:\n file = h5py.File(full_path, 'w')\n for k, v in data.items():\n file.create_dataset(name=k, data=v)\n file.close()", "title": "" }, { "docid": "1ada4bdf303e3599dd657b9b298ede6d", "score": "0.51037645", "text": "def make_archive(sim_epoch: rebound.Simulation, \n mjd0: int, mjd1: int, \n time_step: int, save_step: int,\n save_elements: bool,\n progbar: bool) -> rebound.SimulationArchive:\n # Get archive filename from simulation\n fname_archive: str = calc_fname_archive(sim_epoch, mjd0, mjd1)\n # Path of archive\n path_archive = os.path.join(dir_archive, fname_archive)\n try:\n # First try to load the named archive\n sa = rebound.SimulationArchive(filename=path_archive)\n print(f'Loaded archive {fname_archive}.')\n except:\n # If the archive is not on disk, save it to disk\n print(f'Generating archive {fname_archive}\\n'\n f'from mjd {mjd0} to {mjd1}, time_step={time_step}, save_step={save_step}...')\n make_archive_impl(sim_epoch=sim_epoch, mjd0=mjd0, mjd1=mjd1, time_step=time_step, \n save_step=save_step, save_elements=save_elements, progbar=progbar)\n # Load the new archive into memory\n sa = rebound.SimulationArchive(filename=path_archive)\n\n # Bind the archive filename to the archive\n sa.fname_archive: str = fname_archive\n sa.fname_np: str = fname_archive.replace('.bin', '.npz')\n sa.body_collection = sim_epoch.body_collection\n # Bind various dates and time steps\n sa.epoch = sim_epoch.epoch\n sa.mjd0: int = mjd0\n sa.mjd1: int = mjd1\n sa.time_step: int = time_step\n # Bind the simulation at the epoch\n sa.sim_epoch = sim_epoch\n\n return sa", "title": "" }, { "docid": "6fda04613d566a0c745a47e068a70c02", "score": "0.5094346", "text": "def archiveBag(bags,queue):\n grouptasks=[]\n notValid=[]\n valid=[]\n for bag in bags:\n vBag=validateBag(bag,fast=True)\n if vBag['valid']:\n source=bag\n updateBagValidatationMetadata(bag.split('/')[-1],'local',{\"valid\":True,\"timestamp\":datetime.now().isoformat()})\n valid.append({\"bag\":source.split('/')[-1],\"valid\":datetime.now()})\n destination= os.path.join(petaLibrarySubDirectory,source.split('/')[-1])\n grouptasks.append(scpPetaLibrary.si(source,destination).set(queue=queue))\n else:\n updateBagValidatationMetadata(bag,'local',{\"valid\":False,\"timestamp\":datetime.now().isoformat()})\n notValid.append(bag)\n res = group(grouptasks)()\n return {\"subtasks\":len(grouptasks),\"valid\":valid,\"notvalid\":notValid}", "title": 
"" }, { "docid": "94fed718476a0dbcb6c7272365a02c7d", "score": "0.50901425", "text": "def ProduceUnifiedTreeandHaloCatalog(fname,numsnaps,tree,numhalos,halodata,atime,\n\tdescripdata={'Title':'Tree and Halo catalog of sim', 'VELOCIraptor_version':1.15, 'Tree_version':1.1, 'Particle_num_threshold':20, 'Temporal_linking_length':1, 'Flag_gas':False, 'Flag_star':False, 'Flag_bh':False},\n\tcosmodata={'Omega_m':1.0, 'Omega_b':0., 'Omega_Lambda':0., 'Hubble_param':1.0,'BoxSize':1.0, 'Sigma8':1.0},\n\tunitdata={'UnitLength_in_Mpc':1.0, 'UnitVelocity_in_kms':1.0,'UnitMass_in_Msol':1.0, 'Flag_physical_comoving':True,'Flag_hubble_flow':False},\n\tpartdata={'Flag_gas':False, 'Flag_star':False, 'Flag_bh':False},\n\tibuildheadtail=0, icombinefile=1):\n\tif (ibuildheadtail==1):\n\t\tBuildTemporalHeadTail(numsnaps,tree,numhalos,halodata)\n\ttotnumhalos=sum(numhalos)\n\tif (icombinefile==1):\n\t\thdffile=h5py.File(fname+\".snap.hdf.data\",'w')\n\t\theadergrp=hdffile.create_group(\"Header\")\n\t\t#store useful information such as number of snapshots, halos,\n\t\t#cosmology (Omega_m,Omega_b,Hubble_param,Omega_Lambda, Box size)\n\t\t#units (Physical [1/0] for physical/comoving flag, length in Mpc, km/s, solar masses, Gravity\n\t\t#and TEMPORALHALOIDVAL used to traverse tree information (converting halo ids to haloindex or snapshot), Reverse_order [1/0] for last snap listed first)\n\t\t#set the attributes of the header\n\t\theadergrp.attrs[\"NSnaps\"]=numsnaps\n\t\t#overall description\n\t\t#simulation box size\n\n\t\t#cosmological params\n\t\tcosmogrp=headergrp.create_group(\"Cosmology\")\n\t\tfor key in cosmodata.keys():\n\t\t\tcosmogrp.attrs[key]=cosmodata[key]\n\t\t#unit params\n\t\tunitgrp=headergrp.create_group(\"Units\")\n\t\tfor key in unitdata.keys():\n\t\t\tunitgrp.attrs[key]=unitdata[key]\n\t\t#particle types\n\t\tpartgrp=headergrp.create_group(\"Parttypes\")\n\t\tpartgrp.attrs[\"Flag_gas\"]=descripdata[\"Flag_gas\"]\n\t\tpartgrp.attrs[\"Flag_star\"]=descripdata[\"Flag_star\"]\n\t\tpartgrp.attrs[\"Flag_bh\"]=descripdata[\"Flag_bh\"]\n\n\t\tfor i in range(numsnaps):\n\t\t\tsnapgrp=hdffile.create_group(\"Snap_%03d\"%(numsnaps-1-i))\n\t\t\tsnapgrp.attrs[\"Snapnum\"]=(numsnaps-1-i)\n\t\t\tsnapgrp.attrs[\"NHalos\"]=numhalos[i]\n\t\t\tsnapgrp.attrs[\"scalefactor\"]=atime[i]\n\t\t\tfor key in halodata[i].keys():\n\t\t\t\tsnapgrp.create_dataset(key,data=halodata[i][key])\n\t\thdffile.close()\n\telse:\n\t\tfor i in range(numsnaps):\n\t\t\thdffile=h5py.File(fname+\".snap_%03d.hdf.data\"%(numsnaps-1-i),'w')\n\t\t\thdffile.create_dataset(\"Snap_value\",data=np.array([numsnaps-1-i],dtype=np.uint32))\n\t\t\thdffile.create_dataset(\"NSnaps\",data=np.array([numsnaps],dtype=np.uint32))\n\t\t\thdffile.create_dataset(\"NHalos\",data=np.array([numhalos[i]],dtype=np.uint64))\n\t\t\thdffile.create_dataset(\"TotalNHalos\",data=np.array([totnumhalos],dtype=np.uint64))\n\t\t\thdffile.create_dataset(\"scalefactor\",data=np.array([atime[i]],dtype=np.float64))\n\t\t\tfor key in halodata[i].keys():\n\t\t\t\thdffile.create_dataset(key,data=halodata[i][key])\n\t\t\thdffile.close()\n\n\thdffile=h5py.File(fname+\".tree.hdf.data\",'w')\n\thdffile.create_dataset(\"NSnaps\",data=np.array([numsnaps],dtype=np.uint32))\n\thdffile.create_dataset(\"TotalNHalos\",data=np.array([totnumhalos],dtype=np.uint64))\n\thdffile.create_dataset(\"NHalos\",data=np.array([numhalos],dtype=np.uint64))\n\tfor i in range(numsnaps):\n\t\tsnapgrp=hdffile.create_group(\"Snap_%03d\"%(numsnaps-1-i))\n\t\tfor key in tree[i].keys():\n\t\t\t\"\"\"\n\t\t\t#to be 
completed for progenitor list\n\t\t\tif (key==\"Progen\"):\n\t\t\t\tfor j in range(numhalos[i]):\n\t\t\t\t\thalogrp=snapgrp.create_group(\"Halo\"+str(j))\n\t\t\t\t\thalogrp.create_dataset(key,data=tree[i][key][j])\n\t\t\telse:\n\t\t\t\tsnapgrp.create_dataset(key,data=tree[i][key])\n\t\t\t\"\"\"\n\t\t\tif ((key==\"Progen\") | (key==\"Descen\")): continue\n\t\t\tsnapgrp.create_dataset(key,data=tree[i][key])\n\thdffile.close()", "title": "" }, { "docid": "8770ee73f6e6a459bcd63cd52b7503a8", "score": "0.5089119", "text": "def parse_single_12tped_to_hdf5(in_file_prefix='/home/bv25/data/Ls154/Ls154_12',\n out_file_prefix='/home/bv25/data/Ls154/Ls154_12',\n impute_type='mode', filter_monomorphic_snps=True,\n missing_val_thr=0.1):\n \n print 'Starting to parse genotypes'\n genotype_data = {}\n h5py_file = h5py.File(out_file_prefix + '.hdf5')\n genotype_data['hdf5p_file'] = h5py_file\n genot_group = h5py_file.create_group('genot_data')\n indiv_group = h5py_file.create_group('indiv_data')\n \n \n tot_num_snps = 0\n tot_num_missing_val_snps_removed = 0\n tot_num_ambiguous_loc_removed = 0\n curr_chrom = 1\n print 'Working on chromosome %d' % curr_chrom\n \n g_filename = '%s.tped' % (in_file_prefix) \n s_filename = '%s.bim' % (in_file_prefix)\n i_filename = '%s.tfam' % (in_file_prefix) \n\n \n \n indiv_ids = []\n phenotypes = [] \n sex = []\n print 'Parsing individuals file: %s' % i_filename\n with open(i_filename) as f:\n for line in f:\n l = line.split()\n iid = l[0]\n indiv_ids.append(iid)\n sex.append(int(l[4]))\n phenotypes.append(float(l[5]))\n tot_num_indiv = len(indiv_ids) \n \n print 'Storing individual data in individ. group'\n indiv_group.create_dataset('indiv_ids', data=indiv_ids)\n indiv_group.create_dataset('sex', data=sex)\n indiv_group.create_dataset('phenotypes', data=phenotypes)\n \n \n \n num_indiv = len(indiv_ids)\n print 'Found %d Individuals' % (num_indiv)\n\n print 'Parsing nucleotide map'\n nt_map = {}\n chromsomoes = []\n curr_chrom = 0\n with open(s_filename) as f:\n for line in f:\n l = line.split()\n chrom = l[0]\n if chrom != curr_chrom:\n chromsomoes.append(chrom)\n curr_chrom = chrom\n nt_map[l[1]] = (l[4], l[5]) \n assert len(chromsomoes) == len(set(chromsomoes)), 'Chromosomes need to be in order.'\n curr_chrom = chromsomoes[0]\n \n position = -1\n # Initializing containers.\n snps_mat = [] \n positions = []\n sids = []\n nts_list = []\n nt_counts_list = []\n missing_counts = []\n freqs = []\n num_missing_removed = 0\n num_monomorphic_removed = 0\n num_ambiguous_loc_removed = 0\n t0 = time.time()\n\n print 'Starting to parse SNP files'\n gf = open(g_filename)\n for g_line in gf:\n# if random.random() > 0.01:\n# continue\n gl = g_line.split()\n chrom = gl[0]\n if chrom != curr_chrom:\n \n # Store everything and reset.\n print 'Number of SNPs removed due to too many missing values: %d' % num_missing_removed\n print 'Number of SNPs removed due to ambiguous location: %d' % num_ambiguous_loc_removed\n print 'Number of monomorphic SNPs removed: %d' % num_monomorphic_removed\n print 'Number of SNPs retained: %d' % len(positions)\n print 'Number of individuals: %d' % num_indiv\n snps = sp.array(snps_mat, dtype='int8')\n h5py_chrom_group = genot_group.create_group('chrom_%s' % curr_chrom)\n h5py_chrom_group.create_dataset('raw_snps', compression='lzf', data=snps)\n h5py_chrom_group.create_dataset('positions', compression='lzf', data=positions)\n h5py_chrom_group.create_dataset('nts', compression='lzf', data=nts_list)\n h5py_chrom_group.create_dataset('nt_counts', 
compression='lzf', data=nt_counts_list)\n h5py_chrom_group.create_dataset('missing_counts', compression='lzf', data=missing_counts)\n h5py_chrom_group.create_dataset('freqs', compression='lzf', data=freqs)\n h5py_chrom_group.create_dataset('snp_ids', compression='lzf', data=sids) \n tot_num_snps += len(positions)\n tot_num_missing_val_snps_removed += num_missing_removed\n tot_num_ambiguous_loc_removed += num_ambiguous_loc_removed\n h5py_file.flush() \n t1 = time.time()\n t = t1 - t0\n print 'It took %d minutes and %0.2f seconds to parse Chromosome %s.' % (t / 60, t % 60, curr_chrom)\n t0 = time.time()\n\n \n\n # Reset containers\n snps_mat = [] \n positions = []\n sids = []\n nts_list = []\n nt_counts_list = []\n missing_counts = []\n freqs = []\n num_missing_removed = 0\n num_ambiguous = 0\n num_monomorphic_removed = 0\n num_ambiguous_loc_removed = 0\n \n curr_chrom = chrom\n\n sid = gl[1]\n prev_position = position\n position = int(gl[3])\n\n # Skipping unmappable locations\n if position == prev_position:\n num_ambiguous_loc_removed += 1\n continue\n if position == 0:\n num_ambiguous_loc_removed += 1\n continue\n\n nt = nt_map[sid]\n \n snp0 = sp.array(map(int, (g_line.strip()).split()[4:]), 'int8')\n a = sp.arange(tot_num_indiv * 2)\n even_map = a % 2 == 0\n odd_map = a % 2 == 1\n snp = snp0[even_map] + snp0[odd_map] - 2\n snp[snp < 0] = 9\n \n bin_counts = sp.bincount(snp)\n \n\n if len(bin_counts) > 3:\n missing_count = bin_counts[-1]\n # Filtering SNPs with too many missing values\n if missing_count > missing_val_thr * 2 * num_indiv:\n num_missing_removed += 1\n continue\n elif impute_type == 'mode':\n nt_counts = bin_counts[:3] \n v = sp.argmax(nt_counts)\n snp[snp == 9] = v\n bin_counts = sp.bincount(snp)\n else:\n raise Exception('Imputation type is unknown')\n else:\n missing_count = 0\n\n assert len(bin_counts) < 4, 'Issues with nucleotides.'\n nt_counts = bin_counts[:3] \n if len(nt_counts) == 2:\n nt_counts = sp.array([nt_counts[0], nt_counts[1], 0])\n elif len(nt_counts) == 1:\n nt_counts = sp.array([nt_counts[0], 0, 0])\n \n\n # Removing monomorphic SNPs\n if filter_monomorphic_snps:\n if max(nt_counts) == sum(nt_counts):\n num_monomorphic_removed += 1\n continue\n \n freq = sp.mean(snp) / 2.0 \n snps_mat.append(snp)\n positions.append(position)\n sids.append(sid)\n nts_list.append(nt)\n nt_counts_list.append(nt_counts)\n missing_counts.append(missing_count)\n freqs.append(freq) \n\n # Store everything and reset.\n print 'Number of SNPs removed due to too many missing values: %d' % num_missing_removed\n print 'Number of SNPs removed due to ambiguous location: %d' % num_ambiguous_loc_removed\n print 'Number of monomorphic SNPs removed: %d' % num_monomorphic_removed\n print 'Number of SNPs retained: %d' % len(positions)\n print 'Number of individuals: %d' % num_indiv\n snps = sp.array(snps_mat, dtype='int8')\n h5py_chrom_group = genot_group.create_group('chrom_%s' % chrom)\n h5py_chrom_group.create_dataset('raw_snps', compression='lzf', data=snps)\n h5py_chrom_group.create_dataset('positions', compression='lzf', data=positions)\n h5py_chrom_group.create_dataset('nts', compression='lzf', data=nts_list)\n h5py_chrom_group.create_dataset('nt_counts', compression='lzf', data=nt_counts_list)\n h5py_chrom_group.create_dataset('missing_counts', compression='lzf', data=missing_counts)\n h5py_chrom_group.create_dataset('freqs', compression='lzf', data=freqs)\n h5py_chrom_group.create_dataset('snp_ids', compression='lzf', data=sids) \n tot_num_snps += len(positions)\n 
tot_num_missing_val_snps_removed += num_missing_removed\n tot_num_ambiguous_loc_removed += num_ambiguous_loc_removed\n h5py_file.create_dataset('num_snps', data=sp.array(tot_num_snps))\n h5py_file.flush() \n t1 = time.time()\n t = t1 - t0\n print 'It took %d minutes and %0.2f seconds to parse chromosome %s.' % (t / 60, t % 60, chrom)\n\n \n gf.close()\n \n print 'Total number of SNPs parsed successfully was: %d' % tot_num_snps\n print 'Total number of SNPs removed due to too many missing values: %d' % tot_num_missing_val_snps_removed\n print 'Total number of SNPs removed due to ambiguous locations: %d' % tot_num_ambiguous_loc_removed\n h5py_file.close()\n \n print 'Done parsing genotypes.'", "title": "" }, { "docid": "5ae55e0ee0c569e234eaa99af838c30f", "score": "0.50849724", "text": "def write_hgrps(self, hgrp_list, dirname):\n self.host_group_manager.write_objects(hgrp_list, dirname)", "title": "" }, { "docid": "72d6f1dc1b4441ba7d808587f926551a", "score": "0.5082397", "text": "def ProduceWalkableHDFTree(fname,numsnaps,tree,numhalos,halodata,atime,\n\tdescripdata={'Title':'Tree catalogue', 'VELOCIraptor_version':1.3, 'Tree_version':1.1, 'Particle_num_threshold':20, 'Temporal_linking_length':1, 'Flag_gas':False, 'Flag_star':False, 'Flag_bh':False}\n\t):\n\thdffile=h5py.File(fname,'w')\n\theadergrp=hdffile.create_group(\"Header\")\n\t#store useful information such as number of snapshots, halos,\n\t#cosmology (Omega_m,Omega_b,Hubble_param,Omega_Lambda, Box size)\n\t#units (Physical [1/0] for physical/comoving flag, length in Mpc, km/s, solar masses, Gravity\n\t#and TEMPORALHALOIDVAL used to traverse tree information (converting halo ids to haloindex or snapshot), Reverse_order [1/0] for last snap listed first)\n\t#set the attributes of the header\n\theadergrp.attrs[\"NSnaps\"]=numsnaps\n\t#overall description\n\theadergrp.attrs[\"Title\"]=descripdata[\"Title\"]\n\tfindergrp=headergrp.create_group(\"HaloFinder\")\n\tfindergrp.attrs[\"Name\"]=\"VELOCIraptor\"\n\tfindergrp.attrs[\"Version\"]=descripdata[\"VELOCIraptor_version\"]\n\tfindergrp.attrs[\"Particle_num_threshold\"]=descripdata[\"Particle_num_threshold\"]\n\n\ttreebuildergrp=headergrp.create_group(\"TreeBuilder\")\n\ttreebuildergrp.attrs[\"Name\"]=\"VELOCIraptor-Tree\"\n\ttreebuildergrp.attrs[\"Version\"]=descripdata[\"Tree_version\"]\n\ttreebuildergrp.attrs[\"Temporal_linking_length\"]=descripdata[\"Temporal_linking_length\"]\n\n\t#now need to create groups for halos and then a group containing tree information\n\tsnapsgrp=hdffile.create_group(\"Snapshots\")\n\t#internal tree keys\n\thalokeys=[\"RootHead\", \"RootHeadSnap\", \"Head\", \"HeadSnap\", \"Tail\", \"TailSnap\", \"RootTail\", \"RootTailSnap\", \"ID\", \"Num_progen\"]\n\n\tfor i in range(numsnaps):\n\t\t#note that I normally have information in reverse order so that might be something in the units\n\t\tsnapgrp=snapsgrp.create_group(\"Snap_%03d\"%i)\n\t\tsnapgrp.attrs[\"Snapnum\"]=i\n\t\tsnapgrp.attrs[\"NHalos\"]=numhalos[i]\n\t\tsnapgrp.attrs[\"scalefactor\"]=atime[i]\n\t\tfor key in halokeys:\n\t\t\tsnapgrp.create_dataset(key,data=halodata[i][key])\n\thdffile.close()", "title": "" }, { "docid": "2665c63042d944fcd7380e9c9c8afa99", "score": "0.50810343", "text": "def setup_output_content(top_grp, pckt_summary):\n for where, smmry in pckt_summary.items():\n grp = top_grp.create_group(where)\n grp_path = grp.name\n nelems = smmry['count']\n\n if smmry['type'] == 'MIL1553_FMT_1':\n lggr.debug(f'Create HDF5 dataset data[{nelems}] in {grp_path}')\n dtype_1553 = np.dtype(\n [('time', 
'<i8'),\n ('timestamp', 'S30'),\n ('msg_error', '|u1'),\n ('ttb', '|u1'),\n ('word_error', '|u1'),\n ('sync_error', '|u1'),\n ('word_count_error', '|u1'),\n ('rsp_tout', '|u1'),\n ('format_error', '|u1'),\n ('bus_id', 'S1'),\n ('packet_version', '|u1'),\n ('messages', h5py.special_dtype(vlen=np.dtype('<u2')))])\n dset = grp.create_dataset(\n 'data', shape=(nelems,), chunks=True, dtype=dtype_1553)\n\n name_dtype = np.dtype(\n [('time', 'S30'),\n ('timestamp', 'S30'),\n ('msg_error', 'S30'),\n ('ttb', 'S30'),\n ('word_error', 'S30'),\n ('sync_error', 'S30'),\n ('word_count_error', 'S30'),\n ('rsp_tout', 'S30'),\n ('format_error', 'S30'),\n ('bus_id', 'S30'),\n ('packet_version', 'S30'),\n ('messages', 'S30')])\n names = ('1553 intra-packet time',\n '1553 intra-packet time stamp',\n '1553 message error flag',\n 'time tag bits',\n 'invalid word error',\n 'sync type error',\n 'word count error',\n 'response time out',\n 'format error',\n 'bus id',\n '1553 packet version',\n '1553 packet message data')\n dset.attrs.create('name', np.array(names, dtype=name_dtype))\n\n # Create alias HDF5 paths for created datasets...\n if 'alias' in smmry:\n for where in smmry['alias']:\n grp = top_grp.create_group(where)\n lggr.debug(f'Hard link {dset.name} from {grp.name}')\n grp['data'] = dset\n\n elif smmry['type'] == 'VIDEO_FMT_0':\n lggr.debug(f'Create HDF5 dataset data[{nelems}] in {grp_path}')\n dset = grp.create_dataset('data', shape=(nelems,),\n chunks=True, dtype=np.dtype('|V188'))\n dset.attrs['name'] = 'video transfer stream'", "title": "" }, { "docid": "b54fd67cd61bd5ec2435e4e05ac9693b", "score": "0.5071669", "text": "def export(gen, directory, file_prefix='{start[uid]}-', **kwargs):\n with Serializer(directory, file_prefix, **kwargs) as serializer:\n for item in gen:\n serializer(*item)\n\n return serializer.artifacts", "title": "" }, { "docid": "d790df9d50ef43ab9367d222729e1221", "score": "0.5059398", "text": "def write(self,data):\n if not os.path.exists(os.path.dirname(self.outfilename)):\n os.makedirs(os.path.dirname(self.outfilename))\n\n if os.path.exists(self.outfilename):\n self.outfile = h5py.File(self.outfilename,'a')\n else:\n self.outfile = h5py.File(self.outfilename,'w')\n\n # Set permissions and group\n if self.set_permissions:\n os.chmod(self.outfilename,0o664)\n shutil.chown(self.outfilename, group=self.permissions_group)\n\n if self.level2 in self.outfile:\n del self.outfile[self.level2]\n lvl2 = self.outfile.create_group(self.level2)\n\n tod_dset = lvl2.create_dataset('averaged_tod',data=self.avg_tod, dtype=self.avg_tod.dtype)\n tod_dset.attrs['Unit'] = 'K'\n tod_dset.attrs['Calibration'] = '{self.cal_mode}:{self.cal_prefix}'\n\n freq_dset = lvl2.create_dataset('frequency',data=self.avg_frequency, dtype=self.avg_frequency.dtype)\n\n # Link the Level1 data\n data_filename = data['level1'].file.filename\n fname = data['level1'].file.filename.split('/')[-1]\n vane_file = data['level2/Vane'].file.filename\n\n # Copy over the statistics\n if 'Statistics' in lvl2:\n del lvl2['Statistics']\n grp = lvl2.create_group('Statistics')\n for k,v in data['level2/Statistics'].items():\n if isinstance(v,h5py.Group):\n grp2 = grp.create_group(k)\n for k1,v1 in v.items():\n grp2.create_dataset(k1,data=v1,dtype=v1.dtype)\n else:\n grp.create_dataset(k,data=v,dtype=v.dtype)\n\n\n data.close()\n if 'level1' in self.outfile:\n del self.outfile['level1']\n self.outfile['level1'] = h5py.ExternalLink(data_filename,'/')\n lvl2.attrs['version'] = __level2_version__\n\n # Add version info\n 
lvl2.attrs['pipeline-version'] = comancpipeline.__version__\n\n # Link the Level1 data\n if 'Vane' in lvl2:\n del lvl2['Vane']\n lvl2['Vane'] = h5py.ExternalLink('{}'.format(vane_file),'/')", "title": "" }, { "docid": "356eeae777c7ba5e43f43fe1a16cbda4", "score": "0.50575787", "text": "def export_grammars(output_dir):\n d = {}\n d['tokenize_and_classify'] = {'classify': ClassifyFst().fst}\n d['verbalize'] = {'verbalize': VerbalizeFst().fst}\n\n for category, graphs in d.items():\n for stage, fst in graphs.items():\n out_dir = os.path.join(output_dir, stage)\n os.makedirs(out_dir, exist_ok=True)\n _generator_main(f\"{out_dir}/{category}_tmp.far\", fst, category.upper())", "title": "" }, { "docid": "63b4f9dc44b348fc611711f0e14a230a", "score": "0.5057459", "text": "def save_npz_in_hdf5(npz_files_list, hdf5_path=None,\n add_attributes=None, verbose=False):\n # Set default output hdf5 path.\n if hdf5_path is None:\n hdf5_path = Path(npz_files_list[0]).with_suffix('.hdf5')\n\n # Create output hdf5 file\n #\n # Iterate over npz files list. Create a group for each npz file,\n # using relative orders as groups keys.\n output = H5(hdf5_path, 'w')\n for model_pos, npz_path in enumerate(npz_files_list):\n model_no = model_pos + 1\n grp = output.create_group(str(model_no))\n\n # Set values of interest as group attributes.\n if add_attributes:\n for k, value in add_attributes[int(model_no)].items():\n grp.attrs[k] = value\n\n # Load npz arrays and save them as groups data-sets. Keys\n # will be whatever keys were present in each npz file.\n arrays = np.load(npz_path)\n for k, value in arrays.items():\n grp.create_dataset(k, data=value)\n\n # Print output file structure\n if verbose:\n output.show_hdf5_structure()\n return output", "title": "" }, { "docid": "5e1e889856913fee9c3ef9bbae82f21a", "score": "0.5057404", "text": "def main(args):\n\n group = Group()\n for f in GROUP_PICK_LIST:\n group.append(args.group_dir + '/' + f, check=True)\n \n twitter_info = TwitterInfo()\n twitter_info.load(args.twitter_file)\n\n allshop2pos = map_shop_to_twitter_pos(twitter_info.get_data())\n\n shop2pos = {}\n shopids = map(lambda x: x.strip(), open(args.shop_list).readlines())\n for s in shopids:\n shop2pos[s] = allshop2pos.get(s, set())\n\n result_map = {}\n for shop in shop2pos:\n local_group = {}\n ret = {'total': len(shop2pos[shop]), 'groups': local_group}\n for pos in shop2pos[shop]:\n group_pos = group.get_group(pos)\n if group_pos is None:\n continue\n if group_pos not in local_group:\n local_group[group_pos] = set()\n local_group[group_pos].add(pos)\n result_map[shop] = ret\n\n output_file = args.output\n if not os.path.exists(output_file):\n os.makedirs(output_file)\n\n for shopid, info in result_map.iteritems():\n temp_file = output_file + '/' + shopid + '.org'\n with open(temp_file, \"w\") as fh:\n print >> fh, \"#+ATTR_HTML: target=\\\"_blank\\\" \"\n print >> fh, \"* [[http://www.meilishuo.com/shop/%s][shopid=%s]] 总商品量%s\" % (shopid, shopid, info['total'])\n print >> fh\n\n local_group = info['groups']\n \n #ordered_keys = random.shuffle(local_group.keys())\n ordered_keys = local_group.keys()\n \n n = len(ordered_keys)\n list1 = range(n)[:args.maxgroup]\n list2 = range(n)[args.maxgroup:args.maxline]\n for i in list1:\n group_pos = ordered_keys[i]\n positions = local_group[group_pos]\n sz = output_group_image_table(twitter_info, list(positions), max_show=args.maxshow, max_col=args.max_col, flag=\"店内同款\")\n print >> fh, sz\n total_group_members = group.get_member(group_pos)\n positions2 = 
total_group_members.difference(positions)\n if positions2:\n sz = output_group_image_table(twitter_info, list(positions2), max_show=args.maxshow2, max_col=args.max_col, flag=\"跨店同款\")\n print >> fh, sz\n\n print >> fh\n\n if list2:\n print >>fh, \"* 避免拖垮浏览器,以下图片省略\"\n \n for i in list2:\n group_pos = ordered_keys[i]\n positions = local_group[group_pos]\n sz = output_group_text_table(twitter_info, list(positions), max_col=args.max_col, flag=\"店内同款\")\n print >> fh, sz\n total_group_members = group.get_member(group_pos)\n positions2 = total_group_members.difference(positions)\n sz = output_group_text_table(twitter_info, list(positions2), max_col=args.max_col, flag=\"跨店同款\")\n print >> fh, sz\n\n print >> fh\n cmd = \" emacs --kill --batch %s -f org-export-as-html \" % temp_file\n os.system(cmd)\n print \"shop %s is done\" % shopid", "title": "" }, { "docid": "7e4ea330f46465b7dc1e898dd814e89b", "score": "0.5049522", "text": "def export(gen, directory, file_prefix=\"{uid}-\", **kwargs):\n with Serializer(directory, file_prefix, **kwargs) as serializer:\n for item in gen:\n serializer(*item)\n\n return serializer.artifacts", "title": "" }, { "docid": "5e2acbd3d62f52259f015b0f2733f87f", "score": "0.5044531", "text": "def load_stacked_hourglass(data_dir,subjects,actions,verbose=True):\n data = {}\n for subj in subjects:\n for action in actions:\n if verbose:\n print('Reading subject {0}, action {1}'.format(subj, action))\n # CHANGED: training data directory path based on StackedHourglass folder in the h36m directory\n # dpath = os.path.join( data_dir, 'S{0}'.format(subj), 'post_accept_sh_finetuned_10it/{0}*.h5'.format(action))\n dpath = os.path.join( data_dir, 'S{0}'.format(subj), 'StackedHourglass/{0}*.h5'.format(action) )\n print( dpath )\n fnames = glob.glob( dpath )\n loaded_seqs = 0\n for fname in fnames:\n seqname = os.path.basename( fname )\n seqname = seqname.replace('_',' ')\n if action == \"Sitting\" and seqname.startswith( \"SittingDown\" ):\n continue\n if seqname.startswith( action ):\n # This filters out e.g. walkDog and walkTogether\n if verbose:\n print( fname )\n loaded_seqs = loaded_seqs + 1\n with h5py.File( fname, 'r' ) as h5f:\n poses = h5f['poses'][:]\n permutation_idx = np.array([6,2,1,0,3,4,5,7,8,9,13,14,15,12,11,10])\n ### PERMUTE TO MAKE IT COMPATIBLE with h36m\n poses = poses[:,permutation_idx,:]\n ### Reshape into n times 16*2\n poses = np.reshape(poses,[poses.shape[0],-1])\n poses_final = np.zeros([poses.shape[0],32*2])\n dim_to_use_x = np.array( [0,1,2,3,6,7,8,12,13,15,17,18,19,25,26,27],dtype=np.int32 )*2\n dim_to_use_y = dim_to_use_x+1\n dim_to_use = np.zeros(16*2,dtype=np.int32)\n dim_to_use[0::2] = dim_to_use_x\n dim_to_use[1::2] = dim_to_use_y\n poses_final[:,dim_to_use] = poses\n seqname = seqname+'-sh'\n data[ (subj, action, seqname) ] = poses_final\n\n # Make sure we loaded 8 sequences\n if (subj == 11 and action == 'Directions'): # <-- this video is damaged\n assert loaded_seqs == 7, \"Expecting 7 sequences, found {0} instead. S:{1} {2}\".format(loaded_seqs, subj, action )\n else:\n assert loaded_seqs == 8, \"Expecting 8 sequences, found {0} instead. 
S:{1} {2}\".format(loaded_seqs, subj, action )\n\n return data", "title": "" }, { "docid": "39deb588840405dd1a0942e93c5e05e4", "score": "0.50427383", "text": "def _dump_data(self, mode=['props']):\n with h5py.File(self.name + '.hdf5') as f:\n for obj in self:\n for key in list(obj.keys()):\n tempname = obj.name + '|' + '_'.join(key.split('.'))\n arr = obj[key]\n if 'U' in str(obj[key][0].dtype):\n pass\n elif 'all' in key.split('.'):\n pass\n else:\n f.create_dataset(name='/'+tempname, shape=arr.shape,\n dtype=arr.dtype, data=arr)\n for obj in self:\n obj.clear(mode=mode)", "title": "" }, { "docid": "832e72a4673ab2f71201de440f7b3494", "score": "0.503768", "text": "def create_hdf5_dataset(group, dsName, data, max_digit=55, compression=COMPRESSION):\n msg = 'create dataset {d:<{w}}'.format(d='{}/{}'.format(group.name, dsName), w=max_digit)\n msg += ' of {t:<10} in size of {s}'.format(t=str(data.dtype), s=data.shape)\n msg += ' with compression={c}'.format(c=compression)\n print(msg)\n\n if data.ndim == 1:\n dset = group.create_dataset(dsName,\n data=data,\n compression=compression)\n elif data.ndim == 2:\n dset = group.create_dataset(dsName,\n data=data,\n chunks=True,\n compression=compression)\n return dset", "title": "" }, { "docid": "b056273bcb844d5dc26474751ca077c6", "score": "0.5036801", "text": "def gen_sigs(cluster_type, sigmeta, cluster, file_directory, signature_directory, fdf):\n print \"Creating signature for cluster: %s from %s samples\" % (cluster, len(fdf.filename.unique()))\n meta = sigmeta.copy()\n filename = fdf.filename.value_counts().index.tolist()[0]\n print filename\n count = 0\n yara_rule_name = cluster_type + \"_cluster_\" + str(cluster)\n for sample in fdf.filename.unique().tolist():\n meta['sample_'+str(count)] = sample\n count += 1\n\n\n file_header_columns = [\"pointer to symbol table\", \"characteristics\", \"number of symbols\", \"size of optional header\",\n \"machine\", \"compile date\", \"number of sections\"]\n\n optional_header_columns = [\"subsystem\", \"major image version\", \n \"major operating system version\", \"section alignment\", \"loader flags\",\n \"minor subsystem version\", \"major linker version\",\n \"size of code\", \"size of image\", \"number of rva and sizes\", \"dll charactersitics\",\n \"file alignment\", \"minor linker version\", \"base of code\",\n \"size uninit data\", \"entry point address\", \"size init data\", \"major subsystem version\",\n \"magic\", \"checksum\", \"minor image version\",\n \"minor operating system version\", \"size of headers\", \"base of data\",\n \"data dir base relocation rva\", \"data dir base relocation size\", \"data dir debug rva\",\n \"data dir debug size\", \"data dir exception table rva\", \"data dir exception table size\",\n \"data dir export table rva\", \"data dir export table size\", \"data dir import address table rva\",\n \"data dir import address table rva\", \"data dir import address table size\",\n \"data dir import table rva\", \"data dir import table size\", \"data dir import table size\",\n \"data dir resource table rva\", \"data dir resource table size\", \"data dir tls table rva\",\n \"data dir tls table size\"]\n\n qword_columns = ['image base', 'size of stack reserve', 'size of stack commit', 'size of heap reserve', 'size of heap commit']\n\n sig = data_hacking.YaraPEGenerator(file_directory + \"/\" + filename, samplename=yara_rule_name, meta=meta)\n\n file_header = []\n optional_header = {}\n\n for col in fdf.columns:\n if len(fdf[col].unique()) == 1:\n if fdf[col].unique()[0] != -1:\n 
lower = [s for s in col if s.islower()]\n if fdf[col].unique()[0] != -1 or (len(lower) == len(col)):\n if col in file_header_columns:\n file_header.append(col)\n if col in optional_header_columns:\n optional_header[col] = struct.pack(\"<I\", int(fdf[col].unique()[0])).encode('hex')\n if col in qword_columns:\n if fdf['magic'][0] == 0x20b:\n optional_header[col] = struct.pack(\"<Q\", int(fdf[col].unique()[0])).encode('hex')\n else:\n optional_header[col] = struct.pack(\"<I\", int(fdf[col].unique()[0])).encode('hex')\n\n\n if len(fdf[col].unique()) > 1:\n if col not in optional_header_columns:\n continue\n\n if type(fdf[col].unique()[0]) == str or len(fdf[col].unique()) > 9:\n continue\n\n u = []\n z = []\n for value in fdf[col].unique():\n u.append(struct.pack(\"<I\", value).encode(\"hex\"))\n\n for d in zip(*u):\n match = True\n for idx in range(1,len(d)):\n if d[0] != d[idx]:\n match = False\n break\n if match:\n z.append(d[0])\n else:\n z.append('?')\n string = ''.join(z)\n if string != '????????':\n optional_header[col] = string\n\n if len(file_header) > 0:\n sig.add_file_header(file_header)\n\n if len(optional_header) > 0:\n sig.add_optional_header_with_values(optional_header)\n sig.get_signature(filename=signature_directory + '/' + yara_rule_name + '.yara', writesig=True)", "title": "" }, { "docid": "1ea55c513c5663457506c6db8054e96f", "score": "0.5035398", "text": "def prepH5(outputName):\n with h5py.File(outputName, 'w') as f:\n # Generate top level groups\n f.create_group('data')\n f.create_group('params')\n f.create_group('misc')\n f.create_group('info')\n\n # Write metadata\n # Package format version\n f.create_dataset('info/package_version', data=np.string_('SingFEL v0.2.0'))\n # Contact\n f.create_dataset('info/contact', data=np.string_('Carsten Fortmann-Grote <[email protected]>'))\n # Data Description\n f.create_dataset('info/data_description', data=np.string_('This dataset contains diffraction patterns generated using SingFEL.'))\n # Method Description\n f.create_dataset('info/method_description', data=np.string_('Form factors of the radiation damaged molecules are calculated in time slices. At each time slice, the coherent scattering is calculated and incoherently added to the final diffraction pattern (/data/nnnnnnn/diffr). 
Finally, Poissonian noise is added to the diffraction pattern (/data/nnnnnnn/data).'))\n # Data format version\n f.create_dataset('version', data=np.string_('0.2'))", "title": "" }, { "docid": "e9827420a61f03aecbf99fc7ad1d93e4", "score": "0.50345504", "text": "def make_heatmap(data: str, group_by, rds, output, process):\n with open(data) as r:\n data = json.load(r)\n\n tasks = []\n\n if not os.path.exists(output):\n os.makedirs(output)\n # rds, prefix, cluster, gene, group = data\n for k, v in data.items():\n tasks.append([rds, output, k, v, group_by])\n\n with Pool(processes=process) as p:\n p.map(run_r, tasks)", "title": "" }, { "docid": "7c74536aed662db5da03fb8569085822", "score": "0.50222504", "text": "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n thetas = []\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n\n theta = element.theta\n assert isinstance(theta, float), type(theta)\n thetas.append(theta)\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('pid', data=pids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('theta', data=thetas)", "title": "" }, { "docid": "3ad35022a5b3e1d9b834080c7b8c1d9f", "score": "0.5018338", "text": "def createDataset(filename, group, dataset, data):\n\n deleteDataset(filename, group, dataset)\n\n FILE = h5py.File(filename, \"r+\")\n\n GROUP = FILE[group]\n\n GROUP.create_dataset(dataset, data = data)\n\n print(\"[CREATE]: <{:s}> dataset in <{:s}> group created.\".format(dataset, group))\n\n FILE.close()", "title": "" }, { "docid": "ad52655b1bb5f6792af20b2e04f60f47", "score": "0.5007411", "text": "def save_grouped_data(input_df, output_dir):\n\n df = pd.read_parquet(input_df)\n df[CDR3_LEN_COL_LABEL] = df['cdr3'].str.len()\n\n grouped_df = df.groupby(['v', 'j', CDR3_LEN_COL_LABEL])\n\n if grouped_df:\n path_dir = pathlib.Path(output_dir)\n path_dir.mkdir(parents=True, exist_ok=True)\n\n filenames = []\n for (v, j, cdr3_len), grouped_data in grouped_df:\n filename = f'{v}_{j}_{cdr3_len}'\n\n for char, replacement_char in FILENAME_CHAR_REPLACEMENTS.items():\n filename = filename.replace(char, replacement_char)\n \n filenames.append(filename)\n grouped_data.to_parquet(f'{output_dir}/{filename}.parquet', index=False)\n\n with open(f'{output_dir}/{GROUP_FILES_LIST_FILENAME}', 'w') as groups_list_file:\n groups_list_file.write('\\n'.join(filenames))", "title": "" }, { "docid": "c2309a1b61c6c278aa33a91a3a02f29f", "score": "0.49970445", "text": "def group_dls_data(directory = '', pattern = '*.ASC', output = 'data', size = 10):\n import os\n from labtools.io.dls import open_dls_group\n files = glob.glob(os.path.join(directory,pattern))\n data = open_dls_group(files, size)\n for i,d in enumerate(data):\n print('creating data')\n dat = DLS_Data_Old(data = d[0], cr = d[1])\n print('opening window')\n dat.configure_traits()\n print('calculating')\n dat.calculate()\n fname = os.path.join(directory, output + str(i) + '.npy')\n print('saving data+')\n dat.save(fname)", "title": "" }, { "docid": "ea006c100c3c9cddcb0768b5bd8c2011", "score": "0.4997002", "text": "def _export_dags_plugins_and_data(self: typing.Any) -> None:\n logger.info(\"Exporting DAGs, plugins and data...\")\n bucket = self.cp_bucket_name\n command = (\n f\"gsutil -m cp -r gs://{bucket}/dags \"\n f\"gs://{bucket}/export/dirs/dags && \"\n f\"gsutil -m cp -r 
gs://{bucket}/plugins \"\n f\"gs://{bucket}/export/dirs/plugins\"\n f\" && gsutil -m cp -r gs://{bucket}/data \"\n f\"gs://{bucket}/export/dirs/data\"\n )\n output = EnvironmentUtils.execute_command_in_a_pod(\n self.worker_pod_namespace,\n self.worker_pod_name,\n self.worker_container_name,\n command,\n )\n logger.info(output)", "title": "" }, { "docid": "b0c7ce81a5bc58a0a8e05d57b04e5940", "score": "0.4993694", "text": "def createOutputFiles():\n print \"\\nCreating outputs\"\n make_path(outPath)\n os.chdir(outPath)\n outFile = open(\"SipprModelling_%s.csv\" % start, \"wb\")\n outFile.write(\"readLength\\tfoldCoverage\\ttarget\\tkmerLength\\tMedianQualityScore\\t\"\n \"QualityScoreSD\\tMedianFoldCoverage\\tFoldCoverageSD\\tMedianPercentID\\tqualityMetric\\n\")\n for rLength in readLength:\n for fCov in foldCoverage:\n for target in targets:\n for size in kmer:\n total1 = 0\n sys.stdout.write('.')\n filename = os.path.split(target)[1]\n fileNoExt = filename.split(\".\")[0]\n megaName = \"rL%s_fC%s_%s_kmer%s\" % (rLength, fCov, fileNoExt, size)\n filePath = \"%s/tmp/rL%s/rL%s_fC%s\" % (path, rLength, rLength, fCov)\n vcfFile = megaName + \"_sorted.vcf\"\n newPath = \"%s/%s\" % (filePath, megaName)\n outputFile = \"%s/%s\" % (newPath, vcfFile)\n # Initialise the counter, which will be used to track lines in the vcf file - if positions in the\n # target are not mapped, then the position field will jump ahead of the counter\n count = 1\n # Initialise the arrays, which will keep track of the appropriate values for each dataset\n arrQual = []\n arrCov = []\n arrSum = []\n output = open(outputFile, \"r\")\n for line in output:\n # vcf files have 36 commented out lines at the top of each file - these are not necessary\n if re.search('#', line):\n pass\n else:\n total1 += 1\n # Format of file\n # CHROM\t POS\tID\tREF\tALT\tQUAL FILTER\tINFO\t FORMAT\n # adk-12\t8\t.\tG\t.\t32.7\t.\tDP=1;AF1=0;AC1=0;DP4=0,1,0,0;MQ=29;FQ=-30.3\tPL\t0\n # data[0] [1] [2] [3] [4] [5] [6] [7]\n data = line.split(\"\\t\")\n #target = data[0]\n pos = data[1]\n refSeq = data[3]\n mapSeq = data[4]\n qual = data[5]\n # Depth of coverage is reported prior to the first \";\"\n dpLine = data[7].split(\";\")[0]\n # For now, I'm skipping lines that indicated the presence of a possible indel\n # - I may return to this later\n if re.search(\"INDEL\", dpLine):\n pass\n else:\n # If the called base (mapSeq) is identical to the reference base (refSeq)\n # - denoted by a \".\", then set seq to equal refSeq, otherwise, pull the\n # value of mapSeq for seq\n avgQual = sum(arrQual)/total1\n if mapSeq == \".\":\n seq = refSeq\n match = 1\n # This section corrects for the fact that during the conversion of bam files to vcf\n # files, SNP calls and ambiguous calls look identical, except for the fact that for\n # SNPs, the qualityScore (qual) tends to be higher than the surrounding bases,\n # while ambiguous calls have a lower qualityScore - this loop screens for quality\n # scores that are at least 10 lower than the score of the previous base\n else:\n if float(arrQual[-1] - 10) >= 0:\n prevValue = float(arrQual[-1] - 10)\n else:\n prevValue = 0\n if float(qual) <= prevValue:\n seq = refSeq\n match = 1\n else:\n # This attempts to catch if there are two ambiguous bases in a row;\n # they will hopefully have the same value\n if float(qual) == prevValue:\n seq = refSeq\n match = 1\n else:\n # \"True\" SNPs seem to have increased qualityScore compared to the\n # surrounding values, this will catch that\n if float(qual) > prevValue:\n seq = 
mapSeq\n match = 0\n # Strip the \"DP=\" from dpLine\n DP = dpLine.split(\"=\")[1]\n #vcfData[pos] = (fileName, target, refSeq, mapSeq, DP)\n # If pos > count, then there is a gap in the mapping (or a deletion, but ignoring\n # this possibility for now). For my purposes, I want to have data reported for\n # every position, whether it is present in the vcf file or not, so I will use count\n # as the position, \"-\" as the seq, and 0 as the quality and depth of coverage\n if int(pos) > count:\n #print int(pos) - count, pos, count, range(count, int(pos))\n # the number of skipped positions is equal to the value for pos - count\n # For each skipped position (i), set appropriate variables to appropriate values\n for i in range(count, int(pos)):\n posAdj = count\n seqAdj = \"-\"\n matchAdj = 0\n qualAdj = 0\n DPAdj = 0\n #vcfData[fileName][rL][fC][target][size][int(posAdj)][seqAdj][matchAdj][qualAdj] = DP\n arrQual.append(float(qualAdj))\n arrCov.append(float(DPAdj))\n arrSum.append(float(matchAdj))\n count += 1\n if int(pos) == count:\n #vcfData[fileName][rL][fC][target][size][int(pos)][seq][match][qual] = DP\n arrQual.append(float(qual))\n arrCov.append(float(DP))\n arrSum.append(float(match))\n count += 1\n else:\n #vcfData[fileName][rL][fC][target][size][int(pos)][seq][match][qual] = DP\n arrQual.append(float(qual))\n arrCov.append(float(DP))\n arrSum.append(float(match))\n count += 1\n # In the case of no data being present in a file,\n total = count - 1\n if total == 0:\n avgQual = 0\n stdQual = 0\n avgCov = 0\n stdCov = 0\n avgID = 0\n qualMet = 0\n else:\n avgQual = sum(arrQual)/total\n stdQual = numpy.std(arrQual)\n avgCov = sum(arrCov)/total\n stdCov = numpy.std(arrCov)\n avgID = sum(arrSum)/total * 100\n qualMet = avgQual * avgCov\n\n outFile.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"\n % (rLength, fCov, fileNoExt, size, avgQual, stdQual, avgCov, stdCov, avgID, qualMet))\n\n output.close()\n outFile.close()", "title": "" }, { "docid": "ac211780d7535d82eeac4a04791d06a2", "score": "0.4985788", "text": "def write_checkgroups(groups, path):\n keys = groups.keys()\n keys.sort()\n checkgroups_file = file(path, 'wb')\n for key in keys:\n if len(key) < 8:\n checkgroups_file.write(key + '\\t\\t\\t' + groups[key] + '\\n')\n elif len(key) < 16:\n checkgroups_file.write(key + '\\t\\t' + groups[key] + '\\n')\n else:\n checkgroups_file.write(key + '\\t' + groups[key] + '\\n')\n checkgroups_file.close()\n print 'Checkgroups file written.'\n print", "title": "" }, { "docid": "b42b23e474853ee230bfb101382b3b57", "score": "0.49817586", "text": "def _dump(py_obj, h_group, name, memo, loader,attrs={} , **kwargs):\n\n py_obj_id = id(py_obj)\n py_obj_ref = memo.get(py_obj_id,None)\n if py_obj_ref is not None:\n\n # py_object already dumped to hdf5 file store a reference to it instead\n # instead of dumping it again.\n #\n # Note: reference dataset share their base_type and py_obj_type with the\n # referenced h5py.Group or h5py.Dataset. 
On load their h5py.ref_dtype type \n # dtype is used to distinguish them from datasets hosting pickled data.\n h_link = h_group.create_dataset(name,data = py_obj_ref[0].ref,dtype = h5.ref_dtype)\n h_link.attrs.update(attrs)\n return\n\n # Check if loader has already been loaded for the provided py_obj and \n # retrieve the most appropriate method for creating the corresponding\n # representation within HDF5 file\n py_obj_type, (create_dataset, base_type,memoise) = loader.load_loader(py_obj.__class__)\n try:\n h_node,h_subitems = create_dataset(py_obj, h_group, name, **kwargs)\n except NotHicklable:\n h_node,h_subitems = create_pickled_dataset(py_obj, h_group, name, reason = str(NotHicklable), **kwargs)\n else:\n # store base_type and type unless py_obj had to be pickled by create_pickled_dataset\n memo.store_type(h_node,py_obj_type,base_type,**kwargs)\n\n # add additional attributes and prevent modification of 'type' attribute\n h_node.attrs.update((name,attr) for name,attr in attrs.items() if name != 'type' )\n\n # if py_object shall be memoised to properly represent multiple references\n # to it in HDF5 file store it along with created h_node in the memo dictionary.\n # remembering the py_object along with the h_node ensures that py_object_id\n # which represents the memory address of py_obj refers to py_obj until the\n # whole structure is stored within hickle file.\n if memoise:\n memo[py_obj_id] = (h_node,py_obj)\n\n # loop through list of all sub items and recursively dump them\n # to HDF5 file\n for h_subname,py_subobj,h_subattrs,sub_kwargs in h_subitems:\n _dump(py_subobj,h_node,h_subname,memo,loader,h_subattrs,**sub_kwargs)", "title": "" }, { "docid": "09e6e1d03830d87a72f05725f5caf80c", "score": "0.49766046", "text": "def write_hdf5( self, iteration ):\n # Find the file name\n filename = \"data%08d.h5\" %iteration\n fullpath = os.path.join( self.write_dir, \"hdf5\", filename )\n\n # Create the file and setup its attributes\n zmin = self.top.zgrid + self.w3d.zmmin\n self.create_file_empty_meshes( fullpath, self.top.it,\n self.top.time, self.nz, zmin, self.dz, self.top.dt )\n\n # Open the file again (possibly in parallel), and get the field path\n f = self.open_file( fullpath, parallel_open=self.lparallel_output )\n # (f is None if this processor does not participate in writing data)\n if f is not None:\n field_path = \"/data/%d/fields/\" %iteration\n field_grp = f[field_path]\n else:\n field_grp = None\n\n # Loop over the different quantities that should be written\n for fieldtype in self.fieldtypes:\n # Scalar field\n if fieldtype == \"rho\":\n self.write_dataset( field_grp, \"rho\", \"rho\" )\n # Vector field\n elif fieldtype in [\"E\", \"B\", \"J\"]:\n for coord in self.coords:\n quantity = \"%s%s\" %(fieldtype, coord)\n path = \"%s/%s\" %(fieldtype, coord)\n self.write_dataset( field_grp, path, quantity )\n\n # Close the file\n if f is not None:\n f.close()", "title": "" }, { "docid": "695d51b0144f629dc9497cceee2ed69a", "score": "0.49762768", "text": "def _DumpGroup(parent, datasets):\n for key, value in datasets.iteritems():\n if isinstance(value, dict):\n child = parent.create_group(key)\n _DumpGroup(child, value)\n elif value is None:\n child = parent.create_group(key)\n else:\n compression_kwargs = {}\n if compression_level:\n compression_kwargs = {'compression': 'gzip',\n 'compression_opts': compression_level}\n parent.create_dataset(key, data=value, **compression_kwargs)", "title": "" }, { "docid": "adf54dc92a147885acc8fc08cad0cc4a", "score": "0.49743944", "text": "def 
export_join_data(toil, options, full_ids, clip_ids, clip_stats, filter_ids, idx_maps, og_chrom_ids):\n\n # make a directory for the chromosomal vgs\n if options.chrom_vg:\n clip_base = os.path.join(options.outDir, '{}.chroms'.format(options.outName))\n if not clip_base.startswith('s3://') and not os.path.isdir(clip_base):\n os.makedirs(clip_base)\n\n if 'full' in options.chrom_vg:\n # download the \"full\" vgs\n assert len(options.vg) == len(full_ids)\n for vg_path, full_id, in zip(options.vg, full_ids):\n name = os.path.splitext(vg_path)[0] + '.full.vg'\n toil.exportFile(full_id, makeURL(os.path.join(clip_base, os.path.basename(name))))\n\n if 'clip' in options.chrom_vg:\n # download the \"clip\" vgs\n assert len(options.vg) == len(clip_ids)\n for vg_path, clip_id, in zip(options.vg, clip_ids):\n name = os.path.splitext(vg_path)[0] + '.vg'\n toil.exportFile(clip_id, makeURL(os.path.join(clip_base, os.path.basename(name))))\n\n if 'filter' in options.chrom_vg:\n # download the \"filter\" vgs\n assert len(options.vg) == len(filter_ids)\n for vg_path, filter_id, in zip(options.vg, filter_ids):\n name = os.path.splitext(vg_path)[0] + '.d{}.vg'.format(options.filter)\n toil.exportFile(filter_id, makeURL(os.path.join(clip_base, os.path.basename(name))))\n\n # make a directory for the chromosomal ogs\n if options.chrom_og:\n clip_base = os.path.join(options.outDir, '{}.chroms'.format(options.outName))\n if not clip_base.startswith('s3://') and not os.path.isdir(clip_base):\n os.makedirs(clip_base)\n\n for gtype in options.chrom_og:\n # download all the chromosomal ogs\n tag = 'd{}'.format(options.filter) if gtype == 'filter' else gtype\n og_ids = og_chrom_ids[gtype]['og']\n assert len(options.vg) == len(og_ids)\n for vg_path, og_id in zip(options.vg, og_ids):\n name = os.path.splitext(vg_path)[0] + '{}.og'.format( '.' + tag if tag != 'clip' else '')\n toil.exportFile(og_id, makeURL(os.path.join(clip_base, os.path.basename(name))))\n\n # make a directory for the viz\n if options.viz + options.draw:\n viz_base = os.path.join(options.outDir, '{}.viz'.format(options.outName))\n if not viz_base.startswith('s3://') and not os.path.isdir(viz_base):\n os.makedirs(viz_base)\n\n for gtype in options.viz:\n # download all the chromosomal 1D visualizations\n assert len(options.vg) == len(og_chrom_ids[gtype]['viz'])\n tag = 'd{}'.format(options.filter) if gtype == 'filter' else gtype \n for vg_path, viz_id in zip(options.vg, og_chrom_ids[gtype]['viz']):\n if viz_id:\n viz_name = os.path.splitext(vg_path)[0] + '{}.viz.png'.format('.' + tag if tag != 'clip' else '')\n toil.exportFile(viz_id, makeURL(os.path.join(viz_base, os.path.basename(viz_name))))\n\n for gtype in options.draw:\n # download all the chromosomal 2D visualizations\n assert len(options.vg) == len(og_chrom_ids[gtype]['draw'])\n tag = 'd{}'.format(options.filter) if gtype == 'filter' else gtype \n for vg_path, draw_id in zip(options.vg, og_chrom_ids[gtype]['draw']):\n if draw_id:\n draw_name = os.path.splitext(vg_path)[0] + '{}.draw.png'.format('.' 
+ tag if tag != 'clip' else '')\n toil.exportFile(draw_id, makeURL(os.path.join(viz_base, os.path.basename(draw_name))))\n \n # download the stats files\n if clip_stats:\n for stats_file in clip_stats.keys():\n toil.exportFile(clip_stats[stats_file], makeURL(os.path.join(options.outDir, '{}.{}'.format(options.outName, stats_file))))\n \n # download everything else\n for idx_map in idx_maps:\n for ext, idx_id in idx_map.items():\n # hacky filtering of intermediate indexes that the user doesn't want\n # ex if someone did --vcf clip --gfa full this would filter out clip.gfa etc.\n is_intermediate = False\n for out_ext, out_sel in [('gfa', options.gfa), ('gbz', options.gbz + options.giraffe), ('snarls', options.gbz)]:\n for phase in ['full', 'clip', 'filter']:\n if '{}.{}'.format(phase, out_ext) in ext and phase not in out_sel:\n is_intermediate = True\n if not is_intermediate:\n out_ext = ext\n if 'clip.' in out_ext:\n out_ext = out_ext.replace('clip.', '')\n if 'filter.' in out_ext:\n out_ext = out_ext.replace('filter.', 'd{}.'.format(options.filter))\n toil.exportFile(idx_id, makeURL(os.path.join(options.outDir, '{}.{}'.format(options.outName, out_ext))))", "title": "" }, { "docid": "e2de30753670716534c983083dd676a2", "score": "0.49726242", "text": "def create_tar_archive(self, outdir=None):\n # for the tar archive filename, we need a timestamp \n timestamp = datetime.now().strftime('%F-%H%M%S') # e.g., '2020-04-21-132052'\n directory_basename = os.path.basename(self.abspath)\n tar_archive_name = f\"{timestamp}-{directory_basename}.tar.gz\"\n try:\n tar_archive_abspath = os.path.join(outdir.abspath, tar_archive_name)\n except AttributeError:\n tar_archive_abspath = os.path.join(self.abspath, tar_archive_name)\n with tarfile.open(tar_archive_abspath, mode=\"w:gz\") as tar:\n print(f\"Creating tar archive {tar_archive_abspath} ...\")\n tar.add(self.abspath, arcname=f\"{timestamp}-{directory_basename}\")\n return tar_archive_abspath", "title": "" }, { "docid": "638f8e8259d7ee7bbd806492d862d438", "score": "0.49636325", "text": "def upload_usp_family(folder, group_label, group_description, stop_if_existing=True):\n import os\n\n import aiida.common\n\n # from aiida.common import aiidalogger\n from aiida.common import NotExistent, UniquenessError\n from aiida.orm.querybuilder import QueryBuilder\n\n from .otfg import OTFGGroup\n\n files = [\n os.path.realpath(os.path.join(folder, i))\n for i in os.listdir(folder)\n if os.path.isfile(os.path.join(folder, i))\n and (\n i.lower().endswith(\".usp\")\n or i.lower().endswith(\"recpot\")\n or i.lower().endswith(\".uspcc\")\n )\n ]\n\n nfiles = len(files)\n\n try:\n group = OTFGGroup.get(label=group_label)\n group_created = False\n except NotExistent:\n group = OTFGGroup(\n label=group_label,\n )\n group_created = True\n\n # Update the descript even if the group already existed\n group.description = group_description\n\n pseudo_and_created = [] # A list of records (UspData, created)\n\n for f in files:\n\n md5sum = md5_file(f)\n qb = QueryBuilder()\n qb.append(UspData, filters={\"attributes.md5\": {\"==\": md5sum}})\n existing_usp = qb.first()\n\n # Add the file if it is in the database\n if existing_usp is None:\n pseudo, created = UspData.get_or_create(f, use_first=True, store_usp=False)\n pseudo_and_created.append((pseudo, created))\n\n # The same file is there already\n else:\n if stop_if_existing:\n raise ValueError(\n \"A usp/recpot with identical MD5 to\"\n \" {} cannot be added with stop_if_existing\"\n \"\".format(f)\n )\n existing_usp = 
existing_usp[0]\n pseudo_and_created.append((existing_usp, False))\n\n # Check for unique per element\n elements = [(i[0].element, i[0].md5sum) for i in pseudo_and_created]\n\n # Check if we will duplicate after insertion\n\n if not group_created:\n for aiida_n in group.nodes:\n if not isinstance(aiida_n, UspData):\n continue\n elements.append((aiida_n.element, aiida_n.md5sum))\n\n # Discard duplicated pairs\n elements = set(elements)\n elements_names = [e[0] for e in elements]\n\n # Check the uniqueness of the complete group\n if not len(elements_names) == len(set(elements_names)):\n duplicates = {x for x in elements_names if elements_names.count(x) > 1}\n dup_string = \", \".join(duplicates)\n raise UniquenessError(\n \"More than one usp/recpot found for the elements: \" + dup_string + \".\"\n )\n\n if group_created:\n group.store()\n\n # Save the usp in the database if necessary and add them to the group\n\n for pseudo, created in pseudo_and_created:\n if created:\n pseudo.store()\n # aiidalogger.debug(\"New node {} created for file {}\".format(\n # pseudo.uuid, pseudo.filename))\n else:\n # aiidalogger.debug(\"Reusing node {} for file {}\".format(\n # pseudo.uuid, pseudo.filename))\n pass\n\n nodes_new = [pseduo for pseduo, created in pseudo_and_created if created is True]\n nodes_add = [pseduo for pseduo, created in pseudo_and_created]\n group.add_nodes(nodes_add)\n\n return nfiles, len(nodes_new)", "title": "" }, { "docid": "ab5f865c86861c4d958d1a3f574b4fda", "score": "0.49579674", "text": "def generate_files_distributed(generator,\n output_name,\n output_dir,\n num_shards=1,\n max_cases=None,\n task_id=0):\n assert task_id < num_shards\n output_filename = sharded_name(output_name, task_id, num_shards)\n output_file = os.path.join(output_dir, output_filename)\n tf.logging.info(\"Writing to file %s\", output_file)\n writer = tf.python_io.TFRecordWriter(output_file)\n\n counter = 0\n for case in generator:\n if counter % 100000 == 0:\n tf.logging.info(\"Generating case %d for %s.\" % (counter, output_name))\n counter += 1\n if max_cases and counter > max_cases:\n break\n example = to_example(case)\n writer.write(example.SerializeToString())\n\n writer.close()\n return output_file", "title": "" }, { "docid": "004a4f09de47aba4be8e696b01ccc681", "score": "0.49563918", "text": "def write_detections(detections, filename):\n with h5py.File(filename, 'w') as f:\n for d in detections:\n # Compression factor with 'poor, but fast' lzf compression almost \n # factor 10 for 1 dataset, ~factor 35 for 100 datasets\n f.create_dataset(d, data=detections[d], compression='lzf')", "title": "" }, { "docid": "0ea0d30855d7d271d195112a3479f67b", "score": "0.49468485", "text": "def createArchesFile(input_dataset,out_dir):\n\n ds_name = os.path.basename(input_dataset)\n if os.path.splitext(ds_name)[1] != \"\":\n ds_name = os.path.splitext(ds_name)[0]\n outfile = os.path.join(out_dir,ds_name+\".arches\")\n \n with open(outfile,\"wb\") as arches:\n arches.write(\n \"RESOURCEID|RESOURCETYPE|ATTRIBUTENAME|ATTRIBUTEVALUE|GROUPID\\r\\n\")\n return outfile", "title": "" }, { "docid": "8ba340db2468b7568736c7785fcfd76a", "score": "0.49412504", "text": "def write_entries_group(bib_entries):\n\n # copy icons\n icons_folder = os.path.join(params['htmlfile_group'], 'Icons')\n params['author_group_Icons'] = icons_folder\n if not os.path.exists(icons_folder):\n os.mkdir(icons_folder)\n if params['icon_www']:\n params['author_group_icon_www'] = os.path.join(icons_folder, os.path.basename(params['icon_www']))\n 
shutil.copyfile(params['icon_www'], params['author_group_icon_www'])\n if params['icon_pdf']:\n params['author_group_icon_pdf'] = os.path.join(icons_folder, os.path.basename(params['icon_pdf']))\n shutil.copyfile(params['icon_pdf'], params['author_group_icon_pdf'])\n\n # copy css file\n static_folder = os.path.join(params['htmlfile_group'], 'Static')\n params['author_group_Static'] = static_folder\n if not os.path.exists(static_folder):\n os.mkdir(static_folder)\n if params['css_file'] and os.path.exists(params['css_file']):\n params['author_group_css'] = os.path.join(static_folder, os.path.basename(params['css_file']))\n shutil.copyfile(params['css_file'], params['author_group_css'])\n params['css_file'] = params['author_group_css']\n if params['bootstrap_css'] and os.path.exists(params['bootstrap_css']):\n params['author_group_bootstrap_css'] = os.path.join(static_folder, os.path.basename(params['bootstrap_css']))\n shutil.copyfile(params['bootstrap_css'], params['author_group_bootstrap_css'])\n params['bootstrap_css'] = params['author_group_bootstrap_css']\n\n title = params['title']\n\n # write entries selected by authors\n _write_entries_group_author(bib_entries)\n params['dict_title'] = params['dict_title_group']\n\n # write complete-bibliography.html\n params['title'] = title\n _write_entries_group_complete(bib_entries)\n\n # write complete-bibliography.bib\n biblio_folder = os.path.join(params['htmlfile_group'], 'Bibliography')\n params['outbibfile'] = os.path.join(biblio_folder, 'complete-bibliography.bib')\n write_entries_to_bibfile(bib_entries)\n\n # write entries selected by publication venues\n _write_entries_group_venue(bib_entries)\n\n # write entries selected by years\n _write_entries_group_year(bib_entries)\n\n # write entries selected by categories\n _write_entries_group_category(bib_entries)\n\n # write index.html\n params['title'] = title\n _write_entries_group_index(bib_entries)", "title": "" }, { "docid": "10034300e76d05c86bccd28a52048efb", "score": "0.4940712", "text": "def order_by( self, inspp, outfolder=curr_path, make_csv=True, make_graph=False, make_tree=False, make_tree_x=False, make_tree_y=False, group_every=None, num_classes=None, evenly=False ):\n db_name_short = os.path.basename(self.db_name).replace('.pickle.gz', '')\n\n if inspp not in self.spps:\n print \"requested species(%s) does not exist\" % inspp\n print self.spps\n sys.exit( 1 )\n\n\n pos_extra = \"\"\n spp_extra = \"\"\n\n db_fn = os.path.join( outfolder, db_name_short + '.pickle.gz.' + inspp )\n\n if group_every is not None:\n db_fn += \".every_\" + str(group_every)\n pos_extra += \" - every %dbp\" % group_every\n\n elif num_classes is not None:\n db_fn += \".classes_\" + str(num_classes)\n pos_extra += \" - in %d groups\" % num_classes\n\n elif evenly:\n db_fn += \".evenly\"\n\n if make_tree:\n db_fn += \".tree\"\n spp_extra += \" with clustering\"\n pos_extra += \" with clustering\"\n\n elif make_tree_x:\n db_fn += \".treex\"\n pos_extra += \" with clustering\"\n\n elif make_tree_y:\n db_fn += \".treey\"\n spp_extra += \" with clustering\"\n\n db_fn += '.pickle.gz'\n\n excerpt = {}\n\n if os.path.exists( db_fn ):\n print \"ordering %s :: db file %s exists. reading\" % ( inspp, db_fn )\n excerpt = vcf_walk.loads( db_fn )\n\n else:\n print \"ordering %s :: db file %s does not exists. 
parsing\" % ( inspp, db_fn )\n excerpt = self.filter_by(inspp, group_every=None, num_classes=None, evenly=False)\n\n #leng = max([len(x) for x in spps])\n #fmt = \" %-\"+str(leng)+\"s: %12d %12d \"\n\n #for chromosome_name in excerpt:\n # print \" \", chromosome_name\n # for start, end, name, line in excerpt[ chromosome_name ]:\n # print \" %-12s %12d %12d %s\" % ( name, start, end, str(line) )\n\n print \"ordering %s :: ordered :: dumping\" % inspp\n vcf_walk.dumps( db_fn, excerpt )\n print \"ordering %s :: ordered :: done\" % inspp\n\n\n sppindex = self.spps[ inspp ]\n sppindexinv = self.getSppIndexInvert( )\n\n\n if make_csv or make_graph:\n print \"ordering %s :: exporting\" % inspp\n\n fhdcsv = None\n if make_csv:\n fhdcsv = open(db_fn + '.csv', 'w')\n\n for chromosome_name in sorted(excerpt):\n hlines = []\n dlines = []\n #print excerpt[chromosome_name]\n\n if make_csv:\n hlines.append( chromosome_name + \"\\n\" )\n hlines.append( \"start,\" )\n hlines.append( \",\".join([ str(x[vcf_walk.DB_START ] ) for x in excerpt[chromosome_name] ]) )\n hlines.append( \"\\n\" )\n\n hlines.append( \"end,\" )\n hlines.append( \",\".join([ str(x[vcf_walk.DB_END ] ) for x in excerpt[chromosome_name] ]) )\n hlines.append( \"\\n\" )\n\n hlines.append( \"num_unities,\" )\n hlines.append( \",\".join([ str(x[vcf_walk.DB_LEN_OBJ] ) for x in excerpt[chromosome_name] ]) )\n hlines.append( \"\\n\" )\n\n hlines.append( \"num_snps,\" )\n hlines.append( \",\".join([ str(x[vcf_walk.DB_LEN_SNP] ) for x in excerpt[chromosome_name] ]) )\n hlines.append( \"\\n\" )\n\n dlines.append( \"name,\" )\n dlines.append( \",\".join([ str(x[vcf_walk.DB_NAME ] ) for x in excerpt[chromosome_name] ]) )\n dlines.append( \"\\n\" )\n\n for spp in sppindexinv:\n sppindex = self.spps[ spp ]\n data = []\n\n for x in excerpt[chromosome_name]:\n xline = x[vcf_walk.DB_LINE]\n val = xline[sppindex]\n data.append( str(val) )\n\n dlines.append( spp + \",\" )\n dlines.append( \",\".join( data ) )\n dlines.append( \"\\n\" )\n\n\n if make_csv:\n print \"ordering %s :: exporting :: exporting chromosome %s to csv\" % (inspp, chromosome_name)\n\n fhdcsv.writelines( hlines )\n fhdcsv.writelines( dlines )\n fhdcsv.write( \"\\n\\n=\\n\\n\" )\n\n\n if make_graph:\n print \"ordering %s :: exporting :: exporting chromosome %s to image\" % (inspp, chromosome_name)\n #Rscript heat.R < /tmp/heat.csv\n\n pos_extra_str = pos_extra\n if evenly:\n db = excerpt[ chromosome_name ]\n dbLen = len(db)\n chromLen = db[-1][vcf_walk.DB_END] - db[0][vcf_walk.DB_START]\n\n num_classes = dbLen\n\n group_every = int(chromLen / num_classes)\n\n pos_extra_str += \" - evenly distributed %dbp in %d groups of %dbp\" % (chromLen, dbLen, group_every)\n\n\n fn_R = db_fn + '.csv.%s.R' % chromosome_name\n fn_R_o = db_fn + '.csv.%s.R.stdout' % chromosome_name\n fn_R_e = db_fn + '.csv.%s.R.stderr' % chromosome_name\n fn_Png = db_fn + '.csv.%s.png' % chromosome_name\n fn_Pdf = db_fn + '.csv.%s.pdf' % chromosome_name\n fn_Svg = db_fn + '.csv.%s.svg' % chromosome_name\n fn_csv = db_fn + '.csv.%s.csv' % chromosome_name\n\n R = genR(chromosome_name, inspp, fn_csv, fn_Png, fn_Pdf, fn_Svg, make_tree=make_tree, make_tree_x=make_tree_x, make_tree_y=make_tree_y, pos_extra=pos_extra_str, spp_extra=spp_extra)\n\n with open( fn_csv, 'w' ) as fhdcsvr:\n fhdcsvr.writelines( dlines )\n\n with open( fn_R, 'w' ) as fhd:\n fhd.write( R )\n\n print \"ordering %s :: exporting :: exporting chromosome %s to image :: running R script: %s\" % (inspp, chromosome_name, fn_R)\n #print R\n\n call([\"Rscript\", 
fn_R], stdout=open(fn_R_o, 'wb'), stderr=open(fn_R_e, 'wb'))\n\n #to_img( db_name, spps, dlines, inspp )\n\n print \"ordering %s :: exporting :: exported chromosome %s to image\" % (inspp, chromosome_name)\n print\n print \"ordering %s :: exporting :: done\" % inspp", "title": "" }, { "docid": "699ba597180d0c1e0c9fb26a91edf17d", "score": "0.49352628", "text": "def save_files_to_h5(X, Y, N, labels, output_file):\n\n store = h5py.File(output_file, \"w\")\n store.create_dataset(\n 'data', data=X,\n dtype='float32', compression='gzip', compression_opts=4\n )\n if compute_normals:\n store.create_dataset(\n 'normal', data=N,\n dtype='float32', compression='gzip', compression_opts=4\n )\n store.create_dataset(\n 'label', data=labels,\n dtype='uint8', compression='gzip', compression_opts=1\n )\n store.create_dataset(\n 'pid', data=Y,\n dtype='uint8', compression='gzip', compression_opts=1\n )", "title": "" }, { "docid": "01c9ab9a806582e213e219f00e408405", "score": "0.49234766", "text": "def create_archive(archive, folder):\n with tarfile.open(archive, \"w:gz\") as tar:\n tar.add(folder, arcname=os.path.basename(folder))", "title": "" }, { "docid": "9e1a15b6cf51d27cad3dd78ca3c6c115", "score": "0.49186638", "text": "def write_to_file(startdate, gen_output, nc_fname, metadata=None):\n ensemble_forecast = gen_output.get(\"ensemble_forecast\", None)\n deterministic = gen_output.get(\"deterministic\", None)\n motion_field = gen_output.get(\"motion_field\", None)\n ensemble_motion = gen_output.get(\"ensemble_motion\", None)\n\n if metadata is None:\n metadata = dict()\n\n if all((dataset is None for dataset in gen_output.values())):\n print(\"Nothing to store\")\n log(\"warning\", \"Nothing to store into .h5 file. Skipping.\")\n return None\n\n output_options = PD[\"output_options\"]\n # We don't support irregular nowcast outputs here\n nowcast_timestep = get_timesteps()\n\n ensemble_forecast, ens_scale_meta = prepare_data_for_writing(ensemble_forecast)\n deterministic, det_scale_meta = prepare_data_for_writing(deterministic)\n\n with h5py.File(os.path.join(output_options[\"path\"], nc_fname), 'w') as outf:\n if ensemble_forecast is not None and output_options[\"store_ensemble\"]:\n for eidx in range(PD[\"ensemble_size\"]):\n ens_grp = outf.create_group(\"member-{:0>2}\".format(eidx))\n utils.store_timeseries(ens_grp,\n ensemble_forecast[eidx, :, :, :],\n startdate,\n timestep=nowcast_timestep,\n metadata=ens_scale_meta)\n\n if ensemble_motion is not None and output_options[\"store_perturbed_motion\"]:\n for eidx in range(PD[\"ensemble_size\"]):\n try:\n ens_grp = outf[\"member-{:0>2}\".format(eidx)]\n except KeyError:\n ens_grp = outf.create_group(\"member-{:0>2}\".format(eidx))\n ens_grp.create_dataset(\"motion\", data=ensemble_motion[eidx])\n\n if deterministic is not None and output_options[\"store_deterministic\"]:\n det_grp = outf.create_group(\"deterministic\")\n utils.store_timeseries(det_grp, deterministic, startdate,\n timestep=nowcast_timestep,\n metadata=det_scale_meta)\n\n if output_options[\"store_motion\"]:\n outf.create_dataset(\"motion\", data=motion_field)\n\n meta = outf.create_group(\"meta\")\n # configuration \"OUTPUT_TIME_FORMAT\" is removed, new output uses ODIM standard\n meta.attrs[\"nowcast_started\"] = dt.datetime.strftime(metadata[\"time_at_start\"],\n \"%Y-%m-%d %H:%M:%S\")\n meta.attrs[\"nowcast_ended\"] = dt.datetime.strftime(metadata[\"time_at_end\"],\n \"%Y-%m-%d %H:%M:%S\")\n meta.attrs[\"nowcast_units\"] = metadata.get(\"unit\", \"Unknown\")\n 
meta.attrs[\"nowcast_seed\"] = metadata.get(\"seed\", \"Unknown\")\n meta.attrs[\"nowcast_init_time\"] = dt.datetime.strftime(startdate, \"%Y%m%d%H%M\")\n\n # Old configurations - may be used by postprocessing scripts\n old_style_configs = {\n # Method selections\n #\"DOMAIN\": \"fmi\", # postprocessing defines this instead of reading it here\n \"VALUE_DOMAIN\": \"rrate\" if PD[\"run_options\"][\"forecast_as_quantity\"] == \"RATE\" else \"dbz\", # Unused?\n # Z-R conversion parameters\n \"ZR_A\": PD[\"data_options\"][\"zr_a\"], #\n \"ZR_B\": PD[\"data_options\"][\"zr_b\"], #\n # Nowcasting parameters\n \"NOWCAST_TIMESTEP\": nowcast_timestep, #\n \"MAX_LEADTIME\": PD[\"run_options\"][\"max_leadtime\"], #\n \"NUM_TIMESTEPS\": PD[\"run_options\"][\"leadtimes\"], #\n \"ENSEMBLE_SIZE\": PD[\"ensemble_size\"], #\n \"NUM_CASCADES\": PD[\"nowcast_options\"].get(\"n_cascade_levels\", 6), # Unused?\n \"RAIN_THRESHOLD\": PD[\"out_rain_threshold\"], # Unused?\n \"NORAIN_VALUE\": PD[\"out_norain_value\"], #\n \"KMPERPIXEL\": PD[\"nowcast_options\"][\"kmperpixel\"], # Unused?\n \"CALCULATION_DOMAIN\": PD[\"nowcast_options\"][\"domain\"], # Unused?\n \"VEL_PERT_KWARGS\": PD[\"nowcast_options\"][\"vel_pert_kwargs\"],\n # Storing parameters\n \"FIELD_VALUES\": PD[\"output_options\"][\"as_quantity\"], # Unused?\n \"STORE_DETERMINISTIC\": output_options[\"store_deterministic\"], #\n \"STORE_PERTURBED_MOTION\": output_options[\"store_perturbed_motion\"], #\n }\n\n pd_meta = meta.create_group(\"configuration\")\n for key, value in old_style_configs.items():\n pd_meta.attrs[key] = str(value)\n\n proj_meta = meta.create_group(\"projection\")\n for key, value in metadata[\"projection\"].items():\n proj_meta.attrs[key] = value\n\n return None", "title": "" }, { "docid": "7eff8e1731b2ff5dd37ea11d753b7486", "score": "0.4917919", "text": "def generate_standard_outputs(self, output_dir, gtfs_day='19700101'):\n gngeojson.generate_standard_outputs_for_schedule(self, output_dir, gtfs_day)\n logging.info('Finished generating standard outputs. 
Zipping folder.')\n persistence.zip_folder(output_dir)", "title": "" }, { "docid": "eb5c69f8be18fe9723020203513e1a89", "score": "0.49162012", "text": "def _write_results_file(self, all_boxes, output_dir):\n\n #format_str = \"{}, -1, {}, {}, {}, {}, {}, -1, -1, -1\"\n\n files = {}\n for cls in all_boxes:\n for i, dets in enumerate(cls):\n path = self.image_path_at(i)\n img1, name = osp.split(path)\n # get image number out of name\n frame = int(name.split('.')[0])\n # smth like /train/MOT17-09-FRCNN or /train/MOT17-09\n tmp = osp.dirname(img1)\n # get the folder name of the sequence and split it\n tmp = osp.basename(tmp).split('-')\n # Now get the output name of the file\n out = tmp[0]+'-'+tmp[1]+'.txt'\n outfile = osp.join(output_dir, out)\n\n # check if out in keys and create empty list if not\n if outfile not in files.keys():\n files[outfile] = []\n\n for d in dets:\n x1 = d[0]\n y1 = d[1]\n x2 = d[2]\n y2 = d[3]\n score = d[4]\n files[outfile].append([frame, -1, x1+1, y1+1, x2-x1+1, y2-y1+1, score, -1, -1, -1])\n\n for k,v in files.items():\n #outfile = osp.join(output_dir, out)\n with open(k, \"w\") as of:\n writer = csv.writer(of, delimiter=',')\n for d in v:\n writer.writerow(d)", "title": "" }, { "docid": "e10a87863efd30bd513a6b3af03cf2d1", "score": "0.49148518", "text": "def create_hdf5(\n bigwig_paths, chrom_sizes_path, out_path, chunk_size, batch_size=100\n):\n bigwig_readers = [pyBigWig.open(path) for path in bigwig_paths]\n \n # Read in chromosome sizes\n with open(chrom_sizes_path, \"r\") as f:\n chrom_sizes = {}\n for line in f:\n tokens = line.strip().split(\"\\t\")\n chrom_sizes[tokens[0]] = int(tokens[1])\n \n # Convert batch size to be in terms of rows, not number of chunks\n batch_size = batch_size * chunk_size\n\n with h5py.File(out_path, \"w\") as f:\n # Store source paths\n f.create_dataset(\"bigwig_paths\", data=np.array(bigwig_paths, dtype=\"S\"))\n for chrom in sorted(chrom_sizes.keys()):\n chrom_size = chrom_sizes[chrom]\n num_batches = int(np.ceil(chrom_size / batch_size))\n chrom_dset = f.create_dataset(\n chrom, (chrom_size, len(bigwig_paths), 1), dtype=\"f\",\n compression=\"gzip\", chunks=(chunk_size, len(bigwig_paths), 1)\n )\n for i in tqdm.trange(num_batches, desc=chrom):\n start = i * batch_size\n end = min(chrom_size, (i + 1) * batch_size)\n\n values = np.stack([\n np.stack([\n np.nan_to_num(reader.values(chrom, start, end))\n ], axis=1) for reader in bigwig_readers\n ], axis=1)\n\n chrom_dset[start : end] = values", "title": "" }, { "docid": "256f02c777a0d1c72ad15dc7cd4b7905", "score": "0.49132812", "text": "def write(self, fname, **kwargs):\n\n f = fits.HDUList()\n\n f.append(fits.PrimaryHDU())\n\n thdr = fits.table_to_hdu(self.gal.get_table())\n thdr.name = 'GALAXY'\n f.append(thdr)\n\n if self.spec is not None:\n f[0].header['HASSPEC'] = True\n f.append(fits.ImageHDU(name='SPEC_DATA', data=self.spec.data.data))\n f.append(fits.ImageHDU(name='SPEC_VAR', data=self.spec.var.data))\n else:\n f[0].header['HASSPEC'] = False\n\n if self.ddata is not None:\n f[0].header['HASDATA'] = True\n f[0].header['NDSETS'] = len(self.datasets)\n\n f.append(fits.ImageHDU(name='DATA', data=self.ddata))\n for ds in self.datasets:\n f.append(fits.ImageHDU(**ds))\n\n fig = Figure()\n canvas = Canvas(fig)\n self.plot(fig.gca())\n canvas.draw()\n\n w, h = canvas.get_width_height()\n plot = np.fromstring(canvas.tostring_argb(), dtype='uint8').reshape(\n (h, w, 4))\n\n f.append(fits.ImageHDU(name='PLOT', data=plot))\n else:\n f[0].header['HASDATA'] = False\n\n f.writeto(fname, 
**kwargs)", "title": "" }, { "docid": "aff7c0cbddf3f4dd9273903d2593fdf8", "score": "0.49125293", "text": "def genGroupFromTraffic(self, trafficfile):\n pass", "title": "" }, { "docid": "6ad2b4afc7cb8c46a9619fb5ab83c86e", "score": "0.49042654", "text": "def open_halo_output(self):\n \n \n try:\n self.halo_output_file = h5.File(self.HDF_output_filepath, \"w\")\n\n except OSError:\n for obj in gc.get_objects(): # Browse through ALL objects\n if isinstance(obj, h5.File): # Just HDF5 files\n try:\n obj.close()\n except:\n pass # Was already closed \n self.halo_output_file = h5.File(self.HDF_output_filepath, \"w\")\n\n self.halo_output_dataset = self.halo_output_file.create_dataset(\n \"halo_data\", (0,), maxshape=(None,), dtype=self.dtype_halo, compression=\"gzip\"\n )\n \n self.subhalo_output_dataset = self.halo_output_file.create_dataset(\n \"subhalo_data\", (0,), maxshape=(None,), dtype=self.subhalo_dtype, compression=\"gzip\"\n )\n \n return None", "title": "" }, { "docid": "f12875305def5e7c09b75e00e041148b", "score": "0.4899638", "text": "def generate_files(self, output_dir: str) -> None:\n self._write_file(output_dir, self._OUTPUT_FILE, self._generate_zones())", "title": "" }, { "docid": "45e2e6d4f38e8aa157c0b1be0610871a", "score": "0.48976526", "text": "def process_set_metadata(self, data, set_name):\n hdf5_handler = self.hdf5_manager.get_group(set_name)\n image_filenames = []\n keypoints = []\n object_id = []\n object_fields = [\"image_filenames\", \"keypoints\"]\n\n if self.verbose:\n print('> Adding data to default group:')\n prgbar = progressbar.ProgressBar(max_value=len(data))\n\n for i, annot in enumerate(data):\n image_filenames.append(annot[\"filename\"])\n keypoints.append(annot[\"joints\"])\n\n object_id.append([i, i])\n\n # update progressbar\n if self.verbose:\n prgbar.update(i)\n\n # update progressbar\n if self.verbose:\n prgbar.finish()\n\n hdf5_write_data(hdf5_handler, 'image_filenames',\n str2ascii(image_filenames),\n dtype=np.uint8, fillvalue=0)\n hdf5_write_data(hdf5_handler, 'keypoints',\n np.array(keypoints, dtype=np.float),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'keypoint_names',\n str2ascii(self.keypoints_labels),\n dtype=np.uint8, fillvalue=0)\n hdf5_write_data(hdf5_handler, 'object_ids',\n np.array(object_id, dtype=np.int32),\n fillvalue=-1)\n hdf5_write_data(hdf5_handler, 'object_fields',\n str2ascii(object_fields),\n dtype=np.uint8, fillvalue=0)", "title": "" }, { "docid": "f27d38b86522261688a78a7346bc5e2f", "score": "0.48972598", "text": "def save_gtfs(filename,tables):\n\twith zipfile.ZipFile(filename,\"w\",compression=zipfile.ZIP_DEFLATED) as zipf:\n\t\tfor (name,table) in tables.items():\n\t\t\tfieldnames = table[0].keys()\n\t\t\ttablef = tempfile.NamedTemporaryFile(mode=\"w\",encoding=\"utf-8\")\n\t\t\t#FIXME: extrasaction is set to ignore due to bug in csv.py in Python3.6\n\t\t\twriter=csv.DictWriter(tablef,fieldnames,extrasaction=\"ignore\")\n\t\t\twriter.writeheader()\n\t\t\twriter.writerows(table)\n\t\t\ttablef.flush()\n\t\t\tzipf.write(tablef.name,name+\".txt\")\n\t\t\ttablef.close()", "title": "" }, { "docid": "6c59439b20e0ca626247f05fc7b5185e", "score": "0.48950222", "text": "def _save_results(self, zipdata, outdir, module, gmt, rank_metric, permutation_type):\n\n res = OrderedDict()\n for gs,gseale,ind,RES in zipdata:\n rdict = OrderedDict()\n rdict['es'] = gseale[0]\n rdict['nes'] = gseale[1]\n rdict['pval'] = gseale[2]\n rdict['fdr'] = gseale[3]\n rdict['gene_set_size'] = len(gmt[gs])\n rdict['matched_size'] = len(ind)\n #reformat gene 
list.\n _genes = rank_metric.iloc[ind, rank_metric.columns.get_loc('gene_name')]\n _genes = _genes.to_string(header=False, index=False).split(\"\\n\")\n rdict['genes'] = \",\".join([ g.strip() for g in _genes ])\n\n rdict['rank_ES'] = RES\n rdict['hit_index'] = ind\n res[gs] = rdict\n\n res_df = pd.DataFrame.from_dict(res, orient='index')\n res_df.index.name = 'Term'\n res_df.sort_values(by='fdr', inplace=True)\n res_df.drop(['rank_ES','hit_index'], axis=1, inplace=True)\n res_df.to_csv('{a}/gseapy.{b}.{c}.report.csv'.format(a=outdir, b=module, c=permutation_type),\n float_format ='%.7f')\n\n self.res2d = res_df\n self.results = res\n return", "title": "" }, { "docid": "5510a4f25e94771ef37f98b8d6c9c6b0", "score": "0.48928303", "text": "def addhdf5(self, HDF5group):\n HDF5group.attrs['type'] = self.__class__.__name__\n HDF5group['Nvstars'] = self.Nvstars\n HDF5group['vecposlist'], HDF5group['vecposindex'] = doublelist2flatlistindex(self.vecpos)\n HDF5group['vecveclist'], HDF5group['vecvecindex'] = doublelist2flatlistindex(self.vecvec)\n HDF5group['outer'] = self.outer", "title": "" }, { "docid": "ca6d4ca8628fa2a2bb2384bf5667b503", "score": "0.48912898", "text": "def background_group():\n obs_table = config.obs_table.copy()\n\n # Define observation groups\n # zenith_bins = np.array([0, 20, 30, 40, 50, 90])\n zenith_bins = np.array([0, 49, 90])\n # zenith_bins = np.array([0, 30, 90]) # for testing\n axes = [ObservationGroupAxis('ZEN_PNT', zenith_bins, 'bin_edges')]\n obs_groups = ObservationGroups(axes)\n log.info(obs_groups.info)\n\n # Apply observation grouping\n obs_table = obs_groups.group_observation_table(obs_table)\n\n # Store the results\n filename = config.obs_table_grouped_filename\n log.info('Writing {}'.format(filename))\n obs_table.write(str(filename), format='ascii.ecsv')\n\n filename = config.group_def_filename\n log.info('Writing {}'.format(filename))\n obs_groups.obs_groups_table.write(str(filename), format='ascii.ecsv')", "title": "" }, { "docid": "8ad1397340c695b5f0c7684be27b06a2", "score": "0.48905203", "text": "def main(outfolder = './gazette_data/', outtype = sys.argv[1]):\n \n # Check if gazette data exists (whether gazette.py has been run) #\n data_exists_check()\n # Check whether save directories exists, and if not, create them #\n save_directory_check()\n # Files that have extra spaces, indents, etc. 
and won't run smoothly through.\n bad_dates = ['2006/18_Sep30','2006/17_Sep15','2016/18_Sept30','2017/11_Jun15','2018/10_May31']\n # Instantiate empty lists for keeping data before saving out.\n incorporations, namechanges = [], [] \n # Format default output as CSVs if no sys.argv[1] provided\n if not sys.argv[1]:\n outtype = 'csv'\n \n # Begin parsing files by year #\n for year in range(2006,2018+1):\n files = os.listdir('cache/gazette/'+str(year))\n for filename in files:\n if filename.endswith('.txt'):\n date_string = str(year) + '/' + filename[:-4]\n if date_string not in bad_dates:\n # The following returns pandas DataFrames if outtype is 'csv', list of pickles if outtype is 'pickles' #\n inc, nmch = make_incorporations_and_namechange(date_string,outfolder = outfolder,outtype = outtype)\n if outtype == 'csv':\n incorporations.append(inc)\n namechanges.append(nmch)\n elif outtype == 'pickles':\n incorporations += inc\n namechanges += nmch\n \n # Write out files #\n \n if outtype == 'csv': \n make_master_tables(incorporations,namechanges)\n \n elif outtype =='pickles': \n # Incorporations #\n pickle_out = open(outfolder + \"incorporations_2006-2018.pickle\",\"wb\")\n pickle.dump(incorporations, pickle_out)\n pickle_out.close()\n # Namechanges #\n pickle_out = open(outfolder + \"namechanges_2006-2018.pickle\",\"wb\")\n pickle.dump(namechanges, pickle_out)\n pickle_out.close()", "title": "" }, { "docid": "8464957a2a23a652b7aeb986e25b2aa8", "score": "0.48893464", "text": "def createAgents(self, group):\n for ty in group.agentTypes:\n for ag in ty.agents:\n self.newagent(ag, ty.name, group.freezeAnimation)", "title": "" }, { "docid": "98b46f1f5f759ce89b74d373aba562c5", "score": "0.4879788", "text": "def main(args):\n\n\n data = pandas.read_csv(args.infile,index_col=0)\n if not args.nocls:\n data = data[args.group1+args.group2]\n\n samples = data.columns.tolist()\n genes = data.index.tolist()\n\n descriptions = ['NA']*data.shape[0]\n temp = ['NAME','DESCRIPTION'] + samples\n\n gct = pandas.DataFrame(columns=temp, index=data.index)\n gct['NAME'] = genes\n gct['DESCRIPTION'] = descriptions\n for i in samples:\n gct[i] = data[i]\n gct.to_csv('temp.txt',sep='\\t',index=False)\n\n gct = open('temp.txt', 'r').read()\n fout = open(args.prefix+'.gct', 'w')\n fout.write('#1.2\\n')\n fout.write(str(data.shape[0]) + '\\t' + str(len(samples)) + '\\n')\n fout.write(gct)\n fout.close()\n\n if not args.nocls:\n fout = open(args.prefix + '.cls','w')\n fout.write(str(len(args.group1) + len(args.group2)) + ' 2 1\\n')\n fout.write('# ' + args.phenotypes[0] + ' ' + args.phenotypes[1] + '\\n')\n fout.write(' '.join(['0']*len(args.group1)) + ' ' + ' '.join(['1']*len(args.group2)) + '\\n')\n fout.close()", "title": "" }, { "docid": "9a1524f71c467eaf3639777623947e2f", "score": "0.48788592", "text": "def _save_reconstructions(self, reconstructions):\n self.out_dir.mkdir(exist_ok=False) # Throw an error to prevent overwriting.\n gzip = dict(compression='gzip', compression_opts=1, shuffle=True, fletcher32=True)\n for file_name, recons in tqdm(reconstructions.items()):\n with h5py.File(self.out_dir / file_name, mode='x', libver='latest') as f: # Overwriting throws an error.\n f.create_dataset('reconstruction', data=recons, **gzip)", "title": "" } ]
92a56c7f57a736b69d2267ad2bc5e9d5
Authenticates a challenge message and responds to the requestor.
[ { "docid": "2b049151ea4c4fa28d8780963a4016be", "score": "0.8119253", "text": "def authenticate_challenge(self, message: Dict[Any, Any]):\n\n challenge = message[\"challenge\"]\n log.info(\"Authenticating a challenge from the server\")\n\n try:\n signed_challenge = sign_challenge(\n base64.b64decode(challenge), self.private_key\n )\n\n message = {\n \"ChallengeResponse\": {\n \"email\": self.email,\n \"model_name\": self.model_name,\n \"response\": base64.b64encode(signed_challenge).decode(\"utf-8\"),\n }\n }\n\n self._send_message(message)\n except KeyError:\n log.error(\"Failed to find the private key in the environment\")", "title": "" } ]
[ { "docid": "e7ab498cb458cdcb5097216cbfed59d8", "score": "0.7524756", "text": "def process_challenge(self, message: str) -> None:\n\t\tvalue = message.get('val', None)\n\n\t\tif value != self.challenge_ans:\n\t\t\tlogger.error(\"Authentication failed\")\n\t\t\treturn False\n\t\t\n\n\t\tself.state = STATE_AUTH\n\t\tself._send({'type':'OK'})\n\n\t\treturn True", "title": "" }, { "docid": "b1dccbd12c98a4b96df5fc9bf408840a", "score": "0.74329895", "text": "def process_challenge_response(self, message):\n\t\tusername = message['credentials']['username']\n\t\t\n\t\tif username not in self.registered_users or 'A1' not in self.registered_users[username][1]:\n\t\t\tlogger.info(\"{} authenticated denied.\".format(username))\n\t\t\tmessage = {'type': 'AUTH_RESPONSE', 'status': 'DENIED'}\n\t\t\tsecure_message = self.encrypt_payload(message)\n\t\t\tself._send(secure_message)\n\t\t\tself.send_mac()\n\t\t\treturn False\n\n\t\tsigned_challenge = message['credentials']['signed_challenge']\n\t\tpw, permissions = self.registered_users[username][0], self.registered_users[username][1]\t\n\t\tsignature_verification = self.crypto.rsa_signature_verification(base64.b64decode(signed_challenge.encode()), (str(self.client_nonce) + pw + str(self.crypto.auth_nonce)).encode(), self.crypto.load_public_key(self.client_public_key))\n\n\t\tif signature_verification:\n\t\t\tlogger.info(\"{} authenticated with success!\".format(username))\n\t\t\tmessage = {'type': 'AUTH_RESPONSE', 'status': 'SUCCESS', 'username': username}\n\t\t\tsecure_message = self.encrypt_payload(message)\n\t\t\tself._send(secure_message)\n\t\t\tself.send_mac()\n\t\t\tself.authenticated_user = [username, permissions]\n\n\t\telse:\n\t\t\tself.authentication_tries += 1\n\t\t\tif self.authentication_tries == 3:\n\t\t\t\t# remove authentication permission\n\t\t\t\tself.registered_users[username][1] = self.registered_users[username][1].replace('1', '0')\n\t\t\t\tself.update_users()\n\t\t\t\tlogger.info(\"{} authenticated denied.\".format(username))\n\t\t\t\tmessage = {'type': 'AUTH_RESPONSE', 'status': 'DENIED'}\n\t\t\t\tsecure_message = self.encrypt_payload(message)\n\t\t\t\tself._send(secure_message)\n\t\t\t\tself.send_mac()\n\t\t\t\treturn False\n\n\t\t\tlogger.info(\"{} authenticated failed.\".format(username))\n\t\t\tmessage = {'type': 'AUTH_RESPONSE', 'status': 'FAILED'}\n\t\t\tsecure_message = self.encrypt_payload(message)\n\t\t\tself._send(secure_message)\n\t\t\tself.send_mac()\n\t\t\treturn True\n\t\t\n\t\treturn True", "title": "" }, { "docid": "72e890d5341676f0833fa204f2e0b630", "score": "0.70976585", "text": "def handle_challenge(self, challenge: bytes) -> bytes:\n raise NotImplementedError", "title": "" }, { "docid": "d8168a98462fb6a6ee4ee17d23912830", "score": "0.6681826", "text": "def triggerChallenge(self):\r\n self.send(\"\"\"<challenge xmlns='urn:ietf:params:xml:ns:xmpp-sasl'>cmVhbG09ImNoZXNzcGFyay5jb20iLG5vbmNlPSJ0YUhIM0FHQkpQSE40eXNvNEt5cFlBPT0iLHFvcD0iYXV0aCxhdXRoLWludCIsY2hhcnNldD11dGYtOCxhbGdvcml0aG09bWQ1LXNlc3M=</challenge>\"\"\")", "title": "" }, { "docid": "03ac3aeb12e0f5e43dfb3056dd6c38bd", "score": "0.65662795", "text": "def auth_challenge(self):\n\n self.set_header(\"WWW-Authenticate\", \"Basic realm=pyjojo\")\n self.set_status(401)\n self.finish()", "title": "" }, { "docid": "ef73f81cf0acac1e8ff79a7e900ced45", "score": "0.6551384", "text": "def challenge(self):\n if self._challenge is None:\n self.sock.sendto(a2s_getchallenge_req(), self.srvaddr)\n inforsp = recvresponse(self.sock)\n self._challenge = 
a2s_getchallenge_rsp(inforsp)[\"challenge\"]\n return self._challenge", "title": "" }, { "docid": "a634fd589381dd25bc3923e4a8944744", "score": "0.65054107", "text": "def challenge_response(self, challenge):\n key_1 = self.request.headers.get(\"Sec-Websocket-Key1\")\n key_2 = self.request.headers.get(\"Sec-Websocket-Key2\")\n try:\n part_1 = self._calculate_part(key_1)\n part_2 = self._calculate_part(key_2)\n except ValueError:\n raise ValueError(\"Invalid Keys/Challenge\")\n return self._generate_challenge_response(part_1, part_2, challenge)", "title": "" }, { "docid": "744d0279df8a34881c33353d10f98161", "score": "0.6491226", "text": "def get(self):\n self.response.out.write(self.request.get('challenge'))", "title": "" }, { "docid": "0195fd9e3a1aadd9a0a16d130010962a", "score": "0.6347538", "text": "def challenge(self, challenge):\n # pylint: disable=R0911\n if not challenge:\n logger.debug(\"Empty challenge\")\n return Failure(\"bad-challenge\")\n\n if self._server_first_message:\n return self._final_challenge(challenge)\n\n match = SERVER_FIRST_MESSAGE_RE.match(challenge)\n if not match:\n logger.debug(\"Bad challenge syntax: {0!r}\".format(challenge))\n return Failure(\"bad-challenge\")\n\n self._server_first_message = challenge\n\n mext = match.group(\"mext\")\n if mext:\n logger.debug(\"Unsupported extension received: {0!r}\".format(mext))\n return Failure(\"bad-challenge\")\n\n nonce = match.group(\"nonce\")\n if not nonce.startswith(self._c_nonce):\n logger.debug(\"Nonce does not start with our nonce\")\n return Failure(\"bad-challenge\")\n\n salt = match.group(\"salt\")\n try:\n salt = a2b_base64(salt)\n except ValueError:\n logger.debug(\"Bad base64 encoding for salt: {0!r}\".format(salt))\n return Failure(\"bad-challenge\")\n\n iteration_count = match.group(\"iteration_count\")\n try:\n iteration_count = int(iteration_count)\n except ValueError:\n logger.debug(\"Bad iteration_count: {0!r}\".format(iteration_count))\n return Failure(\"bad-challenge\")\n\n return self._make_response(nonce, salt, iteration_count)", "title": "" }, { "docid": "eb37ac6baf8d896b4b48e9b095b6a6fb", "score": "0.63438255", "text": "def process_login_request(self, message):\n\t\tself.state = STATE_CLIENT_AUTH\n\t\tself.client_nonce = base64.b64decode(message['nonce'].encode())\n\t\tself.crypto.auth_nonce = os.urandom(16)\n\t\tself.client_public_key = base64.b64decode(message['public_key'].encode())\n\t\tmessage = {'type': 'CHALLENGE_REQUEST', 'nonce': base64.b64encode(self.crypto.auth_nonce).decode()}\n\t\tsecure_message = self.encrypt_payload(message)\n\t\tself._send(secure_message)\n\t\tself.send_mac()\n\t\t\n\t\treturn True", "title": "" }, { "docid": "44428eaeb0244f85fb46d45692463873", "score": "0.6340404", "text": "def _send_challenge_result(\n request, session_id, challenge_id, client_input, access_token\n):\n body = {\n \"sessionId\": session_id,\n \"challengeId\": challenge_id,\n \"action\": \"RESPOND\",\n \"proposalResponse\": client_input,\n }\n metrics_header = {metrics.API_CLIENT_HEADER: metrics.reauth_continue()}\n\n return _client._token_endpoint_request(\n request,\n _REAUTH_API + \"/{}:continue\".format(session_id),\n body,\n access_token=access_token,\n use_json=True,\n headers=metrics_header,\n )", "title": "" }, { "docid": "80ddf053e89f40e99b58b0b9fc052d75", "score": "0.6324714", "text": "def start_responding(self, ignore, challenge, response):\n self.putChild(\n challenge.encode('token').encode('utf-8'),\n StaticTextResource(response.key_authorization),\n )", "title": "" }, { "docid": 
"7df5b38e6c4ef96a36abb96ccb9cc353", "score": "0.6309285", "text": "def authenticate(self, message):\n if self.authenticated == self.auth_statuses[\"auth_complete\"]:\n return\n\n if not self.is_auth() and message.strip() in [CLIENT_HELLO, INJECTOR_HELLO, ADMIN_HELLO]:\n self.change_auth_status(self.auth_statuses[\"auth_ok\"])\n self.send(SERVER_HELLO)\n if message.strip() == CLIENT_HELLO:\n self.client_type = self.client_types[\"hook\"]\n elif message.strip() == INJECTOR_HELLO:\n self.client_type = self.client_types[\"injector\"]\n elif message.strip() == ADMIN_HELLO:\n self.client_type = self.client_types[\"admin\"]\n return\n\n if self.client_type == self.client_types[\"admin\"]:\n admin = AdminAuthentication(self, self.db)\n admin.run(message)\n return\n\n if self.authenticated == self.auth_statuses[\"auth_ok\"]:\n response = message.splitlines()\n self.status, uid = response[0].strip().split(\" \")\n if int(self.status) != (PROTOCOL_STATUS_CODES[\"authentication\"]):\n return\n\n self.change_auth_status(self.auth_statuses[\"auth_complete\"])\n self.uid = uid\n\n self.send(str(PROTOCOL_STATUS_CODES[\"ok\"]) + \" Authentication complete\")\n LOGGER.info(\"{client_info} has authenticated\".format(client_info=self))\n return\n return", "title": "" }, { "docid": "784e0272e7533c1cfa73dfc053142893", "score": "0.61799854", "text": "def make_basic_challenge(self, realm, message=None):\n\n if message is None:\n message = \"Authentication is required\"\n\n authenticate = 'Basic realm=\"%s\"' % realm\n\n return Response(message, 401, {\"WWW-Authenticate\": authenticate})", "title": "" }, { "docid": "7e7a563e89802aa268fe7f4cb79a0fd4", "score": "0.6168865", "text": "def init_challenge(self, ignored):\n if not self.challenge_loaded:\n print('You must first send a challenge to the server.')\n return\n\n self.write_line('CHALLENGE_INITIATE')\n response = self.read_line()\n if response == 'CHALLENGE_NOT_FOUND':\n print(' No challenge has been sent to the server yet.')\n return\n elif response == 'CHALLENGE_ALREADY_ACTIVE':\n print(' Another challenge is already active.')\n return\n elif response == 'NO_PARTICIPANTS_CONNECTED':\n print(' There are no participants connected to the server.')\n return\n elif response == 'CHALLENGE_ERROR':\n print(' Unknown error')\n return\n \n while response != 'PARTICIPANT_LIST_FINISHED':\n participant_name = self.read_line()\n participant_editor = self.read_line()\n print('Participant \"{}\" accepted the challenge (editor \"{}\").'\n .format(participant_name, participant_editor))\n response = self.read_line()\n \n num_participants_accepting = self.read_line()\n num_participants_total = self.read_line()\n print('{} of {} participants accepted to do the challenge.'\n .format(num_participants_accepting, num_participants_total))\n \n print('Would you like to start the challenge? 
[Y/n]')\n answer = input(' > ')\n if answer.lower() != 'y':\n self.write_line('CHALLENGE_CANCEL')\n return\n \n self.write_line('CHALLENGE_START')\n response = self.read_line()\n if response == 'CHALLENGE_FINISH':\n print('Challenge successfully completed.')\n else:\n print('There was a problem with the challenge.')", "title": "" }, { "docid": "39140a527fd0cad46d112824311ee2f3", "score": "0.61504436", "text": "def _challenge_response(self, challenge, mode, slot, variable, may_block):\n # Check length and pad challenge if appropriate\n if mode == 'HMAC':\n if len(challenge) > yubikey_defs.SHA1_MAX_BLOCK_SIZE:\n raise yubico_exception.InputError('Mode HMAC challenge too big (%i/%i)' \\\n % (yubikey_defs.SHA1_MAX_BLOCK_SIZE, len(challenge)))\n if len(challenge) < yubikey_defs.SHA1_MAX_BLOCK_SIZE:\n pad_with = b'\\0'\n if variable and challenge[-1:] == pad_with:\n pad_with = b'\\xff'\n challenge = challenge.ljust(yubikey_defs.SHA1_MAX_BLOCK_SIZE, pad_with)\n response_len = yubikey_defs.SHA1_DIGEST_SIZE\n elif mode == 'OTP':\n if len(challenge) != yubikey_defs.UID_SIZE:\n raise yubico_exception.InputError('Mode OTP challenge must be %i bytes (got %i)' \\\n % (yubikey_defs.UID_SIZE, len(challenge)))\n challenge = challenge.ljust(yubikey_defs.SHA1_MAX_BLOCK_SIZE, b'\\0')\n response_len = 16\n else:\n raise yubico_exception.InputError('Invalid mode supplied (%s, valid values are HMAC and OTP)' \\\n % (mode))\n\n try:\n command = _CMD_CHALLENGE[mode][slot]\n except:\n raise yubico_exception.InputError('Invalid slot specified (%s)' % (slot))\n\n frame = yubikey_frame.YubiKeyFrame(command=command, payload=challenge)\n self._device._write(frame)\n response = self._device._read_response(may_block=may_block)\n if not yubico_util.validate_crc16(response[:response_len + 2]):\n raise YubiKeyUSBHIDError(\"Read from device failed CRC check\")\n return response[:response_len]", "title": "" }, { "docid": "e6dac9a3fac712c06bcdb9732a3060e4", "score": "0.61374694", "text": "def challenge(self, code):\n\n data = {\n 'code': code\n }\n token = self.base_request.request(\n 'auth/totp/verify', 'POST', data=data,\n endpoint=self.settings.get('api_endpoint'), login=True\n )\n self.settings.set(TOKEN_KEY, token)", "title": "" }, { "docid": "c5cbdbfc5382b824b20fbf775d0fc6ba", "score": "0.60651004", "text": "def process_server_auth(self, message):\n\t\tself.crypto.server_cert=self.crypto.load_cert(\"server_cert/secure_server.pem\")\n\t\tself.crypto.server_ca_cert=self.crypto.load_cert(\"server_roots/Secure_Server_CA.pem\")\n\t\tself.crypto.rsa_public_key=self.crypto.server_cert.public_key()\n\t\tself.crypto.rsa_private_key=self.crypto.load_key_from_file(\"server_key/server_key.pem\")\n\t\tnonce=base64.b64decode(message['nonce'].encode())\n\n\t\t# Encrypt NONCE received by client\n\t\tself.crypto.signature = self.crypto.rsa_signing(nonce, self.crypto.rsa_private_key)\n\n\t\tlogger.info(\"Sending certificates for validation\")\n\t\tmessage={'type':'SERVER_AUTH_RESPONSE','signature':base64.b64encode(self.crypto.signature).decode(),'server_cert':base64.b64encode(self.crypto.get_certificate_bytes(self.crypto.server_cert)).decode(),'server_roots':base64.b64encode(self.crypto.get_certificate_bytes(self.crypto.server_ca_cert)).decode()}\n\t\tsecure_message = self.encrypt_payload(message)\n\t\tself._send(secure_message)\n\t\tself.send_mac()\n\t\treturn True", "title": "" }, { "docid": "eaf10f9d00fed8445c64634c25ab1aa4", "score": "0.59784365", "text": "def getChallenge(self, request):\n return {'realm': self.authenticationRealm}", 
"title": "" }, { "docid": "1ffa8572df93759f903eb0eb84d96f36", "score": "0.59590465", "text": "def getChallenge(self):\n\t\tchallengeXML = self.requestFactory.generateQuestion()\n\t\tself.sessionSocket.sendall(challengeXML)\n\t\txmlString = self.sessionSocket.recv(1024).strip().decode('utf-8')\n\t\tprint self.responseHandler.handle(xmlString)", "title": "" }, { "docid": "8a58f15bebb5272615c8074de6a4e9e1", "score": "0.5938506", "text": "def auth_proceed(bot, trigger):\n if bot.config.core.auth_method == 'sasl':\n mech = bot.config.core.auth_target or 'PLAIN'\n elif bot.config.core.server_auth_method == 'sasl':\n mech = bot.config.core.server_auth_sasl_mech or 'PLAIN'\n else:\n return\n\n if mech == 'EXTERNAL':\n if trigger.args[0] != '+':\n # not an expected response from the server; abort SASL\n token = '*'\n else:\n token = '+'\n\n bot.write(('AUTHENTICATE', token))\n return\n\n if bot.config.core.auth_method == 'sasl':\n sasl_username = bot.config.core.auth_username\n sasl_password = bot.config.core.auth_password\n elif bot.config.core.server_auth_method == 'sasl':\n sasl_username = bot.config.core.server_auth_username\n sasl_password = bot.config.core.server_auth_password\n else:\n # How did we get here? I am not good with computer\n return\n\n sasl_username = sasl_username or bot.nick\n\n if mech == 'PLAIN':\n if trigger.args[0] == '+':\n sasl_token = _make_sasl_plain_token(sasl_username, sasl_password)\n LOGGER.info(\"Sending SASL Auth token.\")\n send_authenticate(bot, sasl_token)\n return\n else:\n # Not an expected response from the server\n LOGGER.warning(\n 'Aborting SASL: unexpected server reply \"%s\"', trigger,\n )\n # Send `authenticate-abort` command\n # See https://ircv3.net/specs/extensions/sasl-3.1#the-authenticate-command\n bot.write(('AUTHENTICATE', '*'))\n return\n\n # TODO: Implement SCRAM challenges", "title": "" }, { "docid": "c4cd3a2ea8d5fffb267d8d4e84d373b6", "score": "0.58901256", "text": "def authenticate(self, msg):\n\t\tif msg.command != 'AUTH':\n\t\t\traise TypeError('Must be AUTH command!')\n\t\tself.__auth_id = msg.id\n\t\tself.request(msg)", "title": "" }, { "docid": "dea5b4ccdf3d7b91489fcb22268a1ae6", "score": "0.5881792", "text": "def auth(self, irc, msg, args, nick):\r\n self._removeExpiredRequests()\r\n userdata = self.db.getByNick(nick)\r\n if len(userdata) == 0:\r\n irc.error(\"This nick is not registered. Please register.\")\r\n return\r\n keyid = userdata[0][1]\r\n fingerprint = userdata[0][2]\r\n if keyid is None:\r\n irc.error(\"You have not registered a GPG key. Try using bcauth instead, or register a GPG key first.\")\r\n return\r\n challenge = \"freenode:#bitcoin-otc:\" + hashlib.sha256(os.urandom(128)).hexdigest()[:-8]\r\n request = {msg.prefix: {'nick':userdata[0][5],\r\n 'expiry':time.time(), 'keyid':keyid,\r\n 'type':'auth', 'challenge':challenge,\r\n 'fingerprint':fingerprint}}\r\n self.pending_auth.update(request)\r\n self.authlog.info(\"auth request from hostmask %s for user %s, keyid %s.\" %\\\r\n (msg.prefix, nick, keyid, ))\r\n irc.reply(\"Request successful for user %s, hostmask %s. 
Your challenge string is: %s\" %\\\r\n (nick, msg.prefix, challenge,))", "title": "" }, { "docid": "fcccb39d57d9761bd333af094ae1d29f", "score": "0.58794737", "text": "def challenge_post(self, environ, start_response):\n # request_info = cgi.parse_qs(environ['wsgi.input'].read(int(environ['CONTENT_LENGTH'])))\n # redirect = request_info.get('tiddlyweb_redirect', ['/'])[0]\n query = environ['tiddlyweb.query']\n redirect = query.get('tiddlyweb_redirect', ['/'])[0]\n\n try:\n user = query['user'][0]\n password = query['password'][0]\n valid=self._validate_and_redirect(environ, start_response, user, password, redirect)\n if valid:\n self._ensure_bags(environ, user)\n self._ensure_recipe(environ, user)\n return valid\n except KeyError:\n return self._send_cookie_form(environ, start_response, redirect, '401 Unauthorized')", "title": "" }, { "docid": "d440a6e27a32c253b40039ac0a0db55b", "score": "0.5851626", "text": "def challenge_client(sock: socket) -> bool:\n nonce = os.urandom(32)\n sock.send(nonce)\n response = sock.recv(32)\n expected_response = hashlib.sha256(nonce + settings.PASSPHRASE).digest()\n return response == expected_response", "title": "" }, { "docid": "6c251cafda5d43f257abc49b9ce16c08", "score": "0.58377445", "text": "def calculate_response(challenge: bytes):\n\tsecret = challenge[3:6]\n\tresponse = struct.pack('BBBB', challenge[2], *[secret[i] ^ XOR_KEY[i] for i in range(len(secret))])\n\treturn format_message(response)", "title": "" }, { "docid": "ea9f7d7734ddb326707599334b1978a6", "score": "0.58345133", "text": "def _final_challenge(self, challenge):\n if self._finished:\n return Failure(\"extra-challenge\")\n\n match = SERVER_FINAL_MESSAGE_RE.match(challenge)\n if not match:\n logger.debug(\"Bad final message syntax: {0!r}\".format(challenge))\n return Failure(\"bad-challenge\")\n\n error = match.group(\"error\")\n if error:\n logger.debug(\"Server returned SCRAM error: {0!r}\".format(error))\n return Failure(u\"scram-\" + error.decode(\"utf-8\"))\n\n verifier = match.group(\"verifier\")\n if not verifier:\n logger.debug(\"No verifier value in the final message\")\n return Failure(\"bad-succes\")\n\n server_key = self.HMAC(self._salted_password, b\"Server Key\")\n server_signature = self.HMAC(server_key, self._auth_message)\n if server_signature != a2b_base64(verifier):\n logger.debug(\"Server verifier does not match\")\n return Failure(\"bad-succes\")\n\n self._finished = True\n return Response(None)", "title": "" }, { "docid": "fda05d5552c95d652d81fe3237cd7b16", "score": "0.5816291", "text": "def make_digest_challenge(self, realm, message=None):\n\n if message is None:\n message = \"Authentication is required\"\n\n param_dict = {\n \"realm\": realm,\n \"nonce\": gen_nonce(16),\n \"opaque\": gen_nonce(16),\n }\n\n parameters = \", \".join('%s=\"%s\"' % t for t in param_dict.items())\n\n authenticate = \"Digest %s\" % parameters\n\n return Response(message, 401, {\"WWW-Authenticate\": authenticate})", "title": "" }, { "docid": "6ec43bc6f2ed99c0d94ca6033cc37654", "score": "0.5812959", "text": "def challenge(self) -> bool:\n return self.__challenge", "title": "" }, { "docid": "e946bef569e81aca055ff13409099d0b", "score": "0.5784814", "text": "def authenticate_request(self, request, content):\n json_request = {\n \"method\": request.method,\n \"uri\": request.uri,\n \"destination_is\": self.sydent.server_name,\n \"signatures\": {},\n }\n\n if content is not None:\n json_request[\"content\"] = content\n\n origin = None\n\n def parse_auth_header(header_str):\n \"\"\"\n Extracts a 
server name, signing key and payload signature from an\n authentication header.\n\n :param header_str: The content of the header\n :type header_str: unicode\n\n :return: The server name, the signing key, and the payload signature.\n :rtype: tuple[unicode]\n \"\"\"\n try:\n params = header_str.split(u\" \")[1].split(u\",\")\n param_dict = dict(kv.split(u\"=\") for kv in params)\n\n def strip_quotes(value):\n if value.startswith(u\"\\\"\"):\n return value[1:-1]\n else:\n return value\n\n origin = strip_quotes(param_dict[\"origin\"])\n key = strip_quotes(param_dict[\"key\"])\n sig = strip_quotes(param_dict[\"sig\"])\n return origin, key, sig\n except Exception:\n raise SignatureVerifyException(\"Malformed Authorization header\")\n\n auth_headers = request.requestHeaders.getRawHeaders(u\"Authorization\")\n\n if not auth_headers:\n raise NoAuthenticationError(\"Missing Authorization headers\")\n\n for auth in auth_headers:\n if auth.startswith(u\"X-Matrix\"):\n (origin, key, sig) = parse_auth_header(auth)\n json_request[\"origin\"] = origin\n json_request[\"signatures\"].setdefault(origin, {})[key] = sig\n\n if not json_request[\"signatures\"]:\n raise NoAuthenticationError(\"Missing X-Matrix Authorization header\")\n\n yield self.verifyServerSignedJson(json_request, [origin])\n\n logger.info(\"Verified request from HS %s\", origin)\n\n defer.returnValue(origin)", "title": "" }, { "docid": "a218690d8a08a13ec66970712510aaa2", "score": "0.57790285", "text": "def authenticate(self, handler):", "title": "" }, { "docid": "1e151d44187b0970d8225f0b5b76c31c", "score": "0.57646435", "text": "def auth_step(socket, password, header):\n send(socket, b'EHLO ' + SMTP_DOMAINNAME.encode('ASCII'))\n response = read_line(socket)\n while '-' in response:\n response = read_line(socket)\n if response[:3] != '250':\n raise Exception('EHLO Response not OK')\n auth_login = b'AUTH LOGIN'\n user_name = base64.b64encode(b'[email protected]') # rip inbox\n encode_pass = base64.b64encode(password.encode('ASCII'))\n send(socket, auth_login)\n response_user = read_line(socket)\n if response_user[:3] != '334':\n raise Exception('Username question expected')\n send(socket, user_name)\n response_pass_ask = read_line(socket)\n if response_pass_ask[:3] != '334':\n raise Exception('Password Request Expected')\n send(socket, encode_pass)\n response_success = read_line(socket)\n if response_success[:3] != '235':\n print(response_success)\n return -1\n # raise Exception('Authentication was NOT successful')\n print('Authentication Successful')\n return 0", "title": "" }, { "docid": "176ecd63705a39b7662d6838e84bac8e", "score": "0.5752457", "text": "def authenticate():\n return Response('Could not verify your access level for that URL.\\n''You have to login with proper credentials', 401, {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "3419696577a5db795993722803f03fec", "score": "0.57397336", "text": "def challenge_response(self, challenge, mode='HMAC', slot=1, variable=True, may_block=True):\n if not self.capabilities.have_challenge_response(mode):\n raise yubikey_base.YubiKeyVersionError(\"%s challenge-response unsupported in YubiKey %s\" % (mode, self.version()) )\n return self._challenge_response(challenge, mode, slot, variable, may_block)", "title": "" }, { "docid": "ea7f076fd66805b2cf6005d0a1f189a0", "score": "0.57264584", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n 
{'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "66478b2d497eadcfc6c5a19c3f63b3a4", "score": "0.57178414", "text": "def everify(self, irc, msg, args, otp):\r\n self._removeExpiredRequests()\r\n if not self._testPresenceInChannels(irc, msg.nick):\r\n irc.error(\"In order to authenticate, you must be present in one \"\r\n \"of the following channels: %s\" % (self.registryValue('channels'),))\r\n return\r\n try:\r\n authrequest = self.pending_auth[msg.prefix]\r\n except KeyError:\r\n irc.error(\"Could not find a pending authentication request from your hostmask. \"\r\n \"Either it expired, or you changed hostmask, or you haven't made one.\")\r\n return\r\n if authrequest['type'] not in ['eregister','eauth','echangekey']:\r\n irc.error(\"No outstanding encryption-based request found.\")\r\n return\r\n if authrequest['challenge'] != otp:\r\n irc.error(\"Incorrect one-time password. Try again.\")\r\n return\r\n\r\n response = \"\"\r\n if authrequest['type'] == 'eregister':\r\n if self.db.getByNick(authrequest['nick']) or self.db.getByKey(authrequest['keyid']):\r\n irc.error(\"Username or key already in the database.\")\r\n return\r\n self.db.register(authrequest['keyid'], authrequest['fingerprint'], None,\r\n time.time(), authrequest['nick'])\r\n response = \"Registration successful. \"\r\n elif authrequest['type'] == 'echangekey':\r\n gpgauth = self._ident(msg.prefix)\r\n if gpgauth is None:\r\n irc.error(\"You must be authenticated in order to change your registered key.\")\r\n return\r\n if self.db.getByKey(authrequest['keyid']):\r\n irc.error(\"This key id already registered. Try a different key.\")\r\n return\r\n self.db.changekey(gpgauth['nick'], gpgauth['keyid'], authrequest['keyid'], authrequest['fingerprint'])\r\n response = \"Successfully changed key for user %s from %s to %s. 
\" %\\\r\n (gpgauth['nick'], gpgauth['keyid'], authrequest['keyid'],)\r\n userdata = self.db.getByNick(authrequest['nick'])\r\n self.authed_users[msg.prefix] = {'timestamp':time.time(),\r\n 'keyid': authrequest['keyid'], 'nick':authrequest['nick'],\r\n 'bitcoinaddress':userdata[0][3],\r\n 'fingerprint':authrequest['fingerprint']}\r\n del self.pending_auth[msg.prefix]\r\n logmsg = \"everify success from hostmask %s for user %s, keyid %s.\" %\\\r\n (msg.prefix, authrequest['nick'], authrequest['keyid'],) + response\r\n self.authlog.info(logmsg)\r\n self.db.update_auth_date(userdata[0][0], time.time())\r\n self.db.set_auth_status(userdata[0][5], 1)\r\n if not world.testing:\r\n irc.queueMsg(ircmsgs.privmsg(\"#bitcoin-otc-auth\", logmsg))\r\n irc.reply(response + \"You are now authenticated for user %s with key %s\" %\\\r\n (authrequest['nick'], authrequest['keyid']))", "title": "" }, { "docid": "d8c26245bcc8568f233aa5ce00ddee39", "score": "0.56832886", "text": "def authenticate():\n return Response(\n \"Could not verify your access level for that URL.\\n\"\n \"You have to login with proper credentials\",\n 401,\n {\"WWW-Authenticate\": 'Basic realm=\"Login Required\"'},\n )", "title": "" }, { "docid": "d8c26245bcc8568f233aa5ce00ddee39", "score": "0.56832886", "text": "def authenticate():\n return Response(\n \"Could not verify your access level for that URL.\\n\"\n \"You have to login with proper credentials\",\n 401,\n {\"WWW-Authenticate\": 'Basic realm=\"Login Required\"'},\n )", "title": "" }, { "docid": "bd1e3484726a4bc4129e5e53552d7a9d", "score": "0.56745934", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "bd1e3484726a4bc4129e5e53552d7a9d", "score": "0.56745934", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "bd1e3484726a4bc4129e5e53552d7a9d", "score": "0.56745934", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "bd1e3484726a4bc4129e5e53552d7a9d", "score": "0.56745934", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "bd1e3484726a4bc4129e5e53552d7a9d", "score": "0.56745934", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "bd1e3484726a4bc4129e5e53552d7a9d", "score": "0.56745934", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "bd1e3484726a4bc4129e5e53552d7a9d", "score": "0.56745934", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n 
{'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "bd1e3484726a4bc4129e5e53552d7a9d", "score": "0.56745934", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "bd1e3484726a4bc4129e5e53552d7a9d", "score": "0.56745934", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "bd1e3484726a4bc4129e5e53552d7a9d", "score": "0.56745934", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "bd1e3484726a4bc4129e5e53552d7a9d", "score": "0.56745934", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "bd1e3484726a4bc4129e5e53552d7a9d", "score": "0.56745934", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "bd1e3484726a4bc4129e5e53552d7a9d", "score": "0.56745934", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "bd1e3484726a4bc4129e5e53552d7a9d", "score": "0.56745934", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "bd1e3484726a4bc4129e5e53552d7a9d", "score": "0.56745934", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "bd1e3484726a4bc4129e5e53552d7a9d", "score": "0.56745934", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "1e612bfdff49d8f441a9e258775d0c52", "score": "0.56672156", "text": "def run(self, message):\n if self.parse_response(message):\n if self.validate_credentials():\n self.client.change_auth_status(self.client.auth_statuses[\"auth_complete\"])\n else:\n LOGGER.warning(\"Username, password or unique id incorrect at client: {client}\".format(\n client=str(self.client)))\n return\n else:\n LOGGER.warning(\"Error receiving admin credentials: {client}\".format(\n client=str(self.client)))\n return", "title": "" }, { "docid": "73a6e71a7ed72d6bec05f19f8202d1e5", "score": "0.5665175", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for this URL.\\n'\n 'Login credentials required.', 401,\n {'WWW-Authenticate': 'Basic 
realm=\"Login Required\"'})", "title": "" }, { "docid": "f402242170ddeba635205a946f3e6b39", "score": "0.56613606", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n { 'WWW-Authenticate': 'Basic realm=\"Login Required\"' }\n )", "title": "" }, { "docid": "e931527e6c09b961a935455a809800c8", "score": "0.5657155", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "15d8a611bda1649971f213b486ec6897", "score": "0.5644863", "text": "def authenticate():\n return Response('', 401, {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "1e9da2ce0b082d844de498283bf87d54", "score": "0.562817", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "1e9da2ce0b082d844de498283bf87d54", "score": "0.562817", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "1e9da2ce0b082d844de498283bf87d54", "score": "0.562817", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "1e9da2ce0b082d844de498283bf87d54", "score": "0.562817", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "1e9da2ce0b082d844de498283bf87d54", "score": "0.562817", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "1e9da2ce0b082d844de498283bf87d54", "score": "0.562817", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "1e9da2ce0b082d844de498283bf87d54", "score": "0.562817", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "1e9da2ce0b082d844de498283bf87d54", "score": "0.562817", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "1e9da2ce0b082d844de498283bf87d54", "score": "0.562817", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": 
"1e9da2ce0b082d844de498283bf87d54", "score": "0.562817", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "1e9da2ce0b082d844de498283bf87d54", "score": "0.562817", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "1e9da2ce0b082d844de498283bf87d54", "score": "0.562817", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "1e9da2ce0b082d844de498283bf87d54", "score": "0.562817", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "81a2f233dfda1b716720252f8325bee0", "score": "0.562801", "text": "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "title": "" }, { "docid": "d787347051c1dcc8d0b9a977e7547cf1", "score": "0.56100833", "text": "def authenticate():\n message = {'message': \"Authenticate.\"}\n resp = jsonify(message) # create a json object\n\n resp.status_code = 401\n resp.headers['WWW-Authenticate'] = 'Basic realm=\"Main\"'\n\n return resp", "title": "" }, { "docid": "02445dc3743fbc536768a9e8071dbd0a", "score": "0.56082314", "text": "def authenticate():\r\n\treturn Response(\r\n\t'Could not verify your access level for that URL.\\n'\r\n\t'You have to login with proper credentials', 401,\r\n\t{'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "9131c830ad6f6c798b92075c52eec195", "score": "0.55819196", "text": "def __authenticate(self, *args, **kwargs):\n\n # If priv_key is None, then master.key didn't get loaded correctly.\n if self.controller.priv_key is None:\n raise ControllerException(CONTROLLER_ERROR_NO_RSA_KEY)\n\n challenge_func = WebControllerFunction(self.controller, 'authenticate')\n challenge = challenge_func().__str__()\n\n # re-encrypt using servers key and then sha hash it before sending it\n # back\n response_encode = self.controller.priv_key.encrypt(challenge, None)\n response_hash = hashlib.sha512(response_encode[0]).hexdigest()\n \n challenge_func = WebControllerFunction(self.controller, \\\n 'challenge_response')\n if challenge_func(response_hash):\n return self(*args, **kwargs)\n\n raise ControllerException(CONTROLLER_ERROR_AUTH_FAIL)", "title": "" }, { "docid": "2a5fcbcaa0eddcad2fc682233c111859", "score": "0.5580618", "text": "def verify (self, response, challenge, v):\r\n\t\ttouple = self.check(response, challenge, v)\r\n\t\treturn touple[0]==touple[1]", "title": "" }, { "docid": "15af5994a08f565053f437475caf5d27", "score": "0.55725276", "text": "def auth_ok_message():\n return {\"type\": TYPE_AUTH_OK, \"ha_version\": __version__}", "title": "" }, { "docid": "20d028fdf571e87e2c6daa523e6e8459", "score": "0.5563835", "text": "def authenticate():\n return Response(\n 'Login Required', 401,\n {'WWW-Authenticate': 'Basic 
realm=\"Login Required\"'})", "title": "" }, { "docid": "3bd1ce8c3cdf0f4fd90f8ac7e4753dfc", "score": "0.55371714", "text": "def messenger_webhook():\n verify_token = request.query.get('hub.verify_token')\n # check whether the verify tokens match\n if verify_token == FB_VERIFY_TOKEN:\n # respond with the challenge to confirm\n challenge = request.query.get('hub.challenge')\n return challenge\n else:\n return 'Invalid Request or Verification Token'", "title": "" }, { "docid": "3bd1ce8c3cdf0f4fd90f8ac7e4753dfc", "score": "0.55371714", "text": "def messenger_webhook():\n verify_token = request.query.get('hub.verify_token')\n # check whether the verify tokens match\n if verify_token == FB_VERIFY_TOKEN:\n # respond with the challenge to confirm\n challenge = request.query.get('hub.challenge')\n return challenge\n else:\n return 'Invalid Request or Verification Token'", "title": "" }, { "docid": "288bd984225cff35a310b32ebceda0ca", "score": "0.5536558", "text": "def authenticate(self):\n log(2,\"Checking for authentication\")\n if self.request.user:#\n log(2,\"Authentication OK\")\n return True\n else:\n log(2,\"Authentication not OK, send 401\")\n self.response.add_header(\"WWW-Authenticate\", 'RestBasic realm=\"We need your password.\"')\n self.response.add_header('Content-Type', 'application/json')\n raise StopProcessing(401,json.dumps({\"error\":\"Authentiction required.\"}))", "title": "" }, { "docid": "110515db4636d2f7efad6c53953b3957", "score": "0.55330414", "text": "def basic_challenge(realm, content='Authorization Required'):\n response = HttpResponse(content, content_type=\"text/plain\")\n response['WWW-Authenticate'] = 'Basic realm=\"%s\"' % (realm, )\n response.status_code = 401\n return response", "title": "" }, { "docid": "5372fc9b27d87c5f4cea6a2d4d0e1f91", "score": "0.5510157", "text": "def authenticate(self, data):\n\n data = data.strip().split('%')\n if len(data) != 3:\n raise Exception(\"Invalid Message\")\n try:\n privateid = self.factory.authkeys[data[0]].encode('utf-8', 'ignore')\n except:\n raise Exception(\"Invalid Authkey\")\n message = data[1].encode('utf-8', 'ignore')\n checksum = hashlib.md5(message + privateid).hexdigest()\n if data[2] == checksum:\n return message.decode('utf-8', 'ignore')\n else:\n raise Exception(\"Failed auth\")", "title": "" }, { "docid": "7e7e1cf5272ece535e6af297b722b6e5", "score": "0.55049473", "text": "def authenticate(self, tree):\n\n # Currently RFC specifies only SASL as supported way of auth'ing\n handler = SASLAuthHandler()\n if tree.get('xmlns') != handler.namespace:\n raise MalformedRequestError\n handler.process(tree)\n self.connection.parser.reset()\n self.jid = JID(\"@\".join([handler.authenticated_user,\n self.hostname]))\n self.authenticated = True\n response_element = ET.Element(\"success\")\n response_element.set(\"xmlns\", handler.namespace)\n self.send_element(response_element)", "title": "" }, { "docid": "6477851370b338ccb1d08baa7d5df877", "score": "0.54741734", "text": "def authenticate():\n return flask.Response('Login required', 401, {\n 'WWW-Authenticate': 'Basic realm=\"Login required\"',\n })", "title": "" }, { "docid": "681e4c4b0db31e87d9e3bb22648fed0c", "score": "0.54555166", "text": "def authenticate(self, req, resp, resource):\n raise NotImplementedError(\".authenticate() must be overridden.\")", "title": "" }, { "docid": "f269c18a478e8bbf3fa00d2cdeb06c0e", "score": "0.5446857", "text": "def challenge(self, request, response, **kw):\n user_expired = response.getHeader(\"user_expired\")\n if user_expired:\n portal_url = 
api.portal.get_tool(name=\"portal_url\")()\n IStatusMessage(request).add(\n _(u\"Your password has expired.\"), type=\"error\"\n )\n response.redirect(\n \"%s/mail_password_form?userid=%s\" % (portal_url, user_expired),\n lock=1,\n )\n return 1\n return 0", "title": "" }, { "docid": "8e52eda84df4d0f232949dd3a95c7428", "score": "0.54455614", "text": "def authorization(self):\n pr_req = Request(RequestAction.PRESENCE, self.user.username)\n self.__send_request(pr_req)\n resp = self.__get_response()\n if resp is None:\n return Response(SERVER_ERROR)\n if resp.code != AUTH:\n return resp\n enc_pass = encrypt_rsa(\n import_pub_key(resp.message.encode()),\n self.user.password)\n auth_req = Request(RequestAction.AUTH, enc_pass.decode())\n self.__send_request(auth_req)\n return self.__get_response()", "title": "" }, { "docid": "ab644ede42a4c845ac2ae73a47c9fb22", "score": "0.5432318", "text": "def authenticate_request(\n self,\n request: 'CoreRequest'\n ) -> Success | Failure | Response | None:", "title": "" }, { "docid": "3be545bac3c2fba1cef46d1eace66f42", "score": "0.5428773", "text": "def exec_get_challenge(self):\n\n\t\tapdu = [\n\t\t\t0x00, # CLA\n\t\t\t0x84, # INS = C/R INT AUTH\n\t\t\t0x00, # P1 = 0x00\n\t\t\t0x00, # P2 = 0x00\n\t\t\t0x08,\n\t\t]\n\n\t\td, s, t = self.transmit(apdu)\n\t\treturn d", "title": "" }, { "docid": "79c0157f68832652bfc3b62a89d208cf", "score": "0.5420126", "text": "def authenticate():\n return Response(\"Could not authenticate. Please provide valid Globus credentials.\",\n 401, {\"WWW-Authenticate\": 'Basic realm=\"Login Required\"'})", "title": "" }, { "docid": "e1cd8db06076f610ff1a8742a0760690", "score": "0.5399056", "text": "async def __handleHandshake(self, path: str, headers):\n parts = path.split(\"?\")\n _pathname = parts[0]\n _query = \"\"\n\n if len(parts) >= 2:\n _query = \"?\".join(parts[1:])\n\n if self.protocol != \"ws+unix:\" and _pathname != self.pathname:\n return (http.HTTPStatus.NOT_FOUND, [], bytes([]))\n\n query = parse_qs(_query or \"\")\n clientId = str(query.get(\"id\") and query.get(\"id\")[0] or \"\")\n secret = str(query.get(\"secret\")\n and query.get(\"secret\")[0] or \"\")\n\n if not clientId or (self.secret and secret != self.secret):\n return (http.HTTPStatus.UNAUTHORIZED, [], bytes([]))", "title": "" }, { "docid": "8e8311598ba08f156aed434e86c23c10", "score": "0.5394339", "text": "def authenticate(socket, config):\n # Generate the auth string\n auth_str = config[\"user\"] + \";\" + config[\"pass\"]\n \n # Send the string\n send_mess(socket,auth_str)\n\n # Return response\n return get_bool_response(socket)", "title": "" }, { "docid": "46fa4d854805b344642819095f77e3b3", "score": "0.5392686", "text": "def response_auth(status, message, token, status_code):\n return make_response(jsonify({\n 'status': status,\n 'message': message,\n 'auth_token': token.decode(\"utf-8\")\n })), status_code", "title": "" }, { "docid": "e7b9d44eacb858ce821ecc6888ce0d4c", "score": "0.53925836", "text": "def logAuthenticate200(r):\n log.debug(\"Authenticate Response: \")\n log.debug(json.dumps(r.json(),indent=10,sort_keys=True))\n log.debug(\"Authenticate Response Headers: \")\n for h in r.headers:\n log.debug(\" \" + h + \" : \" + r.headers[h])\n token = r.json()[\"token\"]\n try:\n decoded_token = jwt.decode(token, server_env[\"CLIENT_SECRET\"], algorithms=[\"HS256\"], audience=server_env[\"CLIENT_ID\"])\n log.debug(\"Decoded token: \")\n log.debug(json.dumps(decoded_token, indent=30, sort_keys=True))\n header = 
base64.b64decode(token.split('.')[0]).decode('utf-8')\n log.debug(\"header:\")\n log.debug(json.dumps(json.loads(header), indent=4, sort_keys=True))\n payload = base64.b64decode(token.split('.')[1]+\"==\").decode('utf-8')\n log.debug(\"payload:\")\n log.debug(json.dumps(json.loads(payload), indent=4, sort_keys=True))\n except jwt.exceptions.InvalidSignatureError as err:\n log.warn(\"Unable to decode token: \" + str(err))\n return None", "title": "" }, { "docid": "63f88460cc1d063dc9e9e91c2a5ce439", "score": "0.53920156", "text": "def challenge_get(self, environ, start_response):\n redirect = environ['tiddlyweb.query'].get('tiddlyweb_redirect', ['/'])[0]\n # print \"REDIRECT\"\n # print redirect\n return self._send_cookie_form(environ, start_response, redirect)", "title": "" }, { "docid": "b85f31245b709456ab54cba9979c09da", "score": "0.53745955", "text": "def login(): \n headers = { 'X-Requested-With': 'XMLHttpRequest' }\n data = { 'callback': 'getChallenge', 'action': 'challenge'}\n\n response = session.post(URL + 'login', headers=headers, data=data)\n if response.status_code != 200:\n sys.exit(\"login(): Impossible de récuperer le challenge\")\n\n d = pq( response.content )\n challenge = d.find('challenge').text().encode('utf-8')\n\n hash1 = sha256('admin'.encode('utf-8')).hexdigest().encode('utf-8')\n hash1 = hmac.new(challenge, hash1, sha256).hexdigest()\n\n hash2 = sha256(KEY.encode('utf-8')).hexdigest().encode('utf-8')\n hash2 = hmac.new(challenge, hash2, sha256).hexdigest()\n \n data = {\n 'method' : 'passwd',\n 'page_ref': '',\n 'zsid' : challenge,\n 'hash' : hash1 + hash2\n }\n\n response = session.post( URL + 'login', data=data )\n if response.status_code != 200:\n sys.exit(\"login(): Impossible de se connecter\")", "title": "" }, { "docid": "c637b7d442ef84d4dbf262e51207ac77", "score": "0.53692544", "text": "def _run_next_challenge(msg, request, access_token):\n for challenge in msg[\"challenges\"]:\n if challenge[\"status\"] != \"READY\":\n # Skip non-activated challenges.\n continue\n c = challenges.AVAILABLE_CHALLENGES.get(challenge[\"challengeType\"], None)\n if not c:\n raise exceptions.ReauthFailError(\n \"Unsupported challenge type {0}. Supported types: {1}\".format(\n challenge[\"challengeType\"],\n \",\".join(list(challenges.AVAILABLE_CHALLENGES.keys())),\n )\n )\n if not c.is_locally_eligible:\n raise exceptions.ReauthFailError(\n \"Challenge {0} is not locally eligible\".format(\n challenge[\"challengeType\"]\n )\n )\n client_input = c.obtain_challenge_input(challenge)\n if not client_input:\n return None\n return _send_challenge_result(\n request,\n msg[\"sessionId\"],\n challenge[\"challengeId\"],\n client_input,\n access_token,\n )\n return None", "title": "" }, { "docid": "469d9b87f5ef0856de126c5e9ca4ef9b", "score": "0.5326108", "text": "def __authenticate(self):\n log.trace(\n \"Attempting authentication against {} with key [{}...{}]\",\n self.url,\n self.api_key[:2],\n self.api_key[-2:],\n )\n self.status[\"auth_state\"] = \"attempting\"\n response = self.request(\"auth\")\n log.trace(f\"Authenticate response: {response.text}\")\n self.status[\"auth_response\"] = response.text\n self.status[\"auth_httpstatus\"] = response.status_code\n\n # NOTE: an unauthorized request has already been caught be the request() method\n # above. 
Our legacy code was additionally testing for 'error' in the response\n # text - however, it is unclear if PUMAPI ever returns this:\n if \"error\" in response.text.lower():\n self.status[\"auth_state\"] = \"FAILED-ERROR\"\n msg = f\"Authentication failed with an error: {response.text}\"\n log.error(msg)\n raise requests.exceptions.ConnectionError(msg)\n\n status_ok = requests.codes.ok # pylint: disable-msg=no-member\n\n if response.status_code != status_ok:\n # NOTE: branch excluded from coverage as we don't have a known way\n # to produce such a response from the API\n log.warning(\n \"Unexpected combination of response [{}] and status code [{}], it's \"\n \"unclear if authentication succeeded (assuming it didn't)\",\n response.status_code,\n response.text,\n )\n self.status[\"auth_state\"] = \"FAILED-UNKNOWN\"\n\n msg = (\n f\"Authenticating against {self.url} with key \"\n f\"[{self.api_key[:2]}...{self.api_key[-2:]}] FAILED!\"\n )\n log.error(msg)\n raise requests.exceptions.ConnectionError(msg)\n\n log.trace(\n \"Authentication succeeded, response=[{}], http_status=[{}]\",\n response.text,\n response.status_code,\n )\n self.status[\"auth_state\"] = \"good\"", "title": "" } ]
6a373c6e7a1bd455aa1ed75f652c7e26
Tests the method which lets us set the URL to work from, for the case where the given URL is not a supported URL.
[ { "docid": "20831f84b83e2b3651fbe1de70c2a13f", "score": "0.0", "text": "def test_set_url_base_ends_with_slash(self) -> None:\n\n given = \"http://example.org/\"\n expected = \"http://example.org\"\n\n self.query_tool.url_base = given\n actual = self.query_tool.url_base\n\n self.assertEqual(expected, actual)", "title": "" } ]
[ { "docid": "6be824e2daffbb7a6dd84fc0d7cbc843", "score": "0.7581493", "text": "def set_broken_url():", "title": "" }, { "docid": "e02dcdf95f707f20fc80126e7cd69e75", "score": "0.7048977", "text": "def _set_url(self, url):\n ...", "title": "" }, { "docid": "1079ed216e18a353496465c0d18685c3", "score": "0.67172503", "text": "def test_set_url_base_not_url(self) -> None:\n\n given = \"example.org\"\n\n self.assertRaises(ValueError, lambda: self.query_tool.set_url_base(given))", "title": "" }, { "docid": "448572db4b03d97afd57398c53b3c0d7", "score": "0.6544353", "text": "def test_bad_url_double_scheme(self):\r\n index = setuptools.package_index.PackageIndex(\r\n hosts=('www.example.com',)\r\n )\r\n\r\n # issue 20\r\n url = 'http://http://svn.pythonpaste.org/Paste/wphp/trunk'\r\n try:\r\n index.open_url(url)\r\n except distutils.errors.DistutilsError:\r\n error = sys.exc_info()[1]\r\n msg = unicode(error)\r\n assert 'nonnumeric port' in msg or 'getaddrinfo failed' in msg or 'Name or service not known' in msg\r\n return\r\n raise RuntimeError(\"Did not raise\")", "title": "" }, { "docid": "3c512c5c48eb16b3045a8bbe06d231ab", "score": "0.64755464", "text": "def _validate_url(self, url):\n return", "title": "" }, { "docid": "cee220758bc5d3b3f2173da9b5715242", "score": "0.6451268", "text": "def _is_valid_url(self):\n try:\n self.req_page_obj = requests.get(self.url)\n except requests.exceptions.ConnectionError:\n return False\n else:\n return True", "title": "" }, { "docid": "ab0bd2aec17985dcbf73f700c6b4677c", "score": "0.64381975", "text": "def get_broken_url():", "title": "" }, { "docid": "c374a92ef66ebb5089b5b1609d7c6256", "score": "0.640715", "text": "def test_13_no_url(self):\n self.base_13_no_url()", "title": "" }, { "docid": "a4d366be29c75aad8d973a4212db5fc0", "score": "0.6349455", "text": "def suitable(url):\n return False", "title": "" }, { "docid": "d46785f5ec01d61d957cd4617b1fef28", "score": "0.63463944", "text": "def _fix_url(self): \n\t\tif self.scheme not in self.allowed_schemes:\n\t\t\tif not self.auto_fix:\n\t\t\t\traise SchemeError(self.scheme, self.allowed_schemes)\n\t\t\tself.scheme = self.allowed_schemes[0]\n\n\n\t\tif not self.host:\n\t\t\tif not self.auto_fix:\n\t\t\t\traise HostError()\n\n\t\t\telif self.path:\n\t\t\t\tpieces = self.path.split('/')\n\t\t\t\t\n\t\t\t\tif len(pieces) == 1:\n\t\t\t\t\tif not pieces[0]: raise HostError()\n\t\t\t\t\telse: self.host = pieces.pop(0)\n\t\t\t\telif pieces[0]:\n\t\t\t\t\tself.host = pieces.pop(0)\n\t\t\t\telif pieces[1]:\n\t\t\t\t\tself.host = pieces.pop(1)\n\t\t\t\telse:\n\t\t\t\t\traise HostError()\n\t\t\t\tself.path = '/'.join(pieces)\n\n\t\tself.join_url()", "title": "" }, { "docid": "a7a6e1679a199426d2fd489a2c217b71", "score": "0.6336512", "text": "def test_invalid_url(self):\n validator = zope.component.getUtility(IURLValidator)\n with self.assertRaises(ValueError):\n validator.validate(\"?\")", "title": "" }, { "docid": "aeee4a38066e463bb4b417312db4dc86", "score": "0.63004094", "text": "def test_invalid_url(self):\n self.data['url'] = 'notarepo'\n self.assertInvalid()", "title": "" }, { "docid": "fb6a2ea6f44a65b64942455a3be491d2", "score": "0.6286197", "text": "def is_my_url(url):\n raise NotImplementedError", "title": "" }, { "docid": "292b1f905f747a59b3f94a2e9acb875d", "score": "0.62741524", "text": "def test_guess_and_set_url_base_not_str(self) -> None:\n\n config_loader = ConfigLoader()\n config_loader.set_custom_config({\"collection\": {\"url_base\": False}}).start()\n\n self.query_tool.guess_and_set_url_base()\n\n expected = 
\"http://localhost:8001\"\n actual = self.query_tool.url_base\n\n self.assertEqual(expected, actual)\n\n del config_loader", "title": "" }, { "docid": "17b4483ec9d8dd31fe4c6b2fadd749bd", "score": "0.6204555", "text": "def test_wrong_url(self):\n print(\"(\"+self.test_wrong_url.__name__+\")\", self.test_wrong_url.__doc__)\n resp = self.client.get(self.url_wrong)\n self.assertEqual(resp.status_code, 404)", "title": "" }, { "docid": "376360eb4c2bf85f32c1198668405eda", "score": "0.61899644", "text": "def test_check_release_check_url(self):\n self.assertEqual(checks.check_release_check_url(None), [checks.E004])", "title": "" }, { "docid": "a6984825977b8d93b69113d4caf6b8ca", "score": "0.6174316", "text": "def _url_callback(url: str) -> Union[str, None]:\n if url == \"latest\":\n return url\n if validators.url(url):\n return url\n else:\n raise typer.BadParameter(f\"Please check {url} is a valid url\")", "title": "" }, { "docid": "400a259b89cdda5126376eb7ebe4ac6f", "score": "0.61433077", "text": "def test_set_url_base_method(self) -> None:\n\n given = \"https://example.org\"\n expected = given\n\n self.query_tool.set_url_base(given)\n actual = self.query_tool.url_base\n\n self.assertEqual(expected, actual)", "title": "" }, { "docid": "35666a00df4dc8b7fd5c3beb7c6aafad", "score": "0.6134628", "text": "def test_url_format_check(self):\n\n assert not is_url(\"www.google.com\")\n assert is_url(\"http://www.google.com\")\n assert is_url(\"https://www.google.com\")\n assert is_url(\"http://www.google.com/info.txt\")\n assert is_url(\"http://www.google.com/child/info.txt\")\n\n assert not is_url(\"10.120.1.23\")\n assert is_url(\"http://10.120.1.23\")\n assert is_url(\"http://10.120.1.23/info.txt\")\n assert is_url(\"http://10.120.1.23/child/info.txt\")\n\n assert is_url(\"http://127.0.0.1:8080\")\n assert is_url(\"http://127.0.0.1:8080/child/info.txt\")\n\n assert is_url(\"http://port:8080\")\n assert is_url(\"http://port:8080/child/info.txt\")\n\n assert is_url(\"http://hello\")\n assert not is_url(\"http://hello.\")\n assert is_url(\"http://hello.i\")\n assert is_url(\"http://hello.io\")\n assert is_url(\"http://hello/child/info.txt\")\n\n assert is_url(\"http://hel-lo\")\n assert is_url(\"http://hel_lo\")\n assert not is_url(\"http://hel lo\")\n assert is_url(\"http://hello/\")\n assert is_url(\"http://hello/.\")\n assert is_url(\"http://hello/.txt\")", "title": "" }, { "docid": "a3a3ca01af52f576a24288e34d983d05", "score": "0.61344594", "text": "def test_bad_url(self):\n resp = requests.get('http://localhost:8000/asdf')\n self.assertEquals(resp.status_code, 200)", "title": "" }, { "docid": "38743008d99972e9b2f459731e4c1793", "score": "0.61343056", "text": "def validate_url(self, url: str) -> bool:\n if url.startswith(\"http://\") or url.startswith(\"https://\") or url.startswith(\"ftp://\"):\n return True\n self.logger.info(f\"GetURL: Unsupported URL prefix: {url}\")\n raise PluginException(\n preset=PluginException.Preset.UNKNOWN,\n assistance=f\"GetURL: Unsupported URL prefix: {url}\",\n )", "title": "" }, { "docid": "9dab804bc225261246b8a6ceb25a3e56", "score": "0.61172897", "text": "def test_url_no_url_type(self, mock_get, request):\n mock_get.return_value = self.url_check_get_response\n request.json = {\"landing_url\": \"unimportant\"}\n response = post_source_parameter(SOURCE_ID, \"landing_url\", self.database)\n self.assertEqual(response, {\"ok\": True, \"nr_sources_mass_edited\": 0, \"availability\": []})\n mock_get.assert_not_called()", "title": "" }, { "docid": 
"d10862b17eb12b6524848ed5e1260ae2", "score": "0.6089364", "text": "def checkurl(self):\n url = self.fields.get('url',False)\n if url:\n if url.endswith('.'):\n url = url[:-1]\n self.fields['url'] = url\n\n if url != None and url.count(' ')>0:\n self.errors.append(\"space in url\")\n nonsites = ('ebrary','degruyter','doi','myilibrary','academia','ebscohost')\n for n in nonsites:\n if url != None and n in url:\n self.errors.append(\"%s: urls should only be given for true repositories or for material not available elsewhere\"%url)", "title": "" }, { "docid": "3e0aca29482e3cdabe025a148ee81cfa", "score": "0.60806435", "text": "def __check_url(self, url) -> None:\n\n if requests.head(url).status_code != 200:\n raise VersionUrlError('error: Invalid url')", "title": "" }, { "docid": "3ec42dc67576e5f985715919f733e253", "score": "0.6080406", "text": "def handles_url(self, url):", "title": "" }, { "docid": "7f6ac4119cc3282618540a786a22cc99", "score": "0.60770726", "text": "def test_url(url):\n http_url = url.replace(\"ftp://\", \"http://\")\n if http_url[-1] != \"/\":\n http_url += \"/\"\n\n res = requests.head(http_url)\n if res.status_code != 200:\n raise requests.HTTPError(f\"Unable to connect to URL: {url}\")\n\n return url", "title": "" }, { "docid": "eb431c9b62859f5b2f1ee79ee148fc3d", "score": "0.6057759", "text": "def OpenURL(self, type, url):", "title": "" }, { "docid": "a32d8e0abc4708f7f23b7df7e255d6fa", "score": "0.6049299", "text": "def test_match_normal_url(self):\n self.assertTrue(match(self.obj_url))", "title": "" }, { "docid": "f6f8477ea7186f05a46b283dc638b120", "score": "0.6046837", "text": "def test_set_url_base_not_str(self) -> None:\n\n given = [\"Hello\", \"World!\"]\n\n self.assertRaises(TypeError, lambda: self.query_tool.set_url_base(given))", "title": "" }, { "docid": "df286c561ba05bc1910af18ad33400f5", "score": "0.60453475", "text": "def _get_url(self):\n ...", "title": "" }, { "docid": "a034b58c5aca01a4dc25b05611b77d13", "score": "0.60043675", "text": "def test_is_url():\n assert utils.is_url(\"http://mydomain.com/foo/bar/bat?asdf=1234&qewr=ooo\")\n assert utils.is_url(\"http://xkcd.com/1193/\")\n assert not utils.is_url(\"syn123445\")\n assert not utils.is_url(\"wasssuuuup???\")\n assert utils.is_url(\"file://foo.com/path/to/file.xyz\")\n assert utils.is_url(\"file:///path/to/file.xyz\")\n assert utils.is_url(\"file:/path/to/file.xyz\")\n assert utils.is_url(\"file:///c:/WINDOWS/clock.avi\")\n assert utils.is_url(\"file:c:/WINDOWS/clock.avi\")\n assert not utils.is_url(\"c:/WINDOWS/ugh/ugh.ugh\")", "title": "" }, { "docid": "c82c4d56d599fb3c00138a7f83ddd2f1", "score": "0.6002521", "text": "def test_check_empty_url(self):\n self.empty_url = \"\"\n get_url_response = get_url(self.empty_url)\n self.assertEqual(get_url_response, None)", "title": "" }, { "docid": "e678efca97550b4cf16ce6e5a5146390", "score": "0.5997677", "text": "def test_good_url(self):\n rv = self.submit_url('www.python.org')\n resp = str(rv.data, encoding='utf8')\n assert 'Short url is ' in resp", "title": "" }, { "docid": "463844b2006068616c36266e93532417", "score": "0.59973145", "text": "def test_guess_and_set_url_base(self) -> None:\n\n config_loader = ConfigLoader()\n config_loader.set_custom_config(\n {\"collection\": {\"url_base\": \"https://example.org:8443\"}}\n ).start()\n\n self.query_tool.guess_and_set_url_base()\n\n expected = \"https://example.org:8443\"\n actual = self.query_tool.url_base\n\n self.assertEqual(expected, actual)\n\n del config_loader", "title": "" }, { "docid": 
"788b29cc67d2ed37c8b2773c1253b9c8", "score": "0.5980256", "text": "def test_url_sanitization(self):\n expected_url = 'http://foo'\n\n api = AerospikeRestApi('http://foo/')\n self.assertEqual(api.base_url, expected_url)\n\n api = AerospikeRestApi('foo/')\n self.assertEqual(api.base_url, expected_url)", "title": "" }, { "docid": "c33b067cee7c31123b6efbd895bc60de", "score": "0.59763426", "text": "def test_match_not_valid_url(self):\n self.assertFalse(match('http://not.valid.url/'))", "title": "" }, { "docid": "0ab72e65c82b85c94a9f08b124874fe4", "score": "0.5975472", "text": "def test_compose_url_from_with_properurl(self):\n href = \"http://www.gc.com\"\n url = None\n actual = SiteMap().compose_url_from_href(url, href)\n expected = 'http://www.gc.com'\n self.assertEqual(expected, actual)", "title": "" }, { "docid": "fc4cd78f250b2e207cf0b28f47e2a417", "score": "0.5965903", "text": "def test_run_non_existing_url(self):\n\n with patch(\"validators.url\", return_value=True), patch(\n \"streamlit.cli._main_run\"\n ), requests_mock.mock() as m:\n\n m.get(\"http://url\", exc=requests.exceptions.RequestException)\n with patch(\"tempfile.NamedTemporaryFile\"):\n result = self.runner.invoke(cli, [\"run\", \"http://url\"])\n\n self.assertNotEqual(0, result.exit_code)\n self.assertTrue(\"Unable to fetch\" in result.output)", "title": "" }, { "docid": "926316492dfcedc8770cec4fd4ac31c8", "score": "0.5939719", "text": "def test_url_http_error(self, mock_get, request):\n mock_get.side_effect = requests.exceptions.RequestException\n request.json = {\"url\": self.url}\n response = post_source_parameter(SOURCE_ID, \"url\", self.database)\n self.assert_url_check(response, -1, \"RequestException\")\n updated_report = self.database.reports.insert_one.call_args[0][0]\n url = self.url\n self.assert_delta(\n f\"url of source 'Source' of metric 'Metric' of subject 'Subject' in report 'Report' from '' to '{url}'\",\n report=updated_report,\n )", "title": "" }, { "docid": "891a203ff0a164af63a0322f7dc3d885", "score": "0.59220636", "text": "def url(self):\n raise NotImplemented()", "title": "" }, { "docid": "b51e6f28c47e72676eee6d4e3820cac6", "score": "0.59185445", "text": "def test_isUrl(self):\n cases = [\n (\"http://example.com:8080/path/to/file\", True),\n (\"https://example.com:8080/path/to/file\", True),\n (\"ftp://example.com:8080/path/to/file\", False),\n (\"/path/to/my/file\", False)\n ]\n for case in cases:\n url, expected = case\n result = Utils.isUrl(url)\n self.assertEquals(expected, result)", "title": "" }, { "docid": "1501cbd4109743eca9ded6285d573d2b", "score": "0.5914192", "text": "def test_13a_no_url(self):\n self.base_13_no_url(use_config_cache=True)", "title": "" }, { "docid": "1408064edde210cfa3ca8d5beca847a0", "score": "0.59105694", "text": "def url(self):\n raise NotImplementedError('url') # pragma no cover", "title": "" }, { "docid": "2468d04a0dddb63373dc6cd92da0ef3e", "score": "0.5906903", "text": "def is_supported(self, url):\r\n return self.regex.match(url) is not None", "title": "" }, { "docid": "cee6c17828b577438ff8f637c4ab346e", "score": "0.5883006", "text": "def testIsFeedURLValid(self):\n #invalid: not a feed url\n self.assertFalse(validate.isFeedURLValid('http://www.google.com'))\n\n self.assertFalse(validate.isFeedURLValid(''))\n\n #valid feed url\n self.assertTrue(validate.isFeedURLValid(\n 'http://googlesummerofcode.blogspot.com/feeds/posts/default'))\n\n #invalid: wrong protocol\n self.assertFalse(validate.isFeedURLValid(\n 'htp://googlesummerofcode.blogspot.com/feeds/posts/default'))", 
"title": "" }, { "docid": "530b845e524da9af9dbec02b5d6dd6be", "score": "0.5873142", "text": "def _validate_url(self):\n\n accepted_pattern = [\n r\"^https:\\/\\/player.vimeo.com\\/video\\/(\\d+)$\",\n r\"^https:\\/\\/vimeo.com\\/(\\d+)$\",\n r\"^https://vimeo.com/groups/.+?/videos/(\\d+)$\",\n r\"^https://vimeo.com/manage/videos/(\\d+)$\"\n ]\n for pattern in accepted_pattern:\n match = re.findall(pattern, self._url)\n if match:\n return match[0]\n # If none of the patterns is matched exception is raised\n raise URLNotSupported(\n f\"{self._url} is not supported. Make sure you don't include query parameters in the url\"\n )", "title": "" }, { "docid": "2065e9a3db6e28e683e2c6dc46996964", "score": "0.58643746", "text": "def test_domain_url_error(self):\n self._test_failed_domain_url_fetch(raise_exception=True)", "title": "" }, { "docid": "8019927ff0d0994980292cb877094508", "score": "0.5853208", "text": "def check_url(self, c):\n try:\n if c['src'][0:4] == 'http': \n self._comic_url = c['src'].replace(\" \", \"%20\")\n self._state = \"URL found\"\n else: \n self._comic_url = (self._url + c['src']).replace(\" \", \"%20\")\n self._state = \"URL found\"\n except:\n self._state = None\n self._comic_url = None", "title": "" }, { "docid": "9cfed8d95a1778b37ba9392a4a00de17", "score": "0.5842438", "text": "def test_run_valid_url(self):\n\n with patch(\"validators.url\", return_value=True), patch(\n \"streamlit.cli._main_run\"\n ), requests_mock.mock() as m:\n\n m.get(\"http://url\", content=b\"content\")\n with patch(\"tempfile.NamedTemporaryFile\"):\n result = self.runner.invoke(cli, [\"run\", \"http://url\"])\n\n self.assertEqual(0, result.exit_code)", "title": "" }, { "docid": "e588fa519ae8db817d2f18d79aa145fc", "score": "0.58424115", "text": "def checks(self):\n if not requests.head(self._url_page).status_code:\n raise NotImplementedError('incorrect url.')", "title": "" }, { "docid": "a4bde13df7c07cc2aaecb4b057fa4c7f", "score": "0.58409846", "text": "def test_empty_url(self, mock_get, request):\n self.sources[SOURCE_ID][\"parameters\"][\"url\"] = self.url\n request.json = {\"url\": \"\"}\n response = post_source_parameter(SOURCE_ID, \"url\", self.database)\n self.assertEqual(response, {\"ok\": True, \"nr_sources_mass_edited\": 0, \"availability\": []})\n mock_get.assert_not_called()", "title": "" }, { "docid": "51a99d5e07753a59f17e57aa1ff1266b", "score": "0.5838431", "text": "def test_schemes_allow_only_supported_protocols():\n pass", "title": "" }, { "docid": "af300a463789d3f0a22a9968374711fb", "score": "0.5825196", "text": "def bad_url(url: str, e: Exception):\n print(\"\\nProblem found with \" + url + \" via \" + str(e) + \"\\n\")\n print(\"Check if the datasource's URL has changed.\\n\")", "title": "" }, { "docid": "86c68f1c3946f6159f539deaeeca7d14", "score": "0.5824091", "text": "def test_url_socket_error(self, mock_get, request):\n mock_get.side_effect = socket.gaierror(\"This is some text that should be ignored ([Errno 1234] Error message)\")\n request.json = {\"url\": self.url}\n response = post_source_parameter(SOURCE_ID, \"url\", self.database)\n self.assert_url_check(response, -1, \"[Errno 1234] Error message\")\n updated_report = self.database.reports.insert_one.call_args[0][0]\n url = self.url\n self.assert_delta(\n f\"url of source 'Source' of metric 'Metric' of subject 'Subject' in report 'Report' from '' to '{url}'\",\n report=updated_report,\n )", "title": "" }, { "docid": "e68e31128996ff8fdeac261d70715a48", "score": "0.5820514", "text": "def url(self, name):\r\n raise 
NotImplementedError()", "title": "" }, { "docid": "37c39712801bae191d50d12c65090f99", "score": "0.5803945", "text": "def test_set_url_base_return(self) -> None:\n\n given = \"https://example.org\"\n\n actual = self.query_tool.set_url_base(given)\n\n self.assertIsInstance(actual, CollectionQueryTool)", "title": "" }, { "docid": "adb072453491e5a345fa5f87de540501", "score": "0.57885486", "text": "def get_checked_url(url_to_use):\n if not URL_FORMAT_PATTERN.match(url_to_use):\n write_out(\"Url '{}', is invalid\".format(url_to_use), False)\n return get_error_data(url_to_use, \"Invalid URL\", True)", "title": "" }, { "docid": "89c1a4055974ff48f6e0e9c9eb4f4d45", "score": "0.5785253", "text": "def test_index_url_resolve(self):\n self.assertEqual(self.url, '/home')", "title": "" }, { "docid": "1f5396dcf8c17aef8f41c7f718472fc2", "score": "0.57816863", "text": "def test_set_preferred_status_origin_not_supported(self) -> None:\n\n given = \"hello\"\n\n self.assertRaises(\n ValueError, lambda: self.query_tool.set_preferred_status_origin(given)\n )", "title": "" }, { "docid": "a1bee7b3d99ee58ade81ed4ea324dbb9", "score": "0.57625926", "text": "def test_malformed_url_property(self):\n self.activities[0]['object'].update({\n 'content': 'post content without backlinks',\n 'url': 'https://fa.ke/post/url',\n })\n\n # malformed u-url, should skip it without an unhashable dict error\n self.expect_requests_get('http://author/', \"\"\"\n<html class=\"h-feed\">\n <div class=\"h-entry\">\n <a class=\"u-url h-cite\" href=\"/permalink\">this is a strange permalink</a>\n </div>\n</html>\"\"\")\n\n self.mox.ReplayAll()\n self.assert_discover([])", "title": "" }, { "docid": "199385a73eade92cd64cd1c5869f542e", "score": "0.57601875", "text": "def check_url_is_allowed(self, value: str) -> str:\n\n url = parse_url(value.strip())\n\n if url.scheme in self.blacklisted_url_schemes:\n raise ValueError(\n \"Scheme: {scheme} is blacklisted\".format(scheme=url.scheme)\n )\n\n return url.url", "title": "" }, { "docid": "8cb13824bee11063c2b7bfbd96696f5c", "score": "0.57587177", "text": "def fetch(self, url):\n raise Exception(\"NotImplementedException\")", "title": "" }, { "docid": "9788060f2bf84b0864f11c2bc72117bf", "score": "0.5745475", "text": "def test_set_url_base_attribute(self) -> None:\n\n given = \"https://example.org\"\n expected = given\n\n self.query_tool.url_base = given\n actual = self.query_tool.url_base\n\n self.assertEqual(expected, actual)", "title": "" }, { "docid": "4d3808359fe9d1e9e40b2e64373beb83", "score": "0.5741907", "text": "def test_domain_url_not_found(self):\n self._test_failed_domain_url_fetch(raise_exception=False)", "title": "" }, { "docid": "9b4c68df862e78aa6debce276dbe0009", "score": "0.57383", "text": "def url(self, url):\n self.__url = url", "title": "" }, { "docid": "babdf1ea09f4f9a052de475d76cb4bbd", "score": "0.57349", "text": "def test_unknownschemehost_noport(self):\n exc = exceptions.HttpError\n match = \"no.*'port'\"\n with pytest.raises(exc, match=match):\n axonapi.http.ParserUrl(\"httpx://host\")", "title": "" }, { "docid": "3ecca72aaa0e35b34f98c7f73f78a729", "score": "0.5722408", "text": "def test_url_get(self):\n m = Maltiverse()\n item = m.url_get('https://www.welsfagmary-online.com/')\n self.assertTrue(isinstance(str(item), str))\n self.assertTrue(isinstance(item, dict))\n self.assertTrue('url' in item)", "title": "" }, { "docid": "53957a5032e8e4b81d6ced09d18518aa", "score": "0.57180303", "text": "def test_invalid_URL(self):\n #Invalid URL\n #Invalid URL\n count = 0\n try:\n e = 
DocManager(\"http://doesntexist.cskjdfhskdjfhdsom\")\n except SystemError:\n count += 1\n self.assertTrue(count == 1)\n print(\"PASSED INVALID URL\")", "title": "" }, { "docid": "9b7850dadebde1b3f1c688339b61dd5a", "score": "0.57177603", "text": "def test_user_tries_access_invalid_link():", "title": "" }, { "docid": "63316f34b4cbb7ff63edf96e27e42d4f", "score": "0.57032394", "text": "def try_url(url):\n try:\n urllib2.urlopen(url)\n return True\n except urllib2.URLError:\n return False", "title": "" }, { "docid": "9d23e0d794d8044f5bae528259d1ec11", "score": "0.5701769", "text": "def set_url(self, url):\n super(BatchIteration, self).set_url(url)", "title": "" }, { "docid": "20c793e321142a78ed50933fbb8eee99", "score": "0.56998813", "text": "def test_url(self):\n #NOTE: self.shortDescription() shuould work.\n print(\"(\"+self.test_url.__name__+\")\", self.test_url.__doc__, end=' ')\n with resources.app.test_request_context(self.url):\n rule = flask.request.url_rule\n view_point = resources.app.view_functions[rule.endpoint].view_class\n self.assertEqual(view_point, resources.MachineTypes)", "title": "" }, { "docid": "4d64e2cce4e21a5fc7daef6aa81a0cd7", "score": "0.5695838", "text": "def is_url(self, _input: str):\r\n if validators.url(_input):\r\n return True\r\n return False", "title": "" }, { "docid": "ad95864d6b06e2de862464c925c02388", "score": "0.5688012", "text": "def test_schemehost_noport80(self):\n u = axonapi.http.ParserUrl(\"http://host\")\n assert u.hostname == \"host\"\n assert u.port == 80\n assert u.scheme == \"http\"", "title": "" }, { "docid": "4c471eead7ff009e0804559ee2fbcd9d", "score": "0.5686827", "text": "def __init__(self, url):\n self.url = url\n self.parsed = requests.utils.urlparse(url)\n\n self.clean_netloc()\n\n if not self.parsed.netloc:\n raise ValueError('Wrong URL (Make sure \"http(s)://\" included)')\n\n self.adjust_url()", "title": "" }, { "docid": "37589a38d942d6a0fff7c12ecb2a7ced", "score": "0.56847954", "text": "def supports_uri_scheme():\n\n return True", "title": "" }, { "docid": "653dc6e238476f6b2666d06c68cb49a1", "score": "0.56796396", "text": "def try_url(URL, wsdl_url, code, version):\n # set schema doctor to fix missing schemas\n d = get_doctor(code, version)\n if not d: return\n try:\n # FIRST -- try the provided url\n client = Client(URL, plugins=[d])\n print(client.__str__())\n if 'Methods' not in client.__str__():\n # need to try this on first call, self loops have this as False and will return empty\n if wsdl_url == False:\n return\n else:\n # SECOND -- add ?wsdl to provided url\n url_with_wsdl = URL + '?wsdl'\n URL = try_url(url_with_wsdl, False, code, version)\n if URL:\n return URL\n else:\n # FINALLY -- try the wsdl url provided using the scheme and domain in the regular url\n URL = try_url(wsdl_url, False, code, version)\n if URL:\n return URL\n else:\n return\n except Exception as e:\n print(e)\n if wsdl_url != False:\n url_with_wsdl = URL + '?wsdl'\n URL = False\n try:\n URL = try_url(url_with_wsdl, False, code, version)\n if not URL:\n try:\n URL = try_url(wsdl_url, False, code, version)\n except Exception as e:\n print(e)\n return\n except Exception as e:\n print(e)\n try:\n URL = try_url(wsdl_url, False, code, version)\n except Exception as e:\n print(e)\n return\n return URL", "title": "" }, { "docid": "4803ca9b67ef302059a334b1bf6bd04c", "score": "0.56785655", "text": "def seturl(self) -> None:\n url = self.session.options.get(\"stdurl\", self.DEFAULT_SDT_URL)\n self.url = urlparse(url)\n self.address = (self.url.hostname, self.url.port)\n 
self.protocol = self.url.scheme", "title": "" }, { "docid": "c6807f1d20c2c6ca0403eba3365d21ab", "score": "0.5669546", "text": "def test_url(self, mock_get, request):\n mock_get.return_value = self.url_check_get_response\n request.json = {\"url\": self.url}\n response = post_source_parameter(SOURCE_ID, \"url\", self.database)\n self.assert_url_check(response)\n mock_get.assert_called_once_with(self.url, auth=(\"username\", \"\"), headers={}, timeout=10)\n updated_report = self.database.reports.insert_one.call_args[0][0]\n url = self.url\n self.assert_delta(\n f\"url of source 'Source' of metric 'Metric' of subject 'Subject' in report 'Report' from '' to '{url}'\",\n report=updated_report,\n )", "title": "" }, { "docid": "ea394ab60b7dc02d19954589f8d94171", "score": "0.5667569", "text": "def test_get_internal_api_url_with_microsite_override(self):\n self.assert_get_internal_api_url_value('foo')", "title": "" }, { "docid": "6bcfffe2c894fee4ec6654dbcd49efef", "score": "0.5664153", "text": "def url(self):\n raise NotImplementedError", "title": "" }, { "docid": "6189ec3ca1fd9c90c192587b52a59712", "score": "0.5647506", "text": "def prepare_url(self, url, params):\r\n #: Accept objects that have string representations.\r\n try:\r\n url = unicode(url)\r\n except NameError:\r\n # We're on Python 3.\r\n url = str(url)\r\n except UnicodeDecodeError:\r\n pass\r\n\r\n # Don't do any URL preparation for oddball schemes\r\n if ':' in url and not url.lower().startswith('http'):\r\n self.url = url\r\n return\r\n\r\n # Support for unicode domain names and paths.\r\n scheme, auth, host, port, path, query, fragment = parse_url(url)\r\n\r\n if not scheme:\r\n raise MissingSchema(\"Invalid URL {0!r}: No schema supplied. \"\r\n \"Perhaps you meant http://{0}?\".format(url))\r\n\r\n if not host:\r\n raise InvalidURL(\"Invalid URL %r: No host supplied\" % url)\r\n\r\n # Only want to apply IDNA to the hostname\r\n try:\r\n host = host.encode('idna').decode('utf-8')\r\n except UnicodeError:\r\n raise InvalidURL('URL has an invalid label.')\r\n\r\n # Carefully reconstruct the network location\r\n netloc = auth or ''\r\n if netloc:\r\n netloc += '@'\r\n netloc += host\r\n if port:\r\n netloc += ':' + str(port)\r\n\r\n # Bare domains aren't valid URLs.\r\n if not path:\r\n path = '/'\r\n\r\n if is_py2:\r\n if isinstance(scheme, str):\r\n scheme = scheme.encode('utf-8')\r\n if isinstance(netloc, str):\r\n netloc = netloc.encode('utf-8')\r\n if isinstance(path, str):\r\n path = path.encode('utf-8')\r\n if isinstance(query, str):\r\n query = query.encode('utf-8')\r\n if isinstance(fragment, str):\r\n fragment = fragment.encode('utf-8')\r\n\r\n enc_params = self._encode_params(params)\r\n if enc_params:\r\n if query:\r\n query = '%s&%s' % (query, enc_params)\r\n else:\r\n query = enc_params\r\n\r\n url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))\r\n self.url = url", "title": "" }, { "docid": "140df832cb85bee9ee1657476d5299c7", "score": "0.5647161", "text": "def _checkurl(self,url):\n\n try:\n util.request(url+'.dds')\n return True\n except KeyboardInterrupt:\n raise KeyboardInterrupt\n except:\n return False", "title": "" }, { "docid": "9ba2c959f29953db8ecce1795012d493", "score": "0.5644463", "text": "def check_url_availability(cmd_args):\n if cmd_args.source:\n url = cmd_args.source\n try:\n requests.get(url)\n LOGGER.info('Check the URL availability.')\n except Exception:\n raise er.UnreachableURLError(\"URL is invalid.\")\n else:\n LOGGER.info('URL is valid. 
Connection established.')\n return True\n else:\n raise er.UndefinedURL('URL is required')", "title": "" }, { "docid": "cf3aaf2102094d040a0d138d9bae5f93", "score": "0.5643659", "text": "def test_proxies_import_from_url_post(self):\n pass", "title": "" }, { "docid": "d8b02d34679b9ab3dd870ab00a060c47", "score": "0.56435233", "text": "def url(self, value):\n self.__url = value", "title": "" }, { "docid": "b3a7558b3d460ab0705511debc17703c", "score": "0.5638695", "text": "def __init__(self, url: str = None):\n self.url = url", "title": "" }, { "docid": "b3a7558b3d460ab0705511debc17703c", "score": "0.5638695", "text": "def __init__(self, url: str = None):\n self.url = url", "title": "" }, { "docid": "056c7b077bed214d544534fbbcc579b1", "score": "0.5633176", "text": "def set_url(self, url):\n o = urlparse.urlparse(url)\n self.path = o.path\n self.scheme = o.scheme or self.scheme\n self.host = o.netloc or self.host\n\n for (name, value) in urlparse.parse_qs(o.query).items():\n assert len(value) == 1\n self.set(name, value[0])", "title": "" }, { "docid": "cb2f5328ef4a51b4f721396b4109f423", "score": "0.56305605", "text": "def validServerURL(option, opt_str, value, parser):\n if value is not None:\n if not validURL(value):\n raise OptionValueError(\"%s option value '%s' not valid.\" % (opt_str, value))\n setattr(parser.values, option.dest, value)\n else:\n setattr(parser.values, option.dest, option.default)", "title": "" }, { "docid": "572e36c7f1037d59d3c0a9f6bc37e3e6", "score": "0.56234425", "text": "def test_urlparse_exception(self) -> None:\n\n address = \"[DOUBLEQUOTE]\"\n url = Url(address)\n self.assertEqual(url.domain, \"\")", "title": "" }, { "docid": "b91a58b48d67b4c44a941f3d4457f88d", "score": "0.5618582", "text": "def _detect(self):\n self.available = self._check_required_args(['urls'])", "title": "" }, { "docid": "04a81ca064231fd3796774c3257723de", "score": "0.5616055", "text": "def invalid_url(url):\n r = requests.get(url)\n if r.status_code >= 400 or r.text == '404 File Not Found':\n raise ValueError\n return", "title": "" }, { "docid": "b166e41867c21b5789b13e14939df6b5", "score": "0.5609505", "text": "def test_url_starts_with_https(self):\n self.assertIsNotNone(self.get_url_response)", "title": "" }, { "docid": "bac88bf8a2724a01b7c385c76c70db01", "score": "0.5605313", "text": "def url(self, value):\r\n self.logger.warn(\"Setting values on url will NOT update the remote Canvas instance.\")\r\n self._url = value", "title": "" }, { "docid": "00fa017725117babe58b82a768b044e4", "score": "0.5604786", "text": "def setQueryAPIURL(self, url):\n if self.useTestAPI:\n self.url_orderAPI_demo = url\n self.log(\"Changed DEMO URL for query API to:\", url)\n else:\n self.url_orderAPI = url\n self.log(\"Changed PRODUCTION URL for query API to:\", url)", "title": "" }, { "docid": "2c9fdfb77489ca3c3cc6a7f6ed29789a", "score": "0.5602213", "text": "def test_replace_not_valid_url(self):\n res = parse('<a class=\"external-link embedlylink\" href=\"http://not.valid.url/11\"></a>')\n self.assertFalse(res.startswith('<div class=\"embed\">'))", "title": "" }, { "docid": "68e34ce1f92e1bd34dbce0f7423ee99a", "score": "0.55987066", "text": "def test_fetch_url(self):\n # This should return a 200\n result = fetch_url(\n 'https://zoek.officielebekendmakingen.nl/rss/dossier/31981'\n )\n\n self.assertTrue(result)\n\n # This should yield a timeout - and thus empty data\n result = fetch_url(\n 'http://10.255.255.1/'\n )\n\n self.assertFalse(result)", "title": "" }, { "docid": "5d0cb7bb4a4445ab5938df18ea6364b2", "score": 
"0.559571", "text": "def test_bad_url(self):\n rv = self.submit_url('50 cent')\n resp = str(rv.data, encoding='utf8')\n assert 'Sorry, but' in resp", "title": "" }, { "docid": "3b8aa4a14c288035ca286ae5e4e6d195", "score": "0.5593122", "text": "def test_uri(self):\n with pytest.deprecated_call():\n assert \"https://www.structlog.org/\" == structlog.__uri__", "title": "" }, { "docid": "3ab3413c0ffcee3ddb2d9dff293b9528", "score": "0.5588141", "text": "def test_invalid_url(self):\n base_url = 'http://invalid_url'\n username = self.admin_username\n password = self.admin_password\n language = self.language\n with self.assertRaises(ConnectionError, msg='Exception not raised when invalid URL was passed.'):\n Session(base_url, username, password, language)", "title": "" } ]
7732897d672fe036c1d2b58e44dd262c
Make a random matrix with elements in range (lowhigh)
[ { "docid": "e27867629904731d44dbbc8c2a9a695c", "score": "0.76756394", "text": "def makeRandom(cls, m, n, low=0, high=10):\n\n obj = Matrix(m, n)\n for x in range(m):\n obj.rows[x] = [random.randrange(low, high) for i in range(obj.csize)]\n\n return obj", "title": "" } ]
[ { "docid": "b5e8a62cd84fd0814bfb38c042749abe", "score": "0.7580365", "text": "def randmatrix(m, n, lower=-0.5, upper=0.5):\n return np.array([random.uniform(lower, upper) for i in range(m*n)]).reshape(m, n)", "title": "" }, { "docid": "4154e7b54e17a3fc806a6f9e15ecbed0", "score": "0.7443374", "text": "def generate_matrix(upper_bound, length, width, height):\n\n return np.random.rand(length, width, height) * upper_bound", "title": "" }, { "docid": "d7f26b5c98de7a94af8309092dcdec73", "score": "0.73389655", "text": "def agx_matrix_new_random(row, column, min, max):\n return _angka.agx_matrix_new_random(row, column, min, max)", "title": "" }, { "docid": "e61bc6787d87c0b7857a807738987fe3", "score": "0.7003225", "text": "def uniform(min_, max_, dims):\r\n return mdp.numx_rand.random(dims)*(max_-min_)+min_", "title": "" }, { "docid": "06f1f2cbcee60f5bd10e5fdcb9dcf810", "score": "0.69823575", "text": "def random(self, min, max):\n return _angka.AgxMatrix_random(self, min, max)", "title": "" }, { "docid": "ac450d32595256138b717bd01fde5841", "score": "0.6828775", "text": "def create_random_matrix(columns, rows):\n return [[random.randrange(10) for c in range(columns)] for r in range(rows)]", "title": "" }, { "docid": "81323769c9501c5694b3c3f338707318", "score": "0.6759446", "text": "def randomMatrix(matrix):\n for i in range(len(matrix)):\n matrix[i] = random.randrange(2)\n return matrix", "title": "" }, { "docid": "0af832b08703340e2d1be1b42e67efaa", "score": "0.6697226", "text": "def randomUniformSampled(low, high):\n\treturn np.random.uniform(low, high)", "title": "" }, { "docid": "a14f2663ff5afb3127ac1b9c6ae255ab", "score": "0.6590932", "text": "def generate_matrix_num(rows, cols, low, high, value_type):\r\n\r\n matrix = []\r\n\r\n if value_type == 'int':\r\n for _ in range(rows):\r\n subList = []\r\n for _ in range(cols):\r\n b = randint(low, high)\r\n subList.append(b)\r\n matrix.append(subList)\r\n elif value_type == 'float':\r\n for _ in range(rows):\r\n subList = []\r\n for _ in range(cols):\r\n a = uniform(low, high)\r\n subList.append(a)\r\n matrix.append(subList)\r\n return matrix", "title": "" }, { "docid": "4b7966d2db6275367bd539b141c196a6", "score": "0.65716064", "text": "def rand_matrix(n: int ,m: int) -> np.matrix:\n\n matrix = np.matrix(np.random.random_integers(0,100, (n,m)))\n return matrix", "title": "" }, { "docid": "4b7966d2db6275367bd539b141c196a6", "score": "0.65716064", "text": "def rand_matrix(n: int ,m: int) -> np.matrix:\n\n matrix = np.matrix(np.random.random_integers(0,100, (n,m)))\n return matrix", "title": "" }, { "docid": "6de5856bc181a65140ebb8f3b5d81dc5", "score": "0.65316176", "text": "def randomized_matrix(self, a=0, b=255):\n matrix = []\n for _ in range(self.dim):\n row = []\n for _ in range(self.dim):\n row.append(np.random.randint(a, b))\n matrix.append(row)\n return matrix", "title": "" }, { "docid": "b2a0fff53a18d19d0578726bc812a711", "score": "0.6514424", "text": "def set_matrix(n, m):\n new_matrix = []\n for i in range(n):\n new_matrix.append(sample(range(10), m))\n return new_matrix", "title": "" }, { "docid": "7ebbc0a2ed1473ba82453b169606e918", "score": "0.64943236", "text": "def _randrange(n, vmin, vmax):\n return (vmax - vmin) * np.random.rand(n) + vmin", "title": "" }, { "docid": "a1fd993ca3075e6294f2fb2066ff285c", "score": "0.64659387", "text": "def uniform(low, high):\n\n return abs(high-low) * random() + low", "title": "" }, { "docid": "b025e518f01d7ebf5fa3526273742a85", "score": "0.6450461", "text": "def randomUniformSampledList(low, high, 
size):\n\treturn np.random.uniform(low, high, size)", "title": "" }, { "docid": "957099743023c5750c7f990e5e388299", "score": "0.6440226", "text": "def generate_random_matrix(n): # checked; works\r\n return np.random.random((n, n))", "title": "" }, { "docid": "3b279578857700d0f5d25a7de2033bcf", "score": "0.64094037", "text": "def sampleUniform(min, max):\n\treturn randint(min, max)", "title": "" }, { "docid": "9704cbf3516ed8e78119ea57aaa63fb2", "score": "0.6395663", "text": "def randomProblem(rows = 10, columns = 10, max = 1000):\n result = []\n\n for i in range(rows):\n resultRow = []\n\n for j in range(columns):\n resultRow.append(random.randint(0, max))\n\n result.append(resultRow)\n\n return result", "title": "" }, { "docid": "87e4a06e4c1cf72a0523275f9d5f958e", "score": "0.6314613", "text": "def _uniform(val_range):\n return np.random.uniform(val_range[0], val_range[1])", "title": "" }, { "docid": "6a3338ced95f9753c656d41e513bd855", "score": "0.62933385", "text": "def random_mat(m):\n return np.random.randn(m,m) / m**.5", "title": "" }, { "docid": "b05168f678e98cb75c92235b94f1b316", "score": "0.62763226", "text": "def random_upper_triangular(m):\n M = random_mat(m)\n return np.triu(M)", "title": "" }, { "docid": "4f559b065e2fc4fe91add3eed48db609", "score": "0.62369907", "text": "def uniform(low, high, size=None):\n if low == high:\n if size is None:\n return low\n else:\n return np.ones(size) * low\n else:\n return np.random.uniform(low, high, size)", "title": "" }, { "docid": "db604e36f9016c50681d8fac49b42121", "score": "0.62349445", "text": "def randtn(minlim=-3., maxlim=3.):\n rn = minlim-1 # initialize out of bounds to get the while loop going\n while (rn<minlim) or (rn>maxlim):\n rn = np.random.randn()\n return rn", "title": "" }, { "docid": "916d51b515e26e6cc7d0b103e8533615", "score": "0.62154245", "text": "def randrange(self, start, stop, step=None):\n pass", "title": "" }, { "docid": "ef9095d58770709aabdf49d6d23f0bda", "score": "0.6196181", "text": "def gen_random(self, n, M):\n m = []\n for i in range(0, n):\n m.append([])\n for j in range(0, n):\n m[i].append(random.randint(0, M-1))\n\n self.matrix = m", "title": "" }, { "docid": "39477c0f9357381e6a1d24057e311e11", "score": "0.6189386", "text": "def uniform(min_bound: int, max_bound: int):\n\n def apply_uniform(_):\n \"\"\" Inner function for appling random.uniform()\n \"\"\"\n return int(np.random.uniform(min_bound, max_bound))\n\n return tune.sample_from(apply_uniform)", "title": "" }, { "docid": "ffbce9a02c526ca9daa7da8c1a157be6", "score": "0.6188085", "text": "def randomCells(width,height):\n A = createBoard(width, height)\n for row in range(height):\n for col in range(width):\n if row == 0 or row == height-1 or col == 0 or col == width-1:\n A[row][col] = 0\n else:\n A[row][col] = random.choice([0,1])\n return A", "title": "" }, { "docid": "f63e052ebbb25e6e5b0a7c43e857ec49", "score": "0.617113", "text": "def randrange(self, start, stop, step=None):\n pass", "title": "" }, { "docid": "f63e052ebbb25e6e5b0a7c43e857ec49", "score": "0.617113", "text": "def randrange(self, start, stop, step=None):\n pass", "title": "" }, { "docid": "f63e052ebbb25e6e5b0a7c43e857ec49", "score": "0.617113", "text": "def randrange(self, start, stop, step=None):\n pass", "title": "" }, { "docid": "4f9cbd789790a2eac9f9dc90b6f612cb", "score": "0.6159474", "text": "def rand_lo_hi( lo , hi ):\n return random() * ( hi - lo ) + lo", "title": "" }, { "docid": "73281f819f43a7fe5d895114d877f584", "score": "0.6134226", "text": "def 
create_random_array(start=-50000, end=1000000, length=1000):\n\n from random import randint\n return [randint(start,end ) for i in range(length)]", "title": "" }, { "docid": "2fe62c817582adf962f18b17355b0b0d", "score": "0.61241114", "text": "def rand_in_range(lower, upper):\n return (random() * (upper - lower)) + lower", "title": "" }, { "docid": "c10859dd8c1218fad7762a46990e353d", "score": "0.6123463", "text": "def RANDBETWEEN(low, high):\n return random.randrange(low, high + 1)", "title": "" }, { "docid": "c561e05c540117b68babf76f5a9f527d", "score": "0.6104389", "text": "def generate_random_value(low_bound, upp_bound, identificator=None):\n if identificator == 't' or identificator == 's':\n return np.random.uniform(low_bound, upp_bound)\n mode = 1.0\n if low_bound >= mode:\n sample = np.random.triangular(low_bound, low_bound, upp_bound)\n elif upp_bound <= mode:\n sample = np.random.triangular(low_bound, upp_bound, upp_bound)\n else:\n sample = np.random.triangular(low_bound, mode, upp_bound)\n return sample", "title": "" }, { "docid": "8a432a49cd58ee5ab43115e94530483c", "score": "0.610209", "text": "def choose_pivot(start, end):\n pivot = randint(start, end)\n return pivot", "title": "" }, { "docid": "e2c0854024e8c59bf028f59289f7bd03", "score": "0.60778135", "text": "def random_matrix(size,conn):\n mtx = np.zeros((size,size))\n \n for i in range(size):\n for j in range(size):\n tmp = random.random()\n if tmp < conn:\n mtx[i,j] = 1 - tmp\n return mtx", "title": "" }, { "docid": "13b1da5eaf30c7574ec04a2884c5200f", "score": "0.6062179", "text": "def sample_Z(m, n):\n Z = np.random.uniform(-1, 1, size=[m, n])\n return Z", "title": "" }, { "docid": "c1e03ead24fe1fbdace278c56ed9f497", "score": "0.604987", "text": "def produce_board():\n base_range = range(base)\n rows = [x * base + y for x in shuffle(base_range) for y in shuffle(base_range)]\n cols = [x * base + z for x in shuffle(base_range) for z in shuffle(base_range)]\n nums = shuffle(range(1, base * base + 1))\n\n board = [[nums[pattern(r, c)] for c in cols] for r in rows]\n\n for line in board:\n num_one = randint(0, 8)\n num_two = randint(0, 8)\n while abs(num_one - num_two) < 1:\n # make sure range is greater 1\n num_one = randint(0, 8)\n num_two = randint(0, 8)\n\n if num_one > num_two:\n for i in range(num_two, num_one):\n line[i] = 0\n elif num_two > num_one:\n for j in range(num_one, num_two):\n line[j] = 0\n\n return board", "title": "" }, { "docid": "e72558b50085552319a2623e320d5dc5", "score": "0.60326946", "text": "def createIndividual(min, max, length):\n return np.random.randint(min, max, length)", "title": "" }, { "docid": "d958fec97177cf369a8c9febfbc1fa04", "score": "0.60246074", "text": "def initGrid(self, cols, rows, array):\r\n\r\n for i in range(rows):\r\n arrayRow = []\r\n for j in range(cols):\r\n if (i == 0 or j == 0 or (i == rows - 1) or (j == cols - 1)):\r\n arrayRow += [-1]\r\n else:\r\n ran = random.randint(0,3)\r\n if ran == 0:\r\n arrayRow += [1]\r\n else:\r\n arrayRow += [0]\r\n array += [arrayRow]", "title": "" }, { "docid": "ad4d3a99a3de2d9500d5fea8073fbb96", "score": "0.6014742", "text": "def samplemat(dims):\n aa = np.zeros(dims)\n for i in range(min(dims)):\n aa[i, i] = i\n return aa", "title": "" }, { "docid": "4801a52a22d3a8d837ca0f9318a8aadb", "score": "0.60018975", "text": "def gen_abnormal_sensor_data():\r\n abnormal_range = range(45, 60)\r\n return random.choice(abnormal_range)", "title": "" }, { "docid": "ac82a81e79f808fa7198ef6b135a0dfd", "score": "0.5997376", "text": "def randomCells(w,h):\n A = 
createBoard(w, h)\n\n for row in range(1,h-1):\n for col in range(1,w-1):\n rand= random.randrange(0,2)\n A[row][col] = rand\n return A", "title": "" }, { "docid": "b72ad201779a44772d7befe143f108ac", "score": "0.5994065", "text": "def fill_rng_2d(\n min=ScalarDef(F64),\n max=ScalarDef(F64),\n seed=ScalarDef(I32),\n O=TensorDef(T, S.M, S.N, output=True),\n):\n domain(D.m, D.n)\n multiplier = TypeFn.cast_signed(I32, const(1103515245))\n increment = TypeFn.cast_signed(I32, const(12345))\n rand1 = (TypeFn.cast_signed(I32, index(D.m)) + seed) * multiplier + increment\n rand2 = (TypeFn.cast_signed(I32, index(D.n)) + rand1) * multiplier + increment\n inv_range = TypeFn.cast_signed(F64, const(2.3283064e-10))\n offset = TypeFn.cast_signed(F64, const(2147483647))\n scaling = (max - min) * inv_range\n O[D.m, D.n] = TypeFn.cast_signed(\n T, (offset + TypeFn.cast_signed(F64, rand2)) * scaling + min\n )", "title": "" }, { "docid": "d08b51b58442f93d9a7ac648f53e102a", "score": "0.59930384", "text": "def draw_uniform(size=5, low=0., high=1., seed=42):\n np.random.seed(seed)\n return np.random.uniform(low, high, size)", "title": "" }, { "docid": "2a7a4a0094d12a6ad07bd9f455b54f05", "score": "0.5989579", "text": "def coord_gen(coord_limit):\n rand = []\n for i in range(tr.info_len()):\n value = np.random.randint(0, coord_limit)\n rand = np.append(rand, value)\n return rand", "title": "" }, { "docid": "3f70cb236ec24fe6bea7a68841b2294f", "score": "0.598252", "text": "def new_tile(self):\n # replace with your code\n #print self.matrix\n node = []\n for i in range(self.get_grid_height()):\n for j in range(self.get_grid_width()):\n if self.matrix[i][j] == 0:\n node += [(i,j)]\n\n rand_node = random.choice(node)\n row = rand_node[0]\n col = rand_node[1]\n if self.matrix[row][col] == 0:\n if random.randrange(0, 10) == 9:\n self.matrix[row][col] = 4\n else:\n self.matrix[row][col] = 2\n #print self.matrix\n #print", "title": "" }, { "docid": "322c4fcd1d4bff051f0e06064682bb39", "score": "0.59822", "text": "def create_weight_matrix(nrows, ncols):\n return np.random.default_rng().normal(loc=0, scale=1/(nrows*ncols), size=(nrows, ncols))", "title": "" }, { "docid": "d33aef9db3bc3326fd2d745593818d15", "score": "0.59617436", "text": "def generate_uniform_distribution(lower, upper, number_of_values):\n\n return np.longdouble(np.array(list(np.random.uniform(lower, upper, number_of_values))))", "title": "" }, { "docid": "304710d1859e53feb2db5b1b76b720b6", "score": "0.59546643", "text": "def sample_Z(m, n):\n\n return np.random.uniform(-1., 1., size=(m, n))", "title": "" }, { "docid": "e214be498c11c02f4304e5f1b4b628e2", "score": "0.5952451", "text": "def randomGrid(N):\r\n return np.random.choice(vals, N*N, p=[0.1,0.9]).reshape(N, N)", "title": "" }, { "docid": "935fe15558cd0f7f0b01f2832358a8b0", "score": "0.59476155", "text": "def uniform(self, low, high, size=None):\n return self.rng.uniform(low, high, size)", "title": "" }, { "docid": "c05a48f1b9b8905ec8c943a028d11c87", "score": "0.59428656", "text": "def test_get_random_range(self):\n return\n rand_seq = []\n hot_random = HotRandom()\n for j in range( 0, 200 ):\n rand_num = hot_random.hot_rand( 91, 37 )\n rand_seq.append( rand_num )\n assert are_items_in_range( rand_seq, 91, 37 ) , \"Got an out of range number\"\n rand_seq = []\n for j in range( 0, 200 ):\n rand_num = hot_random.hot_rand( 19, 0 )\n rand_seq.append( rand_num )\n assert are_items_in_range( rand_seq, 19, 0 ) , \"Got an out of range number\"\n rand_seq = []\n for j in range( 0, 200 ):\n rand_num = 
hot_random.hot_rand( 61, 4 )\n rand_seq.append( rand_num )\n assert are_items_in_range( rand_seq, 61, 4 ) , \"Got an out of range number\"", "title": "" }, { "docid": "d9ec1c0e21f3826cb2d780769c9671c9", "score": "0.59331393", "text": "def make_random_ints(num, lower_bound, upper_bound):\n rng = random.Random() # Create a random number generator\n result = []\n for i in range(num):\n result.append(rng.randrange(lower_bound, upper_bound))\n # result.append(random.randrange(lower_bound, upper_bound))\n return result", "title": "" }, { "docid": "1fb1f5c265915290c9a281eb666fb2e4", "score": "0.59292376", "text": "def generate_matrix(m, n, distr=scipy.stats.norm()):\n return distr.rvs((m,n))", "title": "" }, { "docid": "95c541ec81775366168027e6c5090be5", "score": "0.59199923", "text": "def randomGrid(N):\n return np.random.choice(vals, N*N, p=[0.2, 0.8]).reshape(N, N)", "title": "" }, { "docid": "4530dddc1e3eaffdd239d544848bdc24", "score": "0.5919806", "text": "def matrix_fill(rows,cols):\r\n return [[x for x in range(1,rows*cols+1)][i:i+cols] for i in range(0,rows*cols,cols)]", "title": "" }, { "docid": "820e13f4fd7eedb6a02ad5610940f357", "score": "0.59172094", "text": "def uniform_random_points(bounds, num_points):\n internal_bounds = [sorted(b) for b in bounds]\n # Generate rows for each of the coordinates according to the given bounds, stack into an array, \n # and split into a list of points.\n mat = np.vstack([np.random.uniform(b[0], b[1], num_points) for b in internal_bounds])\n return list(mat[:len(bounds)].T)", "title": "" }, { "docid": "dd31bfdbab2b269bf91eec157f191390", "score": "0.591685", "text": "def genera_lunghezza():\n return randint(0, min(args.N, args.M)//2)", "title": "" }, { "docid": "21504544f38166ebd4c3d49418ffd5ba", "score": "0.5907893", "text": "def randomGrid(N):\n return np.random.choice(vals, N * N, p=[0.2, 0.8]).reshape(N, N)", "title": "" }, { "docid": "21504544f38166ebd4c3d49418ffd5ba", "score": "0.5907893", "text": "def randomGrid(N):\n return np.random.choice(vals, N * N, p=[0.2, 0.8]).reshape(N, N)", "title": "" }, { "docid": "fae1f2421ebc8cfb2792117b1d1a4d68", "score": "0.59039974", "text": "def genMatrix(max = 10):\n matrix = []\n for i in range(getExp(start), getExp(end)):\n # print(str(bin(i)))\n matrix.append(str(bin(i))[-end+1:])\n # print(matrix[j])\n max -= 1\n if max == 0:\n break\n return matrix", "title": "" }, { "docid": "0ae130cbd1de5c5779a1e3262df2854f", "score": "0.58732146", "text": "def generate_random(self, shape: list, val_range: list, dtype=\"float\", seed=datetime.now()):\n random.seed(seed)\n self.shape[0] = shape[0]\n self.shape[1] = shape[1]\n\n for x in range(shape[0]):\n self.matrix.append([])\n for y in range(shape[1]):\n # self.matrix[x].append(random.randint(val_range[0], val_range[1]) + random.random() if dtype == \"float\"\n # else random.randint(val_range[0], val_range[1]))\n if dtype == \"float\":\n self.matrix[x].append(float(random.randint(val_range[0], val_range[1]) + round(random.random(), 4)))\n elif dtype == \"int\":\n self.matrix[x].append(random.randint(val_range[0], val_range[1]))", "title": "" }, { "docid": "397d151e76a8f74031d5c9e5f153fe4e", "score": "0.5872394", "text": "def random_RGB_matrix(self,\n a=0,\n b=255):\n matrix = []\n for _ in range(self.dim):\n row = []\n for _ in range(self.dim):\n pixel = [x for x in np.random.randint(a, b, 3)]\n row.append(pixel)\n matrix.append(row)\n return matrix", "title": "" }, { "docid": "5d4ecca61015b9c55cf1995945c318fe", "score": "0.5868767", "text": "def make_random_ints(num, 
lower_bound, upper_bound):\n rng = random.Random() # Create a random number generator\n result = []\n for i in range(num):\n result.append(rng.randrange(lower_bound, upper_bound))\n return result", "title": "" }, { "docid": "7991741abea7df7326ae15afcf632296", "score": "0.58624685", "text": "def randomGrid(N):\n return np.random.choice(vals, N*N, p=[0.7, 0.2, 0.1]).reshape(N, N)", "title": "" }, { "docid": "9de6110d1bb3eaf5e8e8b5f7c23189a0", "score": "0.5855373", "text": "def randomGrid(N):\n return np.random.choice(vals, N*N, p=[0.3, 0.7]).reshape(N, N)", "title": "" }, { "docid": "643cb8d4b86abed6ee943dbd845eb319", "score": "0.58393186", "text": "def gen_nox(self):\n new_nox = np.random.randint( low = matrix_min,\n high = matrix_max-handicap,\n size = (1) )\n return new_nox", "title": "" }, { "docid": "bf52bafce8f98ff98ac3dc98a3881811", "score": "0.58380544", "text": "def create_random(cls, low=0, high=1.):\n res = cls()\n res.random(low, high)\n return res", "title": "" }, { "docid": "d771a2d06655f0fb1beef1aa80c2ea64", "score": "0.5808646", "text": "def random_unit_vector_within_limits(self, lower_limit=0.0, upper_limit=1.0):\r\n return list(np.random.uniform(low=lower_limit, high=upper_limit, size=self.prior_count))", "title": "" }, { "docid": "960377cc5a4c09a8dc12acaab1fc6d4e", "score": "0.57926625", "text": "def randomize(self, xsize, ysize):\n res = []\n for y in range(ysize):\n row = []\n for x in range(xsize):\n row.append(random.sample([0]*20 + [1]*2,1)[0])\n res.append(row)\n self._data = res", "title": "" }, { "docid": "e795d3a8b8b73ad42a650ead8c2cb59c", "score": "0.57916695", "text": "def get_random_invertible_matrix(m):\n d = 2\n while d>1:\n M = Matrix([[randint(0,m-1), randint(0,m-1)],[randint(0,m-1), randint(0,m-1)]])\n d = gcd(M.det(), m)\n return M", "title": "" }, { "docid": "203ff4eabe57ccd1e8c98e59cf7150f3", "score": "0.5784927", "text": "def genMatrix(size=200, value=10):\n\n matrix = [[value for col in range(0,size)] for row in range(0,size)]\n\n return matrix", "title": "" }, { "docid": "c001423b00d45e71ed6f3c9d29fdc2d5", "score": "0.5784199", "text": "def rand_uniform(self, arr_shape, min_elem=0, max_elem=1):\n rand_arr = np.random.rand(*arr_shape)\n rand_arr = min_elem + rand_arr * (max_elem - min_elem)\n return rand_arr", "title": "" }, { "docid": "1ea5d4444d40cab632cef455ebd277f9", "score": "0.5774231", "text": "def embaralha(m,v,n):\n m = ceil(m)\n if m > 0 and m <= n:\n mi = (n-m) // 2\n mf = (n+m) // 2\n for num in range(mi,mf):\n i = np.random.randint(mi,mf)\n j = np.random.randint(mi,mf)\n t = v[i]\n v[i] = v[j]\n v[j] = t\n return v", "title": "" }, { "docid": "21b259c0b0ec5eb2c840b1350ca0548f", "score": "0.57656854", "text": "def get_random_corners(var_lower, var_upper):\n assert(isinstance(var_lower, np.ndarray))\n assert(isinstance(var_upper, np.ndarray))\n assert(len(var_lower) == len(var_upper))\n\n n = len(var_lower)\n limits = np.vstack((var_upper, var_lower))\n node_pos = np.atleast_2d(limits[np.random.randint(2, size=n), \n np.arange(n)])\n while (len(node_pos) < n+1):\n point = limits[np.random.randint(2, size=n), np.arange(n)]\n if (get_min_distance(point, node_pos) > 0):\n node_pos = np.vstack((node_pos, point))\n\n return np.array(node_pos, np.float_)", "title": "" }, { "docid": "04c31ffd48dcffc937dc432ac7e053a4", "score": "0.57642174", "text": "def new_tile(self):\n # replace with your code\n value = random.choice(([2] * 90) + ([4] * 10))\n row = random.randrange(0, self._grid_height)\n col = random.randrange(0, self._grid_width)\n while 
self._grid[row][col] != 0:\n row = random.randrange(0, self._grid_height)\n col = random.randrange(0, self._grid_width)\n self._grid[row][col] = value", "title": "" }, { "docid": "10ead1e548380a86bdaf04d202b919a9", "score": "0.57620573", "text": "def generate_random(min=1, max=100, nums=10):\r\n rng = default_rng()\r\n numbers = rng.choice(np.arange(min, max), size=nums, replace=False)\r\n return numbers", "title": "" }, { "docid": "067b0c9331fe9d6f358c985551d189e8", "score": "0.57606566", "text": "def random_board(self):\n\n board = np.random.randint(0, 11, (4, 4))\n for i in range(len(board)):\n for j in range(len(board[i])):\n if board[i, j] != 0:\n board[i, j] = 2**board[i, j]\n\n return board", "title": "" }, { "docid": "a53462efd8cd44e882681569c190c16b", "score": "0.5738326", "text": "def _random_binary_string_matrix(rows, cols, max_length):\n return [[_random_binary_string_gen(max_length) for _ in range(cols)] for _ in range(rows)]", "title": "" }, { "docid": "d35526d1bf0b825c860d350e1ac10bc6", "score": "0.5732991", "text": "def random_nums(num, mn, mx):\n array = []\n while len(array) < num:\n array.append(random.randrange(mn, mx))\n return array", "title": "" }, { "docid": "bef418b0a73e8106cd9eb06f05c5fd17", "score": "0.57216287", "text": "def create_uniform_grid(width, height):\n grid_size = (10, 10)\n num_bins_horizontal = int(round(width / grid_size[0]))\n num_bins_vertical = int(round(height / grid_size[1]))\n bins = (num_bins_horizontal, num_bins_vertical)\n low = [0, 0]\n high = [width, height]\n grid = [np.linspace(low[dim], high[dim], bins[dim] + 1)[1:-1] for dim in range(len(bins))]\n\n return grid", "title": "" }, { "docid": "8eb3792c26297b15a21efcef87db0a30", "score": "0.57181585", "text": "def random_init(n, initial, bounds, use_point=False, problem=None):\n population = np.ascontiguousarray(problem.randomize(n))\n if use_point:\n population[0] = clip(initial, *bounds)\n return population", "title": "" }, { "docid": "14698691c4c5b9c503f4ad0dd636a9b2", "score": "0.571791", "text": "def between(min, max):\n return random.random() * (max - min) + min", "title": "" }, { "docid": "cfba3ffb732bbcbc4f6b8d65e3d0b236", "score": "0.57167524", "text": "def make_uniform_grid(xmin, xmax, ncells, nghost=2):\n dx = (xmax - xmin)/(ncells - 1)\n xf = xmin + dx*np.arange(-nghost, ncells+nghost)\n return Grid(xf, nghost)", "title": "" }, { "docid": "871d393eda57d80211983a9d6a371272", "score": "0.57135993", "text": "def pick(arr, m):\r\n if m > len(arr):\r\n raise ValueError(\"not enough items to fill all m slots\")\r\n res = arr[:m]\r\n for i in range(m, len(arr)):\r\n k = randint(0, i)\r\n if k < m: # k must not equal m, else index out of bounds\r\n res[k] = arr[i]\r\n return res", "title": "" }, { "docid": "9d01779782b949870f6786ae3608d27a", "score": "0.57105386", "text": "def initialize(X, k):\n # cases\n if not isinstance(X, np.ndarray) or len(X.shape) != 2:\n return None\n if not isinstance(k, int) or k <= 0:\n return None\n\n # Setting min and max values per col\n n, d = X.shape\n X_min = X.min(axis=0)\n X_max = X.max(axis=0)\n\n # return multivariate uniform distribution\n return np.random.uniform(X_min, X_max, size=(k, d))", "title": "" }, { "docid": "cbf63516ecb02fb8b9bb6e45c127cd7f", "score": "0.5704827", "text": "def set_signal(low=-1, high=1, N=10):\r\n s = np.random.uniform(low=low, high=high, size=(N, 1))\r\n return s", "title": "" }, { "docid": "2037e8fe5f46db94c7ae1d2c4ea90639", "score": "0.5690302", "text": "def genMatrix2(size=200, value=5):\n\n matrix = np.asarray([ 
np.asarray([value for col in range(0,size)]) for row in range(0,size)])\n\n return matrix", "title": "" }, { "docid": "f9be7e2035c2be7ce56d938c165118b0", "score": "0.56721246", "text": "def new_tile(self):\n cell_value = 1\n while cell_value != 0:\n row = random.randint(0, self._height -1 )\n column = random.randint(0, self._width -1)\n cell_value = self._grid[row][column]\n \n random_value = random.randint(1, 10)\n if random_value <= 9:\n self._grid[row][column] = 2\n else:\n self._grid[row][column] = 4", "title": "" }, { "docid": "524d47fee9bf75d608299e90ceab4177", "score": "0.56651396", "text": "def make_matrix(height, width, fill_value):\n return [[fill_value for dummy_col in range(width)] for \n dummy_row in range(height)]", "title": "" }, { "docid": "f8d9aba0d4376eba79830ebab1e872f8", "score": "0.56586415", "text": "def mt_rand (low = 0, high = sys.maxint):\n return random.randint (low, high)", "title": "" }, { "docid": "dfe57dcc9769fc9fa0f79c0f13bc10b9", "score": "0.56497943", "text": "def generate_list():\n upper_bound = int(input(\"-> Please choose the Upper Bound of the array: \"))\n size = int(input(\"-> Please choose the size of the array: \"))\n\n # return upperBound, size, np.random.randint(0, upperBound, size)\n\n rand_arr = []\n for _ in range(size):\n rand_arr.append(pcg32bounded(upper_bound))\n\n return upper_bound, size, rand_arr", "title": "" }, { "docid": "db70b12d1f1d4ff0b1029a7c2f2bd4ed", "score": "0.56410575", "text": "def generate_function(limit):\r\n w0 = random.uniform(-limit, limit)\r\n w1 = random.uniform(-limit, limit)\r\n w2 = random.uniform(-limit, limit)\r\n return np.array([w0, w1, w2])", "title": "" }, { "docid": "739e28b3f79887822c1142806d82199e", "score": "0.5627422", "text": "def _random_vector(min, max, prng=DEFAULT_PRNG):\n min = np.array(min)\n max = np.array(max)\n assert min.shape == max.shape\n assert len(min.shape) == 1\n return prng.uniform(min, max)", "title": "" }, { "docid": "a89e331ec96e57eba296d4dab8507303", "score": "0.5625954", "text": "def random_uniform(low=0.0, high=0.0, size=None, dev='cpu', f=None):\n return _get_framework(f=f).random_uniform(low, high, size, dev)", "title": "" }, { "docid": "c14d79510049f0d7914b9a234dc2b100", "score": "0.56223214", "text": "def random_x_gen(self):\n if isinstance(self.min_x, list):\n x_list = []\n for i,j in zip(self.min_x, self.max_x):\n x_list.append(uniform(i,j))\n return x_list\n else:\n x_list = [uniform(self.min_x, self.max_x)]\n return x_list", "title": "" }, { "docid": "1b4515e37546eb3ae4dfa5ca3bbfedf2", "score": "0.56164277", "text": "def gen_pool_matrix(size=25,mu=.0,sigma=1.,gamma=-.5):\n\n if gamma==0:\n asymm=0\n else:\n asymm=(1- np.sqrt(1-gamma**2) )/gamma\n\n aij=(np.random.normal(0,1./np.sqrt(size),(size,size)) )\n\n aij= (aij+asymm * aij.T) / np.sqrt(1+ asymm**2 )\n\n A=-mu/size + sigma*aij\n np.fill_diagonal(A,-1)\n return A", "title": "" } ]
68d870d5d23b867b8cde3c54962e3557
Creates the temporary html_to_pdf.html file that will be located in the rssparser/data/ folder. With pdfkit library converts this html file to the pdf file specified in topdf arg. And removes the temporary html file.
[ { "docid": "38fd8232ae791da34cff94ddf52be21a", "score": "0.7395331", "text": "def to_pdf(items, path_to_save_pdf):\n to_html(items, 'rssparser/data/html_to_pdf.html')\n pdfkit.from_file('rssparser/data/html_to_pdf.html', path_to_save_pdf)\n os.remove('rssparser/data/html_to_pdf.html')", "title": "" } ]
[ { "docid": "0294fe5f674f0b44652a9c10498ec7d8", "score": "0.73789454", "text": "def write_pdf(self, html):\n try:\n f = tempfile.NamedTemporaryFile(delete=False, suffix='.html')\n f.write(html.encode('utf_8', 'xmlcharrefreplace'))\n f.close()\n except Exception:\n raise IOError(u\"Unable to create temporary file, aborting\")\n\n dummy_fh = open(os.path.devnull, 'w')\n\n try:\n command = [\"prince\", f.name, \"-o\", self.destination_file]\n\n Popen(command, stderr=dummy_fh).communicate()\n except Exception:\n raise EnvironmentError(u\"Unable to generate PDF file using \"\n \"prince. Is it installed and available?\")\n finally:\n dummy_fh.close()", "title": "" }, { "docid": "d32ba824fcf4a80ccfc2eb14e49d67f6", "score": "0.7002694", "text": "def pdftotxt(pdfdata):\n pdffout = tempfile.NamedTemporaryFile(suffix='.pdf')\n print pdffout.name\n pdffout.write(pdfdata)\n pdffout.flush()\n\n xmlin = tempfile.NamedTemporaryFile(mode='r', suffix='.xml')\n tmpxml = xmlin.name # \"temph.xml\"\n #cmd = '/usr/bin/pdftohtml -nodrm -enc UTF-8 \"%s\" \"%s\"' % (pdffout.name, os.path.splitext(tmpxml)[0])\n cmd = '/usr/bin/pdftohtml -xml -nodrm -zoom 1.5 -enc UTF-8 -noframes \"%s\" \"%s\"' % (pdffout.name, os.path.splitext(tmpxml)[0])\n cmd = cmd + \" >/dev/null 2>&1\" # can't turn off output, so throw away even stderr yeuch\n os.system(cmd)\n\n pdffout.close()\n #xmlfin = open(tmpxml)\n xmldata = xmlin.read()\n xmlin.close()\n return xmldata", "title": "" }, { "docid": "d32ba824fcf4a80ccfc2eb14e49d67f6", "score": "0.7002694", "text": "def pdftotxt(pdfdata):\n pdffout = tempfile.NamedTemporaryFile(suffix='.pdf')\n print pdffout.name\n pdffout.write(pdfdata)\n pdffout.flush()\n\n xmlin = tempfile.NamedTemporaryFile(mode='r', suffix='.xml')\n tmpxml = xmlin.name # \"temph.xml\"\n #cmd = '/usr/bin/pdftohtml -nodrm -enc UTF-8 \"%s\" \"%s\"' % (pdffout.name, os.path.splitext(tmpxml)[0])\n cmd = '/usr/bin/pdftohtml -xml -nodrm -zoom 1.5 -enc UTF-8 -noframes \"%s\" \"%s\"' % (pdffout.name, os.path.splitext(tmpxml)[0])\n cmd = cmd + \" >/dev/null 2>&1\" # can't turn off output, so throw away even stderr yeuch\n os.system(cmd)\n\n pdffout.close()\n #xmlfin = open(tmpxml)\n xmldata = xmlin.read()\n xmlin.close()\n return xmldata", "title": "" }, { "docid": "6c196d6b87e3e5b19398d2194ee877ad", "score": "0.68640995", "text": "def create_pdf(html):\n\n # Define options\n pdf_options = {\n \"page-size\": \"A4\",\n \"orientation\": \"portrait\"\n }\n\n if selectedTemplate == \"FWF\":\n pdf_options[\"orientation\"] = \"landscape\"\n\n config = pdfkit.configuration(wkhtmltopdf=PATH_WKHTMLTOPDF)\n\n pdfkit.from_file(html, html.replace(\".html\", \".pdf\"), configuration=config, options=pdf_options)\n return html.replace(\".html\", \".pdf\")", "title": "" }, { "docid": "a0eb8dbaf78e05f9f89ad04fa2b6edba", "score": "0.6609989", "text": "def test_put_convert_html_to_pdf(self):\n # Already in storage\n name = \"test1.html\"\n result_name = \"putHtmlToPdf.pdf\"\n test_folder = \"HtmlTestDoc\"\n test_out_path = test_folder + \"/\" + result_name\n\n try:\n # Convert document to pdf in storage\n self.api.put_convert_document_to_pdf(\n name, out_path=test_out_path, width=800, height=1000, left_margin=50, right_margin=100,\n top_margin=150, bottom_margin=200, folder=test_folder, storage=\"\")\n\n # Download result\n res = TestHelper.download_file(test_out_path)\n save_file = TestHelper.test_dst + result_name\n TestHelper.move_file(res, save_file)\n except ApiException as ex:\n print(\"Exception\")\n print(\"Info: \" + str(ex))\n raise ex", 
"title": "" }, { "docid": "35f0546e6d277ecc65ddd9c430c67fdb", "score": "0.6557726", "text": "def pdftoxml(pdfdata):\n pdffout = tempfile.NamedTemporaryFile(suffix='.pdf')\n pdffout.write(pdfdata)\n pdffout.flush()\n\n xmlin = tempfile.NamedTemporaryFile(mode='r', suffix='.xml')\n tmpxml = xmlin.name # \"temph.xml\"\n cmd = 'pdftohtml -xml -nodrm -zoom 1.5 -enc UTF-8 -noframes \"%s\" \"%s\"' % (pdffout.name, os.path.splitext(tmpxml)[0])\n cmd = cmd + \" >/dev/null 2>&1\" # can't turn off output, so throw away even stderr yeuch\n os.system(cmd)\n\n pdffout.close()\n #xmlfin = open(tmpxml)\n xmldata = xmlin.read()\n xmlin.close()\n return xmldata", "title": "" }, { "docid": "3dfddfcdddaa2569c5ab0f399a7a0c9b", "score": "0.65014225", "text": "def convert_html2pdf(detector):\n # get a list of all the html files in the calwebb_spec2_pytests dir\n latest_htmlfile = get_latest_file(\"*\" + detector + \"*.html\")\n # create the pdf output name\n pdf_file = latest_htmlfile.replace(\".html\", \".pdf\")\n # convert the html report into a pdf file\n # import pdfkit\n # options = {'dpi': 96}\n # pdfkit.from_file(latest_htmlfile, pdf_file, options=options)\n print(\"\\n Converted \", latest_htmlfile, \" to \", pdf_file, \". Both files are available in current directory. \\n\")", "title": "" }, { "docid": "45d93f989fbde2e3edb5c3b4ce84ee8a", "score": "0.6468519", "text": "def write_to_pdf(self, html, config, ofname=\"\"):\n pdf = None\n try:\n if ofname == \"\":\n pdf = pdfkit.from_string(html, False, configuration=config)\n # pdfkit.from_string(html, ofname, configuration=config)\n else:\n pdfkit.from_string(html, ofname, configuration=config)\n # pdfkit.from_string(html, ofname)\n finally:\n self.grouped_failure.close()\n self.grouped_risk.close()\n self.overall_failure.close()\n self.overall_risk.close()\n return pdf", "title": "" }, { "docid": "d87ec27ca5227f0fbaf58a93a928598a", "score": "0.646367", "text": "def pdftoxml(pdfdata):\n pdffout = tempfile.NamedTemporaryFile(suffix='.pdf')\n pdffout.write(pdfdata)\n pdffout.flush()\n\n xmlin = tempfile.NamedTemporaryFile(mode='r', suffix='.xml')\n tmpxml = xmlin.name # \"temph.xml\"\n cmd = '/usr/bin/pdftohtml -xml -nodrm -zoom 1.5 -enc UTF-8 -noframes \"%s\" \"%s\"' % (pdffout.name, os.path.splitext(tmpxml)[0])\n cmd = cmd + \" >/dev/null 2>&1\" # can't turn off output, so throw away even stderr yeuch\n os.system(cmd)\n\n pdffout.close()\n #xmlfin = open(tmpxml)\n xmldata = xmlin.read()\n xmlin.close()\n return xmldata", "title": "" }, { "docid": "d87ec27ca5227f0fbaf58a93a928598a", "score": "0.646367", "text": "def pdftoxml(pdfdata):\n pdffout = tempfile.NamedTemporaryFile(suffix='.pdf')\n pdffout.write(pdfdata)\n pdffout.flush()\n\n xmlin = tempfile.NamedTemporaryFile(mode='r', suffix='.xml')\n tmpxml = xmlin.name # \"temph.xml\"\n cmd = '/usr/bin/pdftohtml -xml -nodrm -zoom 1.5 -enc UTF-8 -noframes \"%s\" \"%s\"' % (pdffout.name, os.path.splitext(tmpxml)[0])\n cmd = cmd + \" >/dev/null 2>&1\" # can't turn off output, so throw away even stderr yeuch\n os.system(cmd)\n\n pdffout.close()\n #xmlfin = open(tmpxml)\n xmldata = xmlin.read()\n xmlin.close()\n return xmldata", "title": "" }, { "docid": "00a399a9e9d6ec6d4ad9e1a3d7dab1e3", "score": "0.62859905", "text": "def create_html_file(html):\r\n tmp_fd, url = tempfile.mkstemp(suffix='.html')\r\n tmp_file = os.fdopen(tmp_fd, 'w')\r\n tmp_file.write(html)\r\n tmp_file.close()\r\n return url", "title": "" }, { "docid": "2eff7e819359036aa0747cd7814dcccb", "score": "0.62706757", "text": "def 
test_post_convert_html_in_request_to_pdf(self):\n name = \"postHtmlToPdfInReq.pdf\"\n test_out_path = \"HtmlTestDoc/\" + name\n test_file = TestHelper.test_src + \"test1.html\"\n try:\n # Upload and convert document to pdf\n self.api.post_convert_document_in_request_to_pdf(\n out_path=test_out_path, file=test_file, width=800, height=1000, left_margin=50, right_margin=100,\n top_margin=150, bottom_margin=200)\n\n # Download result\n res = TestHelper.download_file(test_out_path)\n save_file = TestHelper.test_dst + name\n TestHelper.move_file(res, save_file)\n except ApiException as ex:\n print(\"Exception\")\n print(\"Info: \" + str(ex))\n raise ex", "title": "" }, { "docid": "b275724231056372124a1e8b27e79337", "score": "0.6114608", "text": "def test_get_convert_html_to_pdf(self):\n name = \"test1.html\"\n try:\n # Upload file to storage\n res = TestHelper.upload_file(name)\n self.assertTrue(len(res.uploaded) == 1)\n self.assertTrue(len(res.errors) == 0)\n\n # Convert document to pdf\n res = self.api.get_convert_document_to_pdf(\n name, width=800, height=1000, left_margin=50, right_margin=100, top_margin=150, bottom_margin=200,\n folder=TestHelper.folder, storage=\"\"\n )\n self.assertTrue(isinstance(res, str), \"Error convert html to pdf\")\n\n # Move to test folder\n TestHelper.move_file(str(res), TestHelper.test_dst)\n except ApiException as ex:\n print(\"Exception\")\n print(\"Info: \" + str(ex))\n raise ex", "title": "" }, { "docid": "176018c473850f440b9fdd512a2624f6", "score": "0.6098932", "text": "def save_pdf(htmls, file_name):\n options = {\n 'page-size': 'Letter',\n 'margin-top': '0.75in',\n 'margin-right': '0.75in',\n 'margin-bottom': '0.75in',\n 'margin-left': '0.75in',\n 'encoding': \"UTF-8\",\n 'custom-header': [\n ('Accept-Encoding', 'gzip')\n ],\n 'cookie': [\n ('cookie-name1', 'cookie-value1'),\n ('cookie-name2', 'cookie-value2'),\n ],\n 'outline-depth': 10,\n }\n pdfkit.from_file(htmls, file_name, options=options)", "title": "" }, { "docid": "38e1ce71cdbb72451e8c10ad70d8a187", "score": "0.60953766", "text": "def generate_pdf(html): \n result = StringIO.StringIO()\n pdf = pisa.pisaDocument(StringIO.StringIO(html.encode(\"UTF-8\")), result)\n if not pdf.err:\n return HttpResponse(result.getvalue(), content_type='application/pdf')\n return HttpResponse('Error al generar el PDF: %s' % cgi.escape(html))", "title": "" }, { "docid": "d6c614441db22ad8704f78516e5ea146", "score": "0.6012346", "text": "def write_pdf(self):\n try:\n #f = tempfile.NamedTemporaryFile(delete=False, suffix='.tex')\n f = open(self.output_file.replace('.pdf', '.tex'), 'w')\n for a in self.content:\n f.write(a)\n f.close()\n except Exception:\n raise IOError(u\"Unable to create temporary file, aborting\")\n\n #dummy_fh = open(os.path.devnull, 'w')\n\n try:\n command = [\"pdflatex\", \"-shell-escape\", f.name]\n Popen(command).communicate()\n except Exception:\n raise EnvironmentError(u\"Unable to generate PDF file using \"\n \"pdflatex. 
Is it installed and available?\")\n finally:\n #dummy_fh.close()\n name = self.output_file.replace('.pdf', '')\n latex_trash = ['.out', '.log', '.aux', '.snm', '.nav', '.toc']\n for a in latex_trash:\n #print self.source_abspath+'/'+name+a\n if os.path.isfile(self.source_abspath+'/'+name+a):\n os.remove(self.source_abspath+'/'+name+a)", "title": "" }, { "docid": "0860851ea29bb2f310e2cf9b95fb2370", "score": "0.5933459", "text": "def html_to_pdf(html, as_http_response=False, name=None):\n # weasyprint is an extra dependency\n import weasyprint\n\n def url_fetcher(url, timeout=10, ssl_context=None):\n return weasyprint.default_url_fetcher(\n \"file://\" + url.replace(\"file://\", settings.BASE_DIR), timeout, ssl_context\n )\n\n ret = weasyprint.HTML(string=html, base_url=\"\", url_fetcher=url_fetcher).write_pdf()\n if as_http_response:\n ret = HttpResponse(ret, content_type=\"application/pdf\")\n ret[\"Content-Disposition\"] = f\"inline; filename={name}.pdf\"\n return ret", "title": "" }, { "docid": "5eab72ba84cb310f145f8c77fa281d36", "score": "0.59234834", "text": "def test_save_news_in_pdf(self, pisa, html_saver):\n pisa.return_value = ''\n path = os.path.abspath(os.curdir)\n mock_logger = MagicMock(return_value=None)\n html_file = os.path.join(path, 'test.html')\n with open(html_file, 'x', encoding='utf-8') as html:\n html.write(self.html)\n html_saver.return_value = html\n pdf = functions.save_news_in_pdf_file(self.article_a, path, mock_logger)\n self.assertTrue(os.path.join(path, 'rss_news.pdf'))\n os.remove(pdf.name)", "title": "" }, { "docid": "2e5590425036d5778bca7b7b0cc78c96", "score": "0.5892757", "text": "def pdftoetree(pdforig, options=''):\n xmlin = tempfile.NamedTemporaryFile(mode='r', suffix='.xml')\n tmpxml = xmlin.name # \"temph.xml\"\n cmd = '/usr/bin/pdftohtml -xml -nodrm -zoom 1.5 -enc UTF-8 -noframes %s \"%s\" \"%s\"' % (options, pdforig, os.path.splitext(tmpxml)[0])\n cmd = cmd + \" >/dev/null 2>&1\" # can't turn off output, so throw away even stderr yeuch\n os.system(cmd)\n tree = lxml.etree.parse(xmlin)\n xmlin.close()\n return tree", "title": "" }, { "docid": "9a61eb38b2a2578876b9f58f885a1dc0", "score": "0.5879228", "text": "def _write_pdf_report(self):\n LOGGER.info('Generating HTML for writing pdf report...')\n pypandoc.convert_file(self.path, 'html',\n outputfile=self._REPORT_TMP_HTML_PATH,\n extra_args=['-V', 'geometry:margin=1.5cm', '--columns', '1000'])\n LOGGER.info('Metamorphosising HTML to PDF...')\n weasyprint.HTML(self._REPORT_TMP_HTML_PATH) \\\n .write_pdf(self.path_pdf, stylesheets=[weasyprint.CSS(self._REPORT_CSS_PATH)])", "title": "" }, { "docid": "bf8a7e53828d8044b506b13602db0bf1", "score": "0.5771483", "text": "def convert(file, path):\n try:\n process = subprocess.Popen(['pdftohtml -c -nodrm -hidden -xml ' + file + ' ' + path], shell=True)\n process.wait()\n except Exception as exception:\n error_logger(traceback.format_exc())\n error_logger(str(exception))", "title": "" }, { "docid": "14441f9b6dafc757d682e0f257f898d6", "score": "0.5673843", "text": "def create_pdf(text: str, output_file: str, font_type: str, font_size: int,\n remove_cms: bool = True, remove_tgs: bool = True) -> None:\n\n # Install reportlab if not installed\n try:\n import reportlab\n except ImportError:\n import pip\n pip.main(['install', 'reportlab', '--user'])\n import reportlab\n\n from reportlab.pdfgen import canvas\n from reportlab.pdfbase import pdfmetrics\n from reportlab.pdfbase.ttfonts import TTFont\n from reportlab.platypus import Paragraph\n from reportlab.lib.styles 
import ParagraphStyle\n from reportlab.lib.styles import getSampleStyleSheet\n from reportlab.platypus.flowables import KeepInFrame\n from reportlab.lib import pagesizes\n\n canvas = canvas.Canvas(output_file, pagesize=pagesizes.A4)\n\n # Get default fonts into a dictionary\n default_fonts = {font.upper(): font for font in canvas.getAvailableFonts()}\n\n # Get fonts in fonts/ directory into a dictionary\n program_fonts = {}\n for root, dirs, files in os.walk(\"fonts/\"):\n for file in files:\n if file.endswith('.ttf') or file.endswith('.otf'):\n program_fonts[re.sub('\\.[ot]tf$', '', file).upper()] = os.path.join(root, file)\n\n # Try to register font if not one of the defaults, fails otherwise\n try:\n # Check if font is in the fonts/ directory, register if true\n if font_type.upper() in program_fonts:\n pdfmetrics.registerFont(TTFont(font_type, program_fonts.get(font_type.upper())))\n # Else check if path to font is provided, register if true\n elif os.path.isfile(font_type) and font_type.endswith('.ttf') or font_type.endswith('.otf'):\n pdfmetrics.registerFont(TTFont(font_type, font_type))\n # Else check if the provided font is one of the default fonts, rename font\n elif font_type.upper() in default_fonts:\n font_type = default_fonts.get(font_type.upper())\n # Otherwise fail\n else:\n raise ValueError('Not a valid font file.')\n except (ValueError, reportlab.pdfbase.ttfonts.TTFError) as ex:\n print(ex)\n print('Can not use font: ' + os.path.abspath(font_type) + '.')\n print('You can use one of the default fonts:')\n [print(font, end=\" \") for font in default_fonts.values()]\n print()\n print('or the fonts in the \"fonts/\" directory:')\n [print(font, end=\" \") for font in program_fonts.keys()]\n print()\n print('Alternatively, you can provide a path to a TrueType font file.')\n sys.exit(1)\n\n # Styling\n stylesheet = getSampleStyleSheet()\n stylesheet.add(ParagraphStyle(name='custom', fontName=font_type, fontSize=font_size, leading=font_size * 1.3))\n\n # Positioning\n width, height = pagesizes.A4\n width -= 40 # drawing area width\n height -= 30 # drawing area height\n\n tlx = 30 # offset from left\n tly = height + 10 # offset from bottom\n\n # Remove comments and/or tags\n if remove_cms:\n text = remove_comments(text)\n if remove_tgs:\n text = remove_tags(text)\n\n # Pagination\n # Pages are split on empty lines (can contain whitespace)\n pages = re.split('(?:[\\n][ \\t]*){2,}', text.strip())\n for index, page in enumerate(pages):\n frame = KeepInFrame(width, height, [Paragraph(page.replace('\\n', '<br/>\\n'), stylesheet[\"custom\"])])\n w, h = frame.wrapOn(canvas, width, height)\n frame.drawOn(canvas, tlx, tly - h)\n\n # Insert page break for all but the last page\n if index != len(pages):\n canvas.showPage()\n\n # Produce pdf\n canvas.save()", "title": "" }, { "docid": "89f97a9e6d3a0bcad906a5da4aed9b2e", "score": "0.56649834", "text": "def convert_pdf(converter, build_path: pathlib.Path, settings: Settings, variables: Variables) -> None:\n build_path = get_build_path(settings)\n pdf_build_filename = build_path / pathlib.Path(settings[\"OutputFormats\"][\"pdf\"][\"OutputFile\"])\n html_filename = build_path / pathlib.Path(settings[\"InputFile\"]).with_suffix(\".html\").name\n html_builder = HTMLContentBuilder(html_filename, build_path)\n converter.convert(html_builder)\n html_cover_filename = build_path / pathlib.Path(settings[\"InputFile\"]).with_suffix(\".cover.html\").name\n html_cover_builder = HTMLCoverBuilder(html_cover_filename, build_path)\n 
converter.convert(html_cover_builder)\n with open(\"DocumentDefinitions/Shared/header.html\", encoding=\"utf-8\") as header_template_file:\n header_contents = header_template_file.read() % variables[\"KWALITEITSAANPAK\"]\n header_filename = build_path / \"header.html\"\n with open(header_filename, mode=\"w\", encoding=\"utf-8\") as header_file:\n header_file.write(header_contents)\n pdf_build_filename = build_path / pathlib.Path(settings[\"OutputFormats\"][\"pdf\"][\"OutputFile\"])\n os.system(\n f\"\"\"wkhtmltopdf \\\n --enable-local-file-access \\\n --footer-html DocumentDefinitions/Shared/footer.html --footer-spacing 10 \\\n --header-html {header_filename} --header-spacing 10 \\\n --margin-bottom 27 --margin-left 34 --margin-right 34 --margin-top 27 \\\n --title '{settings[\"Title\"]}' \\\n cover {html_cover_filename} \\\n {\"toc --xsl-style-sheet DocumentDefinitions/Shared/toc.xsl\" if settings[\"IncludeTableOfContents\"] else \"\"} \\\n {html_filename} {pdf_build_filename}\"\"\"\n )\n pdf_build_filename2 = build_path / pathlib.Path(settings[\"OutputFormats\"][\"pdf\"][\"OutputFile\"] + \".step2\")\n os.system(f\"gs -o {pdf_build_filename2} -sDEVICE=pdfwrite -dPrinted=false -f {pdf_build_filename} src/pdfmark.txt\")\n copy_output(pdf_build_filename2, settings, \"pdf\")", "title": "" }, { "docid": "d0e2715f8118b67b3edf30b83ffd6975", "score": "0.5664913", "text": "def test_put_convert_html_to_xps(self):\n # Already in storage\n name = \"test1.html\"\n result_name = \"putHtmlToXps.xps\"\n test_folder = \"HtmlTestDoc\"\n test_out_path = test_folder + \"/\" + result_name\n\n try:\n # Convert document to pdf in storage\n self.api.put_convert_document_to_xps(\n name, out_path=test_out_path, width=800, height=1000, left_margin=50, right_margin=100,\n top_margin=150, bottom_margin=200, folder=test_folder, storage=\"\")\n\n # Download result\n res = TestHelper.download_file(test_out_path)\n save_file = TestHelper.test_dst + result_name\n TestHelper.move_file(res, save_file)\n except ApiException as ex:\n print(\"Exception\")\n print(\"Info: \" + str(ex))\n raise ex", "title": "" }, { "docid": "cfd815c2e0a434f97518f60ed56d6dc4", "score": "0.55401945", "text": "def save_pdf(self, url: str, file_name: str):\n if not url.startswith('http'):\n url = 'http://' + url\n # TODO replace\n file_name = file_name.replace(\".html\", \"\")\n file_name = f'{path_join(self.root, file_name)}'\n if exists(file_name):\n os.remove(file_name)\n try:\n pdfkit.from_url(url, file_name, options=self.options)\n except OSError as e:\n logger.error(f'Pdf not saved: {url}.\\n{e}')\n os.remove(file_name)", "title": "" }, { "docid": "4c08ab5016c03208202e11ed2e154698", "score": "0.55357", "text": "def tmpHtmlFile(dir_path):\n\n #\n # Create a temporary file for logging the folder\n #\n fd, filename = tempfile.mkstemp(suffix='.htm', dir=dir_path)\n os.close(fd)\n f = open(filename, 'w')\n return f", "title": "" }, { "docid": "2a7e8c7ed9c51d95c1a4249b2faa73f1", "score": "0.5509825", "text": "def serve_pdf_from_html(\n html: str, offered_filename: str = \"test.pdf\", **kwargs\n) -> HttpResponse:\n pdf = get_pdf_from_html(html, **kwargs)\n return serve_buffer(\n pdf,\n offered_filename=offered_filename,\n content_type=MimeType.PDF,\n as_attachment=False,\n as_inline=True,\n )", "title": "" }, { "docid": "f77d9f22578ebf7a1e053b7270d3f152", "score": "0.54926306", "text": "def build_html_report(options, xml_file):\n options.xml_file = xml_file\n options.html = True\n xml_parser = ReportBuilder.FunkLoadXmlParser()\n 
xml_parser.parse(xml_file)\n utils.trace(\"Creating html report ...\")\n html_path = ReportRenderHtml.RenderHtml(\n xml_parser.config, xml_parser.stats,\n xml_parser.error, xml_parser.monitor,\n options)()\n report = os.path.dirname(html_path)\n os.rename(xml_file[:-4]+'.log',\n os.path.join(report, 'funkload.log'))\n assert os.path.isfile(os.path.join(report, 'funkload.xml'))\n os.remove(xml_file)\n utils.trace(\"done: \\n\")\n utils.trace(\"file://%s\\n\" % html_path)\n return report", "title": "" }, { "docid": "0b17b9da643de205fff5be0910d9748b", "score": "0.547525", "text": "def pdf_generation(request):\n html_template = render_to_string('bill.html')\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n css = [os.path.join(BASE_DIR, 'static/css/', 'bill.css')]\n pdf_file = HTML(string=html_template).write_pdf(\n target='/home/kobey/Desktop/mypdf.pdf', stylesheets=css) # this line creates pdf\n fs = FileSystemStorage('/home/kobey/Desktop')\n\n with fs.open('mypdf.pdf') as pdf:\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = 'inline; filename=\"mypdf.pdf\"'\n return response\n # return render(request,'bill.html')\n return response", "title": "" }, { "docid": "ac112bcfe74d53987cff233ea9b2c369", "score": "0.5449141", "text": "def bytes(self):\n html_bytes = self.html_format.bytes\n\n pdf_bytes = pdfkit.from_string(\n html_bytes.decode(self.encoding),\n False,\n options={\n \"load-error-handling\": \"ignore\",\n \"load-media-error-handling\": \"ignore\",\n \"margin-left\": self.pdf_margin_left,\n \"margin-right\": self.pdf_margin_right,\n \"margin-top\": self.pdf_margin_top,\n \"margin-bottom\": self.pdf_margin_bottom,\n \"zoom\": self.pdf_output_zoom,\n # 'disable-smart-shrinking': '',\n },\n )\n\n return pdf_bytes", "title": "" }, { "docid": "d3ce3f507c7b7ece2d179cc3cf679f0c", "score": "0.5428952", "text": "def pdf_html(view):", "title": "" }, { "docid": "bcdf35dc11be71f9f351df5482af0ef2", "score": "0.53816795", "text": "def create_html(markdown_file):\n html_file = html_name_from_markdown(markdown_file)\n # Convert from Github-flavored Markdown to HTML\n cmd = f'pandoc -f gfm -o {html_file} {markdown_file}'\n # Use pandoc to generate HTML from Markdown\n process = subprocess.run(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n encoding=\"utf-8\",\n universal_newlines=True\n )\n return process", "title": "" }, { "docid": "2d47ebd79efdd4a750fb98bd79b27073", "score": "0.53535813", "text": "def write_html(html_text, filename):\n\ttmpfolder = os.path.join(CURDIR, '../../tmp')\n\tif not os.path.exists(tmpfolder):\n\t\tos.makedirs(tmpfolder)\n\ttagfile = os.path.join(tmpfolder, '%s.html' % filename)\n\twith codecs.open(tagfile, 'w') as f:\n\t\tsoup = BeautifulSoup(html_text, 'html.parser')\n\t\tf.write(soup.prettify())", "title": "" }, { "docid": "745ad3f26e45a974dbb18cc9552430ba", "score": "0.5352487", "text": "def pdf_response_xp(self, html, filename, download=False):\n disposition = f'filename=\"{filename}\"'\n if download:\n disposition = 'attachment;' + disposition\n\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = disposition\n\n pisa_status = pisa.CreatePDF(html, dest=response)\n\n if pisa_status.err:\n message = (f\"Unable to generate PDF from <hl><pre>{html}</pre>\")\n return HttpResponse(message, status=500)\n\n return response", "title": "" }, { "docid": "128f66c5ed05b808f175f6b85d9797e5", "score": "0.5317638", "text": "def write_pdf(\n 
inkscape: Path | str,\n pdf: Path | str,\n xml: EtreeElement,\n stylesheet: Path | str | None = None,\n) -> str:\n with NamedTemporaryFile(mode=\"wb\", delete=False) as svg_file:\n svg = write_svg(svg_file, xml, stylesheet)\n _ = write_pdf_from_svg(inkscape, svg, pdf)\n os.unlink(svg)\n return str(pdf)", "title": "" }, { "docid": "157bcc73badec34aaf0404662fd29f63", "score": "0.52880245", "text": "def test_get_convert_html_to_pdf_by_url(self):\n source_url = \"https://stallman.org/articles/anonymous-payments-thru-phones.html\"\n try:\n\n # Convert url to pdf\n res = self.api.get_convert_document_to_pdf_by_url(\n source_url, width=800, height=1000, left_margin=50, right_margin=100, top_margin=150,\n bottom_margin=200,\n folder=TestHelper.folder, storage=\"\"\n )\n self.assertTrue(isinstance(res, str), \"Error convert url to pdf\")\n\n # Move to test folder\n TestHelper.move_file(str(res), TestHelper.test_dst)\n except ApiException as ex:\n print(\"Exception\")\n print(\"Info: \" + str(ex))\n raise ex", "title": "" }, { "docid": "25c0027263b8baf8f6862720e05dd64f", "score": "0.5274468", "text": "def create_pdf(self) -> None:\n print(f\"making {self.pdf.title}\")\n if self.pdf.exists():\n self.pdf.path.unlink()\n\n canvas = Canvas(str(self.pdf.path))\n canvas.setAuthor(\"Luca Ercole\")\n\n for page in self.pages:\n drawing = page.drawing\n canvas.setPageSize((drawing.width, drawing.height))\n\n renderPDF.draw(drawing, canvas, 0, 0)\n\n for bookmark in page.bookmarks:\n exact_ts = bookmark.dtime\n title = exact_ts.replace(minute=30).isoformat()[:16]\n pos = bookmark.position\n\n canvas.bookmarkPage(key=str(exact_ts), fit=\"XYZ\", top=pos)\n # canvas.addOutlineEntry(title=title, key=title)\n canvas.addOutlineEntry(title=title, key=str(exact_ts))\n # canvas.drawString(0, pos, str(pos))\n\n canvas.showPage()\n\n canvas.save()\n print(\"Done\")", "title": "" }, { "docid": "2e3775d2948a79bc9b4f40bc3bb7ec60", "score": "0.526846", "text": "def render_pdf(\n template,\n file_,\n url_fetcher=staticfiles_url_fetcher,\n context=None,\n):\n context = context or {}\n\n html = get_template(template).render(context)\n HTML(\n string=html,\n base_url='not-used://',\n url_fetcher=url_fetcher,\n ).write_pdf(\n target=file_,\n )", "title": "" }, { "docid": "4a21aec5ddb7c8dd1fd3ecd2d307a0eb", "score": "0.5261352", "text": "async def convert_body(request: Request):\n data = await request.body()\n\n if not data:\n return Response(\"ERROR: No body\", status_code=400)\n\n with TemporaryDirectory() as tmpdirname:\n outfile = os.path.join(tmpdirname, \"out.html\")\n\n with open(outfile, \"w\") as fh:\n fh.write(data.decode(\"utf-8\"))\n\n bytes = await execute_wkhtmltopdf(outfile)\n\n return Response(bytes, media_type=\"application/pdf\")", "title": "" }, { "docid": "3ef788c9e61eef0953f8e61f0b1d26e5", "score": "0.52207357", "text": "def exportToHTMLFile(self, filePath):\n\n if os.path.exists(filePath) and os.path.isfile(filePath): #if the file path provided exists\n htmlDocument = self.htmlRenderer( self.noteEditor.toPlainText() ) #convert the note to HTML\n htmlFile = open(filePath, \"w\") #open the provided file path\n htmlFile.write(htmlDocument) #write the HTML content to the file\n htmlFile.close() #close the file after writing to it", "title": "" }, { "docid": "11f9b846961763ac38e7783a82354e1f", "score": "0.52157754", "text": "def get_to_file(self, *args, **kwargs):\n import os\n from tempfile import mkstemp\n fd, name = mkstemp()\n fp = os.fdopen(fd, 'wb')\n fp.write(self.get_html(*args, **kwargs))\n fp.close()\n 
return name", "title": "" }, { "docid": "b75b3ed28d39bb01fd607d1ba1a7a81d", "score": "0.52083796", "text": "def to_pdf(paper_dir, template_dir=None, use_shell_escape=False, flatten=False, keep_comments=False):\n template_dir = template_dir or scriptorium.CONFIG['TEMPLATE_DIR']\n\n paper_dir = os.path.abspath(paper_dir)\n if os.path.isdir(paper_dir):\n fname = paper_root(paper_dir)\n elif os.path.isfile(paper_dir):\n fname = paper_dir\n paper_dir = os.path.dirname(paper_dir)\n else:\n raise IOError(\"{0} is not a valid directory\".format(paper_dir))\n\n old_cwd = os.getcwd()\n if old_cwd != paper_dir:\n os.chdir(paper_dir)\n\n if not fname:\n raise IOError(\"{0} has no obvious root.\".format(paper_dir))\n\n #Convert all auxillary MMD files to LaTeX\n for mmd in _list_files(paper_dir):\n bname = os.path.basename(mmd).split('.')[0]\n with open(mmd, 'r') as mmd_fp, open('{0}.tex'.format(bname), 'w') as tex_fp:\n tex_fp.write(pymmd.convert(mmd_fp.read(), fmt=pymmd.LATEX, dname=mmd, ext=pymmd.SMART))\n\n pdf_cmd, new_env = _build_latex_cmd(fname, template_dir, use_shell_escape)\n\n bname = os.path.basename(fname).split('.')[0]\n if flatten:\n tname = '{0}.tex'.format(bname)\n fargs = '--keep-comments' if keep_comments else ''\n with tempfile.NamedTemporaryFile() as tmp:\n subprocess.check_call(['latexpand', '-o', tmp.name, tname, fargs], env=new_env)\n shutil.copyfile(tmp.name, tname)\n\n try:\n subprocess.check_output(pdf_cmd, env=new_env)\n if os.path.exists(os.path.join(paper_dir, '{0}.xdy'.format(bname))):\n subprocess.check_output(['makeglossaries', bname], env=new_env)\n except subprocess.CalledProcessError as exc:\n raise IOError(decodeCPEError(exc.output))\n\n _process_bib(fname, new_env)\n\n try:\n subprocess.check_output(pdf_cmd, env=new_env)\n except subprocess.CalledProcessError as exc:\n raise IOError(decodeCPEError(exc.output))\n try:\n subprocess.check_output(pdf_cmd, env=new_env)\n except subprocess.CalledProcessError as exc:\n raise IOError(decodeCPEError(exc.output))\n\n # Revert working directory\n if os.getcwd() != old_cwd:\n os.chdir(old_cwd)\n\n return os.path.join(paper_dir, '{0}.pdf'.format(bname))", "title": "" }, { "docid": "94509dbaa2f6a18c6dc1e05251e1c2bd", "score": "0.51938945", "text": "def render_to_pdf(template_src, context_dict={}):\n template = get_template(template_src)\n html = template.render(context_dict)\n result = BytesIO()\n pdf = pisa.pisaDocument(BytesIO(html.encode(\"UTF-8\")), result, link_callback=link_callback)\n if not pdf.err:\n return HttpResponse(result.getvalue(), content_type='application/pdf')\n return HttpResponse(\"Error Rendering PDF\", status=400)", "title": "" }, { "docid": "5ed75e370b200466ebdd2320004ff903", "score": "0.517688", "text": "def pdftotext(pdfdata):\n pdffout = tempfile.NamedTemporaryFile(suffix='.pdf')\n pdffout.write(pdfdata)\n pdffout.flush()\n\n textin = tempfile.NamedTemporaryFile(mode='r', suffix='.txt')\n tmptext = textin.name\n cmd = '/usr/bin/pdftotext -layout -enc UTF-8 \"%s\" \"%s\"' % (pdffout.name, tmptext)\n cmd = cmd + \" >/dev/null 2>&1\" # can't turn off output, so throw away even stderr yeuch\n os.system(cmd)\n\n pdffout.close()\n text = textin.read()\n textin.close()\n return text", "title": "" }, { "docid": "5ed75e370b200466ebdd2320004ff903", "score": "0.517688", "text": "def pdftotext(pdfdata):\n pdffout = tempfile.NamedTemporaryFile(suffix='.pdf')\n pdffout.write(pdfdata)\n pdffout.flush()\n\n textin = tempfile.NamedTemporaryFile(mode='r', suffix='.txt')\n tmptext = textin.name\n cmd = 
'/usr/bin/pdftotext -layout -enc UTF-8 \"%s\" \"%s\"' % (pdffout.name, tmptext)\n cmd = cmd + \" >/dev/null 2>&1\" # can't turn off output, so throw away even stderr yeuch\n os.system(cmd)\n\n pdffout.close()\n text = textin.read()\n textin.close()\n return text", "title": "" }, { "docid": "5ed75e370b200466ebdd2320004ff903", "score": "0.517688", "text": "def pdftotext(pdfdata):\n pdffout = tempfile.NamedTemporaryFile(suffix='.pdf')\n pdffout.write(pdfdata)\n pdffout.flush()\n\n textin = tempfile.NamedTemporaryFile(mode='r', suffix='.txt')\n tmptext = textin.name\n cmd = '/usr/bin/pdftotext -layout -enc UTF-8 \"%s\" \"%s\"' % (pdffout.name, tmptext)\n cmd = cmd + \" >/dev/null 2>&1\" # can't turn off output, so throw away even stderr yeuch\n os.system(cmd)\n\n pdffout.close()\n text = textin.read()\n textin.close()\n return text", "title": "" }, { "docid": "3c7276167a8be937af616b6973946ad0", "score": "0.51666015", "text": "def generate_pdf(tex_content, out_file):\n out_file = Path(out_file)\n with TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n tex_file = tmpdir/f'{out_file.stem}.tex'\n\n with tex_file.open('w') as f:\n f.write(tex_content)\n\n p = subprocess.run(['pdflatex', '-interaction', 'nonstopmode', f'-output-directory={tmpdir}', tex_file.name])\n\n shutil.copy(tmpdir/f'{tex_file.stem}.pdf', out_file)", "title": "" }, { "docid": "2890795c6ceb3173a18c489371b326f2", "score": "0.51657754", "text": "def create_pdf(pdf_data):\n pdf = BytesIO()\n pisa.CreatePDF(BytesIO(pdf_data.encode('utf-8')), pdf)\n return pdf", "title": "" }, { "docid": "f58fcf9f9894f89e5ddf6face1d4fedf", "score": "0.5107512", "text": "def create_html_and_punc(html_dir, punc_dir, plain_dir, stm_dir, rawpunc_dir, puncstm_dir, speaker):\n\n # Perform link reading and html conversion if raw_transcript does not exist\n try:\n if not os.path.exists(os.path.join(punc_dir, speaker + \".txt\")):\n # exceptions, some speakers result in error\n if speaker == 'KarenThompsonWalker_2012G':\n return\n # Convert stm into a plain file of transcription (the transcription does not have punctuation or capitalization)\n plain_file = get_plain_with_stm(plain_dir, stm_dir, speaker)\n # Get the html_file that contains transcript that is the closest to the plain file (if there is anything close)\n best_link = write_html_with_name(html_dir, plain_file, speaker)\n\n # Write a punctuated transcript into punc_dir\n raw_transcript = write_raw_punc_with_name(rawpunc_dir, best_link, speaker)\n\n write_clean_punc_with_name(punc_dir, rawpunc_dir, speaker)\n except:\n pass", "title": "" }, { "docid": "974e5f80464a485cd6ba7a2366181d30", "score": "0.5073336", "text": "def wk_pdf_file( sourcefile, destfile, wk_executable = \"wkhtmltopdf\", \n options = [], del_source = False,\n ):\n if not destfile.endswith('.pdf'):\n destfile += '.pdf'\n try:\n rc = call( [ wk_executable ] + options + [ sourcefile, destfile ] )\n os.remove( sourcefile ) \n return destfile\n except Exception, e:\n if settings.MODULE_ADMINLOG:\n AdminLog( by = 'gdjet.utils.pdf.wk_pdf_file',\n message = 'Error writing PDF File: %s\\n%s' % (destfile, e) ).save()\n else:\n raise\n return False", "title": "" }, { "docid": "edde6ac7b446727f6fc19ff87f4b5a72", "score": "0.5070975", "text": "def write_html(outdir, path, html):\n with open(os.path.join(outdir, path), 'w') as buf:\n buf.write(html)", "title": "" }, { "docid": "05e5404a4a2dac9f611c81d6fc10d174", "score": "0.50464195", "text": "def to_html(doc, output=\"/tmp\", style=\"dep\"):\n # generate filename from first six non-punct 
tokens\n file_name = \"test_extract.html\"\n html = displacy.render(doc, style=style, page=True) # render markup\n if output is not None:\n output_path = Path(output)\n if not output_path.exists():\n output_path.mkdir()\n output_file = Path(output) / file_name\n output_file.open(\"a\", encoding=\"utf-8\").write(html) # save to file\n print(\"Saved HTML to {}\".format(output_file))\n else:\n print(html)", "title": "" }, { "docid": "0c6a0bec22c1297e3af5fb0e9516c418", "score": "0.50404537", "text": "def outline_pdf2html(pdf_fp,\n fp_dir_pdf2htmlEx=\"C:\\\\pdf2htmlEX\",\n fp_outdir=\"C:\\\\CloudStation\\\\CBitter_-_PHD\\\\CloudStation\\\\PHDLiterature\\\\AHP_scopus\\\\html\",\n outline_file_name=\"outline.html\",\n copy_to_outline_file=True,\n verbose=False):\n assert (isinstance(pdf_fp, str))\n assert (isinstance(fp_outdir, str))\n assert (isinstance(fp_dir_pdf2htmlEx, str))\n assert (isinstance(outline_file_name, str))\n\n if not os.path.isdir(fp_outdir):\n raise ValueError(\"outline_pdf2html - fp_outdir does not exist?\")\n\n if not os.path.isdir(fp_dir_pdf2htmlEx):\n raise ValueError(\"outline_pdf2html - pdf2htmlex does not exist?\")\n\n if not os.path.isfile(pdf_fp):\n raise ValueError(\"outline_pdf2html - pdf file does not exist?\")\n\n if not outline_file_name:\n raise ValueError(\"outline_pdf2html - outline file name not provided\")\n\n outline_fp = os.path.join(fp_outdir, outline_file_name)\n\n # we generate the content into a temp dir\n # then take the outline file and stuff its contents into the outline_file\n pdf_fname, _ = path.splitext(path.basename(pdf_fp))\n outline_fname= \"%s.outline\" % pdf_fname\n\n ret_code = pdf2html(fp_pdf=pdf_fp,\n generated_html_filename=\"generated.html\",\n fp_outdir=fp_outdir,\n splitPages=False,\n optimizeText=False,\n processNonText=True,\n preprocessToPS=False,\n options={\n \"process-outline\": \"1\"\n },\n debug=verbose)\n\n tmp_outline_fp = os.path.join(fp_outdir, outline_fname)\n\n if copy_to_outline_file:\n shutil.copy(tmp_outline_fp, outline_fp)\n else:\n outline_fp = tmp_outline_fp\n\n if verbose:\n print(\"outline html file: %s (%s)\" % (outline_fp, ret_code))\n\n if ret_code:\n return outline_fp\n else:\n return None", "title": "" }, { "docid": "8576b9bbf7ed2ae4c085a4e241fbf49b", "score": "0.50296617", "text": "def test_to_pdf_create_file(converter_instance):\n converter_instance.to_pdf()\n assert (os.path.exists(os.path.join(converter_instance.folder_path, \"news.pdf\")) is True)", "title": "" }, { "docid": "c80657ba20afb25195013e4407941c6e", "score": "0.5018169", "text": "async def execute_wkhtmltopdf(uri: str) -> bytes:\n cmd = [\n \"wkhtmltopdf\",\n \"--log-level\",\n \"none\",\n uri,\n \"-\",\n ]\n return check_output(cmd)", "title": "" }, { "docid": "5fe4e36f80d325aad7df8ce8fb1bfd5e", "score": "0.50049746", "text": "def generate_pdf(template_name, context, params, request=None, fix_static_paths=True, stream=True):\n html = render_to_string(template_name, context=context, request=request)\n if fix_static_paths:\n replacement = get_absolute_static_path(request)\n html = html.replace(settings.STATIC_URL, replacement)\n\n r = requests.post(\n settings.PDF_SERVICE_ENDPOINT,\n json={\n \"html\": html\n },\n params=params,\n headers={\n \"x-api-key\": settings.PDF_SERVICE_TOKEN,\n },\n stream=stream\n )\n r.raise_for_status()\n return r", "title": "" }, { "docid": "8097d95c1ec5b77121f5c8291341b8b0", "score": "0.498684", "text": "def test_to_html_create_file(converter_instance):\n converter_instance.to_html()\n assert 
(os.path.exists(os.path.join(converter_instance.folder_path, \"news.html\")) is True)", "title": "" }, { "docid": "2053447ac98da626c60de7b5cd15511f", "score": "0.49583066", "text": "def store_html(self, path: str = f\"html{os.sep}result.html\", allow_header_links: bool = False,\n skip_tables: bool = False):\n with open(path, \"w\") as result_file:\n result_file.write(self.to_html(allow_header_links=allow_header_links, skip_tables=skip_tables))\n result_file.write(\"\\n\")\n result_file.flush()", "title": "" }, { "docid": "d5264c302886e9a3626ccb5e033f72eb", "score": "0.49577415", "text": "def render_to_pdf(template_src, context_dict):\n template = get_template(template_src)\n context = Context(context_dict)\n html = template.render(context)\n result = StringIO.StringIO()\n\n pdf = pisa.pisaDocument(StringIO.StringIO(html.encode(\"UTF-8\")),\n dest=result,\n encoding='UTF-8',\n link_callback=fetch_resources)\n if not pdf.err:\n response = HttpResponse(result.getvalue(),\n mimetype='application/pdf')\n\n return response\n\n return HttpResponse('We had some errors<pre>%s</pre>' % escape(html))", "title": "" }, { "docid": "a743a40fa4293ed78b9314c281e88473", "score": "0.49230415", "text": "def create_txt():\n\t(_, week, year) = get_date()\n\tpdf_path = get_pdf_path()\n\ttxt_path = BASE_TXT_PATH.format(year, week)\n\tsubprocess.call(['pdftotext', pdf_path, txt_path])", "title": "" }, { "docid": "1a07e12ddc82feb7cb48115b74ab5ea1", "score": "0.49225733", "text": "def __init__(\n self,\n encoding=\"utf-8\",\n pdf_margin_left=\"0\",\n pdf_margin_right=\"0\",\n pdf_margin_top=\"0\",\n pdf_margin_bottom=\"0\",\n pdf_output_zoom=\"1.6\",\n pdf_input_zoom=\"1.0\",\n pdftohtml_path=\"pdftohtml\",\n wkhtmltopdf_path=\"wkhtmltopdf\",\n ):\n super().__init__()\n self.encoding = encoding\n self.html_format = HtmlFormat()\n self.pdf_margin_left = pdf_margin_left\n self.pdf_margin_right = pdf_margin_right\n self.pdf_margin_top = pdf_margin_top\n self.pdf_margin_bottom = pdf_margin_bottom\n self.pdf_output_zoom = pdf_output_zoom\n self.pdf_input_zoom = pdf_input_zoom\n self.pdftohtml_path = pdftohtml_path\n self.wkhtmltopdf_path = wkhtmltopdf_path", "title": "" }, { "docid": "e4651e9c583a34c952eaa6ec4ab5831b", "score": "0.49170125", "text": "def saveDMP(event=None):\n f = filedialog.asksaveasfile(mode='w', defaultextension=\".pdf\")\n if f is None: # asksaveasfile return `None` if dialog closed with \"cancel\".\n return\n\n html = create_html(template=selectedTemplate, output=f.name.replace(\".pdf\", \"\"))\n print(\"Stored HTML successfully as \" + str(html))\n pdf = create_pdf(html=html)\n print(\"Stored PDF successfully as \" + str(pdf))", "title": "" }, { "docid": "4769277cd3da1e5a1e6572ef0b07a02b", "score": "0.49056256", "text": "def make_pdf():\n d = datetime.utcnow()\n report_date = d.strftime(\"%Y%m%d%H%M%S\")\n report_name = \"report-\" + report_date + \".pdf\"\n pdf = canvas.Canvas(report_name, pagesize=letter)\n pdf.setFont(\"Courier\", 50)\n pdf.setStrokeColorRGB(1, 0, 0)\n pdf.setFillColorRGB(1, 0, 0)\n pdf.drawCentredString(letter[0] / 2, inch * 7, 'FANCY REPORT')\n pdf.drawCentredString(letter[0] / 2, inch * 6, 'CLASSIFIED')\n pdf.drawCentredString(letter[0] / 2, inch * 5, 'For Your Eyes Only')\n pdf.setFont(\"Courier\", 20)\n pdf.drawCentredString(letter[0] / 2, inch * 4, 'Created: ' + d.strftime(\"%B %d, %Y %H:%M:%S\"))\n\n pdf.showPage()\n pdf.save()\n return report_name", "title": "" }, { "docid": "5b3f9db2fbba9ad8e8cc088581170c69", "score": "0.49039894", "text": "def _get_report_pdf(self, 
report):\n pdf = StringIO()\n pisa.CreatePDF(StringIO(self._get_render_template(report)), pdf)\n return pdf", "title": "" }, { "docid": "12b3953e593046ba7db9d6531441a611", "score": "0.48923507", "text": "def _merge_pdf(self, documents):\n writer = PdfFileWriter()\n streams = [] # We have to close the streams *after* PdfFilWriter's call to write()\n for document in documents:\n pdfreport = file(document, 'rb')\n streams.append(pdfreport)\n reader = PdfFileReader(pdfreport)\n for page in range(0, reader.getNumPages()):\n writer.addPage(reader.getPage(page))\n\n merged_file_fd, merged_file_path = tempfile.mkstemp(suffix='.pdf', prefix='report.merged.tmp.')\n with closing(os.fdopen(merged_file_fd, 'w')) as merged_file:\n writer.write(merged_file)\n\n for stream in streams:\n stream.close()\n\n return merged_file_path", "title": "" }, { "docid": "6e9b3441ef63a23768c73ceb77384fe9", "score": "0.48896754", "text": "def generate_pdf(output_dir='.', doujinshi_obj=None, rm_origin_dir=False):\n if doujinshi_obj is not None:\n doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)\n pdf_filename = os.path.join(\n os.path.join(doujinshi_dir, '..'),\n '{}.pdf'.format(doujinshi_obj.filename)\n )\n else:\n pdf_filename = './doujinshi.pdf'\n doujinshi_dir = '.'\n\n file_list = os.listdir(doujinshi_dir)\n file_list.sort()\n\n logger.info('Writing PDF file to path: {}'.format(pdf_filename))\n with open(pdf_filename, 'wb') as pdf_f:\n full_path_list = (\n [os.path.join(doujinshi_dir, image) for image in file_list]\n )\n pdf_f.write(img2pdf.convert(full_path_list))\n\n if rm_origin_dir:\n shutil.rmtree(doujinshi_dir, ignore_errors=True)\n\n logger.log(15, 'PDF file has been written to \\'{0}\\''.format(doujinshi_dir))", "title": "" }, { "docid": "2b8ab0b323e14a8c6b5bd2340eeb3416", "score": "0.4878805", "text": "def outputPDF(self, file, writer):\n pass", "title": "" }, { "docid": "5bf1cd5194512ca2b1d7b1511b805b7c", "score": "0.48548716", "text": "def create_raw_html(year, house, bill, html):\n raw_files_folder = os.path.join(default_path,'bill_files', 'raw', str(year))\n if not os.path.exists(raw_files_folder):\n os.mkdir(raw_files_folder)\n if check_bill(html):\n file = open(os.path.join(raw_files_folder, '{}{}{}.html'.format(year, house, bill)), 'w',\n encoding='utf-8')\n file.write(html)\n file.close()\n return html\n else:\n return None", "title": "" }, { "docid": "97e3e1c72d8531045cbe4b8a97734306", "score": "0.4854084", "text": "def make_send_pdf(self):\n slim_content = edit_content(page.get_content(page.get_summary()), \"Weblinks\", \"Literatur\", \"Einzelnachweise\",\n \"Sonstiges\", \"Filme\", \"Auszeichnungen\", \"Filmdokumentationen\", \"Anmerkungen\",\n \"Biografien\",\n \"Weitere Texte\") # cuts out these sections from the article\n\n to_mail = '[email protected]' # testing email\n try:\n print(\"Progress:\")\n # create the word page\n add_title()\n print(\"Added title...\")\n add_logo()\n print(\"Added image...\")\n add_summary()\n print(\"Added summary...\")\n add_content(slim_content)\n print(\"Added content...\")\n document.save(get_doc_name() + \".docx\")\n print(\"Saving file...\")\n delete_pic_in_outer_folder()\n word_to_pdf.docx_to_pdf(get_doc_name() + \".docx\", get_doc_name())\n move_word_files()\n print(\"Sending email...\")\n email_sender.send_email(to_mail, get_doc_name() + \".pdf\", str(page.get_title())) # uncomment to send email\n word_to_pdf.move_pdf_to_folder()\n print(\"===Finished===\")\n except PermissionError:\n print(\n \"Bitte schließen sie Microsoft Word und 
versuchen sie es erneut.\") # \"Please close Microsoft Word and try again\"", "title": "" }, { "docid": "9f8dfd98ea039984867401866adc896e", "score": "0.48486796", "text": "def convert_html(converter, build_path: pathlib.Path, settings: Settings) -> None:\n html_build_filename = build_path / settings[\"OutputFormats\"][\"html\"][\"OutputFile\"]\n html_builder = HTMLBuilder(html_build_filename, pathlib.Path(\"\"))\n converter.convert(html_builder)\n copy_output(html_build_filename, settings, \"html\")", "title": "" }, { "docid": "ce203683853ee854a16b6684b592734b", "score": "0.48462394", "text": "def render_html_to_file(filename: str, output: str):\n logging.info(\"Writing html to {}\".format(filename))\n\n with open(filename, \"w\", encoding=\"utf8\") as fh:\n fh.write(output)\n\n return filename", "title": "" }, { "docid": "19fd1ea086e1d74c28a5d7728308b60b", "score": "0.4846071", "text": "def write(self):\n html = self.render()\n\n if self.file_type == 'pdf':\n self.write_pdf(html)\n else:\n with codecs.open(self.destination_file, 'w',\n encoding='utf_8') as outfile:\n outfile.write(html)", "title": "" }, { "docid": "db50bccef34fa2cde35523c7a57ff994", "score": "0.4844008", "text": "def generate_pdf(self):\n filepath = os.path.join(self.m_output_dir, self.m_run_prefix+\"-summary\")\n self.m_doc.generate_pdf(clean_tex=False, filepath=filepath)", "title": "" }, { "docid": "8fcc29f1571cb3bf5ee3b46f1255c3a4", "score": "0.48339328", "text": "def write_graphs_to_pdf(graphs, out_dir=Path('./')):\n out_dir.mkdir(parents=True, exist_ok=True)\n print('Writing graphs to pdf an png files...')\n for graph_file in tqdm(graphs):\n graphs[graph_file].write_pdf(out_dir / (graph_file + '.pdf'))\n graphs[graph_file].write_png(out_dir / (graph_file + '.png'))", "title": "" }, { "docid": "59d771725ea870dc6728128ba359bdb1", "score": "0.48289427", "text": "def create_pdf(catalog, template):\n RML_DIR = 'rml'\n templateName = os.path.join(RML_DIR, template)\n template = preppy.getModule(templateName)\n namespace = {\n 'products':catalog,\n 'RML_DIR': RML_DIR,\n 'IMG_DIR': 'img'\n\n }\n rml = template.getOutput(namespace,quoteFunc=preppy.stdQuote)\n open(os.path.join(DATA_DIR,'latest.rml'), 'wb').write(asBytes(rml))\n buf = BytesIO()\n rml2pdf.go(asBytes(rml), outputFileName=buf)\n return buf.getvalue()", "title": "" }, { "docid": "d1968c959db56a6186b5a0d52bcb0d5c", "score": "0.48108557", "text": "def getPDF(self, params, template):\n output = template.render(Context(params))\n byteString = weasyprint.HTML(string = output).write_pdf()\n pdfString = base64.b64encode(byteString)\n return pdfString", "title": "" }, { "docid": "58aa2decd2a6e9c1e9094b492d5e8937", "score": "0.4808302", "text": "def pdf_to_text(filename,prefix=\"\"):\n global pdftotextProgram,pdftotextOptions\n\n notfound = \"\"\"\\\nCould not find '%s', which is needed for pdf to text conversion.\n%s is part of the 'xPdf' suite of programs, obtainable at:\n http://www.foolabs.com/xpdf/\n\"\"\" % (pdftotextProgram,pdftotextProgram)\n (fout,output) = apply_command_temp(pdftotextProgram,pdftotextOptions,notfound,filename,prefix,\".txt\")\n return fout", "title": "" }, { "docid": "7a568e4e033abbee304191ba90f8f4e2", "score": "0.4804926", "text": "def _create_document(self, link, html, title):\n self.chunk.compute_file_header_value(len(self.chunk.header))\n self.chunk.create_document(link, html, title)", "title": "" }, { "docid": "78382dc1d4dff5fd655acb2923b6454f", "score": "0.47853962", "text": "def temp_html_output(self, plot_object, backend=None):\n 
save_plot(\n \"temp_plot\", plot_object, self.rootdir, save_as=[\"html\"], backend=backend\n )\n return os.path.join(self.rootdir, \"html\", \"temp_plot.html\")", "title": "" }, { "docid": "16a129296e4028a73355f299ab0f62b4", "score": "0.47735542", "text": "def gera_pdf(imgdir, rodada, outpath=os.getcwd(),**kwargs):\n\n # TODO: Organizar arquivos em grupos de pdfs, config aqui dentro, texto descritivo vem como arg\n # TODO: Colocar para receber exemplos de dados para escrever nos textos. Receber texto exemplo pronto de fora\n # TODO: colocar exemplo como caption da tabela com styler do pandas\n\n textos ={\"cedidos_#{rodada:d}\": \"PONTOS CEDIDOS: Média dos pontos cedidos pelas equipes aos adversários no \"\n \"Cartola em {ano}, considerando o mando de campo. Em destaque, as equipes com\"\n \"desvio acima padrão.\",\n \"cedidos(semMando)_#{rodada:d}\": \"PONTOS CEDIDOS SEM MANDO: Média dos pontos cedidos pelas equipes aos adversários no \"\n \"Cartola em {ano}, considerando todos seus jogos SEM distinção de mando de campo. Em destaque, \"\n \"as equipes com maiores distância para média do campeoanto.\",\n \"liqCed_#{rodada:d}\": \"PONTOS CEDIDOS LÍQUIDOS (EXCLUSIVO): Média dos pontos cedidos pelas equipes \"\n \"aos adversários no Cartola em {ano}, tirando da conta as pontuações de gols, \"\n \"assistências e SG. A tabela não considera o mando de campo. Em destaque, as \"\n \"equipes com desvio acima padrão.\",\n \"ptosPos_#{rodada:d}\": \"PONTOS GANHOS (EXCLUSIVO): Média de pontos ganhos pelas equipes, separados por \"\n \"posição, no Cartola FC. A tabela não faz a distinção de mando de campo.\",\n \"apostas_#{rodada:d}\": \"CASAS DE APOSTAS (PRIVADO): Análise da probabilidade de vitória, SG e gols dos \"\n \"confrontos nas principais casas de apostas.\",\n \"rbCed_#{rodada:d}\": \"DSs e DDs CEDIDAS: Média dos desarmes e defesas difíceis (goleiros) cedidos pelas \"\n \"equipes aos adversários no Cartola em {ano}, sem considerar o mando de campo. Em destaque, \"\n \"as equipes com desvio acima do padrão.\",\n \"desempenhoClubes_#{rodada:d}\": \"DESEMPENHO DOS TIMES: Desempenho dos times no Cartola FC em {ano}, sem levar em \"\n \"consideração o mando de campo. As informações são a média de: SG, Gols Pró, Gols\"\n \" Sofridos e SG Cedidos.\",\n 'goleiros_#{rodada:d}': \"GOLEIROS (EXCLUSIVO): Médias por jogo dos goleiros no Cartola FC em {ano}. Todos os valores\"\n \" em média por jogo. A tabela conta com distinção de mando de campo.\",\n 'GS_Forma_#{rodada:d}': \"GOLS SOFRIDOS POR FORMA: Tabela com os tipos de gols sofridos por equipe do Campeonato \"\n \"Brasileiro {ano}. A tabela conta todos os jogos (casa e fora).\",\n 'GS_posicao_#{rodada:d}': \"GOLS SOFRIDOS POR POSIÇÃO (EXCLUSIVO): Tabela com os gols sofridos por equipe do Campeonato \"\n \"Brasileiro {ano}, separados pela posição do jogador adversário que marcou o gol. 
A tabela conta\"\n \" todos os jogos (casa e fora).\",\n }\n \n doc=Document(\".\\\\pytabelas\\\\socios.docx\")\n p = doc.add_paragraph()\n r = p.add_run()\n font = r.font\n font.name=\"Industry-Black\"\n font.size = Pt(36)\n p.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER\n r.add_text(\"ARQUIVO DOS SÓCIOS RODADA \" +str(rodada))\n\n for prod, decr in textos.items():\n r.add_break(WD_BREAK.PAGE)\n p = doc.add_paragraph()\n r = p.add_run()\n font = r.font\n font.name=\"Industry-Black\"\n font.size = Pt(24)\n r.add_text(decr.format(rodada=rodada, **kwargs))\n p = doc.add_paragraph()\n r = p.add_run()\n r.add_picture(os.path.join(imgdir, prod.format(rodada=rodada, **kwargs)+\".jpg\"), width= Inches(6.1))\n \n \n doc.save(os.path.join(outpath, 'socios #{}'.format(rodada)+'.docx'))", "title": "" }, { "docid": "14a58f574464d90e4ca06b82dbfe70c6", "score": "0.47671464", "text": "def pickle_pdf_xml(arguments):\n file = arguments[0]\n path = arguments[1]\n try:\n xml = parser.from_file(file, xmlContent = True)\n replace_string = file.split('\\\\')[-1].replace('.pdf', '')\n save_string = path + replace_string + '.pkl'\n print(save_string)\n pickle.dump(xml, open(save_string, \"wb\"))\n except: \n return file", "title": "" }, { "docid": "d279f1b01da67e0f559cfdfe5755a989", "score": "0.47637767", "text": "def word_to_pdf(self, file, path):\n convert(file, path + \"\\\\\" + \"resultat\")", "title": "" }, { "docid": "f2c4d3d87cded7336b01ba35a9322859", "score": "0.4760178", "text": "def WriteRedirectHtmlFile(from_html_path, to_html_path):\n to_html_relative_path = GetRelativePathToDirectoryOfFile(\n to_html_path, from_html_path)\n content = (\"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <!-- HTML meta refresh URL redirection -->\n <meta http-equiv=\"refresh\" content=\"0; url=%s\">\n </head>\n </html>\"\"\" % to_html_relative_path)\n with open(from_html_path, 'w') as f:\n f.write(content)", "title": "" }, { "docid": "97670acd16ffb183fc86fc028aa73b19", "score": "0.47423863", "text": "def genpdf(source, des, rm=False):\n command = [\"pdflatex\", \"-output-directory\", des, source]\n subprocess.call(command)\n if rm:\n from os import listdir, remove\n from os.path import isfile, join, splitext, basename\n name = splitext(basename(source))[0]\n for f in listdir(des):\n if isfile(join(des, f)):\n fname, ext = splitext(f)\n if fname == name:\n if ext == '.aux' or ext == '.log' or ext == \".tex\":\n remove(os.path.join(des, f))", "title": "" }, { "docid": "3ce87e257d73c517742749a739b2d386", "score": "0.47367832", "text": "def test_put_convert_html_to_image(self):\n # Already in storage\n name = \"test1.html\"\n result_name = \"putHtmlToImg.zip\"\n test_folder = \"HtmlTestDoc\"\n test_out_path = test_folder + \"/\" + result_name\n\n try:\n # Convert document to image in storage\n self.api.put_convert_document_to_image(\n name, out_path=test_out_path, out_format=\"tiff\", width=800, height=1000, left_margin=50, right_margin=100,\n top_margin=150, bottom_margin=200, resolution=300, folder=test_folder, storage=\"\")\n\n # Download result\n res = TestHelper.download_file(test_out_path)\n save_file = TestHelper.test_dst + result_name\n TestHelper.move_file(res, save_file)\n except ApiException as ex:\n print(\"Exception\")\n print(\"Info: \" + str(ex))\n raise ex", "title": "" }, { "docid": "3ddf1b24f730594f35e8f63ad7e9be6f", "score": "0.46994278", "text": "def pdf_to_text_using_tikka(filepath, filename, output_path):\n output_file_name = filename + '_tikka.txt'\n output_file_path = output_path + 
output_file_name\n parsed_file = parser.from_file(filepath) # Parsing PDF file\n content = parsed_file['content'] # Extracting content\n\n with open(output_file_path, 'w') as f:\n res = f.write(content.strip())\n\n return res", "title": "" }, { "docid": "60d1d2177e14741fa3d0ab824dc37873", "score": "0.4697682", "text": "def html_str_to_file(text, filename):\n output = open(os.path.join(outputs_path,filename), \"w\", encoding=\"utf-8\")\n output.write(text)\n output.close()\n return", "title": "" }, { "docid": "82efd033b9b5ec1cba6c756bac314bc6", "score": "0.46915892", "text": "def genPDF(filename):\n newlines = 'echo -e \"' + '\\\\n' * 100 + '\"' # Hack to continue when pdflatex halts.\n cmd = newlines + \" | pdflatex \" + filename + \" --shell-escape 2>/dev/null >/dev/null\"\n os.system(cmd)", "title": "" }, { "docid": "a0dcfed2234da2d5402137e36516e3c8", "score": "0.46886522", "text": "def test_put_convert_html_to_markdown(self):\n # Already in storage\n name = \"test_md.html\"\n result_name = \"putConvertToMarkdownPy.md\"\n test_folder = \"HtmlTestDoc\"\n test_out_path = test_folder + \"/\" + result_name\n\n try:\n # Convert document to markdown in storage\n self.api.put_convert_document_to_markdown(\n name, out_path=test_out_path, use_git=\"false\", folder=test_folder, storage=\"\")\n\n # Download result\n res = TestHelper.download_file(test_out_path)\n save_file = TestHelper.test_dst + result_name\n TestHelper.move_file(res, save_file)\n except ApiException as ex:\n print(\"Exception\")\n print(\"Info: \" + str(ex))\n raise ex", "title": "" }, { "docid": "410ece57c4f4db8b0ebe89f1cb2ac99b", "score": "0.46829975", "text": "def _convert_to_pdf(\n self,\n file_content: typing.IO[bytes],\n input_extension: str,\n cache_path: str,\n output_filepath: str,\n mimetype: str,\n ) -> BytesIO:\n\n raise NotImplementedError", "title": "" }, { "docid": "7ef313c3078e29dc10b3d0de64a54aec", "score": "0.4661873", "text": "def save_as_pdf(self, text: str, file_name: str = \"Podcast.pdf\"):\n text_encoded = text.encode('latin-1', 'replace').decode('latin-1')\n text = text_encoded.replace(\"?\", \".\").replace(\"[\", \"\").replace(\"]\", \"\").replace(\"'\", \"\")\n\n pdf = FPDF(orientation='P', unit='mm', format='A4')\n pdf.set_auto_page_break(True, margin=10)\n pdf.add_page()\n pdf.set_font(family='Courier', size=12)\n splitter = text.split('\\n')\n\n for line in splitter:\n lines = textwrap.wrap(line, 75)\n if len(lines) == 0:\n pdf.ln()\n for wrap in lines:\n pdf.cell(0, 4, wrap, ln=1)\n pdf.output(file_name, 'F')", "title": "" }, { "docid": "a15e60e99e381a95edfceb96a7d8f65f", "score": "0.46592584", "text": "def test_post_merge_html_template(self):\n template_name = \"HtmlTemplate.html\"\n template_data = \"XmlSourceData.xml\"\n result_name = \"PostMergeHtmlTemplatePython.html\"\n options = \"\"\n folder = \"HtmlTestDoc\"\n storage = \"\"\n data_file = TestHelper.test_src + template_data\n out_path = folder + \"/\" + result_name\n\n try:\n # Convert document to image\n self.api.post_merge_html_template(template_name=template_name, out_path=out_path, file=data_file,\n options=options, folder=folder, storage=storage)\n # Download result\n res = TestHelper.download_file(out_path)\n save_file = TestHelper.test_dst + result_name\n TestHelper.move_file(res, save_file)\n except ApiException as ex:\n print(\"Exception\")\n print(\"Info: \" + str(ex))\n raise ex", "title": "" }, { "docid": "a4836404b9518e2c2482df8997b18135", "score": "0.46539325", "text": "def render_to_pdf(request, template_name, renderdict, 
filename):\n rendered_html = get_template(template_name).render(Context(renderdict))\n\n response = HttpResponse(content_type=\"application/pdf\")\n response['Content-Disposition'] = 'attachment; filename=\"' + filename + '\"'\n\n success = pisa.CreatePDF(rendered_html, dest=response)\n\n if not success:\n return HttpResponseServerError()\n return response", "title": "" }, { "docid": "3127f91de047cec24c9fdc3fef4aaf23", "score": "0.46349818", "text": "def page_extractor_pdftotext(file_name):\n \n import subprocess\n tmpfile = tempfile.NamedTemporaryFile()\n subprocess.call('pdftotext -l 3 \"%s\" \"%s\"' % (file_name, tmpfile.name), shell=True)\n text = tmpfile.read()\n return text", "title": "" }, { "docid": "0a4aa23649bb5e1bffbb345cd3752302", "score": "0.4619672", "text": "def generate_main_html(output_dir='./', html=False):\n\n image_html = ''\n\n main = readfile('viewer/main.html')\n css = readfile('viewer/main.css')\n js = readfile('viewer/main.js')\n\n element = '\\n\\\n <div class=\"gallery-favorite\">\\n\\\n <div class=\"gallery\">\\n\\\n <a href=\"./{FOLDER}/index.html\" class=\"cover\" style=\"padding:0 0 141.6% 0\"><img\\n\\\n src=\"./{FOLDER}/{IMAGE}\" />\\n\\\n <div class=\"caption\">{TITLE}</div>\\n\\\n </a>\\n\\\n </div>\\n\\\n </div>\\n'\n\n os.chdir(output_dir)\n doujinshi_dirs = next(os.walk('.'))[1]\n\n for folder in doujinshi_dirs:\n if html is True and folder[0] == '[':\n class Foo:\n pass\n foo = Foo()\n foo.name = folder\n foo.filename = folder\n generate_html(doujinshi_obj=foo)\n\n files = os.listdir(folder)\n files.sort()\n\n if 'index.html' in files:\n logger.info('Add doujinshi \\'{}\\''.format(folder))\n else:\n continue\n\n image = files[0] # 001.jpg or 001.png\n if folder is not None:\n title = folder.replace('_', ' ')\n else:\n title = 'nHentai HTML Viewer'\n\n image_html += element.format(FOLDER=folder, IMAGE=image, TITLE=title)\n if image_html == '':\n logger.warning('No index.html found, --gen-main paused.')\n return\n try:\n data = main.format(STYLES=css, SCRIPTS=js, PICTURE=image_html)\n if sys.version_info < (3, 0):\n with open('./main.html', 'w') as f:\n f.write(data)\n else:\n with open('./main.html', 'wb') as f:\n f.write(data.encode('utf-8'))\n shutil.copy(os.path.dirname(__file__)+'/viewer/logo.png', './')\n set_js_database()\n logger.log(\n 15, 'Main Viewer has been written to \\'{0}main.html\\''.format(output_dir))\n except Exception as e:\n logger.warning('Writing Main Viewer failed ({})'.format(str(e)))", "title": "" }, { "docid": "f4360982e2178d4069d811379b54c9fd", "score": "0.4611201", "text": "def image_to_pdf(self, file):\n image1 = Image.open(file)\n im1 = image1.convert('RGB')\n im1.save(path + \"\\\\\" + \"resultat\" + \"\\\\\" + filename + \".pdf\")", "title": "" }, { "docid": "e9ece8f5929ff929939ee07bd37c6e99", "score": "0.46092772", "text": "def merge_temporary_pdfs(self,\n description: str=\"No description\",\n ) -> None:\n\n temporary_pdfs = self.find_temporary_pdfs()\n\n filename = self.get_filename()\n save_path = self.get_save_path(filename)\n\n self._merge_and_save(temporary_pdfs, save_path)\n\n self.temporary_counter.zero()\n for temp_pdf in temporary_pdfs:\n os.remove(temp_pdf)\n\n pdf_hash = self.get_file_hash(save_path)\n\n self.save_to_database(\n pdf_hash=pdf_hash,\n filename=filename,\n description=description\n )", "title": "" }, { "docid": "5d3ab1c5e0d594dba140df15b8d6c922", "score": "0.45995933", "text": "def build_pdf():\n print(\"=== Build PDF Docs from ReST Files ===\")\n subprocess.check_call([\n \"sphinx-build\",\n 
\"-b\", \"latex\", # Builder name. TODO: accept as arg to setup.py.\n \"-d\", joinpath(SPHINX_BUILD, \"doctrees\"),\n SPHINX_SOURCE,\n joinpath(SPHINX_BUILD, \"latex\")\n ])\n\n LATEXDIR = joinpath(SPHINX_BUILD, \"latex\")\n\n def pdflatex():\n subprocess.call([\"pdflatex\", \"SasView.tex\"], cwd=LATEXDIR)\n\n # Note: pdflatex requires multiple passes to resolve cross-references correctly\n pdflatex()\n pdflatex()\n pdflatex()\n subprocess.call([\"makeindex\", \"-s\", \"python.ist\", \"SasView.idx\"], cwd=LATEXDIR)\n pdflatex()\n pdflatex()\n\n print(\"=== Copy PDF to HTML Directory ===\")\n source = joinpath(LATEXDIR, \"SasView.pdf\")\n target = joinpath(SASVIEW_DOC_TARGET, \"SasView.pdf\")\n shutil.copyfile(source, target)", "title": "" }, { "docid": "32992730a290607665ebdbd18af5310c", "score": "0.45934606", "text": "def create_html(template, output):\n templateLoader = jinja2.FileSystemLoader(searchpath=\"../templates\")\n templateEnv = jinja2.Environment(loader=templateLoader)\n TEMPLATE_FILE = \"template_\" + template.lower() + \".html\"\n real_template = templateEnv.get_template(TEMPLATE_FILE)\n\n if template == \"FWF\":\n outputText = real_template.render(data_officer=dataOfficerText.get(1.0,END).replace(\"\\n\", \"<p />\"),\n data_characteristics=dataDescriptionText1.get(1.0,END).replace(\"\\n\", \"<p />\"),\n meta_standards=docMetaText1.get(1.0,END).replace(\"\\n\", \"<p />\"),\n data_documentation=docMetaText2.get(1.0,END).replace(\"\\n\", \"<p />\"),\n data_quality=docMetaText3.get(1.0,END).replace(\"\\n\", \"<p />\"),\n data_sharing=dataSharingText1.get(1.0,END).replace(\"\\n\", \"<p />\"),\n data_storage=dataSharingText2.get(1.0,END).replace(\"\\n\", \"<p />\"),\n legal=ethicalText1.get(1.0,END).replace(\"\\n\", \"<p />\"),\n ethical=ethicalText2.get(1.0,END).replace(\"\\n\", \"<p />\"),\n generation=generatedDataText.get(1.0,END).replace(\"\\n\", \"<p />\"))\n else:\n global title\n outputText = real_template.render(title=title,\n abstract=abstractText.get(1.0,END).replace(\"\\n\", \"<p />\"),\n data_summary=dataDescriptionText1.get(1.0,END).replace(\"\\n\", \"<p />\"),\n fair_findable=fairText1.get(1.0,END).replace(\"\\n\", \"<p />\"),\n fair_accessible=fairText2.get(1.0,END).replace(\"\\n\", \"<p />\"),\n fair_interoperable=fairText3.get(1.0,END).replace(\"\\n\", \"<p />\"),\n fair_reuse=fairText4.get(1.0,END).replace(\"\\n\", \"<p />\"),\n resource=resourceText.get(1.0,END).replace(\"\\n\", \"<p />\"),\n security=securityText.get(1.0,END).replace(\"\\n\", \"<p />\"),\n ethical=ethicalText.get(1.0,END).replace(\"\\n\", \"<p />\"),\n other=otherText.get(1.0,END).replace(\"\\n\", \"<p />\"))\n\n html_file = open(output + \".html\", \"w\")\n html_file.write(outputText)\n html_file.close()\n\n return output + \".html\"", "title": "" }, { "docid": "00c0d5d7d427a6c3024f4a4e759cda50", "score": "0.4584902", "text": "def makeFiles(titlestring, generaldescription, containerlist, runlist, foldername,\n makeIndex = True, makeTables = True, makePlotOverview = True, plottuples = None,\n fullperRunDF = None, fullRunCompDF = None, ZperRunDF = None, ZRunCompDF = None,\n LadderperRunDF = None, LadderRunCompDF = None, cfgname = None,\n linkTeX = False, linkCSV = False, linkCFG = False):\n from ConfigParser import SafeConfigParser\n logging.info(\"Generating HTML files\")\n ####################################################################\n # General HTML headers\n styleconfig = SafeConfigParser()\n logging.debug(\"Loading style config\")\n styleconfig.read(\"configs/style.cfg\")\n\n header 
= \"<!DOCTYPE html> \\n <html> \\n <body> \\n\"\n style = \"<style> \\n table, th, td {{\\nborder: {0} solid black;\\n border-collapse: collapse;\\n}}\\nth, td {{ padding: {1}; }}\\n</style>\\n\".format(styleconfig.get(\"Tables\",\"bordersize\"),styleconfig.get(\"Tables\",\"padding\"))\n footnote = \"<br><br><small>Generated: {0} by K. Schweiger, [email protected]</small>\\n\".format(datetime.now())\n footer = \"</body> \\n </html> \\n\"\n htmltemplatetuple = (header, style, footnote, footer)\n ####################################################################\n if makeTables:\n #Check if DFs are passed -> Used to speed up the script\n if fullperRunDF is None or fullRunCompDF is None or ZperRunDF is None or LadderperRunDF is None or LadderRunCompDF is None:\n DFstopass = None\n else:\n DFstopass = (fullperRunDF, fullRunCompDF, ZperRunDF, ZRunCompDF, LadderperRunDF, LadderRunCompDF)\n makeComparisonFiles(titlestring, generaldescription, containerlist, runlist, foldername,\n htmltemplates = htmltemplatetuple, DFs = DFstopass, linkTeX = linkTeX, linkCSV = linkCSV, linkCFG = linkCFG)\n if makeIndex: #Make a landing page called index.html linking tables and plots (if passed as plottuples)\n commentsite = makeRunCommentPage(titlestring, runlist, foldername, htmltemplatetuple, containerlist)\n makeLandingPage(titlestring, runlist, foldername, htmltemplatetuple, makePlotOverview, cfgname, commentsite)\n if makePlotOverview:\n if plottuples is None:\n logging.error(\"No plotlists and names are given! No adding plots to index.\")\n else:\n for plottuple in plottuples: #Go through all groups\n plotlist, group = plottuple\n makePlotOverviewFile(titlestring, generaldescription, plotlist, runlist, foldername, group)", "title": "" } ]
bc1345310a64780e09c789847d11b327
Classifies the given inputs (images) by comparing the processing results of the images to the target classes targets.
[ { "docid": "6840b3a9df26313e808670b73b4439d1", "score": "0.79305094", "text": "def classify(self, inputs, targets):\n assert inputs.shape[0] == targets.shape[0], \\\n 'Different number of inputs and targets provided. inputs.shape[0] ' \\\n 'and targets.shape[0] must be identical.'\n assert inputs.shape[1] == self.imageSize, \\\n 'inputs have incorrect size. inputs.shape[1] must be %d.' % (\n self.imageSize)\n\n self._inputs = inputs\n self._targets = targets\n self._classifications = np.zeros(self._targets.shape, dtype=int)\n\n self._configureSNIPs()\n self._configureRun()\n\n # Compute number of batches\n assert self.numSamples % self.batchSize == 0, 'numSamples/targets ' \\\n 'must be multiple of ' \\\n 'batchSize.'\n\n imgIds = range(0, self.numSamples)\n self._start(imgIds)\n\n self.finish()", "title": "" } ]
[ { "docid": "8a295023de0f3cc752c3de5964aeb74a", "score": "0.6684749", "text": "def postprocess(results, filenames, batch_size):\n if len(results) != 1:\n raise Exception(\"expected 1 result, got {}\".format(len(results)))\n\n batched_result = results[0].batch_classes\n if len(batched_result) != batch_size:\n raise Exception(\"expected {} results, got {}\".format(batch_size, len(batched_result)))\n if len(filenames) != batch_size:\n raise Exception(\"expected {} filenames, got {}\".format(batch_size, len(filenames)))\n\n label, score = [], []\n # batch size is always 1 here, need to modify if were to larger batch_size\n for (index, result) in enumerate(batched_result):\n print(\"Image '{}':\".format(filenames[index]))\n for cls in result.cls:\n label.append(cls.label)\n score += [{\"index\": cls.label, \"val\": cls.value}]\n print(\" {} ({}) = {}\".format(cls.idx, cls.label, cls.value))\n return label[0], score", "title": "" }, { "docid": "73c23595c939de51150afd1ef98e92c8", "score": "0.6672096", "text": "def classified_images(wildcards):\n outrule, i = classify_or_test(wildcards, return_int=True)\n if i == 3:\n checkpoint_output = checkpoints.create_split_truth_data.get(**wildcards).output.test\n elif i:\n checkpoint_output = checkpoints.create_truth_data.get(**wildcards).output[0]\n else:\n checkpoint_output = checkpoints.rev_transform.get(**wildcards).output[0]\n return expand(\n outrule.output[0],\n sample=wildcards.sample,\n image=glob_wildcards(\n os.path.join(\n checkpoint_output,\n \"{image}\"+(\".tsv\" if i else \".json\")\n )\n ).image\n )", "title": "" }, { "docid": "314d3bc358652b13a3fb28ebe2290a18", "score": "0.65816724", "text": "def compute(self, images_true, images_pred, images_score):\n if not (len(images_true) == len(images_pred) == len(images_score)):\n raise ValueError('Dimensions must correspond.')\n\n for image_true, image_pred, image_score in itertools.izip(images_true, images_pred, images_score):\n groundtruths = np.argwhere(np.asarray(image_true) > 0)\n predictions = np.argwhere(np.asarray(image_pred) > 0)\n\n n_groundtruths = float(groundtruths.shape[0])\n n_predictions = float(predictions.shape[0])\n width = float(image_true.shape[0])\n height = float(image_true.shape[1])\n raw_count = np.sum(image_score / np.float(self._raw_factor), dtype=np.float64)\n\n # Sort detections by score in descending order such that the first detection\n # linked to a ground truths is the more confident one\n scores = image_score[tuple(predictions.T)]\n predictions = predictions[np.argsort(scores, kind='quicksort')]\n\n n_tp = 0\n if n_predictions > 0:\n kdtree = KDTree(predictions, metric='euclidean')\n pred_idxss, distancess = kdtree.query_radius(groundtruths, self._epsilon, return_distance=True)\n free = np.array([True] * predictions.shape[0])\n for pred_idxs, distances in itertools.izip(pred_idxss, distancess):\n pred_idxs = pred_idxs[free]\n if len(pred_idxs) > 0 and n_tp <= int(n_predictions):\n pred_idx = pred_idxs[np.argmax(pred_idxs)]\n free[pred_idx] = False\n self.distance_errors.append(distances[np.argmax(pred_idxs)])\n n_tp += 1\n\n self.cm.incrementTP(n_tp)\n self.cm.incrementFP(int(n_predictions - n_tp))\n self.cm.incrementFN(int(n_groundtruths - n_tp))\n\n self.count_errors.append(np.abs(n_groundtruths - n_predictions))\n self.raw_count_errors.append(np.abs(n_groundtruths - raw_count))\n\n self.density_errors.append(np.abs((n_groundtruths / (width * height)) - (n_predictions / (width * height))))\n self.raw_density_errors.append(np.abs((n_groundtruths / (width * 
height)) - (raw_count / (width * height))))\n self.count += n_groundtruths", "title": "" }, { "docid": "3637cb7c0f56c7e3a52c6380e2c71bc3", "score": "0.6534657", "text": "def classify(self, image_messages):\n pass", "title": "" }, { "docid": "545a58f7cb2334582c2c240b6a715500", "score": "0.6500155", "text": "def classify(self, caffemodel, deploy_file, image_files,\n mean_file=None, labels_file=None, batch_size=None, use_gpu=True):\n # Load the model and images\n net = self.get_net(caffemodel, deploy_file, use_gpu)\n transformer = self.get_transformer(deploy_file, mean_file)\n _, channels, height, width = transformer.inputs['data']\n if channels == 3:\n mode = 'RGB'\n elif channels == 1:\n mode = 'L'\n else:\n raise ValueError('Invalid number for channels: %s' % channels)\n images = [self.load_image(image_file, height, width, mode) for image_file in image_files]\n labels = self.read_labels(labels_file)\n\n # Classify the image\n scores = self.forward_pass(images, net, transformer, batch_size=batch_size)\n\n # Process the results\n indices = (-scores).argsort()[:, :5] # take top 5 results\n classifications = []\n for image_index, index_list in enumerate(indices):\n result = []\n for i in index_list:\n # 'i' is a category in labels and also an index into scores\n if labels is None:\n label = 'Class #%s' % i\n else:\n label = labels[i]\n result.append((label, round(100.0 * scores[image_index, i], 4)))\n classifications.append(result)\n\n # classifications is in the form of [('Occupied', 100.0), ('Empty', 0.0)]\n # originally this function just printed out results. modified to send an array with the results and labels\n # for use elsewhere in the program\n return classifications", "title": "" }, { "docid": "bfa059044b8f64e812d9da961b2ec168", "score": "0.6481143", "text": "def compare_results(input_url_images, input_url_results_labels, input_url_gt_labels, output_url, colors_gt,\n colors_result, save, visualize):\n data = os.listdir(input_url_gt_labels)\n\n for d in data:\n file = open(os.path.join(input_url_results_labels, d))\n file_gt = open(os.path.join(input_url_gt_labels, d))\n\n img = cv2.imread(os.path.join(input_url_images, d.split('.')[0] + \".png\"))\n if img is None:\n img = cv2.imread(os.path.join(input_url_images, d.split('.')[0] + \".jpg\"))\n\n img = get_labels(file_gt, img, 5, colors_gt, \"yolo\")\n img = get_labels(file, img, 5, colors_result, \"det\")\n\n if visualize:\n visualize_data(img)\n\n if save:\n save_data(img, output_url, d)", "title": "" }, { "docid": "5220d165b6421ad63622ce02844b740f", "score": "0.6467733", "text": "def _f_image(self, images: np.ndarray):\n out = np.zeros((len(images), 2)) # output same shape\n\n # inputs neeeds to be [PIL.Image.Image]; if not try to transform\n if not isinstance(images[0], Image.Image):\n images = utils.arr_to_img(images)\n\n for i, (image, text) in enumerate(zip(images, self._fixed_texts)):\n # classify, output is a tupe (index, score)\n ind, score = self.model.classify(image, text).values()\n out[i][ind] = score\n out[i][1 - ind] = 1 - score\n\n return out\n # test if only output is the probability of being hateful\n # return out[:, 1][:, np.newaxis]", "title": "" }, { "docid": "d67624d11d4986b95a2c60a8d75749fc", "score": "0.6464209", "text": "def classify(self, inputs):\n return self.predict(self.params, inputs)", "title": "" }, { "docid": "cebb62ab3ca63abff427e62afe3d02a9", "score": "0.6442733", "text": "def detect(self, images, verbose=0):\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n 
images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape, \\\n \"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ = \\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks = \\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results", "title": "" }, { "docid": "4afe0b5fd3d9d76a532ff5a493ede9eb", "score": "0.6397793", "text": "def classify(test_Q1, test_Q2, y, threshold, model, vocab, data_generator=data_generator, batch_size=64):\n accuracy = 0\n ### START CODE HERE (Replace instances of 'None' with your code) ###\n for i in range(0, len(test_Q1), batch_size):\n # Call the data generator (built in Ex 01) with shuffle=False using next()\n # use batch size chuncks of questions as Q1 & Q2 arguments of the data generator. 
e.g x[i:i + batch_size]\n # Hint: use `vocab['<PAD>']` for the `pad` argument of the data generator\n q1, q2 = next(data_generator(\n test_Q1[i:i + batch_size], test_Q2[i:i + batch_size], batch_size, vocab[\"<PAD>\"], shuffle=False))\n # use batch size chuncks of actual output targets (same syntax as example above)\n y_test = y[i:i + batch_size]\n # Call the model\n v1, v2 = model((q1, q2))\n\n for j in range(batch_size):\n # take dot product to compute cos similarity of each pair of entries, v1[j], v2[j]\n # don't forget to transpose the second argument\n d = fastnp.dot(v1[j], v2[j].T)\n # is d greater than the threshold?\n res = d > threshold\n # increment accurancy if y_test is equal `res`\n accuracy += (y_test[j] == res)\n # compute accuracy using accuracy and total length of test questions\n accuracy = accuracy / len(test_Q1)\n ### END CODE HERE ###\n\n return accuracy", "title": "" }, { "docid": "e710c78d9059839bc92198551226f4df", "score": "0.63741803", "text": "def _check_images(self, abs_output_directory):\n # Make output directory/directories\n if len(set(self.class_labels)) <= 1:\n if not os.path.exists(abs_output_directory):\n try:\n os.makedirs(abs_output_directory)\n except IOError:\n logging.error(\"Insufficient rights to read or write output directory ({})\"\n .format(abs_output_directory))\n else:\n for class_label in self.class_labels:\n if not os.path.exists(os.path.join(abs_output_directory, str(class_label[0]))):\n try:\n os.makedirs(os.path.join(abs_output_directory, str(class_label[0])))\n except IOError:\n logging.error(\"Insufficient rights to read or write output directory ({})\"\n .format(abs_output_directory))\n # Check the images, read their dimensions, and remove them if they cannot be read\n for augmentor_image in self.augmentor_images:\n try:\n opened_image = np.load(augmentor_image.image_path)\n self.distinct_dimensions.add(opened_image.shape)\n except IOError as e:\n logging.error(\"There is a problem with image {} in the source directory: {}\"\n .format(augmentor_image.image_path, e.message))\n self.augmentor_images.remove(augmentor_image)\n\n logging.info(\"Initialised with {} image(s) found.\\n\".format(len(self.augmentor_images)))\n logging.info(\"Output directory set to {}.\".format(abs_output_directory))", "title": "" }, { "docid": "ea3f8f306170c37feb4fc50130240941", "score": "0.6322083", "text": "def test_network_monger(dirs, image_dims):\r\n start_time = time.time()\r\n read_write_time = 0.0\r\n n_classes = len(dirs) # assumes number of classes is the same as number of input directories\r\n master_list_encoded = []\r\n for i in range(len(dirs)):\r\n imagenames = return_images(dirs[i])\r\n image_encoded = list(zip(imagenames, [i] * len(imagenames)))\r\n master_list_encoded += image_encoded\r\n total_num_images = len(master_list_encoded)\r\n all_image_data = read_images(master_list_encoded, image_dims)\r\n image_indices = list(range(total_num_images))\r\n px_values, true_classes = batch_create(all_image_data, image_indices, image_dims, n_classes) # see batch_create\r\n input_dict = {image_data: px_values, true_class: true_classes,\r\n im_x: image_dims[0], im_y: image_dims[1], im_z: image_dims[2],\r\n num_classes: n_classes, keep_prob: 1.0}\r\n read_write_time = read_write_time + time.time() - start_time\r\n run_time_start = time.time()\r\n \"\"\"Now, the network isn't being optimized, so instead the calculated raw class values and binary class\r\n predictions are returned for all the test images\"\"\"\r\n decimal_classes, pred_classes = 
session.run([calculated_value, predicted_class], feed_dict=input_dict)\r\n run_time = time.time() - run_time_start\r\n current_time = time.time()\r\n pred_classes = np.ravel(pred_classes) # munge munge munge\r\n pred_classes = pred_classes.tolist() # munge munge munge munge munge\r\n decimal_classes.tolist() # munge munge munge munge munge munge munge munge munge munge\r\n all_test_classes = list(all_image_data[:, 0].astype(int))\r\n correct_predictions = [i == j for i, j in zip(all_test_classes, pred_classes)] # boolean of correct predictions\r\n lists = [all_test_classes, pred_classes, decimal_classes]\r\n names = ['true', 'pred', 'raw']\r\n for i in range(3):\r\n \"\"\"see readme for description of these output files\"\"\"\r\n file = open('out'+names[i]+'.txt', 'w')\r\n for line in lists[i]:\r\n file.write(str(line)+' \\n')\r\n file.close()\r\n total_correct_predictions = sum(correct_predictions) # total number of correct test predictions\r\n accuracy = round((float(total_correct_predictions) / float(total_num_images))*100, 2)\r\n read_write_time = read_write_time + time.time() - current_time\r\n return read_write_time, run_time, accuracy", "title": "" }, { "docid": "2fbf94146eae5aa78b71322062f1f1b7", "score": "0.6163668", "text": "def _input_format_classification(preds: torch.Tensor, target: torch.Tensor, threshold: float):\n if not (len(preds.shape) == len(target.shape) or len(preds.shape) == len(target.shape) + 1):\n raise ValueError(\n \"preds and target must have same number of dimensions, or one additional dimension for preds\"\n )\n\n if len(preds.shape) == len(target.shape) + 1:\n # multi class probabilites\n preds = torch.argmax(preds, dim=1)\n\n if len(preds.shape) == len(target.shape) and preds.dtype == torch.float:\n # binary or multilabel probablities\n preds = (preds >= threshold).long()\n return preds, target", "title": "" }, { "docid": "887fec5b52e14b5025918edd69b8be70", "score": "0.6161768", "text": "def step_fn(inputs):\n images, labels = inputs\n logits = self.model(images, training=False)\n\n if self.test_loss:\n if self.one_hot:\n loss = tf.keras.losses.categorical_crossentropy(\n labels, logits, label_smoothing=self.label_smoothing)\n else:\n loss = tf.keras.losses.sparse_categorical_crossentropy(labels, logits)\n loss = tf.reduce_sum(loss) * (1.0 / self.flags_obj.batch_size)\n self.test_loss.update_state(loss)\n\n if self.test_accuracy:\n self.test_accuracy.update_state(labels, logits)\n # tf.print('labels.shape: ', labels.shape,\n # ', logits.shape: ', logits.shape,\n # ', result: ', self.test_accuracy.result())\n # self.test_corrects.update_state(\n # tf.cast(\n # tf.reduce_sum(\n # tf.cast(\n # tf.equal(\n # tf.cast(tf.argmax(logits, axis=1), labels.dtype),\n # labels), tf.int32)), tf.float32))", "title": "" }, { "docid": "3323a1edd08d744a074e45eb6bec075f", "score": "0.61310375", "text": "def test_attack(self, adv_images, orig_images, true_labels,\n target_labels=None, targeted=False):\n score = self.model.evaluate(adv_images, true_labels, verbose=0)\n print('Test loss: {:.2f}'.format(score[0]))\n print('Successfully moved out of source class: {:.2f}'.format(\n 1 - score[1]))\n\n if targeted:\n score = self.model.evaluate(adv_images, target_labels, verbose=0)\n print('Test loss: {:.2f}'.format(score[0]))\n print('Successfully perturbed to target class: {:.2f}'.format(\n score[1]))\n\n dist = np.mean(np.sqrt(\n np.mean(np.square(adv_images - orig_images), axis=(1, 2, 3))))\n print('Mean perturbation distance: {:.2f}'.format(dist))\n return score[0], 1 - 
score[1], dist", "title": "" }, { "docid": "84897f663417004410fb56682d96d400", "score": "0.61264914", "text": "def main():\n os.chdir(os.path.dirname(__file__))\n cur_dir=os.getcwd()\n \n training_data_path_ants=cur_dir+\"\\\\ant\"\n training_data_path_lobsters=cur_dir+\"\\\\lobster\"\n test_data_path_unknown=cur_dir+\"\\\\test\";\n \n processor=ImageProcessor();\n train_X1=processor.fetch_all_jpg_files_as_numpy_array(training_data_path_ants);\"\"\" pictures of ants--class 0\"\"\"\n train_X2=processor.fetch_all_jpg_files_as_numpy_array(training_data_path_lobsters);\"\"\" pictures of lobsters--class 1\"\"\"\n \n train_Y1=[];\n train_Y2=[];\n \n for image in train_X1:\n train_Y1.append(0);\"\"\"class 0\"\"\"\n for image in train_X2:\n train_Y2.append(1);\"\"\"class 1\"\"\"\n \n train_Y=train_Y1+train_Y2;\"\"\"all class labels--> Y\"\"\"\n train_X_temp=train_X1+train_X2; \"\"\"all training data--variables X\"\"\"\n \n \"\"\"********************************************\n our training set is ready now: \n let us have a test data to find if the test image is ant or lobster.\"\"\"\n \n \n \"\"\"pad zeros at the end\"\"\"\n \n train_X=processor.pad_list(train_X_temp)\n \n dim1=int(math.sqrt(len(train_X[0])/3))\n dim2=dim1\n test_X=processor.fetch_with_resize(test_data_path_unknown,dim1,dim2)\n test_X=test_X[0].tolist() \n \"\"\" set up the deep network with 1 hidden layer defined above\"\"\"\n \n deep_network=DeepNet(train_X,train_Y)\n \n deep_network.train()\n output=deep_network.predict(test_X)\n \n if(output<0.5):\n print (\"ant\")\n else:\n print (\"lobster\")\n \n print(\"The end\")", "title": "" }, { "docid": "faab65fc3e395f22ee9d04e2296a64fb", "score": "0.61191386", "text": "def apply_preprocessing(self):\n # Images preprocessing\n preparator = ImagePreparation(\n self.images_paths, self.args['model_type']\n )\n images = preparator.apply_basic_processing()\n\n # Labels Encoding\n encoder = LabelClassifier(self.labels)\n labels_cat, label_classes = encoder.get_categorical_labels()\n\n return images, labels_cat, label_classes", "title": "" }, { "docid": "7b3ca84d4f190cbaeceaa2d22f46d443", "score": "0.6116314", "text": "def _classifyImages(self):\n processor = ImageProcessor(self._filePath)\n labels = processor.classifyImages()\n if(len(labels) == 0):\n raise ValueError(\"There are no images in that folder\")\n return labels", "title": "" }, { "docid": "1a08421180cd0f65d5228bc4584f5938", "score": "0.61113167", "text": "def categorize(self, protos):\n corpus = self.imagefeed.feed_location\n mask = ChooseTrainingSet(get_labels(corpus=self.imagefeed.feed_location), train_size=0.5)\n categories = classify_images(protos, get_images(corpus, mask=mask), \n get_labels(corpus, mask=mask), get_images(corpus, mask=~mask), corpus)\n reset_directory(self.sorted_location, self.imagefeed.image_location)\n self.imagefeed.move_images(categories, self.sorted_location)", "title": "" }, { "docid": "ed7d97656d6a3cf21340b81790c5e062", "score": "0.6108216", "text": "def classify_jpegs():\n args = sys.argv\n\n # ensure we have the right number of arguments\n if not len(args) == 2:\n print 'Usage: %s [directory]' % (args[0])\n print 'Classifies each JPEG image in the given directory.'\n return\n\n classifier = classify.read_classifier()\n\n directory = args[1]\n old_cwd = os.getcwd()\n os.chdir(directory)\n\n # find all images in directory\n image_paths = glob.glob('*.jpg') + glob.glob('*.jpeg') + glob.glob('*.png')\n\n # read and classify each image\n for image_path in image_paths:\n image = cv2.imread(image_path)\n 
features = classify.compute_features(image, preprocess=True)\n\n prediction = classifier.predict(features)[0]\n print image_path, chr(prediction)\n\n os.chdir(old_cwd)", "title": "" }, { "docid": "6ba4591a43635749242407f02c19cbcf", "score": "0.61065733", "text": "def detection_as_classification(model, test_generator):\n i = 0\n TP = 0\n FP = 0\n \n for X,Y in test_generator:\n if i >= len(test_generator):\n break # otherwise will run indefinitely\n X = rgb2bgr(X)\n X = preprocess_image(X)\n boxes, scores, labels = model.predict_on_batch(X)\n tp, fp = evaluate(filter(scores, labels, score_threshold), Y)\n i += 1\n TP += tp\n FP += fp\n\n return TP, FP", "title": "" }, { "docid": "6165e3379545522b2a36fc331be8a2d1", "score": "0.61028713", "text": "def training(\n positive_iis, \n negative_iis, \n num_classifiers, \n ns, \n Ddesired=0.9, \n Edesired = 0.05\n ):\n logger = getLogger('Train')\n \n num_pos = len(positive_iis)\n num_neg = len(negative_iis)\n num_imgs = num_pos + num_neg\n img_height, img_width = positive_iis[0].shape\n \n pos_weights = np.ones(num_pos) * 1. / (2 * num_pos)\n neg_weights = np.ones(num_neg) * 1. / (2 * num_neg)\n weights = np.hstack((pos_weights, neg_weights))\n labels = np.hstack((np.ones(num_pos), -np.ones(num_neg)))\n \n images = positive_iis + negative_iis\n \n logger.info('Stage 1: Creating features')\n features = create_dynamic_features(\n (img_width, img_height), \n ns,\n 0\n )\n num_features = len(features)\n num_classifiers = num_features if num_classifiers == -1 else num_classifiers\n \n logger.info('Generated features: {0}'.format(num_features))\n logger.info('Stage 2: Calculating thresholds')\n \n bar = ProgressBar()\n for i in bar(range(num_features)):\n feature = features[i]\n scores = np.fromiter(\n (feature.get_score(img) for img in images), \n np.float\n )\n feature.threshold = get_appropriet_threshold(\n scores[:num_pos], \n scores[num_pos:], \n Ddesired, \n Edesired\n )\n \n logger.info('Stage 3: Calculating scores for all images')\n votes = np.zeros((num_imgs, num_features))\n bar = ProgressBar()\n # Use as many workers as there are CPUs\n pool = Pool(processes=None)\n for i in bar(range(num_imgs)):\n features_matrix = np.array( \n list(\n pool.map(\n #map( \n partial(_get_feature_values, image=images[i]), \n features\n )))\n # Convert matrix to vector\n votes[i, :] = features_matrix\n \n logger.info('Stage 4: Selecting classifiers')\n classifiers = list()\n feature_indexes = list(range(num_features))\n bar = ProgressBar()\n for _ in bar(range(num_classifiers)):\n\n classification_errors = np.zeros(len(feature_indexes))\n # normalize weights\n weights *= 1. / np.sum(weights)\n\n # select best classifier based on the weighted error\n for f in range(len(feature_indexes)):\n f_idx = feature_indexes[f]\n # classifier error is the sum of image weights where the classifier\n # is right\n error = sum( map(\n lambda img_idx: weights[img_idx] if labels[img_idx] != votes[img_idx, f_idx] else 0, \n range(num_imgs)\n ))\n classification_errors[f] = error\n\n # get best feature, i.e. 
with smallest error\n min_error_idx = np.argmin(classification_errors)\n best_error = classification_errors[min_error_idx]\n best_feature_idx = feature_indexes[min_error_idx]\n\n # set feature weight\n best_feature = features[best_feature_idx]\n eb_eq = (1-best_error)/best_error\n feature_weight = 0.5 * np.log((1 - best_error) / best_error)\n best_feature.weight = feature_weight\n\n classifiers.append(best_feature)\n \n weights = np.array(\n list(\n map(\n lambda img_idx: weights[img_idx] * np.sqrt(eb_eq if (labels[img_idx] != votes[img_idx, best_feature_idx]) else 1/eb_eq),\n range(num_imgs)\n )\n )\n )\n \n feature_indexes.remove(best_feature_idx)\n \n logger.info(\n 'Training finished. {0} week classifiers formed'.format(len(classifiers))\n )\n return classifiers", "title": "" }, { "docid": "0d5339fbadddabf29188f5f26d8daabd", "score": "0.6089755", "text": "def process_batch(self, detections, labels):\n detections = detections[detections[:, 4] > self.conf]\n gt_classes = labels[:, 0].int()\n detection_classes = detections[:, 5].int()\n iou = box_iou(labels[:, 1:], detections[:, :4])\n\n x = torch.where(iou > self.iou_thres)\n if x[0].shape[0]:\n matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()\n if x[0].shape[0] > 1:\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n else:\n matches = np.zeros((0, 3))\n\n n = matches.shape[0] > 0\n m0, m1, _ = matches.transpose().astype(int)\n for i, gc in enumerate(gt_classes):\n j = m0 == i\n if n and sum(j) == 1:\n self.matrix[detection_classes[m1[j]], gc] += 1 # correct\n else:\n self.matrix[self.nc, gc] += 1 # background FP\n\n if n:\n for i, dc in enumerate(detection_classes):\n if not any(m1 == i):\n self.matrix[dc, self.nc] += 1 # background FN", "title": "" }, { "docid": "3bd5c7e71502f132bb8fdadbf9e2ba93", "score": "0.6076005", "text": "def inference(self, batch_inputs, images, pred_centers, pred_regions, pred_weights, encode_feat):\n results = batch_inputs\n processed_results = []\n for img_idx, result_img in enumerate(results):\n if \"instances\" in result_img.keys():\n img_shape = result_img[\"instances\"].image_size\n else:\n img_shape = result_img[\"image\"].shape[-2:]\n ori_shape = (result_img[\"height\"], result_img[\"width\"])\n encode_feat = encode_feat[img_idx].unsqueeze(0)\n feat_shape = encode_feat.shape\n encode_feat = encode_feat.reshape(*feat_shape[:2], -1)\n result_instance = None\n\n pred_regions = [_pred[img_idx].unsqueeze(0) for _pred in pred_regions]\n pred_weights = [_pred[img_idx].unsqueeze(0) for _pred in pred_weights]\n pred_centers = [_pred[img_idx].unsqueeze(0) for _pred in pred_centers]\n pool_size = [3,3,3,5,5]\n idx_feat_th, class_ths, score_ths, thing_num, idx_feat_st, score_sts, class_sts, stuff_num = \\\n multi_apply(self.inference_single_level, pred_centers,\\\n pred_regions, pred_weights, pool_size)\n \n thing_num = sum(thing_num)\n if thing_num == 0:\n result_instance = Instances(ori_shape, pred_masks=[], pred_boxes=[], \n pred_classes=[], scores=[])\n else:\n class_ths = [_class for _class in class_ths if len(_class)>0]\n score_ths = [_score for _score in score_ths if len(_score)>0]\n idx_feat_th = [_feat for _feat in idx_feat_th if len(_feat)>0]\n class_ths = torch.cat(class_ths, dim=0)\n score_ths = torch.cat(score_ths, dim=0)\n idx_feat_th = torch.cat(idx_feat_th, dim=2)\n keep = 
torch.argsort(score_ths, descending=True)\n idx_feat_th = idx_feat_th[:,:,keep]\n score_ths = score_ths[keep]\n class_ths = class_ths[keep]\n\n stuff_num = sum(stuff_num)\n if stuff_num == 0:\n class_sts, idx_feat_st, score_sts = [], [], []\n else:\n score_sts = [_score for _score in score_sts if len(_score)>0]\n class_sts = [_cate_sem for _cate_sem in class_sts if len(_cate_sem)>0]\n idx_feat_st = [_feat for _feat in idx_feat_st if len(_feat)>0]\n score_sts = torch.cat(score_sts, dim=0)\n class_sts = torch.cat(class_sts, dim=0)\n idx_feat_st = torch.cat(idx_feat_st, dim=0)\n\n pred_thing, [class_ths, score_ths] = self.thing_generator(encode_feat, feat_shape, idx_feat_th, thing_num, class_ths, score_ths)\n pred_stuff, [class_sts, score_sts] = self.stuff_generator(encode_feat, feat_shape, idx_feat_st, stuff_num, class_sts, score_sts)\n pred_stuff = pred_stuff.sigmoid()\n \n if result_instance is None:\n result_instance, pred_mask, class_ths, score_ths = self.process_inst(\n class_ths, score_ths, pred_thing, img_shape, ori_shape) \n else:\n pred_mask, class_ths, score_ths = None, None, None\n if self.sem_with_thing or self.cfg.MODEL.POSITION_HEAD.STUFF.ALL_CLASSES:\n sem_classes = self.sem_classes\n else:\n sem_classes = self.sem_classes + 1\n\n pred_stuff = F.interpolate(pred_stuff, scale_factor=self.common_stride, mode=\"bilinear\", \n align_corners=False)[...,:img_shape[0],:img_shape[1]]\n pred_stuff = F.interpolate(pred_stuff, size=ori_shape, mode=\"bilinear\", align_corners=False)[0]\n pred_sem_seg = torch.zeros(sem_classes, *pred_stuff.shape[-2:], device=self.device)\n pred_sem_seg[class_sts] += pred_stuff\n processed_results.append({\"sem_seg\": pred_sem_seg, \"instances\": result_instance})\n\n if self.panoptic_combine:\n result_panoptic = self.combine_thing_and_stuff(\n [pred_mask, class_ths, score_ths],\n pred_sem_seg.argmax(dim=0),\n self.panoptic_overlap_thrs,\n self.panoptic_stuff_limit,\n self.panoptic_inst_thrs)\n processed_results[-1][\"panoptic_seg\"] = result_panoptic\n\n return processed_results", "title": "" }, { "docid": "8d8e18c099a271baf52f0e63c258c43e", "score": "0.60603863", "text": "def classify_images(images_dir,petlabel_dic,model):\n #classifier()\n images_dir_filename = listdir(images_dir)\n #model_petlabel_dic = dict()\n results_dic = dict()\n # for block to pass values into classifer method and get model label\n # model lable will be added to a dictionary\n for key in petlabel_dic:\n file_name = images_dir+key\n # print(file_name)\n model_label = classifier(file_name,model)\n #print(model_label)\n model_label = model_label.lower()\n model_label = model_label.strip()\n truth = petlabel_dic[key]\n found = model_label.find(truth)\n if found>=0:\n if( (found ==0 and len(truth)==len(model_label) ) or\n ( ( (found==0) or (model_label[found-1]==\" \") ) and\n ( (found + len(truth)==len(model_label)) or\n (model_label[found + len(truth):found+len(truth)+1] in\n (\",\",\" \") )\n )\n )\n ):\n if key not in results_dic:\n results_dic[key] = [truth,model_label,1]\n\n else:\n if key not in result_dic:\n results_dic[key] = [truth,model_label,0]\n else:\n if key not in results_dic:\n results_dic[key] = [truth,model_label,0]\n return(results_dic)", "title": "" }, { "docid": "bacf24a7bca36483f1bd00eee184f5a2", "score": "0.605324", "text": "def imageClassify(net, image_name):\n\n # Load the image uploaded by user..\n im_file = os.path.join('./assets', image_name)\n im = cv2.imread(im_file)\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n 
timer.tic()\n scores, boxes = im_detect(net, im)\n timer.toc()\n print ('Detection took {:.3f}s for '\n '{:d} object proposals').format(timer.total_time, boxes.shape[0])\n\n # Visualize detections for each class\n CONF_THRESH = 0.8\n NMS_THRESH = 0.3\n fw = open(im_file+\".txt\", 'w')\n fw.write(\"{\\\"objects\\\": [\\n\")\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n# fw.write(\"{extent:[\"+str(bbox[0])+\",\"+str(bbox[1])+','+str(bbox[2])+\",\"+str(bbox[3])+\"],\\n\")\n# fw.write(\"class:\" + class_name + \",\\n\")\n# fw.write(\"score:\" + score + \"},\\n\")\n\n vis_detections(im, cls, dets, im_file, fw, thresh=CONF_THRESH)\n fw.write(\"{}]}\")\n fw.close()", "title": "" }, { "docid": "c9fcde7c822832a4adc922842d561494", "score": "0.60296136", "text": "def prepare_images(self):\n\n self.model.resize_data(Structure.CategoricalNeuralNetwork, Environment.TRAIN)\n self.model.resize_data(Structure.CategoricalNeuralNetwork, Environment.TEST)\n\n n_classes = self.model.convert_to_one_hot_data()\n\n return n_classes", "title": "" }, { "docid": "6c111a0ba9e1382e7af399588679d4d7", "score": "0.60258794", "text": "def run_classifier(self, sets=[\"train\", \"valid\"], class_threshold=0, preprocess_func=None, label_func=None, pwds=None, classify_func=None, **kwargs):\n\n print(\"\\nRunning Classifier:\", self.name)\n\n for data_set in sets:\n\n assert data_set in self.dataset, \"%s not in dataset\" % data_set\n print(\"Currently classifying {} with {} datapoints\".format(data_set, len(self.dataset[data_set][\"data\"])))\n\n preds = []\n\n data = self.dataset[data_set][\"data\"]\n\n self.dataset[data_set][\"matches\"] = []\n self.dataset[data_set][\"scores\"] = []\n\n for datum in data:\n capture_scores = defaultdict(int)\n captures = {}\n class_matches = {}\n\n #Forced to do this because self.regexes stored as {\"class\": [Regex Objects list]}\n for class_name in self.regexes:\n\n matches = {}\n #Ask handler to get capture_scores, captures, and matches\n if len(self.regexes[class_name]) > 0:\n matches, captures, capture_scores = self.handler.score_data(datum, self.regexes[class_name],\n pwds=pwds, preprocess_func=preprocess_func,\n capture_convert=label_func)\n #Storing matches in object\n class_matches[class_name] = matches\n\n #Adding biases\n for bias in self.capture_biases:\n capture_scores[bias] += self.capture_biases[bias]\n\n self.dataset[data_set][\"matches\"].append(class_matches)\n self.dataset[data_set][\"scores\"].append(capture_scores)\n\n #getting prediction\n\n if not classify_func:\n preds.append(self.classify(capture_scores, threshold=class_threshold)[0])\n else:\n preds.append(classify_func(class_matches, captures, capture_scores, **kwargs))\n\n preds = np.array(preds)\n self.dataset[data_set][\"preds\"] = preds", "title": "" }, { "docid": "d8fb745ccba717175f00be5ca6dada34", "score": "0.60210526", "text": "def _predict_images(self, images):\n img_transform = transforms.Compose([\n transforms.Resize((197, 197)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n \n\n reconstructed_image_tensor_array = []\n for (image, shape) in zip(images['image'], images['shape']):\n imagebytes = bytearray(image, encoding=\"utf8\")\n deserialized_bytes = base64.decodebytes(imagebytes)\n 
deserialized_bytes = np.frombuffer(deserialized_bytes, dtype=np.uint8)\n img_nparray = np.reshape(deserialized_bytes, newshape=shape)\n img = PIL.Image.fromarray(np.uint8(img_nparray), mode=\"RGB\")\n imgTensor = img_transform(img)\n reconstructed_image_tensor_array.append(imgTensor)\n \n reconstructed_image_tensor = torch.stack(reconstructed_image_tensor_array, dim=0)\n with torch.no_grad():\n output = self._model(reconstructed_image_tensor).squeeze(-1).squeeze(-1)\n return output", "title": "" }, { "docid": "7ef940ab70e617f83d12c0bfd7e0239e", "score": "0.6019942", "text": "def run_images(self):\n car_list_train, car_list_test, noncar_list_train, noncar_list_test = self.read_data()\n X_train, X_test, y_train, y_test, self.scaler = self.get_datasets(car_list_train, car_list_test,\n noncar_list_train, noncar_list_test)\n self.classifier = self.train_classifier(X_train, y_train, X_test, y_test)\n\n tests = glob.glob('./test_images/*.jpg')\n fig = plt.figure()\n idx = 1\n for img in tests:\n test_img = cv2.imread(img)\n t= time.time()\n self.heatmap = []\n img=self.find_cars(test_img, return_image=True)\n print('Time prediction: ', time.time()-t)\n fig.add_subplot(len(tests), 2, idx)\n idx+=1\n plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n plt.axis('off')\n fig.add_subplot(len(tests), 2, idx)\n idx+=1\n plt.imshow(self.heatmap[-1], cmap='hot')\n plt.axis('off')\n plt.subplots_adjust(left=0.28, bottom=0.01, right=0.69, top=0.98, wspace=0.03, hspace=0.03)\n plt.savefig('./output_images/test_heatmap.png', dpi=400)\n plt.show()", "title": "" }, { "docid": "8f51986f12154b7f7f018265c5c9f3d4", "score": "0.60119885", "text": "def test(self):\n\n\t\ttotaltime = 0\n\t\tfiles = 0\n\n\t\ttp = 0\n\t\tfp = 0\n\t\ttn = 0\n\t\tfn = 0\n\n\t\tself.addr = \"http://\" + self.helpers.credentials[\"server\"][\"ip\"] + \\\n\t\t\t':'+str(self.helpers.credentials[\"server\"][\"port\"]) + '/Inference'\n\t\tself.headers = {'content-type': 'image/jpeg'}\n\n\t\tfor testFile in os.listdir(self.model.testing_dir):\n\t\t\tif os.path.splitext(testFile)[1] in self.model.valid:\n\n\t\t\t\tstart = time.time()\n\t\t\t\tprediction = self.request(self.model.testing_dir + \"/\" + testFile)\n\t\t\t\tprint(prediction)\n\t\t\t\tend = time.time()\n\t\t\t\tbenchmark = end - start\n\t\t\t\ttotaltime += benchmark\n\n\t\t\t\tmsg = \"\"\n\t\t\t\tstatus = \"\"\n\t\t\t\toutcome = \"\"\n\n\t\t\t\tif prediction[\"Diagnosis\"] == \"Positive\" and \"Non-Covid\" in testFile:\n\t\t\t\t\tfp += 1\n\t\t\t\t\tstatus = \"incorrectly\"\n\t\t\t\t\toutcome = \"(False Positive)\"\n\t\t\t\telif prediction[\"Diagnosis\"] == \"Negative\" and \"Non-Covid\" in testFile:\n\t\t\t\t\ttn += 1\n\t\t\t\t\tstatus = \"correctly\"\n\t\t\t\t\toutcome = \"(True Negative)\"\n\t\t\t\telif prediction[\"Diagnosis\"] == \"Positive\" and \"Covid\" in testFile:\n\t\t\t\t\ttp += 1\n\t\t\t\t\tstatus = \"correctly\"\n\t\t\t\t\toutcome = \"(True Positive)\"\n\t\t\t\telif prediction[\"Diagnosis\"] == \"Negative\" and \"Covid\" in testFile:\n\t\t\t\t\tfn += 1\n\t\t\t\t\tstatus = \"incorrectly\"\n\t\t\t\t\toutcome = \"(False Negative)\"\n\n\t\t\t\tfiles += 1\n\t\t\t\tself.helpers.logger.info(\"COVID-19 xDNN Classifier \" + status +\n\t\t\t\t\t\t\t\t\t\" detected \" + outcome + \" in \" + str(benchmark) + \" seconds.\")\n\n\t\tself.helpers.logger.info(\"Images Classified: \" + str(files))\n\t\tself.helpers.logger.info(\"True Positives: \" + str(tp))\n\t\tself.helpers.logger.info(\"False Positives: \" + str(fp))\n\t\tself.helpers.logger.info(\"True Negatives: \" + 
str(tn))\n\t\tself.helpers.logger.info(\"False Negatives: \" + str(fn))\n\t\tself.helpers.logger.info(\"Total Time Taken: \" + str(totaltime))", "title": "" }, { "docid": "6a49817fea99179942d072738408b915", "score": "0.60117257", "text": "def classify(image: Img, classifier: callable):\n\treturn classifier(image.arr1d)", "title": "" }, { "docid": "9497acef7cfac114ef930578ebafe53d", "score": "0.6011085", "text": "def predict(self, instances, **kwargs):\n imgs = map(self.decode_img, instances)\n inputs = np.asarray(imgs)\n outputs = self._model.predict(imgs)\n\n # If outputs are not in same order as inputs, consider that TF likely\n # runs things in parallel for multi-input???\n\n return [self.threshold(output) for output in outputs]", "title": "" }, { "docid": "1aea73709799f838a213d8a1cee919e6", "score": "0.5998706", "text": "def generate_images_pred(self, inputs, outputs):\n for scale in self.opt.scales:\n disp = outputs[(\"disp\", scale)]\n disp = F.interpolate(disp, [self.opt.height, self.opt.width], mode=\"bilinear\", align_corners=False)\n _, depth = disp_to_depth(disp, self.opt.min_depth, self.opt.max_depth)\n outputs[(\"depth\", scale)] = depth * 5.4\n\n # Compute Reprojection loss\n occmaskrec = dict()\n for k in inputs['imgr'].keys():\n if k == 0:\n continue\n occmaskrec[k] = (self.sod(inputs['intrinsic'], inputs['poser'][k], outputs[(\"depth\", 0)], float(1e10)) == 0).float()\n outputs['occmaskrec'] = occmaskrec\n\n # Compute Reconstruction image\n for scale in self.opt.scales:\n outputs['reconImgs', scale] = self.reconimg(inputs['imgr'], outputs[(\"depth\", scale)], inputs['intrinsic'], inputs['poser'])\n\n return outputs", "title": "" }, { "docid": "59ed4bd69fa8387dba2c91593270cde1", "score": "0.5981367", "text": "def matching_batch(\n iou_fn: Callable[[np.ndarray, np.ndarray], np.ndarray], \n iou_thresholds: Sequence[float], pred_boxes: Sequence[np.ndarray],\n pred_classes: Sequence[np.ndarray], pred_scores: Sequence[np.ndarray],\n gt_boxes: Sequence[np.ndarray], gt_classes: Sequence[np.ndarray],\n gt_ignore: Sequence[Sequence[bool]], max_detections: int = 100,\n ) -> List[Dict[int, Dict[str, np.ndarray]]]:\n results = []\n # iterate over images/batches\n for pboxes, pclasses, pscores, gboxes, gclasses, gignore in zip(\n pred_boxes, pred_classes, pred_scores, gt_boxes, gt_classes, gt_ignore):\n img_classes = np.union1d(pclasses, gclasses)\n result = {} # dict contains results for each class in one image\n for c in img_classes:\n pred_mask = pclasses == c # mask predictions with current class\n gt_mask = gclasses == c # mask ground trtuh with current class\n\n if not np.any(gt_mask): # no ground truth\n result[c] = _matching_no_gt(\n iou_thresholds=iou_thresholds,\n pred_scores=pscores[pred_mask],\n max_detections=max_detections)\n elif not np.any(pred_mask): # no predictions\n result[c] = _matching_no_pred(\n iou_thresholds=iou_thresholds,\n gt_ignore=gignore[gt_mask],\n )\n else: # at least one prediction and one ground truth\n result[c] = _matching_single_image_single_class(\n iou_fn=iou_fn,\n pred_boxes=pboxes[pred_mask],\n pred_scores=pscores[pred_mask],\n gt_boxes=gboxes[gt_mask],\n gt_ignore=gignore[gt_mask],\n max_detections=max_detections,\n iou_thresholds=iou_thresholds,\n )\n results.append(result)\n return results", "title": "" }, { "docid": "128a24d7a2976c59240fff985d365c36", "score": "0.59797955", "text": "def forward(self, outputs, processed_sizes, target_sizes=None):\n if target_sizes is None:\n target_sizes = processed_sizes\n assert len(processed_sizes) == 
len(target_sizes)\n out_logits, raw_masks, raw_boxes = outputs['pred_logits'], outputs['pred_masks'], outputs['pred_boxes']\n assert len(out_logits) == len(raw_masks) == len(target_sizes)\n preds = []\n\n def to_tuple(tup):\n if isinstance(tup, tuple):\n return tup\n return tuple(tup.cpu().tolist())\n for cur_logits, cur_masks, cur_boxes, size, target_size in zip(out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes):\n scores, labels = cur_logits.softmax(-1).max(-1)\n keep = labels.ne(outputs['pred_logits'].shape[-1] - 1) & (scores > self.threshold)\n cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)\n cur_scores = cur_scores[keep]\n cur_classes = cur_classes[keep]\n cur_masks = cur_masks[keep]\n cur_masks = interpolate(cur_masks[None], to_tuple(size), mode='bilinear').squeeze(0)\n cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])\n h, w = cur_masks.shape[-2:]\n assert len(cur_boxes) == len(cur_classes)\n cur_masks = cur_masks.flatten(1)\n stuff_equiv_classes = defaultdict(lambda : [])\n for k, label in enumerate(cur_classes):\n if not self.is_thing_map[label.item()]:\n stuff_equiv_classes[label.item()].append(k)\n\n def get_ids_area(masks, scores, dedup=False):\n m_id = masks.transpose(0, 1).softmax(-1)\n if m_id.shape[-1] == 0:\n m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)\n else:\n m_id = m_id.argmax(-1).view(h, w)\n if dedup:\n for equiv in stuff_equiv_classes.values():\n if len(equiv) > 1:\n for eq_id in equiv:\n m_id.masked_fill_(m_id.eq(eq_id), equiv[0])\n final_h, final_w = to_tuple(target_size)\n seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))\n seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)\n np_seg_img = torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()\n m_id = torch.from_numpy(rgb2id(np_seg_img))\n area = []\n for i in range(len(scores)):\n area.append(m_id.eq(i).sum().item())\n return area, seg_img\n area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)\n if cur_classes.numel() > 0:\n while True:\n filtered_small = torch.as_tensor([(area[i] <= 4) for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device)\n if filtered_small.any().item():\n cur_scores = cur_scores[~filtered_small]\n cur_classes = cur_classes[~filtered_small]\n cur_masks = cur_masks[~filtered_small]\n area, seg_img = get_ids_area(cur_masks, cur_scores)\n else:\n break\n else:\n cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)\n segments_info = []\n for i, a in enumerate(area):\n cat = cur_classes[i].item()\n segments_info.append({'id': i, 'isthing': self.is_thing_map[cat], 'category_id': cat, 'area': a})\n del cur_classes\n with io.BytesIO() as out:\n seg_img.save(out, format='PNG')\n predictions = {'png_string': out.getvalue(), 'segments_info': segments_info}\n preds.append(predictions)\n return preds", "title": "" }, { "docid": "1ef887b0bf7877ca1b96c62bd65d89e3", "score": "0.5957035", "text": "def classify(self, source1, source2):\n \n scores1 = source1.similarity_scores(self)\n scores2 = source2.similarity_scores(self)\n \n print('scores for ' +source1.name+ ':', scores1)\n print('scores for ' +source2.name+ ':', scores2)\n print()\n \n sum1 = 0\n sum2 = 0\n total1 = 0\n total2 = 0\n weights = [10,7,5,7,1,3]\n for i in range(len(scores1)):\n if scores1[i] > scores2[i]:\n sum1 += 1\n else:\n sum2 += 1\n total1 += weights[i]*scores1[i]\n total2 += weights[i]*scores2[i]\n \n if sum1 > sum2:\n print(self.name + ' is more 
likely to come from ' + source1.name)\n print(sum1,sum2)\n print()\n elif sum2 > sum1:\n print(self.name + ' is more likely to come from ' + source2.name)\n print(sum1,sum2)\n print()\n else:\n if total1 > total2:\n print(self.name + ' is more likely to come from ' + source1.name)\n print(total1,total2)\n print()\n else:\n print(self.name + ' is more likely to come from ' + source2.name)\n print(total1,total2)\n print()", "title": "" }, { "docid": "2fddec0a4c39982b575b90f73d4787fe", "score": "0.59546", "text": "def classify_photo(pic=None, destination=None):\n if pic is not None:\n img = open_image(pic)\n else:\n img = open_image(destination)\n classifier_path = \"app/models/cnn_classifier/\"\n classifier = load_learner(classifier_path)\n pred_class, pred_idx, outputs = classifier.predict(img)\n\n if max(outputs) < 0.92:\n\n # if destination is not None:\n # img_data = cv2.imread(destination)\n # else:\n # img_data = cv2.imread(pic)\n\n # with global_graph.as_default():\n # img_paths = detector.predict(img_data, destination)\n # img_paths.append(destination)\n\n # classifier_outputs = []\n\n # for path in img_paths:\n # img = open_image(path)\n # pred_class, pred_idx, outputs = classifier.predict(img)\n # classifier_outputs.append(\n # [max(outputs), pred_class, pred_idx, outputs])\n # classifier_outputs.sort(key=lambda x: x[0], reverse=True)\n\n # pred_class = classifier_outputs[0][1]\n # pred_idx = classifier_outputs[0][2]\n # outputs = classifier_outputs[0][3]\n\n return pred_class, pred_idx, outputs\n\n return pred_class, pred_idx, outputs", "title": "" }, { "docid": "856a51177793c543f0e6030c0663c678", "score": "0.595266", "text": "def check_classification_targets(y):\n ...", "title": "" }, { "docid": "20911ed85bc9ebf34fc364567400be7f", "score": "0.5915451", "text": "def image_preprocessing():\n\t# generate tags for images, if they do not exis\tt\n\tif not os.path.isfile(images_folder + \"/train_tags.csv\"):\n\t\tprint(\"Extracting tags...\")\n\t\tio.extract_tags(raw_images_folder + \"/train\",images_folder,\"train_\")\n\t\tprint(\"Done!\\n\")\n\tif not os.path.isfile(images_folder + \"/val_tags.csv\"):\n\t\tprint(\"Extracting tags...\")\n\t\tio.extract_tags(raw_images_folder + \"/validation\", images_folder,\"val_\")\n\t\tprint(\"Done!\\n\")\n\n\t# resize the images to a particular size, if not already done\n\t# Let's assume its already done if a particular directory \n\t# has images in it...\n\t'''if len(os.listdir(resized_images_folder)) == 0:\n\t\tprint(\"Resizing images...this could take awhile!\\n\")\n\t\tio.resize_images(raw_images_folder,resized_images_folder,resize_dimensions)\n\t'''\n\tif(len(os.listdir(transformed_images_folder + \"/train\")) + len(os.listdir(transformed_images_folder + \"/validation\")) > 0):\t\n\t\tprint(\"clearing out previously transformed images\")\t\n\t\tos.system(\"rm ./data/transformed/train/* ./data/transformed/validation/*\")\n\t\t\n\t# generate X and Y, in the format keras needs for the model\n\t# and return\n\t#return io.partition_image_data(resized_images_folder,(150,150),3)\n\tprint(\"obtaining model inputs\")\n\treturn io.format_data(resize_dimensions, 3)", "title": "" }, { "docid": "8382faa30e2d300c3d951dad0930a0ba", "score": "0.5909281", "text": "def compute_class_weights(image_files, label_values):\n\n # image_files = [os.path.join(labels_dir, file) for file in os.listdir(labels_dir) if file.endswith('.png')]\n num_classes = len(label_values)\n class_pixels = np.zeros(num_classes)\n\n total_pixels = 0.0\n\n for n in 
range(len(image_files)):\n image = imread(image_files[n])\n\n for index, colour in enumerate(label_values):\n class_map = np.all(np.equal(image, colour), axis=-1)\n class_map = class_map.astype(np.float32)\n class_pixels[index] += np.sum(class_map)\n\n print(\"\\rProcessing image: \" + str(n) + \" / \" + str(len(image_files)), end=\"\")\n sys.stdout.flush()\n\n total_pixels = float(np.sum(class_pixels))\n index_to_delete = np.argwhere(class_pixels == 0.0)\n class_pixels = np.delete(class_pixels, index_to_delete)\n\n class_weights = total_pixels / class_pixels\n class_weights = class_weights / np.sum(class_weights)\n\n return class_weights", "title": "" }, { "docid": "8be6173f0a97bc23d9661ba1438c9277", "score": "0.5903825", "text": "def few_label_classification(self, list_num_shots=[1, 10, 100, -1], num_runs=10, batch_size=1000, classifier='logistic'):\n if not os.path.exists('results/few_label/'):\n os.makedirs('results/few_label/')\n results = {'Num_Shots': [], 'Mean': [], 'Std': []}\n train_loader, im_h, im_w, im_channels = setup_data_loader(data=self.data, \n data_dir=self.data_dir, \n batch_size=batch_size, \n train=True, \n normalize=False if self.model_name in ['VAE', 'VAE_GMM'] else True, \n shuffle=False, \n shot_random_seed=None)\n zs_train, ys_train = self.encode_dataset(train_loader)\n \n test_loader, im_h, im_w, im_channels = setup_data_loader(data=self.data, \n data_dir=self.data_dir, \n batch_size=batch_size, \n train=False, \n normalize=False if self.model_name in ['VAE', 'VAE_GMM'] else True, \n shuffle=False, \n shot_random_seed=None)\n zs_test, ys_test = self.encode_dataset(test_loader)\n \n num_classes = len(np.unique(ys_train))\n \n for num_shots in tqdm(list_num_shots):\n Accuracy = []\n if num_shots == -1:\n clf = LogisticRegression(random_state=0, \n multi_class='auto', \n solver='liblinear', \n max_iter=10000).fit(zs_train, ys_train)\n Accuracy.append(np.array([clf.score(zs_test, ys_test)])) \n else:\n for i in tqdm(range(num_runs)):\n # torch.cuda.empty_cache()\n zs_train_selected = []\n ys_train_selected = []\n set_seed(i)\n for k in range(num_classes):\n indk = np.argwhere(ys_train == k)[:,0]\n ind_of_indk = np.random.permutation(len(indk))[:num_shots]\n indk_selected = indk[ind_of_indk]\n zs_train_selected.append(zs_train[indk_selected])\n ys_train_selected.append(ys_train[indk_selected])\n zs_train_selected = np.concatenate(zs_train_selected, 0)\n ys_train_selected = np.concatenate(ys_train_selected, 0)\n\n# print(indk_selected)\n \n clf = LogisticRegression(random_state=0, \n multi_class='auto', \n solver='liblinear', \n max_iter=10000).fit(zs_train_selected, ys_train_selected)\n Accuracy.append(np.array([clf.score(zs_test, ys_test)])) \n# return 0\n Accuracy = np.concatenate(Accuracy)\n results['Num_Shots'].append(num_shots)\n results['Mean'].append(Accuracy.mean())\n results['Std'].append(Accuracy.std())\n pd.DataFrame.from_dict(results).to_csv('results/few_label/{}-{}-runs={}.csv'.format(self.model_name, self.data, num_runs), index=False)\n return results", "title": "" }, { "docid": "948198389e8b010b65ed4eb1e6d7e23a", "score": "0.58989424", "text": "def process_outputs(self, outputs, image_size):\n boxes = []\n confidences = []\n class_proba = []\n img_H = image_size[0]\n img_W = image_size[1]\n for output in outputs:\n boxes.append(output[..., 0:4])\n confidences.append(self.sigmoid(output[..., 4, np.newaxis]))\n class_proba.append(self.sigmoid(output[..., 5:]))\n for i, box in enumerate(boxes):\n g_h, g_w, achors_box, _ = box.shape\n coordidate = 
np.zeros((g_h, g_w, achors_box))\n idx_y = np.arange(g_h)\n idx_y = idx_y.reshape(g_h, 1, 1)\n idx_x = np.arange(g_w)\n idx_x = idx_x.reshape(1, g_w, 1)\n C_x = coordidate + idx_x\n C_y = coordidate + idx_y\n centerX = box[..., 0]\n centerY = box[..., 1]\n width = box[..., 2]\n height = box[..., 3]\n bx = (self.sigmoid(centerX) + C_x) / g_w\n by = (self.sigmoid(centerY) + C_y) / g_h\n pw = self.anchors[i, :, 0]\n ph = self.anchors[i, :, 1]\n bw = (np.exp(width) * pw) / self.model.input.shape[1].value\n bh = (np.exp(height) * ph) / self.model.input.shape[2].value\n x1 = bx - bw / 2\n y1 = by - bh / 2\n x2 = x1 + bw\n y2 = y1 + bh\n box[..., 0] = x1 * img_W\n box[..., 1] = y1 * img_H\n box[..., 2] = x2 * img_W\n box[..., 3] = y2 * img_H\n return boxes, confidences, class_proba", "title": "" }, { "docid": "5573952e335fb188d5885ee893550fc4", "score": "0.5890267", "text": "def forward(self, batched_inputs):\n images = self.preprocess_image(batched_inputs)\n if self.training:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_ones(batch_size, H, W)\n for img_id in range(batch_size):\n img_h, img_w = batched_inputs[img_id]['instances'].image_size\n img_masks[img_id, :img_h, :img_w] = 0\n else:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_zeros(batch_size, H, W)\n features = self.backbone(images.tensor)[self.in_features[-1]]\n features = self.input_proj(features)\n img_masks = F.interpolate(img_masks[None], size=features.shape[-2:])[0]\n pos_embed = self.position_embedding(img_masks)\n hidden_states, _ = self.transformer(features, img_masks, self.query_embed.weight, pos_embed)\n outputs_class = self.class_embed(hidden_states)\n outputs_coord = self.bbox_embed(hidden_states).sigmoid()\n output = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}\n if self.aux_loss:\n output['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)\n if self.training:\n gt_instances = [x['instances'] for x in batched_inputs]\n targets = self.prepare_targets(gt_instances)\n loss_dict = self.criterion(output, targets)\n weight_dict = self.criterion.weight_dict\n for k in loss_dict.keys():\n if k in weight_dict:\n loss_dict[k] *= weight_dict[k]\n return loss_dict\n else:\n box_cls = output['pred_logits']\n box_pred = output['pred_boxes']\n results = self.inference(box_cls, box_pred, images.image_sizes)\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes):\n height = input_per_image.get('height', image_size[0])\n width = input_per_image.get('width', image_size[1])\n r = detector_postprocess(results_per_image, height, width)\n processed_results.append({'instances': r})\n return processed_results", "title": "" }, { "docid": "cf45f6d088fd003c6b52bf2faa15c5d7", "score": "0.58813107", "text": "def classify(model, images):\n probabilities = model.predict(images)\n labels = np.argmax(probabilities, axis=1)\n\n # Load the label encoder\n encoder_dir = \\\n Path(__file__).absolute().parents[2].joinpath('generated_data',\n 'encoders')\n encoder_path = encoder_dir.joinpath('encoder.pkl')\n\n with encoder_path.open('rb') as f:\n label_encoder = pickle.load(f)\n\n labels = label_encoder.inverse_transform(labels)\n\n return labels, probabilities", "title": "" }, { "docid": "2098c9d59de4b26efd20cedc8ab5d1f9", "score": "0.58700335", "text": "def _compute_targets(entry):\n # Indices of ground-truth ROIs\n rois = entry['boxes']\n overlaps = entry['max_overlaps']\n labels = 
entry['max_classes']\n gt_inds = np.where((entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]\n # Targets has format (class, tx, ty, tw, th)\n targets = np.zeros((rois.shape[0], 5), dtype=np.float32)\n if len(gt_inds) == 0:\n # Bail if the image has no ground-truth ROIs\n return targets\n\n # Indices of examples for which we try to make predictions\n ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]\n\n # Get IoU overlap between each ex ROI and gt ROI\n ex_gt_overlaps = box_utils.bbox_overlaps(\n rois[ex_inds, :].astype(dtype=np.float32, copy=False),\n rois[gt_inds, :].astype(dtype=np.float32, copy=False))\n\n # Find which gt ROI each ex ROI has max overlap with:\n # this will be the ex ROI's gt target\n gt_assignment = ex_gt_overlaps.argmax(axis=1)\n gt_rois = rois[gt_inds[gt_assignment], :]\n ex_rois = rois[ex_inds, :]\n # Use class \"1\" for all boxes if using class_agnostic_bbox_reg\n targets[ex_inds, 0] = (1 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else labels[ex_inds])\n targets[ex_inds, 1:] = box_utils.bbox_transform_inv(ex_rois, gt_rois, cfg.MODEL.BBOX_REG_WEIGHTS)\n return targets", "title": "" }, { "docid": "1c2edebd92e2553520422621b3530a15", "score": "0.5838647", "text": "def forward(self, batched_inputs):\n images = self.preprocess_image(batched_inputs)\n if self.training:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_ones(batch_size, H, W)\n for img_id in range(batch_size):\n img_h, img_w = batched_inputs[img_id]['instances'].image_size\n img_masks[img_id, :img_h, :img_w] = 0\n else:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_zeros(batch_size, H, W)\n features = self.backbone(images.tensor)[self.in_features[-1]]\n features = self.input_proj(features)\n img_masks = F.interpolate(img_masks[None], size=features.shape[-2:])[0]\n pos_embed = self.position_embedding(img_masks)\n hidden_states, reference = self.transformer(features, img_masks, self.query_embed.weight, pos_embed)\n reference_before_sigmoid = inverse_sigmoid(reference)\n outputs_coords = []\n for lvl in range(hidden_states.shape[0]):\n tmp = self.bbox_embed(hidden_states[lvl])\n tmp[..., :2] += reference_before_sigmoid\n outputs_coord = tmp.sigmoid()\n outputs_coords.append(outputs_coord)\n outputs_coord = torch.stack(outputs_coords)\n outputs_class = self.class_embed(hidden_states)\n output = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}\n if self.aux_loss:\n output['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)\n if self.training:\n gt_instances = [x['instances'] for x in batched_inputs]\n targets = self.prepare_targets(gt_instances)\n loss_dict = self.criterion(output, targets)\n weight_dict = self.criterion.weight_dict\n for k in loss_dict.keys():\n if k in weight_dict:\n loss_dict[k] *= weight_dict[k]\n return loss_dict\n else:\n box_cls = output['pred_logits']\n box_pred = output['pred_boxes']\n results = self.inference(box_cls, box_pred, images.image_sizes)\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes):\n height = input_per_image.get('height', image_size[0])\n width = input_per_image.get('width', image_size[1])\n r = detector_postprocess(results_per_image, height, width)\n processed_results.append({'instances': r})\n return processed_results", "title": "" }, { "docid": "753a71ea40064c5a62b82727b8fe906c", "score": "0.58297926", "text": "def visualize_detections(images, ground_truths, detections, id2code,\n label_codes, 
label_names, out_dir='/tmp'):\n max_id = max(id2code.values())\n name_range = range(len(label_names))\n\n for i in range(0, np.shape(detections)[0]):\n fig = plt.figure(figsize=(17, 17))\n\n # original image\n ax1 = fig.add_subplot(2, 2, 1)\n # TODO: expect also other data than S2\n a = np.stack((images[i][:, :, 3], images[i][:, :, 2],\n images[i][:, :, 1]), axis=2)\n ax1.imshow((255 / a.max() * a).astype(np.uint8))\n ax1.title.set_text('Actual image')\n\n # ground truths\n ax3 = fig.add_subplot(2, 2, 3)\n ax3.set_title('Ground truth labels')\n gt_labels = ground_truths[i]\n gt_labels = onehot_decode(gt_labels, id2code)\n ax3.imshow(gt_labels * 4)\n\n # detections\n ax4 = fig.add_subplot(2, 2, 4)\n ax4.set_title('Predicted labels')\n pred_labels = onehot_decode(detections[i], id2code)\n ax4.imshow(pred_labels * 4)\n\n # confusion matrix\n ax2 = fig.add_subplot(2, 2, 2)\n ax2.set_title('Confusion matrix')\n conf_matrix = confusion_matrix(\n gt_labels[:, :, 0].flatten(), pred_labels[:, :, 0].flatten(),\n max_id + 1)\n # subset to existing classes\n conf_matrix = conf_matrix.numpy()[label_codes][:, label_codes]\n # normalize the confusion matrix\n row_sums = conf_matrix.sum(axis=1)[:, np.newaxis]\n # TODO: solve division by 0\n cm_norm = np.around(conf_matrix.astype('float') / row_sums, decimals=2)\n # visualize\n ax2.imshow(cm_norm, cmap=plt.cm.Blues)\n y_labels = ['{}\\n{}'.format(label_names[j], row_sums[j]) for j in\n name_range]\n plt.xticks(name_range, label_names)\n plt.yticks(name_range, y_labels)\n plt.xlabel('Predicted label')\n plt.ylabel('True label')\n # write percentage values (0.00 -- 1.00) into the confusion matrix\n threshold = cm_norm.max() / 2. # used to decide for the font colour\n for row in range(len(conf_matrix)):\n for col in range(len(conf_matrix)):\n if cm_norm[col, row] > threshold:\n colour = 'white'\n else:\n colour = 'black'\n # TODO: class names, not codes\n ax2.text(row, col, cm_norm[col, row], color=colour,\n horizontalalignment='center')\n\n plt.savefig(os.path.join(out_dir, str(i)))\n plt.close()", "title": "" }, { "docid": "98b5eeb8aba439c41040b45c0c28aad7", "score": "0.58281577", "text": "def classify():\n classifier = load_model(\"kerasmodel.h5\")\n im = cv2.imread(\"result.png\", 0)\n im2 = cv2.resize(im, (28, 28))\n im = im2.reshape(28, 28, -1)\n im = im.reshape(1, 784)\n im = cv2.bitwise_not(im)\n plt.imshow(im.reshape(28, 28), cmap='Greys')\n result = classifier.predict(im)\n a = np.argmax(result)\n return a", "title": "" }, { "docid": "54b57f9eeb41d312252fbf8a69bb72d9", "score": "0.58160615", "text": "def classify(self, data, features, target, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "88a4a74128dce86df26c0e1d336c08ad", "score": "0.5811792", "text": "def classify(labelled_files, unlabelled_files, labels):\n # Extract features using CountVectorizer (Bag of Words)\n count_vect = CountVectorizer()\n counts = count_vect.fit_transform(labelled_files)\n\n # Fit classifier to training data (user labels)\n clf = LogisticRegression().fit(counts, labels)\n\n # Transform CountVectorizer to unlabelled files\n new_counts = count_vect.transform(unlabelled_files)\n\n # Run prediction on new counts\n predicted_labels = clf.predict(new_counts)\n probabilities = clf.predict_proba(new_counts)\n\n # Reduce probabilities to only show for predicted label\n probabilities = np.amax(probabilities, axis=1)\n\n return predicted_labels, probabilities", "title": "" }, { "docid": "d3a789a6339e5f1b97dbb45d1c654cf3", "score": "0.5810821", "text": "def 
classify(self, elements):\r\n if elements is None:\r\n return\r\n for element in elements:\r\n was_classified = False\r\n image_added = False\r\n for op in self.ops():\r\n if op.operation == \"Raster\":\r\n if image_added:\r\n continue # already added to an image operation, is not added here.\r\n if element.stroke is not None and op.color == abs(element.stroke):\r\n op.append(element)\r\n was_classified = True\r\n elif isinstance(element, SVGImage):\r\n op.append(element)\r\n was_classified = True\r\n elif isinstance(element, SVGText):\r\n op.append(element)\r\n was_classified = True\r\n elif element.fill is not None and element.fill.value is not None:\r\n op.append(element)\r\n was_classified = True\r\n elif op.operation in (\"Engrave\", \"Cut\") and element.stroke is not None and \\\r\n op.color == abs(element.stroke):\r\n op.append(element)\r\n was_classified = True\r\n elif op.operation == 'Image' and isinstance(element, SVGImage):\r\n op.append(element)\r\n was_classified = True\r\n image_added = True\r\n if not was_classified:\r\n if element.stroke is not None and element.stroke.value is not None:\r\n op = LaserOperation(operation=\"Engrave\", color=element.stroke, speed=35.0)\r\n op.append(element)\r\n self.add_op(op)", "title": "" }, { "docid": "fbf7c0c480361a30f46110b320853329", "score": "0.58092415", "text": "def predict(self, images):\n\n molded_images, image_metas = mold_inputs(images, self.config)\n\n # Convert to Tensor\n molded_images = torch.from_numpy(molded_images).float().to(self.device)\n\n # Features Extraction\n features = self.extractor(molded_images)\n\n print(\"features\", features.size())\n\n # RPN\n rpn_logits, rpn_probs, rpn_bbox = self.rpn(features)\n\n print(\"rpn_probs\", rpn_probs.size(), \"rpn_bbox\", rpn_bbox.size())\n\n # Perform proposal_layer on each image in batch\n n_batch = features.size()[0]\n\n for i in range(n_batch):\n print(\"batch\", i, rpn_probs[i].size(), rpn_bbox[i].size())\n # proposal_layer\n\n # ROI Pooling / ROI Align\n\n # Classification", "title": "" }, { "docid": "1f917eea7c9833496c8df885a215d720", "score": "0.5807556", "text": "def pt_cls_preprocess(self, images):\n mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1)).astype(np.float32)\n std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1)).astype(np.float32)\n output = []\n for image in images:\n for param in self.xform:\n param_0 = param[0]\n param_1 = param[1]\n if param_0 == \"resize\":\n size = param_1\n # image = cv2.resize(image, (size[1], size[0]), \\\n # interpolation=cv2.INTER_LINEAR)\n # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n org_h, org_w = image.shape[:2]\n scale = 256.0 / min(org_h, org_w)\n resize_h, resize_w = int(scale * org_h), int(scale * org_w)\n image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n image = np.asarray(image.resize((resize_w, resize_h), \\\n resample=Image.BILINEAR))\n # image = cv2.resize(image, (tw, th), interpolation=cv2.INTER_LINEAR)\n offset_h, offset_w = (resize_h - size[0]) // 2, (resize_w - size[1]) // 2\n image = image[offset_h:offset_h + size[0], offset_w:offset_w + size[1]]\n image = np.transpose(image, (2, 0, 1)).astype(np.float32)\n image /= 255.0\n image = (image - mean) / std\n output.append(image)\n return np.array(output)", "title": "" }, { "docid": "7316ffc702978a09d7e85f78e2ba52a7", "score": "0.5806382", "text": "def process_batch(self, inputs):\n for key, ipt in inputs.items():\n if key == 'tag':\n continue\n elif key == 'imgr' or key == 'poser':\n for k, kipt in inputs[key].items():\n 
inputs[key][k] = kipt.to(self.device)\n else:\n inputs[key] = ipt.to(self.device)\n\n features = self.models[\"encoder\"](inputs['inrgb_augmented'])\n outputs = self.models[\"depth\"](features)\n\n outputs = self.generate_images_pred(inputs, outputs)\n losses = self.compute_losses(inputs, outputs)\n\n return outputs, losses", "title": "" }, { "docid": "1e3934bf7cc91231733ff198af2287b4", "score": "0.5800077", "text": "def prepare_targets(self, batch_inputs):\n images, bboxes, labels, points, has_pts, instance_mask = batch_inputs\n h, w = images[0].shape[1:]\n image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)\n target_images = images.to(self.device)\n target_classes = labels.to(self.device)\n # normalize bboxes with wdith and height and convert the x1, y1, x2, y2 to centerx, centery, w, h\n target_boxes = bboxes / image_size_xyxy\n target_boxes_cxcywh = box_xyxy_to_cxcywh(target_boxes).to(self.device)\n # x1, y1, x2, y2 bboxes\n bboxes = bboxes.to(self.device)\n # images w, h, w, h tensor shape -> (batch, Num)\n target_image_size_xyxy = torch.stack([torch.cat([image_size_xyxy]) for _ in range(images.shape[0])], dim=0)\n target_image_size_xyxy = target_image_size_xyxy.to(self.device)\n # instance mask\n target_instance_mask = instance_mask.to(self.device)\n if points[0] is not None:\n target_points = points.to(self.device)\n else:\n target_points = points\n\n if has_pts[0] is not None:\n target_has_pts = has_pts.to(self.device)\n else:\n target_has_pts = has_pts\n\n targets = {}\n targets['images'] = target_images\n targets['labels'] = target_classes\n targets['bboxes'] = target_boxes_cxcywh # (centerx, centery, w, h)\n targets['boxes_xyxy'] = bboxes # (x1, y1, x2, y2)\n targets['image_size_xyxy'] = target_image_size_xyxy\n targets['pts'] = target_points\n targets['pts_mask'] = has_pts\n targets['instance_mask'] = target_instance_mask\n\n return targets", "title": "" }, { "docid": "bbf3e95b916face8d17ca00b50a7ce8f", "score": "0.5798492", "text": "def classify_input(wildcards, return_int=False):\n if check_config('truth') and check_config(wildcards.sample, place=config['truth']):\n image_ending = \"/{image}.tsv\" if check_config('parallel') else ''\n if check_config('train_all', place=config['truth'][wildcards.sample]):\n if return_int:\n return 1\n return [\n checkpoints.create_truth_data.get(**wildcards).output[0]+image_ending,\n rules.train.output[0]\n ]\n if check_config('model'):\n if return_int:\n return 2\n return [\n checkpoints.create_truth_data.get(**wildcards).output[0]+image_ending,\n config['model']\n ]\n else:\n if return_int:\n return 3\n return [\n checkpoints.create_split_truth_data.get(**wildcards).output.test+image_ending,\n rules.train.output[0]\n ]\n else:\n if return_int:\n return 0\n if check_config('model'):\n return [rules.extract_features.output[0], config['model']]\n else:\n raise ValueError(\"If you don't specify any truth sets, you must provide a pre-trained model.\")", "title": "" }, { "docid": "33e267c0a4ea7eaa5ceebe7be7076767", "score": "0.5786576", "text": "def forward(self, batched_inputs):\n images = self.preprocess_image(batched_inputs)\n if self.training:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_ones(batch_size, H, W)\n for img_id in range(batch_size):\n img_h, img_w = batched_inputs[img_id]['instances'].image_size\n img_masks[img_id, :img_h, :img_w] = 0\n else:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_zeros(batch_size, H, W)\n features = 
self.backbone(images.tensor)[self.in_features[-1]]\n features = self.input_proj(features)\n img_masks = F.interpolate(img_masks[None], size=features.shape[-2:])[0]\n pos_embed = self.position_embedding(img_masks)\n dynamic_anchor_boxes = self.anchor_box_embed.weight\n hidden_states, reference_boxes = self.transformer(features, img_masks, dynamic_anchor_boxes, pos_embed)\n reference_boxes = inverse_sigmoid(reference_boxes)\n anchor_box_offsets = self.bbox_embed(hidden_states)\n outputs_coord = (reference_boxes + anchor_box_offsets).sigmoid()\n outputs_class = self.class_embed(hidden_states)\n output = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}\n if self.aux_loss:\n output['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)\n if self.training:\n gt_instances = [x['instances'] for x in batched_inputs]\n targets = self.prepare_targets(gt_instances)\n loss_dict = self.criterion(output, targets)\n weight_dict = self.criterion.weight_dict\n for k in loss_dict.keys():\n if k in weight_dict:\n loss_dict[k] *= weight_dict[k]\n return loss_dict\n else:\n box_cls = output['pred_logits']\n box_pred = output['pred_boxes']\n results = self.inference(box_cls, box_pred, images.image_sizes)\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes):\n height = input_per_image.get('height', image_size[0])\n width = input_per_image.get('width', image_size[1])\n r = detector_postprocess(results_per_image, height, width)\n processed_results.append({'instances': r})\n return processed_results", "title": "" }, { "docid": "c4eed7eac26f5a0035d5aea2098d3c08", "score": "0.5782558", "text": "def classify(self, X):", "title": "" }, { "docid": "d7adf2a8278f81d95f2c53608e0e6ef0", "score": "0.5781525", "text": "def postprocess_per_class(params, cls_outputs, box_outputs, image_scales=None):\n cls_outputs = to_list(cls_outputs)\n box_outputs = to_list(box_outputs)\n boxes, scores, classes = pre_nms(params, cls_outputs, box_outputs)\n return per_class_nms(params, boxes, scores, classes, image_scales)", "title": "" }, { "docid": "996d20b0cf818c8e254b2c8922fb02ac", "score": "0.577933", "text": "def run_inference(image_files):\n\t# Read image files, resize them, convert to numpy arrays w/ dtype=uint8\n\timages = []\n\tfor image_file in image_files:\n\t\timage = Image.open(image_file)\n\t\timage = image.convert('RGB')\n\t\timage = image.resize((IMG_SIZE, IMG_SIZE), Image.ANTIALIAS)\n\t\timage = np.array(list(image.getdata()), dtype='uint8')\n\t\timage = np.reshape(image, (32, 32, 3))\n\n\t\timages.append(image)\n\timages = np.array(images, dtype='uint8')\n\n\t# Pre-process the image (don't care about label, put dummy labels)\n\timages, _ = preprocess_data(images, np.array([0 for _ in range(images.shape[0])]))\n\n\twith tf.Graph().as_default(), tf.Session() as sess:\n\t\t# Instantiate the CNN model\n\t\tx, y, keep_prob, logits, optimizer, predictions, accuracy = neural_network()\n\n\t\t# Load trained weights\n\t\tsaver = tf.train.Saver()\n\t\tsaver.restore(sess, MODEL_SAVE_PATH)\n\n\t\t# Run inference on CNN to make predictions\n\t\tpreds = sess.run(predictions, feed_dict={x: images, keep_prob: 1.})\n\n\t# Load signnames.csv to map label number to sign string\n\tlabel_map = {}\n\twith open('signnames.csv', 'r') as f:\n\t\tfirst_line = True\n\t\tfor line in f:\n\t\t\t# Ignore first line\n\t\t\tif first_line:\n\t\t\t\tfirst_line = False\n\t\t\t\tcontinue\n\n\t\t\t# Populate label_map\n\t\t\tlabel_int, label_string = 
line.split(',')\n\t\t\tlabel_int = int(label_int)\n\n\t\t\tlabel_map[label_int] = label_string\n\n\tfinal_preds = [label_map[pred] for pred in preds]\n\n\treturn final_preds", "title": "" }, { "docid": "32851f705877c52bbbf48b0a2b7de63c", "score": "0.576555", "text": "def forward(self, batched_inputs):\n images = self.preprocess_image(batched_inputs)\n if self.training:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_ones(batch_size, H, W)\n for img_id in range(batch_size):\n img_h, img_w = batched_inputs[img_id]['instances'].image_size\n img_masks[img_id, :img_h, :img_w] = 0\n else:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_zeros(batch_size, H, W)\n features = self.backbone(images.tensor)[self.in_features[-1]]\n features = self.input_proj(features)\n img_masks = F.interpolate(img_masks[None], size=features.shape[-2:])[0]\n pos_embed = self.position_embedding(img_masks)\n if self.training:\n gt_instances = [x['instances'] for x in batched_inputs]\n targets = self.prepare_targets(gt_instances)\n gt_labels_list = [t['labels'] for t in targets]\n gt_boxes_list = [t['boxes'] for t in targets]\n else:\n targets = None\n matching_label_query = self.denoising_generator.label_encoder(torch.tensor(self.num_classes)).repeat(self.num_queries, 1)\n indicator_for_matching_part = torch.zeros([self.num_queries, 1])\n matching_label_query = torch.cat([matching_label_query, indicator_for_matching_part], 1).repeat(batch_size, 1, 1)\n matching_box_query = self.anchor_box_embed.weight.repeat(batch_size, 1, 1)\n if targets is None:\n input_label_query = matching_label_query.transpose(0, 1)\n input_box_query = matching_box_query.transpose(0, 1)\n attn_mask = None\n denoising_groups = self.denoising_groups\n max_gt_num_per_image = 0\n else:\n noised_label_queries, noised_box_queries, attn_mask, denoising_groups, max_gt_num_per_image = self.denoising_generator(gt_labels_list, gt_boxes_list)\n input_label_query = torch.cat([noised_label_queries, matching_label_query], 1).transpose(0, 1)\n input_box_query = torch.cat([noised_box_queries, matching_box_query], 1).transpose(0, 1)\n hidden_states, reference_boxes = self.transformer(features, img_masks, input_box_query, pos_embed, target=input_label_query, attn_mask=[attn_mask, None])\n reference_boxes = inverse_sigmoid(reference_boxes)\n anchor_box_offsets = self.bbox_embed(hidden_states)\n outputs_coord = (reference_boxes + anchor_box_offsets).sigmoid()\n outputs_class = self.class_embed(hidden_states)\n output = {'denoising_groups': torch.tensor(denoising_groups), 'max_gt_num_per_image': torch.tensor(max_gt_num_per_image)}\n outputs_class, outputs_coord = self.dn_post_process(outputs_class, outputs_coord, output)\n output.update({'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]})\n if self.aux_loss:\n output['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)\n if self.training:\n loss_dict = self.criterion(output, targets)\n weight_dict = self.criterion.weight_dict\n for k in loss_dict.keys():\n if k in weight_dict:\n loss_dict[k] *= weight_dict[k]\n return loss_dict\n else:\n box_cls = output['pred_logits']\n box_pred = output['pred_boxes']\n results = self.inference(box_cls, box_pred, images.image_sizes)\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes):\n height = input_per_image.get('height', image_size[0])\n width = input_per_image.get('width', image_size[1])\n r = 
detector_postprocess(results_per_image, height, width)\n processed_results.append({'instances': r})\n return processed_results", "title": "" }, { "docid": "c94c9f61562de9b0b532bd15dc023de4", "score": "0.5765359", "text": "def compute_classif_metrics(y_true, y_pred, metric_averages=METRIC_AVERAGES):\n y_true = np.array(y_true)\n y_pred = np.array(y_pred)\n if y_true.shape != y_pred.shape:\n raise ValueError('prediction (%i) and annotation (%i) should be equal' % (len(y_true), len(y_pred)))\n logging.debug('unique lbs true: %r, predict %r', np.unique(y_true), np.unique(y_pred))\n\n uq_labels = np.unique(np.hstack((y_true, y_pred)))\n # in case there are just two classes, relabel them as [0, 1], sklearn error:\n # \"ValueError: pos_label=1 is not a valid label: array([ 0, 255])\"\n if len(uq_labels) <= 2:\n # NOTE, this is temporal just for purposes of computing statistic\n y_true = relabel_sequential(y_true, uq_labels)\n y_pred = relabel_sequential(y_pred, uq_labels)\n\n # http://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html\n eval_str = 'EVALUATION: {:<2} PRE: {:.3f} REC: {:.3f} F1: {:.3f} S: {:>6}'\n try:\n p, r, f, s = metrics.precision_recall_fscore_support(y_true, y_pred)\n for lb, _ in enumerate(p):\n logging.debug(eval_str.format(lb, p[lb], r[lb], f[lb], s[lb]))\n except Exception:\n logging.exception('metrics.precision_recall_fscore_support')\n\n dict_metrics = {\n 'ARS': metrics.adjusted_rand_score(y_true, y_pred),\n # 'F1': metrics.f1_score(y_true, y_pred),\n 'accuracy': metrics.accuracy_score(y_true, y_pred),\n # 'precision': metrics.precision_score(y_true, y_pred),\n 'confusion': metrics.confusion_matrix(y_true, y_pred).tolist(),\n # 'report': metrics.classification_report(labels, predicted),\n }\n # compute aggregated precision, recall, f-score, support\n names = ['precision', 'recall', 'f1', 'support']\n for avg in metric_averages:\n try:\n mtr = metrics.precision_recall_fscore_support(y_true, y_pred, average=avg)\n res = dict(zip(['{}_{}'.format(n, avg) for n in names], mtr))\n except Exception:\n logging.exception('metrics.precision_recall_fscore_support')\n res = dict(zip(['{}_{}'.format(n, avg) for n in names], [-1] * 4))\n dict_metrics.update(res)\n return dict_metrics", "title": "" }, { "docid": "cdf079c045a45e9cd378172d3875e8f3", "score": "0.576438", "text": "def mark(self, images: List[np.ndarray]):\n\n n_images = len(images)\n stack_shape = (n_images, 3) + TRANSFORM_SIZE\n image_stack = np.zeros(stack_shape, dtype=np.float32)\n image_sizes = np.zeros((n_images, 2), dtype=np.float32)\n\n for i in range(n_images):\n image_stack[i, :, :, :] = _image_transform(images[i])\n image_sizes[i, :] = images[i].shape[-2::-1]\n\n image_stack = torch.from_numpy(image_stack)\n if self.gpu is not None:\n image_stack = image_stack.to(self.gpu)\n\n with torch.no_grad():\n marks, offsets, scores = self.onet(image_stack)\n\n if self.gpu is not None:\n marks = marks.cpu().data.numpy()\n scores = scores.cpu().data.numpy()\n else:\n marks = marks.data.numpy()\n scores = scores.data.numpy()\n\n marks[:, 0:5] = np.expand_dims(image_sizes[:, 0], 1) * marks[:, 0:5]\n marks[:, 5:10] = np.expand_dims(image_sizes[:, 1], 1) * marks[:, 5:10]\n\n scores = scores[:, 1].reshape((-1,))\n marks = marks.reshape(-1, 2, 5).transpose(0, 2, 1).astype(np.int32)\n\n return marks, scores", "title": "" }, { "docid": "8d3b118ad335956281f2c387376eede6", "score": "0.57475436", "text": "def test(self,dataset):\n outputs = self.use(dataset)\n \n costs = 
np.ones((len(outputs),1))\n # Compute classification error\n for xy,pred,cost in zip(dataset,outputs,costs):\n x,y = xy\n if y == pred[0]:\n cost[0] = 0\n\n return outputs,costs", "title": "" }, { "docid": "3cc15c3cfc6db2e710c8c2d4dacd5d9c", "score": "0.5742178", "text": "def evaluate(self, input_data, target_data):\n\n self.confusion_matrix = np.zeros(\n [target_data.shape[-1], target_data.shape[-1]])\n \n # Compute confusion matrix (one data point at a time)\n for i in np.arange(len(input_data)):\n output = self.run(input_data[i])\n true_label = np.array(target_data[i], ndmin=2).T\n \n # Compute result and add to confusion matrix \n # (true label in rows, prediction in columns)\n confusion_result = true_label @ output.T\n confusion_result[confusion_result == np.max(confusion_result)] = 1\n confusion_result[confusion_result != 1] = 0\n self.confusion_matrix += confusion_result\n \n total_predictions = np.sum(self.confusion_matrix)\n correct_predictions = np.sum(np.diag(self.confusion_matrix))\n false_predictions = total_predictions - correct_predictions\n \n # Accuracy\n self.accuracy = correct_predictions / total_predictions\n \n # Recall (per class)\n self.recall = np.array([])\n for i in np.arange(target_data.shape[-1]):\n self.recall = np.append(\n self.recall, \n self.confusion_matrix[i,i] / np.sum(self.confusion_matrix[i,:]),\n )\n \n # Precision (per class)\n self.precision = np.array([])\n for i in np.arange(target_data.shape[-1]):\n self.precision = np.append(\n self.precision, \n self.confusion_matrix[i,i] / np.sum(self.confusion_matrix[:,i]),\n )\n \n # Print accuracy measures\n print('Accuracy: ' + str('%.2f' % (self.accuracy * 100)) + '%')\n for i in np.arange(target_data.shape[-1]): \n print('Recall for ' + str(i) + ': ' \n + str('%.2f' % (self.recall[i] * 100)) + '%')\n print('Precision for ' + str(i) + ': ' \n + str('%.2f' % (self.precision[i] * 100)) + '%')", "title": "" }, { "docid": "77d124ebe7be840095fcba6b88eb08cc", "score": "0.57381797", "text": "def get_classifier(images: tp.List[Image.Image], _model_path: str, class_dict: tp.Dict) -> np.ndarray:\n # noinspection PyTypeChecker\n images = [np.asarray(x, dtype=\"float32\") / 255 for x in images]\n images = np.stack(images)\n\n _model = load_model(_model_path)\n test_preds_raw = _model.predict(images, verbose=0)\n\n test_preds = np.argmax(test_preds_raw, axis=1)\n\n # convert (als mapping) prediction from number to str\n return np.vectorize(lambda x: class_dict[x])(test_preds)", "title": "" }, { "docid": "e4f8cf7b39a126aa3d41d37dd1406475", "score": "0.57213855", "text": "def classify(img_path, parameters):\n\n classes = np.array([b'non-cat', b'cat'])\n num_px = 64\n\n img = np.array(ndimage.imread(img_path, flatten=False))\n img_resized = scipy.misc.imresize(img, size=(num_px,num_px)).reshape((num_px*num_px*3,1))\n\n img_pred = predict(img_resized, parameters)\n\n img_class = np.squeeze(img_pred)\n img_result = classes[int(img_class),].decode(\"utf-8\")\n\n return img_result", "title": "" }, { "docid": "38e813fae4023682802fb700aed8102e", "score": "0.57198906", "text": "def attack(self, imgs, targets, input_data_format=CHANNELS_LAST, output_data_format=CHANNELS_LAST):\n assert input_data_format in (CHANNELS_FIRST, CHANNELS_LAST)\n assert output_data_format in (CHANNELS_FIRST, CHANNELS_LAST)\n r = []\n count = len(imgs)\n print('go up to', len(imgs))\n # tranpose the input data format to fit the data format of the attack model\n if input_data_format != self.data_format:\n if input_data_format == CHANNELS_LAST:\n # input 
is channels_last, transpose to channels_first\n imgs = np.transpose(imgs, [0, 3, 1, 2])\n else:\n # input is channels_first, transpose to channels_last\n imgs = np.transpose(imgs, [0, 2, 3, 1])\n for i in range(0, count, self.batch_size):\n print('tick', i)\n attack_imgs = imgs[i:i + self.batch_size]\n attack_targets = targets[i:i + self.batch_size]\n attack_len = len(attack_targets)\n\n if attack_len < self.batch_size:\n img_shape = np.asarray(attack_imgs.shape)\n img_shape[0] = self.batch_size - attack_len\n target_shape = np.asarray(attack_targets.shape)\n target_shape[0] = self.batch_size - attack_len\n attack_imgs = np.append(attack_imgs, np.zeros(img_shape), axis=0)\n attack_targets = np.append(attack_targets, np.zeros(target_shape), axis=0)\n r.extend(self.attack_batch(attack_imgs, attack_targets))\n\n output = np.array(r)[0: count]\n # tranpose the output data format of the attack model to fit the output data format\n if output_data_format != self.data_format:\n if output_data_format == CHANNELS_LAST:\n # attack model output is channels_first, transpose to channels_last\n output = np.transpose(output, [0, 2, 3, 1])\n else:\n # attack model output is channels_last, transpose to channels_first\n output = np.transpose(output, [0, 3, 1, 2])\n return output", "title": "" }, { "docid": "c70134d62d8b66e1851414062f0ec5d6", "score": "0.5719786", "text": "def make_1class_predictions(model, images, device, score_threshold=0.5):\n images = torch.stack(images).to(device).float()\n box_list = []\n score_list = []\n with torch.no_grad():\n det = model(images, torch.tensor([1] * images.shape[0]).to(device).float())\n for i in range(images.shape[0]):\n boxes = det[i].detach().cpu().numpy()[:, :4]\n scores = det[i].detach().cpu().numpy()[:, 4]\n # for higher threshold\n indexes = np.where((scores > score_threshold))[0]\n boxes[:, 2] = boxes[:, 2] + boxes[:, 0]\n boxes[:, 3] = boxes[:, 3] + boxes[:, 1]\n box_list.append(boxes[indexes])\n score_list.append(scores[indexes])\n\n return box_list, score_list", "title": "" }, { "docid": "3fdb901f79d3594053552bb4800f5f4d", "score": "0.5715709", "text": "def train(self, patches, labels_patches, imgs, labels):\n opts = self._options\n\n labels_patches = (labels_patches >= 0.5) * 1.\n labels = (labels >= 0.5) * 1.\n\n num_train_patches = patches.shape[0]\n\n indices = np.arange(0, num_train_patches)\n np.random.shuffle(indices)\n\n num_errors = 0\n total = 0\n\n for batch_i, offset in enumerate(range(0, num_train_patches - opts.batch_size, opts.batch_size)):\n batch_indices = indices[offset:offset + opts.batch_size]\n feed_dict = {\n self._patches_node: patches[batch_indices, :, :, :],\n self._labels_node: labels_patches[batch_indices],\n self._dropout_keep: opts.dropout,\n self._image_augmentation: opts.image_augmentation,\n }\n\n summary_str, _, l, predictions, predictions, step = self._session.run(\n [self.summary_op, self._train, self._loss, self._predict_logits, self._predictions,\n self._global_step],\n feed_dict=feed_dict)\n\n print(\"Batch {} Step {}\".format(batch_i, step), end=\"\\r\")\n self._summary.add(summary_str, global_step=step)\n\n num_errors += np.abs(labels_patches[batch_indices] - predictions).sum()\n total += opts.batch_size\n self._summary.add_to_pixel_missclassification_summary(num_errors, total, self._global_step)\n\n # from time to time do full prediction on some images\n if step > 0 and step % opts.eval_every == 0:\n print()\n\n images_to_predict = imgs[:opts.num_eval_images, :, :, :]\n masks = self.predict(images_to_predict)\n 
overlays = images.overlays(images_to_predict, masks)\n pred_masks = ((masks > 0.5) * 1).squeeze()\n true_masks = labels[:opts.num_eval_images, :, :].squeeze()\n\n self._summary.add_to_eval_summary(masks, overlays, labels, self._global_step)\n self._summary.add_to_overlap_summary(true_masks, pred_masks, self._global_step)\n\n if step > 0 and step % opts.train_score_every == 0:\n self._summary.add_to_training_summary(self.predict(imgs), labels, self._global_step)\n\n self._summary.flush()", "title": "" }, { "docid": "3a4f2d95a3386886796ee3a2c306f0eb", "score": "0.5712749", "text": "def predict(self, instances, **kwargs):\n imgs = map(self.decode_img, instances)\n inputs = np.asarray(imgs)\n outputs = self._model.predict(imgs)\n\n return [self.threshold(output) for output in outputs]", "title": "" }, { "docid": "d75acd21d5c2b89f4addb30e9bd003ec", "score": "0.57005554", "text": "def performance_evaluation(self, metrics):\n\n metrics_indicator = valid_input_perform_eval(metrics)\n if (metrics_indicator == None):\n print(\"Invalid metrics!\")\n return\n else:\n results = np.zeros((len(self.comp_img), len(metrics)))\n overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()\n hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()\n\n for i in range(len(self.comp_img)):\n # Cannot compute for unmatched image files\n if (self.comp_img[i].GetSpacing() != self.radio_img[i].GetSpacing()):\n print(\n \"Spacing of corresponding image files ({}) don't match!\".format(self.filename[i]))\n results[i] = [float(\"NaN\")] * len(metrics)\n else: \n overlap_measures_filter.Execute(\n self.comp_img[i], self.radio_img[i])\n Fp = overlap_measures_filter.GetFalsePositiveError()\n Tn = 1 - Fp\n Fn = overlap_measures_filter.GetFalseNegativeError()\n Tp = 1 - Fn\n if (metrics_indicator[1]):\n results[i, 1] = overlap_measures_filter.GetDiceCoefficient()\n if (metrics_indicator[3]):\n results[i, 3] = overlap_measures_filter.GetJaccardCoefficient()\n if (metrics_indicator[4]):\n results[i, 4] = (\n Tp * Tn - Fp * Fn) / np.sqrt((Tp + Fp) * (Tp + Fn) * (Tn + Fp) * (Tn + Fn))\n if (metrics_indicator[0]):\n results[i, 0] = (Tp + Tn) / (Tp + Fp + Tn + Fn)\n hausdorff_distance_filter.Execute(\n self.comp_img[i], self.radio_img[i])\n if (metrics_indicator[2]):\n results[i, 2] = hausdorff_distance_filter.GetHausdorffDistance()\n \n results_df = pd.DataFrame(\n data=results, index=self.filename, columns=sorted(metrics))\n results_df.to_csv(self.export_path + '/results.csv')", "title": "" }, { "docid": "e2a7f015c4dc28ea47e75b144f30e0ba", "score": "0.56978613", "text": "def augment_images(self, images, labels, folder_names, target, current_step, total_steps):\n unq, unq_inv, unq_cnt = np.unique(labels, return_inverse=True, return_counts=True)\n\n print(\"___________________________________\")\n print(\"augment_images...\")\n print(\"Labels:\", labels)\n print(\"Labels Unique:\", unq)\n print(\"Labels Unique Invers:\", unq_inv)\n print(\"Labels Unique Count:\", unq_cnt)\n\n # count the number of images that already exist for each user\n unique_class_indices = np.split(np.argsort(unq_inv), np.cumsum(unq_cnt[:-1]))\n\n target_x = images\n target_y = labels\n i = 0\n for unique_class in unq:\n print(\"Unique Class:\", unique_class)\n indices = unique_class_indices[unique_class]\n #print(\"Unique Class Indeces:\", indices)\n\n folder_name = folder_names[unique_class]\n print(\"Gallery:\", folder_name)\n\n number_of_missing_images = target - len(indices)\n\n current_step += 1\n self.update_status('STARTED', {'step': 
current_step,\n 'total_steps': total_steps,\n 'description': 'Augmenting'})\n\n print(\"Number of missing Images: \", number_of_missing_images)\n batch_x = augmenter.augment_array_target(folder_name, number_of_missing_images)\n batch_y = np.full(number_of_missing_images, unique_class)\n print(\" \")\n print(\"batch_y:\", batch_y)\n\n target_x.extend(batch_x)\n target_y.extend(batch_y)\n i += 1\n return target_x, target_y", "title": "" }, { "docid": "443d500467409c119cda9d54be802a27", "score": "0.5693967", "text": "def test_images(self, gt, pred):\n avg_psnr = 0\n avg_ssim = 0\n individual_psnr = []\n individual_ssim = []\n\n print('Length of gt: %s'%len(gt))\n print('Length of pred: %s'%len(pred))\n for i in range(len(pred)):\n # compare to gt\n psnr = self.PSNR(self.luminance(gt[i]), self.luminance(pred[i]))\n ssim = self.SSIM(self.luminance(gt[i]), self.luminance(pred[i]))\n # save results to log_path ex: 'results/experiment1/Set5/baby/1000.png'\n # if save_images:\n # path = os.path.join(log_path, self.name, self.names[i])\n # gather results\n individual_psnr.append(psnr)\n individual_ssim.append(ssim)\n avg_psnr += psnr\n avg_ssim += ssim\n\n avg_psnr /= len(pred)\n avg_ssim /= len(pred)\n return avg_psnr, avg_ssim, individual_psnr, individual_ssim", "title": "" }, { "docid": "b0dbd367f2d4d61e5ca47b1e12dc9dff", "score": "0.5690589", "text": "def im_detect(net, im, boxes, num_classes, num_subclasses):\n\n if boxes.shape[0] == 0:\n scores = np.zeros((0, num_classes))\n pred_boxes = np.zeros((0, 4*num_classes))\n pred_views = np.zeros((0, 3*num_classes))\n scores_subcls = np.zeros((0, num_subclasses))\n return scores, pred_boxes, scores_subcls, pred_views\n\n blobs, unused_im_scale_factors = _get_blobs(im, boxes)\n\n # When mapping from image ROIs to feature map ROIs, there's some aliasing\n # (some distinct image ROIs get mapped to the same feature ROI).\n # Here, we identify duplicate feature ROIs, so we only compute features\n # on the unique subset.\n if cfg.DEDUP_BOXES > 0:\n v = np.array([1, 1e3, 1e6, 1e9, 1e12])\n hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)\n _, index, inv_index = np.unique(hashes, return_index=True,\n return_inverse=True)\n blobs['rois'] = blobs['rois'][index, :]\n boxes = boxes[index, :]\n\n # reshape network inputs\n net.blobs['data'].reshape(*(blobs['data'].shape))\n net.blobs['rois'].reshape(*(blobs['rois'].shape))\n blobs_out = net.forward(data=blobs['data'].astype(np.float32, copy=False),\n rois=blobs['rois'].astype(np.float32, copy=False))\n\n #for layer_name, blob in net.blobs.iteritems():\n # print layer_name+'\\t'+ str(blob.data.shape)\n if cfg.TEST.SVM:\n # use the raw scores before softmax under the assumption they\n # were trained as linear SVMs\n scores = net.blobs['cls_score'].data\n else:\n # use softmax estimated probabilities\n scores = blobs_out['cls_prob']\n\n if cfg.TEST.SUBCLS:\n scores_subcls = blobs_out['subcls_prob']\n else:\n # just use class scores\n scores_subcls = scores\n\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = blobs_out['bbox_pred']\n pred_boxes = _bbox_pred(boxes, box_deltas)\n pred_boxes = _clip_boxes(pred_boxes, im.shape)\n else:\n # Simply repeat the boxes, once for each class\n pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n if cfg.TEST.VIEWPOINT:\n # Apply bounding-box regression deltas\n pred_views = blobs_out['view_pred']\n else:\n # set to zeros\n pred_views = np.zeros((boxes.shape[0], 3*num_classes))\n\n if cfg.DEDUP_BOXES > 0 and not cfg.TEST.IS_PATCH:\n # Map 
scores and predictions back to the original set of boxes\n scores = scores[inv_index, :]\n scores_subcls = scores_subcls[inv_index, :]\n pred_boxes = pred_boxes[inv_index, :]\n pred_views = pred_views[inv_index, :]\n\n return scores, pred_boxes, scores_subcls, pred_views", "title": "" }, { "docid": "28d395fb5073aac957852355b1933aa6", "score": "0.5686527", "text": "def run_batch_inference(self, images):\n if len(images.shape) == 3:\n logger.info(\"input is a single image. Use batch size 1.\")\n images = np.expand_dims(images, axis=0)\n output_tensor_dict = self._output_tensor_dict\n image_tensor = self._graph.get_tensor_by_name('image_tensor:0')\n\n # Run inference\n output_dict = self._sess.run(output_tensor_dict,\n feed_dict={image_tensor: images})\n\n # all outputs are float32 numpy arrays, so convert types as appropriate\n output_dict['num_detections'] = output_dict['num_detections'].astype(np.uint)\n output_dict['detection_classes'] = output_dict[\n 'detection_classes'].astype(np.uint8)\n output_dict['detection_boxes'] = output_dict['detection_boxes']\n output_dict['detection_scores'] = output_dict['detection_scores']\n return output_dict", "title": "" }, { "docid": "389aee3b8584e90914b7583fc383d57c", "score": "0.56823885", "text": "def _run_inference_on_image_tensors(self, images: tf.Tensor):\n model_params = self._params.task.model\n with tf.device('cpu:0'):\n images = tf.cast(images, dtype=tf.float32)\n\n # Tensor Specs for map_fn outputs (images, anchor_boxes, and image_info).\n images_spec = tf.TensorSpec(shape=self._input_image_size + [3],\n dtype=tf.float32)\n\n num_anchors = model_params.anchor.num_scales * len(\n model_params.anchor.aspect_ratios) * 4\n anchor_shapes = []\n for level in range(model_params.min_level, model_params.max_level + 1):\n anchor_level_spec = tf.TensorSpec(\n shape=[\n self._input_image_size[0] // 2**level,\n self._input_image_size[1] // 2**level, num_anchors\n ],\n dtype=tf.float32)\n anchor_shapes.append((str(level), anchor_level_spec))\n\n image_shape_spec = tf.TensorSpec(shape=[2,], dtype=tf.float32)\n\n images, anchor_boxes, image_shape = tf.nest.map_structure(\n tf.identity,\n tf.map_fn(\n self._build_inputs,\n elems=images,\n fn_output_signature=(images_spec, dict(anchor_shapes),\n image_shape_spec),\n parallel_iterations=32))\n\n detections = self._model.call(\n images=images,\n image_shape=image_shape,\n anchor_boxes=anchor_boxes,\n training=False)\n\n final_outputs = {\n 'detection_boxes': detections['detection_boxes'],\n 'detection_scores': detections['detection_scores'],\n 'detection_classes': detections['detection_classes'],\n 'num_detections': detections['num_detections']\n }\n if 'detection_masks' in detections.keys():\n final_outputs['detection_masks'] = detections['detection_masks']\n\n return final_outputs", "title": "" }, { "docid": "7bb77db78dc33f23a39c5affb6f6d02a", "score": "0.5681441", "text": "def _check_targets(y_true, y_pred):\n check_consistent_length(y_true, y_pred)\n type_true = type_of_target(y_true)\n type_pred = type_of_target(y_pred)\n\n y_type = set([type_true, type_pred])\n if y_type == set([\"binary\", \"multiclass\"]):\n y_type = set([\"multiclass\"])\n\n if y_type == set([\"multiclass-multioutput\", \"multilabel-indicator\"]):\n y_type = set([\"multiclass-multioutput\"])\n\n if len(y_type) > 1:\n raise ValueError(\"Classification metrics can't handle a mix of {0} \"\n \"and {1} targets\".format(type_true, type_pred))\n\n # We can't have more than one value on y_type => The set is no more needed\n y_type = 
y_type.pop()\n\n if (y_type not in [\"binary\", \"multiclass\", \"multilabel-indicator\",\n \"multiclass-multioutput\"]):\n raise ValueError(\"{0} is not supported\".format(y_type))\n\n if y_type in [\"binary\", \"multiclass\"]:\n y_true = column_or_1d(y_true)\n y_pred = column_or_1d(y_pred)\n if y_type == \"binary\":\n unique_values = np.union1d(y_true, y_pred)\n if len(unique_values) > 2:\n y_type = \"multiclass\"\n\n if y_type.startswith('multilabel'):\n y_type = 'multilabel-indicator'\n\n return y_type, y_true, y_pred", "title": "" }, { "docid": "2ae5e1eabf3713ac8881f4d42af0d5fb", "score": "0.5680647", "text": "def recognize(self, images: typing.List[typing.Union[np.ndarray, str]]) -> typing.List[str]:\n height, width = self.model.input_shape[1:-1]\n images = [tools.read_and_fit(image, width=width, height=height) for image in images]\n for index, image in enumerate(images):\n # It's a grayscale image with no\n # channel dimension so we need to add a\n # dimension first or possibly convert to color.\n if len(image.shape) == 2 and not self.color:\n images[index] = image[..., np.newaxis]\n continue\n # It's grayscale but we need color.\n if len(image.shape) == 2 and self.color:\n images[index] = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n continue\n\n # It's a color image but we need grayscale\n if image.shape[2] == 3 and not self.color:\n images[index] = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)[..., np.newaxis]\n continue\n\n # It's a grayscale image with a color dimension\n # but we need color.\n if image.shape[2] == 1 and self.color:\n images[index] = cv2.cvtColor(image[..., 0], cv2.COLOR_GRAY2RGB)\n\n images = np.array(images)\n X = keras_applications.imagenet_utils.preprocess_input(images,\n mode=self.preprocessing_mode,\n data_format='channels_last',\n backend=keras.backend)\n predictions = self.prediction_model.predict(X)\n return decode(predictions=predictions, alphabet=self.alphabet)", "title": "" }, { "docid": "a4723c6104635da5600eeddd7283cb41", "score": "0.5679709", "text": "def process_images_for_prediction(chess_piece_images):\n processed_chess_piece_images = []\n for chess_piece_image in chess_piece_images:\n processed_chess_piece_image = utils.resize_given_image(chess_piece_image, IMAGE_DIMS)\n processed_chess_piece_images.append(processed_chess_piece_image)\n\n processed_chess_piece_images = np.array(processed_chess_piece_images)\n processed_chess_piece_images = processed_chess_piece_images.astype('float32')\n processed_chess_piece_images /= 255\n\n return processed_chess_piece_images", "title": "" }, { "docid": "884e2689fcb76cdb71241aea19c8aa24", "score": "0.56754535", "text": "def preprocess_validate(images_names, color_data, tensors, softmax_layer,\n session, y_true, model):\n # load, remove mean, crop images \n dataset = []\n for image_name in images_names:\n im = cv2.imread(image_name)\n im[0] = im[0] - color_data['r_mean']\n im[1] = im[1] - color_data['g_mean']\n im[2] = im[2] - color_data['b_mean']\n pad_crop_im = pad_crop(im, model=model)\n pad_crop_bgr_im = cv2.cvtColor(np.uint8(pad_crop_im), cv2.COLOR_RGB2BGR)\n dataset.append(pad_crop_bgr_im)\n # convert list of images to 4-D array\n dataset = np.array(dataset)\n # normalize images\n dataset = norm_data(dataset)\n\n # predict labels for provided images names\n predictions = session.run(softmax_layer, feed_dict={tensors['input_layer']: dataset,\n tensors['labels_tensor']: y_true,\n tensors['hold_prob']: 1})\n return predictions", "title": "" }, { "docid": "b22d68c06a7d96ff9b93a480055c68eb", "score": "0.5671083", 
"text": "def im_test(self, detector: Detector, image: np.ndarray, labels: dict) -> tuple:\n start = datetime.now()\n objects = detector.predict(image)\n # initiate a dictionary for found labels, and lists for accuracies and probabilities.\n time = datetime.now() - start\n logging.info(\n f\"Run time is {time.total_seconds()}, the threshold is {self.thresholds['runtime'].total_seconds()}\")\n test_dict = {}\n tp = 0\n fp = 0\n fn = 0\n probabilities = []\n # fill test dictionary with labels found.\n for dic in objects:\n if dic['label'] in test_dict:\n test_dict[dic['label']] += 1\n else:\n test_dict[dic['label']] = 1\n probabilities.append(dic['prob'])\n # check false negative.\n for key in labels.keys():\n if key in test_dict:\n if test_dict[key] - labels[key] == 0:\n tp += labels[key]\n elif labels[key] > test_dict[key]:\n tp += test_dict[key]\n fn += labels[key] - test_dict[key]\n else:\n tp += labels[key]\n fp += test_dict[key] - labels[key]\n logging.info(f\"Found {test_dict[key]} {key}s out of {labels[key]}.\")\n else:\n fn += labels[key]\n logging.warning(f\"Found {0} {key}s out of {labels[key]}.\")\n # check false positive\n for key in test_dict.keys():\n if key not in labels:\n fp += test_dict[key]\n logging.warning(f\"False positive label was detected.\")\n # calculate recall and probability.\n recall = tp / (tp + fn)\n precision = tp / (tp + fp)\n mean_prob = mean(probabilities)\n logging.info(f\"Recall is {round(recall, 2)}, the threshold is {self.thresholds['recall']}.\")\n logging.info(f\"Precision is {round(precision, 2)}, the threshold is {self.thresholds['precision']}.\")\n logging.info(f\"Mean probability is {round(mean_prob, 2)}, the threshold is {self.thresholds['probability']}.\")\n if recall >= self.thresholds['recall'] and mean_prob >= self.thresholds['probability'] and time <= \\\n self.thresholds['runtime']:\n logging.warning(\"The test has passed successfully.\")\n else:\n logging.warning(\"The test has failed.\")\n\n return recall, mean_prob, precision, time", "title": "" }, { "docid": "aa752013b4b43a06c77ed23d5deeec39", "score": "0.5670382", "text": "def _eval_classifier(self):\n\n y_pred_prior = self.model_prior.predict(self.X_test_prior)\n y_pred_post = self.model_post.predict(self.X_test_post)\n\n y_test_ = pd.DataFrame(self.y_test)\n y_pred_prior = pd.DataFrame(y_pred_prior, columns=y_test.columns)\n y_pred_post = pd.DataFrame(y_pred_post, columns=y_test.columns)\n\n y_pred_prior['source'] = \"prior\"\n y_pred_post['source'] = \"post\"\n y_test['source'] = \"test\"\n\n y_ = pd.concat([y_pred_prior, y_pred_post, y_test])\n cols = [col for col in y_.columns if col != \"source\"]\n y_ = pd.get_dummies(y_, columns=cols)\n\n y_pred_prior = y_[y_.source == 'prior'].drop('source', axis=1).values\n y_pred_post = y_[y_.source == 'post'].drop('source', axis=1).values\n y_test = y_[y_.source == 'test'].drop('source', axis=1).values\n\n y_.drop('source', axis=1, inplace=True)\n class_labels = y_.columns\n\n res = pd.DataFrame([])\n\n if (len(y_test[0]) == 2):\n # for binary classification\n # only take position 1, assuming position 1 is the true label\n iters = [1]\n else:\n # for multiclass classification\n iters = range(len(y_test[0]))\n\n for i in iters:\n\n precision_prior = precision_score(y_test[:,i], y_pred_prior[:,i])\n recall_prior = recall_score(y_test[:,i], y_pred_prior[:,i])\n acc_prior = accuracy_score(y_test[:,i], y_pred_prior[:,i])\n f1_prior = f1_score(y_test[:,i], y_pred_prior[:,i])\n try:\n auc_prior = roc_auc_score(y_test[:,i], y_pred_prior[:,i])\n 
except ValueError:\n auc_prior = \"NA\"\n\n precision_post = precision_score(y_test[:,i], y_pred_post[:,i])\n recall_post = recall_score(y_test[:,i], y_pred_post[:,i])\n acc_post = accuracy_score(y_test[:,i], y_pred_post[:,i])\n f1_post = f1_score(y_test[:,i], y_pred_post[:,i])\n try:\n auc_post = roc_auc_score(y_test[:,i], y_pred_post[:,i])\n except ValueError:\n auc_post = \"NA\"\n\n multiindex = [(str(class_labels[i]), 'Prior'),\n (str(class_labels[i]), 'Post')]\n\n index = pd.MultiIndex.from_tuples(multiindex,\n names=['Class', 'Data Type'])\n\n score = pd.DataFrame({\n 'Accuracy': [acc_prior, acc_post],\n 'Precision': [precision_prior, precision_post],\n 'Recall': [recall_prior, recall_post],\n 'F1': [f1_prior, f1_post],\n 'AUC': [auc_prior, auc_post]\n },\n index=index\n )\n\n res = pd.concat([res, score])\n\n self.ml_report = res", "title": "" }, { "docid": "1b88f7d1e0692e0c8f6185bb880d15d6", "score": "0.56663907", "text": "def accuracy(classifications, targets):\n return (classifications == targets).mean()", "title": "" }, { "docid": "d4acd753d0c664fac050befba38ed9ae", "score": "0.5664122", "text": "def inference(\n net, images, device=\"cuda\", prob_thresh=0.05, nms_iou_thresh=0.3,\n resize=True\n):\n if not isinstance(images, list):\n images = [images]\n\n orig_image_shapes = [image.shape for image in images]\n\n # Resize input images to match shape of images on which net was trained.\n if resize:\n net_image_shape = (net.net_info[\"height\"], net.net_info[\"width\"])\n images = [\n cv2.resize(image, net_image_shape)\n if image.shape[:2] != net_image_shape\n else image for image in images\n ]\n\n # Stack images along new batch axis, flip channel axis so channels are RGB\n # instead of BGR, transpose so channel axis comes before row/column axes,\n # and convert pixel values to FP32. 
Do this in one step to ensure array\n # is contiguous before passing to torch tensor constructor.\n inp = np.transpose(np.flip(np.stack(images), 3), (0, 3, 1, 2)).astype(\n np.float32) / 255.0\n\n inp = torch.tensor(inp, device=device)\n out = net.forward(inp)\n\n bbox_xywh = out[\"bbox_xywh\"].detach().cpu().numpy()\n class_prob = out[\"class_prob\"].cpu().numpy()\n class_idx = out[\"class_idx\"].cpu().numpy()\n\n thresh_mask = class_prob >= prob_thresh\n\n # Perform post-processing on each image in the batch and return results.\n results = []\n for i in range(bbox_xywh.shape[0]):\n image_bbox_xywh = bbox_xywh[i, thresh_mask[i, :], :]\n image_class_prob = class_prob[i, thresh_mask[i, :]]\n image_class_idx = class_idx[i, thresh_mask[i, :]]\n\n image_bbox_xywh[:, [0, 2]] *= orig_image_shapes[i][1]\n image_bbox_xywh[:, [1, 3]] *= orig_image_shapes[i][0]\n image_bbox_tlbr = cxywh_to_tlbr(image_bbox_xywh.astype(np.int))\n\n idxs_to_keep = non_max_suppression(\n image_bbox_tlbr, image_class_prob, class_idx=image_class_idx,\n iou_thresh=nms_iou_thresh\n )\n\n results.append(\n [\n image_bbox_tlbr[idxs_to_keep, :],\n image_class_prob[idxs_to_keep],\n image_class_idx[idxs_to_keep]\n ]\n )\n\n return results", "title": "" }, { "docid": "058723c29ce82a331bc0d26241be5169", "score": "0.56624264", "text": "def run_inference_on_images(image_list, output_dir):\n image_to_labels = defaultdict(list)\n\n create_graph()\n\n with tf.compat.v1.Session() as sess:\n # Some useful tensors:\n # 'softmax:0': A tensor containing the normalized prediction across\n # 1000 labels.\n # 'pool_3:0': A tensor containing the next-to-last layer containing 2048\n # float description of the image.\n # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG\n # encoding of the image.\n # Runs the softmax tensor by feeding the image_data as input to the graph.\n softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')\n\n for image_index, image in enumerate(image_list):\n try:\n print(\"parsing\", image_index, image, \"\\n\")\n if not tf.io.gfile.exists(image):\n tf.logging.fatal('File does not exist %s', image)\n\n with tf.io.gfile.GFile(image, 'rb') as f:\n image_data = f.read()\n\n process = psutil.Process(os.getpid())\n mem3 = process.memory_info().rss\n print('Memory After reading file', mem3 / (1024 ** 2), 'MB')\n\n predictions = sess.run(softmax_tensor,\n {'DecodeJpeg/contents:0': image_data})\n\n predictions = np.squeeze(predictions)\n\n ###\n # Get penultimate layer weights\n ###\n\n feature_tensor = sess.graph.get_tensor_by_name('pool_3:0')\n feature_set = sess.run(feature_tensor,\n {'DecodeJpeg/contents:0': image_data})\n feature_vector = np.squeeze(feature_set)\n outfile_name = os.path.basename(image) + \".npz\"\n out_path = os.path.join(output_dir, outfile_name)\n np.savetxt(out_path, feature_vector, delimiter=',')\n\n # Creates node ID --> English string lookup.\n node_lookup = NodeLookup()\n\n process = psutil.Process(os.getpid())\n mem4 = process.memory_info().rss\n print('Memory before prediction', mem4 / (1024 ** 2), 'MB')\n\n top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]\n for node_id in top_k:\n human_string = node_lookup.id_to_string(node_id)\n score = predictions[node_id]\n print(\"results for\", image)\n print('%s (score = %.5f)' % (human_string, score))\n print(\"\\n\")\n\n image_to_labels['image_labels'].append(\n {\n \"labels\": human_string,\n \"score\": str(score)\n }\n )\n process = psutil.Process(os.getpid())\n mem5 = process.memory_info().rss\n print('Memory After 
Prediction', mem5 / (1024 ** 2), 'MB')\n\n # # detect number of faces\n num_faces = detect_num_faces(image)\n image_to_labels['number_of_faces'].append(num_faces)\n\n process = psutil.Process(os.getpid())\n mem6 = process.memory_info().rss\n print('Memory After Face Detection', mem6 / (1024 ** 2), 'MB')\n\n # close the open file handlers\n proc = psutil.Process()\n open_files = proc.open_files()\n\n for open_file in open_files:\n file_handler = getattr(open_file, \"fd\")\n os.close(file_handler)\n except:\n print('could not process image index', image_index, 'image', image)\n\n return image_to_labels", "title": "" }, { "docid": "e372f3e44e6ba24d3b98edb532d1ce63", "score": "0.5662352", "text": "def _process_batch(sess, original_images, semantic_predictions, probs, image_names,\n image_heights, image_widths, image_id_offset, save_dir,\n raw_save_dir, train_id_to_eval_id=None):\n (original_images,\n semantic_predictions,\n probs,\n image_names,\n image_heights,\n image_widths) = sess.run([original_images, semantic_predictions, probs,\n image_names, image_heights, image_widths])\n\n num_image = semantic_predictions.shape[0]\n for i in range(num_image):\n image_height = np.squeeze(image_heights[i])\n image_width = np.squeeze(image_widths[i])\n semantic_prediction = np.squeeze(semantic_predictions[i])\n crop_semantic_prediction = semantic_prediction[:image_height, :image_width]\n img_name = f\"{image_names[i].decode().split('/')[-1].split('.')[0]}\"\n\n if FLAGS.also_save_raw_predictions:\n image_filename = os.path.basename(image_names[i])\n\n if train_id_to_eval_id is not None:\n crop_semantic_prediction = _convert_train_id_to_eval_id(\n crop_semantic_prediction,\n train_id_to_eval_id)\n\n save_annotation.save_annotation(\n prediction_with_COCO(crop_semantic_prediction, probs[i], concept_dict[f\"{FLAGS.folder}/{img_name}.jpg\"]),\n raw_save_dir,\n f\"{img_name}\",\n add_colormap=False)", "title": "" }, { "docid": "d37b79ca8c7a21575d2f346b7a9f68e0", "score": "0.5660186", "text": "def visualize_training(self, batched_inputs, results, feedbacks=None):\r\n from detectron2.utils.visualizer import Visualizer\r\n\r\n assert len(batched_inputs) == len(\r\n results\r\n ), \"Cannot visualize inputs and results of different sizes\"\r\n storage = get_event_storage()\r\n max_boxes = 20\r\n\r\n image_index = 0 # only visualize a single image\r\n img = batched_inputs[image_index][\"image\"]\r\n img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)\r\n v_gt = Visualizer(img, None)\r\n v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index][\"instances\"].gt_boxes)\r\n anno_img = v_gt.get_image()\r\n processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1])\r\n predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy()\r\n\r\n v_pred = Visualizer(img, None)\r\n v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes])\r\n prop_img = v_pred.get_image()\r\n\r\n num_classes = self.num_classes\r\n if feedbacks is not None:\r\n v_feedback_gt = Visualizer(img, None)\r\n instance = feedbacks[0].to(torch.device(\"cpu\"))\r\n v_feedback_gt = v_feedback_gt.overlay_instances(\r\n boxes=instance.proposal_boxes[instance.gt_classes != num_classes])\r\n feedback_gt_img = v_feedback_gt.get_image()\r\n\r\n v_feedback_gf = Visualizer(img, None)\r\n v_feedback_gf = v_feedback_gf.overlay_instances(\r\n boxes=instance.proposal_boxes[instance.gt_classes == num_classes])\r\n feedback_gf_img = v_feedback_gf.get_image()\r\n\r\n vis_img = 
np.vstack((anno_img, prop_img, feedback_gt_img, feedback_gf_img))\r\n vis_img = vis_img.transpose(2, 0, 1)\r\n vis_name = f\"Top: GT; Middle: Pred; Bottom: Feedback GT, Feedback GF\"\r\n else:\r\n vis_img = np.concatenate((anno_img, prop_img), axis=1)\r\n vis_img = vis_img.transpose(2, 0, 1)\r\n vis_name = \"Left: GT bounding boxes; Right: Predicted proposals\"\r\n\r\n storage.put_image(vis_name, vis_img)", "title": "" }, { "docid": "ef6c8ecd231cec6e87fb351b6ca61dfe", "score": "0.5654193", "text": "def process(self):\n print('Processing images...')\n processed = self.model.predict(self.source['process'], batch_size=self.batch_size, verbose=1)\n save_img(self.res_dir, processed)\n print('Complete')", "title": "" }, { "docid": "85f10becdce5d3df79a6f19e658a365f", "score": "0.5653114", "text": "def postprocess(results, filenames, batch_size):\n if len(results) != 1:\n raise Exception(\"expected 1 result, got {}\".format(len(results)))\n\n batched_result = list(results.values())[0]\n if len(batched_result) != batch_size:\n raise Exception(\"expected {} results, got {}\".format(batch_size, len(batched_result)))\n if len(filenames) != batch_size:\n raise Exception(\"expected {} filenames, got {}\".format(batch_size, len(filenames)))\n\n for (index, result) in enumerate(batched_result):\n print(\"Image '{}':\".format(filenames[index]))\n for cls in result:\n print(\" {} ({}) = {}\".format(cls[0], cls[2], cls[1]))", "title": "" }, { "docid": "c8a3392028eda8c4acc7fb3037eb3c8e", "score": "0.56511486", "text": "def inference_single_image(self, pred_labels, anchors):\n pred_classes = pred_labels[\"class\"]\n pred_bbox_offsets = pred_labels[\"bbox_offset\"]\n pred_dim_offsets = pred_labels[\"dim_offset\"]\n pred_orient_offsets = pred_labels[\"orient_offset\"]\n pred_bin_confs = pred_labels[\"bin_conf\"]\n pred_classes_all = []\n pred_scores_all = []\n pred_bboxes_all = []\n pred_dims_all = []\n pred_orients_all = []\n\n # Iterate over every feature level\n for i, anchors_i in enumerate(anchors):\n pred_classes_i = pred_classes[i]\n pred_bbox_offsets_i = pred_bbox_offsets[i]\n pred_dim_offsets_i = pred_dim_offsets[i]\n pred_orient_offsets_i = pred_orient_offsets[i]\n pred_bin_confs_i = pred_bin_confs[i]\n # (HxWxAxK,)\n pred_classes_i = pred_classes_i.flatten().sigmoid()\n anchors_i = anchors_i.to(pred_classes_i.device)\n # Keep top k top scoring indices only.\n num_topk = min(self.topk_candidates, pred_bbox_offsets_i.size(0))\n # torch.sort is actually faster than .topk (at least on GPUs)\n pred_scores_i, topk_idxs = pred_classes_i.sort(descending=True)\n pred_scores_i = pred_scores_i[:num_topk]\n topk_idxs = topk_idxs[:num_topk]\n\n # filter out the proposals with low confidence score\n keep_idxs = pred_scores_i > self.score_threshold\n if keep_idxs.float().sum() <= 0:\n continue\n pred_scores_i = pred_scores_i[keep_idxs]\n topk_idxs = topk_idxs[keep_idxs]\n\n anchor_idxs = topk_idxs // self.num_classes\n classes_idxs = topk_idxs % self.num_classes\n\n pred_bbox_offsets_i = pred_bbox_offsets_i[anchor_idxs]\n anchors_i = anchors_i[anchor_idxs]\n pred_dim_offsets_i = pred_dim_offsets_i[anchor_idxs]\n pred_orient_offsets_i = pred_orient_offsets_i[anchor_idxs]\n pred_bin_confs_i = pred_bin_confs_i[anchor_idxs]\n # predict boxes\n pred_bbox_i = self._ground_truth_coder.decode_bbox(pred_bbox_offsets_i, anchors_i.tensor)\n # predict dimensions\n pred_dims_i = self._ground_truth_coder.decode_dimension(pred_dim_offsets_i, classes_idxs)\n # predict orientation\n pred_orients_i = 
self._ground_truth_coder.decode_orient(pred_orient_offsets_i, pred_bin_confs_i)\n pred_bboxes_all.append(pred_bbox_i)\n pred_scores_all.append(pred_scores_i)\n pred_classes_all.append(classes_idxs)\n pred_dims_all.append(pred_dims_i)\n pred_orients_all.append(pred_orients_i)\n\n result = ParamList(self.in_size, is_train=False)\n if len(pred_bboxes_all) > 0:\n pred_bboxes_all, pred_scores_all, pred_classes_all, pred_dims_all, pred_orients_all = [\n torch.cat(x) for x in [pred_bboxes_all, pred_scores_all, pred_classes_all, pred_dims_all, pred_orients_all]\n ]\n keep = batched_nms(pred_bboxes_all, pred_scores_all, pred_classes_all, self.nms_threshold)\n keep = keep[: self.max_detections_per_image]\n result.add_field(\"class\", pred_classes_all[keep])\n result.add_field(\"score\", pred_scores_all[keep])\n result.add_field(\"bbox\", pred_bboxes_all[keep])\n result.add_field(\"dimension\", pred_dims_all[keep])\n result.add_field(\"orientation\", pred_orients_all[keep])\n else:\n result.add_field(\"class\", pred_classes_all)\n result.add_field(\"score\", pred_scores_all)\n result.add_field(\"bbox\", pred_bboxes_all)\n result.add_field(\"dimension\", pred_dims_all)\n result.add_field(\"orientation\", pred_orients_all)\n return result", "title": "" }, { "docid": "9ebfa5991161e0a8595de7f1b8f3f192", "score": "0.5649904", "text": "def preprocess_image(self, batched_inputs):\n images = [x[\"image\"].to(self.device) / 255. for x in batched_inputs]\n images = [(x - self.pixel_mean) / self.pixel_std for x in images]\n images, masks = ImageList.from_tensors(images, self.backbone.size_divisibility)\n return images, masks", "title": "" }, { "docid": "e049c72db629467f0b847333deda2842", "score": "0.56466806", "text": "def save_interm_result(imgs, outputs, targets, save_dir, epoch):\n def _denormalize_img(img):\n return (img*127.5) + 127.5\n\n num_imgs = len(imgs)\n img_size = imgs.shape[-2], imgs.shape[-1]\n output_size = outputs.shape[-2], outputs.shape[-1]\n\n num_rows = math.ceil(num_imgs**0.5)\n num_cols = round(num_imgs**0.5)\n\n img_mask = np.zeros([img_size[0]*num_rows, img_size[1]*num_cols])\n output_mask = np.zeros([output_size[0]*num_rows, output_size[1]*num_cols])\n target_mask = np.zeros([output_size[0]*num_rows, output_size[1]*num_cols])\n\n imgs_arr = imgs.cpu().data.numpy()[:, 0, :, :]\n targets_arr = targets.cpu().data.numpy()[:, 0, :, :]\n outputs_arr = outputs.cpu().data.numpy()[:, 0, :, :]\n\n for i, (img, target, output) in enumerate(zip(imgs_arr, targets_arr, outputs_arr)):\n row = i // num_cols\n col = i % num_cols\n img_mask[row*img_size[0]: (row+1)*img_size[0], col*img_size[1]:(col+1)*img_size[1]] = img.copy()\n output_mask[row*output_size[0]: (row+1)*output_size[0], col*output_size[1]:(col+1)*output_size[1]] = output.copy()\n target_mask[row*output_size[0]: (row+1)*output_size[0], col*output_size[1]:(col+1)*output_size[1]] = target.copy()\n\n\n for i, mask in enumerate([img_mask, output_mask, target_mask]):\n save_path = os.path.join(save_dir, f'{epoch}_{i}.jpg')\n cv2.imwrite(save_path, _denormalize_img(mask))\n return img_mask, output_mask, target_mask", "title": "" }, { "docid": "f1e6b970fff5215fc10cbbf808de8406", "score": "0.5643073", "text": "def classify(data, label, weights):\n\tdataMat, _ = normalize(data); labelMat= np.mat(label).T\n\tm, n = np.shape(dataMat)\n\th = sigmoid(dataMat * weights)\n\ty = np.float32(h > 0.5)\n\taccuracy = np.sum(y == labelMat) / m\n\tprint('Accuracy {:.2f}%'.format(accuracy * 100))", "title": "" }, { "docid": "5eac0ab26285d507b370f09ef2888594", 
"score": "0.5642408", "text": "def train_from_images(self, images):\n # TODO: Allow filenames to be a list of actual image objects too?\n raw_patches, raw_originals = self._random_patches_from_images(images)\n if len(raw_patches) == 0:\n raise Exception(\"No patches found, maybe your thresholds are too strict?\")\n mixture = ag.stats.BernoulliMixture(self.num_parts, raw_patches, init_seed=0)\n # Also store these in \"settings\"\n mixture.run_EM(1e-8, min_probability=self.settings['min_probability'])\n ag.info(\"Done.\")\n\n # Reject weak parts\n scores = np.empty(self.num_parts) \n for i in range(self.num_parts):\n part = mixture.templates[i]\n sh = part.shape\n p = part.reshape((sh[0]*sh[1], sh[2]))\n \n #import ipdb; ipdb.set_trace()\n pec = p.mean(axis=0)\n \n N = np.sum(p * np.log(p/pec) + (1-p)*np.log((1-p)/(1-pec)))\n D = np.sqrt(np.sum(np.log(p/(1-p))**2 * p * (1-p)))\n\n scores[i] = N/D \n\n # Only keep with a certain score\n visparts = mixture.remix(raw_originals)\n \n self.parts = mixture.templates[scores > 1]\n self.visparts = visparts[scores > 1]\n self.num_parts = self.parts.shape[0]\n \n # Update num_parts\n \n # Store the stuff in the instance\n #self.parts = mixture.templates\n #self.visparts = mixture.remix(raw_originals)\n\n self._preprocess_logs()", "title": "" }, { "docid": "7269b0039d93fe959000a1f2230a2dce", "score": "0.563852", "text": "def __call__(self, *args, **kwargs):\n image = kwargs.get('image')\n class_ids, scores, _ = self.detect_objects(image, show=False)\n labels = self.get_object_labels(class_ids, scores)\n return labels", "title": "" }, { "docid": "15a6f69e87a58c2a8d487d5289ca9805", "score": "0.5634711", "text": "def images_to_predicted_labels(self, network, images):\n predicted_densities, predicted_counts = network(images)\n return predicted_densities, predicted_counts", "title": "" }, { "docid": "8426042ea9369652e4089e4776618336", "score": "0.56341326", "text": "def image_analysis(self, *args, **kwargs):", "title": "" }, { "docid": "91b87ed616f51ddd9668767836d16f11", "score": "0.5632717", "text": "def postprocess(self, prediction_dict, true_image_shapes, **params):\n with tf.variable_scope('PostprocessorInference'):\n detections_dict = {}\n\n semantic_prediction, semantic_prediction_probability = \\\n self._postprocess_logits(\n prediction_dict['semantic_predictions'], true_image_shapes)\n detections_dict[\n fields.DetectionResultFields.detection_semantic] \\\n = semantic_prediction\n detections_dict[\n fields.DetectionResultFields.detection_semantic_heatmap] \\\n = semantic_prediction_probability\n\n instance_prediction, instance_prediction_probability = \\\n self._postprocess_logits(\n prediction_dict['instance_predictions'], true_image_shapes)\n detections_dict[\n fields.DetectionResultFields.detection_masks] \\\n = instance_prediction\n detections_dict[\n fields.DetectionResultFields.detection_masks_heatmap] \\\n = instance_prediction_probability\n\n # [Combining Instances] ************************************************\n mask_image = tf.cast(instance_prediction, dtype=tf.uint8)\n semantic_image = tf.cast(semantic_prediction, dtype=tf.uint8)\n zero_image = tf.zeros_like(mask_image, dtype=tf.uint8)\n mask_image = tf.concat([mask_image, zero_image, zero_image], axis=-1)\n semantic_image = tf.concat([zero_image, zero_image, semantic_image],\n axis=-1)\n panoptic_image = mask_image + semantic_image\n detections_dict[\n fields.DetectionResultFields.detection_masks_image] = \\\n mask_image\n detections_dict[\n 
fields.DetectionResultFields.detection_panoptic_image] = \\\n panoptic_image\n params = (mask_image, panoptic_image)\n self._add_summary_panoptic_image(params)\n # ************************************************ [Combining Instances]\n return detections_dict", "title": "" } ]
3209a5dac48fd4a7f382b7f2a4c9652b
Test eq as array-array for failing with different data positions in array 2 - Array code d.
[ { "docid": "ced6c869f5ee002089761d83cdfd7e8a", "score": "0.79888207", "text": "def test_eq_numpos_array_array_c2(self):\n\t\tfor testpos in range(self.testarraylen):\n\t\t\twith self.subTest(msg='Failed with posistion', testpos = testpos):\n\n\t\t\t\texpected = all([x == y for x,y in zip(self.data1, self.data2fail)])\n\n\t\t\t\tresult = arrayfunc.eq(self.data1, self.data2fail)\n\n\t\t\t\tself.assertFalse(result)\n\t\t\t\tself.assertIsInstance(result, bool)\n\t\t\t\tself.assertEqual(expected, result)\n\n\t\t\t\n\t\t\t# Shift the data one position.\n\t\t\tself.data2.append(self.data2.pop(0))", "title": "" } ]
[ { "docid": "71d41f5200c765aa5f06fb50a38ef57b", "score": "0.8023773", "text": "def test_eq_numpos_array_array_c1(self):\n\t\tfor testpos in range(self.testarraylen):\n\t\t\twith self.subTest(msg='Failed with posistion', testpos = testpos):\n\n\t\t\t\texpected = all([x == y for x,y in zip(self.data1fail, self.data2)])\n\n\t\t\t\tresult = arrayfunc.eq(self.data1fail, self.data2)\n\n\t\t\t\tself.assertFalse(result)\n\t\t\t\tself.assertIsInstance(result, bool)\n\t\t\t\tself.assertEqual(expected, result)\n\n\t\t\t\n\t\t\t# Shift the data one position.\n\t\t\tself.data1.append(self.data1.pop(0))", "title": "" }, { "docid": "71d41f5200c765aa5f06fb50a38ef57b", "score": "0.8023773", "text": "def test_eq_numpos_array_array_c1(self):\n\t\tfor testpos in range(self.testarraylen):\n\t\t\twith self.subTest(msg='Failed with posistion', testpos = testpos):\n\n\t\t\t\texpected = all([x == y for x,y in zip(self.data1fail, self.data2)])\n\n\t\t\t\tresult = arrayfunc.eq(self.data1fail, self.data2)\n\n\t\t\t\tself.assertFalse(result)\n\t\t\t\tself.assertIsInstance(result, bool)\n\t\t\t\tself.assertEqual(expected, result)\n\n\t\t\t\n\t\t\t# Shift the data one position.\n\t\t\tself.data1.append(self.data1.pop(0))", "title": "" }, { "docid": "71d41f5200c765aa5f06fb50a38ef57b", "score": "0.8023773", "text": "def test_eq_numpos_array_array_c1(self):\n\t\tfor testpos in range(self.testarraylen):\n\t\t\twith self.subTest(msg='Failed with posistion', testpos = testpos):\n\n\t\t\t\texpected = all([x == y for x,y in zip(self.data1fail, self.data2)])\n\n\t\t\t\tresult = arrayfunc.eq(self.data1fail, self.data2)\n\n\t\t\t\tself.assertFalse(result)\n\t\t\t\tself.assertIsInstance(result, bool)\n\t\t\t\tself.assertEqual(expected, result)\n\n\t\t\t\n\t\t\t# Shift the data one position.\n\t\t\tself.data1.append(self.data1.pop(0))", "title": "" }, { "docid": "71d41f5200c765aa5f06fb50a38ef57b", "score": "0.8023773", "text": "def test_eq_numpos_array_array_c1(self):\n\t\tfor testpos in range(self.testarraylen):\n\t\t\twith self.subTest(msg='Failed with posistion', testpos = testpos):\n\n\t\t\t\texpected = all([x == y for x,y in zip(self.data1fail, self.data2)])\n\n\t\t\t\tresult = arrayfunc.eq(self.data1fail, self.data2)\n\n\t\t\t\tself.assertFalse(result)\n\t\t\t\tself.assertIsInstance(result, bool)\n\t\t\t\tself.assertEqual(expected, result)\n\n\t\t\t\n\t\t\t# Shift the data one position.\n\t\t\tself.data1.append(self.data1.pop(0))", "title": "" }, { "docid": "71d41f5200c765aa5f06fb50a38ef57b", "score": "0.8023773", "text": "def test_eq_numpos_array_array_c1(self):\n\t\tfor testpos in range(self.testarraylen):\n\t\t\twith self.subTest(msg='Failed with posistion', testpos = testpos):\n\n\t\t\t\texpected = all([x == y for x,y in zip(self.data1fail, self.data2)])\n\n\t\t\t\tresult = arrayfunc.eq(self.data1fail, self.data2)\n\n\t\t\t\tself.assertFalse(result)\n\t\t\t\tself.assertIsInstance(result, bool)\n\t\t\t\tself.assertEqual(expected, result)\n\n\t\t\t\n\t\t\t# Shift the data one position.\n\t\t\tself.data1.append(self.data1.pop(0))", "title": "" }, { "docid": "71d41f5200c765aa5f06fb50a38ef57b", "score": "0.8023773", "text": "def test_eq_numpos_array_array_c1(self):\n\t\tfor testpos in range(self.testarraylen):\n\t\t\twith self.subTest(msg='Failed with posistion', testpos = testpos):\n\n\t\t\t\texpected = all([x == y for x,y in zip(self.data1fail, self.data2)])\n\n\t\t\t\tresult = arrayfunc.eq(self.data1fail, self.data2)\n\n\t\t\t\tself.assertFalse(result)\n\t\t\t\tself.assertIsInstance(result, 
bool)\n\t\t\t\tself.assertEqual(expected, result)\n\n\t\t\t\n\t\t\t# Shift the data one position.\n\t\t\tself.data1.append(self.data1.pop(0))", "title": "" }, { "docid": "71d41f5200c765aa5f06fb50a38ef57b", "score": "0.8023773", "text": "def test_eq_numpos_array_array_c1(self):\n\t\tfor testpos in range(self.testarraylen):\n\t\t\twith self.subTest(msg='Failed with posistion', testpos = testpos):\n\n\t\t\t\texpected = all([x == y for x,y in zip(self.data1fail, self.data2)])\n\n\t\t\t\tresult = arrayfunc.eq(self.data1fail, self.data2)\n\n\t\t\t\tself.assertFalse(result)\n\t\t\t\tself.assertIsInstance(result, bool)\n\t\t\t\tself.assertEqual(expected, result)\n\n\t\t\t\n\t\t\t# Shift the data one position.\n\t\t\tself.data1.append(self.data1.pop(0))", "title": "" }, { "docid": "71d41f5200c765aa5f06fb50a38ef57b", "score": "0.8023773", "text": "def test_eq_numpos_array_array_c1(self):\n\t\tfor testpos in range(self.testarraylen):\n\t\t\twith self.subTest(msg='Failed with posistion', testpos = testpos):\n\n\t\t\t\texpected = all([x == y for x,y in zip(self.data1fail, self.data2)])\n\n\t\t\t\tresult = arrayfunc.eq(self.data1fail, self.data2)\n\n\t\t\t\tself.assertFalse(result)\n\t\t\t\tself.assertIsInstance(result, bool)\n\t\t\t\tself.assertEqual(expected, result)\n\n\t\t\t\n\t\t\t# Shift the data one position.\n\t\t\tself.data1.append(self.data1.pop(0))", "title": "" }, { "docid": "71d41f5200c765aa5f06fb50a38ef57b", "score": "0.8023773", "text": "def test_eq_numpos_array_array_c1(self):\n\t\tfor testpos in range(self.testarraylen):\n\t\t\twith self.subTest(msg='Failed with posistion', testpos = testpos):\n\n\t\t\t\texpected = all([x == y for x,y in zip(self.data1fail, self.data2)])\n\n\t\t\t\tresult = arrayfunc.eq(self.data1fail, self.data2)\n\n\t\t\t\tself.assertFalse(result)\n\t\t\t\tself.assertIsInstance(result, bool)\n\t\t\t\tself.assertEqual(expected, result)\n\n\t\t\t\n\t\t\t# Shift the data one position.\n\t\t\tself.data1.append(self.data1.pop(0))", "title": "" }, { "docid": "71d41f5200c765aa5f06fb50a38ef57b", "score": "0.8023773", "text": "def test_eq_numpos_array_array_c1(self):\n\t\tfor testpos in range(self.testarraylen):\n\t\t\twith self.subTest(msg='Failed with posistion', testpos = testpos):\n\n\t\t\t\texpected = all([x == y for x,y in zip(self.data1fail, self.data2)])\n\n\t\t\t\tresult = arrayfunc.eq(self.data1fail, self.data2)\n\n\t\t\t\tself.assertFalse(result)\n\t\t\t\tself.assertIsInstance(result, bool)\n\t\t\t\tself.assertEqual(expected, result)\n\n\t\t\t\n\t\t\t# Shift the data one position.\n\t\t\tself.data1.append(self.data1.pop(0))", "title": "" }, { "docid": "71d41f5200c765aa5f06fb50a38ef57b", "score": "0.8023773", "text": "def test_eq_numpos_array_array_c1(self):\n\t\tfor testpos in range(self.testarraylen):\n\t\t\twith self.subTest(msg='Failed with posistion', testpos = testpos):\n\n\t\t\t\texpected = all([x == y for x,y in zip(self.data1fail, self.data2)])\n\n\t\t\t\tresult = arrayfunc.eq(self.data1fail, self.data2)\n\n\t\t\t\tself.assertFalse(result)\n\t\t\t\tself.assertIsInstance(result, bool)\n\t\t\t\tself.assertEqual(expected, result)\n\n\t\t\t\n\t\t\t# Shift the data one position.\n\t\t\tself.data1.append(self.data1.pop(0))", "title": "" }, { "docid": "71d41f5200c765aa5f06fb50a38ef57b", "score": "0.8023773", "text": "def test_eq_numpos_array_array_c1(self):\n\t\tfor testpos in range(self.testarraylen):\n\t\t\twith self.subTest(msg='Failed with posistion', testpos = testpos):\n\n\t\t\t\texpected = all([x == y for x,y in zip(self.data1fail, 
self.data2)])\n\n\t\t\t\tresult = arrayfunc.eq(self.data1fail, self.data2)\n\n\t\t\t\tself.assertFalse(result)\n\t\t\t\tself.assertIsInstance(result, bool)\n\t\t\t\tself.assertEqual(expected, result)\n\n\t\t\t\n\t\t\t# Shift the data one position.\n\t\t\tself.data1.append(self.data1.pop(0))", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, 
bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, 
self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, 
bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, 
self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, 
bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, 
self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "0fa76d60225a9c4713bd998e022b1065", "score": "0.8017626", "text": "def test_eq_basic_array_array_c2(self):\n\t\texpected = all([x == y for (x, y) in zip(self.data_array_array, self.data_array_array_fail)])\n\t\tresult = arrayfunc.eq(self.data_array_array, self.data_array_array_fail , nosimd=True)\n\n\t\tself.assertFalse(expected)\n\t\tself.assertFalse(result)\n\t\tself.assertIsInstance(result, bool)\n\t\tself.assertEqual(expected, result)", "title": "" }, { "docid": "7c5f7454c8090ee1e7ece493fc164621", "score": "0.79798645", "text": "def test_eq_array_num_a2(self):\n\t\tfor testval, badval in zip(self.data2, self.badarray2):\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\t# Copy the array so we don't change the original data.\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This version is expected to pass.\n\t\t\t\tresult = arrayfunc.eq(data1, testval)\n\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This is the actual test.\n\t\t\t\twith self.assertRaises(TypeError):\n\t\t\t\t\tresult = arrayfunc.eq(data1, badval)", "title": "" }, { "docid": "7c5f7454c8090ee1e7ece493fc164621", "score": "0.79798645", "text": "def test_eq_array_num_a2(self):\n\t\tfor testval, badval in zip(self.data2, self.badarray2):\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\t# Copy the array so we don't change the original data.\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This version is expected to pass.\n\t\t\t\tresult = arrayfunc.eq(data1, testval)\n\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This is the actual test.\n\t\t\t\twith self.assertRaises(TypeError):\n\t\t\t\t\tresult = arrayfunc.eq(data1, badval)", "title": "" }, { "docid": "7c5f7454c8090ee1e7ece493fc164621", "score": "0.79798645", "text": "def test_eq_array_num_a2(self):\n\t\tfor testval, badval in zip(self.data2, self.badarray2):\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\t# Copy the array so we don't change the original data.\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This version is expected to pass.\n\t\t\t\tresult = arrayfunc.eq(data1, testval)\n\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This is the actual test.\n\t\t\t\twith self.assertRaises(TypeError):\n\t\t\t\t\tresult = 
arrayfunc.eq(data1, badval)", "title": "" }, { "docid": "7c5f7454c8090ee1e7ece493fc164621", "score": "0.79798645", "text": "def test_eq_array_num_a2(self):\n\t\tfor testval, badval in zip(self.data2, self.badarray2):\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\t# Copy the array so we don't change the original data.\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This version is expected to pass.\n\t\t\t\tresult = arrayfunc.eq(data1, testval)\n\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This is the actual test.\n\t\t\t\twith self.assertRaises(TypeError):\n\t\t\t\t\tresult = arrayfunc.eq(data1, badval)", "title": "" }, { "docid": "7c5f7454c8090ee1e7ece493fc164621", "score": "0.79798645", "text": "def test_eq_array_num_a2(self):\n\t\tfor testval, badval in zip(self.data2, self.badarray2):\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\t# Copy the array so we don't change the original data.\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This version is expected to pass.\n\t\t\t\tresult = arrayfunc.eq(data1, testval)\n\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This is the actual test.\n\t\t\t\twith self.assertRaises(TypeError):\n\t\t\t\t\tresult = arrayfunc.eq(data1, badval)", "title": "" }, { "docid": "7c5f7454c8090ee1e7ece493fc164621", "score": "0.79798645", "text": "def test_eq_array_num_a2(self):\n\t\tfor testval, badval in zip(self.data2, self.badarray2):\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\t# Copy the array so we don't change the original data.\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This version is expected to pass.\n\t\t\t\tresult = arrayfunc.eq(data1, testval)\n\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This is the actual test.\n\t\t\t\twith self.assertRaises(TypeError):\n\t\t\t\t\tresult = arrayfunc.eq(data1, badval)", "title": "" }, { "docid": "7c5f7454c8090ee1e7ece493fc164621", "score": "0.79798645", "text": "def test_eq_array_num_a2(self):\n\t\tfor testval, badval in zip(self.data2, self.badarray2):\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\t# Copy the array so we don't change the original data.\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This version is expected to pass.\n\t\t\t\tresult = arrayfunc.eq(data1, testval)\n\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This is the actual test.\n\t\t\t\twith self.assertRaises(TypeError):\n\t\t\t\t\tresult = arrayfunc.eq(data1, badval)", "title": "" }, { "docid": "7c5f7454c8090ee1e7ece493fc164621", "score": "0.79798645", "text": "def test_eq_array_num_a2(self):\n\t\tfor testval, badval in zip(self.data2, self.badarray2):\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\t# Copy the array so we don't change the original data.\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This version is expected to pass.\n\t\t\t\tresult = arrayfunc.eq(data1, testval)\n\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This is the actual test.\n\t\t\t\twith self.assertRaises(TypeError):\n\t\t\t\t\tresult = arrayfunc.eq(data1, badval)", "title": "" }, { "docid": "7c5f7454c8090ee1e7ece493fc164621", "score": "0.79798645", "text": "def test_eq_array_num_a2(self):\n\t\tfor testval, badval in zip(self.data2, self.badarray2):\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\t# Copy the array so we don't change the original data.\n\t\t\t\tdata1 = 
copy.copy(self.data1)\n\n\t\t\t\t# This version is expected to pass.\n\t\t\t\tresult = arrayfunc.eq(data1, testval)\n\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This is the actual test.\n\t\t\t\twith self.assertRaises(TypeError):\n\t\t\t\t\tresult = arrayfunc.eq(data1, badval)", "title": "" }, { "docid": "7c5f7454c8090ee1e7ece493fc164621", "score": "0.79798645", "text": "def test_eq_array_num_a2(self):\n\t\tfor testval, badval in zip(self.data2, self.badarray2):\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\t# Copy the array so we don't change the original data.\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This version is expected to pass.\n\t\t\t\tresult = arrayfunc.eq(data1, testval)\n\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This is the actual test.\n\t\t\t\twith self.assertRaises(TypeError):\n\t\t\t\t\tresult = arrayfunc.eq(data1, badval)", "title": "" }, { "docid": "7c5f7454c8090ee1e7ece493fc164621", "score": "0.79798645", "text": "def test_eq_array_num_a2(self):\n\t\tfor testval, badval in zip(self.data2, self.badarray2):\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\t# Copy the array so we don't change the original data.\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This version is expected to pass.\n\t\t\t\tresult = arrayfunc.eq(data1, testval)\n\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This is the actual test.\n\t\t\t\twith self.assertRaises(TypeError):\n\t\t\t\t\tresult = arrayfunc.eq(data1, badval)", "title": "" }, { "docid": "7c5f7454c8090ee1e7ece493fc164621", "score": "0.79798645", "text": "def test_eq_array_num_a2(self):\n\t\tfor testval, badval in zip(self.data2, self.badarray2):\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\t# Copy the array so we don't change the original data.\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This version is expected to pass.\n\t\t\t\tresult = arrayfunc.eq(data1, testval)\n\n\t\t\t\tdata1 = copy.copy(self.data1)\n\n\t\t\t\t# This is the actual test.\n\t\t\t\twith self.assertRaises(TypeError):\n\t\t\t\t\tresult = arrayfunc.eq(data1, badval)", "title": "" }, { "docid": "bba0b99bced34543ebd1a101bf47f3f1", "score": "0.7969679", "text": "def test_eq_array_array_e1(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This version is expected to pass.\n\t\tresult = arrayfunc.eq(data1, self.data2)\n\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(data1, self.badarray2)", "title": "" }, { "docid": "bba0b99bced34543ebd1a101bf47f3f1", "score": "0.7969679", "text": "def test_eq_array_array_e1(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This version is expected to pass.\n\t\tresult = arrayfunc.eq(data1, self.data2)\n\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(data1, self.badarray2)", "title": "" }, { "docid": "bba0b99bced34543ebd1a101bf47f3f1", "score": "0.7969679", "text": "def test_eq_array_array_e1(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This version is expected to pass.\n\t\tresult = 
arrayfunc.eq(data1, self.data2)\n\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(data1, self.badarray2)", "title": "" }, { "docid": "bba0b99bced34543ebd1a101bf47f3f1", "score": "0.7969679", "text": "def test_eq_array_array_e1(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This version is expected to pass.\n\t\tresult = arrayfunc.eq(data1, self.data2)\n\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(data1, self.badarray2)", "title": "" }, { "docid": "bba0b99bced34543ebd1a101bf47f3f1", "score": "0.7969679", "text": "def test_eq_array_array_e1(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This version is expected to pass.\n\t\tresult = arrayfunc.eq(data1, self.data2)\n\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(data1, self.badarray2)", "title": "" }, { "docid": "bba0b99bced34543ebd1a101bf47f3f1", "score": "0.7969679", "text": "def test_eq_array_array_e1(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This version is expected to pass.\n\t\tresult = arrayfunc.eq(data1, self.data2)\n\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(data1, self.badarray2)", "title": "" }, { "docid": "bba0b99bced34543ebd1a101bf47f3f1", "score": "0.7969679", "text": "def test_eq_array_array_e1(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This version is expected to pass.\n\t\tresult = arrayfunc.eq(data1, self.data2)\n\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(data1, self.badarray2)", "title": "" }, { "docid": "bba0b99bced34543ebd1a101bf47f3f1", "score": "0.7969679", "text": "def test_eq_array_array_e1(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This version is expected to pass.\n\t\tresult = arrayfunc.eq(data1, self.data2)\n\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(data1, self.badarray2)", "title": "" }, { "docid": "bba0b99bced34543ebd1a101bf47f3f1", "score": "0.7969679", "text": "def test_eq_array_array_e1(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This version is expected to pass.\n\t\tresult = arrayfunc.eq(data1, self.data2)\n\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(data1, self.badarray2)", "title": "" }, { "docid": "bba0b99bced34543ebd1a101bf47f3f1", "score": 
"0.7969679", "text": "def test_eq_array_array_e1(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This version is expected to pass.\n\t\tresult = arrayfunc.eq(data1, self.data2)\n\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(data1, self.badarray2)", "title": "" }, { "docid": "bba0b99bced34543ebd1a101bf47f3f1", "score": "0.7969679", "text": "def test_eq_array_array_e1(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This version is expected to pass.\n\t\tresult = arrayfunc.eq(data1, self.data2)\n\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(data1, self.badarray2)", "title": "" }, { "docid": "bba0b99bced34543ebd1a101bf47f3f1", "score": "0.7969679", "text": "def test_eq_array_array_e1(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This version is expected to pass.\n\t\tresult = arrayfunc.eq(data1, self.data2)\n\n\t\t# Copy the array so we don't change the original data.\n\t\tdata1 = copy.copy(self.data1)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(data1, self.badarray2)", "title": "" }, { "docid": "db18b95ae387abe616381f03ce88eb6b", "score": "0.79009455", "text": "def test_eq_array_array_e2(self):\n\t\t# This version is expected to pass.\n\t\tresult = arrayfunc.eq(self.data1, self.data2)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(self.badarray1, self.data2)", "title": "" }, { "docid": "db18b95ae387abe616381f03ce88eb6b", "score": "0.79009455", "text": "def test_eq_array_array_e2(self):\n\t\t# This version is expected to pass.\n\t\tresult = arrayfunc.eq(self.data1, self.data2)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(self.badarray1, self.data2)", "title": "" }, { "docid": "db18b95ae387abe616381f03ce88eb6b", "score": "0.79009455", "text": "def test_eq_array_array_e2(self):\n\t\t# This version is expected to pass.\n\t\tresult = arrayfunc.eq(self.data1, self.data2)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(self.badarray1, self.data2)", "title": "" }, { "docid": "db18b95ae387abe616381f03ce88eb6b", "score": "0.79009455", "text": "def test_eq_array_array_e2(self):\n\t\t# This version is expected to pass.\n\t\tresult = arrayfunc.eq(self.data1, self.data2)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(self.badarray1, self.data2)", "title": "" }, { "docid": "db18b95ae387abe616381f03ce88eb6b", "score": "0.79009455", "text": "def test_eq_array_array_e2(self):\n\t\t# This version is expected to pass.\n\t\tresult = arrayfunc.eq(self.data1, self.data2)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.eq(self.badarray1, self.data2)", "title": "" } ]
a813f98ec27f13401901346feec8a484
Recursively turn the TC logic into TC configuration
[ { "docid": "10a45d1c09ea052b511d7a5891329964", "score": "0.0", "text": "def make(self):\n tcg = TCCommandGenerator()\n print(\"Make ClassifierFilter\")\n CmdExecutor.run_and_print(tcg.add_classifier_filter(self))", "title": "" } ]
[ { "docid": "ef40f7ed175823942ced5d3c9a0f4d46", "score": "0.5959538", "text": "def process_config(lunch_config):\n def add(domain_name, tree_root, product):\n tree_key = inner_tree.InnerTreeKey(tree_root, product)\n if tree_key in trees:\n tree = trees[tree_key]\n else:\n tree = inner_tree.InnerTree(tree_root, product)\n trees[tree_key] = tree\n domain = api_domain.ApiDomain(domain_name, tree, product)\n domains[domain_name] = domain\n tree.domains[domain_name] = domain\n\n trees = {}\n domains = {}\n\n system_entry = lunch_config.get(\"system\")\n if system_entry:\n add(API_DOMAIN_SYSTEM, system_entry[\"tree\"], system_entry[\"product\"])\n\n vendor_entry = lunch_config.get(\"vendor\")\n if vendor_entry:\n add(API_DOMAIN_VENDOR, vendor_entry[\"tree\"], vendor_entry[\"product\"])\n\n for module_name, module_entry in lunch_config.get(\"modules\", []).items():\n add(module_name, module_entry[\"tree\"], None)\n\n return inner_tree.InnerTrees(trees, domains)", "title": "" }, { "docid": "4430967bdb5e9dc6d25d8eb5e9073881", "score": "0.5949084", "text": "def traverse(config, workdir, mpc, **kwargs):\n for entry in config:\n #construct a list of tuples, each tuple is of form (file, workdir, ...)\n #where ... are additionnal arguments that will be used to expand the name pattern of the file\n entries = entry.get(workdir, mpc)\n\n #finaly, perform the transformation on the files/arguments constructed above\n for e in entries:\n transformations = entry.trans\n if len(e) == 0:\n continue\n for trans in transformations:\n trans(*e, **kwargs)", "title": "" }, { "docid": "341f2cd6d9d320c3572ce5b2b13f8ab3", "score": "0.553948", "text": "def expand_config(self, config): \n # First, expand the templates without variables, then expand modes\n # using the vars and expanded templates\n vars = config.get('vars', {})\n templates = config.get('templates', {})\n expanded_templates = self.expand_template(vars, templates, templates)\n\n # Create a new config dict and swap in expanded bits\n #new_config = config\n new_config = OrderedDict()\n\n cruise = config.get('cruise', {})\n if cruise:\n new_config['cruise'] = self.expand_template(vars, cruise,\n expanded_templates)\n loggers = config.get('loggers', {})\n if loggers:\n new_config['loggers'] = self.expand_template(vars, loggers,\n expanded_templates)\n modes = config.get('modes', {})\n if modes:\n new_config['modes'] = self.expand_template(vars, modes,\n expanded_templates)\n default_mode = config.get('default_mode', {})\n if default_mode:\n new_config['default_mode'] = self.expand_template(vars, default_mode,\n expanded_templates)\n configs = config.get('configs', {})\n if configs:\n new_config['configs'] = self.expand_template(vars, configs,\n expanded_templates)\n return new_config", "title": "" }, { "docid": "a9daa34233a77cbeeb46862debca535f", "score": "0.55340546", "text": "def traverse_cfg(cfg, features_list, handled_set, handled_features_set):\n\n if cfg.control_dep_children: #check for control dependent node\n features_list.append(cfg.name) #added current CFG node in feature\n handled_features_set.add(cfg.id) # Store id from features handled\n get_ast_features(cfg, features_list, handled_features_set) # Handled only once\n for control_dep in cfg.control_dep_children: #Traverse all childeren which has control dependency\n control_flow = control_dep.extremity #gets all dependecy control and statements\n # Otherwise missing CF pointing to a node already analyzed\n if not control_flow.control_dep_children or control_flow.id in handled_set: #if not already 
handled add and not\n features_list.append(control_flow.name)\n # else: the node name will be added while calling traverse_cfg\n if control_flow.id not in handled_set: #called for all other recurrently\n handled_set.add(control_flow.id)\n handled_features_set.add(control_flow.id) # Store id from features\n get_ast_features(control_flow, features_list, handled_features_set) # Once\n traverse_cfg(control_flow, features_list, handled_set, handled_features_set)", "title": "" }, { "docid": "097acee3b03070c066c0292c3df8b213", "score": "0.54283386", "text": "def process_config(data):\n config = deepcopy(INITIAL_CONFIG)\n try:\n regexps = data['regexps']\n logs = data['logs']\n actions = data.get('actions') or {}\n except KeyError as exc:\n raise ConfigError('required section missing: {}'.format(exc))\n except TypeError:\n raise ConfigError('config is not a dict: received {}'.format(type(data)))\n config['regexps'] = _process_regexps(regexps)\n config['actions'], auto_actions = _process_actions(actions)\n config['logs'] = _process_logs(logs, config['regexps'], config['actions'], auto_actions)\n return config", "title": "" }, { "docid": "064f2fa416a3eba7a1d122ae8d7af4e0", "score": "0.54272306", "text": "def apply_conf(self, conf):\n for s in self.steps_data:\n o2 = []\n for o in s[2]:\n co = o.replace(\"%input_dir%\", conf.input_dir)\n co = co.replace(\"%output_dir%\", conf.output_dir)\n co = co.replace(\"%matches_dir%\", conf.matches_dir)\n co = co.replace(\"%reconstruction_dir%\", conf.reconstruction_dir)\n co = co.replace(\"%mvs_dir%\", conf.mvs_dir)\n co = co.replace(\"%camera_file_params%\", conf.camera_file_params)\n o2.append(co)\n s[2] = o2", "title": "" }, { "docid": "1ce0e6137647a97306212b3f31fc312a", "score": "0.53981847", "text": "def test_walk_config(self):\n n = config_manager.Namespace(doc='top')\n n.add_option('aaa', False, 'the a', short_form='a')\n n.c = config_manager.Namespace(doc='c space')\n n.c.add_option('fred', doc='husband from Flintstones')\n n.c.add_option('wilma', doc='wife from Flintstones')\n n.d = config_manager.Namespace(doc='d space')\n n.d.add_option('fred', doc='male neighbor from I Love Lucy')\n n.d.add_option('ethel', doc='female neighbor from I Love Lucy')\n n.d.x = config_manager.Namespace(doc='x space')\n n.d.x.add_option('size', 100, 'how big in tons', short_form='s')\n n.d.x.add_option('password', 'secrets', 'the password')\n c = config_manager.ConfigurationManager(\n [n],\n use_admin_controls=True,\n #use_config_files=False,\n use_auto_help=False,\n argv_source=[]\n )\n e = [('aaa', 'aaa', n.aaa.name),\n ('c', 'c', n.c._doc),\n ('c.wilma', 'wilma', n.c.wilma.name),\n ('c.fred', 'fred', n.c.fred.name),\n ('d', 'd', n.d._doc),\n ('d.ethel', 'ethel', n.d.ethel.name),\n ('d.fred', 'fred', n.d.fred.name),\n ('d.x', 'x', n.d.x._doc),\n ('d.x.size', 'size', n.d.x.size.name),\n ('d.x.password', 'password', n.d.x.password.name),\n ]\n e.sort()\n r = [(q, k, v.name if isinstance(v, config_manager.Option) else v._doc)\n for q, k, v in c._walk_config()]\n r.sort()\n for expected, received in zip(e, r):\n self.assertEqual(received, expected)", "title": "" }, { "docid": "7e479804ac58246919d4fead8ec6dcfe", "score": "0.5370012", "text": "def visit_ControlFlowGraph(self, cfg):\n pass", "title": "" }, { "docid": "961f03e57363f56ece817427ef00af50", "score": "0.53425205", "text": "def get_config_template(self) -> cconfig.Config:\n config = cconfig.Config()\n #\n stage = \"resample_events\"\n config_tmp = config.add_subconfig(self._get_nid(stage))\n config_tmp[\"rule\"] = 
\"T\"\n config_tmp[\"agg_func\"] = \"mean\"\n #\n stage = \"generate_event_signal\"\n config_tmp = config.add_subconfig(self._get_nid(stage))\n config_kwargs = config_tmp.add_subconfig(\"transformer_kwargs\")\n config_kwargs[\"tau\"] = 8\n config_kwargs[\"max_depth\"] = 3\n #\n stage = \"shift\"\n config_tmp = config.add_subconfig(self._get_nid(stage))\n config_kwargs = config_tmp.add_subconfig(\"method_kwargs\")\n config_kwargs[\"periods\"] = 1\n #\n stage = \"build_local_ts\"\n config_tmp = config.add_subconfig(self._get_nid(stage))\n config_kwargs = config_tmp.add_subconfig(\"connector_kwargs\")\n config_kwargs[\"relative_grid_indices\"] = range(-10, 50)\n #\n stage = \"model\"\n config_tmp = config.add_subconfig(self._get_nid(stage))\n config_tmp[\"x_vars\"] = [\"_DUMMY_\"]\n config_tmp[\"y_vars\"] = [\"_DUMMY_\"]\n config_kwargs = config_tmp.add_subconfig(\"model_kwargs\")\n config_kwargs[\"alpha\"] = 0.5\n return config", "title": "" }, { "docid": "69095991cc633ced222ce7e51d1ff60c", "score": "0.5322106", "text": "def process_config(config_all):\n print(\"Preparing PATHs for the pipeline...\")\n PATHS = {}\n\n # Set parameters for the pipeline based on config\n if config_all['COMMON']['DATA_LOCATION'] == \"local\":\n PATHS[\"input_data\"] = config_all['LOCAL']['INPUT_DATA_LOCAL']\n PATHS[\"i94_data\"] = config_all['LOCAL']['INPUT_DATA_I94_LOCAL']\n PATHS[\"airport_codes\"] = config_all['LOCAL']['INPUT_DATA_AIRPORT_LOCAL']\n PATHS[\"country_codes_iso\"] = config_all['LOCAL']['INPUT_DATA_COUNTRY_LOCAL']\n PATHS[\"airport_codes_i94\"] = config_all['LOCAL']['INPUT_DATA_AIRPORT_I94_LOCAL']\n PATHS[\"country_codes_i94\"] = config_all['LOCAL']['INPUT_DATA_COUNTRY_I94_LOCAL']\n PATHS[\"output_data\"] = config_all['LOCAL']['OUTPUT_DATA_LOCAL']\n elif config_all['COMMON']['DATA_LOCATION'] == \"server\":\n PATHS[\"input_data\"] = config_all['SERVER']['INPUT_DATA_SERVER']\n PATHS[\"i94_data\"] = config_all['SERVER']['INPUT_DATA_I94_SERVER']\n PATHS[\"airport_codes\"] = config_all['SERVER']['INPUT_DATA_AIRPORT_SERVER']\n PATHS[\"country_codes_iso\"] = config_all['SERVER']['INPUT_DATA_COUNTRY_SERVER']\n PATHS[\"airport_codes_i94\"] = config_all['SERVER']['INPUT_DATA_AIRPORT_I94_SERVER']\n PATHS[\"country_codes_i94\"] = config_all['SERVER']['INPUT_DATA_COUNTRY_I94_SERVER']\n PATHS[\"output_data\"] = config_all['SERVER']['OUTPUT_DATA_SERVER']\n elif config_all['COMMON']['DATA_LOCATION'] == \"aws\":\n PATHS[\"input_data\"] = config_all['AWS']['INPUT_DATA']\n PATHS[\"i94_data\"] = config_all['AWS']['INPUT_DATA_I94']\n PATHS[\"airport_codes\"] = config_all['AWS']['INPUT_DATA_AIRPORT']\n PATHS[\"country_codes_iso\"] = config_all['AWS']['INPUT_DATA_COUNTRY']\n PATHS[\"airport_codes_i94\"] = config_all['AWS']['INPUT_DATA_AIRPORT_I94']\n PATHS[\"country_codes_i94\"] = config_all['AWS']['INPUT_DATA_COUNTRY_I94']\n PATHS[\"output_data\"] = config_all['AWS']['OUTPUT_DATA']\n\n if config_all[\"COMMON\"][\"DATA_STORAGE\"] == \"postgresql\":\n PATHS[\"data_storage\"] = config_all[\"COMMON\"][\"DATA_STORAGE_SQL\"]\n elif config_all[\"COMMON\"][\"DATA_STORAGE\"] == \"parquet\":\n PATHS[\"data_storage\"] = config_all[\"COMMON\"][\"DATA_STORAGE\"]\n\n #print(AWS_ACCESS_KEY_ID)\n #print(AWS_SECRET_ACCESS_KEY)\n\n # Print out paths in PATHS\n print(\"PATHS preparation DONE.\\n\")\n print(\"PATHS:\")\n for path in PATHS:\n print(path)\n\n return PATHS", "title": "" }, { "docid": "19d81fbef3ad071635fb4e99c692dd74", "score": "0.53159124", "text": "def build_tcno2tcid_mapping(self):\n try:\n test = \"Services_HA_\"\n 
base_strings = [\"VYOS_EW_\", \"VYOS_NS_\", \"ASAV_EW_\", \"ASAV_NS_\"]\n chains = [\"FW\", \"FW+LB\", \"REMOTE_VPN+FW\", \"REMOTE_VPN+FW+LB\",\n \"S2S_VPN+FW\", \"S2S_VPN+FW+LB\"\n ]\n tc_no = 0\n for bstr in base_strings:\n for chain in chains:\n if \"_EW\" in bstr and \"VPN\" in chain:\n continue\n services = []\n if \"VPN\" in chain:\n services.append(\"VPN\")\n if \"FW\" in chain:\n services.append(\"FW\")\n if \"LB\" in chain:\n services.append(\"LB\")\n # if vpn & fw services are in chain,\n # they shares same service vm.\n if \"VPN\" in chain and \"FW\" in chain:\n services.remove(\"FW\")\n\n # base test no. for service chain insertion.\n base_tc = gbp_config.services_ha_base_test_mapping[\n bstr.lower() + chain.lower()]\n\n # To generate combination of test, where some of the\n # services in chain will in HA mode & some will be in\n # standalone mode and failover will be applied on service\n # vm of service launched in ha mode.\n if len(services) > 1:\n for service_vm in services:\n services_ha = []\n tc_no += 1\n chain_string = chain.replace(\n service_vm, service_vm + '(HA)')\n services_ha.append(service_vm)\n if service_vm == \"VPN\":\n chain_string = chain_string.\\\n replace(\"FW\", \"FW(HA)\")\n services_ha.append('FW')\n\n tc_id = test + bstr + chain_string + \"_FailOver_\"\\\n + service_vm + \"_\" + str(tc_no)\n self.tcno2tcid_mapping[tc_no] = {\n \"tc_id\": tc_id,\n \"base_tc_no\": base_tc,\n \"fail_over\": [service_vm],\n \"services_ha\": services_ha\n }\n\n # To generate combination of test, where all the\n # services in chain will in HA mode. And Failover\n # will be applied on one service vm only.\n for service_vm in services:\n tc_no += 1\n chain_string = '+'.join(map(lambda service: service +\n '(HA)', chain.split('+')))\n tc_id = test + bstr + chain_string + \"_FailOver_\" +\\\n service_vm + \"_\" + str(tc_no)\n services_ha = copy.deepcopy(services)\n if 'VPN' in services:\n services_ha.append('FW')\n\n self.tcno2tcid_mapping[tc_no] = {\n \"tc_id\": tc_id,\n \"base_tc_no\": base_tc,\n \"fail_over\": [service_vm],\n \"services_ha\": services_ha}\n else:\n # tests where all services in chain will be\n # launched in HA mode and failover will be applied on\n # all active service vms of chain one after another.\n # and data path traffic will be verified.\n if len(services) > 1:\n services_ha = copy.deepcopy(services)\n tc_no += 1\n chain_string = '+'.join(map(\n lambda service: service +\n '(HA)', chain.split('+')))\n tc_id = test + bstr + chain_string +\\\n \"_FailOver_All_\" + str(tc_no)\n if 'VPN' in services:\n services_ha.append('FW')\n self.tcno2tcid_mapping[tc_no] = {\n \"tc_id\": tc_id,\n \"base_tc_no\": base_tc,\n \"fail_over\": services,\n \"services_ha\": services_ha}\n\n except Exception as err:\n LOGOBJ.exception(err)\n return \"ATFError: Exception occurred while building test \"\\\n \"cases numbers to test cases id mapping.\"", "title": "" }, { "docid": "fdda980df12fdb6cf4af3f2cc0103937", "score": "0.53126025", "text": "def _parse_test_cfg(self):", "title": "" }, { "docid": "49bdb39737fe898a8dd50099f8e5c65d", "score": "0.53021973", "text": "def build_tc(self, tc):\n tc = data.TestCampaigns[tc]\n [self.build_dut_tc(tc, data.DUTs[name]) for name in tc.DUTs]", "title": "" }, { "docid": "bc36f4a729b4aea5d6e30839374a74ed", "score": "0.52694374", "text": "def create_all_scenarios(self):\r\n for tree in self.trees:\r\n self.parse_tree(tree)", "title": "" }, { "docid": "17cb5e049394fe72aba0c5eb30dd9264", "score": "0.5206591", "text": "def BuildTC( *args, **kargs 
):\n from helpers.tchelper import BuildTC_Helper\n helper = BuildTC_Helper()\n helper.configure( *args, **kargs )\n return helper.execute( *args, **kargs )", "title": "" }, { "docid": "d7f3230bc929d6e8080d78d414c74152", "score": "0.5194215", "text": "def makeRuleTree(self):\n \n try:\n line = self.command_output[self.index].strip()\n self.index += 1\n except IndexError:\n return\n if len(line) > 0:\n key, value = self.getKeyValue(line)\n if key == \"Subgroup GUID\":\n guid = value.split()[0]\n if value.find(\"(\") == -1:\n name = \"NO NAME\"\n else:\n name = value[value.find(\"(\")+1:-1]\n self.current_subgroup = guid\n self.options[guid] = {\"name\": name}\n #print guid, name, \"(%s)\" % line\n elif key == \"Power Setting GUID\":\n guid = value.split()[0]\n if value.find(\"(\") == -1:\n name = \"NO NAME\"\n else:\n name = value[value.find(\"(\")+1:-1]\n #print \" \" + guid, name\n self.current_pwroption = guid\n self.options[self.current_subgroup][guid] = {\n \"name\": name,\n \"units\": None,\n \"possibilities\": None,\n \"AC\": None,\n \"DC\": None\n }\n else:\n if key == \"Possible Setting Index\":\n self.options[self.current_subgroup][self.current_pwroption][\"possibilities\"] = self.getPossibilities()\n elif key == \"Possible Settings units\":\n self.options[self.current_subgroup][self.current_pwroption][\"units\"] = value\n elif key == \"Current AC Power Setting Index\":\n self.options[self.current_subgroup][self.current_pwroption][\"AC\"] = int(value, 16)\n elif key == \"Current DC Power Setting Index\":\n self.options[self.current_subgroup][self.current_pwroption][\"DC\"] = int(value, 16)\n \n self.makeRuleTree()\n else:\n self.makeRuleTree()", "title": "" }, { "docid": "2a78026e733c6d5b01f34b2f8a306960", "score": "0.5173538", "text": "def convert_template(config: dict[str, Any]) -> dict[str, Any]:\n my_map = {\n CONF_ROOM_ON: dyn_const.CONF_ROOM_ON,\n CONF_ROOM_OFF: dyn_const.CONF_ROOM_OFF,\n CONF_CHANNEL_COVER: dyn_const.CONF_CHANNEL_COVER,\n CONF_DEVICE_CLASS: dyn_const.CONF_DEVICE_CLASS,\n CONF_OPEN_PRESET: dyn_const.CONF_OPEN_PRESET,\n CONF_CLOSE_PRESET: dyn_const.CONF_CLOSE_PRESET,\n CONF_STOP_PRESET: dyn_const.CONF_STOP_PRESET,\n CONF_DURATION: dyn_const.CONF_DURATION,\n CONF_TILT_TIME: dyn_const.CONF_TILT_TIME,\n }\n return convert_with_map(config, my_map)", "title": "" }, { "docid": "bfb073149d96aa039c513c16c64a89b1", "score": "0.5161812", "text": "def get_configs(self):\n return {'Runway': self.parse_config(os.path.join(self.template_dir, 'runway.yml')),\n 'Serverless':\n self.parse_config(os.path.join(self.template_dir, 'serverless.yml'))}", "title": "" }, { "docid": "b0d166cac235ecbc65bddb1d5bf78a32", "score": "0.5157954", "text": "def compile_config(self, config):\n return utils.walk_and_apply(config, self.compile_lookups)", "title": "" }, { "docid": "172e6689cba967981bb5ecb5c3a94ff9", "score": "0.5157662", "text": "def __configureNodes(self):\n\t\t\"Load all configuration values into an easy to use dict\"\"\"\n\t\tmydict = {}\n\t\tsections = self.Config.sections()\n\t\tprint(\"Configuring nodes for config sections {}\".format(sections))\n\t\tfor section in sections:\n\t\t mydict = dict(mydict.items() + self.Config.items(section))\n\t\treturn mydict", "title": "" }, { "docid": "797cff2743d5f78a6fe37b6924f04a74", "score": "0.5151952", "text": "def create_configs(\r\n task_filename: str,\r\n configs_folder: str,\r\n parent_config_filename: str,\r\n git_tag: str,\r\n counter_file: str,\r\n results_folder: str,\r\n machine_id: str,\r\n):\r\n list_configs = []\r\n with 
open(task_filename) as f:\r\n task = json.load(f)\r\n with open(parent_config_filename) as f:\r\n parent_config = json.load(f)\r\n\r\n count = get_starting_number_from_file(counter_file) - 1\r\n cross_scan_gen = (cross_scan_dict for cross_scan_dict in task[\"Task\"][\"CrossScan\"] if cross_scan_dict)\r\n for cross_scan_dict in cross_scan_gen:\r\n patches = (dict(zip(cross_scan_dict.keys(), comb)) for comb in it.product(*cross_scan_dict.values()))\r\n for patch in patches:\r\n count += 1\r\n config_filename = f\"{count}.json\"\r\n save_folder = os.path.join(results_folder, f\"{count}_{machine_id}/\")\r\n if not os.path.exists(save_folder):\r\n os.mkdir(save_folder)\r\n\r\n config = copy.deepcopy(parent_config)\r\n apply_patch(config[\"Configuration\"], patch)\r\n apply_patch(\r\n config,\r\n {\r\n \"Configuration->gitHashShort\": git_tag,\r\n \"Configuration->Name\": str(count),\r\n \"Configuration->LoggerParameters->FilePath\": save_folder,\r\n \"Configuration->ParentConfigName\": parent_config_filename,\r\n \"Configuration->TaskFile\": task_filename,\r\n \"Configuration->ComputerID\": machine_id,\r\n },\r\n )\r\n assign_initial_conditions(config)\r\n\r\n with open(os.path.join(configs_folder, config_filename), \"w\") as f:\r\n json.dump(config, f, sort_keys=True, indent=2)\r\n list_configs.append(config_filename)\r\n\r\n set_counter_file(counter_file, count + 1)\r\n return list_configs", "title": "" }, { "docid": "a0f6fb37fcb1b23794cd6e7f82cfa442", "score": "0.51390517", "text": "def parse_config(config_path):\n import copy\n from functools import reduce\n import importlib\n import operator\n from ruamel.yaml import YAML\n from ruamel.yaml.comments import CommentedMap, CommentedSeq\n\n yaml = YAML()\n config = yaml.load(open(config_path, 'r'))\n classes = {}\n\n def get_from_dict(data_dict, map_list):\n return reduce(operator.getitem, map_list, data_dict)\n\n def set_in_dict(data_dict, map_list, value):\n get_from_dict(data_dict, map_list[:-1])[map_list[-1]] = value\n\n # assume if a function definition is in the most outer scope, it will not\n # have anywhere in config a nested class\n def walk(node, curr=None, depth=0):\n if curr is None:\n curr = {}\n if isinstance(node, CommentedMap):\n for key, item in node.items():\n curr[depth] = key\n if key in ['object', 'closure']:\n if depth in classes.keys():\n classes[depth].append(copy.deepcopy(curr))\n else:\n classes[depth] = [copy.deepcopy(curr)]\n walk(item, copy.deepcopy(curr), depth + 1)\n elif isinstance(node, CommentedSeq):\n for i, _ in enumerate(node):\n curr[depth] = i\n walk(node[i], copy.deepcopy(curr), depth + 1)\n\n walk(config)\n depths = classes.keys()\n depths = reversed(sorted(depths))\n for depth in depths:\n for keys_dict in classes[depth]:\n keys = list(keys_dict.values())\n item = get_from_dict(config, keys)\n parts = item['name'].split('.')\n module = importlib.import_module('.'.join(parts[:-1]))\n\n # get the constructor for this instance\n ctor = getattr(module, parts[-1])\n # unify branches by setting an empty dict if no params provided\n if 'params' not in item or item['params'] is None:\n item['params'] = {}\n if isinstance(item['params'], CommentedSeq):\n item['params'] = list(item['params'])\n\n try:\n if isinstance(item['params'], dict):\n instance = ctor(**item['params'])\n elif isinstance(item['params'], list):\n instance = ctor(*item['params'])\n except TypeError:\n # pylint: disable=cell-var-from-loop\n instance = lambda *args, f=ctor, params=item['params'], \\\n **kwargs: f(*args, **kwargs, **params)\n # 
pylint: enable=cell-var-from-loop\n set_in_dict(config, keys[:-1], instance)\n\n return config", "title": "" }, { "docid": "c4165440f0951a5c7e20999b0551f52c", "score": "0.5112517", "text": "def resolve_cmd_inheritance(self, test_cfg):\n\n for section in ['build', 'run']:\n config = test_cfg.get(section)\n if not config:\n continue\n new_cmd_list = []\n if config.get('prepend_cmds', []):\n new_cmd_list.extend(config.get('prepend_cmds'))\n config['prepend_cmds'] = []\n new_cmd_list += test_cfg[section]['cmds']\n if config.get('append_cmds', []):\n new_cmd_list.extend(config.get('append_cmds'))\n config['append_cmds'] = []\n test_cfg[section]['cmds'] = new_cmd_list\n\n return test_cfg", "title": "" }, { "docid": "d5e0333db6ecb450a4b7cc41c340f2f0", "score": "0.5110972", "text": "async def cfg(self, ctx):", "title": "" }, { "docid": "dc0ea28355605c29942397ef80a0862e", "score": "0.5092078", "text": "def config_collection():\n\tshutit = shutit_global.shutit\n\tshutit.log('In config_collection',level=logging.DEBUG)\n\tcfg = shutit.cfg\n\tfor module_id in module_ids():\n\t\t# Default to None so we can interpret as ifneeded\n\t\tshutit.get_config(module_id, 'shutit.core.module.build', None, boolean=True, forcenone=True)\n\t\tshutit.get_config(module_id, 'shutit.core.module.remove', False, boolean=True)\n\t\tshutit.get_config(module_id, 'shutit.core.module.tag', False, boolean=True)\n\t\t# Default to allow any image\n\t\tshutit.get_config(module_id, 'shutit.core.module.allowed_images', [\".*\"])\n\t\tmodule = shutit.shutit_map[module_id]\n\t\tcfg_file = os.path.dirname(module.__module_file) + '/configs/build.cnf'\n\t\tif os.path.isfile(cfg_file):\n\t\t\t# use shutit.get_config, forcing the passed-in default\n\t\t\tconfig_parser = ConfigParser.ConfigParser()\n\t\t\tconfig_parser.read(cfg_file)\n\t\t\tfor section in config_parser.sections():\n\t\t\t\tif section == module_id:\n\t\t\t\t\tfor option in config_parser.options(section):\n\t\t\t\t\t\tif option == 'shutit.core.module.allowed_images':\n\t\t\t\t\t\t\toverride = False\n\t\t\t\t\t\t\tfor mod, opt, val in shutit.build['config_overrides']:\n\t\t\t\t\t\t\t\tval = val # pylint\n\t\t\t\t\t\t\t\t# skip overrides\n\t\t\t\t\t\t\t\tif mod == module_id and opt == option:\n\t\t\t\t\t\t\t\t\toverride = True\n\t\t\t\t\t\t\tif override:\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\tvalue = config_parser.get(section,option)\n\t\t\t\t\t\t\tif option == 'shutit.core.module.allowed_images':\n\t\t\t\t\t\t\t\tvalue = json.loads(value)\n\t\t\t\t\t\t\tshutit.get_config(module_id, option, value, forcedefault=True)\n\t\t# ifneeded will (by default) only take effect if 'build' is not\n\t\t# specified. 
It can, however, be forced to a value, but this\n\t\t# should be unusual.\n\t\tif cfg[module_id]['shutit.core.module.build'] is None:\n\t\t\tshutit.get_config(module_id, 'shutit.core.module.build_ifneeded', True, boolean=True)\n\t\t\tcfg[module_id]['shutit.core.module.build'] = False\n\t\telse:\n\t\t\tshutit.get_config(module_id, 'shutit.core.module.build_ifneeded', False, boolean=True)", "title": "" }, { "docid": "441d3b079f1d99c6bc1f7616889c4d16", "score": "0.5087299", "text": "def generate_node_vars(self, config):\n\n self.vars = []\n\n # get location to output individual configuration files to\n configdir = self.get_option(\"config\", section=\"pe\", default=\"configs\")\n configlocation = os.path.join(self.outdir, configdir)\n if not os.path.exists(configlocation):\n os.makedirs(configlocation)\n\n # get results directory\n self.resdir = os.path.join(\n self.outdir, self.get_option(\"results\", default=\"results\"), self.psrname\n )\n if not os.path.exists(self.resdir):\n os.makedirs(self.resdir)\n\n transfer_files = self.submit_options.get(\"should_transfer_files\", \"NO\")\n\n # store expected results file names\n extension = self.sampler_kwargs.get(\"save\", \"hdf5\")\n gzip = self.sampler_kwargs.get(\"gzip\", False)\n self.resultsfiles = []\n\n for i in range(self.n_parallel):\n curconfig = copy.deepcopy(config)\n vardict = {}\n\n label = f\"{self.submit_options.get('name', 'cwinpy_pe')}_{''.join(self.dets)}_{self.psrname}\"\n\n if self.n_parallel > 1:\n configfile = os.path.join(\n configlocation,\n \"{}_{}_{}.ini\".format(\"\".join(self.dets), self.psrname, i),\n )\n\n label += f\"_{i}\"\n else:\n configfile = os.path.join(\n configlocation, \"{}_{}.ini\".format(\"\".join(self.dets), self.psrname)\n )\n\n self.resultsfiles.append(\n bilby.core.result.result_file_name(\n os.path.abspath(self.resdir), label, extension=extension, gzip=gzip\n )\n )\n\n curconfig[\"label\"] = label\n\n # add files for transfer\n if transfer_files == \"YES\":\n transfer_input = []\n\n transfer_input.append(relative_topdir(configfile, self.resdir))\n\n for key in [\n \"par_file\",\n \"inj_par\",\n \"data_file_1f\",\n \"data_file_2f\",\n \"prior\",\n ]:\n if key in list(config.keys()):\n if key in [\"data_file_1f\", \"data_file_2f\"]:\n for detkey in config[key]:\n transfer_input.append(\n relative_topdir(config[key][detkey], self.resdir)\n )\n\n # exclude full path as the transfer directory is flat\n curconfig[key][detkey] = os.path.basename(\n config[key][detkey]\n )\n else:\n transfer_input.append(\n relative_topdir(config[key], self.resdir)\n )\n\n # exclude full path as the transfer directory is flat\n curconfig[key] = os.path.basename(config[key])\n\n # transfer ephemeris files\n for ephem in [\"earth\", \"sun\"]:\n key = f\"{ephem}ephemeris\"\n if key in config:\n if isinstance(config[key], dict):\n for etype in copy.deepcopy(config[key]):\n transfer_input.append(\n relative_topdir(config[key][etype], self.resdir)\n )\n curconfig[key][etype] = os.path.basename(\n config[key][etype]\n )\n else:\n transfer_input.append(\n relative_topdir(config[key], self.resdir)\n )\n curconfig[key] = os.path.basename(config[key])\n\n curconfig[\"outdir\"] = \"results/\"\n\n # add output directory to inputs in case resume file exists\n transfer_input.append(\".\")\n\n vardict[\"ARGS\"] = f\"--config {os.path.basename(configfile)}\"\n vardict[\"INITIALDIR\"] = self.resdir\n vardict[\"TRANSFERINPUT\"] = \",\".join(transfer_input)\n vardict[\"TRANSFEROUTPUT\"] = curconfig[\"outdir\"]\n else:\n vardict[\"ARGS\"] = 
f\"--config {os.path.basename(configfile)}\"\n\n # set log files\n vardict[\"LOGFILE\"] = os.path.join(\n self.log_directories[\"log\"], f\"{label}.log\"\n )\n vardict[\"OUTPUTFILE\"] = os.path.join(\n self.log_directories[\"out\"], f\"{label}.out\"\n )\n vardict[\"ERRORFILE\"] = os.path.join(\n self.log_directories[\"error\"], f\"{label}.err\"\n )\n\n # write out configuration file\n parseobj = DefaultConfigFileParser()\n with open(configfile, \"w\") as fp:\n fp.write(parseobj.serialize(curconfig))\n\n self.vars.append(vardict)", "title": "" }, { "docid": "dc8a29a64ddfaa262279b1631e7b438c", "score": "0.50859463", "text": "def _augment_pipeline_cfg(self):", "title": "" }, { "docid": "c55e007ebddca1b90638405690dd0989", "score": "0.5085939", "text": "def run(self) -> None:\n self.all_commands = []\n # Replaces all $ref entries in config with what they point to\n self.logger.info(\"run():: Replacing references in schemas...\")\n self.recursive_replace_refs(self.openapi_config, self.openapi_config)\n # Now openapi_config contains fully unwrapped configuration specification\n # We can use it as reference dict to update paths\n self.logger.info(\"run():: Replacing references in paths...\")\n self.recursive_replace_refs(self.openapi_paths, self.openapi_config)\n\n # Now we have full configuration starting from API paths with all parameters populated.\n # We can create our command dictionaries\n self.logger.info(\"run():: Building command set...\")\n for endpoint_name, path in self.openapi_paths.items():\n\n # Get full list of parameters for every endpoint (everything in schema)\n endpoint_data = self.recursive_find_dict_key(path, API_DATA_NODE_KEY)\n if endpoint_data != []:\n\n # Now we need to construct a command for each item in endpoint data\n # entry is a dict {\"path\":List, \"data\":Dict}\n # entry[\"data\"] usually contains multiple parameters, e.g.\n # {'systemClass': {'type': 'string', 'readOnly': True, 'nullable': True},\n # 'systemLine': {'type': 'string', 'readOnly': True, 'nullable': True},\n # ...\n # }\n # which have to be split to separate SL2 commands\n # entry[\"path\"] is a node path in JSON schema to the data dict\n for entry in endpoint_data:\n for param_name, param_dict in entry[\"data\"].items():\n # First, get rid of fucking useless oneOf keys\n # They are used for JSON schema validation, absolute PITA\n # for parameters parsing\n self.recursive_reduce(param_dict, \"oneOf\")\n if not isinstance(param_dict, dict):\n self.logger.warning(\"run():: Wrong parameter configuration for %s %s.%s - not a dictionary!\", endpoint_name, '.'.join(entry['path']), param_name)\n continue\n self.all_commands.extend(self.make_command(endpoint_name, entry[\"path\"], param_name, param_dict))\n else:\n self.logger.warning(\"run():: No schema found for endpoint %s\", endpoint_name)", "title": "" }, { "docid": "900cef7ad7ae59279204dc39637ed3fb", "score": "0.5076582", "text": "def unroll_config(config):\n # get global prefix of run\n prefix = config[\"global\"][\"prefix\"]\n\n # store unrolled configurations here\n configs = {}\n\n # check if we have a single job or need to unroll\n # into multiple jobs\n if config.get(\"batch\", None) is None:\n configs[prefix] = config\n else:\n # go through all specified runs\n for sub_id, delta_config in config[\"batch\"].items():\n # create copy of config and update for current subjob\n sub_config = deepcopy(config)\n\n # create prefix of subjob (may contain / to route\n # subjob output to subfolder)\n sub_prefix = prefix + sub_id\n\n # these are not batch 
jobs anymore, so deactivate section\n sub_config[\"batch\"] = None\n\n # create full prefix for subjob\n sub_config[\"global\"][\"prefix\"] = sub_prefix\n\n # apply subconfig delta\n # (assuming parameters are nested in two layers)\n for section in delta_config:\n # if dictionary, substitute all items on second level\n if isinstance(delta_config[section], Mapping):\n for param, value in delta_config[section].items():\n sub_config[section][param] = value\n else:\n # substitute entire section (this only affects pipeline stages)\n sub_config[section] = delta_config[section]\n\n configs[sub_prefix] = sub_config\n\n return configs", "title": "" }, { "docid": "1e96bd937ea0d1762878203b150de79b", "score": "0.50726634", "text": "def build_general_config(self):\n\n raise NotImplementedError()", "title": "" }, { "docid": "46e3364e6acd168f1aa02e6922416a27", "score": "0.50712633", "text": "def generate_config(cls, tree, special='rethinkdb', override=None):\n\n\t\t# Call the parent\n\t\treturn super().generate_config(tree, special, override);", "title": "" }, { "docid": "a3ceeb4fd3fdc1698dc184ea624153b7", "score": "0.50711846", "text": "def build_structure(config):\n\n logging.info(\"Building data structure...\")\n\n\n # Build templates\n logging.info(\"Building templates...\")\n templates = {}\n for id_, data in config[\"templates\"].items():\n logging.debug(\"Building template '{0}'\".format(id_))\n try:\n templates[id_] = Template(id_, *data)\n except ConfigError as e:\n logging.warn(\"Problem with config file - {0}. Skipping template '{1}'\".format(e, id_))\n\n # Build items\n logging.info(\"Building items...\")\n items = {}\n for id_, data in config[\"items\"].items():\n logging.debug(\"Building item '{0}'\".format(id_))\n try:\n template_id = data[1]\n if template_id is not None:\n template = templates[template_id]\n else:\n template = None\n items[id_] = Item(id_, data[0], template)\n except KeyError as e:\n logging.warn(\"Problem with config file - Invalid template '{0}'. Skipping item '{1}'\".format(data[1], id_))\n except ConfigError as e:\n logging.warn(\"Problem with config file - {0}. 
Skipping item '{1}'\".format(e, id_))\n\n # Build users\n logging.info(\"Building users...\")\n users = {}\n for email, item_ids in config[\"users\"].items():\n logging.debug(\"Building user '{0}'\".format(email))\n user_items = []\n\n for item_id in item_ids:\n try:\n user_items.append(items[item_id])\n except KeyError as e:\n logging.warn(\"Problem with config file - Invalid item '{0}' for user '{1}'\".format(item_id, email))\n\n # Check for duplicates\n if len(set(user_items)) != len(user_items):\n logging.warn(\"Problem with config file - Duplicate items for user '{0}'\".format(email))\n\n users[email] = User(user_items)\n\n return users", "title": "" }, { "docid": "adb6409c4556de74340027f11b18a315", "score": "0.50702965", "text": "def _next_configs(self):\n for state, input, stack in self.data:\n subtable = self.table.get(state, {})\n for (input_prefix, stack_prefix), entries in subtable.items():\n if (input.startswith(input_prefix) and\n stack.startswith(stack_prefix)):\n for next_state, next_stack in entries:\n yield Config(\n next_state,\n input[len(input_prefix):],\n next_stack+stack[len(stack_prefix):])", "title": "" }, { "docid": "c34746248482ba01a6181e25d0e7dfcd", "score": "0.5070073", "text": "def _create(self):\n parser = _create_field_parser()\n main_rules = parser \\\n .parseFile(self.base_dir + '/' + self.main_config_file,\n parseAll=True)\n rules = main_rules.rules\n includes = main_rules.includes\n already_includes = [self.main_config_file]\n\n #Resolve includes\n for include in includes:\n if include[0] in already_includes:\n continue\n already_includes.append(include[0])\n if os.path.exists(include[0]):\n tmp = parser.parseFile(include[0], parseAll=True)\n else:\n #CHECK: This will raise an IOError if the file doesn't exist\n tmp = parser.parseFile(self.base_dir + '/' + include[0],\n parseAll=True)\n if rules and tmp.rules:\n rules += tmp.rules\n else:\n rules = tmp.rules\n if includes and tmp.includes:\n includes += tmp.includes\n else:\n includes = tmp.includes\n\n #Create config rules\n for rule in rules:\n if rule.override:\n self.__override_rules.append(rule)\n elif rule.extend:\n self.__extend_rules.append(rule)\n elif rule.inherit_from:\n self.__inherit_rules.append(rule)\n else:\n self._create_rule(rule)\n\n self.__resolve_inherit_rules()\n self.__resolve_override_rules()\n self.__resolve_extend_rules()", "title": "" }, { "docid": "54b92469f316d8fdd4e870f1f8a6ce3b", "score": "0.506807", "text": "def config_func(config):\n\n for node in config.children:\n key = node.key.lower()\n val = node.values[0]\n if key == 'debug':\n obj.debug = pc.convert2boolean(val)\n elif key == 'verbose':\n obj.verbose = pc.convert2boolean(val)\n\n collectd.info('%s: debug=%s, verbose=%s'\n % (PLUGIN, obj.debug, obj.verbose))\n\n return pc.PLUGIN_PASS", "title": "" }, { "docid": "eb4394c5c70ae5af3b4bfa98c0940c29", "score": "0.50434846", "text": "def applyConfiguration (c):\r\n\r\n def getboolean(name):\r\n value = getattr(config, name)\r\n newvalue = c.config.getBool(name)\r\n if newvalue is not None:\r\n setattr(config, name, newvalue)\r\n \r\n getboolean(\"rst2file\")\r\n getboolean(\"rst2_bodyfilter\")\r\n getboolean(\"rst2_clear_attributes\")\r\n getboolean(\"rst2_http_server_support\")\r\n if config.rst2_http_server_support and not mod_http:\r\n g.es(\"Resetting rst2_http_server_support because mod_http plugin was not imported successfully\", color='red')\r\n config.rst2_http_server_support = False\r\n getboolean(\"rst2_pure_document\")\r\n 
getboolean(\"rst2_format_headlines\")\r\n # getboolean(\"rst2_warnofdrags\")\r\n getboolean(\"rst2_run_on_window_open\")\r\n \r\n getboolean(\"rst2_debug_handle_endtag\")\r\n getboolean(\"rst2_debug_store_lines\")\r\n getboolean(\"rst2_debug_handle_starttag\")\r\n getboolean(\"rst2_debug_show_unknownattributes\")\r\n getboolean(\"rst2_debug_node_html_1\")\r\n getboolean(\"rst2_debug_anchors\")\r\n getboolean(\"rst2_debug_before_and_after_replacement\")\r\n getboolean(\"rst2_install_menu_item_in_edit_menu\")", "title": "" }, { "docid": "0f2bd2c3a6dcfd4f229e0da4d6382736", "score": "0.5012573", "text": "def create_config_template():\n import ruamel.yaml\n yaml_str = \"\"\"\\\n# Project definitions (do not edit)\n Task:\n scorer:\n date:\n \\n\n# Project path (change when moving around)\n project_path:\n \\n\n# Annotation data set configuration (and individual video cropping parameters)\n video_sets:\n bodyparts:\n start:\n stop:\n numframes2pick:\n \\n\n# Plotting configuration\n skeleton:\n skeleton_color:\n pcutoff:\n dotsize:\n alphavalue:\n colormap:\n \\n\n# Training,Evaluation and Analysis configuration\n TrainingFraction:\n iteration:\n resnet:\n snapshotindex:\n batch_size:\n \\n\n# Cropping Parameters (for analysis and outlier frame detection)\n cropping:\n#if cropping is true for analysis, then set the values here:\n x1:\n x2:\n y1:\n y2:\n \\n\n# Refinement configuration (parameters from annotation dataset configuration also relevant in this stage)\n corner2move2:\n move2corner:\n \"\"\"\n ruamelFile = ruamel.yaml.YAML()\n cfg_file = ruamelFile.load(yaml_str)\n return(cfg_file,ruamelFile)", "title": "" }, { "docid": "6a1ba6cc584b7e453f363efd562a5b9f", "score": "0.50076985", "text": "def generate_confs():\n\tglobal_config = config_gen.get_config()\n\tfor section in global_config.keys():\n\t\tif global_config[section].has_key('TEMPLATE_CONFIG'):\n\t\t\tif not global_config[section].has_key('OUTPUT_CONFIG'):\n\t\t\t\texit_error(\"[ERROR] 'OUTPUT_CONFIG' not specified for '\" + section + \"'\")\n\t\t\ttemplate_file = global_config[section]['TEMPLATE_CONFIG']\n\t\t\ttemplate_str = ''\n\t\t\ttry:\n\t\t\t\twith open(template_file) as f:\n\t\t\t\t\ttemplate_str = f.read()\n\t\t\texcept:\n\t\t\t\texit_error(\"[ERROR] Template File for '\" + section + \"', \" + template_file + \" does not exist\") \n\n\t\t\tfor key, val in global_config[section].items():\n\t\t\t\ttemplate_str = template_str.replace('$' + key + '$', val)\n\n\t\t\ttry:\n\t\t\t\twith open(global_config[section]['OUTPUT_CONFIG'], 'wb') as f:\n\t\t\t\t\tprint 'Writing', f.name, '...'\n\t\t\t\t\tf.write(template_str)\n\t\t\texcept:\n\t\t\t\texit_error(\"[ERROR] Failed to open output_config '\" + global_config[section]['OUTPUT_CONFIG'] + \"' in write mode\")\n\t\telif section == 'HOSTAPD':\n\t\t\twrite_hostapd_conf(global_config)", "title": "" }, { "docid": "32322e10328741038c4d57412e8d5eaa", "score": "0.5007296", "text": "def interpretCfg(self):\n target_node = self.getTargetNode()\n current_node = self.getSourceNode()\n self.visited.append(current_node)\n while current_node != target_node:\n # see if there is actually a decision to take when node is (if|while)\n if current_node in self.cfgparser.labelsIf or \\\n current_node in self.cfgparser.labelsWhile :\n decision_node = current_node\n succ = [s for s in self.cfg.successors(current_node)]\n for s in succ:\n label = self.cfg.edges[decision_node, s]['label']\n if self.interpretCondition(label):\n current_node = s\n # else the edge represents an assigment\n # however if the 
assigment has a while as successor, it means there is\n # a condition to evaluate\n elif current_node in self.cfgparser.labelsAssigns:\n succ = [s for s in self.cfg.successors(current_node)]\n if len(succ) == 1:\n label = self.cfg.edges[current_node, succ[0]]['label']\n self.interpretAssigments(label)\n current_node = succ[0]\n # else finally the node must BE the empty label with no successfors\n else:\n succ = [s for s in self.cfg.successors(current_node)]\n if len(succ) == 0 :\n current_node = target_node\n else:\n raise Exception('Target node could not be reached')\n self.visited.append(current_node)", "title": "" }, { "docid": "998f5b34efcb0b17f54d73e5546f73c6", "score": "0.49866524", "text": "def build_config(config: Dict[str, Any]) -> Dict[str, str]:\n result = config.copy()\n # Manage the classifier stable/beta\n is_stable = result.pop(\"is_stable\", False)\n if is_stable:\n result[\"classifier\"] = \"Development Status :: 5 - Production/Stable\"\n else:\n result[\"classifier\"] = \"Development Status :: 4 - Beta\"\n # Manage the nspkg\n package_name = result[\"package_name\"]\n result[\"package_nspkg\"] = result.pop(\"package_nspkg\", package_name[: package_name.rindex(\"-\")] + \"-nspkg\")\n # ARM?\n result[\"is_arm\"] = result.pop(\"is_arm\", True)\n\n # Do I need msrestazure for this package?\n result[\"need_msrestazure\"] = result.pop(\"need_msrestazure\", False)\n\n # Do I need azure-mgmt-core for this package?\n result[\"need_azuremgmtcore\"] = result.pop(\"need_azuremgmtcore\", True)\n\n # Pre-compute some Jinja variable that are complicated to do inside the templates\n package_parts = result[\"package_nspkg\"][: -len(\"-nspkg\")].split(\"-\")\n result[\"nspkg_names\"] = [\".\".join(package_parts[: i + 1]) for i in range(len(package_parts))]\n result[\"init_names\"] = [\"/\".join(package_parts[: i + 1]) + \"/__init__.py\" for i in range(len(package_parts))]\n exclude_folders = result.pop(\"exclude_folders\", \"\")\n result[\"exclude_folders\"] = [item.strip() for item in exclude_folders.split(\",\") if item.strip()]\n\n # Return result\n return result", "title": "" }, { "docid": "dd0ff7d0b17092c8fa77f4ba759bae71", "score": "0.49811134", "text": "def _python_to_conf(self, x, name='', nesting_context=None, **metadata):\n\n # If this is an atomic type, no parsing is required\n if type(x) in self.VALID_ATOMIC_VALUE_TYPES:\n return x\n elif type(x) in self.VALID_SEQUENCE_VALUE_TYPES:\n # a sequence. 
apply to elements recursively\n if hasattr(x, 'items'):\n return type(x)((k, self._python_to_conf(v)) for k, v in x.items())\n else:\n return type(x)(self._python_to_conf(elem) for elem in x)\n\n if type(x) == type(inspect):\n\n # the top-level module object --> a ConfigContainer\n raw_attrs = self._get_dunder_dict(x)\n\n # handle the __entry_point___ directive:\n new_x = self._find_entry_point(raw_attrs)\n if new_x is not None:\n # return the result of the recursive call on the new entry point\n return self._python_to_conf(new_x)\n\n elif _is_raw_container(x):\n # a class --> a ConfigContainer\n raw_attrs = self._get_dunder_dict(x)\n\n # handle auto-overlay magic\n if nesting_context is None:\n nesting_context = []\n nesting_context = nesting_context + [(name, x)] # don't use append\n for overlayee in self._gen_overlayees(nesting_context):\n for k, v in self._get_dunder_dict(overlayee).items():\n if k not in raw_attrs:\n raw_attrs[k] = v\n else:\n # type misunderstood\n raise ConfigParsingError('Config construct of unsupported type %r: %s' % (type(x), x))\n\n # Separate raw_attrs to their types. See _prepare_attrs for more details.\n real_attrs, meta_attrs = self._prepare_attrs(raw_attrs)\n\n # if this is an overridet-set, all nested containers are also override sets automatically.\n # we propagate this metadata attribute using the metadata dict:\n if meta_attrs.get('is_override_set', False):\n metadata['is_override_set'] = meta_attrs['is_override_set']\n\n # convert real attr recursively\n attrs = {\n k: self._python_to_conf(v, name=k, nesting_context=nesting_context, **metadata)\n for k, v in real_attrs.items()\n }\n\n # create the container\n metadata_to_apply = merge_dicts(metadata, meta_attrs)\n if metadata_to_apply.get('is_override_set', False):\n container_cls = ConfigOverrideSet\n else:\n container_cls = ConfigContainer\n container = container_cls(attrs)\n\n # set metadata attributes on the container\n container.get_metadata().update(metadata_to_apply)\n\n return container", "title": "" }, { "docid": "38de66c8fdee0b6a4739b56eec38b8af", "score": "0.49773577", "text": "def parse_config(self) -> None:\n filename = self.nodes\n config = Config()\n cfgdir = os.path.dirname(filename)\n cfg = yaml.safe_load(open(filename))\n nodes = [self.create_node(cfgdir, nodecfg) for nodecfg in cfg['nodes']]\n addrs = []\n for node in nodes:\n addrs.append(node.addr)\n idx = 1\n for node in nodes:\n if not node.addr:\n while addr(idx) in addrs:\n idx += 1\n node.addr = addr(idx)\n addrs.append(node.addr)\n\n if 'wmediumd' in cfg:\n wmd = cfg['wmediumd']\n\n wm_cfg = wmd.get('config', None)\n if wm_cfg is not None:\n assert isinstance(wm_cfg, str)\n config.wmediumd_conf = os.path.join(cfgdir, wm_cfg)\n\n wm_per = wmd.get('per', None)\n if wm_per is not None:\n assert isinstance(wm_per, str)\n config.wmediumd_per = os.path.join(cfgdir, wm_per)\n\n if 'net' in cfg:\n net = cfg['net']\n\n net_delay = net.get('delay', None)\n if net_delay is not None:\n assert isinstance(net_delay, (int, float))\n config.net_delay = float(net_delay)\n\n if 'controller' in cfg:\n config.start_time = cfg['controller'].get('start-time', 0)\n\n config.nodes = nodes\n self.config = config", "title": "" }, { "docid": "587ba2ebe1a59380f6b51d0457f12ca1", "score": "0.4937039", "text": "def _setup_pipeline_cfg(self):", "title": "" }, { "docid": "f9037a0589aab29e2b5b4aad5aeb2743", "score": "0.49354577", "text": "def traverse_config(multi_config: dict):\n config_list = [copy.deepcopy(multi_config)]\n queue = [[key] for key in 
multi_config.keys()]\n sample_functions = []\n while len(queue) > 0:\n key_path = queue.pop(0)\n values = key_path_get_value(multi_config, key_path)\n if key_path[-1] == 'sample':\n assert isinstance(values, dict)\n parent_key_path = key_path[:-1]\n # parent_config = key_path_get_value(multi_config, parent_key_path)\n # the value of the parent_key_path is replaced with the sample in config_list\n # this assertion can be false when the default explain config is merged into the multi config\n # assert len(parent_config) == 1\n sample_config = merge_dicts(SAMPLE_CONFIG, values)\n\n if sample_config['exclude'] is not None:\n def exclude_func(exclude_config: dict):\n return key_path_get_value(exclude_config, sample_config['exclude'])\n else:\n def exclude_func(exclude_config: dict):\n return None\n # TODO: functionality to sample together\n new_config_list = []\n for c in config_list:\n samples = sample(sample_config, exclude_func(c))\n for val in samples:\n config = key_path_set_value(copy.deepcopy(c), parent_key_path, val)\n new_config_list.append(config)\n config_list = new_config_list\n\n elif isinstance(values, dict):\n for key in values.keys():\n queue.append(key_path + [key])\n elif isinstance(values, list) or isinstance(values, tuple):\n for i in range(len(values)):\n queue.append(key_path + [i])\n\n return config_list", "title": "" }, { "docid": "339403b25507ab9284566f4687dbd59d", "score": "0.49337658", "text": "def ConfigureTests(test_hierarchy: dict, argv):\n\n specific_test = \"\"\n if argv.test is not None:\n specific_test = argv.test\n print(\"specific_test=\" + specific_test)\n\n test_objects = []\n for testdir in test_hierarchy:\n for config_file in ListFilesInDir(testdir, \".json\"):\n sub_test_objs = ParseTestConfiguration(testdir + config_file)\n for obj in sub_test_objs:\n if specific_test != \"\" and obj.filename != specific_test:\n print(\"skipping \" + obj.filename)\n continue\n test_objects.append(obj)\n\n # If the out directory exists then we clear it\n if os.path.isdir(testdir + \"out/\"):\n shutil.rmtree(testdir + \"out/\")\n\n # If the out directory does not exist then we create it\n if not os.path.isdir(testdir + \"out/\"):\n os.mkdir(testdir + \"out/\")\n\n # If the gold directory does not exist then we create it\n if not os.path.isdir(testdir + \"gold/\"):\n os.mkdir(testdir + \"gold/\")\n\n return test_objects", "title": "" }, { "docid": "2b1bb95989620240f195c66942df9983", "score": "0.49330992", "text": "def GenerateConfig(context):\n\n if context.properties['networkType'] == 'external':\n networkInterfaces = [{\n 'subnetwork': context.properties['subNetwork'],\n 'accessConfigs': [{\n 'name': 'External NAT',\n 'type': 'ONE_TO_ONE_NAT'\n }]\n }]\n else:\n networkInterfaces = [{\n 'subnetwork': context.properties['subNetwork'],\n }]\n if context.properties['mongoType'] == 'dbData':\n importScript = \"mongodata.sh\"\n elif context.properties['mongoType'] == 'dbRouter':\n importScript = \"mongos.sh\"\n elif context.properties['mongoType'] == 'dbConfig':\n if 'a' in context.env['name']:\n networkInterfaces[0]['networkIP'] = '10.0.2.10'\n elif 'b' in context.env['name']:\n networkInterfaces[0]['networkIP'] = '10.0.2.11'\n elif 'c' in context.env['name']:\n networkInterfaces[0]['networkIP'] = '10.0.2.12'\n importScript = \"mongoconf.sh\"\n\n resources = [{\n 'name': context.env['name'],\n 'type': 'compute.v1.instance',\n 'properties': {\n 'zone': context.properties['zone'],\n 'machineType': ''.join([computeBaseUrl, \n 'projects/', context.env['project'],\n '/zones/', 
context.properties['zone'], \n '/machineTypes/', context.properties['machineType']]),\n 'disks': [{\n 'deviceName': 'boot',\n 'type': 'PERSISTENT',\n 'boot': True,\n 'autoDelete': True,\n 'initializeParams': {\n 'sourceImage': ''.join([computeBaseUrl, 'projects/',\n 'ubuntu-os-cloud/global',\n '/images/family/ubuntu-2004-lts'])\n }\n }],\n 'networkInterfaces': networkInterfaces,\n # Allow the instance to access logging.\n \"serviceAccounts\": [\n {\n \"email\": \"[email protected]\",\n \"scopes\": [\n \"https://www.googleapis.com/auth/devstorage.read_only\",\n \"https://www.googleapis.com/auth/logging.write\",\n \"https://www.googleapis.com/auth/monitoring.write\",\n \"https://www.googleapis.com/auth/servicecontrol\",\n \"https://www.googleapis.com/auth/service.management.readonly\",\n \"https://www.googleapis.com/auth/trace.append\"\n ]\n }\n ],\n # Metadata\n 'metadata': {\n 'items': [{\n # Startup script\n 'key': 'startup-script',\n 'value': context.imports[importScript]\n }]\n }\n }\n }]\n\n return {'resources': resources}", "title": "" }, { "docid": "7111f80dbd0a719c17fa753bdb604826", "score": "0.4928377", "text": "def to_yaml_tree_transform(self, model, tag, ctx):", "title": "" }, { "docid": "fa4b1586344b171ebcbb8a638d24507c", "score": "0.49226865", "text": "def get_configs(configs):\n\tshutit = shutit_global.shutit\n\tcp = LayerConfigParser()\n\tfail_str = ''\n\tfiles = []\n\tfor config_file in configs:\n\t\tif isinstance(config_file, tuple):\n\t\t\tcontinue\n\t\tif not is_file_secure(config_file):\n\t\t\tfail_str = fail_str + '\\nchmod 0600 ' + config_file\n\t\t\tfiles.append(config_file)\n\tif fail_str != '':\n\t\tif shutit.build['interactive'] > 1:\n\t\t\tfail_str = 'Files are not secure, mode should be 0600. Running the following commands to correct:\\n' + fail_str + '\\n'\n\t\t\t# Actually show this to the user before failing...\n\t\t\tshutit.log(fail_str)\n\t\t\tshutit.log('Do you want me to run this for you? 
(input y/n)')\n\t\t\tif shutit.build['interactive'] == 0 or util_raw_input(default='y') == 'y':\n\t\t\t\tfor f in files:\n\t\t\t\t\tshutit.log('Correcting insecure file permissions on: ' + f)\n\t\t\t\t\tos.chmod(f,0o600)\n\t\t\t\t# recurse\n\t\t\t\treturn get_configs(configs)\n\t\telse:\n\t\t\tfor f in files:\n\t\t\t\tshutit.log('Correcting insecure file permissions on: ' + f)\n\t\t\t\tos.chmod(f,0o600)\n\t\t\t# recurse\n\t\t\treturn get_configs(configs)\n\t\tshutit.fail(fail_str)\n\tfor config in configs:\n\t\tif isinstance(config, tuple):\n\t\t\tcp.readfp(config[1], filename=config[0])\n\t\telse:\n\t\t\tcp.read(config)\n\t# Treat allowed_images as a special, additive case\n\tshutit.build['shutit.core.module.allowed_images'] = cp.get_config_set('build', 'shutit.core.module.allowed_images')\n\treturn cp", "title": "" }, { "docid": "09bd34c2e0b1cd33616ba985a97f2ee0", "score": "0.48903823", "text": "def _custom_tasks(template, info) -> None:\n if template == \"integration\":\n changes = {\"codeowners\": [info.codeowner]}\n\n if info.requirement:\n changes[\"requirements\"] = [info.requirement]\n\n info.update_manifest(**changes)\n\n if template == \"device_trigger\":\n info.update_strings(\n device_automation={\n **info.strings().get(\"device_automation\", {}),\n \"trigger_type\": {\n \"turned_on\": \"{entity_name} turned on\",\n \"turned_off\": \"{entity_name} turned off\",\n },\n }\n )\n\n if template == \"device_condition\":\n info.update_strings(\n device_automation={\n **info.strings().get(\"device_automation\", {}),\n \"condtion_type\": {\n \"is_on\": \"{entity_name} is on\",\n \"is_off\": \"{entity_name} is off\",\n },\n }\n )\n\n if template == \"device_action\":\n info.update_strings(\n device_automation={\n **info.strings().get(\"device_automation\", {}),\n \"action_type\": {\n \"turn_on\": \"Turn on {entity_name}\",\n \"turn_off\": \"Turn off {entity_name}\",\n },\n }\n )\n\n if template == \"config_flow\":\n info.update_manifest(config_flow=True)\n info.update_strings(\n config={\n \"title\": info.name,\n \"step\": {\n \"user\": {\"title\": \"Connect to the device\", \"data\": {\"host\": \"Host\"}}\n },\n \"error\": {\n \"cannot_connect\": \"Failed to connect, please try again\",\n \"invalid_auth\": \"Invalid authentication\",\n \"unknown\": \"Unexpected error\",\n },\n \"abort\": {\"already_configured\": \"Device is already configured\"},\n }\n )\n\n if template == \"config_flow_discovery\":\n info.update_manifest(config_flow=True)\n info.update_strings(\n config={\n \"title\": info.name,\n \"step\": {\n \"confirm\": {\n \"title\": info.name,\n \"description\": f\"Do you want to set up {info.name}?\",\n }\n },\n \"abort\": {\n \"single_instance_allowed\": f\"Only a single configuration of {info.name} is possible.\",\n \"no_devices_found\": f\"No {info.name} devices found on the network.\",\n },\n }\n )\n\n if template in (\"config_flow\", \"config_flow_discovery\"):\n init_file = info.integration_dir / \"__init__.py\"\n init_file.write_text(\n init_file.read_text()\n + \"\"\"\n\nasync def async_setup_entry(hass, entry):\n \\\"\\\"\\\"Set up a config entry for NEW_NAME.\\\"\\\"\\\"\n # TODO forward the entry for each platform that you want to set up.\n # hass.async_create_task(\n # hass.config_entries.async_forward_entry_setup(entry, \"media_player\")\n # )\n\n return True\n\"\"\"\n )", "title": "" }, { "docid": "4965713aa9af60fed320461f01deee92", "score": "0.4884857", "text": "def generate(physical, fancy, debug):\n\n #\n # Send all the output to a string by redirecting stdout\n 
#\n import StringIO\n sio = StringIO.StringIO()\n save = sys.stdout\n sys.stdout = sio\n\n #\n #\n #\n print \"<script>\"\n f=open(fancy)\n print f.read()\n f.close()\n print \"</script>\"\n\n\n #\n # Generate the control and main net part of the config\n #\n Defaults(physical, debug).start()\n\n #\n # \n #\n from xtparse import process\n try:\n process(debug, physical, fancy)\n except MyError, error:\n sys.stderr.write(\"%s\\n\" % str(error))\n sys.exit(1)\n\n #\n # Restore stdout\n #\n sys.stdout = save\n\n print \"<testbed_config>\"\n print sio.getvalue(),\n print \"</testbed_config>\"", "title": "" }, { "docid": "b1d714014ee43a630abe4e9df5c6d8ed", "score": "0.48782662", "text": "def parse_configs(code_config, field_config, time_config):\n # performing basic validation of config paths, obtaining dictionary of \n # config types and correpsonding raw dataframes\n raw_dfs = validate_config_dfs(code_config, field_config, time_config)\n\n # performing additional config-specific validation and parsing \n config_dict = {}\n for config_type, df in raw_dfs.items():\n if config_type in validation_functions:\n validation_functions[config_type](df)\n if config_type in parse_functions:\n config_dict[config_type] = parse_functions[config_type](df)\n else:\n config_dict[config_type] = df\n\n # concatenating code and field configs\n if CODE_CONFIG in config_dict:\n if FIELD_CONFIG in config_dict:\n config_dict[FIELD_CONFIG] = pd.concat([config_dict[CODE_CONFIG],\n config_dict[FIELD_CONFIG]], sort=True)\n else:\n config_dict[FIELD_CONFIG] = config_dict[CODE_CONFIG]\n config_dict.pop(CODE_CONFIG)\n\n return config_dict", "title": "" }, { "docid": "957ecf046c8747fc8eb2d7aa0924c742", "score": "0.48711365", "text": "async def get_config_to_integration_load() -> dict[str, Any]:\n\n return {\n \"command_line\": [\n {\n \"binary_sensor\": {\n \"name\": \"Test\",\n \"command\": \"echo 1\",\n \"payload_on\": \"1\",\n \"payload_off\": \"0\",\n \"command_timeout\": 15,\n }\n },\n {\n \"cover\": {\n \"name\": \"Test\",\n \"command_state\": \"echo 1\",\n \"command_timeout\": 15,\n }\n },\n {\n \"notify\": {\n \"name\": \"Test\",\n \"command\": \"echo 1\",\n \"command_timeout\": 15,\n }\n },\n {\n \"sensor\": {\n \"name\": \"Test\",\n \"command\": \"echo 5\",\n \"unit_of_measurement\": \"in\",\n \"command_timeout\": 15,\n }\n },\n {\n \"switch\": {\n \"name\": \"Test\",\n \"command_state\": \"echo 1\",\n \"command_timeout\": 15,\n }\n },\n ]\n }", "title": "" }, { "docid": "1bd9fad74c2b8cc0e29460accb8b00e7", "score": "0.48693904", "text": "def main(argv):\n config_generator = FaucetConfigGenerator()\n filepath = '/tmp/faucet_config_dump'\n egress = 2\n access = 3\n devices = 1\n topo_type = STACK\n argv = argv[1:]\n\n help_msg = \"\"\"\n <python3> build_config.py -e <egress_switches> -a <access_switches> -d <devices per switch>\n -p <config path> -t <topology type (flat, corp, stack)>\n \"\"\"\n\n try:\n opts, _ = getopt.getopt(\n argv, 'he:a:d:p:t:', ['egress=', 'access=', 'devices=', 'path=', 'type='])\n except getopt.GetoptError:\n print(help_msg)\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print(help_msg)\n sys.exit()\n elif opt in ('-e', '--egress'):\n egress = int(arg)\n elif opt in ('-a', '--access'):\n access = int(arg)\n elif opt in ('-d', '--devices'):\n devices = int(arg)\n elif opt in ('-p', '--path'):\n filepath = arg\n elif opt in ('-t', '--type'):\n topo_type = arg\n\n if topo_type == FLAT:\n faucet_config = config_generator.create_flat_faucet_config(access, devices)\n elif topo_type == 
CORP:\n faucet_config = config_generator.create_corp_faucet_config()\n elif topo_type == STACK:\n faucet_config = config_generator.create_scale_faucet_config(egress, access, devices)\n else:\n raise Exception('Unkown topology type: %s' % topo_type)\n\n config_map = cleanup_config(proto_dict(faucet_config))\n\n with open(filepath, 'w') as config_file:\n yaml.dump(config_map, config_file)", "title": "" }, { "docid": "2dd4813e0621c0a08a7ca1dba0e83c3a", "score": "0.4855035", "text": "def _processCfg(self):\n errors = []\n opts = dict(self.node_cfg)\n cls = get_class(opts.pop('cls'))\n self.dispatcher = cls(opts.pop('name', self._cfgfiles),\n self.log.getChild('dispatcher'), opts, self)\n\n if opts:\n self.dispatcher.errors.append(self.unknown_options(cls, opts))\n self.modules = OrderedDict()\n\n # create and initialize modules\n todos = list(self.module_cfg.items())\n while todos:\n modname, options = todos.pop(0)\n if modname in self.modules:\n # already created by Dispatcher (via Attached)\n continue\n # For Pinata modules: we need to access this in Dispatcher.get_module\n self.module_cfg[modname] = dict(options)\n modobj = self.dispatcher.get_module_instance(modname) # lazy\n if modobj is None:\n self.log.debug('Module %s returned None', modname)\n continue\n self.modules[modname] = modobj\n if isinstance(modobj, Pinata):\n # scan for dynamic devices\n pinata = self.dispatcher.get_module(modname)\n pinata_modules = list(pinata.scanModules())\n for name, _cfg in pinata_modules:\n if name in self.module_cfg:\n self.log.error('Module %s, from pinata %s, already'\n ' exists in config file!', name, modname)\n self.log.info('Pinata %s found %d modules', modname, len(pinata_modules))\n todos.extend(pinata_modules)\n\n # initialize all modules by getting them with Dispatcher.get_module,\n # which is done in the get_descriptive data\n # TODO: caching, to not make this extra work\n self.dispatcher.get_descriptive_data('')\n # =========== All modules are initialized ===========\n\n # all errors from initialization process\n errors = self.dispatcher.errors\n\n if not self._testonly:\n start_events = MultiEvent(default_timeout=30)\n for modname, modobj in self.modules.items():\n # startModule must return either a timeout value or None (default 30 sec)\n start_events.name = f'module {modname}'\n modobj.startModule(start_events)\n if not modobj.startModuleDone:\n errors.append(f'{modobj.startModule.__qualname__} was not called, probably missing super call')\n\n if errors:\n for errtxt in errors:\n for line in errtxt.split('\\n'):\n self.log.error(line)\n # print a list of config errors to stderr\n sys.stderr.write('\\n'.join(errors))\n sys.stderr.write('\\n')\n sys.exit(1)\n\n if self._testonly:\n return\n self.log.info('waiting for modules being started')\n start_events.name = None\n if not start_events.wait():\n # some timeout happened\n for name in start_events.waiting_for():\n self.log.warning('timeout when starting %s', name)\n self.log.info('all modules started')\n history_path = os.environ.get('FRAPPY_HISTORY')\n if history_path:\n from frappy_psi.historywriter import FrappyHistoryWriter # pylint: disable=import-outside-toplevel\n writer = FrappyHistoryWriter(history_path, PREDEFINED_ACCESSIBLES.keys(), self.dispatcher)\n # treat writer as a connection\n self.dispatcher.add_connection(writer)\n writer.init(self.dispatcher.handle_describe(writer, None, None))\n # TODO: if ever somebody wants to implement an other history writer:\n # - a general config file /etc/secp/frappy.conf or <frappy 
repo>/etc/frappy.conf\n # might be introduced, which contains the log, pid and cfg directory path and\n # the class path implementing the history\n # - or we just add here an other if statement:\n # history_path = os.environ.get('ALTERNATIVE_HISTORY')\n # if history_path:\n # from frappy_<xx>.historywriter import ... etc.", "title": "" }, { "docid": "0e683b5c682146176896ca4a7e7b27fb", "score": "0.48546398", "text": "def generate_kanon_config(sample: pd.DataFrame, k: int, cn_config: dict, topics: tuple):\n from cn.protect import Protect\n from cn.protect.privacy import KAnonymity\n from cn.protect.hierarchy import DataHierarchy, OrderHierarchy\n import uuid\n import textwrap\n\n protector = Protect(sample, KAnonymity(k))\n\n for prop_name, config in cn_config.items():\n protector.itypes[prop_name], protector.hierarchies[prop_name] = config\n\n private = protector.protect()\n\n tasks = {}\n\n def add_subtask(signature: str, **kwargs):\n # todo Don't add a new item to the dict on each call, but instead group tasks that\n # make use of the same function, e.g.:\n # - signature: drop_keys\n # args:\n # keys: [A, B]\n tasks[f\"{signature}-{uuid.uuid4()}\"] = kwargs\n\n for prop_name, (identifying, hierarchy) in cn_config.items():\n if private[prop_name][0] == \"*\":\n add_subtask(\"drop_keys\", keys=[prop_name])\n elif hierarchy is None:\n pass # no anonymization applied - do nothing\n elif isinstance(hierarchy, OrderHierarchy):\n lower, upper = [float(bound) for bound in private[prop_name][0][1:-1].split(\",\")]\n add_subtask(\"reduce_to_nearest_value\", keys=[prop_name], step_width=upper - lower)\n elif isinstance(hierarchy, DataHierarchy):\n actual_replacements = {}\n possible_replacements = hierarchy.df\n for value in private[prop_name]:\n # todo extract replaced from dataframe\n pass\n add_subtask(\"replace_with\", replacements=actual_replacements)\n else:\n print(\"Warning: Unsupported hierarchy type \" + str(type(hierarchy)))\n\n worker_config = {\n \"task_defaults\": {\n \"input_offset_reset\": \"earliest\",\n \"topic_encoding\": \"utf8\",\n \"storage_mode\": \"memory\"\n },\n \"tasks\": [{\n \"name\": task_name,\n \"function\": {\n \"signature\": task_name.split(\"-\")[0],\n \"args\": task_config\n }\n } for task_name, task_config in tasks.items()]\n }\n worker_config[\"tasks\"][0][\"input_topic\"] = topics[0]\n for task, previous_task in zip(worker_config[\"tasks\"][1:], worker_config[\"tasks\"]):\n task[\"input_topic\"] = previous_task[\"output_topic\"] = f\"{uuid.uuid4()}\"\n worker_config[\"tasks\"][-1][\"output_topic\"] = topics[1]\n\n print(textwrap.dedent(\"\"\"\n To configure k-anonymity in your data processing pipeline, include the following \n configuration snippet in your config.yml:\n ---\n \"\"\"))\n print(yaml.dump(worker_config))\n\n return worker_config", "title": "" }, { "docid": "d7b2e7664cd33e3e9aceb5059c7de0f5", "score": "0.48444206", "text": "def _get_can_tp_config(self):\r\n tp_configs = self.root.findall(\".//{http://autosar.org/schema/r4.0}CAN-TP-CONFIG\")\r\n for tp_config in tp_configs:\r\n cluster_name = tp_config.find(\"{http://autosar.org/schema/r4.0}COMMUNICATION-CLUSTER-REF\").text.split('/')[-1]\r\n cluster = self.clusters_dict.get(cluster_name)\r\n\r\n # Get TP CONNECTIONS information\r\n tp_connections = tp_config.find(\"{http://autosar.org/schema/r4.0}TP-CONNECTIONS\")\r\n for tp_connection in tp_connections:\r\n current_tpc = _CanTPC()\r\n\r\n current_tpc.addressing_format = 
tp_connection.find(\"{http://autosar.org/schema/r4.0}ADDRESSING-FORMAT\").text.split('/')[-1]\r\n current_tpc.data_pdu = tp_connection.find(\"{http://autosar.org/schema/r4.0}DATA-PDU-REF\").text.split('/')[-1]\r\n try:\r\n current_tpc.flow_control_pdu = tp_connection.find(\"{http://autosar.org/schema/r4.0}FLOW-CONTROL-PDU-REF\").text.split('/')[-1]\r\n except AttributeError:\r\n pass\r\n current_tpc.tp_sdu = tp_connection.find(\"{http://autosar.org/schema/r4.0}TP-SDU-REF\").text.split('/')[-1]\r\n current_tpc.transmitter = tp_connection.find(\"{http://autosar.org/schema/r4.0}TRANSMITTER-REF\").text.split('/')[-1]\r\n\r\n receiver_refs = tp_connection.find(\"{http://autosar.org/schema/r4.0}RECEIVER-REFS\")\r\n for receiver_ref in receiver_refs:\r\n current_tpc.receiver.append(receiver_ref.text.split('/')[-1])\r\n\r\n cluster.tp_connections.append(current_tpc)", "title": "" }, { "docid": "f273fe558a6582a1584a8baa7daa96dd", "score": "0.4841644", "text": "def configure(self, configuation):", "title": "" }, { "docid": "6c51d3ee8d7fb6bd6ff8166b91acfcf5", "score": "0.48380682", "text": "def get_base_config(cfg_parser):\n\tshutit = shutit_global.shutit\n\tshutit.config_parser = cp = cfg_parser\n\t# BEGIN Read from config files\n\t# build - details relating to the build\n\tshutit.build['privileged'] = cp.getboolean('build', 'privileged')\n\tshutit.build['base_image'] = cp.get('build', 'base_image')\n\tshutit.build['dotest'] = cp.get('build', 'dotest')\n\tshutit.build['net'] = cp.get('build', 'net')\n\tshutit.build['secret_words_set'] = set()\n\tshutit.build['completed'] = False\n\tshutit.build['step_through'] = False\n\tshutit.build['ctrlc_stop'] = False\n\tshutit.build['ctrlc_passthrough'] = False\n\tshutit.build['have_read_config_file'] = False\n\t# Width of terminal to set up on login and assume for other cases.\n\tshutit.build['stty_cols'] = 320\n\tshutit.build['vagrant_run_dir'] = None\n\tshutit.build['this_vagrant_run_dir'] = None\n\t# Signals are set here, which is useful for context-switching callbacks.\n\tshutit.shutit_signal['ID'] = 0\n\t# Take a command-line arg if given, else default.\n\tif shutit.build['conn_module'] is None:\n\t\tshutit.build['conn_module'] = cp.get('build', 'conn_module')\n\t# Track logins in a stack and details in logins.\n\tshutit.build['login_stack'] = []\n\tshutit.build['logins'] = {}\n\t# Whether to accept default configs\n\tshutit.build['accept_defaults'] = None\n\t# target - the target of the build, ie the container\n\tshutit.target['hostname'] = cp.get('target', 'hostname')\n\tshutit.target['locale'] = cp.get('target', 'locale')\n\tshutit.target['ports'] = cp.get('target', 'ports')\n\tshutit.target['volumes'] = cp.get('target', 'volumes')\n\tshutit.target['volumes_from'] = cp.get('target', 'volumes_from')\n\tshutit.target['name'] = cp.get('target', 'name')\n\tshutit.target['rm'] = cp.getboolean('target', 'rm')\n\t# host - the host on which the shutit script is run\n\tshutit.host['add_shutit_to_path'] = cp.getboolean('host', 'add_shutit_to_path')\n\tshutit.host['docker_executable'] = cp.get('host', 'docker_executable')\n\tshutit.host['dns'] = cp.get('host', 'dns')\n\tshutit.host['password'] = cp.get('host', 'password')\n\tshutit.build['secret_words_set'].add(shutit.host['password'])\n\tshutit.host['logfile'] = cp.get('host', 'logfile')\n\tshutit.host['shutit_module_path'] = cp.get('host', 'shutit_module_path').split(':')\n\t# repository - information relating to repository/registry\n\tshutit.repository['name'] = cp.get('repository', 
'name')\n\tshutit.repository['server'] = cp.get('repository', 'server')\n\tshutit.repository['push'] = cp.getboolean('repository', 'push')\n\tshutit.repository['tag'] = cp.getboolean('repository', 'tag')\n\tshutit.repository['export'] = cp.getboolean('repository', 'export')\n\tshutit.repository['save'] = cp.getboolean('repository', 'save')\n\tshutit.repository['suffix_date'] = cp.getboolean('repository', 'suffix_date')\n\tshutit.repository['suffix_format'] = cp.get('repository', 'suffix_format')\n\tshutit.repository['user'] = cp.get('repository', 'user')\n\tshutit.repository['password'] = cp.get('repository', 'password')\n\tshutit.build['secret_words_set'].add(shutit.repository['password'])\n\tshutit.repository['email'] = cp.get('repository', 'email')\n\tshutit.repository['tag_name'] = cp.get('repository', 'tag_name')\n\t# END Read from config files\n\n\t# BEGIN Standard expects\n\t# It's important that these have '.*' in them at the start, so that the matched data is reliably 'after' in the\n\t# child object. Use these where possible to make things more consistent.\n\t# Attempt to capture any starting prompt (when starting) with this regexp.\n\tshutit.expect_prompts['base_prompt'] = '\\r\\n.*[@#$] '\n\t# END Standard expects\n\n\tif shutit.build['delivery'] in ('bash','ssh'):\n\t\tif shutit.target['docker_image'] != '':\n\t\t\tprint('delivery method specified (' + shutit.build['delivery'] + ') and image_tag argument make no sense')\n\t\t\thandle_exit(exit_code=1)\n\tif shutit.target['docker_image'] == '':\n\t\tshutit.target['docker_image'] = shutit.build['base_image']\n\t# END tidy configs up\n\n\t# BEGIN warnings\n\t# FAILS begins\n\t# rm is incompatible with repository actions\n\tif shutit.target['rm'] and (shutit.repository['tag'] or shutit.repository['push'] or shutit.repository['save'] or shutit.repository['export']):\n\t\tprint(\"Can't have [target]/rm and [repository]/(push/save/export) set to true\")\n\t\thandle_exit(exit_code=1)\n\tif shutit.target['hostname'] != '' and shutit.build['net'] != '' and shutit.build['net'] != 'bridge':\n\t\tprint('\\n\\ntarget/hostname or build/net configs must be blank\\n\\n')\n\t\thandle_exit(exit_code=1)\n\t# FAILS ends", "title": "" }, { "docid": "842e44eebfa47ec517bb41e9a5f9a7c0", "score": "0.48260784", "text": "def create_c_dict(self):\n c_dict = super().create_c_dict()\n\n et_upper = self.app_name.upper()\n\n # get TCStat data dir/template to read\n c_dict['TC_STAT_INPUT_DIR'] = (\n self.config.getdir('EXTRACT_TILES_TC_STAT_INPUT_DIR', '')\n )\n\n c_dict['TC_STAT_INPUT_TEMPLATE'] = (\n self.config.getraw('filename_templates',\n 'EXTRACT_TILES_TC_STAT_INPUT_TEMPLATE',\n '')\n )\n if not c_dict['TC_STAT_INPUT_TEMPLATE']:\n self.log_error('Must set EXTRACT_TILES_TC_STAT_INPUT_TEMPLATE '\n 'to run ExtractTiles wrapper')\n\n # get gridded input/output directory/template to read\n for data_type in ['FCST', 'OBS']:\n # get [FCST/OBS]_INPUT_DIR\n c_dict[f'{data_type}_INPUT_DIR'] = (\n self.config.getdir(f'{data_type}_EXTRACT_TILES_INPUT_DIR', '')\n )\n if not c_dict[f'{data_type}_INPUT_DIR']:\n self.log_error(f'Must set {data_type}_EXTRACT_TILES_INPUT_DIR to '\n 'run ExtractTiles wrapper')\n\n # get [FCST/OBS]_[INPUT/OUTPUT]_TEMPLATE\n for put in ['INPUT', 'OUTPUT']:\n local_name = f'{data_type}_{put}_TEMPLATE'\n config_name = f'{data_type}_{et_upper}_{put}_TEMPLATE'\n c_dict[local_name] = (\n self.config.getraw('filename_templates',\n config_name)\n )\n if not c_dict[local_name]:\n self.log_error(f\"{config_name} must be set.\")\n\n 
c_dict['OUTPUT_DIR'] = (\n self.config.getdir('EXTRACT_TILES_OUTPUT_DIR', '')\n )\n if not c_dict['OUTPUT_DIR']:\n self.log_error('Must set EXTRACT_TILES_OUTPUT_DIR to run '\n 'ExtractTiles wrapper')\n\n c_dict['NLAT'] = self.config.getstr('config', 'EXTRACT_TILES_NLAT')\n c_dict['NLON'] = self.config.getstr('config', 'EXTRACT_TILES_NLON')\n c_dict['DLAT'] = self.config.getstr('config', 'EXTRACT_TILES_DLAT')\n c_dict['DLON'] = self.config.getstr('config', 'EXTRACT_TILES_DLON')\n c_dict['LAT_ADJ'] = self.config.getfloat('config',\n 'EXTRACT_TILES_LAT_ADJ')\n c_dict['LON_ADJ'] = self.config.getfloat('config',\n 'EXTRACT_TILES_LON_ADJ')\n\n return c_dict", "title": "" }, { "docid": "67efe9f194b0b81c776b11ac18a3efab", "score": "0.4816568", "text": "def setup_mtree(nw_src,nw_mcast_dst,inport,controller):\n if nw_mcast_dst == mcast_ip_addr1:\n mtree1_switches = []\n primary_tree = []\n if len(controller.mcast_groups.keys()) == 2:\n mtree1_switches = [10,11,13,12]\n primary_tree = [(13,12),(12,11),(12,10)]\n else:\n mtree1_switches = [7,6,5,4]\n primary_tree = [(7,6),(6,4),(6,5)]\n \n controller.primary_trees[nw_mcast_dst] = primary_tree\n return setup_mtree1_flow_tables(nw_src, nw_mcast_dst, inport,mtree1_switches,controller)\n elif nw_mcast_dst == mcast_ip_addr2:\n mtree2_switches = []\n primary_tree = []\n if len(controller.mcast_groups.keys()) == 2:\n mtree2_switches = [10,14,15]\n primary_tree = [(15,14),(15,10)]\n \n controller.primary_trees[nw_mcast_dst] = primary_tree #TODO REFACTOR !!!!!!!!!!!!!!!!!!!\n return setup_mtree2_flow_tables(nw_src, nw_mcast_dst, inport,mtree2_switches,controller)", "title": "" }, { "docid": "faff659c7dcd6cde063d49fb242f7860", "score": "0.48164737", "text": "def config(self,conf):\n for key in conf:\n if key in ('startx','starty','start_direction'):\n setattr(__class__,key,conf[key])\n if key in ('puzzle','uid','title','type','theme'):\n setattr(self,key,conf[key])", "title": "" }, { "docid": "502f6c9d32c0fe8c03f8e71df796ee15", "score": "0.48163405", "text": "def read(prog):\n try:\n with open(str(prog.config), \"r\") as file:\n raw = file.read().splitlines()\n except FileNotFoundError as ex:\n prog.log.error(\"config file '{}' not found\".format(ex.filename))\n return Prog.RetVal.exit_failure\n except OSError as ex:\n prog.log.error(\n \"config file '{}': {}\".format(ex.filename, ex.strerror.lower()))\n return Prog.RetVal.exit_failure\n\n prog.log.info1(\"+++ reading config file '{}'\".format(prog.config))\n\n line_pos = 0\n state = Prog.ConfigState()\n\n active_section = None\n target = None\n\n default_tlsa_list = []\n default_api = None\n\n log_level = None\n\n for l in raw:\n line_pos += 1\n state.line(line_pos)\n\n # matches section: \"[DOMAIN]\"\n match = re.match(r'\\s*\\[\\s*(?P<section>((\\w[a-zA-Z0-9-]*\\w|\\w+)\\.)+\\w+)\\s*\\](\\s*|\\s+#.*)$', l)\n if match:\n active_section = match.group('section').lower()\n prog.log.info3(\" + line {}: section: {}\".format(\n line_pos, active_section))\n for t in prog.target_list:\n if t.matches_domain(active_section):\n target = t\n break\n else:\n prog.target_list += [ Prog.Target(active_section) ]\n target = prog.target_list[-1]\n if default_api:\n target.api = default_api.copy()\n # NOT 'target.api = default_api'. If we do that, then in\n # the following line when we change the api object's\n # domain attribute, it will change the domain in _all_ of\n # the targets. 
We need to create a NEW object that is\n # (mostly) the same as the default_api object, and we do\n # this with the 'copy' method.\n target.api.set_domain(active_section)\n for tlsa in default_tlsa_list:\n tlsa.domain = active_section\n target.add_tlsa(tlsa)\n continue\n\n # matches parameter: \"param = input\"\n match = re.match(\n r'\\s*(?P<param>\\w+)\\s*=\\s*(?P<input>[^#]*)(\\s*|\\s#.*)$', l)\n if match:\n param = match.group('param')\n try:\n inputs = shlex.split(match.group('input'))\n except ValueError:\n if len(l) > 23:\n state.add_error(prog,\n \"unrecognized command: '{}...'\".format(l[:20]))\n else:\n state.add_error(prog,\n \"unrecognized command: '{}'\".format(l))\n continue\n\n if param == \"tlsa\":\n prog.log.info3(\" + line {}: parameter: {}, inputs: {}\".format(\n line_pos, param, inputs))\n if len(inputs) == 0:\n state.add_error(prog, \"no tlsa data given\")\n elif len(inputs) == 1:\n state.add_error(prog, \"tlsa record given insufficient data\")\n elif len(inputs) > 4:\n state.add_error(prog, \"tlsa record given superfluous data: '{}'\".format(' '.join(inputs[4:])))\n else:\n tlsa = get_tlsa_param(prog, inputs, active_section, state)\n if tlsa:\n if active_section:\n target.add_tlsa(tlsa)\n else:\n default_tlsa_list += [ tlsa ]\n\n elif param == \"api\":\n prog.log.info3(\n \" + line {}: parameter: {}, inputs: ({})...\".format(\n line_pos, param, len(inputs) ))\n if len(inputs) == 0:\n state.add_error(prog, \"api command given no input\")\n continue\n\n if inputs[0] in prog.apis:\n apimod = import_module('alnitak.api.' + inputs[0])\n api = apimod.get_api(prog, active_section,\n inputs[1:], state)\n if api:\n if active_section:\n target.api = api\n else:\n default_api = api\n else:\n state.add_error(prog,\n \"unrecognized api scheme: '{}'\".format(inputs[0]))\n\n elif param == \"dane_directory\":\n prog.log.info3(\" + line {}: parameter: {}, inputs: {}\".format(\n line_pos, param, inputs))\n if len(inputs) == 0:\n state.add_error(\n prog, \"dane_directory command given no input\")\n elif len(inputs) > 1:\n state.add_error(prog, \"dane_directory command given superfluous input: '{}'\".format(' '.join(inputs[1:])))\n else:\n prog.set_dane_directory(inputs[0])\n\n elif param == \"letsencrypt_directory\":\n prog.log.info3(\" + line {}: parameter: {}, inputs: {}\".format(\n line_pos, param, inputs))\n if len(inputs) == 0:\n state.add_error(\n prog, \"letsencrypt_directory command given no input\")\n elif len(inputs) > 1:\n state.add_error(prog, \"letsencrypt_directory command given superfluous input: '{}'\".format(' '.join(inputs[1:])))\n else:\n prog.set_letsencrypt_directory(inputs[0])\n\n elif param == \"log_level\":\n prog.log.info3(\" + line {}: parameter: {}, inputs: {}\".format(\n line_pos, param, inputs))\n if len(inputs) == 0:\n state.add_error(\n prog, \"log_level command given no input\")\n elif len(inputs) > 1:\n state.add_error(prog, \"log_level command given superfluous input: '{}'\".format(' '.join(inputs[1:])))\n else:\n if inputs[0] not in [ 'no', 'normal', 'verbose', 'debug' ]:\n state.add_error(prog, \"\")\n continue\n\n log_level = inputs[0]\n\n elif param == \"ttl\":\n prog.log.info3(\" + line {}: parameter: {}, inputs: {}\".format(\n line_pos, param, inputs))\n if len(inputs) == 0:\n state.add_error(\n prog, \"ttl command given no input\")\n elif len(inputs) > 1:\n state.add_error(prog, \"ttl command given superfluous input: '{}'\".format(' '.join(inputs[1:])))\n else:\n try:\n import alnitak.parser\n # python 3.4: 'from alnitak import parser' will\n # 
cause an error because of circular imports.\n # Importing like this will work\n ttl_value = alnitak.parser.ttl_check(prog, 0, 'config', inputs[0])\n except Except.Error1013:\n state.add_error(prog, \"ttl value '{}' not an integer\".format(inputs[0]))\n continue\n except Except.Error1100 as ex:\n state.add_error(prog, \"ttl value '{}' exceeds maximum value of '{}'\".format(inputs[0], ex.max))\n continue\n except Except.Error1101 as ex:\n state.add_error(prog, \"ttl value '{}' less than minimum value of '{}'\".format(inputs[0], ex.min))\n continue\n\n prog.set_ttl(ttl_value)\n\n\n else:\n state.add_error(prog,\n \"unrecognized parameter '{}'\".format(param))\n\n continue\n\n # matches empty line\n match = re.match(r'^\\s*(#.*)?$', l)\n if match:\n continue\n\n if len(l) > 23:\n state.add_error(prog,\n \"unrecognized command: '{}...'\".format(l[:20]))\n else:\n state.add_error(prog, \"unrecognized command: '{}'\".format(l))\n\n\n state.lineno = None\n for t in prog.target_list:\n if not t.tlsa:\n state.add_error(\n prog, \"target '{}' has no tlsa record\".format(t.domain))\n if not t.api:\n state.add_error(\n prog, \"target '{}' has no api scheme\".format(t.domain))\n\n if state.errors:\n return Prog.RetVal.config_failure\n\n prog.log.info3(\"+++ targets...\")\n if prog.target_list:\n for t in prog.target_list:\n prog.log.info3(str(t))\n else:\n prog.log.info3(\" + no targets found\")\n prog.log.error(\"config file: no targets given\")\n return Prog.RetVal.config_failure\n\n # set the delayed log level. We don't do this straight away or else we\n # will have mixed logging for this function itself.\n if log_level:\n prog.set_log_level(log_level)\n\n return Prog.RetVal.ok", "title": "" }, { "docid": "6137123851bb0ced08b10e8149dff4f7", "score": "0.4812785", "text": "def test_config_load_multiple_configs(rule_manager, tmp_path, capsys):\n\n config1 = {}\n config1['rules'] = []\n config1['rules'].append({\n 'name': 'rulepack1.printFilename'\n })\n config1['rules'].append({\n 'name': 'rulepack1.findSingleLineCommentsWith',\n 'settings': {'with_string' : 'the'}\n })\n\n config1_file = tmp_path / \"config1.json\"\n with open(config1_file, 'w') as outfile:\n json.dump(config1, outfile)\n\n\n config2 = {}\n config2['rules'] = []\n config2['rules'].append({\n 'name': 'rulepack1.printFilename'\n })\n config2['rules'].append({\n 'name': 'rulepack1.findSingleLineCommentsWith',\n 'settings': {'with_string' : 'the'}\n })\n config2['rules'].append({\n 'name': 'rulepack1.findSingleLineCommentsWith',\n 'settings': {'with_string' : 'different'}\n })\n config2['rules'].append({\n 'name': 'rulepack1.printLanguage'\n })\n\n config2_file = tmp_path / \"config2.json\"\n with open(config2_file, 'w') as outfile:\n json.dump(config2, outfile)\n\n rule_manager.load_rules([str(config1_file), str(config2_file)], ['./tests'])\n\n captured = capsys.readouterr()\n assert \"Could not load rule\" not in captured.out\n assert 'rulepack1.findSingleLineCommentsWith' in rule_manager._rules_dict\n assert len(rule_manager._rules_dict['rulepack1.findSingleLineCommentsWith']) == 2\n assert 'rulepack1.printFilename' in rule_manager._rules_dict\n assert len(rule_manager._rules_dict['rulepack1.printFilename']) == 1\n assert 'rulepack1.printLanguage' in rule_manager._rules_dict", "title": "" }, { "docid": "533d6192d1391336843ee09221a32ee2", "score": "0.48095852", "text": "def build_cfg(noop: bool) -> cconfig.Config:\n return cconfig.Config(\n basedir='/base',\n subdir='sub',\n baseurl='/repo',\n branches_file='/conf/test-branches.yaml',\n 
noop=noop,\n series='weird',\n space='outer',\n repo_auth='jrl:secret',\n )", "title": "" }, { "docid": "49dde00206678594c48413f52c51d290", "score": "0.48072264", "text": "def create_target_config():\n print (\"Creating target config dir\")\n input_data = target_host1\n app_data = {'mode': 'target'}\n output_data = {'file': 'events.log'}\n create_json_file(\"./target\", input_data, \"inputs.json\")\n create_json_file(\"./target\", app_data, \"app.json\")\n create_json_file(\"./target\", output_data, \"outputs.json\")", "title": "" }, { "docid": "70d4ddfc1a1e777728ce5588568424b4", "score": "0.4802051", "text": "def va_config_routes(self):\r\n logger.debug(\"Start to config route\")\r\n route_info = self.route_info\r\n\r\n for key in route_info.keys() :\r\n\r\n devobj = self.testdata.va_get_by_uniq_id(key,False)\r\n routes = route_info[key]\r\n node_type = devobj.get_nodetype()\r\n if (node_type == 'linux' or node_type == 'dir') :\r\n #only support linux pc and director now.\r\n devobj = self.testdata.va_get_by_uniq_id(key)\r\n for route in routes :\r\n if (not 'auto_set' in route) or (route['auto_set'] != 1):\r\n continue\r\n else:\r\n # if auto_set =1 , it means need to config route for defined pc\r\n if node_type == 'linux':\r\n result = devobj.config_route(route['dst'],route['netmask'],route['gateway'])\r\n\r\n if node_type == 'dir':\r\n #address netmask format.\r\n if isinstance(route['netmask'],int) :\r\n address = route['dst']+ '/' +route['netmask']\r\n else :\r\n address = route['dst'] + '/' + route['netmask']\r\n result = devobj.va_set_route(ipaddress.IPv4Network(address), route['gateway'],True)\r\n\r\n if not result :\r\n logger.error('Failed to config route')\r\n logger.debug(devobj.show_route())\r\n logger.debug(devobj.show_interface())\r\n return False\r\n\r\n logger.info(\"Completed to config route\")\r\n return True", "title": "" }, { "docid": "8c09b8d7e0e585250c8821646300f60a", "score": "0.48007068", "text": "def getCommonConf(opts, config):\n\n if not opts:\n raise ProgrammingError(\"opts is None\")\n if not config:\n raise ProgrammingError(\"config is None\")\n \n polltime = POLL_DELAY_SECONDS_DEFAULT\n if opts.polltime:\n polltime = opts.polltime\n \n try:\n sshdkeypath = config.get(\"sshd\", \"generatedkey\")\n hostbasedconfig = config.get(\"sshd\", \"hostbasedconfig\")\n knownhostsconfig = config.get(\"sshd\", \"knownhostsconfig\")\n scratchdir = config.get(\"ctxservice\", \"scratchspacedir\")\n retr_template = config.get(\"ctxservice\", \"retr_template\")\n retr_template2 = config.get(\"ctxservice\", \"retr_template2\")\n err_template = config.get(\"ctxservice\", \"err_template\")\n err_template2 = config.get(\"ctxservice\", \"err_template2\")\n ok_template = config.get(\"ctxservice\", \"ok_template\")\n ok_template2 = config.get(\"ctxservice\", \"ok_template2\")\n ipandhostdir = config.get(\"taskpaths\", \"ipandhostdir\")\n restartdir = config.get(\"taskpaths\", \"restartdir\")\n thishostdir = config.get(\"taskpaths\", \"thishostdir\")\n thishostfinalizedir = config.get(\"taskpaths\", \"thishostfinalizedir\")\n logfilepath = config.get(\"ctxservice\", \"logfilepath\")\n curl = config.get(\"systempaths\", \"curl\")\n hostname = config.get(\"systempaths\", \"hostname\")\n datadir = config.get(\"taskpaths\", \"datadir\")\n etchosts_exe = config.get(\"taskpaths\", \"etchosts\")\n except:\n exception_type = sys.exc_type\n try:\n exceptname = exception_type.__name__ \n except AttributeError:\n exceptname = exception_type\n msg = \"%s: %s\" % (str(exceptname), 
str(sys.exc_value))\n raise InvalidConfig(msg)\n \n # no evaluate yet, pass False for now\n return CommonConf(opts.trace, False, ipandhostdir, restartdir, polltime, sshdkeypath, scratchdir, retr_template, retr_template2, err_template, err_template2, ok_template, ok_template2, hostbasedconfig, knownhostsconfig, thishostdir, thishostfinalizedir, logfilepath, curl, hostname, datadir, etchosts_exe)", "title": "" }, { "docid": "a10b0ad170662478202a55e64ea83006", "score": "0.47946462", "text": "def _configure(self):\n # reset group_inputs back to what it was just after self.setup() in case _configure\n # is called multiple times.\n self._group_inputs = self._pre_config_group_inputs.copy()\n for n, lst in self._group_inputs.items():\n self._group_inputs[n] = lst.copy()\n\n for subsys in self._subsystems_myproc:\n subsys._configure()\n subsys._setup_var_data()\n\n self._has_guess |= subsys._has_guess\n self._has_bounds |= subsys._has_bounds\n self.matrix_free |= subsys.matrix_free\n\n conf_info = self._problem_meta['config_info']\n conf_info._reset()\n\n self._problem_meta['setup_status'] = _SetupStatus.POST_CONFIGURE\n self.configure()\n\n # if our configure() has added or promoted any variables, we have to call\n # _setup_var_data again on any modified systems and their ancestors (only those that\n # are our descendents).\n for s in conf_info._modified_system_iter(self):\n s._setup_var_data()", "title": "" }, { "docid": "d6c63e36ea4219369653d5a307e99f78", "score": "0.47769693", "text": "def read_config(config_data):\n out_lines = []\n df = io.StringIO(config_data.decode('utf-8'))\n odf = io.StringIO()\n in_comment = False\n for line in df.readlines():\n # ack, multiline comments in /objects/generic/statuspod/statuspod.object\n # also in /objects/ancient/hologramgalaxy/hologramgalaxy.object, and\n # unfortunately that one necessitates some stripping (though stripping\n # is no more CPU-intensive than hardcoding the few instances)\n if line.lstrip()[:2] == '/*':\n if line.rstrip()[-2:] != '*/':\n in_comment = True\n else:\n if in_comment:\n if line.lstrip()[:2] == '*/':\n in_comment = False\n else:\n idx = line.find('//')\n if idx == -1:\n print(line, file=odf)\n else:\n print(line[0:idx], file=odf)\n\n # This list of patterns allows us to load all the data we care about\n # (that I'm aware of anyway) but I've moved to just stripping out\n # anything after // automatically. That shaves about a second off of\n # our startup time. 
Doubtless there are image processing speedups\n # which would probably account for the majority of the loadtime)\n #elif line[:3] != '// ':\n # found_pattern = False\n # for pattern in [\n # ' // ',\n # # special case for /objects/biome/foundry/lavatanklarge/lavatanklarge.object\n # '//FIRE',\n # # special cases for /objects/biome/tentacle/tentaclespawner1/tentaclespawner1.object\n # '//type',\n # '//additional',\n # '//relative',\n # '//[x,y] size',\n # '//total',\n # # special case for /objects/avian/sawblade/sawblade.object\n # '//mollys',\n # # special case for /objects/avian/birdgroundlantern/birdgroundlantern.object\n # '//\"interactive\"',\n # # special cases for /objects/outpost/signstore/signdispenser.object\n # '//\"openSounds\"',\n # '//\"closeSounds\"',\n # # special case for /objects/glitch/medievalspikes/medievalspikes.object\n # '//TODO',\n # # special case for /objects/themed/island/islandhammock/islandhammock.object\n # '//\"sitCoverImage\"',\n # # special case for /objects/protectorate/objects/protectoratewindbanner3/protectoratewindbanner3.object\n # '//\"soundEffect\"',\n # # special cases for /objects/protectorate/objects/protectoratelobbyvending/protectoratelobbyvending.object\n # '//\"onSound\"',\n # '//\"offSound\"',\n # # special case for /objects/spawner/spawners/spawner_human.object\n # '//6000,',\n # # special cases for /objects/spawner/colonydeed/colonydeed.object\n # '//whether',\n # '//delay',\n # '//cooldown',\n # '//scan',\n # '//length',\n # '//seconds',\n # # special cases for /objects/spawner/invisiblemonsterspawner.object\n # '//level',\n # '//options',\n # '//only',\n # # special case for /objects/crafting/upgradeablecraftingobjects/craftingwheel/craftingwheel.object\n # '//this',\n # ]:\n # idx = line.find(pattern)\n # if idx != -1:\n # found_pattern = True\n # break\n # if found_pattern:\n # print(line[0:idx], file=odf)\n # else:\n # print(line, file=odf)\n odf.seek(0)\n return json.load(odf)", "title": "" }, { "docid": "25b08aa81b669ada92f82d37aa4524fc", "score": "0.47752577", "text": "def create_expected_configs(self, user):", "title": "" }, { "docid": "2f8bee1aa4b6247584816b0ef1708a09", "score": "0.47747043", "text": "def _buildxml(self, namesonly=False):\n xpath = self._data_dict[self._type]\n self._get_xpath = \"//configuration/\" + xpath\n top = E(\"configuration\")\n dot = top\n for name in xpath.split(\"/\"):\n dot.append(E(name))\n dot = dot[0]\n\n if namesonly is True:\n dot.attrib[\"recurse\"] = \"false\"\n return top", "title": "" }, { "docid": "b667672d18243e5b230c3994d9067eb8", "score": "0.47745883", "text": "def _prepare_from_cfg(self, model, config: Dict):\n assert isinstance(self.channel_unit_cfg, dict)\n assert 'units' in self.channel_unit_cfg\n config = self.channel_unit_cfg['units']\n if isinstance(config, str):\n config = fileio.load(config)\n assert isinstance(config, dict)\n\n if 'Analyzer' in self.parse_cfg['type']:\n self.parse_cfg.pop('from_cfg')\n tracer = TASK_UTILS.build(self.parse_cfg)\n unit_configs = tracer.analyze(model)\n\n units = []\n for unit_key in config:\n init_args = copy.deepcopy(self.unit_default_args)\n if 'init_args' in config[unit_key]:\n init_args.update(config[unit_key]['init_args'])\n config[unit_key]['init_args'] = init_args\n if 'channels' in config[unit_key]:\n unit = self.unit_class.init_from_cfg(model, config[unit_key])\n unit.name = unit_key\n else:\n try:\n unit = self._prepare_unit_from_init_cfg(\n model, config[unit_key], unit_configs[unit_key])\n except ValueError:\n raise ValueError(\n 
'Initializing channel_mutator from the config needs'\n 'to include `channels` or `Analyzer` in the config.')\n units.append(unit)\n return units", "title": "" }, { "docid": "39575d462080125595dc243c5961e201", "score": "0.4771972", "text": "def convert_area(config: dict[str, Any]) -> dict[str, Any]:\n my_map = {\n CONF_NAME: dyn_const.CONF_NAME,\n CONF_FADE: dyn_const.CONF_FADE,\n CONF_NO_DEFAULT: dyn_const.CONF_NO_DEFAULT,\n CONF_ROOM_ON: dyn_const.CONF_ROOM_ON,\n CONF_ROOM_OFF: dyn_const.CONF_ROOM_OFF,\n CONF_CHANNEL_COVER: dyn_const.CONF_CHANNEL_COVER,\n CONF_DEVICE_CLASS: dyn_const.CONF_DEVICE_CLASS,\n CONF_OPEN_PRESET: dyn_const.CONF_OPEN_PRESET,\n CONF_CLOSE_PRESET: dyn_const.CONF_CLOSE_PRESET,\n CONF_STOP_PRESET: dyn_const.CONF_STOP_PRESET,\n CONF_DURATION: dyn_const.CONF_DURATION,\n CONF_TILT_TIME: dyn_const.CONF_TILT_TIME,\n }\n result = convert_with_map(config, my_map)\n if CONF_CHANNEL in config:\n result[dyn_const.CONF_CHANNEL] = {\n channel: convert_channel(channel_conf)\n for (channel, channel_conf) in config[CONF_CHANNEL].items()\n }\n if CONF_PRESET in config:\n result[dyn_const.CONF_PRESET] = {\n preset: convert_preset(preset_conf)\n for (preset, preset_conf) in config[CONF_PRESET].items()\n }\n if CONF_TEMPLATE in config:\n result[dyn_const.CONF_TEMPLATE] = TEMPLATE_MAP[config[CONF_TEMPLATE]]\n return result", "title": "" }, { "docid": "8820ca0f214d75bd13acc80a805e32ba", "score": "0.4770118", "text": "def setup_directories(self, config):\n # self.input_dir = os.path.join(config['run_directory'], 'HYDRO_IN')\n # self.output_dir = os.path.join(config['run_directory'], 'HYDRO_OUTPUT')\n self.input_dir = os.path.join(\"..\", \"HYDRO_IN\")\n self.output_dir = os.path.join(\"..\", \"HYDRO_OUTPUT\")\n if os.path.exists(self.input_dir) is False:\n os.mkdir(self.input_dir, 0o755)\n if os.path.exists(self.output_dir) is False:\n os.mkdir(self.output_dir, 0o755)", "title": "" }, { "docid": "583e222f8bd286553c858d1efc3be5bd", "score": "0.4767908", "text": "def convert_config(\n config: dict[str, Any] | MappingProxyType[str, Any]\n) -> dict[str, Any]:\n my_map = {\n CONF_NAME: dyn_const.CONF_NAME,\n CONF_HOST: dyn_const.CONF_HOST,\n CONF_PORT: dyn_const.CONF_PORT,\n CONF_AUTO_DISCOVER: dyn_const.CONF_AUTO_DISCOVER,\n CONF_POLL_TIMER: dyn_const.CONF_POLL_TIMER,\n }\n result = convert_with_map(config, my_map)\n if CONF_AREA in config:\n result[dyn_const.CONF_AREA] = {\n area: convert_area(area_conf)\n for (area, area_conf) in config[CONF_AREA].items()\n }\n if CONF_DEFAULT in config:\n result[dyn_const.CONF_DEFAULT] = convert_default(config[CONF_DEFAULT])\n if CONF_ACTIVE in config:\n result[dyn_const.CONF_ACTIVE] = ACTIVE_MAP[config[CONF_ACTIVE]]\n if CONF_PRESET in config:\n result[dyn_const.CONF_PRESET] = {\n preset: convert_preset(preset_conf)\n for (preset, preset_conf) in config[CONF_PRESET].items()\n }\n if CONF_TEMPLATE in config:\n result[dyn_const.CONF_TEMPLATE] = {\n TEMPLATE_MAP[template]: convert_template(template_conf)\n for (template, template_conf) in config[CONF_TEMPLATE].items()\n }\n return result", "title": "" }, { "docid": "7d61a5423a7361acd85da28d777ddbf9", "score": "0.47649974", "text": "def __configure(self):\n\n # load the core configuration\n self.context = zope.configuration.xmlconfig.file(self.confFile,\n execute=False)\n\n # load extensions and plugins\n for epath in p6.app.extension.extPaths():\n for extconf in p6.app.extension.extConfs(epath):\n print extconf\n p6.app.extension.loadExtension(extconf, self.context)\n\n # perform the actions specified by the 
configuration files\n self.context.execute_actions()\n\n # expand any metadata page groups\n newpages = []\n for page in self.pages:\n if getattr(page, 'expand', False):\n newpages = newpages + [n for n in page(None)]\n else:\n newpages.append(page)\n\n self.pages = newpages\n del newpages", "title": "" }, { "docid": "209e4332bddf24f6df92200144c582c9", "score": "0.47578472", "text": "def convert_preset(config: dict[str, Any]) -> dict[str, Any]:\n my_map = {\n CONF_NAME: dyn_const.CONF_NAME,\n CONF_FADE: dyn_const.CONF_FADE,\n CONF_LEVEL: dyn_const.CONF_LEVEL,\n }\n return convert_with_map(config, my_map)", "title": "" }, { "docid": "b9380a7021e055edbfc46110674f09a0", "score": "0.47564927", "text": "def customize_process(group_tree, groupvars, group_hostnames, hostvars, specific_vars):", "title": "" }, { "docid": "fceee2e3bd77da8acbbc1503cdb5530e", "score": "0.47562543", "text": "def _propagate_to_ctg(meta, new_cycle_acls):\n print \"Propagating roles to cycle task groups\"\n new_ctg_acls = _propagate_to_children(\n meta,\n new_cycle_acls,\n \"CycleTaskGroup\",\n \"cycle_id\",\n \"Cycle\",\n )\n\n _propagate_to_cycle_tasks(meta, new_ctg_acls)", "title": "" }, { "docid": "94e539f5279a0065475a320f1cbdcb7a", "score": "0.47546867", "text": "def from_yaml_tree_transform(self, node, tag, ctx):", "title": "" }, { "docid": "1551c75fe2645763cd4a9141653b99cc", "score": "0.47540516", "text": "def run_rendering(self):\n \n if not isinstance(self.config, list):\n # If the config was not a list, just convert this one element into a list\n self.config = [self.config]\n\n for i, c in enumerate(self.config):\n # For each conversion\n if not 'data' in c:\n # Check that the yaml resume file is specified\n error(\"Configuration file has not defined 'data' with resume yaml file\")\n else:\n with open(c['data']) as resume_file:\n self.resume = yaml.load(resume_file)\n\n for output in c['outputs']:\n fmt = output['format']\n # Check that we have a plugin whose classname starts with this format\n assert any([x.startswith(fmt) for x in Plugin.registered])\n template_file = output['template']\n filebasename,filetype = os.path.splitext(template_file)\n if filetype[1:] not in self.allowed_filetypes:\n error(\"File type/extension %s is not one of following: %s\" % (filetype,' '.join(self.allowed_filetypes)))\n output_filename = output['output']\n # Instantiate the required conversion plugin\n print (\"Creating %s ...\" % output_filename, end='')\n text = Plugin.registered['%sResume' % fmt](template_file, self.resume, self.skip)\n text.render(output_filename)\n print (\" done\")", "title": "" }, { "docid": "0e423b542d0f9c5ad93a7d5aa81f56bc", "score": "0.47520763", "text": "def load_config():\n config = Config(\"config.yaml\")\n # print(config)\n class_setting = dict()\n for item in config[\"dynamic\"][\"inherit\"].values():\n cls_path = item[\"path\"]\n del item[\"path\"]\n class_setting[cls_path] = dict(item)\n\n return class_setting", "title": "" }, { "docid": "b1afebd696a1ae7c310b440158b0c15a", "score": "0.4749547", "text": "def handle_config(cls, config, base_config):\n if 'include' not in config:\n config['include'] = []\n elif isinstance(config['include'], str):\n config['include'] = [config['include']]\n for p, path in enumerate(config['include']):\n if not os.path.isabs(path):\n config['include'][p] = os.path.abspath(os.path.join(base_config['base_dir'], path))\n load_includes(config['include'])\n\n defaults = {'runner': 'local', 'interface': 'memmap', 'custom': False}\n for key, default in defaults.items():\n 
if key not in config:\n config[key] = default\n\n if not isinstance(config['runner'], MutableMapping):\n config['runner'] = {'class': config['runner']}\n Runner[config['runner']['class']].handle_subconfig(config['runner'], base_config)\n if not isinstance(config['interface'], MutableMapping):\n config['interface'] = {'class': config['interface']}\n RunnerInterface[config['interface']['class']].handle_config(config['interface'], base_config)\n Worker.handle_config(config, base_config)", "title": "" }, { "docid": "949dc7b05142d1855fc7580314eb17c1", "score": "0.47479874", "text": "def normalize(conf):\n\n def expand(section_name):\n section = conf.get(section_name)\n if isinstance(section, string_types):\n conf[section_name] = {\n 'search': [section],\n 'register': section,\n 'publish': section\n }\n elif section:\n search = section.get('search')\n if search and not isinstance(search, list):\n section['search'] = [search]\n\n expand('registry')\n expand('ca')\n\n registry = conf.get('registry')\n registry['search'] = map(lambda item: item.rstrip('/'), registry.get('search', []))\n registry['register'] = registry.get('register', '').rstrip('/')\n registry['publish'] = registry.get('publish', '').rstrip('/')\n\n conf['tmp'] = path.abspath(conf['tmp'])\n return conf", "title": "" }, { "docid": "b8ad7d8cdbba28fac64b55cef09e59d6", "score": "0.47457963", "text": "def post_run_javac(self):\n\n\tpar = {}\n\tfor x in self.inputs:\n\t\tpar[x.parent.id] = x.parent\n\n\tinner = {}\n\tfor k in par.values():\n\t\tpath = k.abspath(self.env)\n\t\tlst = os.listdir(path)\n\n\t\tfor u in lst:\n\t\t\tif u.find('$') >= 0:\n\t\t\t\tinner_class_node = k.find_or_declare(u)\n\t\t\t\tinner[inner_class_node.id] = inner_class_node\n\n\tto_add = set(inner.keys()) - set([x.id for x in self.outputs])\n\tfor x in to_add:\n\t\tself.outputs.append(inner[x])\n\n\treturn Task.Task.post_run(self)", "title": "" }, { "docid": "0b4816f12318d09a78b0efb53530460f", "score": "0.4735967", "text": "def configure_device():\n config = {}\n\n profile = device_config[\"sys\"][\"device\"].get(\"profile\", ATTR_SWITCH)\n\n for cover_id in range(covers):\n topic, payload = get_cover(cover_id, profile)\n config[topic] = payload\n\n for sensor, description in cover_sensors.items():\n topic, payload = get_sensor(\n sensor, description, profile=profile, cover_id=cover_id\n )\n config[topic] = payload\n\n for relay_id in range(relays):\n consumption_types = [\n item.lower()\n for item in device_config[\"sys\"][\"ui_data\"].get(\"consumption_types\", [])\n ]\n relay_type = get_consumption_type(consumption_types, relay_id)\n\n topic, payload = get_switch(relay_id, relay_type, profile)\n config[topic] = payload\n\n topic, payload = get_light(relay_id, relay_type, profile)\n config[topic] = payload\n\n for sensor, description in relay_sensors.items():\n topic, payload = get_sensor(\n sensor, description, profile=profile, relay_id=relay_id\n )\n config[topic] = payload\n\n for binary_sensor, description in relay_binary_sensors.items():\n topic, payload = get_binary_sensor(\n binary_sensor, description, relay_id, profile=profile\n )\n config[topic] = payload\n\n for input_id in range(inputs):\n input_type = device_config[f\"input:{input_id}\"][\"type\"]\n\n for event in input_events:\n topic, payload = get_input(input_id, input_type, event)\n config[topic] = payload\n\n for binary_sensor, description in input_binary_sensors.items():\n topic, payload = get_binary_sensor(\n binary_sensor,\n description,\n input_id,\n is_input=True,\n 
input_type=input_type,\n )\n config[topic] = payload\n\n for button, descripton in buttons.items():\n topic, payload = get_button(button, descripton)\n config[topic] = payload\n\n for sensor, description in sensors.items():\n topic, payload = get_sensor(sensor, description)\n config[topic] = payload\n\n for binary_sensor, description in binary_sensors.items():\n topic, payload = get_binary_sensor(binary_sensor, description)\n config[topic] = payload\n\n return config", "title": "" }, { "docid": "53d6aaf01ff66b084ff75781667b1f44", "score": "0.4717589", "text": "def __init__(self,parser):\n ## TODO: Initialization without parser (e.g. script example)\n self.parser = parser\n \n ## set some vars\n ################################################################\n ## separators\n self.separator = \"## -----------------------------------------------------------------------------\"\n self.verboseseparator = \"## --Verbose--------------------------------------------------------------------\"\n self.debugSeparator = \"## --Debug----------------------------------------------------------------------\"\n \n ## get options and args\n ################################################################\n (self.options, self.args) = parser.parse_args()\n\n ## Setup Logs\n ################################################################\n self.messages = {}\n \n ## main section inside configuration files\n ################################################################\n self.main_section = self.options.section\n ## TODO: set in cfg-file or options or 'main'\n self.sysclass_section = \"sysclass\"\n\n ## parse config files\n ################################################################\n self.config = self.options.config\n ## find path\n if self.config:\n config_files = []\n ## File/Path destination of the configuration\n if not os.path.exists(self.config):\n msg = str(self.config) + \" is not a file or path in current directory. Trying out script location.\"\n self.add_message(msg, level='WARNING')\n if self.config.startswith('./'):\n self.config = os.path.dirname(os.path.abspath(sys.argv[0])) + '/' + self.config.lstrip('./')\n if not os.path.exists(self.config):\n msg = \"Sorry, no configuration file found! 
Use --no-config if you want to disable configfile usage\"\n self.add_message(msg, level='ERROR')\n raise Exception(msg)\n\n if os.path.isfile(self.config):\n config_files = [self.config]\n elif os.path.isdir(self.config):\n for f in os.listdir(self.config):\n if f.endswith(\".conf\"):\n config_files.append(self.config.rstrip('/') + '/' + f)\n else:\n msg = \"Sorry, \" + str(self.config) + \" is neither a directory, nor a file!\"\n self.add_message(msg, level='ERROR')\n raise Exception(msg)\n ## List of configuration files to be parsed\n self.config_files = config_files\n\n ## ConfigParser object with parsed configuration\n self.configuration = ConfigParser.ConfigParser()\n self.configuration.read(self.config_files)\n else:\n self.add_message(\"Configfile functionality disabled!\", level='SYSINFO')\n\n \n ## Set debugging and the verbosity\n ################################################################\n ## Get verbosity option\n# if options.verbosity:\n if ((self.options.verbosity) or\n (self.config and\n self.configuration.has_option(self.sysclass_section,'verbose') and\n self.configuration.get(self.sysclass_section,'verbose') == 'True')):\n self.verbose = True\n else:\n self.verbose = False\n \n \n ## Get debugging option\n# if options.debug:\n if ((self.options.debug) or\n (self.config and\n self.configuration.has_option(self.sysclass_section,'debug') and\n self.configuration.get(self.sysclass_section,'debug') == 'True')):\n self.debug = True\n self.verbose = True\n else:\n self.debug = False\n if self.verbose:\n self.BeVerbose(\"Getting Options:\\nVerbose:\\t\" + str(self.verbose) + \"\\nDebug:\\t\" + str(self.debug))", "title": "" }, { "docid": "8a27ef88bf7c868d002a1ecf0b90fd0c", "score": "0.4716479", "text": "def test_configuration(dumbalgo):\n nested_algo = {\"DumbAlgo\": dict(value=6, scoring=5)}\n algo = dumbalgo(8, value=1)\n config = algo.configuration\n assert config == {\n \"dumbalgo\": {\n \"seed\": None,\n \"value\": 1,\n \"scoring\": 0,\n \"judgement\": None,\n \"suspend\": False,\n \"done\": False,\n }\n }", "title": "" }, { "docid": "c5edd682a6eff9d939ae70cedd2f1a73", "score": "0.47078672", "text": "def _propagate_to_cte(meta, new_ct_acls):\n print \"Propagating roles to cycle task entries\"\n return _propagate_to_children(\n meta,\n new_ct_acls,\n \"CycleTaskEntry\",\n \"cycle_task_group_object_task_id\",\n \"CycleTaskGroupObjectTask\",\n )", "title": "" }, { "docid": "e6449199d42453275cb4ca5819151be5", "score": "0.4705307", "text": "def _get_conv_configs(dtype_configs):\n conv_configs = []\n observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT\n for convs in [_Conv1dMetadata, _Conv2dMetadata, _Conv3dMetadata]:\n\n # (1) Single conv modules/functions\n # -----------------------------------\n # conv module\n conv_configs.append(\n BackendPatternConfig(convs.root).set_observation_type(\n observation_type) # noqa: E131\n .set_dtype_configs(dtype_configs).set_root_module(\n convs.root).set_reference_quantized_module(\n convs.reference).set_qat_module(convs.qat))\n # conv qat module\n conv_configs.append(\n BackendPatternConfig(convs.qat).set_observation_type(\n observation_type) # noqa: E131\n .set_dtype_configs(dtype_configs).set_root_module(\n convs.root).set_reference_quantized_module(convs.reference))\n # functional conv\n conv_configs.append(\n BackendPatternConfig(convs.func).set_observation_type(\n observation_type) # noqa: E131\n .set_dtype_configs(dtype_configs)._set_input_type_to_index({\n 'weight':\n 1,\n 'bias':\n 2\n }))\n\n # (2) Conv + 
relu\n # -----------------\n # 2.1 conv module + relu fusion configs\n # conv relu fusion, conv module + relu module\n conv_configs.append(\n BackendPatternConfig(\n (torch.nn.ReLU,\n convs.root)).set_dtype_configs(dtype_configs) # noqa: E131\n .set_fuser_method(\n reverse_sequential_wrapper2(\n convs.fused_conv_relu)).set_fused_module(\n convs.fused_conv_relu))\n # conv relu fusion, conv module + functional relu\n conv_configs.append(\n BackendPatternConfig(\n (F.relu,\n convs.root)).set_dtype_configs(dtype_configs) # noqa: E131\n .set_fuser_method(\n reverse_sequential_wrapper2(\n convs.fused_conv_relu)).set_fused_module(\n convs.fused_conv_relu))\n # 2.2 conv module + relu fused module configs\n # conv relu, fused module\n conv_configs.append(\n BackendPatternConfig(convs.fused_conv_relu).set_observation_type(\n observation_type) # noqa: E131\n .set_dtype_configs(dtype_configs).set_root_module(\n convs.root).set_reference_quantized_module(\n convs.reference).set_qat_module(convs.relu_qat))\n # conv relu, qat fused module\n conv_configs.append(\n BackendPatternConfig(convs.relu_qat).set_observation_type(\n observation_type) # noqa: E131\n .set_dtype_configs(dtype_configs).set_root_module(\n convs.root).set_reference_quantized_module(convs.reference))\n # 2.3 functional conv + relu configs\n # conv relu, functional conv + relu module\n conv_configs.append(\n BackendPatternConfig(\n (torch.nn.ReLU, convs.func)).set_observation_type(\n observation_type) # noqa: E131\n .set_dtype_configs(dtype_configs))\n # conv relu, functional conv + functional relu\n conv_configs.append(\n BackendPatternConfig((F.relu, convs.func)).set_observation_type(\n observation_type) # noqa: E131\n .set_dtype_configs(dtype_configs))\n\n # fused conv relu\n conv_configs.append(\n BackendPatternConfig(convs.fused_conv_relu).set_dtype_configs(\n dtype_configs) # noqa: E131\n .set_qat_module(convs.relu_qat))\n\n conv_configs.append(\n BackendPatternConfig(convs.relu_qat).set_dtype_configs(\n dtype_configs) # noqa: E131\n .set_root_module(convs.root).set_reference_quantized_module(\n convs.reference))\n\n # (3) Conv + batchnorm (+ relu)\n # -------------------------------\n # 3.1 conv bn fusion configs\n # conv + bn fusion\n conv_configs.append(\n BackendPatternConfig(\n (convs.bn,\n convs.root)).set_dtype_configs(dtype_configs) # noqa: E131\n .set_fuser_method(reverse2(fuse_conv_bn)).set_fused_module(\n convs.fused_conv_bn))\n # conv + bn + relu module fusion\n conv_configs.append(\n BackendPatternConfig(\n (nn.ReLU,\n (convs.bn,\n convs.root))).set_dtype_configs(dtype_configs) # noqa: E131\n .set_fuser_method(reverse3(fuse_conv_bn_relu)).set_fused_module(\n convs.fused_conv_bn_relu))\n # conv + bn + relu functional fusion\n conv_configs.append(\n BackendPatternConfig(\n (F.relu,\n (convs.bn,\n convs.root))).set_dtype_configs(dtype_configs) # noqa: E131\n .set_root_module(convs.root).set_fuser_method(\n reverse3(fuse_conv_bn_relu)).set_fused_module(\n convs.fused_conv_bn_relu))\n # TODO: we can add fusion for torch.relu as well\n\n # 3.2 conv + bn (+ relu) fused module configs\n # fused conv bn\n conv_configs.append(\n BackendPatternConfig(convs.fused_conv_bn).set_dtype_configs(\n dtype_configs) # noqa: E131\n .set_qat_module(convs.bn_qat))\n\n # fused conv bn relu\n conv_configs.append(\n BackendPatternConfig(convs.fused_conv_bn_relu).set_dtype_configs(\n dtype_configs) # noqa: E131\n .set_qat_module(convs.bn_relu_qat))\n\n # conv bn, qat fused module\n conv_configs.append(\n 
BackendPatternConfig(convs.bn_qat).set_observation_type(\n observation_type) # noqa: E131\n .set_dtype_configs(dtype_configs).set_root_module(\n convs.root).set_reference_quantized_module(convs.reference))\n # conv bn relu, qat fused module\n conv_configs.append(\n BackendPatternConfig(convs.bn_relu_qat).set_observation_type(\n observation_type) # noqa: E131\n .set_dtype_configs(dtype_configs).set_root_module(\n convs.root).set_reference_quantized_module(convs.reference))\n\n # (4) conv transpose and its fusion\n # 4.1 conv transpose config\n conv_configs.append(\n BackendPatternConfig(convs.transpose).set_dtype_configs(\n dtype_configs) # noqa: E131\n .set_root_module(convs.transpose).set_reference_quantized_module(\n convs.transpose_reference))\n\n # 4.2 conv transpose + bn fusion\n conv_configs.append(\n BackendPatternConfig(\n (convs.bn, convs.transpose)).set_dtype_configs(\n dtype_configs) # noqa: E131\n .set_fuser_method(reverse2(fuse_convtranspose_bn)).set_root_module(\n convs.transpose).set_reference_quantized_module(\n convs.transpose_reference))\n\n return conv_configs", "title": "" }, { "docid": "1caa3b30e9e06bff1c9c8184508bb5ff", "score": "0.47014508", "text": "def _state_replaced(self, want, have):\n\n commands = []\n\n # Drill each iteration of want n have and then based on dest and afi tyoe comparison take config call\n for w in want:\n for addr_want in w.get(\"address_families\"):\n for route_want in addr_want.get(\"routes\"):\n check = False\n for h in have:\n if h.get(\"address_families\"):\n for addr_have in h.get(\"address_families\"):\n for route_have in addr_have.get(\"routes\"):\n if (\n route_want.get(\"dest\")\n == route_have.get(\"dest\")\n and addr_want[\"afi\"]\n == addr_have[\"afi\"]\n ):\n check = True\n have_set = set()\n new_hops = []\n for each in route_want.get(\n \"next_hops\"\n ):\n want_set = set()\n new_dict_to_set(\n each, [], want_set, 0\n )\n new_hops.append(want_set)\n new_dict_to_set(\n addr_have, [], have_set, 0\n )\n # Check if the have dict next_hops value is diff from want dict next_hops\n have_dict = filter_dict_having_none_value(\n route_want.get(\"next_hops\")[0],\n route_have.get(\"next_hops\")[0],\n )\n # update the have_dict with forward_router_address\n have_dict.update(\n {\n \"forward_router_address\": route_have.get(\n \"next_hops\"\n )[\n 0\n ].get(\n \"forward_router_address\"\n )\n }\n )\n # updating the have_dict with next_hops val that's not None\n new_have_dict = {}\n for k, v in have_dict.items():\n if v is not None:\n new_have_dict.update({k: v})\n\n # Set the new config from the user provided want config\n cmd = self._set_config(\n w,\n h,\n addr_want,\n route_want,\n route_have,\n new_hops,\n have_set,\n )\n\n if cmd:\n # since inplace update isn't allowed for static routes, preconfigured\n # static routes needs to be deleted before the new want static routes changes\n # are applied\n clear_route_have = copy.deepcopy(\n route_have\n )\n # inplace update is allowed in case of ipv6 static routes, so not deleting it\n # before applying the want changes\n if \":\" not in route_want.get(\n \"dest\"\n ):\n commands.extend(\n self._clear_config(\n {},\n h,\n {},\n addr_have,\n {},\n clear_route_have,\n )\n )\n commands.extend(cmd)\n if check:\n break\n if check:\n break\n if not check:\n # For configuring any non-existing want config\n new_hops = []\n for each in route_want.get(\"next_hops\"):\n want_set = set()\n new_dict_to_set(each, [], want_set, 0)\n new_hops.append(want_set)\n commands.extend(\n self._set_config(\n w,\n {},\n 
addr_want,\n route_want,\n {},\n new_hops,\n set(),\n )\n )\n commands = [each for each in commands if \"no\" in each] + [\n each for each in commands if \"no\" not in each\n ]\n\n return commands", "title": "" }, { "docid": "e02a8d145f05b26953933f17729d4b01", "score": "0.46912372", "text": "def start(self):\n\n # tenants is the main structure to be converted into zuul main.yaml\n tenants = {}\n # projects_list is the list of projects used to check for conflicts\n projects_list = {}\n tenant_resources_cache = {}\n\n auth_rules = {}\n\n for tenant_name, tenant_conf in self.main_resources.get(\n \"resources\", {}).get(\"tenants\", {}).items():\n\n # When a tenant_resources already exists\n # It is only set if we loaded tenant resources from\n # a config repo copy (eg. config-check)\n if self.tenant_resources:\n # We only proceed when the args gateway_url\n # match the tenant url\n if tenant_conf['url'] != '%s/manage' % (\n self.gateway_url.rstrip('/')):\n continue\n\n self.log.debug(\n \"--[ Processing %s - %s\" % (tenant_name, tenant_conf))\n\n # First we look for the tenant resources\n if tenant_name != self.default_tenant_name and \\\n tenant_conf[\"url\"] != self.main_resources[\"public-url\"]:\n # check for v2\n tenant_url = tenant_conf['url'].rstrip('/')\n if not tenant_url.endswith('/v2'):\n tenant_url = os.path.join(tenant_url, \"v2\")\n url = os.path.join(tenant_url, 'resources')\n if self.tenant_resources:\n self.log.debug(\"%s: loading resources from workspace\",\n tenant_name)\n tenant_resources = self.tenant_resources\n else:\n self.log.debug(\"%s: loading resources %s\",\n tenant_name, url)\n tenant_resources = get_resources(url)\n else:\n tenant_resources = self.main_resources\n # Fallback to default_tenant_name tenant default connection\n if not tenant_conf.get(\"default-connection\"):\n tenant_conf[\"default-connection\"] = self.main_resources[\n \"resources\"][\"tenants\"][self.default_tenant_name][\n \"default-connection\"]\n\n tenant_resources_cache[tenant_name] = tenant_resources\n\n if not self.utests:\n # Then we pull tenant config repository for legacy zuul\n # flat files\n path = self.fetch_git_repo(\n tenant_name, tenant_resources[\"config-repo\"],\n self.cache_dir)\n tenants_dir = os.path.join(path, 'zuul')\n if not os.path.isdir(tenants_dir):\n continue\n tenants_conf_files = self.discover_yaml_files(tenants_dir)\n # And we load flat files\n self.merge_tenant_from_files(\n tenants, auth_rules, tenants_conf_files,\n tenant_name, projects_list)\n\n # We load project from the resources\n default_conn = tenant_conf[\"default-connection\"]\n self.merge_tenant_from_resources(\n tenants, tenant_resources, tenant_name, projects_list,\n self.main_resources, default_conn, tenant_conf)\n\n for tenant_name, tenant_conf in self.main_resources.get(\n \"resources\", {}).get(\"tenants\", {}).items():\n\n tenant_resources = tenant_resources_cache.get(tenant_name)\n if not tenant_resources:\n continue\n\n default_conn = tenant_conf[\"default-connection\"]\n\n # Finally we add Repos not listed in sr with an include: [] to Zuul\n skip_missing_resources = False\n if tenant_conf[\"url\"] == self.main_resources[\"public-url\"]:\n if tenant_name != self.default_tenant_name:\n # We only add local missing resources to the\n # default_tenant_name tenant\n skip_missing_resources = True\n # Check default_conn is a registered connection\n if default_conn not in self.main_resources[\n 'resources']['connections']:\n # We cannot add repos to Zuul if no valid connection for\n # that tenant\n 
self.log.debug(\n \"Skip adding missing repos. The tenant has an invalid\"\n \" default connection: %s\" % default_conn)\n continue\n if not skip_missing_resources:\n self.add_missing_repos(\n tenants, tenant_resources, tenant_name, projects_list,\n self.main_resources, default_conn)\n\n self.log.debug(\"]-- Finish processing %s\" % tenant_name)\n\n final_tenant_data = self.final_tenant_merge(tenants)\n auth_rules_data = self.merge_auth_rules(auth_rules)\n final_data = auth_rules_data + final_tenant_data\n return yaml.safe_dump(final_data)", "title": "" }, { "docid": "e24f6f8a3a25a8ed0cab7fc4b0ca09e8", "score": "0.46880388", "text": "def configure_common_directories(cls, tc_config_files):\n if cls.config_directory is None:\n # Get config directory from properties\n config_directory = cls.get_configured_value('TOOLIUM_CONFIG_DIRECTORY',\n tc_config_files.config_directory, 'conf')\n prop_filenames = cls.get_configured_value('TOOLIUM_CONFIG_PROPERTIES_FILENAMES',\n tc_config_files.config_properties_filenames, 'properties.cfg')\n cls.config_directory = cls._find_parent_directory(config_directory, prop_filenames.split(';')[0])\n\n # Get output directory from properties and create it\n cls.output_directory = cls.get_configured_value('TOOLIUM_OUTPUT_DIRECTORY',\n tc_config_files.output_directory, 'output')\n if not os.path.isabs(cls.output_directory):\n # If output directory is relative, we use the same path as config directory\n cls.output_directory = os.path.join(os.path.dirname(cls.config_directory), cls.output_directory)\n makedirs_safe(cls.output_directory)\n\n # Get visual baseline directory from properties\n default_baseline = os.path.join(cls.output_directory, 'visualtests', 'baseline')\n cls.visual_baseline_directory = cls.get_configured_value('TOOLIUM_VISUAL_BASELINE_DIRECTORY',\n tc_config_files.visual_baseline_directory,\n default_baseline)\n if not os.path.isabs(cls.visual_baseline_directory):\n # If baseline directory is relative, we use the same path as config directory\n cls.visual_baseline_directory = os.path.join(os.path.dirname(cls.config_directory),\n cls.visual_baseline_directory)", "title": "" }, { "docid": "4e9d755a2d1fb2e7bdd6e7c0b88c579d", "score": "0.468542", "text": "def translate_cfg(self,cfg_str):\n yml = {\"name\": None,\"parameters\": {}, \"cells\": [] }\n # Regex\n re_params = re.compile('^\\s*\\$param{\\s*\"([a-zA-Z0-9_]+)\"\\s*}\\s*=\\s*([^;]+);?\\s*$')\n re_name = re.compile('^\\s*Name\\s*=\\s([a-zA-Z0-9_]+)\\s*$')\n re_cell = re.compile('^\\s*([a-zA-Z0-9_]+)\\s+([RWrw])\\s+([^;\\s]+)\\s+([^;\\s]+)\\s*$')\n for line in cfg_str.split('\\n'): \n p = re_params.search(line)\n n = re_name.search(line)\n c = re_cell.search(line)\n # Check to see which one matched\n if p is not None:\n rhs = self.sub_perl_params(p.group(2))\n rhs = self.attempt_int_conv(rhs)\n yml['parameters'][p.group(1)] = rhs\n elif n is not None:\n yml['name'] = n.group(1)\n elif c is not None:\n width = self.sub_perl_params(c.group(3))\n width = self.attempt_int_conv(width)\n mult = self.sub_perl_params(c.group(4))\n mult = self.attempt_int_conv(mult)\n yml['cells'].append(self.return_cell(c.group(1),c.group(2),width,mult))\n return yaml.dump(yml)", "title": "" }, { "docid": "d75bf4d4785b4f76cc377da141cea37c", "score": "0.46834138", "text": "def _parse_train_cfg(self):", "title": "" }, { "docid": "1587bdd915b1bef0bf99a5e0ae053992", "score": "0.46775225", "text": "def prepare_config(config: ConfigType, env_metadata: EnvMetaDataType) -> ConfigType:\n config = 
config_utils.make_config_mutable(config_utils.unset_struct(config))\n key = \"type_to_select\"\n if key in config.agent.encoder:\n encoder_type_to_select = config.agent.encoder[key]\n # config.agent.encoder = config.agent.encoder[encoder_type_to_select]\n else:\n encoder_type_to_select = config.agent.encoder.type\n if encoder_type_to_select in [\"identity\"]:\n # if the encoder is an identity encoder infer the shape of the input dim.\n config.agent.encoder_feature_dim = env_metadata[\"env_obs_space\"].shape[0]\n\n key = \"ordered_task_list\"\n if key in env_metadata and env_metadata[key]:\n config.env.ordered_task_list = deepcopy(env_metadata[key])\n config = config_utils.make_config_immutable(config)\n\n return config", "title": "" }, { "docid": "a8af68d96081823c8bd0aabe0c6eda2f", "score": "0.46763518", "text": "def c3k_sysbi_automation_config():\n path = 'apollo.scripts.entsw.automation.auto_control' # Standard path required; link must point to specific module (use run_tests2 >=v2.9.0).\n apollo_config = lib.get_station_configuration()\n uag_production_line = apollo_config.add_production_line(name='UAG_C3K')\n pcbst = uag_production_line.add_area(name='SYSBI')\n station = pcbst.add_test_station(name='Station')\n # Configuration Data\n station.add_configuration_data(key='AUTOMATION', value=dict(enabled=True))\n\n ssh_dictionary = dict(host='localhost', user='gen-apollo', password='Ad@pCr01!', timeout='30')\n\n auto_container = station.add_super_container(name='AUTO')\n auto_container.add_connection(\n name='PC',\n host=\"10.1.1.12\" if '185' in lib.get_hostname() else \"10.1.1.11\",\n port=2027,\n protocol='telnet',\n )\n auto_container.add_connection(name='LOCAL', protocol='ssh', **ssh_dictionary)\n\n auto_container.assign_pre_sequence(sequence_definition='{}.pre_control'.format(path))\n auto_container.add_pid_map(pid='73-*',\n sequence_definition='{}.main_control'.format(path),\n )\n\n for rack in range(1, 3):\n for slot in range(1, 13):\n cell = auto_container.add_container(name='UUT{:02}_{:02}'.format(rack, slot))\n cell.add_connection(\n name='uutTN',\n host=\"10.1.1.11\",\n port=2003 + (rack - 1) * 12 + (slot - 1),\n protocol='telnet',\n )\n cell.add_connection(name='serverSSH', protocol='ssh', **ssh_dictionary)\n cell.assign_pre_sequence(\n sequence_definition='apollo.scripts.entsw.cat3.area_sequences.c3k_all_pre_sequences.pre_pcbft_gen2'\n )\n cell.add_pid_map(\n pid='WS-C3*',\n sequence_definition='apollo.scripts.entsw.cat3.area_sequences.c3k_pcbft_run.standard_switch_sysbi'\n )\n\n for rack in range(3, 5):\n for slot in range(1, 13):\n cell = auto_container.add_container(name='UUT{:02}_{:02}'.format(rack, slot))\n cell.add_connection(\n name='uutTN',\n host=\"10.1.1.8\",\n port=2003 + (rack - 3) * 12 + (slot - 1),\n protocol='telnet',\n )\n cell.add_connection(name='serverSSH', protocol='ssh', **ssh_dictionary)\n cell.assign_pre_sequence(\n sequence_definition='apollo.scripts.entsw.cat3.area_sequences.c3k_all_pre_sequences.pre_pcbft_gen2'\n )\n cell.add_pid_map(\n pid='WS-C3*',\n sequence_definition='apollo.scripts.entsw.cat3.area_sequences.c3k_pcbft_run.standard_switch_sysbi'\n )\n\n for rack in range(5, 7):\n for slot in range(1, 13):\n cell = auto_container.add_container(name='UUT{:02}_{:02}'.format(rack, slot))\n cell.add_connection(\n name='uutTN',\n host=\"10.1.1.9\",\n port=2003 + (rack - 5) * 12 + (slot - 1),\n protocol='telnet',\n )\n cell.add_connection(name='serverSSH', protocol='ssh', **ssh_dictionary)\n cell.assign_pre_sequence(\n 
sequence_definition='apollo.scripts.entsw.cat3.area_sequences.c3k_all_pre_sequences.pre_pcbft_gen2'\n )\n cell.add_pid_map(\n pid='WS-C3*',\n sequence_definition='apollo.scripts.entsw.cat3.area_sequences.c3k_pcbft_run.standard_switch_sysbi'\n )\n\n for rack in range(7, 9):\n for slot in range(1, 13):\n cell = auto_container.add_container(name='UUT{:02}_{:02}'.format(rack, slot))\n cell.add_connection(\n name='uutTN',\n host=\"10.1.1.10\",\n port=2003 + (rack - 7) * 12 + (slot - 1),\n protocol='telnet',\n )\n cell.add_connection(name='serverSSH', protocol='ssh', **ssh_dictionary)\n cell.assign_pre_sequence(\n sequence_definition='apollo.scripts.entsw.cat3.area_sequences.c3k_all_pre_sequences.pre_pcbft_gen2'\n )\n cell.add_pid_map(\n pid='WS-C3*',\n sequence_definition='apollo.scripts.entsw.cat3.area_sequences.c3k_pcbft_run.standard_switch_sysbi'\n )", "title": "" }, { "docid": "daa76f69532dd141363f6cc0d3f11375", "score": "0.46760038", "text": "def get_config():", "title": "" }, { "docid": "23a827c5a16d63ae28cc7fccb9852083", "score": "0.46759373", "text": "def test_generic_preprocess(self):\n init_test_data()\n with tempfile.TemporaryDirectory() as tmpdirname:\n scratch_dir = Path(tmpdirname)\n print('scratch dir :', tmpdirname)\n args = [(f'masking_config.masking_config_{mod}.visualisation_path', str(scratch_dir / f'vis_{mod}')) for mod in\n ['anat', 'func']]\n config, workflow_uid = prepare_config(json_config_path=JSON_CONFIG_PATH, scratch_dir=scratch_dir,\n additional_args=args)\n prepare_experiment_result_dataframe(config=config, workflow_uid=workflow_uid)\n config_path = os.path.expanduser(os.path.join(scratch_dir, 'config.json'))\n\n generic(BIDS_BASE,\n '/usr/share/mouse-brain-atlases/dsurqec_200micron.nii',\n registration_mask='/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii',\n functional_match={'acquisition': ['EPIlowcov'], },\n structural_match={'acquisition': ['TurboRARElowcov'], },\n out_base='{}/preprocessing'.format(scratch_dir),\n workflow_name='generic',\n keep_work=False,\n subjects=['0000'],\n masking_config_path=config_path,\n )\n assert not os.path.exists(os.path.join(scratch_dir, 'preprocessing/crashdump'))\n for f in ['anat/sub-0000_ses-ofM_acq-TurboRARElowcov_T2w.nii.gz',\n 'func/sub-0000_ses-ofM_task-JogB_acq-EPIlowcov_run-0_bold.nii.gz']:\n out_path = os.path.exists(os.path.join(scratch_dir, 'preprocessing/generic/sub-0000/ses-ofM', f))\n assert out_path, f'Test failed, path {out_path} does not exist.'\n print('finished test_generic_preprocess successfully.')", "title": "" }, { "docid": "35cfbe6673f488b4f1197f563d23a2de", "score": "0.46735027", "text": "def get_configurations_by_preset(config, model, fq_to_hw_confs, hardware_config):\n\n def _apply_preset_rule(preset_, fq_name, param_type, confs, to_skip=None):\n if param_type == 'weights':\n if preset_ == 'accuracy':\n return confs[-1]\n return confs[0]\n if not to_skip or fq_name not in [fq for _, fqs in to_skip for fq in fqs]:\n if preset_ == 'performance':\n return confs[0]\n return confs[-1]\n return confs\n\n def _intersect_and_apply_preset(preset_, fq_to_hw_confs_, fqs_to_unify_):\n\n def _unify_and_apply_preset(preset_, cur_conf, fqs_to_unify_):\n def _test_shapes(shapes):\n return any([s[0] != shapes[0][0] or len(s) == 1 or s[1] != shapes[0][1] for s in shapes])\n\n for bridges, fqs in fqs_to_unify_:\n res_conf = []\n with_concat = 'Concat' in [get_node_by_name(model, bridge).type for bridge in bridges]\n fq_input_shapes = [get_input_shape(get_node_by_name(model, fq), 0) for fq in fqs]\n 
unclear_layout = _test_shapes(fq_input_shapes)\n bridge_layers = [get_node_by_name(model, bridge) for bridge in bridges]\n bridge_input_shapes = [get_input_shape(layer, i) for layer in bridge_layers for i in layer.in_ports()]\n broadcasting = _test_shapes(bridge_input_shapes)\n for fq in fqs:\n for key in cur_conf[fq]:\n if key in ACTIVATION_QUANTIZATION_MODES:\n if with_concat or unclear_layout or broadcasting:\n if not isinstance(cur_conf[fq]['activations'], list):\n cur_conf[fq]['activations'] = [cur_conf[fq]['activations']]\n\n configuration = [c for c in cur_conf[fq][key] if c['granularity'] == 'pertensor']\n else:\n configuration = cur_conf[fq][key]\n res_conf = intersect_configs(res_conf, configuration, primary_bitwidth) if res_conf \\\n else configuration\n if not res_conf:\n raise Exception('Fake quantize nodes {} cannot be unified'.format(fqs))\n for fq in fqs:\n for key in cur_conf[fq]:\n if key in ACTIVATION_QUANTIZATION_MODES:\n cur_conf[fq][key] = _apply_preset_rule(preset_, fq, key, res_conf)\n return cur_conf\n\n primary_bitwidth = hardware_config[1]['primary_bitwidth']\n res = {}\n for key, value in fq_to_hw_confs_.items():\n conf = dict()\n for i_type in QUANTIZATION_MODES:\n if i_type in value:\n res_conf = []\n for _, configuration in value[i_type]:\n res_conf = intersect_configs(res_conf, configuration, primary_bitwidth) if res_conf \\\n else configuration\n if not res_conf:\n raise Exception('Fake quantize node {} does not have a suitable configuration'\n ' for layers {}'.format(key, [layer for layer, _ in value[i_type]]))\n conf[i_type] = _apply_preset_rule(preset_, key, i_type, res_conf, fqs_to_unify_)\n res[key] = conf\n res = _unify_and_apply_preset(preset_, res, fqs_to_unify_)\n return res\n\n available_presets = ['accuracy', 'mixed', 'performance']\n preset = config.preset\n if preset not in available_presets:\n raise Exception('Unsupported preset value: {}.'\n ' Supported values are {}'.format(preset, available_presets))\n\n fqs_to_unify = find_fqs_to_unify(model, config)\n result = _intersect_and_apply_preset(preset, fq_to_hw_confs, fqs_to_unify)\n\n return result", "title": "" }, { "docid": "9a5811e1ec6fd0a011b53eb410840557", "score": "0.46715492", "text": "def do_aws_cf_configure():\n\n # TODO(cmaloney): Move to Config class introduced in https://github.com/dcos/dcos/pull/623\n config = Config(CONFIG_PATH)\n\n # This process is usually ran from a docker container where default boto3 credential\n # method may fail and as such, we allow passing these creds explicitly\n if 'aws_template_storage_access_key_id' in config:\n os.environ['AWS_ACCESS_KEY_ID'] = config['aws_template_storage_access_key_id']\n if 'aws_template_storage_secret_access_key' in config:\n os.environ['AWS_SECRET_ACCESS_KEY'] = config['aws_template_storage_secret_access_key']\n if 'aws_template_storage_region_name' in config:\n os.environ['AWS_DEFAULT_REGION'] = config['aws_template_storage_region_name']\n\n gen_config = config.as_gen_format()\n\n extra_sources = [\n gen.build_deploy.aws.aws_base_source,\n aws_advanced_source,\n gen.build_deploy.aws.groups['master'][1]]\n\n sources, targets, _ = gen.get_dcosconfig_source_target_and_templates(gen_config, [], extra_sources)\n targets.append(get_aws_advanced_target())\n resolver = gen.internals.resolve_configuration(sources, targets)\n # TODO(cmaloney): kill this function and make the API return the structured\n # results api as was always intended rather than the flattened / lossy other\n # format. This will be an API incompatible change. 
The messages format was\n # specifically so that there wouldn't be this sort of API incompatibility.\n messages = normalize_config_validation(resolver.status_dict)\n if messages:\n print_messages(messages)\n return 1\n\n # TODO(cmaloney): This is really hacky but a lot simpler than merging all the config flows into\n # one currently.\n # Get out the calculated arguments and manually move critical calculated ones to the gen_config\n # object.\n # NOTE: the copying across, as well as validation is guaranteed to succeed because we've already\n # done a validation run.\n full_config = {k: v.value for k, v in resolver.arguments.items()}\n\n # Calculate the config ID and values that depend on it.\n config_id = gen.get_config_id(full_config)\n reproducible_artifact_path = 'config_id/{}'.format(config_id)\n cloudformation_s3_url = '{}/config_id/{}'.format(full_config['bootstrap_url'], config_id)\n cloudformation_s3_url_full = '{}/cloudformation'.format(cloudformation_s3_url)\n\n # TODO(cmaloney): Switch to using the targets\n gen_config['bootstrap_url'] = full_config['bootstrap_url']\n gen_config['provider'] = full_config['provider']\n gen_config['bootstrap_id'] = full_config['bootstrap_id']\n gen_config['package_ids'] = full_config['package_ids']\n gen_config['cloudformation_s3_url_full'] = cloudformation_s3_url_full\n\n # Convert the bootstrap_Variant string we have back to a bootstrap_id as used internally by all\n # the tooling (never has empty string, uses None to say \"no variant\")\n bootstrap_variant = full_config['bootstrap_variant'] if full_config['bootstrap_variant'] else None\n\n artifacts = list()\n for built_resource in list(gen.build_deploy.aws.do_create(\n tag='dcos_generate_config.sh --aws-cloudformation',\n build_name='Custom',\n reproducible_artifact_path=reproducible_artifact_path,\n variant_arguments={bootstrap_variant: gen_config},\n commit=full_config['dcos_image_commit'],\n all_completes=None)):\n artifacts += release.built_resource_to_artifacts(built_resource)\n\n artifacts += list(release.make_bootstrap_artifacts(\n full_config['bootstrap_id'],\n json.loads(full_config['package_ids']),\n bootstrap_variant,\n 'artifacts',\n ))\n\n for package_id in json.loads(full_config['package_ids']):\n package_filename = release.make_package_filename(package_id)\n artifacts.append({\n 'reproducible_path': package_filename,\n 'local_path': 'artifacts/' + package_filename,\n })\n\n # Upload all the artifacts to the config-id path and then print out what\n # the path that should be used is, as well as saving a local json file for\n # easy machine access / processing.\n repository = release.Repository(\n full_config['aws_template_storage_bucket_path'],\n None,\n 'config_id/' + config_id)\n\n storage_commands = repository.make_commands({'core_artifacts': [], 'channel_artifacts': artifacts})\n\n cf_dir = GENCONF_DIR + '/cloudformation'\n log.warning(\"Writing local copies to {}\".format(cf_dir))\n storage_provider = release.storage.local.LocalStorageProvider(cf_dir)\n release.apply_storage_commands({'local': storage_provider}, storage_commands)\n\n log.warning(\n \"Generated templates locally available at %s\",\n cf_dir + \"/\" + reproducible_artifact_path)\n # TODO(cmaloney): Print where the user can find the files locally\n\n if full_config['aws_template_upload'] == 'false':\n return 0\n\n storage_provider = release.storage.aws.S3StorageProvider(\n bucket=full_config['aws_template_storage_bucket'],\n object_prefix=None,\n download_url=cloudformation_s3_url,\n 
region_name=full_config['aws_template_storage_region_name'],\n access_key_id=full_config['aws_template_storage_access_key_id'],\n secret_access_key=full_config['aws_template_storage_secret_access_key'])\n\n log.warning(\"Uploading to AWS\")\n release.apply_storage_commands({'aws': storage_provider}, storage_commands)\n log.warning(\"AWS CloudFormation templates now available at: {}\".format(cloudformation_s3_url))\n\n # TODO(cmaloney): Print where the user can find the files in AWS\n # TODO(cmaloney): Dump out a JSON with machine paths to make scripting easier.\n return 0", "title": "" } ]
a29a5b68ad9385db44275aaac25299f2
calculate all lazily calculated properties of explainer
[ { "docid": "98cda06ab8b2983211f12fad2566a05b", "score": "0.58270127", "text": "def calculate_properties(self, include_interactions=True):\n _ = self.pred_probas\n super().calculate_properties(include_interactions=include_interactions)", "title": "" } ]
[ { "docid": "db812ffcc271376b51b84567bde6244c", "score": "0.59954566", "text": "def get_descent(self):", "title": "" }, { "docid": "db812ffcc271376b51b84567bde6244c", "score": "0.59954566", "text": "def get_descent(self):", "title": "" }, { "docid": "b28db5e20f7c8f8092ca30609d273dd9", "score": "0.57683766", "text": "def analytical_properties(self):\n return [ method[4:] for method in dir(self) if (method[0:4]=='get_') ]", "title": "" }, { "docid": "4f452603007b9a151974899464a467bf", "score": "0.5758349", "text": "def explain(self) -> global___Explain:", "title": "" }, { "docid": "dc9d4c7095c957f61f1943918c256ac6", "score": "0.5672616", "text": "def calculate_attributes():\n\n pass", "title": "" }, { "docid": "b21d0ab366b0127b71fff049ed4d8c9b", "score": "0.5619407", "text": "def def_advanced_properties(self): \n\n raise NotImplementedError('def_advanced_properties has to be implemented')", "title": "" }, { "docid": "cfa74ecf76709ca612bed25272d7c559", "score": "0.5605686", "text": "def explaination(self):", "title": "" }, { "docid": "34645bcb1ee630640a76cf3d9775086d", "score": "0.5546397", "text": "def summarize(self):\n ...", "title": "" }, { "docid": "dd5d900f4fde61d402f4b31a7812a5f2", "score": "0.5544888", "text": "def GetExtraDescent(self):", "title": "" }, { "docid": "8758a04afa7c27e3cc29b8a5177443b6", "score": "0.553566", "text": "def computed_properties(self):\n raise NotImplementedError(\n \"Subclasses must define the list of computed properties.\"\n )", "title": "" }, { "docid": "840912db0a24bc05ebc77c1e1745677a", "score": "0.5525588", "text": "def AdvancedProperties(adv_prop_definition): \n def advanced_prop_wrapper(*args):\n prop_dict = adv_prop_definition(*args)\n #print \"in decorator: \",properties\n host = args[0]\n if not host._run_from_web: # property run locally\n host._wvs.advanced_vars =prop_dict\n host.reducer.prop_man.set_input_parameters(**prop_dict)\n return prop_dict\n\n return advanced_prop_wrapper", "title": "" }, { "docid": "c5b7fe63ba6abd1f698c374ca78bcb40", "score": "0.542923", "text": "def calculate_properties(self, mol):\r\n properties = []\r\n properties.append(mol.GetNumAtoms())\r\n properties.append(desc.CalcCrippenDescriptors(mol)[0])\r\n properties.append(desc.CalcTPSA(mol))\r\n properties.append(desc.CalcNumRotatableBonds(mol))\r\n properties.append(desc.CalcFractionCSP3(mol))\r\n return properties", "title": "" }, { "docid": "f2281018e9f694427ceaa1a8fc2bf8a9", "score": "0.5423781", "text": "def prop(self):", "title": "" }, { "docid": "87e9d0c9832fa25ccb56add91be55878", "score": "0.5414532", "text": "def _meta_iteration(self):\n assert (\n self._fin_hts is not None\n ), \"Must call BMLHOptimizer.Aggregate_all before performing optimization.\"\n return self._fin_hts", "title": "" }, { "docid": "2aa3e986a49ded653e413c352977fe9d", "score": "0.5389935", "text": "def parse_all(self):\n return dict(\n (key, expand_results(getattr(self, key, self.empty_result)))\n for key, attr in self.__class__.__dict__.items()\n if hasattr(attr, '_attached') and type(attr).__name__ == 'cached_property'\n )", "title": "" }, { "docid": "aa3c2df5bbc6923b3bda5d52d816df0e", "score": "0.53672475", "text": "def get_calculated(self):\n self.initialize_if_necessary()\n return self._calculated_propertynames", "title": "" }, { "docid": "7c020af9159eefa082fd379f2ca805ab", "score": "0.5330502", "text": "def calculate_properties(self, include_interactions=True):\n _ = (self.preds, self.pred_percentiles,\n self.shap_base_value, self.shap_values,\n self.mean_abs_shap)\n if not 
self.y_missing:\n _ = self.permutation_importances\n if self.onehot_cols:\n _ = (self.mean_abs_shap_cats, self.X_cats,\n self.shap_values_cats)\n if self.interactions_should_work and include_interactions:\n _ = self.shap_interaction_values\n if self.onehot_cols:\n _ = self.shap_interaction_values_cats", "title": "" }, { "docid": "383f0efa551142aa469042b543c69d64", "score": "0.5319493", "text": "def derive_related_properties():\n logger.info('Deriving related properties ...')\n data = statistics.get_json_data('properties')\n\n related = {}\n\n for pid in data:\n if 'r' in data[pid] and data[pid]['r']:\n related[pid] = data[pid]['r']\n\n statistics.update_json_data('properties/related', related)\n statistics.update_split_json_data('properties/related', related, 10)", "title": "" }, { "docid": "fbab870c74e1662c36568a56b4b40fe2", "score": "0.5308144", "text": "def ContextProperties(self) -> _n_1_t_0:", "title": "" }, { "docid": "22f32729ae0f34692c7167b09f4a57f2", "score": "0.530135", "text": "def pred(self, index_set=None):\n return [ self.simple_reflection(i) for i in self.descents(index_set) ]", "title": "" }, { "docid": "dd90c2a2a93a85c909ffd2d18650a897", "score": "0.5282453", "text": "def get_properties(self):\n return super(PatternAnalyzer, self).get_properties() + (self.pattern,)", "title": "" }, { "docid": "acfc3d024910ff732ab395426b3742b5", "score": "0.52702093", "text": "def _compute_experiment_statistics(self):", "title": "" }, { "docid": "2ab3d678901b63572098516bbb48f2bb", "score": "0.5239263", "text": "def summary(self):", "title": "" }, { "docid": "2ab3d678901b63572098516bbb48f2bb", "score": "0.5239263", "text": "def summary(self):", "title": "" }, { "docid": "1f6cdfab0381137ff8dd32d761e54fbd", "score": "0.52306867", "text": "def _lazyproperties(self):\n def islazyproperty(obj):\n return isinstance(obj, lazyproperty)\n\n return [i[0] for i in inspect.getmembers(self.__class__,\n predicate=islazyproperty)]", "title": "" }, { "docid": "f991943d87adfe236e2177f3c65e7fcd", "score": "0.51917374", "text": "def get_class_properties(table, graph, entities):\n ne = table.get_NE_cols()\n for column in ne.columns:\n column_candidates = column.candidates\n\n column_property_candidates = {}\n\n for cell in tqdm(column.cells):\n for candidate in cell.candidates:\n #for some reason if the candidate is null\n if candidate is None:\n continue\n \n\n #this one find the properties suggested to the belonging class\n #properties of the instance\n #these ones we add to the graph and then connect to the candidate classes\n properties = get_all_instance_properties(entities[candidate].uri)\n if properties is not None:\n for p in properties:\n prop = p[\"property\"][\"value\"]\n\n if prop.find(\"http://dbpedia.org/ontology\") == -1:\n continue\n\n exclusion = [\n \"http://dbpedia.org/ontology/abstract\",\n \"http://dbpedia.org/ontology/thumbnail\",\n \"http://dbpedia.org/ontology/wikiPageID\",\n \"http://dbpedia.org/ontology/wikiPageRevisionID\",\n \"http://dbpedia.org/ontology/wikiPageExternalLink\"\n ]\n\n if prop in exclusion:\n continue\n \n prop_id = create_entity(entities, graph, prop, \"property\")\n if prop_id not in column_property_candidates:\n column_property_candidates[prop_id] = 0\n column_property_candidates[prop_id] += 1\n\n #TODO: modify this to include only the parents of\n #related cell instances?\n for column_candidate in column_candidates:\n #check if cell and column candidates are related\n if graph.has_edge(candidate, column_candidate):\n graph.add_edge(column_candidate, prop_id)\n \n 
#adding the property labels\n entities[prop_id].add_value(p[\"callret-1\"][\"value\"])\n \n #now filter the column_property_space to select the top 50%\n if len(column_property_candidates) > 0:\n cmax = column_property_candidates[max(column_property_candidates, key=column_property_candidates.get)]\n selected_property_candidates = filter(lambda k: column_property_candidates[k] > cmax*0.5, \n column_property_candidates)\n \n #keep the winning properties in the graph and remove the rest\n for prop in column_property_candidates:\n if prop not in selected_property_candidates:\n delete_entity(entities, graph, prop)", "title": "" }, { "docid": "0a15aa303f049f3a9330c3f447344bba", "score": "0.5190388", "text": "def properties(self):\n o = self.oorig\n getters = [name for name in dir(o)\n if name.startswith('get_') and callable(getattr(o, name))]\n getters.sort()\n d = {}\n for name in getters:\n func = getattr(o, name)\n if self.is_alias(func):\n continue\n try:\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n val = func()\n except Exception:\n continue\n else:\n d[name[4:]] = val\n return d", "title": "" }, { "docid": "af66631559ea394a9a18d31c63ebba5f", "score": "0.51859057", "text": "def properties(raw):\n prop_list = []\n dfs(prop_list, [], raw)\n return prop_list", "title": "" }, { "docid": "ea6cb88959f5b731e33f8efe7b31349d", "score": "0.51762813", "text": "def pprint_getters(self):\n lines = []\n for name, val in sorted(self.properties().items()):\n if getattr(val, 'shape', ()) != () and len(val) > 6:\n s = str(val[:6]) + '...'\n else:\n s = str(val)\n s = s.replace('\\n', ' ')\n if len(s) > 50:\n s = s[:50] + '...'\n name = self.aliased_name(name)\n lines.append(' %s = %s' % (name, s))\n return lines", "title": "" }, { "docid": "18c4f7f3e315858370b95b546e616e02", "score": "0.51607627", "text": "def explain(self):\n\n return None", "title": "" }, { "docid": "7f8f7e74968f88b15546cb6553725eb1", "score": "0.5153185", "text": "def get_ascent(self):", "title": "" }, { "docid": "7f8f7e74968f88b15546cb6553725eb1", "score": "0.5153185", "text": "def get_ascent(self):", "title": "" }, { "docid": "1bac82955f75219b07c52a9beb8097bd", "score": "0.5128885", "text": "def calc_deficit(self):", "title": "" }, { "docid": "a8f7fcd0e2ac7ae370f9be1f4718b188", "score": "0.51115346", "text": "def _computeInfo(self):\n return {\"answer\": 42} #### Calculated by the Deep Thought supercomputer in 7.5M years", "title": "" }, { "docid": "e034ea853a7336efb3b79086dd954790", "score": "0.5109337", "text": "def describe(self):", "title": "" }, { "docid": "e034ea853a7336efb3b79086dd954790", "score": "0.5109337", "text": "def describe(self):", "title": "" }, { "docid": "da2141ccb73a26e572108b5ac86afae2", "score": "0.5107499", "text": "def compute(self):\n \n return _computeMethdos(self)", "title": "" }, { "docid": "a33b3166d66683afdcbc4b672bfed5d6", "score": "0.51025355", "text": "def propertyValues():", "title": "" }, { "docid": "a33b3166d66683afdcbc4b672bfed5d6", "score": "0.51025355", "text": "def propertyValues():", "title": "" }, { "docid": "8dfda7e5724cdd96650404785283f903", "score": "0.50917476", "text": "def succ(self, index_set=None):\n return [ self.simple_reflection(i) for i in self.descents(index_set=index_set, positive=True) ]", "title": "" }, { "docid": "2525fcfc0b0231f2f79f226b0a59ac6e", "score": "0.5081335", "text": "def property_summary(self):\n summary = {\"size\": self.size(),\n \"depth\": self.depth(),\n \"width\": self.width(),\n \"bits\": self.num_cbits(),\n \"factors\": 
self.num_tensor_factors(),\n \"operations\": self.count_ops()}\n return summary", "title": "" }, { "docid": "782d5f770ef5fe5216e3aea443318dcc", "score": "0.5073165", "text": "def properties():\n N, M, Psi, GT = load_dataset()\n print('# of sources: {}'.format(N))\n print('# of object: {}'.format(M))\n obs_n = 0\n V = [0 for obj in range(M)]\n for obj in range(M):\n obs_n += len(Psi[obj])\n V[obj] = len(set([val for s, val in Psi[obj]]))\n print('# of observations: {}'.format(obs_n))\n print('average # of values per object: {:1.3f}'.format(np.average(V)))\n print('min # of values per object: {:1.3f}'.format(min(V)))\n print('max # of values per object: {:1.3f}'.format(max(V)))", "title": "" }, { "docid": "1267c35e738c29b613b02c75bee56ec3", "score": "0.50695896", "text": "def generate_summary(self):", "title": "" }, { "docid": "181076b93364a9d6440edad08e9cb99f", "score": "0.50634605", "text": "def _get_full_objs_decorator(func):\n addr = get_config()[\"mongo_rest_interface_addr\"]\n get_obj = lambda t: json.loads(preprocess_resp(get_book_by(\"author_or_title\", addr, t)))[0]\n get_overall_sentiment = lambda o: o[\"sentiment\"][\"overall\"][0]\n\n def __inner(base_title, scores, *args, **kwargs):\n resp = func(base_title, scores, *args, **kwargs)\n head_obj, *tail_objs = resp[\"resp\"]\n\n base_obj = get_obj(head_obj[\"title\"])\n\n for kvs in tail_objs:\n kvs[\"title\"] = get_obj(kvs[\"title\"])\n\n kvs[\"title\"][\"sentiment\"][\"overall\"] = [get_overall_sentiment(base_obj),\n get_overall_sentiment(kvs[\"title\"])]\n _, *top_matches = resp[\"resp\"]\n return top_matches\n return __inner", "title": "" }, { "docid": "c7080ea558116200ad07f2645fdfb8cb", "score": "0.50614005", "text": "def permalist(self):", "title": "" }, { "docid": "b59b5e8f1ebb0721ccf3e0b0a397fef7", "score": "0.5055935", "text": "def compute_properties(self):\n # Calculate the area\n self._area = self.calc_area()\n self._Iz, self._Iy = self.calc_inertia()", "title": "" }, { "docid": "93351b779816729dc368f80431863bf5", "score": "0.5048545", "text": "def properties(self):\n if self.bound and \"properties\" in self.__stale:\n self.refresh()\n return super(Node, self).properties", "title": "" }, { "docid": "ecea282618a86c234828d9b527ecd9a2", "score": "0.5042679", "text": "def interleaved_computed_per_page_values_and_summaries(self):\n return self._interleaved_computed_per_page_values_and_summaries", "title": "" }, { "docid": "810328cc57897b797bbda6d27df0618c", "score": "0.5038257", "text": "def update_derived_property_records():\n derive_property_classification()\n derive_related_properties()\n derive_url_patterns()\n derive_property_usage()\n derive_property_datatypes()", "title": "" }, { "docid": "0a34dc84f791777729a301eca2318c93", "score": "0.50336385", "text": "def analyse(self):", "title": "" }, { "docid": "c71458250acae4505869fe52aca6c916", "score": "0.50191516", "text": "def compute(self):", "title": "" }, { "docid": "c71458250acae4505869fe52aca6c916", "score": "0.50191516", "text": "def compute(self):", "title": "" }, { "docid": "349e548a728b283e55ee3e8a5a65e401", "score": "0.5018438", "text": "def _optimize(self):\n return {}", "title": "" }, { "docid": "012ed6daab996ded5cfb53db1b748881", "score": "0.5005248", "text": "def explain(self):\n for i in self.m:\n for j in i:\n if j.visited==1:\n txt='{:2d}'.format(j.data)\n print(txt,end=' ')\n else:\n print(' ',end=' ')\n print('')", "title": "" }, { "docid": "9ab95ae5087e0d81a419496cc5fb8442", "score": "0.49948457", "text": "async def describe(self):\n\n 
self._ready.clear()\n\n entries = await self._pool.fetch(self._script)\n\n details = collections.defaultdict(dict)\n primaries = collections.defaultdict(list)\n for entry in map(dict, entries):\n table = entry.pop('table')\n field = entry.pop('field')\n details[table][field] = entry\n if entry['main']:\n primaries[table].append(field)\n\n self._details = dict(details)\n self._primaries = dict(primaries)\n\n self._ready.set()", "title": "" }, { "docid": "b000a125aca09181facb8eb5f61294a7", "score": "0.4990571", "text": "def calc(self):\r\n for word in self:\r\n word.calc()", "title": "" }, { "docid": "b000a125aca09181facb8eb5f61294a7", "score": "0.4990571", "text": "def calc(self):\r\n for word in self:\r\n word.calc()", "title": "" }, { "docid": "0e3f52a806b31ffe11af809df856b59c", "score": "0.49786073", "text": "def test_get_efficiency(self):\n pass", "title": "" }, { "docid": "a3118bd450580db71e9a2cdb5d56885e", "score": "0.4973626", "text": "def _get_calculations(self) -> dict:\n calculations = {}\n for element in dir(self):\n # BLACKMAGIC : _repr_html_ must be skipped\n \"\"\"\n `_repr_html_` is an element of the directory of the PostProc object which\n causes the search of calculated attributes to overflow, looping indefinitely\n and never reaching the actual elements containing those said attributes.\n It will be skipped until the issue has been properly identified and fixed.\n You can uncomment the block below to observe how the directory loops after\n reaching that element - acknowledging you are not skipping it.\n \"\"\"\n if element not in PROBLEMATIC_ATTRIBUTE_NAMES:\n try:\n # print(\n # \"directory element : \",\n # e,\n # \", calculations list : \",\n # calculations,\n # )\n for k in getattr(getattr(self, element), \"_calculates\"):\n calculations[k] = element\n except AttributeError:\n pass\n\n return calculations", "title": "" }, { "docid": "950711e8283b79c14f79e0728cd0d77d", "score": "0.49721813", "text": "def compute_info(self):\n pass", "title": "" }, { "docid": "2de725265a7c822bf42ac8fad2e3611c", "score": "0.49636874", "text": "def propertyItems():", "title": "" }, { "docid": "2de725265a7c822bf42ac8fad2e3611c", "score": "0.49636874", "text": "def propertyItems():", "title": "" }, { "docid": "59cf8734cf8658986cb62dbc2e56bf0a", "score": "0.495714", "text": "def calculate(self):\r\n attributes = {\r\n \"defense\": 0,\r\n \"influence\": 0,\r\n \"lands\": 0,\r\n \"law\": 0,\r\n \"population\": 0,\r\n \"power\": 0,\r\n \"wealth\": 0,\r\n }\r\n\r\n for attr in attributes:\r\n attributes[attr] += getattr(self.baseattributes, attr)\r\n attributes[attr] += getattr(self.realm, attr)\r\n attributes[attr] += getattr(self.destinycharacters, attr)\r\n for event in self.event_set.all():\r\n attributes[attr] += getattr(event, attr)\r\n setattr(self, attr, attributes[attr])", "title": "" }, { "docid": "f588bb121d172b4d455b060d487aecf8", "score": "0.4955306", "text": "def get_properties():\n raise NotImplementedError", "title": "" }, { "docid": "43f457912db7e4b79f0efcee47cfb141", "score": "0.4943038", "text": "def get_prop():", "title": "" }, { "docid": "3785277201b4bec926b9e6edec2be9eb", "score": "0.49346378", "text": "def get_properties(self):\n raise NotImplementedError(\"not implemented\")", "title": "" }, { "docid": "14a577e71092f0402aee7cd48674b59c", "score": "0.49253562", "text": "def get_properties(self):\n return None", "title": "" }, { "docid": "9d027674503c723afb7808d4c92be92b", "score": "0.49251354", "text": "def properties(self):\n if self.bound and \"properties\" in 
self.__stale:\n self.refresh()\n return super(Rel, self).properties", "title": "" }, { "docid": "901a041f66daf6cfe787c18c1c6939dd", "score": "0.4925062", "text": "def node_properties(self):\n for n in self.nodes:\n yield dict(n)", "title": "" }, { "docid": "3ae7ea9f7af562970e90b832e81e037a", "score": "0.49059898", "text": "def calculate(self):\n pass", "title": "" }, { "docid": "e14ee427d2ef763dd991be61c8317536", "score": "0.49059656", "text": "def compute_all_properties(cls):\n all_properties = {}\n\n props = []\n # Get all of the properties of the class including inherited ones\n for m in dir(cls):\n p = getattr(cls, m)\n if isinstance(p, Property):\n props.append((m, p))\n\n props = sorted(props, key=lambda p: p[1].instance_idx)\n for prop_name, prop_value in props:\n value_name = prop_value.name\n if value_name:\n all_properties[value_name] = prop_name\n prop_name = value_name\n else:\n all_properties[prop_name] = prop_name\n\n Graph.guard_reserved_words(prop_name, cls)\n return all_properties", "title": "" }, { "docid": "b55d73e0f5d867bd9189d23c4c07ddd1", "score": "0.49003336", "text": "def properties(cls):\n matrix = []\n for name, prop in readme_md.getmembers(cls, inspect.isdatadescriptor):\n row = readme_md.Row(cls, name).get_columns()\n matrix.append(row)\n return mdown.table((\"`%s` properties\" % cls.__name__, \"`__doc__`\"), matrix)", "title": "" }, { "docid": "ebc7802b4072d3156b6ef46985067d6d", "score": "0.48835608", "text": "def dict(self):\n measures={}\n for prop in self.properties:\n measures[prop]=getattr(self,prop)\n return measures", "title": "" }, { "docid": "d76b65f5f8de1db3c7f17f2356fa2343", "score": "0.48829767", "text": "def _get_properties(edges_response):\n es = Elasticsearch(args.elastic_hosts)\n ic = IndicesClient(es)\n \"\"\" edges_response looks like ...\n {\"responses\": [{\"status\": 200,\n \"hits\": {\"hits\": [], \"total\": 69644, \"max_score\": 0.0},\n \"_shards\": {\"successful\": 5, \"failed\": 0, \"total\": 5},\n \"took\": 1,\n \"aggregations\": {\"Labels\":\n {\"buckets\": [{\"key\": \"Gene\", \"doc_count\": 39892},\n {\"key\": \"Pubmed\", \"doc_count\": 18044},\n {\"key\": \"Individual\", \"doc_count\": 11036},\n {\"key\": \"GeneFamily\", \"doc_count\": 589},\n {\"key\": \"Cohort\", \"doc_count\": 40},\n {\"key\": \"Project\", \"doc_count\": 33},\n {\"key\": \"GeneDatabase\", \"doc_count\": 10}]\n }\n \"\"\"\n if (\n \"responses\" not in edges_response or\n 'aggregations' not in edges_response['responses'][0]\n ):\n raise tornado.gen.Return(\n None\n )\n print(edges_response)\n edges = edges_response[\"responses\"][0]['aggregations']['Labels']['buckets'] # NOQA\n edge_names = []\n for edge in edges:\n edge_names.append(edge['key'])\n raise tornado.gen.Return(\n ic.get_mapping(index=self.index,\n doc_type=edge_names, request_timeout=60)\n )", "title": "" }, { "docid": "f5aa11c2100c2bec77e5dd3c2b12dff1", "score": "0.48822555", "text": "def _automap_properties(self, protected_methods=True):\n for prop_name, prop in self._properties.items():\n # search for object methods\n getter_name = 'get_'\n setter_name = 'set_'\n if protected_methods:\n getter_name = '_'+getter_name\n setter_name = '_'+setter_name\n\n try:\n prop.getter = self.__getattribute__(getter_name+prop_name)\n except AttributeError:\n # not found\n pass\n\n try:\n prop.setter = self.__getattribute__(setter_name+prop_name)\n except AttributeError:\n pass", "title": "" }, { "docid": "f984008e01be4d6409bc4de00b4d274a", "score": "0.48814657", "text": "def summary(self):\n# print \"***********AT 
BASE CLASS summary()***********\"\n return {}", "title": "" }, { "docid": "34b37622a4a2329d14f6410744bf9fd4", "score": "0.4873339", "text": "def get_properties(self):\n return self.property_desc.keys()", "title": "" }, { "docid": "319398ae65d544fd7bfe2ed4a8402cb9", "score": "0.48716626", "text": "def get_properties(self) -> Dict:\n _dict = {}\n\n for _key in [\n k for k in vars(self).keys()\n if k not in AnalyzedResult._properties\n ]:\n _dict[_key] = getattr(self, _key)\n return _dict", "title": "" }, { "docid": "2790abc1f36414395152a9aa25f3ba84", "score": "0.48583788", "text": "def _propertyMap():", "title": "" }, { "docid": "6439a3e9734749a82b0ac08f65985663", "score": "0.48567018", "text": "def _calculate(self) -> None:", "title": "" }, { "docid": "d8e18f224adf606a41b30b201be1fdd2", "score": "0.4851939", "text": "def item_view_object(context, request):\n properties = item_links(context, request)\n if not asbool(request.params.get('skip_calculated')):\n calculated = calculate_properties(context, request, properties)\n properties.update(calculated)\n return properties", "title": "" }, { "docid": "0ef69ea85d9aee3b6c413fa287f57eec", "score": "0.48498613", "text": "def calculate_properties(self, atoms, properties):\n # TODO: Check atoms.\n\n if \"rdf\" in properties:\n self.results[\"rdf\"] = self.data_handler.target_calculator.\\\n get_radial_distribution_function(atoms)\n if \"tpcf\" in properties:\n self.results[\"tpcf\"] = self.data_handler.target_calculator.\\\n get_three_particle_correlation_function(atoms)\n if \"static_structure_factor\" in properties:\n self.results[\"static_structure_factor\"] = self.data_handler.\\\n target_calculator.get_static_structure_factor(atoms)\n if \"ion_ion_energy\" in properties:\n self.results[\"ion_ion_energy\"] = self.\\\n last_energy_contributions[\"e_ewald\"]", "title": "" }, { "docid": "f0f72abc460055d57a106eb4be922eaf", "score": "0.48431128", "text": "def explore(self):", "title": "" }, { "docid": "4f7f831411a4294950e812c639385da4", "score": "0.48429444", "text": "def _compute_experiment_statistics(self):\n pass", "title": "" }, { "docid": "2e87eb70fc8d2a953382d8bb6dd98989", "score": "0.48422936", "text": "def preheat_oven(self):\n pass", "title": "" }, { "docid": "c849196a34453b0930cebb5d3564793d", "score": "0.48355392", "text": "def calculate_stats(self):\n\n pass", "title": "" }, { "docid": "88266901cb4a33d83f9e9a65f5c55a52", "score": "0.4831468", "text": "def _explain(self):\n self._explain_simple_consensus()", "title": "" }, { "docid": "f20fb9d6150b6535b9da1b58a9e5b8b6", "score": "0.4828499", "text": "def def_main_properties(self): \n raise NotImplementedError('def_main_properties has to be implemented')", "title": "" }, { "docid": "8e3afe11f8cd519164755df422ceb676", "score": "0.48283434", "text": "def __init__(self):\n self.intermediate = {}\n self.result = []", "title": "" }, { "docid": "61df8d1f0db70b4e34eacc623a692525", "score": "0.48221806", "text": "def _compute_descriptor(self, data):", "title": "" }, { "docid": "f45fc55020fddb60c60bb46b7ff04420", "score": "0.48158824", "text": "def transformer_properties(self):\n for key, transformer in self._transformer.items():\n yield (key, transformer.mean_, transformer.stdev_)", "title": "" }, { "docid": "d035c637678eba36787becfe27c6bdec", "score": "0.48091656", "text": "def compat_explain(cur):\n res = cur.explain()\n if 'nscannedObjects' in res:\n res['executionStats'] = dict(\n nReturned=res.pop('n'),\n totalKeysExamined=res.pop('nscanned'),\n totalDocsExamined=res.pop('nscannedObjects'),\n 
executionTimeMillis=res.pop('millis'),\n )\n return res", "title": "" }, { "docid": "014770ad1416e17d09ef96cec7ec50aa", "score": "0.4799041", "text": "def get_score(self, real_data, synthetic_data, metadata, progress_bar=None):\n if self._single_table_property is None:\n raise NotImplementedError()\n\n all_details = []\n for table_name in metadata['tables']:\n property_instance = self._single_table_property()\n self._properties[table_name] = property_instance\n self._properties[table_name].get_score(\n real_data[table_name],\n synthetic_data[table_name],\n metadata['tables'][table_name],\n progress_bar\n )\n all_details.append(property_instance._details)\n\n self.is_computed = True\n all_details = pd.concat(all_details)\n return all_details['Score'].mean()", "title": "" }, { "docid": "bb42787d8b3c67a7c80f111cca104ed1", "score": "0.47980973", "text": "def propertyMap():", "title": "" }, { "docid": "bb42787d8b3c67a7c80f111cca104ed1", "score": "0.47980973", "text": "def propertyMap():", "title": "" }, { "docid": "729b9b655b13377f742b5ea80b286184", "score": "0.4794479", "text": "def properties(self):\n return self._properties", "title": "" }, { "docid": "729b9b655b13377f742b5ea80b286184", "score": "0.4794479", "text": "def properties(self):\n return self._properties", "title": "" }, { "docid": "5bb0858e5c8ae1c4a429c71c7fd66867", "score": "0.47892186", "text": "def read_relation_property(self):\n relation_property_head = {x: [] for x in range(len(self.relations))}\n relation_property_tail = {x: [] for x in range(len(self.relations))}\n\n for t in self.triplets['train']:\n relation_property_head[t.r].append(t.h)\n relation_property_tail[t.r].append(t.t)\n\n self.relation_property = {x: (len(set(relation_property_tail[x]))) / ( \\\n len(set(relation_property_head[x])) + len(set(relation_property_tail[x]))) \\\n for x in relation_property_head.keys()}\n\n return self.relation_property", "title": "" }, { "docid": "abcda2d6d03dd8ed602c4b9044ef929c", "score": "0.4787436", "text": "def derive_property_usage():\n logger.info('Deriving property usage ...')\n data = statistics.get_json_data('properties')\n\n usage = defaultdict(dict)\n keys = ['i', 's', 'q', 'e', 'qs', 'pc']\n\n for pid in data:\n for key in keys:\n if key in data[pid] and data[pid][key]:\n usage[pid][key] = data[pid][key]\n\n statistics.update_json_data('properties/usage', usage)", "title": "" }, { "docid": "8fa31df26635e049f6793e66f1e61f4b", "score": "0.47843242", "text": "def expand_properties(self, fmt):\n pass", "title": "" } ]
3281e3c9560e50b7f3b70816c2c11ccd
This function is responsible for the initial skip grams.
[ { "docid": "fee1eac4d6717fd2cddf61b9d2eb07ec", "score": "0.59567994", "text": "def initial_skip_grams(full_sentence_info, k_skip, n_gram):\n if n_gram == 1:\n return [[full_sentence_info[0]]]\n grams = []\n for j in range(min(k_skip + 1, len(full_sentence_info) - 1)):\n kmj_skip_nm1_grams = initial_skip_grams(full_sentence_info[j + 1:], k_skip - j, n_gram - 1)\n if kmj_skip_nm1_grams is not None:\n for gram in kmj_skip_nm1_grams:\n grams.append([full_sentence_info[0]] + gram)\n return grams", "title": "" } ]
[ { "docid": "d5df531ee98e066ac7f749a019d9ea17", "score": "0.6951125", "text": "def Skip(self, skip=True):", "title": "" }, { "docid": "62f28dd39a3b4451ebad8dc5424e80ef", "score": "0.6750491", "text": "def skip(self):\n self.next()", "title": "" }, { "docid": "4092f1a261f3ddab45595c1952c32e68", "score": "0.67421526", "text": "def skip(self, count: int, /) -> None:\n ...", "title": "" }, { "docid": "01b7e6105f79494336eddb5d0a432a00", "score": "0.67299765", "text": "def skip(self, n):\n pass", "title": "" }, { "docid": "874a9fbd87e6a04065af897211780ef7", "score": "0.6707515", "text": "def skip(self) -> bool:", "title": "" }, { "docid": "bf962400dbab3e823e4704729366621c", "score": "0.64574397", "text": "def test_skip(self):\n pass", "title": "" }, { "docid": "bf962400dbab3e823e4704729366621c", "score": "0.64574397", "text": "def test_skip(self):\n pass", "title": "" }, { "docid": "bf962400dbab3e823e4704729366621c", "score": "0.64574397", "text": "def test_skip(self):\n pass", "title": "" }, { "docid": "81d61bf2b408b4dd00ddcf998699d6a2", "score": "0.6378706", "text": "def skip(*args,**kwarg): \n pass", "title": "" }, { "docid": "1eff94be5aead165408f70bbbd4da62d", "score": "0.63501203", "text": "def GetSkipped(self):", "title": "" }, { "docid": "a1ec771adf0f58b306ce03d0dbb25613", "score": "0.63429886", "text": "def prescan(self): # -> None:\n ...", "title": "" }, { "docid": "5393b5337bdf77fa2e3b4d089cea1c79", "score": "0.6274985", "text": "def skip ( self ) :\n return self.__skip", "title": "" }, { "docid": "5393b5337bdf77fa2e3b4d089cea1c79", "score": "0.6274985", "text": "def skip ( self ) :\n return self.__skip", "title": "" }, { "docid": "8229e6239cef6b0d057ec6fdee078eb2", "score": "0.62682694", "text": "def SkipSize(self) -> int:", "title": "" }, { "docid": "9a38f1b34b6c8cb9b4f79ff282aaded7", "score": "0.62615716", "text": "def logSkippedPars(self):\n logSkippedPars(self)", "title": "" }, { "docid": "4d5c16c75adb9a7eeca2276119cf34f5", "score": "0.62334174", "text": "def skip(t, n):\n pu(t)\n fd(t, n)\n pd(t)", "title": "" }, { "docid": "0b56d06d54a8f66add86db58117a49c7", "score": "0.61965966", "text": "def test_skip(self):", "title": "" }, { "docid": "0b56d06d54a8f66add86db58117a49c7", "score": "0.61965966", "text": "def test_skip(self):", "title": "" }, { "docid": "47caa2d1cf73d795799dd027a6aa209d", "score": "0.61688685", "text": "def skip(self):\n self.currentiv += 1\n self.next()", "title": "" }, { "docid": "9742035f1406b108a015656208d04b17", "score": "0.61307335", "text": "def skip(self, amount: int):\n raise NotImplementedError", "title": "" }, { "docid": "e7dd27f54c5f6ae1deb3b787d0183f62", "score": "0.6098544", "text": "def unskip(self, step):\n self.skip = [i for i in self.skip if step != i]", "title": "" }, { "docid": "fdd0a53894f2abeeaac78d599775a394", "score": "0.6093332", "text": "def make_skip():\n return { \"kind\": \"Skip\" }", "title": "" }, { "docid": "7f961f1c83bcedb420dfff15924da229", "score": "0.6080094", "text": "def __init__(self, apply=1.0):\n self.skip = apply", "title": "" }, { "docid": "1ee882c65921e5428eeb6aa4a53f1cdc", "score": "0.6065739", "text": "def skip(self, n=1):\r\n for x in range(n):\r\n next(self)", "title": "" }, { "docid": "56aa8181063c52f56d0d0d4e9730c126", "score": "0.6059741", "text": "def skip(self) -> None:\n\n self._skipped.append(\n {\n \"line\": self.item[\"line\"],\n \"column\": self.item[\"line\"],\n \"kind\": self.kind,\n \"name\": self.name,\n }\n )", "title": "" }, { "docid": "47e74ce399d2cd23f20ea7cbf5aa2c18", "score": "0.60528964", 
"text": "def _skip_sampling(self, total, skip_ids):\n rand_id = np.random.randint(total - len(skip_ids))\n return rand_id if rand_id < min(skip_ids) else rand_id + len(skip_ids)", "title": "" }, { "docid": "3cbb136836f2356532a94bfb50e5a937", "score": "0.60461617", "text": "def _skip(self, dataset):\n determ = self._deterministic\n return dataset.map(\n self._add_param,\n num_parallel_calls=tf.data.AUTOTUNE,\n deterministic=determ)", "title": "" }, { "docid": "76bfb511fc368d0073c9673538ac45a5", "score": "0.60394377", "text": "def _skipped_inc(self, item):\n\t\tself.skipped += 1", "title": "" }, { "docid": "fa383704ac626f135aeb789f5c21612d", "score": "0.60279024", "text": "def skip(self, n: int):\n # makes use of knowing the number of nodes\n # does some math, skips extraneous full loops\n for x in range(n % self.numnodes):\n # updates the correct amount of times\n self.go_next()", "title": "" }, { "docid": "893f6501f28c883999a4fc19cf3ee265", "score": "0.6023453", "text": "def skip(self, params):\n return False", "title": "" }, { "docid": "29e39bb1f76561be0bb867058512838e", "score": "0.5973915", "text": "def skip(self, amount):\n for i in range(0, amount):\n self.next()\n time.sleep(0.5) # FIXME: Direct access to playlist", "title": "" }, { "docid": "a7178b5e5f0fc0cd8c02c9f7aa3eedf3", "score": "0.59552014", "text": "def n_skip_steps(self) -> int:\n return self._n_skip_steps", "title": "" }, { "docid": "48eb3a3b0fdce736c2abd7a127390807", "score": "0.5947714", "text": "def skip(self):\n \n self.playing_i = (self.playing_i + 1) % len(self.players)\n self.playing = self.players[self.playing_i]", "title": "" }, { "docid": "213e9341e561f3661095c316faa0cf57", "score": "0.59147024", "text": "def skip_forward(self, j):\n pass", "title": "" }, { "docid": "c8b17e822015c4decb0672e0a9801ebd", "score": "0.59073013", "text": "def addSkip(self, test):\n pass", "title": "" }, { "docid": "b2885727e7a68fbc46afd96577d55917", "score": "0.586508", "text": "def skipToken():\n raise SkipToken", "title": "" }, { "docid": "a915c56a4f2b8c202d712020443e471f", "score": "0.5860903", "text": "def on_skip(self, record):", "title": "" }, { "docid": "69f3bbe77fe5972dd27fd8ee98cdb9a3", "score": "0.58512914", "text": "def _heuristic_skip_inspection(self):\n self._ninsps = self._ninsps + 1 if hasattr(self, '_ninsps') else 1\n if self.mode in ['tile', 'only_tile'] and self._ninsps < 3:\n return True\n return False", "title": "" }, { "docid": "f363a9888452ff5429121b28d26ae811", "score": "0.5842535", "text": "def skip(self, count):\n self.lexer.skip(count)", "title": "" }, { "docid": "bafb62b5bde62b0195b50ecefaf25457", "score": "0.5828772", "text": "def skip_gd_pair():\n skip = [[10.9697, 4.4073],\n [15.3189, 2.0326],\n [23.7452, 24.5140],\n [28.4283, 20.9148],\n [35.1739, 1.1985],\n [40.0218, -0.6527],\n [41.2998, -1.2216],\n [41.9780, 0.6380],\n [123.3026, 54.2806],\n [126.6732, 45.7450],\n [131.5022, 7.0747],\n [150.3681, 50.4663],\n [150.3362, 55.8989], # FOS lens\n [158.2551, 47.2532],\n [164.0395, 55.2669],\n [170.1281, 54.7426],\n [176.7206, 16.7400],\n [188.5052, 6.5367],\n [190.7380, 25.7174],\n [193.7286, 8.7812],\n [196.9841, 4.3710],\n [198.7737, 47.9047],\n [201.3239, 37.6164],\n [211.2581, 44.8000],\n [222.7320, 47.0272],\n [238.3773, 22.5040],\n [243.2571, 8.1350],\n [253.7555, 26.0882],\n [357.0800, 0.9549],\n [116.9959, 43.3015],\n [184.6687, 50.2621],\n [166.63912, -18.35661], # FOS lens\n [166.6396, -18.3567], # FOS lens\n [216.9947, -1.3601],\n [9.9763, -27.4229], # 2QZ pair\n [341.6578, -29.4963], # 2QZ pair\n 
]\n # Table\n sa = np.array(skip)\n stbl = Table()\n stbl['RA'] = sa[:,0]\n stbl['DEC'] = sa[:,1]\n # Return\n return stbl", "title": "" }, { "docid": "e41e2fcc3ec4b95c491d85eba4313b05", "score": "0.58279467", "text": "def _must_skip(self):\n if self.bananas_record[\"integradoERP10\"] != \"0\":\n return True\n return", "title": "" }, { "docid": "b08980845210b010348007e2d8c591a9", "score": "0.58222044", "text": "def skip_next(self) -> None:\n \n self.spotipyObject.next_track()", "title": "" }, { "docid": "c978322e212ef48e058f6b408ef6ff02", "score": "0.57943505", "text": "def get_skipgrams(self):\n # List to hold skipgrams and associated metadata\n skipgram_data = []\n # List of indexes (of original texts list from dataframe) where we are throwing the doc out due to short length\n self.purged_docs = []\n # List holding the number of tokens in each document\n self.doc_lengths = []\n # ID to count document IDS\n doc_id_counter = 0\n print(\"\\n---------- Getting Skipgrams ----------\")\n for i, t in tqdm(enumerate(self.idx_data)):\n pairs, _ = skipgrams(t,\n vocabulary_size = self.vocab_size,\n window_size = self.window_size,\n shuffle=True,\n negative_samples=0)\n \n if len(pairs) > 2:\n for pair in pairs:\n temp_data = pair\n # Appends doc ID\n temp_data.append(doc_id_counter)\n # Appends document index (index of original texts list from dataframe)\n temp_data.append(i)\n skipgram_data.append(temp_data)\n self.doc_lengths.append(len(t))\n doc_id_counter+=1\n else:\n # We purge docs with less than 2 pairs\n self.purged_docs.append(i)\n\n self.skipgrams_df = pd.DataFrame(skipgram_data)", "title": "" }, { "docid": "95fbef75ca01e8646532379961971f91", "score": "0.57853013", "text": "def set_skip(self,value=True):\n\n self.skip = value", "title": "" }, { "docid": "824cde9ce4286701d1695c911d6d6d2d", "score": "0.5774947", "text": "def skip_gram_features(full_sentence_info_list, k_skip, n_gram):\n # Open a file\n shutil.rmtree('../dataset/output') # removes a directory with sub directories and files\n os.mkdir('../dataset/output') # creates a new directory\n output_file = open('../dataset/output/jurry.skip.txt', 'a') # open a file in append mode\n\n grams = []\n for full_sentence_info in full_sentence_info_list: # iterates ove the values (sentence_info(s)) list\n if n_gram == 0 or len(full_sentence_info) == 0:\n return None\n for i in range(len(full_sentence_info) - n_gram + 1):\n grams.extend(initial_skip_grams(full_sentence_info[i:], k_skip, n_gram))\n for gram in grams: # for printing the skip_gram\n for gra in gram:\n output_file.write(gra.token + '_')\n output_file.write('\\n')\n output_file.close() # closes the file", "title": "" }, { "docid": "bf056b6d221093852ab970a0f5b03ff9", "score": "0.5765171", "text": "def add_skip(self, test, reason):\n self.stream.writeln('SKIP', YELLOW)\n self.write_details(reason, test.parents_count, YELLOW)", "title": "" }, { "docid": "9e74a5ce8df99c05bb42830bcb68f87e", "score": "0.5762922", "text": "def _choose_skip_length(self):\n max_length = min((self._skip_frame_range[1] - self._skip_frame_range[0]), \\\n (self._full_dataset_view // self._skip_increase_interval))\n self._skip_length = 0 if max_length == 0 else randint(0, max_length)", "title": "" }, { "docid": "f092d2b433791add56819844a214f61f", "score": "0.57521266", "text": "def _pre_water_erosion_steps(self):\n pass", "title": "" }, { "docid": "4dd7f286612575fcd1c2e89b4903ca28", "score": "0.5745903", "text": "def test_skipping(self):\n raise NotImplementedError()", "title": "" }, { "docid": 
"802c6738fae7e2655838098cdb45f268", "score": "0.5745476", "text": "def skipRemaining(self):\n return False", "title": "" }, { "docid": "158139a207689b98aad9ad126e782cb2", "score": "0.5741946", "text": "def test_skips(self):\n assert not hasattr(self.t, \"test_code_cell_1\")", "title": "" }, { "docid": "d9fdae50ec89b1df68dc406866bd15bc", "score": "0.5739221", "text": "def skip_token():\n raise SkipToken", "title": "" }, { "docid": "20496c1dc81a3ddda96b33ed8137a9bb", "score": "0.57227665", "text": "def _skipgram_generator(self, doc):\n return it.izip(*[it.islice(seq, i, None) for i, seq in enumerate(it.tee(doc, self.skipgram_len))])", "title": "" }, { "docid": "04959898e940422e797a11ab1559f5da", "score": "0.57183963", "text": "def _get_skip_shapes(self, in_shape):\n in_shape = utils.convert_shape_to_np_array(in_shape)\n sh = in_shape\n# ch_axis = self.channel_axis\n# if ch_axis < 0:\n# ch_axis = len(in_shape) + self.channel_axis\n \n skip_out_shapes = list()\n# skip_in_shapes = list()\n for block in self.block_list:\n bdef = self.block_definitions[block]\n out_num = self._get_out_num(sh[self.channel_axis], block)\n # get indices of skip_save (possibly more than one)\n skip_concat_idx = [i for i, layer in enumerate(bdef) if layer=='skip_concat']\n# skip_save_idx = [i for i, layer in enumerate(bdef) if layer=='skip_save']\n if not skip_concat_idx:# and not skip_save_idx:\n # just determine block out shape\n sh = self._get_layer_sequence_output_shape(sh, bdef, out_num)\n# elif skip_save_idx:\n# # sequence until skip concat\n# seq = bdef[:skip_save_idx[0]] \n# sh = self._get_layer_sequence_output_shape(sh, seq, out_num)\n# skip_in_shapes.append(sh)\n# # will only enter for-loop if len(skips)>1\n# for i in range(len(skip_save_idx)-1):\n# seq = bdef[skip_save_idx[i]:skip_save_idx[i+1]]\n# sh = self._get_layer_sequence_output_shape(sh, seq, out_num)\n# skip_in_shapes.append(sh)\n# # sequence after skip concat\n# seq = bdef[skip_save_idx[-1]:] \n# sh = self._get_layer_sequence_output_shape(sh, seq, out_num)\n elif skip_concat_idx:\n # sequence until skip concat\n seq = bdef[:skip_concat_idx[0]] \n sh = self._get_layer_sequence_output_shape(sh, seq, out_num)\n skip_out_shapes.append(sh)\n # will only enter for-loop if len(skips)>1\n for i in range(len(skip_concat_idx)-1):\n seq = bdef[skip_concat_idx[i]:skip_concat_idx[i+1]]\n sh = self._get_layer_sequence_output_shape(sh, seq, out_num)\n skip_out_shapes.append(sh)\n # sequence after skip concat\n seq = bdef[skip_concat_idx[-1]:] \n sh = self._get_layer_sequence_output_shape(sh, seq, out_num)\n# if len(skip_out_shapes) != len(skip_in_shapes):\n# raise RuntimeError(\"There are \" + str(len(skip_out_shapes)) + \n# \"skip_concats and \" + str(len(skip_in_shapes)) +\n# \"skip_saves. 
This needs to match.\")\n# skip_in_shapes = skip_in_shapes[::-1]\n# skip_shapes = list()\n# for i in range(len(skip_out_shapes)):\n# skip = skip_out_shapes[i]\n# skip[self.im_axis] = skip_in_shapes[i][self.im_axis]\n# skip[self.channel_axis] = skip_in_shapes[i][self.channel_axis]\n# skip_shapes.append(skip)\n# list([skip_in_shapes[self.im_axis]] + \n# [skip_out_shapes[spatial_axes])\n# skip_shapes.insert(ch_axis, skip_in_shapes[self.im_axis])\n if self.input_output_skip:\n skip_out_shapes.append(self._get_net_output_shape(in_shape))\n return skip_out_shapes", "title": "" }, { "docid": "8e1865c975c18feca094e819d44fd346", "score": "0.57174057", "text": "def handle_skipped(self, song):\n self.update_item(song['beets_item'], 'skip_count', increment=1)\n self._log.info('skipped {0}', displayable_path(song['path']))", "title": "" }, { "docid": "e2093ffbce03e3375de0639135b58baf", "score": "0.5697211", "text": "def set_skip(self, n):\n\n self.skip = n", "title": "" }, { "docid": "b9b012ec3d25a5c0fbebb83b0b3ff7e8", "score": "0.5680758", "text": "def skip(self):\n\n\t\tif not len(self.args.skip):\n\t\t\tself._show_skip()\n\t\telse:\n\t\t\t# This could signify STDIN contains json or xml to intrepret as ytids???\n\t\t\tif self.args.json:\n\t\t\t\traise NotImplementedError(\"--json not meaningful when adding skipped videos\")\n\t\t\tif self.args.xml:\n\t\t\t\traise NotImplementedError(\"--xml not meaningful when adding skipped videos\")\n\n\t\t\tytids = list(set(self.args.skip))\n\t\t\tytids = ['-' + _[1:] for _ in ytids if _[0] == '='] + [_ for _ in ytids if _[0] != '=']\n\n\t\t\t# Split into videos and playlists\n\t\t\tv_ytids = [_ for _ in ytids if len(_) == 11]\n\t\t\tpl_ytids= [_ for _ in ytids if len(_) != 11]\n\n\n\t\t\tself.db.begin()\n\t\t\tprint(\"Marking videos to skip (%d):\" % len(v_ytids))\n\t\t\tfor ytid in v_ytids:\n\t\t\t\tprint(\"\\t%s\" % ytid)\n\t\t\t\trow = self.db.v.select_one(\"rowid\", \"`ytid`=?\", [ytid])\n\t\t\t\tself.db.v.update({\"rowid\": row['rowid']}, {\"skip\": True})\n\n\t\t\t\t# Delete any sleep times for this video, this will not error if no rows present\n\t\t\t\tself.db.v_sleep.delete({'ytid': ytid})\n\n\t\t\t\t# Hook: \"skip-video\"\n\t\t\t\tif not self.args.nohook:\n\t\t\t\t\trun_hook(self.db, 'skip-video', ytid=ytid)\n\n\t\t\tprint(\"Marking playlists to skip (%d):\" % len(pl_ytids))\n\t\t\tfor ytid in pl_ytids:\n\t\t\t\tprint(\"\\t%s\" % ytid)\n\t\t\t\trow = self.db.pl.select_one(\"rowid\", \"`ytid`=?\", [ytid])\n\t\t\t\tself.db.pl.update({\"rowid\": row['rowid']}, {\"skip\": True})\n\n\t\t\t\t# Hook: \"skip-playlist\"\n\t\t\t\tif not self.args.nohook:\n\t\t\t\t\trun_hook(self.db, 'skip-playlist', plid=ytid)\n\n\t\t\tself.db.commit()", "title": "" }, { "docid": "f4c4d77c1f083a16dbbab434b181654f", "score": "0.56698376", "text": "def should_skip(self):\n\n return self.skip", "title": "" }, { "docid": "238e9ed6b6678061ec99f39c0de60f01", "score": "0.56594795", "text": "def skipped(self, bot_name, deferred):\n pass", "title": "" }, { "docid": "3c9ecbf61a6a952a25c8c475be00faff", "score": "0.56442344", "text": "def WasSkipped(self):", "title": "" }, { "docid": "3c9ecbf61a6a952a25c8c475be00faff", "score": "0.564423", "text": "def WasSkipped(self):", "title": "" }, { "docid": "506802fdcc6d4796fd68623a4e99320a", "score": "0.56310844", "text": "def skip(self, amount):\n return self.__class__(self.graph, self._labels, self._conditions,\n self._order_by, amount, self._limit)", "title": "" }, { "docid": "3731700f48639672eb4cecb7d643f599", "score": "0.5629662", "text": "def 
skipFrame(self, frames, cap):\n totalFrames = self.getFPS(cap)\n skip = totalFrames // frames # 30 / 3 = 10 1 ..1 ..11 ..21 ..22 .. 30 *** 2 \n return skip", "title": "" }, { "docid": "7b51737de404de773e564718c366b0af", "score": "0.56170434", "text": "def clear_empties(self):\n keep = self.galpairs > 0\n self.sep, self.galpairs, self.ranpairs, self.est = \\\n self.sep[keep], self.galpairs[keep], self.ranpairs[keep],\\\n self.est[keep]\n self.nbin = len(self.sep)\n return self.nbin", "title": "" }, { "docid": "0994fe2efd4a0254320d86ac5ee20df1", "score": "0.5602012", "text": "def skip_tiles(self, tiles=None, tiles_batches=None):\n logger.debug(\"determine which tiles to skip...\")\n # only check for existing output in \"continue\" mode\n if self.config.mode == \"continue\":\n yield from tiles_exist(\n config=self.config,\n process_tiles=tiles,\n process_tiles_batches=tiles_batches,\n )\n # otherwise don't skip tiles\n else:\n if tiles_batches:\n for batch in tiles_batches:\n for tile in batch:\n yield (tile, False)\n else:\n for tile in tiles:\n yield (tile, False)", "title": "" }, { "docid": "a1ee24891f27db18e2367aaa02a8b033", "score": "0.55959487", "text": "def test_with_skip(self):\n # self.assertEqual(3, add(1, 2))\n # self.assertNotEqual(3, add(2, 2))", "title": "" }, { "docid": "a1ee24891f27db18e2367aaa02a8b033", "score": "0.55959487", "text": "def test_with_skip(self):\n # self.assertEqual(3, add(1, 2))\n # self.assertNotEqual(3, add(2, 2))", "title": "" }, { "docid": "cd7941b90a877a2fcfb21b13cc439e83", "score": "0.5585231", "text": "def __manage_rows_skipping(self, kwargs, skipchunks, skiprows, nrows):\n if skipchunks is not None:\n skiprows = skipchunks * kwargs.get('chunksize', self.chunksize)\n if skiprows is not None:\n kwargs['skiprows'] = skiprows\n if self.columns is not None:\n col = self.columns[0]\n kwargs['skiprows'] += len(col) if isinstance(col, tuple) else 1\n kwargs['header'] = None\n kwargs['names'] = self.columns\n if nrows is not None:\n kwargs['nrows'] = nrows", "title": "" }, { "docid": "e54b0be2ffffbc065e23cbaa20b9c66b", "score": "0.5567402", "text": "def skipgrams(pages, max_context, l):\n for words in pages:\n for index, current in enumerate(words):\n if current != 0 and not subsample(pages.counter[pages.decode(current)]/pages.total_count, l):\n continue\n context = np.random.randint(1, max_context)\n for target in words[max(0, index - context): index]:\n yield current, target\n for target in words[index + 1: index + context + 1]:\n yield current, target", "title": "" }, { "docid": "668064206217007e075b18f3c1b189e3", "score": "0.5560861", "text": "def skip(self, n):\n self.pos += n", "title": "" }, { "docid": "5bd8b2e558abb903e98cf61f3826d16c", "score": "0.5554391", "text": "def SkipGramOptionsStart(builder):\n return Start(builder)", "title": "" }, { "docid": "69d5d3b895903d2d77448322bb2e94c3", "score": "0.55486065", "text": "def _check_skip(self, planred, name):\n # Update the attributes\n self._skipexred = {k: exists(v) for k, v in self.summpaths.items()}\n self._skipexplan = {k: exists(v) for k, v in self.planpaths.items()}\n _name = name if isinstance(name, str) else f\"lv{name}\"\n\n # Check if the step is skipped\n if planred.lower().startswith(\"r\"):\n if self._skipexred[name]:\n if self.verbose >= 1:\n print(f\"SKIP: Summary exists for {_name} at {self.summpaths[name]}\")\n return self._read_summ(name)\n\n elif planred.lower().startswith(\"p\"):\n if self._skipexplan[name]:\n if self.verbose >= 1:\n print(f\"SKIP: Planer exists for {_name} at 
{self.planpaths[name]}\")\n return self._read_plan(name)", "title": "" }, { "docid": "06c84841a1e02e383768daceabeb89c2", "score": "0.5548301", "text": "def skip_arm(self):\n self._next_arm()\n self.list_next = self.list_next[1:]", "title": "" }, { "docid": "3170d44e5013cade5703f785793b55cf", "score": "0.55476904", "text": "def skip(self, size):\n self.__pntr += size", "title": "" }, { "docid": "ebdb3fb5d1a62726faaefd7b47bde15f", "score": "0.55332303", "text": "def skipped_sim_steps(self) -> int:\n skipped_steps = 0\n if self.enable_step_throttling:\n while skipped_steps < self.n_skip_steps:\n skipped_steps = self.obj_handle.get_sim_step() - self.prev_sim_step\n time.sleep(1e-5)\n self.prev_sim_step = self.obj_handle.get_sim_step()\n if skipped_steps > self.n_skip_steps:\n print(\n 'WARN: Skipped {} steps, Default skip limit {} Steps'.format(\n skipped_steps,\n self.n_skip_steps\n )\n )\n else:\n skipped_steps = self.obj_handle.get_sim_step() - self.prev_sim_step\n self.prev_sim_step = self.obj_handle.get_sim_step()\n\n self._skipped_sim_steps = skipped_steps\n\n return self._skipped_sim_steps", "title": "" }, { "docid": "48e6df9d636f87999f0bd9e9443112dc", "score": "0.5514506", "text": "def _get_skip_info(self, helix_size, base_list):\n skip_info = [0]*helix_size\n for base in base_list:\n skip_info[base.p] = base.num_deletions\n return skip_info", "title": "" }, { "docid": "81c0aa508c97312e471d31ed729be6dc", "score": "0.550138", "text": "def skip(self, n):\n self.incr_index(n)\n return self", "title": "" }, { "docid": "5390c985177e4691846d25d779343346", "score": "0.5497097", "text": "def n_skip_steps(self, value: int):\n self._n_skip_steps = value\n self._world_handle.set_num_step_skips(value)\n return", "title": "" }, { "docid": "623e22884d3b7bca211c253434d07507", "score": "0.5491756", "text": "def _default_skip(insym, keras_layer, _): # pylint: disable=unused-argument\n return insym", "title": "" }, { "docid": "e1f34aecfc6267b4b9b2f04715bd1bf6", "score": "0.54896474", "text": "def skip(self, n=None):\n\n try:\n for _ in zip(xrange(random.randint(0, 4000) if n is None else n), self._normal_words()): pass\n except NameError:\n # why the fuck would you do this to your own language?\n for _ in zip(range(random.randint(0, 4000) if n is None else n), self._normal_words()): pass", "title": "" }, { "docid": "693ba86404a34d63d32ee32b1e3d2268", "score": "0.54856104", "text": "def skip_chromosome(self):\n self.load_chromosome(collect_data=False)", "title": "" }, { "docid": "603f64f6be66da0012c4852fc34ad685", "score": "0.547532", "text": "def log_skipped( self, text ):\r\n text = \"[*] SKIPPED: \" + text\r\n self._log(text, YELLOW, True)", "title": "" }, { "docid": "e8d13299b9914bb5bd106565adbaae3b", "score": "0.5467124", "text": "def skip(self, player) -> None:\n if self.round:\n new_round = self.round.skip(player)\n if new_round:\n self.round.chat.update_chat(f\"Round {self.round_count} has been skipped\")\n self.round_ended()\n return True\n return False\n else:\n raise Exception(\"No round started yet!\")", "title": "" }, { "docid": "2a54f079b4611b6887f763b5869f4840", "score": "0.54654855", "text": "def test_no_samples(self):\n data = self.collagen[:0]\n for proc in PREPROCESSORS:\n d2 = proc(data)", "title": "" }, { "docid": "73764e5b7d5ce467a862396c9e4b9342", "score": "0.54648125", "text": "def _EXKK(self, opcode):\n self.skip_keys[self.get_n(opcode)](self.get_x(opcode))", "title": "" }, { "docid": "b1cfe6e4ef869641c05bb90b39a08222", "score": "0.54574573", "text": "def can_skip(self, nf):\n 
return False", "title": "" }, { "docid": "11c4792fcecc5bc680fde22db6ee7f0a", "score": "0.54529834", "text": "def get_skipped_plays(self):\n return self.games_to_skip", "title": "" }, { "docid": "8260734187621fe7fd0d554b02ce4f8f", "score": "0.5448306", "text": "def skip_line(self, amount=1):\n for i in xrange(amount):\n self.f.readline()", "title": "" }, { "docid": "fcdd3a0245a8b9800e20d16bf5fb652e", "score": "0.54468286", "text": "def skipPlayer(self, event):\n if self.screen.selected is not None:\n self.screen.selected.skipped = True\n self.stopRunning()", "title": "" }, { "docid": "bae95ca5c4662be826556605e53fdbcc", "score": "0.5437433", "text": "def eliminated(people, skip):\n assert people > 0\n assert skip >= 0\n circle = Queue()\n for position in range(1, people + 1):\n circle.enqueue(position)\n positions = []\n while circle:\n for _skipped in range(skip):\n circle.enqueue(circle.dequeue())\n positions.append(circle.dequeue())\n return positions", "title": "" }, { "docid": "045b1f74e1d7dacddb2dfcc7366466a3", "score": "0.5437125", "text": "def skip(func):\n def inner(self, *args, **kwargs):\n \"\"\"Inner.\"\"\"\n if SKIP:\n return\n return func(self, *args, **kwargs)\n return inner", "title": "" }, { "docid": "e02c2549536f28188cf2d62b21c8f987", "score": "0.54360056", "text": "def _finish_early(self):\n self._logger.debug(\"Confidence threshold met; skipping remaining sources\")\n with self._queues.lock:\n for source in self.sources:\n source.skip()\n self.finished = True", "title": "" }, { "docid": "e02c2549536f28188cf2d62b21c8f987", "score": "0.54360056", "text": "def _finish_early(self):\n self._logger.debug(\"Confidence threshold met; skipping remaining sources\")\n with self._queues.lock:\n for source in self.sources:\n source.skip()\n self.finished = True", "title": "" }, { "docid": "4b6687a9206845986cfab72161ad54c9", "score": "0.5432385", "text": "def skip(self):\n self.skips += 1\n if self.skips > len(self.players) -2:\n return True\n \n return False", "title": "" }, { "docid": "01fe9601de6545768257ff631ccee65c", "score": "0.5428907", "text": "def handle_skips(blocks):\n skip_tags = ['skip_this', 'skip_other', 'skip_below', 'skip_above']\n\n def check_skip_syntax(b, h=skip_tags):\n l = [k for k in b['kwargs'].keys() if k.startswith('skip_')]\n n = [k for k in l if not k in h]\n if n:\n app.die('Not understood skip statment', unknown=n, allowed=h)\n\n def skip(b):\n b['kwargs']['skip_this'] = True\n\n for b in blocks:\n check_skip_syntax(b)\n if b['kwargs'].get('skip_other'):\n for c in blocks:\n skip(c)\n b['kwargs'].pop('skip_this')\n return True\n if b['kwargs'].get('skip_below'):\n s = False\n for c in blocks:\n if c == b:\n s = True\n continue\n if s:\n skip(c)\n return True\n\n if b['kwargs'].get('skip_above'):\n for c in blocks:\n if c == b:\n return True\n skip(c)\n\n if b['kwargs'].get('skip_this'):\n return True", "title": "" }, { "docid": "c17a9534907a21d6a6a78963e537dfa0", "score": "0.5425112", "text": "def begin_sampling(self):\n pass", "title": "" }, { "docid": "756c407cbb9841cd268370ad1aa1647e", "score": "0.5415831", "text": "def addSkip(self, test, reason):\n super(MyTestResult, self).addSkip(test, reason)\n self.addResult(test, STATES.SKIP, reason)", "title": "" }, { "docid": "bcd54a1fcf8b84f842e7a3a9fa6b7218", "score": "0.5412402", "text": "def skip_chunk(self, stream, chunk_size):\n pass", "title": "" }, { "docid": "ae9ee695fc68d43756065c289763ad89", "score": "0.5409749", "text": "def skip(self, n):\n stream = self._stream\n\n def inner():\n for _ in 
range(n):\n next(stream)\n for x in stream:\n yield x\n self._stream = inner()\n return self", "title": "" }, { "docid": "2af106a36165790c562616841c97f2a0", "score": "0.54060584", "text": "def gen_lines_prepass(self) -> None:\n pass", "title": "" } ]
294970926228fce0b623e8b5c3e22575
Loads a network from a CSV file.
[ { "docid": "88a79f5ad83f58d5f4dffa69cdbc21e0", "score": "0.61747426", "text": "def network_from_csv(filename):\n from openpnm.network import Network\n fname = _parse_filename(filename=filename, ext='csv')\n\n a = read_table(filepath_or_buffer=fname,\n sep=',',\n skipinitialspace=True,\n index_col=False,\n true_values=['T', 't', 'True', 'true', 'TRUE'],\n false_values=['F', 'f', 'False', 'false', 'FALSE'])\n\n # First parse through all the items and re-merge columns\n dct = {}\n keys = sorted(list(a.keys()))\n for item in keys:\n m = re.search(r'\\[.\\]', item) # The dot '.' is a wildcard\n if m: # m is None if pattern not found, otherwise merge cols\n pname = re.split(r'\\[.\\]', item)[0] # Get base propname\n # Find all other keys with same base propname\n merge_keys = [k for k in a.keys() if k.startswith(pname)]\n # Rerieve and remove arrays with same base propname\n merge_cols = [a.pop(k) for k in merge_keys]\n # Merge arrays into multi-column array and store in DataFrame\n dct[pname] = np.vstack(merge_cols).T\n # Remove key from list of keys\n for k in keys:\n if k.startswith(pname):\n keys.pop(keys.index(k))\n else:\n dct[item] = np.array(a.pop(item))\n\n # Now scan through 'pore.coords' and 'throat.conns' to get Np and Nt,\n # then remove the nans\n try:\n Np = np.where(np.isnan(dct['pore.coords'][:, 0]))[0][0]\n except IndexError:\n Np = dct['pore.coords'][:, 0].shape[0]\n try:\n Nt = np.where(np.isnan(dct['throat.conns'][:, 0]))[0][0]\n except IndexError:\n Nt = dct['throat.conns'][:, 0].shape[0]\n for k, v in dct.items():\n if k.startswith('pore.'):\n dct[k] = v[:Np, ...]\n if k.startswith('throat.'):\n dct[k] = v[:Nt, ...]\n\n network = Network()\n network.update(dct)\n return network", "title": "" } ]
[ { "docid": "da41cb35073df863beef61c4f11e5bb6", "score": "0.70894367", "text": "def from_csv(self, file: str):\n with open(file) as f:\n for line in f:\n words = line.split(',')\n line_type = words[0]\n if line_type == 'c':\n self._add_node(int(words[1]), int(words[2]))\n elif line_type == 'e':\n self.add_edge(int(words[1]), int(words[2]))", "title": "" }, { "docid": "5d5420377b332b7df83d2febe0b0acb0", "score": "0.6935833", "text": "def from_csv(self, file: str):\n with open(file) as f:\n for line in f:\n words = line.split(',')\n src = int(words[0])\n dest = int(words[1])\n self.add_edge(src, dest)", "title": "" }, { "docid": "016b9e346ff6c9bf6f94b7cabaa1c281", "score": "0.68656546", "text": "def load(file_path=\"./graph.csv\"):\n assert path.isfile(file_path), \"'{}' is not a file\".format(file_path)\n log.debug(\"loading graph from: {}\".format(file_path))\n graph = None\n\n with open(file_path, 'r', newline='', encoding='utf-8') as file:\n reader = csv.reader(file, quotechar='\"')\n for i, row in enumerate(reader):\n if not graph:\n graph = ActorsGraph(row)\n else:\n for j, movie_ids in enumerate(row):\n for movie_id in movie_ids.split(\",\"):\n graph.add_edge_by_indices(i - 1, j, movie_id)\n\n assert graph, \"graph has not been created, check '{}' file\".format(file_path)\n return graph", "title": "" }, { "docid": "646ca8b8548ac2e59a675c298e6f8dd5", "score": "0.66369027", "text": "def load_filename(self, filename):\n graph = {}\n with open(filename, 'rb') as csvfile:\n reader = csv.reader(csvfile)\n nodes = map(eval, next(reader)[1:])\n for line in reader:\n base = eval(line.pop(0))\n graph[base] = dict((n1, n2)\n for n1, n2 in zip(nodes, map(float, line))\n if n2 > 0)\n self.load(graph)", "title": "" }, { "docid": "2e1bd8ca7edd2fc331237792c15e1e17", "score": "0.65834135", "text": "def from_csv(self, csvfile, header=None, start=0, size=None, delimiter=','):\n with open(csvfile) as f:\n reader = csv.reader(f, delimiter=delimiter)\n i = 0\n if header:\n next(reader, None)\n i = 1\n while i < start:\n i += 1\n next(reader, None)\n i = 0\n for row in reader:\n pred = cleanse(row[1])\n sub = cleanse(row[0])\n ob = cleanse(row[2])\n if not (sub.replace('_','').isalnum() and ob.replace('_','').isalnum()):\n continue\n if len(row) > 3:\n node_attributes = [json.loads(row[3]), json.loads(row[4])]\n edge_attributes = json.loads(row[5])\n else:\n node_attributes = []\n edge_attributes = {}\n self.store('{}({},{})'.format(pred, sub, ob), node_attributes=node_attributes, edge_attributes=edge_attributes)\n i += 1\n if size and i > size:\n break", "title": "" }, { "docid": "3647cf0b83315f0b8e469a5703d8da49", "score": "0.65201735", "text": "def load_neos(neo_csv_path):\n # We are making new list of neo collections:\n # open the csv file, skipping the first line of headers,\n # then each row from csv file read into\n # new Near Earth Object, then add this row to collection.\n # Return collection.\n\n neo_collection = []\n with open(neo_csv_path, 'r') as infile:\n reader = csv.reader(infile)\n next(reader)\n for elem in reader:\n neo = NearEarthObject(elem[3], elem[4], elem[15], elem[7])\n neo_collection.append(neo)\n return neo_collection", "title": "" }, { "docid": "0a029943560567f6f2f07cce925f9f88", "score": "0.64970046", "text": "def load_network(self, directory):\n with open(directory + '/structure', \"r\") as f:\n line = f.readline()\n input_name, input_size = line.split(' ')\n self.add_input(int(input_size))\n i = 0\n\n while 1:\n line = f.readline()\n if len(line) <= 0:\n break\n i += 1\n 
layer_name, layer_size, layer_activation = line.split(' ')\n layer_activation = layer_activation[:-1] # get rid of endline\n self.add_fully_connected(int(layer_size), layer_activation)\n self.layers[-1].set_weights(np.loadtxt(directory + '/weights' + str(i) + '.csv', delimiter=','))", "title": "" }, { "docid": "bac7d6321e9e8b10f896797c4fd9c98f", "score": "0.6475423", "text": "def load_csv_into_graph(file_name: str) -> Graph:\n user_map = Graph()\n row_num = 0\n width_of_map = 0\n\n with open(file_name) as csv_file:\n reader = csv.reader(csv_file)\n for row in reader:\n width_of_map = len(row)\n for i in range(len(row)):\n if row[i] == \"B\":\n user_map.add_vertex((i, row_num), \"blocked\")\n elif row[i] == \"S\":\n user_map.add_vertex((i, row_num), \"start\")\n elif row[i] == \"E\":\n user_map.add_vertex((i, row_num), \"end\")\n else:\n user_map.add_vertex((i, row_num))\n row_num += 1\n height_of_map = row_num\n # Knowing the dimensions of the graph, connect the graph\n for y in range(height_of_map):\n for x in range(width_of_map):\n if x == width_of_map - 1 and y == height_of_map - 1:\n pass\n elif x == width_of_map - 1:\n user_map.add_edge((x, y), (x, y + 1))\n elif y == height_of_map - 1:\n user_map.add_edge((x, y), (x + 1, y))\n else:\n user_map.add_edge((x, y), (x, y + 1))\n user_map.add_edge((x, y), (x + 1, y))\n\n return user_map", "title": "" }, { "docid": "ce4ed47a0c620e3fe34bc893bccb7f41", "score": "0.64610356", "text": "def load_neos(neo_csv_path):\n neos = []\n with open(neo_csv_path, 'r') as f:\n reader = csv.DictReader(f)\n for row in reader:\n neo = NearEarthObject(\n row['pdes'],\n row['name'],\n row['diameter'],\n row['pha'],\n [])\n neos.append(neo)\n return neos", "title": "" }, { "docid": "5372ee685555bced9ea62c81fe36d312", "score": "0.6425544", "text": "def __init__(self, filename='social_network.csv'):\n with open(filename, 'r') as csvfile:\n rows = np.array(list(csv.reader(csvfile)))\n\n #flatten all name tuples, cast to set to \n #get unique names and cast to list for ordering\n self.names = list(set(rows.flatten()))\n\n #network adjacency matrix\n n = len(self.names)\n self.A = np.zeros((n , n))\n\n #populate network\n for c in rows:\n #get cooresponding indicies of names in name list\n i = self.get_name_index(c[0])\n j = self.get_name_index(c[1])\n #make each bi-directional connection\n self.A[i, j] = 1\n self.A[j, i] = 1\n\n #resistance of the network A\n self.R = effective_resistance(self.A)", "title": "" }, { "docid": "c183cf4faf90d2f5eb4d654d82d819f4", "score": "0.64249206", "text": "def load_neos(neo_csv_path='data/neos.csv'):\r\n neos = [] # create list of neo objects\r\n with open(neo_csv_path) as f:\r\n reader = csv.reader(f)\r\n next(reader)\r\n # parse row of csv into pdes(design.) 
,name,diameter, and hazardous\r\n for row in reader:\r\n if row[3] is not None or row[3] != '':\r\n if row[7] == 'Y':\r\n H = True\r\n else:\r\n H = False\r\n # create Neo obj with designation,name,diameter,hazardous\r\n neo_obj = NearEarthObject(row[3], row[4], row[15], H)\r\n neos.append(neo_obj) # append obj to the list\r\n return neos", "title": "" }, { "docid": "efcd830aa775344932897d3c26e6176b", "score": "0.6359306", "text": "def loadGraph(graph_name):\n with open('./dataset/{}_node.csv'.format(graph_name), 'r',\n encoding='utf-8') as fp:\n reader = csv.reader(fp)\n nodes = list(int(_[0]) for _ in reader)\n with open('./dataset/{}_edge.csv'.format(graph_name), 'r',\n encoding='utf-8') as fp:\n reader = csv.reader(fp)\n edges = list((int(_[0]), int(_[1])) for _ in reader if _[0] != _[1])\n G = nx.Graph()\n G.add_nodes_from(nodes)\n G.add_edges_from(edges)\n return G", "title": "" }, { "docid": "ac479056f306df2d3483ce461ed954f6", "score": "0.63494384", "text": "def load_diagram_from_csv_file(self, filepath):\n\n bpmn_csv_import.BpmnDiagramGraphCSVImport.load_diagram_from_csv(filepath, self)", "title": "" }, { "docid": "fe8ecfbf3b66364c2850f3d67acda4c8", "score": "0.62989837", "text": "def load_data():\n import time\n\n print(\"Loading data and create road network (about 30 sec)...\")\n start_time = time.clock()\n\n road_network = RoadNetwork()\n\n vertices = pd.read_csv(\"./data/vertices.csv\")\n edges = pd.read_csv(\"./data/edges.csv\")\n\n print(\"Loading vertices...\")\n for index, row in vertices.iterrows():\n road_network.add_vertex(int(row['v_id']), row['lat'], row['lon'])\n\n print(\"Loading edges...\")\n for index, row in edges.iterrows():\n road_network.add_edge(int(row['e_id']), int(row['start_vid']), int(row['end_vid']), row['length'])\n\n print(\"Done. 
Elapsed time is %f seconds\" % (time.clock() - start_time))\n\n return road_network", "title": "" }, { "docid": "fd086123b7299e8b8145e6d6fddd883b", "score": "0.6272067", "text": "def load_neos(neo_csv_path):\n with open(neo_csv_path, \"r\") as infile:\n\n neos = csv.DictReader(infile)\n\n neos = [\n NearEarthObject(\n pdes=neo[\"pdes\"],\n name=neo[\"name\"],\n diameter=neo[\"diameter\"],\n pha=neo[\"pha\"]\n )\n for neo in neos\n ]\n\n return neos", "title": "" }, { "docid": "df5270252e8af5368e07d84da320a1f4", "score": "0.6266226", "text": "def load_network(network_file):\n handler = h.MiTabHandler()\n handler.parse(network_file)\n return handler.network", "title": "" }, { "docid": "1e747687e4d7d45913fa1d8f2634ea33", "score": "0.62265074", "text": "def _load(self):\n self._data = load_network_from_file(self.path, fmt=self.ext)", "title": "" }, { "docid": "f612bc245d36f71694ab05f863b289a7", "score": "0.62200767", "text": "def read_from_csv(self, filename):\n \n self.graph = []\n\n # Read the file into a string separated by newlines for each row\n try:\n with open(filename,'r') as readfile:\n lines = readfile.read().splitlines()\n except IOError, e:\n print \"I/O error: %s\" % e\n return\n\n # Read lines with csv.reader\n csvreader = csv.reader(lines)\n\n # Load the rows into self.graph matrix\n try:\n for row in csvreader:\n self.graph.append([])\n for i in range(csvreader.line_num):\n self.graph[csvreader.line_num-1].append(int(row[i]))\n except csv.Error, e:\n print \"CSV error: %s\" % e\n return\n except IndexError, e:\n print \"Index error: %s\" % e\n return\n \n # Make matrix symmetric\n # While increasing size, we do not have to check for\n # out of range when getting data from the graph\n for i in range(len(self.graph)):\n for j in range(i+1,len(self.graph)):\n self.graph[i].append(self.graph[j][i])", "title": "" }, { "docid": "a7b106ad047eafa594e2894d56642b56", "score": "0.6205403", "text": "def load_prefecture_network(path):\n data = pd.read_csv(path, header=0, index_col=0)\n return data", "title": "" }, { "docid": "3b48a841d24bdccc9d08635c8b51aa72", "score": "0.61859536", "text": "def load_dataset(net, filename): \n \n data = N.recfromcsv(filename, delimiter=\",\", names=True, autostrip=True, case_sensitive=True)\n\n m, n = data.size, len(data.dtype.names)\n numberdata = N.zeros((m, n), order='F')\n \n for node, d in net.node.iteritems():\n states = d['states_ind']\n numberdata[:,net.graph['nilut'][node]] = [states[str(s)] for s in data[node]]\n \n return numberdata", "title": "" }, { "docid": "dec56d138351081b72fe9541a8a78881", "score": "0.6177733", "text": "def multinet_from_csv(\n file_name,\n filter_func=util.default_filter,\n weight_func=util.default_weight,\n layer_func=util.default_layer,\n ow='ORIGIN', dw='DEST',\n create_using=None,\n **csv_reader_argv):\n\n index_dict = {}\n if create_using is None:\n create_using = mn.DiMultinet()\n mg = create_using\n\n if type(weight_func) == str:\n weight_func = util.weight_from_string(weight_func)\n\n if type(layer_func) == str:\n layer_func = util.layer_from_string(layer_func)\n\n with open(file_name) as netfile:\n if not csv_reader_argv:\n netreader = csv.reader(netfile,\n delimiter=',',\n quotechar='\\\"',\n quoting=csv.QUOTE_NONNUMERIC)\n else:\n netreader = csv.reader(netfile,**csv_reader_argv)\n\n index_line = next(netreader)\n\n index = 0\n for item in index_line:\n index_dict[item] = index\n index += 1\n\n origin_index = index_dict[ow]\n dest_index = index_dict[dw]\n\n for line in netreader:\n if not 
filter_func(index_dict,line):\n continue\n\n origin = str(line[origin_index])\n dest = str(line[dest_index])\n\n layer = layer_func(index_dict,line)\n weight = weight_func(index_dict,line)\n\n mg.add_node(origin)\n mg.add_node(dest)\n\n mg.aggregate_edge(origin,dest,layer,weight)\n\n return mg", "title": "" }, { "docid": "cba4544819e8f41ea873371efe1fcc71", "score": "0.61474216", "text": "def load(self, csvFilePath, metadataFilePath):\n\t\tf = open(csvFilePath, 'rb')\n\t\treader = csv.reader(f)\n\t\tl = list(reader)\n\n\t\t# extract attributes and data points\n\t\tself.attributes = map(lambda x: x.strip(), l[0])\n\t\tself.data = l[1:10]\n\n\t\tself.classAttr = self.attributes[-1].strip()\n\n\t\tf = open(metadataFilePath)\n\t\tfor line in f:\n\t\t\ttype = line[line.index(':')+1:].strip()\n\n\t\t\t# save attribute type\n\t\t\tif type == 'nominal':\n\t\t\t\tself.attrTypes.append(self.NOMINAL)\n\t\t\telif type == 'numeric':\n\t\t\t\tself.attrTypes.append(self.NUMERIC)\n\t\t\telse:\n\t\t\t\tself.attrTypes.append(self.OTHERS)\n\n\t\t# convert values to correct type\n\t\tfor i, sample in enumerate(self.data):\n\t\t\tself.numData += 1\n\t\t\tfor j, type in enumerate(self.attrTypes):\n\t\t\t\tval = self.data[i][j]\n\t\t\t\tif type == self.NUMERIC and val != '?':\n\t\t\t\t\tself.data[i][j] = float(val)", "title": "" }, { "docid": "92f98edb63b7406938b60dffac8da187", "score": "0.612518", "text": "def load_network(filename):\n temp = pickle.load(filename)\n n = Network(None, None, None, None, None, None, None)\n n.__setstate__(temp)\n return n", "title": "" }, { "docid": "a7f005dd7f8132267f72b11a4cbf66e2", "score": "0.6119918", "text": "def loadnetwork(filename):\n with open(filename, 'rb') as networkin:\n self = pickle.load(networkin)\n \n return self", "title": "" }, { "docid": "b2576bf23e2ffe4e7d4f5afeb42a1fdc", "score": "0.61056966", "text": "def from_CSV(cls, path):\n with open(path) as file:\n tiles = list(csv.reader(file))\n\n return cls.from_list(tiles)", "title": "" }, { "docid": "c763d849205884dd187de2654fa93c00", "score": "0.60922307", "text": "def load_gangs(self, filename):\n\n with open(filename, 'r') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n gang = Gang(int(row[\"gang number\"]),\n (float(row[\"x location\"]), float(row[\"y\"])),\n int(row[\"gang members\"]))\n self.gang_info[int(row[\"gang number\"])] = gang", "title": "" }, { "docid": "0dd346d3371c6ccbb172b88c6cd1b4b7", "score": "0.6080333", "text": "def load_network(path, filename):\n network_path = '{}/networks/{}.graphml'.format(path, filename)\n h = open(network_path, 'r')\n try:\n multithreading.lock_file_handle(h)\n return igraph.Graph.Read(network_path)\n finally:\n multithreading.unlock_file_handle(h)\n h.close()", "title": "" }, { "docid": "ed93b8dde00643a09fc9bf65891af5c5", "score": "0.60763484", "text": "def from_csv(self, filepath, delim = ','):\n\n tdo = textio.read_csv(filepath, True, delim)\n\n self.from_tdo(tdo)\n\n return", "title": "" }, { "docid": "2fb46335b50f5ba95cd2d73a56df4acb", "score": "0.60590553", "text": "def load_networks(filename, datadir=None):\n fpath = utils.uploaded_filepath(filename, FT_NETWORKS, datadir=datadir)\n\n return fortios_xutils.load_network_graph(fpath)", "title": "" }, { "docid": "13c7634a7ab150373f2ea161731959d6", "score": "0.605758", "text": "def load_neos(neo_csv_path):\n\n df = pd.read_csv(neo_csv_path, low_memory=False)\n df = df[['pdes', 'name', 'pha', 'diameter']]\n df.pdes = df.pdes.astype('str')\n neos = [NearEarthObject(**value) for value in 
df.T.to_dict().values()]\n return neos", "title": "" }, { "docid": "5d030bde2bbc9d187d7b7ed526954ae9", "score": "0.6051468", "text": "def loadcsv(self, fn):\n csvfile = fn\n with open(csvfile) as f:\n reader = csv.reader(f, delimiter=\",\")\n #print reader.next(), \" ommitted\"\n for row in reader:\n row0 = row[0]\n row8 = row[8]\n row3 = row[3]\n row4 = row[4]\n row5 = row[5]\n row6 = row[6]\n row7 = row[7]\n self.name[row8] = row0\n\n self.path[row8] = row3\n if \"Size\" not in row4:\n row4 = int(row4)\n self.size[row8] = row4\n #self.pctsize[row[8]] = float(row[4) / float(self.tsize)\n # size in KB\n self.modified[row8] = row5\n self.accessed[row8] = row6\n self.created[row8] = row7\n\n return", "title": "" }, { "docid": "571f9546034a448ed91689f40134e88c", "score": "0.59965175", "text": "def load(self, filename):\n self.ids = []\n self.nodes = {}\n self.neigh = {}\n f = open(filename)\n for line in f:\n if not line.strip(): break\n i, l = map(str.strip, line.split(':'))\n if i in self.nodes:\n raise ValueError\n self.nodes[i] = Node(i, l)\n self.neigh[i] = []\n self.ids.append(i)\n\n for line in f:\n fr, l, to = map(str.strip, line.split(':'))\n if not l: l = None\n if fr not in self.nodes or to not in self.nodes:\n raise ValueError\n self.nodes[fr].add_neighbour(self.nodes[to], l)\n if l == None: l = ''\n self.neigh[fr].append((to, l))", "title": "" }, { "docid": "be2c1a8defb1ce0e566b3528ac8244d0", "score": "0.59776723", "text": "def load_from_file_csv(cls):\n csvfile = '{}.csv'.format(cls.__name__)\n try:\n with open(csvfile, 'r', newline='') as csvf:\n if cls.__name__ == 'Square':\n fields = [\"id\", \"size\", \"x\", \"y\"]\n else:\n fields = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n doc = csv.DictReader(csvf, fieldnames=fields)\n ls_of_dics = [{k: int(v) for k, v in dic.items()}\n for dic in doc]\n return [cls.create(**dics) for dics in ls_of_dics]\n except IOError:\n return []", "title": "" }, { "docid": "8b1ea210fdc0d6869d1498b833db686a", "score": "0.597655", "text": "def load_network(path):\n with open(path) as f:\n content = f.read()\n\n edge_str = content.split('\\n')\n network = list()\n for edge in edge_str:\n network.append(edge.split())\n\n return network", "title": "" }, { "docid": "adb3630915556b4c9cd421e6919b9b09", "score": "0.595501", "text": "def from_file(cls):\n with open('data.csv') as file:\n data_reader = csv.reader(file)\n for row in data_reader:\n first, last, mail = row\n cls(first, last, mail, True)", "title": "" }, { "docid": "4d467ad4992d1021521dee67f9665b5b", "score": "0.59509164", "text": "def load_csv(csv):\n t = so(\"csv\",\"random\",\"fgdb\")\n TableToTable_conversion(\n in_rows=csv, \n out_path=os.path.dirname(t), \n out_name=os.path.basename(t)\n )\n return t", "title": "" }, { "docid": "50050f90f10435fb78d38077b88f50c6", "score": "0.5927157", "text": "def load_network(self, fileName):\n\t\tif '.json' not in modelName:\n\t\t\tself.model = model_from_json(open(modelName+'_arch.json', 'r').read())\n\t\telse:\n\t\t\tself.model = model_from_json(open(modelName, 'r').read())", "title": "" }, { "docid": "7f5f10673e74e62904d8dc62089bfcd1", "score": "0.5914837", "text": "def load_neos(neo_csv_path):\n\n result = []\n with open(neo_csv_path, 'r') as file:\n reader = DictReader(file)\n for x in reader:\n result.append(NearEarthObject(designation=x['pdes'],\n name=x['name'],\n hazardous=x['pha'],\n diameter=x['diameter']))\n\n return result", "title": "" }, { "docid": "6edae08debda8804f99096dd97d582e2", "score": "0.5914653", "text": "def 
load_from_file_csv(cls):\n filename = \"{}.csv\".format(cls.__name__)\n\n if os.path.exists(filename):\n with open(filename, encoding=\"utf-8\") as f:\n content = f.read()\n objs = cls.from_json_string(content)\n newlist = []\n for obj in objs:\n new = cls.create(**obj)\n newlist.append(new)\n return newlist\n else:\n return []", "title": "" }, { "docid": "977642f6f45ec5ff12797222e8dc7241", "score": "0.58989906", "text": "def import_network_from_file(file=None, base_url=DEFAULT_BASE_URL):\n if file is None:\n file = os.path.abspath('data/galFiltered.sif')\n else:\n file = os.path.abspath(file)\n res = commands.commands_post('network load file file=' + file, base_url=base_url)\n # TODO: Fix R documentation to match what's really returned\n\n # should not be necessary, but is because \"network load file\" doesn't actually set the current network\n # until after it's done. So, without the sleep(), setting the current network will be superceded by\n # \"network load file\"'s own network. This is race condition that can be solved by \"network load file\"\n # not returning until it's actually done.\n # TODO: Fix this race condition\n time.sleep(CATCHUP_NETWORK_SECS)\n\n return res", "title": "" }, { "docid": "46f33d0cf7544970cce1d77957613291", "score": "0.5894413", "text": "def load_from_file_csv(cls):\n filename = cls.__name__ + \".csv\"\n # Unless the file does not exist, opens the file and reads each row.\n # The length of the row specifies the type of object. If this is not\n # a valid value, raises a TypeError. Otherwise, values are paired\n # with their correct attribute name in a dictionary, then used to\n # create those instances. A list of instances is returned upon success\n try:\n with open(filename, \"r\") as fcsv:\n c_reader = csv.reader(fcsv, delimiter=\",\")\n retlist = []\n for row in c_reader:\n if len(row) == 5:\n ats = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n elif len(row) == 4:\n ats = [\"id\", \"size\", \"x\", \"y\"]\n else:\n raise TypeError\n values_dic = {}\n for i in range(len(row)):\n if type(row[i]) is not int:\n row[i] = int(row[i])\n values_dic[ats[i]] = row[i]\n obj = cls.create(**values_dic)\n retlist.append(obj)\n return retlist\n except IOError:\n raise IOError", "title": "" }, { "docid": "3938bce4fa5e66c2764752243c9ca8bb", "score": "0.58855146", "text": "def load_dataset(self,\n csv_file,\n root_dir=None,\n augment=False,\n shuffle=True,\n batch_size=1,\n train=False):\n\n ds = dataset.TreeDataset(csv_file=csv_file,\n root_dir=root_dir,\n transforms=self.transforms(augment=augment),\n label_dict=self.label_dict,\n preload_images=self.config[\"train\"][\"preload_images\"])\n\n data_loader = torch.utils.data.DataLoader(\n ds,\n batch_size=batch_size,\n shuffle=shuffle,\n collate_fn=utilities.collate_fn,\n num_workers=self.config[\"workers\"],\n )\n\n return data_loader", "title": "" }, { "docid": "23b08b15a197bb24f43fe9eb1f031099", "score": "0.58812714", "text": "def load(cls, device):\n fpath = Path(COLLISION_DIR, 'collisions.csv')\n inputs, labels = [], []\n with open(fpath, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for line in reader:\n input = [float(s) for s in line[:-1]]\n inputs.append(input)\n\n label = int(line[-1])\n assert label in [0, 1]\n labels.append(label)\n\n inputs = torch.tensor(inputs).to(device)\n labels = torch.tensor(labels).long().to(device)\n return cls(inputs, labels)", "title": "" }, { "docid": "785315180eb18f154bca3be42a832bf3", "score": "0.58764255", "text": "def load_net(self, path):\n\n with open(path, 
'rb') as nn_file:\n net = pickle.load(nn_file)\n\n if type(net) == dict:\n loaded = list()\n if 'model' in net.keys():\n self.model = net['model']\n loaded.append('model')\n if 'meta_params' in net.keys():\n self.meta_params = net['meta_params']\n loaded.append('meta_params')\n if 'data_sets' in net.keys():\n self.data_sets = net['data_sets']\n loaded.append('data_sets')\n if 'costs' in net.keys():\n self.costs = net['costs']\n loaded.append('costs')\n if 'cost_counter' in net.keys():\n self.cost_counter = net['cost_counter']\n loaded.append('cost_counter')\n print('Loaded from {0}: {1}'.format(path, ', '.join(loaded)))", "title": "" }, { "docid": "5920150abf03965e2c05acb19006904b", "score": "0.5874864", "text": "def from_csv(csv_file, units=DEFAULT_UNIT):\n\n if isinstance(csv_file, str):\n if not csv_file.endswith('.csv'):\n raise ValueError(\n 'Filetype must be .csv to create a DistanceMatrix.')\n\n with open(csv_file, 'r') as open_file:\n return DistanceMatrix.from_csv(open_file)\n\n lines = [line.strip().split(',') for line in csv_file.readlines()]\n\n # Strip trailing empty strings if they exist\n try:\n [line.remove('') for line in lines]\n except ValueError:\n pass\n\n highest_line_length = 0\n matrix = []\n for line in lines:\n if len(line) == highest_line_length + 1:\n if all(is_numeric(item) for item in line):\n matrix.append([float(item) for item in line])\n highest_line_length += 1\n else:\n raise ValueError(\n 'Data lines in input file should be in order of ascending length.')\n\n return DistanceMatrix(matrix, units)", "title": "" }, { "docid": "7139a8d85797a92497b30657ec0db74d", "score": "0.58667594", "text": "def load_graph(file):\n g = nx.DiGraph()\n mode = \"N\"\n for l in file:\n l = l.strip()\n if mode == \"N\":\n if l == \"// Nodes\":\n mode = \"LN\"\n elif mode == \"LN\":\n if l == \"// Edges\":\n mode = \"LE\"\n else: # LOAD NODES\n nparts = l.split(\" \", 2)\n g.add_node(int(nparts[0]),\n {'unixtime': int(nparts[1]), 'line': nparts[2]})\n pass\n elif mode == \"LE\" and len(l) > 0: # LOAD EDGES\n eparts = [int(x) for x in l.split(\" \", 1)]\n g.add_edge(eparts[0], eparts[1])\n return g", "title": "" }, { "docid": "ecd30f289351eeca88b0cfe2a13c6164", "score": "0.58567697", "text": "def loadNetworkFromFile(filename, mode = 'pickle'):\n if mode == 'pickle':\n import pickle\n fp = open(filename)\n network = pickle.load(fp)\n fp.close()\n return network\n elif mode in ['plain', 'conx']:\n fp = open(filename, \"r\")\n line = fp.readline()\n network = None\n while line:\n if line.startswith(\"layer,\"):\n # layer, name, size\n temp, name, sizeStr = line.split(\",\")\n name = name.strip()\n size = int(sizeStr)\n network.addLayer(name, size)\n line = fp.readline()\n weights = [float(f) for f in line.split()]\n for i in range(network[name].size):\n network[name].weight[i] = weights[i]\n elif line.startswith(\"connection,\"):\n # connection, fromLayer, toLayer\n temp, nameFrom, nameTo = line.split(\",\")\n nameFrom, nameTo = nameFrom.strip(), nameTo.strip()\n network.connect(nameFrom, nameTo)\n for i in range(network[nameFrom].size):\n line = fp.readline()\n weights = [float(f) for f in line.split()]\n for j in range(network[nameTo].size):\n network[nameFrom, nameTo].weight[i][j] = weights[j]\n elif line.startswith(\"parameter,\"):\n temp, exp = line.split(\",\")\n exec(exp) # network is the neural network object\n elif line.startswith(\"network,\"):\n temp, netType = line.split(\",\")\n netType = netType.strip().lower()\n if netType == \"cascornetwork\":\n from 
pyrobot.brain.cascor import CascorNetwork\n network = CascorNetwork()\n elif netType == \"network\":\n network = Network()\n elif netType == \"srn\":\n network = SRN()\n else:\n raise AttributeError, \"unknown network type: '%s'\" % netType\n line = fp.readline()\n return network", "title": "" }, { "docid": "0e064e8bc5fd29283f229fd615b2651c", "score": "0.58296126", "text": "def load_from_file(file_path: str):\n routes = []\n\n with open(file_path, \"r\") as routes_file:\n csv_reader = csv.reader(routes_file, delimiter=\",\")\n next(csv_reader, None) # Dump header\n\n for row in csv_reader:\n routes.append(MMTRoute.parse_from_row(row))\n\n return MMTRouteDataController(routes)", "title": "" }, { "docid": "6d49a58993ee4b5c0b0a55e3db38a612", "score": "0.58203024", "text": "def load_from_file_csv(cls):\n file_name = cls.__name__ + '.csv'\n list_objs = []\n with open(file_name, 'r') as fp:\n csv_r = csv.DictReader(fp)\n for i in csv_r:\n for k, v in i:\n i[k] = int(v)\n list_objs.append(cls.create(**i))", "title": "" }, { "docid": "c3fe6e022dfe252490f00c97af2196a2", "score": "0.57661384", "text": "def load_categories(self, path: str):\n with open(path, 'r') as f:\n csv_reader = csv.reader(f, delimiter=';')\n # skip header \n next(csv_reader)\n \n for row in csv_reader:\n if row[2] == 'NULL':\n p = None\n else:\n p = self.categories[int(row[2])]\n \n self.categories[int(row[0])] = Category(int(row[0]), row[1], p)", "title": "" }, { "docid": "11b4f4cc3188c0afa49e3406dc74e365", "score": "0.5759813", "text": "def loadData(labelName = \"G3\"):\n print(\"\\nLoading data…\")\n trainPath, testPath = TRAIN_PATH, TEST_PATH\n print(\"\\nParse local csv\")\n train = pd.read_csv(filepath_or_buffer = trainPath,\n names = CSV_COLUMN_NAMES,\n header = 0)\n print(\"\\nGet train information\")\n trainFeatures, trainLabel = train, train.pop(labelName)\n\n # Parse local csv\n test = pd.read_csv(filepath_or_buffer = testPath,\n names = CSV_COLUMN_NAMES,\n header = 0)\n # Get test information\n testFeatures, testLabel = test, test.pop(labelName)\n\n return (trainFeatures, trainLabel),(testFeatures, testLabel)", "title": "" }, { "docid": "8e834572a37f9f14237a1d951ada26a9", "score": "0.5752725", "text": "def load_train_data(csv_path=_TRAIN_DATA_PATH):\n train_df = pd.read_csv(open(csv_path, 'r', encoding=\"iso-8859-1\"), quotechar='\"')\n\n return train_df", "title": "" }, { "docid": "1315fb2c3dea930f843b178f8f07d96e", "score": "0.57414705", "text": "def loadCSV(self, path):\n dtframe = pd.read_csv(path)\n self.loadDataframe(dtframe)\n \n #####", "title": "" }, { "docid": "5f2b242c33300281a9e6db95f09d55d6", "score": "0.5735814", "text": "def load_graph_from_file(filename):\n with open(filename) as UK_cities:\n dict_uk_cities = json.load(UK_cities)\n return nx.Graph(dict_uk_cities)", "title": "" }, { "docid": "2f086418c793a6cedcce66864bd38c0f", "score": "0.57288903", "text": "def importCSV(self):\n filepath = QFileDialog.getOpenFileName(self, 'Load CSV file',\"\",\"Comma-separated values (*.csv)\")\n if filepath !=\"\":\n urllib.urlretrieve('file:/// %s' % filepath,'Schedule.csv')\n self.tableWidget.clear()\n self.loadSchedule()", "title": "" }, { "docid": "2ed0c3b6f8c016dd5881c8f5ce09f2f3", "score": "0.5704464", "text": "def load_from_file_csv(cls):\n csv_file = cls.__name__ + \".csv\"\n lst = list()\n try:\n with open(csv_file, \"r\") as fh:\n reader = csv.DictReader(fh)\n res = list(reader)\n for d in res:\n for item in d.keys():\n d[item] = int(d[item])\n temp = cls.create(**d)\n lst.append(temp)\n return lst\n 
except:\n return list()", "title": "" }, { "docid": "2321149ca9513bf6e89e5c5e2d9f5867", "score": "0.570376", "text": "def load_from_file(self, filename):\n\n with open(filename, newline='') as csvfile:\n reader = csv.reader(csvfile)\n self.column_names = next(reader) # Take note of the header names and save them seperate from the data\n for row in reader: # Now save the data row by row\n self.data.append(row)\n\n self.convert_to_numeric() # Convert values to floats (that can be converted)\n\n return self", "title": "" }, { "docid": "863c4a47d27a8f567de055ec086191ee", "score": "0.5689223", "text": "def do_load(self, line):\n with open(self.rosterFilePath, 'r') as csvFile:\n rosterReader = csv.DictReader(csvFile)\n for row in rosterReader:\n completedProblems = row['completedProblems'] if 'completedProblems' in row else 0\n currentBalloons = row['currentBalloons'] if 'currentBalloons' in row else 0\n contestant = Contestant(row['Room #'], row['Team #'],\n row['Check-In'], row['HackerRank'].lower(), \n row['Partner 1 Name'], row['Partner 2 Name'], \n completedProblems, currentBalloons)\n try: \n Contestant.add_to_cache(contestant)\n except Exception as e:\n continue", "title": "" }, { "docid": "6670fea3c870c995ac2a9826a1bb35d5", "score": "0.56890976", "text": "def import_csv(filename, csv_map=None, encoding=\"latin1\"):\n if not os.path.exists(filename):\n raise FileNotFoundError\n\n with open(filename, mode=\"r\", encoding=encoding, errors=\"ignore\") as csv_file:\n Part._import_csv_content(csv_file, csv_map)", "title": "" }, { "docid": "7e7e83b5d5a4445c0518663ba841302a", "score": "0.5683459", "text": "def __init__(self, csv_file):\n self.server_list = SLFile.convert_csv_to_dict_list(csv_file)", "title": "" }, { "docid": "5266981ae08f217e9facbdbd41d1217b", "score": "0.56708896", "text": "def load_csv(filename, label_col, num_test):\n\n X_train = []\n X_test = []\n num = 0\n with open(filename,'rt') as f:\n reader = csv.reader(f, delimiter=',')\n for row in reader:\n row1 = [float(item) for item in row if item != '\\0']\n last_ele = row1.pop(label_col)\n X_train.append(row1)\n X_test.append(int(last_ele))\n num+=1\n if num > num_test:\n break\n \n f.close()\n return X_train, X_test", "title": "" }, { "docid": "83af557eee48511af436f09dd9bb4771", "score": "0.5633198", "text": "def run(self) -> None:\n with open(self.import_csv_path) as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for row in csv_reader:\n aso_node = graph.SpaceObject.find_one(norad_id=row['aso_id'])\n if aso_node is None:\n continue\n is_compliant = row['is_compliant'] == 'True'\n compliance_nodes = aso_node.compliance.all()\n if compliance_nodes:\n compliance_node = compliance_nodes[0]\n compliance_node.is_compliant = is_compliant\n compliance_node.save()\n else:\n compliance_node = graph.Compliance(\n is_compliant=is_compliant\n )\n compliance_node.save()\n aso_node.compliance.connect(compliance_node)\n compliance_node.from_data_source.connect(\n self.data_source_node\n )", "title": "" }, { "docid": "9dee2a459d3b62f52f82e800bfef054c", "score": "0.5628896", "text": "def load_prod_table(csv_fname):\n left_col, right_col = 0, 3\n return load_csv(csv_fname, left_col, right_col)", "title": "" }, { "docid": "21e4c283554e332e93fdceb92a314174", "score": "0.5624825", "text": "def load_from_file_csv(cls):\n empty_list = []\n complete_name = cls.__name__+'.csv'\n try:\n with open(complete_name, 'r') as f:\n instance = cls.from_json_string(f.read())\n instance_list = [cls.create(**line) for line in instance]\n return 
instance_list\n except:\n return (empty_list)", "title": "" }, { "docid": "4cee894ea39247d1992edca7294f7f63", "score": "0.5606623", "text": "def load_destination_data(input_data, header_lines): # Move to own controller file??\n with open(input_data) as places:\n data = csv.reader(places, delimiter=',')\n num_col = len(next(data)) # count number of columns in the csv file\n places.seek(0) # Return csv reader back to start of file\n for i in range(0, header_lines): # skip specified number of header lines\n next(data, None)\n\n for place in data: # Add all vertices to the graph.\n address = place[1]\n vertex = model.graph.Vertex(address)\n graph_instance.add_vertex(vertex, address)\n\n places.seek(0) # Return csv reader back to start of file.\n for i in range(0, header_lines): # skip specified number of header lines\n next(data, None)\n\n index_counter = 0\n for place in data: # Add edges to the graph.\n # name = place[0]\n # key = place[1]\n i = 2\n while place[i] != '':\n if place[i] != '':\n vertex_a = graph_instance.get_vertex(graph_instance.vertex_key_list[index_counter])\n vertex_b = graph_instance.get_vertex(graph_instance.vertex_key_list[i - 2])\n distance = float(place[i])\n graph_instance.add_undirected_edge(vertex_a, vertex_b, distance)\n i += 1\n if i > num_col - 1:\n break\n\n index_counter += 1", "title": "" }, { "docid": "6c3dd2dc89274e5a9e68b3aa3bcc636c", "score": "0.5574019", "text": "def from_csv(file_path: str) -> \"CorrelationsData\":\n return CorrelationsData(data=pd.read_csv(file_path))", "title": "" }, { "docid": "39d2c32b2ce6bd037e9faf39ad6f051c", "score": "0.557101", "text": "def __init__(self, filename='social_network.csv'):\n with open(filename) as file:\n data = file.read().split(\"\\n\")\n \n pair = []\n names = set()\n for line in data[:-1]:\n line = line.split(\",\")\n names.add(line[0])\n names.add(line[1])\n pair.append(line)\n names = sorted(list(names))\n \n n = len(names)\n M = np.zeros((n,n))\n for k in pair:\n i, j = names.index(k[0]),names.index(k[1])\n M[i,j] = 1\n M[j,i] = 1\n \n self.names = names\n self.M = M\n self.R = effective_resistance(M)", "title": "" }, { "docid": "59582d82e29df5023dd641dcaeb763f3", "score": "0.5569314", "text": "def import_cities(self, path):\n labeled_cities = pd.read_csv(path)\n for i in range(100):\n x = labeled_cities.iloc[i,1]\n y = labeled_cities.iloc[i,2]\n self.tab_villes.append([x, y])", "title": "" }, { "docid": "5bdeec59eda957d65b3ce4892c5bafd5", "score": "0.55657744", "text": "def loadNeuralNetwork(filename):\n extn=filename.rsplit('.',1)[-1]\n if extn=='t7':\n return cv2.dnn.readNetFromTorch(filename)\n if extn=='xml':\n return cv2.dnn.readNetFromONNX(filename)\n raise IOError('Unknown file format for \"'+filename+'\"')", "title": "" }, { "docid": "bcf035ab6fa7768376fb6508a13f29d2", "score": "0.5562248", "text": "def load_label(csv_file):\n with open(csv_file, 'r') as label_file:\n labels = label_file.read()\n\n labels = labels.split(\",\")\n labels = [int(l) for l in labels]\n # Labels is a list of int, representing gesture for each frame\n return labels", "title": "" }, { "docid": "c2024c38a2bbbd2bdaf989c985dbfb66", "score": "0.55456424", "text": "def loadships(shipcsv):\n shipdict = shipreader(shipcsv)\n availableShips = craftassign(shipdict)\n return availableShips", "title": "" }, { "docid": "23712c0c4eccc53633e99ce25acdf01b", "score": "0.55411035", "text": "def load_csv(self, filename, sep=\",\"):\n\n return self.session.read.csv(\n filename,\n inferSchema=True,\n sep=sep,\n header=True\n )", "title": "" }, 
{ "docid": "fa889fb82f390b4988022fef57c072ad", "score": "0.5538708", "text": "def load_model_file(data_file):\n # Open the file with the data, read it, and split it into lines\n with open(data_file, 'r') as f:\n data = f.read()\n \n lines = data.splitlines()\n \n # Define the interfaces info \n int_info_begin_index = 2\n int_info_end_index = lines.index(\"NODES_TABLE,,,,\") - 1\n interface_lines = lines[int_info_begin_index:int_info_end_index]\n interface_set = set([]) \n node_list = []\n for interface_line in interface_lines:\n node_name, remote_node_name, name, cost, capacity = \\\n interface_line.split(',')\n interface_set.add(Interface(name, int(cost), int(capacity), Node(node_name),\n Node(remote_node_name)))\n node_list.append(Node(node_name))\n node_list.append(Node(remote_node_name))\n model = Model(interface_set, set(node_list))\n \n # Define the nodes info\n nodes_info_begin_index = int_info_end_index + 3\n nodes_info_end_index = lines.index(\"DEMANDS_TABLE,,,,\") -1\n node_lines = lines[nodes_info_begin_index:nodes_info_end_index]\n node_names = set([node.name for node in node_list])\n for node_line in node_lines:\n node_info = node_line.split(',')\n node_name = node_info[0]\n try:\n node_lat = int(node_info[1])\n except ValueError:\n node_lat = 0\n try:\n node_lon = int(node_info[2])\n except ValueError:\n node_lon = 0 \n if node_name not in node_names: # Pick up orphan nodes\n new_node = Node(node_name)\n model.add_node(new_node)\n new_node.lat = node_lat\n new_node.lon = node_lon\n else:\n model.get_node_object(node_name).lat = node_lat\n model.get_node_object(node_name).lon = node_lon\n \n # Define the demands info\n demands_info_begin_index = nodes_info_end_index + 3\n demands_lines = lines[demands_info_begin_index:]\n for demand_line in demands_lines:\n demand_info = demand_line.split(',')\n source = demand_info[0]\n dest = demand_info[1]\n traffic = int(demand_info[2])\n name = demand_info[3] \n if name == '':\n demand_name = 'none'\n else: \n demand_name = name \n model.add_demand(source, dest, traffic, demand_name)\n \n return model", "title": "" }, { "docid": "d1413decf89a10a4c5c57ba490db4023", "score": "0.553635", "text": "def openCsv():\n csvFile = 'BDO_app/modules/nodes/cityNodes.csv'\n return csvFile", "title": "" }, { "docid": "b216637a354afa4a13ebf51db370cad7", "score": "0.55337936", "text": "def load_dataset(csv_path):\n\n # read in and shuffle data\n df = pd.read_csv(csv_path).sample(frac=1)\n # drop indicator columns for destinations\n cols = [c for c in df.columns if c[:11] !=\n 'destination' and c[:14] != 'wind_direction']\n df = df[cols]\n # print(len(cols))\n # print(df.columns.values.tolist())\n\n inputs = df.drop('time_taxi_out', axis=1).values\n labels = df['time_taxi_out'].values\n return inputs, labels", "title": "" }, { "docid": "a9f7cf9a98d1a0de583fde1596254f16", "score": "0.5517911", "text": "def import_csv(self, csvfile, columns=None, delimiter=' ', quotechar='|'):\n if isinstance(csvfile, str):\n csvfile = open(csvfile, 'rb')\n import csv\n R = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar)\n if columns is None:\n columns = R.next()\n d = []\n for x in R:\n z = {}\n for i in range(len(x)):\n y = x[i]\n if y != '':\n if y.isdigit():\n y = eval(y)\n else:\n v = y.split('.')\n if len(v) == 2 and v[0].isdigit() and v[1].isdigit():\n y = eval(y)\n z[columns[i]] = y\n d.append(z)\n self.insert(d)", "title": "" }, { "docid": "08cf2abbd97ca76ef69d719d91aa8ba4", "score": "0.5512902", "text": "def load_csv(filename):\n with open(filename, 
'rb') as csvfile:\n dialect = csv.Sniffer().sniff(csvfile.read(1024))\n csvfile.seek(0)\n csv_data = csv.reader(csvfile, dialect)\n data = []\n for row in csv_data:\n data.append(row)\n return data", "title": "" }, { "docid": "091fb5851a65435b2b84c66fca0ef03e", "score": "0.5506476", "text": "def load_network_from_file(filename, fmt=None):\n # if no format has been given, get it from the file name\n if fmt is None:\n _, fmt = os.path.splitext(filename)\n fmt = fmt[1:]\n # decide which function to call to load the data\n if fmt == 'pickle':\n return _load_network_from_pickle(filename)\n elif fmt == 'json':\n return _load_network_from_json(filename)\n else:\n raise ValueError('Only pickle or json formats are supported.')", "title": "" }, { "docid": "baae61805c40247c078a14b9e8911484", "score": "0.5500594", "text": "def load_from_file_csv(cls):\n obj_list = []\n file_name = cls.__name__ + \".csv\"\n if cls.__name__ == \"Rectangle\":\n field_names = ['id', 'width', 'height', 'x', 'y']\n elif cls.__name__ == \"Square\":\n field_names = ['id', 'size', 'x', 'y']\n try:\n with open(file_name, mode=\"r\", encoding=\"utf-8\") as f:\n reader = csv.DictReader(f, fieldnames=field_names)\n for row in reader:\n dic = {}\n for key, value in dict(row).items():\n dic[key] = int(value)\n obj_list.append(cls.create(**dic))\n except:\n return []\n return obj_list", "title": "" }, { "docid": "2cb05094f5ed7ab5dce913655e0d3284", "score": "0.5500581", "text": "def load_data(self, fname):\n with open(fname) as f:\n data = csv.reader(f, delimiter=',')\n data = [row for row in data]\n tx = [map(float, x[1:len(x)]) for x in data[6:]]\n ty = [x[0] for x in data[6:]]\n for i, label in enumerate(ty):\n if label == 'BRICKFACE':\n ty[i] = 0\n elif label == 'SKY':\n ty[i] = 1\n elif label == 'FOLIAGE':\n ty[i] = 2\n elif label == 'CEMENT':\n ty[i] = 3\n elif label == 'WINDOW':\n ty[i] = 4\n elif label == 'PATH':\n ty[i] = 5\n elif label == 'GRASS':\n ty[i] = 6\n else:\n print 'error'\n ds = ClassificationDataSet(19, 1, nb_classes=7)\n for x, y in zip(tx, ty):\n ds.addSample(x, y)\n ds._convertToOneOfMany()\n return ds", "title": "" }, { "docid": "65c9e9aa8b116c154d60fa2f1185c5cb", "score": "0.5496548", "text": "def load_map(self, filename):\n self._graph = Graph()\n self._location = {}\n self._streetnames = {}\n\n with open(filename, 'r') as f:\n for line in f:\n elements = line.split(\",\")\n if(elements[0] == \"V\"):\n self._graph.add_vertex(int(elements[1]))\n self._location[int(elements[1])] = (self.process_coord(elements[2]),\n self.process_coord(elements[3]))\n elif (elements[0] == \"E\"):\n self._graph.add_edge((int(elements[1]), int(elements[2])))\n self._streetnames[(int(elements[1]), int(elements[2]))] = elements[3]", "title": "" }, { "docid": "55fc32b5964a413c53f0ad9b59103fa7", "score": "0.54956716", "text": "def __csv_to_nc__(file_name):\n load_file = pd.read_csv(file_name, header=0, index_col=0)\n load_file.index = pd.to_datetime(load_file.index)\n \n # convert to xarray.Dataset and drop unused local time column\n ds = load_file.to_xarray().drop(cest_col)\n var_names = [var for var in ds.data_vars]\n \n # convert again to datetime, won't be recognized otherwise\n ds[utc_col] = pd.to_datetime(ds[utc_col])\n \n # set unit variable for each variable; all MW, exept for some shares\n for var in var_names:\n unit = 'MW' if any(label in var for label in hasMWlbl) else 'share'\n ds[var].attrs.update(OrderedDict([('units', unit)]))\n \n # write to file\n ds.to_netcdf(load_path)", "title": "" }, { "docid": 
"9532a98922cf16fe2a04b9a441503d75", "score": "0.54931366", "text": "def load_network(name):\r\n net = Network([0])\r\n net.sizes = np.load(\"pretrained_networks/{0}/sizes.npy\".format(name))\r\n net.weights = np.load(\"pretrained_networks/{0}/weights.npy\".format(name))\r\n net.biases = np.load(\"pretrained_networks/{0}/biases.npy\".format(name))\r\n net.num_layers = len(net.sizes)\r\n return net", "title": "" }, { "docid": "51260f127265c158937fe23200daf337", "score": "0.54910463", "text": "def load_from_file_csv(cls):\n if not os.path.isfile(cls.__name__ + \".csv\"):\n return []\n with open(cls.__name__ + \".csv\", encoding=\"utf-8\") as f:\n csv_r = list(csv.DictReader(f))\n a = []\n for i in csv_r:\n i = {x: int(y) for x, y in i.items()}\n a[len(a):] = [cls.create(**i)]\n return a", "title": "" }, { "docid": "2d8de737e6445a95ccc14101df892172", "score": "0.5490962", "text": "def load_mac_addresses(self, path=None, file_name=None):\n self._path = path\n if self._path is None:\n self._path = os.getcwd() \n\n self._file_name = file_name\n if self._file_name is None:\n # TODO: get list of files here\n pass\n\n with open(file_name, \"r\") as csv_file:\n first_line = True\n for line in csv_file:\n if first_line:\n first_line = False\n continue\n line_split = line.split(\",\")\n self._base[line_split[0]] = [x for x in line_split[1:]]", "title": "" }, { "docid": "7fa30ee8e99aabaffb424d8ed94b2af8", "score": "0.54839677", "text": "def load_Network(Filename):\n print(\"Loading Network map from file \", Filename)\n g = SmartDigraph()\n hash_table = {}\n counter = 1\n \n lines = open(Filename, 'r').readlines()\n \n for i in lines:\n for j in range(2):\n line = i.split()\n if line[j] not in hash_table:\n hash_table[line[j]] = counter\n counter += 1\n \n for i in hash_table:\n g.addNode(SmartNode(hash_table[i]))\n \n for i in lines:\n line = i.split()\n start_node = g.getNode(str(hash_table[line[0]]))\n end_node = g.getNode(str(hash_table[line[1]]))\n lead_time = int(line[2])\n\n edge = WeightedEdge(start_node, end_node,lead_time) # creates the edge\n g.addEdge(edge) # adds the edge to the smartdigrahp\n\n return g, hash_table", "title": "" }, { "docid": "fe96f1f9ec2a76632bc7aa390d75c146", "score": "0.5463261", "text": "def read_travels_csv_file(filename):\n\n fileIN=open(filename, 'r')\n\n # Read the first line\n line=fileIN.readline()\n\n # Try to guess the seperator\n separator=None\n for sep in [';', ',', '\\t']:\n if line.strip().split(sep) == ns_header:\n separator=sep\n break\n else:\n error(\"I cannot recognise the specified file '{}', sorry.\".format(filename))\n error(\"Try exporting the file to csv with a semicolon (;) as field separator\")\n exit(-1)\n \n # Store all trips\n travels=list()\n\n # Read all the lines in the file\n # NOTE: entries in the file are not guaranteed to be ordered on time/date\n line=fileIN.readline()\n while line:\n # Remove trailing newline and split the line on the separator we found\n spline=line.split(separator)\n\n # If the line does not contain as many fields as the header, something is wrong\n if len(spline) != len(header):\n error(\"Error, invalid line detected:\\n{}\".format(line))\n\n # Unpack the split line, and make a Trip object out of it\n travels.append(Trip(*spline))\n line=fileIN.readline()\n\n # Close the file\n fileIN.close()\n return travels", "title": "" }, { "docid": "d10a81011185d1988e810e451bdb083a", "score": "0.54531014", "text": "def ReadCsv(url = 'http://developer.mbta.com/lib/rthr/red.csv'):\n fp = urllib2.urlopen(url)\n reader = 
csv.reader(fp)\n\n tss = []\n for t in reader:\n if t[5] != 'Kendall/MIT': continue \n if t[3] != 'Braintree': continue\n\n ts = TrainSpotting(t)\n tss.append(ts)\n\n fp.close()\n return tss", "title": "" }, { "docid": "f3b08ec7a8f359d6a664bc1f93639e58", "score": "0.54518944", "text": "def load_geolocations():\n\n with open(\"data/geolocation.csv\", \"rb\") as f:\n \n reader = csv.reader(f, delimiter=\",\")\n\n for row in reader: \n\n street_addr = row[0]\n city = row[1]\n state = row[2]\n latitude = row[3]\n longitude = row[4]\n elevation = row[5]\n timezone=row[6]\n\n geolocation = Geolocation(street_addr=street_addr, city=city, state=state, latitude=latitude, \n longitude=longitude, elevation=elevation, timezone=timezone)\n\n db.session.add(geolocation)\n db.session.commit()\n\n print \"Geolocation committed\"", "title": "" }, { "docid": "618114906e9262687fdcc448797a0f5d", "score": "0.544634", "text": "def load_data(self, train_csv=None, test_csv=None):\n\n if train_csv is not None and test_csv is not None:\n self.train_data_vector = pandas.read_csv(train_csv)\n self.test_data_vector = pandas.read_csv(test_csv)", "title": "" }, { "docid": "bbe6c47983b0f7f789aff6770cd7c3f6", "score": "0.5436664", "text": "def load_csv_from_path(self, csv_path):\n if not os.path.isabs(csv_path):\n csv_path = os.path.join(self.data_path, csv_path)\n\n # Deal with multiple possible extensions\n path_base, path_ext = os.path.splitext(csv_path)\n if path_ext == '.mp4':\n csv_path = path_base + '.csv'\n elif path_ext == '':\n csv_path = csv_path + '.csv'\n # Load the bounding box and format the timestamp\n box_data = np.loadtxt(csv_path, delimiter=',',\n dtype=float, skiprows=1,\n converters={0: lambda x: self.parse_time(x)})\n\n return box_data", "title": "" }, { "docid": "713d828d81ebb66a31ccd76f47794e01", "score": "0.54298455", "text": "def read_csv(request):\n graph_file = request.param\n dataset_path = graph_file.get_path()\n M = utils.read_csv_for_nx(dataset_path)\n M_cu = utils.read_csv_file(dataset_path)\n\n return M_cu, M, graph_file", "title": "" }, { "docid": "6574459bcb87e4653e9df4eba672a4f3", "score": "0.54286075", "text": "def loadFromFile(self, path):\n with open (path, \"rt\") as f:\n for line in f.readlines():\n card, cardNum = [int(x) for x in line.split(',')]\n self.add((card, cardNum))", "title": "" }, { "docid": "5b4b36c3f160721581ea4ecedce5eb90", "score": "0.54274607", "text": "def load_csv(input):\n\n data_countries = pd.read_csv(input)\n return data_countries", "title": "" }, { "docid": "50727d13cfa04bb896088b1610e1b853", "score": "0.541671", "text": "def load(self, hyperlinks):\n \tif Path(hyperlinks).is_file():\n \t\tself.g = nx.DiGraph()\n\t \twith open(hyperlinks, \"rb\") as ifile:\n\t \t\tfor line in ifile.readlines():\n\t \t\t\tdoc, links = line.split(\":\")\n\t \t\t\tdoc = int(doc)\n\t \t\t\tfor link in links.split(\";\"):\n\t \t\t\t\tself.g.add_edge(doc, int(link))\n\t \tlog.debug(\"%d documents et %d hyperliens chargés\", len(self.g), self.g.size())\n\t else:\n\t \tlog.error(\"Erreur lors de l'ouverture du fichier %s\", hyperlinks)", "title": "" }, { "docid": "bef65f244899491ffdbcae4b70dbbffa", "score": "0.5415988", "text": "def loadCsvFile(self,myFile):\n # open file\n myFile.open(mode='rb')\n # read the field names\n head = myFile.readline().decode()\n self.allFieldNames = head.split(',')\n for index in range(len(self.allFieldNames)):\n self.allFieldNames[index] = self.allFieldNames[index].strip()\n # load the table data (from the csv file) into a numpy nd array\n data = 
np.loadtxt(myFile,dtype='float',delimiter=',',ndmin=2,skiprows=1)\n # close file\n myFile.close()\n return data", "title": "" }, { "docid": "bef65f244899491ffdbcae4b70dbbffa", "score": "0.5415988", "text": "def loadCsvFile(self,myFile):\n # open file\n myFile.open(mode='rb')\n # read the field names\n head = myFile.readline().decode()\n self.allFieldNames = head.split(',')\n for index in range(len(self.allFieldNames)):\n self.allFieldNames[index] = self.allFieldNames[index].strip()\n # load the table data (from the csv file) into a numpy nd array\n data = np.loadtxt(myFile,dtype='float',delimiter=',',ndmin=2,skiprows=1)\n # close file\n myFile.close()\n return data", "title": "" }, { "docid": "f41675bf47059717aa58eca84e153c21", "score": "0.5413548", "text": "def load_dataset(self):\n print(\"Loading Dataset\")\n csv_retrieve_dataset(self.TRAINING_DATA_PATH,self.TRAINING_DATA_URL,self.COLUMN_NAMES)", "title": "" }, { "docid": "13ff2357baf8a0d38bca6ebddc1d9c39", "score": "0.5409063", "text": "def load_csv(file_name):\n if sys.version_info[0] < 3:\n lines = []\n infile = open(file_name, 'rb')\n else:\n lines = []\n infile = open(file_name, 'r', newline='')\n\n ant_dict = {}\n with infile as f:\n csvreader = csv.reader(f)\n for c, lines in enumerate(csvreader):\n if c == 0:\n continue\n ant_id = lines[0]\n if ant_id not in ant_dict:\n ant_dict[ant_id] = {}\n ant_dict[ant_id]['frames'] = []\n ant_dict[ant_id]['coordiantes'] = []\n if lines[-1]:\n ant_dict[ant_id]['frames'].append(\n '{:05d}'.format(int(lines[1])))\n ant_dict[ant_id]['coordiantes'].append([lines[2], lines[3]])\n\n return ant_dict", "title": "" }, { "docid": "be721ab29de43bf1abc58ec0148e2fae", "score": "0.5408601", "text": "def import_tofrom_csv(file_name, id_col_name, to_node_col, from_node_col):\n use_cols = [\n str(id_col_name), str(to_node_col), str(from_node_col)\n ]\n\n df = pd.read_csv(file_name, usecols=use_cols, dtype=str)\n network_df, indx_df = df_transform(\n df, id_col_name, to_node_col, from_node_col\n )\n return network_df, indx_df", "title": "" } ]
d85d71c32c197fa3a34b069127d452a1
Reads reference data from the original "new_reference.dat" file from ff_generator. Currently torsion and out_bends do not support reading in a value. Off-diagonal values of the Hessian are not supported. Must be called after setup.
[ { "docid": "0cb73e703624784c0f6107acfd7b7d63", "score": "0.60091716", "text": "def read_ref_from_refdat(self, filename, conv_angle=np.pi/180.0, conv_energy = 143.88, conv_length=1.0, read_fit_flag=True):\n assert self._setup\n #\n f = open(filename, \"r\")\n line = f.readline()\n if line[:3] != \"###\":\n raise IOError, \"This does not look like a traditional reference file\"\n line = f.readline()\n stop = False\n while not stop:\n sline = line.split()\n if sline[1] == \"stretch\":\n ind = map(string.atoi, sline[2:4])\n ref_val = string.atof(sline[4])*conv_length\n ref_2nd = string.atof(sline[5])*conv_energy\n weight = 0.0\n if len(sline) > 6:\n if sline[6] == \"fit\": weight = 1.0\n i, iglob = self.map_ric(\"str\", ind)\n if i >=0:\n self.ref_val_str[i] = ref_val\n self.ref_hes[iglob, iglob] = ref_2nd\n print \"read RIC stretch %20s ref values %10.5f %10.5f\" % (ind, ref_val, ref_2nd) \n if read_fit_flag:\n self.wgt_val_str[i] = weight\n self.wgt_hes[iglob, iglob] = weight\n if weight >0: print \" RIC is fitted\"\n else:\n print \"RIC stretch %s is not defiend\" % ind\n elif sline[1] == \"bend\":\n ind = map(string.atoi, sline[2:5])\n ref_val = string.atof(sline[5])*conv_angle\n ref_2nd = string.atof(sline[6])*conv_energy\n weight = 0.0\n if len(sline) > 7:\n if sline[7] == \"fit\": weight = 1.0\n i, iglob = self.map_ric(\"ibe\", ind)\n if i >=0:\n self.ref_val_ibe[i] = ref_val\n self.ref_hes[iglob, iglob] = ref_2nd\n print \"read RIC bend %20s ref values %10.5f %10.5f\" % (ind, ref_val, ref_2nd) \n if read_fit_flag:\n self.wgt_val_ibe[i] = weight\n self.wgt_hes[iglob, iglob] = weight\n if weight >0: print \" RIC is fitted\"\n else:\n print \"RIC bend %s is not defiend\" % ind\n elif sline[1] == \"wag\":\n ind = map(string.atoi, sline[2:6])\n ref_2nd = string.atof(sline[6])*conv_energy\n weight = 0.0\n if len(sline) > 7:\n if sline[7] == \"fit\": weight = 1.0\n i, iglob = self.map_ric(\"obe\", ind)\n if i >=0:\n self.ref_hes[iglob, iglob] = ref_2nd\n print \"read RIC wag %20s ref values %10.5f\" % (ind, ref_2nd) \n if read_fit_flag:\n self.wgt_hes[iglob, iglob] = weight\n if weight >0: print \" RIC is fitted\"\n else:\n print \"RIC wag %s is not defiend\" % ind\n elif sline[1] == \"torsion\":\n for i in xrange(2, len(sline)):\n if sline[i].count(\".\") == 1: break\n ind = map(string.atoi, sline[2:i])\n na = len(ind)\n ind = ind[na/2-1:na/2+1]\n weight = 0.0\n if len(sline) > i+1:\n if sline[i+1] == \"fit\": weight = 1.0\n ref_2nd = string.atof(sline[i])*conv_energy\n i, iglob = self.map_ric(\"tor\", ind)\n if i >=0:\n self.ref_hes[iglob, iglob] = ref_2nd\n print \"read RIC torsion %20s ref values %10.5f\" % (ind, ref_2nd) \n if read_fit_flag:\n self.wgt_hes[iglob, iglob] = weight\n if weight >0: print \" RIC is fitted\"\n else:\n print \"RIC torsion %s is not defiend\" % ind\n else:\n raise IOError, \"Unknown RIC type in reference file\"\n line = f.readline()\n if len(line)==0: stop=True\n # in the refernece_dat file the Hessian elements start with a line with \"###\" -> stop here\n if line[:3] == \"###\": stop=True\n f.close()\n return", "title": "" } ]
[ { "docid": "b5be3e4f061018379de1fd59914de34a", "score": "0.61490226", "text": "def Load_reference(self):\n if self.reffile is None:\n raise ValueError('No reference file specified')\n if os.path.isfile(self.reffile):\n hdu = fits.open(self.reffile)\n else:\n raise ValueError('{} Does not exist. Create the file with tess_reference.py'.format(self.reffile)) \n\n data = hdu[0].data\n cut = Cutout2D(data,(1024+44,1024),2048)\n self.reference = cut.data\n return", "title": "" }, { "docid": "85c4202012909b3b69aa844c8363b49b", "score": "0.5946181", "text": "def regenerate_reference_data():\n generate_data(2,\n 2,\n maxn=4,\n maxrdm=4,\n daga=[0],\n undaga=[1],\n dagb=[0],\n undagb=[0])", "title": "" }, { "docid": "d97ee0d4a5d6029e1576f30cdbf488d1", "score": "0.5614137", "text": "def traindata(output, resol,study, reference, foci, max_distance,w,feature):\n\n if checkConfig():\n pass\n else:\n print('Please run refhic config first.')\n print('Good bye!')\n sys.exit()\n\n dataParams={'resol':resol,'feature':feature,'w':w}\n reference = referenceMeta(reference)\n\n chromTest = {'chr15','chr16','chr17',15,16,17,'15','16','17'}\n chromVal = {'chr11','chr12',11,12,'11','12'}\n _mask = np.zeros(2 * (w * 2 + 1) ** 2 + 2 * (2 * w + 1) + 4)\n featureMask = feature.split(',')\n if '0' in featureMask:\n _mask[:] = 1\n if '1' in featureMask:\n _mask[:(2 * w + 1) ** 2] = 1\n if '2' in featureMask:\n _mask[(2 * w + 1) ** 2:2 * (2 * w + 1) ** 2] = 1\n if '3' in featureMask:\n _mask[2 * (2 * w + 1) ** 2:2 * (2 * w + 1) ** 2 + 2 * (2 * w + 1)] = 1\n if '4' in featureMask:\n _mask[2 * (2 * w + 1) ** 2 + 2 * (2 * w + 1)] = 1\n if '5' in featureMask:\n _mask[2 * (2 * w + 1) ** 2 + 2 * (2 * w + 1) + 1] = 1\n if '6' in featureMask:\n _mask[2 * (2 * w + 1) ** 2 + 2 * (2 * w + 1) + 2] = 1\n if '7' in featureMask:\n _mask[2 * (2 * w + 1) ** 2 + 2 * (2 * w + 1) + 3] = 1\n featureMask = np.ma.make_mask(_mask)\n\n\n\n studyBcools = [bcool(file_path+'::/resolutions/'+str(resol)) for file_path in study.split(',')]\n referencedBcools = [bcool(file_path+'::/resolutions/'+str(resol)) for file_path in reference['file'].to_list()]\n _foci = pd.read_csv(foci, header=None, sep='\\t')\n\n # read labels\n labels = {}\n\n for row in _foci.itertuples():\n chr1 = row[1]\n pos1 = row[2]\n chr2 = row[4]\n pos2 = row[5]\n label = list(row[7:])\n if chr1 == chr2 and abs(pos2 - pos1) < max_distance:\n if chr1 not in labels:\n labels[chr1] = {'contact':[],'label':[]}\n labels[chr1]['contact'].append((pos1,pos2))\n labels[chr1]['label'].append(label)\n\n\n dataParams['classes'] = len(label)\n dataParams['featureDim'] = np.sum(featureMask)\n print('This training data contains ', dataParams['featureDim'], ' features, and ', dataParams['classes'], ' targets per item')\n\n\n Xs = {} # for extra\n label={}\n X = {}\n for chrom in labels:\n if chrom not in X:\n label[chrom] = []\n for i in range(len(labels[chrom]['label'])):\n label[chrom].append(labels[chrom]['label'][i])\n\n for b in studyBcools:\n if chrom not in X:\n X[chrom] = [[] for _ in range(len(labels[chrom]['contact']))]\n bmatrix = b.bchr(chrom, max_distance=max_distance)\n for i in range(len(labels[chrom]['contact'])):\n x,y = labels[chrom]['contact'][i]\n\n mat,meta = bmatrix.square(x,y,w,'b')\n X[chrom][i].append(np.concatenate((mat.flatten(), meta)))\n\n for b in referencedBcools:\n if chrom not in Xs:\n Xs[chrom] = [[] for _ in range(len(labels[chrom]['contact']))]\n bmatrix = b.bchr(chrom, max_distance=max_distance)\n for i in range(len(labels[chrom]['contact'])):\n x,y = 
labels[chrom]['contact'][i]\n mat,meta = bmatrix.square(x,y,w,'b')\n Xs[chrom][i].append(np.concatenate((mat.flatten(), meta)))\n\n\n X_train = []\n Xs_train = []\n y_label_train = []\n X_test = []\n Xs_test = []\n y_label_test = []\n X_val = []\n Xs_val = []\n y_label_val = []\n for chrom in X:\n for i in range(len(X[chrom])):\n x=np.asarray(X[chrom][i])[:, featureMask]\n xs=np.asarray(Xs[chrom][i])[:, featureMask]\n if chrom in chromTest:\n X_test.append(x)\n Xs_test.append(xs)\n y_label_test.append(label[chrom][i])\n elif chrom in chromVal:\n X_val.append(x)\n Xs_val.append(xs)\n y_label_val.append(label[chrom][i])\n else:\n X_train.append(x)\n Xs_train.append(xs)\n y_label_train.append(label[chrom][i])\n del X,Xs\n with open(output+'.pkl', 'wb') as handle:\n pickle.dump((dataParams,X_train, Xs_train, y_label_train,\n X_test, Xs_test, y_label_test,\n X_val, Xs_val, y_label_val), handle, protocol=pickle.HIGHEST_PROTOCOL)", "title": "" }, { "docid": "e35be5e75be2c84b29336d64029e8491", "score": "0.5596661", "text": "def gen_reference(in_img,fwhm=5,newpath=None):\n import nibabel as nb \n import numpy as np\n import os \n from nibabel.processing import smooth_image\n newpath = Path(newpath or \".\")\n ss=check_img(in_img)\n if ss == 0: \n ref_data=nb.load(in_img).get_fdata()\n else: \n nii = nb.load(in_img).get_fdata()\n ref_data=np.mean(nii,axis=3)\n new_file = nb.Nifti1Image(dataobj=ref_data,header=nb.load(in_img).header,\n affine=nb.load(in_img).affine)\n\n new_file = smooth_image(new_file,fwhm=fwhm)\n out_file = fname_presuffix('aslref', suffix=\"_reference.nii.gz\", newpath=str(newpath.absolute()))\n new_file.to_filename(out_file)\n return out_file", "title": "" }, { "docid": "0b631b81068cd0edffa6ac9a4bb507dd", "score": "0.54430836", "text": "def read_foreign(self):\n ffile = pd.read_csv(INPUTPATH + 'BEA/international-by-industry.csv')\n ffile.set_index('indcode', inplace=True)\n self.foreign = copy.deepcopy(ffile)", "title": "" }, { "docid": "130fc29d15da1c20f1afe1be162365a7", "score": "0.5427231", "text": "def read_ric(self, filename):\n f = open(filename, \"r\")\n line = f.readline()\n if line[:3] != \"###\":\n raise IOError, \"This does not look like a traditional reference file\"\n line = f.readline()\n stop = False\n while not stop:\n sline = line.split()\n if sline[1] == \"stretch\":\n ind = map(string.atoi, sline[2:4])\n self.add_stretch(ind)\n elif sline[1] == \"bend\":\n ind = map(string.atoi, sline[2:5])\n self.add_in_bend(ind)\n elif sline[1] == \"wag\":\n ind = map(string.atoi, sline[2:6])\n self.add_out_bend(ind)\n elif sline[1] == \"torsion\":\n for i in xrange(2, len(sline)):\n if sline[i].count(\".\") == 1: break\n ind = map(string.atoi, sline[2:i])\n self.add_torsion(ind)\n else:\n raise IOError, \"Unknown RIC type in reference file\"\n line = f.readline()\n if len(line)==0: stop=True\n # in the refernece_dat file the Hessian elements start with a line with \"###\" -> stop here\n if line[:3] == \"###\": stop=True\n f.close()\n # HACK\n # assumption ... non peridic .. 
fix\n self.add_eckart_rots()\n return", "title": "" }, { "docid": "344d3c97a83e1549ea6cc41eeb6a6007", "score": "0.54000765", "text": "def read_reference(self):\n ref = general.load_csv(self.reference_filename, drop_gene_names=False)\n self.gene_symbol_dict = {name: wbgene for name, wbgene in zip(ref[self.ref_gene_col], ref[self.ref_wbgene_col])\n if not pd.isna(name)}\n self.sequence_dict = {name: wbgene for name, wbgene in zip(ref[self.ref_seq_col], ref[self.ref_wbgene_col])\n if not pd.isna(name)}\n\n split_other_id = ref[self.ref_other_col].str.split(pat=\";\")\n for namelst, wbgene in zip(split_other_id, ref[self.ref_wbgene_col]):\n if isinstance(namelst, list):\n for name in namelst:\n self.other_id_dict[name] = wbgene", "title": "" }, { "docid": "eeb386a58ffe5e49d2eb27ee88412acf", "score": "0.53429496", "text": "def setup(self):\n # now let us do all the extra stuff to map rics and allocate python arrays for the reference\n self.nstretch = len(self.pd.ric._stretches)\n self.first_str = 0\n self.nin_bend = len(self.pd.ric._in_bends)\n self.first_ibe = self.first_str+self.nstretch\n self.nout_bend = len(self.pd.ric._out_bends)\n self.first_obe = self.first_ibe+self.nin_bend\n self.nlin_bend = len(self.pd.ric._lin_bends)\n self.first_lbe = self.first_obe+self.nout_bend\n self.ntorsion = len(self.pd.ric._torsions)\n self.first_tor = self.first_lbe+self.nlin_bend\n # keep a list of central atoms for all torsions\n self.tor_cent = []\n for t in self.pd.ric._torsions:\n na = len(t)\n self.tor_cent.append(t[na/2-1:na/2+1])\n # Allocate a reference structure/hessian array\n self.ref_val_str = np.zeros([self.nstretch],dtype=\"float64\")\n self.wgt_val_str = np.zeros([self.nstretch],dtype=\"float64\")\n self.ref_val_ibe = np.zeros([self.nin_bend],dtype=\"float64\")\n self.wgt_val_ibe = np.zeros([self.nin_bend],dtype=\"float64\")\n self.ref_val_obe = np.zeros([self.nout_bend],dtype=\"float64\")\n self.wgt_val_obe = np.zeros([self.nout_bend],dtype=\"float64\")\n self.ref_val_lbe = np.zeros([self.nlin_bend],dtype=\"float64\")\n self.wgt_val_lbe = np.zeros([self.nlin_bend],dtype=\"float64\")\n self.ref_val_tor = np.zeros([self.ntorsion],dtype=\"float64\")\n self.wgt_val_tor = np.zeros([self.ntorsion],dtype=\"float64\")\n self.ref_hes = np.zeros([self.pd.ric.nric, self.pd.ric.nric], dtype=\"float64\")\n self.wgt_hes = np.zeros([self.pd.ric.nric, self.pd.ric.nric], dtype=\"float64\")\n # set some defaults\n self.fact_str = 1.0\n self.fact_ibe = 1.0\n self.fact_obe = 1.0\n self.fact_lbe = 1.0\n self.fact_tor = 1.0\n self.cycle = 0\n self.fdiagnostics = open('ric_fit_%s.punch' % self.tag, 'w')\n # We use here the original convention to compare Hessian elements in milidyn*A\n self.fact_hes = 1.0/143.88\n # generate referece data \n self.generate_reference(self.pd.get_cell())\n self.riclist = []\n self.riclist.append(self.pd.ric._stretches)\n self.riclist.append(self.pd.ric._in_bends)\n self.riclist.append(self.pd.ric._out_bends)\n self.riclist.append(self.pd.ric._lin_bends)\n self.riclist.append(self.pd.ric._torsions)\n self.firstrics = []\n self.firstrics.append(self.first_str)\n self.firstrics.append(self.first_ibe)\n self.firstrics.append(self.first_obe)\n self.firstrics.append(self.first_lbe)\n self.firstrics.append(self.first_tor)\n return", "title": "" }, { "docid": "403e1c93bb6687b44869880662d2189e", "score": "0.5340942", "text": "def reference_offset(data):\n offset = data[:, :, 0, 0] # Reference point: person 0, joint 0 -> NTURGB+D\n data -= offset[:, :, None, None]\n return data", "title": "" 
}, { "docid": "bd078f174079b22353fd75b6d7e6bfbe", "score": "0.52440065", "text": "def read_body_inc(f,verbose,jj,glueball_corr) :\n for ibin in range(0, numbin):\n for iopA in range(0,numop) :\n for iblockA in range(0, nblock) :\n for iopB in range(0,numop) : \n for iblockB in range(0, nblock) :\n for t in range(0, Ntmax) :\n \n try:\n bbb = f.read(8)\n except IOError as e:\n print (\"I/O error({0}): {1}\".format(e.errno, e.strerror))\n except:\n print (\"Unexpected error:\", sys.exc_info()[0])\n raise\n\n vvv = struct.unpack('d',bbb)\n if verbose :\n print t, vvv[0]\n \n glueball_corr[jj, iblockA,iblockB,t, iopA,iopB, ibin] = vvv[0]\n\n\n\n f.close()", "title": "" }, { "docid": "7da5a38e4c47d3f065ff7d0c7d1d8744", "score": "0.5213994", "text": "def read_QM_in( network_architecture, scaling_factors, filename, stepnumber, options ): \n #get path of QM.in\n pathQMNN=options['QMoutpath']\n #create QM_in object (imported from QM_transformer) that holds all information from the QM.in-file \n raw_data = QM_in( filename, stepnumber, pathQMNN )\n #get descriptors\n descriptors = raw_data.descriptors#_coulomb\n descriptors = np.array(descriptors)\n states = raw_data.states\n nmstates = raw_data.nmstates\n n_atoms = raw_data.n_atoms\n nstates = raw_data.nstates\n spin = raw_data.spin\n #charges = raw_data.charges\n istates = raw_data.istates\n n_singlets = raw_data.n_singlets\n n_triplets = raw_data.n_triplets\n input_dim = len( descriptors )\n\n if input_dim != network_architecture[0][0]:\n print( 'Descriptor dimension of input and training data not matching.' )\n exit()\n\n #scale descriptors \n descriptors = ( descriptors - scaling_factors[0] ) / scaling_factors[1]\n\n #load descriptors in theano shared variables\n descriptors = theano.shared( np.asarray(descriptors,dtype=theano.config.floatX), borrow=True ) \n xyz_coords = []\n xyz_coords.append(raw_data.geoms)\n\n return descriptors, xyz_coords, states, n_atoms, nstates, nmstates, istates, spin, n_singlets, n_triplets", "title": "" }, { "docid": "bf953df72149f789baabb51a18f2951f", "score": "0.52125144", "text": "def PullReadIrefCalibrationFile(self):\n if os.path.isfile(self.gc_step1_filename):\n cmdstring = 'del ' + self.gc_step1_filename\n os.system(cmdstring) \n cmdstring = 'adb pull /factory_setting/' + self.gc_step1_filename\n os.system(cmdstring)\n self.r_iref_values = np.zeros(self.NumLEDs)\n self.g_iref_values = np.zeros(self.NumLEDs)\n self.b_iref_values = np.zeros(self.NumLEDs)\n with open(self.gc_step1_filename, 'r') as f:\n for i in range(self.NumLEDs):\n linestring = f.readline()\n strlist = str.split(linestring, ' ')\n self.r_iref_values[i] = int(strlist[0])\n self.g_iref_values[i] = int(strlist[1])\n self.b_iref_values[i] = int(strlist[2])\n cmdstring = 'del ' + self.gc_step1_filename\n os.system(cmdstring)\n self.WriteIrefRegisters()", "title": "" }, { "docid": "a8d7072f845d6fcf170c24d9163675ea", "score": "0.5206732", "text": "def get_full_data(cur_params):\n path_prefix = './'\n if cur_params['monkey']=='D':\n data = sio.loadmat(path_prefix+'drakeFeb.mat')\n else:\n data = sio.loadmat(path_prefix+'cousFeb.mat')\n\n # Built augmented data\n emg2 = preprocess_array(data['D'][0, 0]['EMG'])\n time_axis, time_inds1, time_inds2 = get_time_axis(data['D'][0, 0]['KIN'])\n y_data1 = emg2[time_axis]\n p = y_data1.shape[-1]\n\n # Build inputs\n m = 2\n u_data1 = create_input_array(y_data1.shape)\n\n # Augmented data\n # For regularizing the network -- it must fit actual and augmented data\n period = 
int(np.round(np.diff(time_inds2).mean()))\n y_cat1 = augmented_data(emg2, time_inds1, period=period, tiles=10)\n y_cat1 = y_cat1[::25]\n y_cat2 = augmented_data(emg2, time_inds2, period=period, tiles=10)\n y_cat2 = y_cat2[::25]\n\n u_cat1 = create_input_array(y_cat1.shape)\n u_cat2 = create_input_array(y_cat2.shape)\n\n sequence_length = [y_data1.shape[0], y_cat1.shape[0], y_cat2.shape[0]]\n y_data = np.zeros((np.max(sequence_length), 4*3, p))\n u_data = np.zeros((np.max(sequence_length), 4*3, m))\n\n y_data[:sequence_length[0], 0:4, :] = y_data1\n y_data[:sequence_length[1], 4:8, :] = y_cat1\n y_data[:sequence_length[2], 8:12, :] = y_cat2\n\n u_data[:sequence_length[0], 0:4, :] = u_data1\n u_data[:sequence_length[1], 4:8, :] = u_cat1\n u_data[:sequence_length[2], 8:12, :] = u_cat2\n return u_data, y_data", "title": "" }, { "docid": "6f5c1b50de304bd33d22a134c7667f31", "score": "0.51948804", "text": "def load_physician_referral_data(infilename):\n tmpdir = tempfile.mkdtemp()\n try:\n archive = zipfile.ZipFile(infilename, 'r')\n archive.extract('physician-shared-patient-patterns-2014-days30.txt', tmpdir)\n filename = os.path.join(tmpdir, \"physician-shared-patient-patterns-2014-days30.txt\")\n archive.close()\n context = snap.TTableContext()\n schema = snap.Schema()\n ## schema.Add(snap.TStrTAttrPr(\"NPI_1\", snap.atInt))\n ## schema.Add(snap.TStrTAttrPr(\"NPI_2\", snap.atInt))\n # the above 2 lines worked with SNAP 4.0.0 on VLSCI \n # but now using SNAP 4.1.0\n # on hpc.ics.usi.ch find that all ids are -1 so graph wrong.\n # Cannot work out why so changed to string not int to try to fix it:\n schema.Add(snap.TStrTAttrPr(\"NPI_1\", snap.atStr))\n schema.Add(snap.TStrTAttrPr(\"NPI_2\", snap.atStr))\n ## schema.Add(snap.TStrTAttrPr(\"count\", snap.atInt))\n ## schema.Add(snap.TStrTAttrPr(\"unique_bene\", snap.atInt))\n ## schema.Add(snap.TStrTAttrPr(\"same_day_count\", snap.atInt))\n # The above 3 lines also worked fine with SNAP 4.0.0 before but\n # now fail on SNAP 4.1.0 (seems to be due to spaces in CSV fields,\n # not inexplicable like first two which have no spaces) but not using\n # them at the moment anyway so easier to just make (unused) strings:\n schema.Add(snap.TStrTAttrPr(\"count\", snap.atStr))\n schema.Add(snap.TStrTAttrPr(\"unique_bene\", snap.atStr))\n schema.Add(snap.TStrTAttrPr(\"same_day_count\", snap.atStr))\n table = snap.TTable.LoadSS(schema, filename, context, \",\", snap.TBool(False))\n G = snap.ToGraph(snap.PNGraph, table, \"NPI_1\", \"NPI_2\", snap.aaFirst) \n finally:\n cleanup_tmpdir(tmpdir)\n\n return G", "title": "" }, { "docid": "ae262497a0c062583959b4412d45d2c7", "score": "0.519233", "text": "def test_ref_degenerate():\n info = read_info(ctf_fname)\n # exclude ref by default\n projs = compute_proj_hfc(info)\n meg_names = [\n info[\"ch_names\"][pick]\n for pick in pick_types(info, meg=True, ref_meg=False, exclude=[])\n ]\n assert len(projs) == 3\n assert projs[0][\"desc\"] == \"HFC: l=1 m=-1\"\n assert projs[1][\"desc\"] == \"HFC: l=1 m=0\"\n assert projs[2][\"desc\"] == \"HFC: l=1 m=1\"\n assert projs[0][\"data\"][\"col_names\"] == meg_names\n meg_ref_names = [\n info[\"ch_names\"][pick]\n for pick in pick_types(info, meg=True, ref_meg=True, exclude=[])\n ]\n projs = compute_proj_hfc(info, picks=(\"meg\", \"ref_meg\"))\n assert projs[0][\"data\"][\"col_names\"] == meg_ref_names\n\n # Degenerate\n info = read_info(fif_fname)\n compute_proj_hfc(info) # smoke test\n with pytest.raises(ValueError, match=\"Only.*could be interpreted as MEG\"):\n 
compute_proj_hfc(info, picks=[0, 330]) # one MEG, one EEG\n info[\"chs\"][0][\"loc\"][:] = np.nan # first MEG proj\n with pytest.raises(ValueError, match=\"non-finite projectors\"):\n compute_proj_hfc(info)\n info_eeg = pick_info(info, pick_types(info, meg=False, eeg=True))\n with pytest.raises(ValueError, match=r\"picks \\(\\'meg\\'\\) could not be\"):\n compute_proj_hfc(info_eeg)", "title": "" }, { "docid": "9e85d0b782e7b6bd3b291741b4d15c0b", "score": "0.5188064", "text": "def read_infit(self): \n from astropy.io import ascii\n self.fdat = ascii.read(self.cdir+self.bdir+self.ifit)\n self.ldat = ascii.read(self.cdir+self.bdir+self.loop)\n self.tdat = join(self.fdat,self.ldat,keys=['mod_nam'])\n\n self.comp_tsig()\n self.read_model()\n #remove some bad coronal loops\n\n#use model id for abbicus\n self.tdat['mod_id'] = [int(i.split('_')[0].replace('model','')) for i in self.tdat['mod_nam']]", "title": "" }, { "docid": "839e5bfbf6679cdb08e2931f1be010eb", "score": "0.5169292", "text": "def test_fqe_data_manipulation():\n test = fqe_data.FqeData(1, 1, 2)\n ref = numpy.random.rand(2, 2) + 1.j * numpy.random.rand(2, 2)\n test.set_wfn(strategy='from_data', raw_data=ref)\n assert numpy.allclose(test.beta_inversion(), ref[:, (1, 0)])\n test.conj()\n assert numpy.allclose(test.coeff, numpy.conj(ref))", "title": "" }, { "docid": "d6c7802d7a5b79dfd6337fe00870cef3", "score": "0.5151748", "text": "def read_op2_dynamics(self):\n key = self._get_key()\n if self._ibytes == 4:\n header_str = struct.Struct(self._endian + b'iii')\n hbytes = 12\n else:\n header_str = struct.Struct(self._endian + b'qqq')\n hbytes = 24\n\n eot = 0\n print('self._intstr = %r' % self._intstr)\n data = np.zeros(0, dtype=self._intstr)\n while not eot:\n while key > 0:\n self._fileh.read(4) # reclen\n header = header_str.unpack(self._fileh.read(hbytes))\n if header == (7107, 71, 138):\n if key < self._rowsCutoff:\n bytes = (key-3)*self._ibytes\n ndata = struct.unpack(self._intstru % (key-3),\n self._fileh.read(bytes))\n else:\n ndata = np.fromfile(self._fileh,\n self._intstr, key-3)\n data = np.hstack((data, ndata))\n else:\n self._fileh.seek((key-3)*self._ibytes, 1)\n self._fileh.read(4) # endrec\n key = self._get_key()\n self._skip_key(2)\n eot, key = self._read_op2_end_of_table()\n\n if np.any(data):\n L = len(data)\n mult5 = L == 5*(L // 5)\n mult6 = L == 6*(L // 6)\n err1 = ('Could not determine if TLOADs are 5 or 6 rows! '\n 'Both work. Routine needs updating.')\n err2 = ('Could not determine if TLOADs are 5 or 6 rows! '\n 'Neither work. 
Routine needs updating.')\n if mult5:\n mindelta5 = np.min(np.diff(data[0::5]))\n if mult6:\n mindelta6 = np.min(np.diff(data[0::6]))\n if mult5:\n if mult6:\n # L is multiple of both 5 and 6:\n if mindelta5 > 0:\n if mindelta6 > 0:\n raise ValueError(err1)\n rows = 5\n else:\n if mindelta6 > 0:\n rows = 6\n else:\n raise ValueError(err2)\n else:\n if mindelta5 > 0:\n rows = 5\n else:\n raise ValueError(err2)\n elif mult6:\n if mindelta6 > 0:\n rows = 6\n else:\n raise ValueError(err2)\n else:\n raise ValueError(err2)\n data = np.reshape(data, (rows, -1), order='F')\n return data", "title": "" }, { "docid": "0df54f7668b50397167a12fb6fff7471", "score": "0.5142007", "text": "def read_body(f,verbose,jj,glueball_corr) :\n for ibin in range(0, numbin):\n for iopA in range(0,numop) :\n for iblockA in range(0, nblock) :\n for iopB in range(0,numop) : \n for iblockB in range(0, nblock) :\n for t in range(0, Ntmax) :\n \n try:\n bbb = f.read(8)\n except IOError as e:\n print (\"I/O error({0}): {1}\".format(e.errno, e.strerror))\n except:\n print (\"Unexpected error:\", sys.exc_info()[0])\n raise\n\n vvv = struct.unpack('d',bbb)\n if verbose :\n print (t, vvv[0])\n\n \n glueball_corr[jj, iblockA,iblockB,t, iopA,iopB, ibin] = vvv[0]\n\n\n\n f.close()", "title": "" }, { "docid": "02d2d25a183dbde04caaec41861f8752", "score": "0.5140673", "text": "def test_io_forward(tmp_path):\n # do extensive tests with MEEG + grad\n n_channels, n_src = 366, 108\n fwd = read_forward_solution(fname_meeg_grad)\n assert isinstance(fwd, Forward)\n fwd = read_forward_solution(fname_meeg_grad)\n fwd = convert_forward_solution(fwd, surf_ori=True)\n leadfield = fwd[\"sol\"][\"data\"]\n assert_equal(leadfield.shape, (n_channels, n_src))\n assert_equal(len(fwd[\"sol\"][\"row_names\"]), n_channels)\n fname_temp = tmp_path / \"test-fwd.fif\"\n with pytest.warns(RuntimeWarning, match=\"stored on disk\"):\n write_forward_solution(fname_temp, fwd, overwrite=True)\n\n fwd = read_forward_solution(fname_meeg_grad)\n fwd = convert_forward_solution(fwd, surf_ori=True)\n fwd_read = read_forward_solution(fname_temp)\n fwd_read = convert_forward_solution(fwd_read, surf_ori=True)\n leadfield = fwd_read[\"sol\"][\"data\"]\n assert_equal(leadfield.shape, (n_channels, n_src))\n assert_equal(len(fwd_read[\"sol\"][\"row_names\"]), n_channels)\n assert_equal(len(fwd_read[\"info\"][\"chs\"]), n_channels)\n assert \"dev_head_t\" in fwd_read[\"info\"]\n assert \"mri_head_t\" in fwd_read\n assert_array_almost_equal(fwd[\"sol\"][\"data\"], fwd_read[\"sol\"][\"data\"])\n\n fwd = read_forward_solution(fname_meeg)\n fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, use_cps=False)\n with pytest.warns(RuntimeWarning, match=\"stored on disk\"):\n write_forward_solution(fname_temp, fwd, overwrite=True)\n fwd_read = read_forward_solution(fname_temp)\n fwd_read = convert_forward_solution(\n fwd_read, surf_ori=True, force_fixed=True, use_cps=False\n )\n assert repr(fwd_read)\n assert isinstance(fwd_read, Forward)\n assert is_fixed_orient(fwd_read)\n assert_forward_allclose(fwd, fwd_read)\n\n fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, use_cps=True)\n leadfield = fwd[\"sol\"][\"data\"]\n assert_equal(leadfield.shape, (n_channels, 1494 / 3))\n assert_equal(len(fwd[\"sol\"][\"row_names\"]), n_channels)\n assert_equal(len(fwd[\"info\"][\"chs\"]), n_channels)\n assert \"dev_head_t\" in fwd[\"info\"]\n assert \"mri_head_t\" in fwd\n assert fwd[\"surf_ori\"]\n with pytest.warns(RuntimeWarning, match=\"stored on disk\"):\n 
write_forward_solution(fname_temp, fwd, overwrite=True)\n fwd_read = read_forward_solution(fname_temp)\n fwd_read = convert_forward_solution(\n fwd_read, surf_ori=True, force_fixed=True, use_cps=True\n )\n assert repr(fwd_read)\n assert isinstance(fwd_read, Forward)\n assert is_fixed_orient(fwd_read)\n assert_forward_allclose(fwd, fwd_read)\n\n fwd = read_forward_solution(fname_meeg_grad)\n fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, use_cps=True)\n leadfield = fwd[\"sol\"][\"data\"]\n assert_equal(leadfield.shape, (n_channels, n_src / 3))\n assert_equal(len(fwd[\"sol\"][\"row_names\"]), n_channels)\n assert_equal(len(fwd[\"info\"][\"chs\"]), n_channels)\n assert \"dev_head_t\" in fwd[\"info\"]\n assert \"mri_head_t\" in fwd\n assert fwd[\"surf_ori\"]\n with pytest.warns(RuntimeWarning, match=\"stored on disk\"):\n write_forward_solution(fname_temp, fwd, overwrite=True)\n fwd_read = read_forward_solution(fname_temp)\n fwd_read = convert_forward_solution(\n fwd_read, surf_ori=True, force_fixed=True, use_cps=True\n )\n assert repr(fwd_read)\n assert isinstance(fwd_read, Forward)\n assert is_fixed_orient(fwd_read)\n assert_forward_allclose(fwd, fwd_read)\n\n # test warnings on bad filenames\n fwd = read_forward_solution(fname_meeg_grad)\n fwd_badname = tmp_path / \"test-bad-name.fif.gz\"\n with pytest.warns(RuntimeWarning, match=\"end with\"):\n write_forward_solution(fwd_badname, fwd)\n with pytest.warns(RuntimeWarning, match=\"end with\"):\n read_forward_solution(fwd_badname)\n\n fwd = read_forward_solution(fname_meeg)\n write_forward_solution(fname_temp, fwd, overwrite=True)\n fwd_read = read_forward_solution(fname_temp)\n assert_forward_allclose(fwd, fwd_read)", "title": "" }, { "docid": "136b7409a35513fc514c65435d29a833", "score": "0.50885725", "text": "def get_ref(in_file):\n with open(\"./input_data/\" + in_file) as in_handle:\n for title, seq in SimpleFastaParser(in_handle):\n ref_seq = seq\n break\n\n return ref_seq", "title": "" }, { "docid": "909e8ffcbc7f64e1cde32b68b3f5f296", "score": "0.507998", "text": "def compute_Fnat_fast(self,ref_pairs=None,cutoff=5):\n # read the izone file\n if ref_pairs is None:\n residue_pairs_ref = self.compute_residue_pairs_ref(cutoff,save_file=False)\n\n elif not os.path.isfile(ref_pairs):\n self.compute_residue_pairs_ref(cutoff,save_file=True,filename=ref_pairs)\n f = open(ref_pairs,'rb')\n residue_pairs_ref = pickle.load(f)\n f.close()\n else:\n f = open(ref_pairs,'rb')\n residue_pairs_ref = pickle.load(f)\n f.close()\n\n # create a dict of the ecoy data\n if isinstance(self.decoy,str) and os.path.isfile(self.decoy):\n with open(self.decoy,'r') as f:\n data_decoy = f.readlines()\n decoy_name = os.path.basename(self.decoy)\n elif isinstance(self.decoy,np.ndarray):\n data_decoy = [l.decode('utf-8') for l in self.decoy]\n decoy_name = 'decoy'\n else:\n raise ValueError('Decoy not found')\n\n # read the decoy data\n atom_decoy, xyz_decoy = [],[]\n residue_xyz = {}\n residue_name = {}\n\n # go through all the lines\n # that starts with ATOM\n for line in data_decoy:\n\n if line.startswith('ATOM'):\n\n # chain ID\n chainID = line[21]\n if chainID == ' ':\n chainID = line[72]\n\n # atom info\n resSeq = int(line[22:26])\n resName = line[17:20].strip()\n name = line[12:16].strip()\n\n # position\n x,y,z = float(line[30:38]), float(line[38:46]), float(line[46:54])\n\n # dict entry\n key = (chainID,resSeq,resName)\n\n # create the dict entry if necessary\n if key not in residue_xyz.keys():\n residue_xyz[key] = []\n residue_name[key] = 
[]\n\n # we exclude the Hydrogens from the search\n if name[0] != 'H':\n residue_xyz[key].append([x,y,z])\n residue_name[key].append(name)\n\n # loop over the residue pairs of the\n # and increment common if an atom pair is close enough\n nCommon,nTotal = 0,0\n for resA,resB_list in residue_pairs_ref.items():\n if resA in residue_xyz:\n xyzA = residue_xyz[resA]\n for resB in resB_list:\n if resB in residue_xyz.keys():\n xyzB = residue_xyz[resB]\n dist_min = np.min(np.array([ np.sqrt(np.sum((np.array(p1)-np.array(p2))**2)) for p1 in xyzA for p2 in xyzB ]))\n if dist_min<=cutoff:\n nCommon += 1\n nTotal += 1\n else:\n msg = '\\t FNAT Warning could not find residue: ', resA, ' in: ',decoy_name\n _printif(msg,self.verbose)\n\n # normalize\n return nCommon/nTotal", "title": "" }, { "docid": "35d63de85a9d0930b49815b0247f47dc", "score": "0.50795496", "text": "def read_data(self):\n import os\n import numpy as np\n\n if isinstance(self.filename, str):\n f = open(self.filename)\n nitpi2f = None\n itpi2f = None\n i2f = []\n ni2f = []\n\n for n,line in enumerate(f):\n if 'k-points in 1st BZ' in line:\n #initiate temporary array\n self.nitpi2f = np.array(ni2f)\n i2f = []\n\n if 'KPOINTS_INTER' in line:\n # save the first array\n self.nitpi2f = np.array(ni2f)\n\n if 't-inv' in line:\n # read the mapping indeces\n i2f.append(int(line.split()[4])-1)\n ni2f.append((int(line.split()[4])-1,float(line.split()[0]),float(line.split()[1]),float(line.split()[2]),float(line.split()[3])))\n\n self.itpi2f = np.array(i2f)\n #self.nitpi2f = np.array(ni2f)", "title": "" }, { "docid": "3969afca6b74e3d99bf20fa077569dee", "score": "0.5076639", "text": "def loadub(self, file):\r\n ub=None\r\n pos=None\r\n cons_list=None\r\n latt=None\r\n azi_ref=None\r\n azi_hkl=None\r\n \r\n if isinstance(file,int):\r\n file=self.listub(ret=True)[file]\r\n \r\n if str(file).endswith('.txt'):\r\n with open (file,'r') as f:\r\n print(\"reading .txt file\\n\",file)\r\n \r\n lines=f.read().split('\\n')\r\n #print(lines)\r\n \r\n #go through the file looking for markers\r\n for i,line in enumerate(lines):\r\n #line=line.strip().split()\r\n #print(line,line[0])\r\n #read the ub matrix rows\r\n if(line=='ub'):\r\n ub=np.zeros((3,3))\r\n #print(\"ub marker\")\r\n for j in range(3):\r\n ubrow=lines[i+j+1].strip().split()\r\n ubrow=[float(x) for x in ubrow]\r\n ub[j,:]=np.array(ubrow)\r\n \r\n #read position\r\n elif(line=='sixc'):\r\n #print(\"pos marker\")\r\n pos=lines[i+1].strip().split()\r\n pos=[float(x) for x in pos] #uses position order\r\n \r\n #read contraints\r\n elif(line=='con'):\r\n cons={}\r\n cons_list=[]\r\n #print(\"con marker\")\r\n for j in range(3):\r\n if (i+j+1)<len(lines):\r\n conline=lines[i+j+1].strip().split()\r\n if len(conline)==1:\r\n cons[conline[0]]=True\r\n elif len(conline)==2:\r\n if (conline[1]=='None' or conline[1]==None):\r\n cons[conline[0]]=True\r\n else:\r\n cons[conline[0]]=float(conline[1])\r\n\r\n for key in cons:\r\n key_varified = self.varifykey(key)\r\n cons_list.append(key_varified)\r\n cons_list.append(cons[key])\r\n \r\n elif(line=='en'):\r\n en=float(lines[i+1].strip())\r\n \r\n elif(line=='lattice'):\r\n latt_name=lines[i+1].strip()\r\n latt=[float(x) for x in lines[i+2].strip().split()]\r\n \r\n elif(line=='azi_ref'):\r\n azi_ref=lines[i+1].strip().split()\r\n azi_ref=[float(x) for x in azi_ref] #uses position order\r\n \r\n elif(line=='azi_hkl'):\r\n azi_hkl=lines[i+1].strip().split()\r\n azi_hkl=[float(x) for x in azi_hkl]\r\n \r\n \r\n #print(cons_list,cons)\r\n \r\n 
print(\"Setting:\\n\")\r\n if not(ub is None):\r\n print(\"ub\")\r\n self.ubcalc.set_ub(ub)\r\n self.hklcalc.ubcalc.UB=ub\r\n print(self.ubcalc.UB)\r\n print()\r\n if not(pos is None):\r\n print(\"sixc\")\r\n self.pos(self.scannables['sixc'],self.pos_to_sixc(pos))\r\n if not(cons_list is None):\r\n print(\"con\")\r\n self.con(*cons_list)\r\n if not(en is None):\r\n print('en')\r\n self.pos(self.scannables['en'],en)\r\n if not(latt is None):\r\n self.setlat(latt_name,*latt)\r\n print(\"lattice\")\r\n print(latt_name,latt)\r\n print()\r\n \r\n try:\r\n if self.ubcalc.crystal is not None:\r\n self.ubcalc.U = self.ubcalc.UB @ np.linalg.inv(self.ubcalc.crystal.B)\r\n except Exception as e:\r\n traceback.print_exc()\r\n print(e)\r\n print('Failed to set U matrix')\r\n \r\n elif not(self.ubcalc.UB is None):\r\n pass\r\n #get real vectors from UB\r\n \r\n if not (azi_ref is None):\r\n self.setnphi(azi_ref)\r\n print('nphi')\r\n print(azi_ref)\r\n print()\r\n if not (azi_hkl is None):\r\n self.setnhkl(azi_hkl)\r\n print('nhkl')\r\n print(azi_hkl)\r\n print()\r\n \r\n self.hklcalc= HklCalculation(self.ubcalc, self.cons)\r\n self.update_pos()\r\n self.set_reciprocal_vectors()\r\n vectors.set_vector('reference azimuthal',self.setnphi())\r\n \r\n #if it is a nexus file, create a suitable .txt and read that\r\n elif str(file).endswith('.nxs'):\r\n from i16sim.util.hdf5_to_i16sim import interpret\r\n interpret(file)\r\n self.loadub(str(file)+'.i16sim.txt')\r\n \r\n else:\r\n raise Exception('File not recognised')", "title": "" }, { "docid": "f7c06e2b12fc1af265c18813538817b8", "score": "0.5075727", "text": "def gen_reference(out_data_list, bus_num=2, file=axis_check_file):\r\n with open(file, \"w+\") as f:\r\n for l in out_data_list:\r\n l_tmp = l.copy()\r\n while len(l_tmp) > bus_num:\r\n f.write(\"0_\")\r\n for b in range(bus_num):\r\n trans_part = l_tmp.pop(0)\r\n f.write(\"{0:08x}{1:08x}\".format(int_to_short(trans_part.imag, 32), int_to_short(trans_part.real, 32)))\r\n f.write(\"\\n\")\r\n else:\r\n f.write(\"1_\")\r\n for b in range(bus_num):\r\n trans_part = l_tmp.pop(0)\r\n f.write(\"{0:08x}{1:08x}\".format(int_to_short(trans_part.imag, 32), int_to_short(trans_part.real, 32)))\r\n f.write(\"\\n\")", "title": "" }, { "docid": "3d1d579f393da01bae1c4ad8282cdd3e", "score": "0.5053391", "text": "def _otherreference(self):\n\n print('Loading otherreference.tsv')\n ipath = os.path.join(self._ipath, 'otherreference.tsv.bz2')\n opath = os.path.join(self._ipath, 'otherreference.pkl.bz2')\n if os.path.exists(opath):\n return pd.read_pickle(opath)\n otherreference = pd.read_csv(ipath, sep='\\t', quoting=3, dtype=str,\n lineterminator='\\n').drop(\n columns=['uuid', 'sequence'])\n otherreference.dropna(axis='index', how='any', inplace=True)\n otherreference = otherreference.groupby('patent_id').aggregate(\n lambda x: len(list(x)))\n otherreference.rename(columns={'text': 'otherreference'}, inplace=True)\n otherreference.index.rename('pid', inplace=True)\n otherreference.to_pickle(opath)\n return otherreference", "title": "" }, { "docid": "28ed0f1a9ad5fa47fc21f3e0c4e95ded", "score": "0.5052405", "text": "def read_datafile(self):\n if self.datafile is None:\n\n default_files = ['ai1dat.dat','ai1mod.dat','ai1fit.dat',\n 'inmodel.dat','inregulm.dat']\n dlst = [i for i in os.listdir(self.working_directory) if \\\n (i[-4:] == '.dat') and (i not in default_files)]\n if len(dlst) == 1:\n self.datafile = dlst[0]\n else:\n print(\"please define datafile\")\n return\n\n # define path to file\n datafpath = 
os.path.join(self.working_directory,self.datafile)\n self.mode = open(datafpath).readline().strip().split()[0]\n data = np.loadtxt(datafpath,skiprows = 2)\n self.freq = 1./data[:,0]\n \n if self.mode == 'I': \n zr = np.vstack([data[:,i] for i in range(len(data[0])) if (i-1)%4 == 0])\n ze = np.vstack([data[:,i] for i in range(len(data[0])) if (i-2)%4 == 0])\n zi = -np.vstack([data[:,i] for i in range(len(data[0])) if (i-3)%4 == 0])\n z = zr + 1j*zi\n self.z = z.T.reshape(len(z[0]),2,2)\n self.zerr = ze.T.reshape(len(z[0]),2,2)\n\n \n # make a frequency array that has the same shape as z\n freq2 = np.zeros(np.shape(self.z))\n for i in range(len(freq2)):\n freq2[i,:,:] = 1./data[:,0][i] \n \n# calculate resistivity\n self.resistivity = 0.2*np.abs(self.z)**2/freq2\n \n q = np.zeros(np.shape(self.resistivity))\n# q[(zr<0)&(zi<0)] = np.pi\n# q[(zr<0)&(zi>0)] = -np.pi\n phase = np.zeros([len(self.z),2,2])\n res = np.zeros([len(self.z),2,2])\n self.resistivity_err = np.zeros([len(self.z),2,2])\n self.phase_err = np.zeros([len(self.z),2,2]) \n \n \n self.q = q\n for iz in range(len(self.z)):\n for i in range(2):\n for j in range(2):\n phase[iz,i,j] = np.rad2deg(cmath.phase(self.z[iz,i,j]))\n res[iz,i,j]= 0.2*np.abs(self.z[iz,i,j])**2/self.freq[iz]\n r_err, phi_err = MTcc.zerror2r_phi_error(\n np.real(self.z[iz,i,j]), \n self.zerr[iz,i,j], \n np.imag(self.z[iz,i,j]), \n self.zerr[iz,i,j])\n\n \n\n self.resistivity_err[iz,i,j] = \\\n 0.4*np.abs(self.z[iz,i,j])/\\\n self.freq[iz]*r_err\n self.phase_err[iz,i,j] = phi_err\n\n phase[phase<-180] += 360\n self.phase = phase\n self.resistivity = res\n \n elif self.mode == 'R':\n res = np.vstack([data[:,i] for i in range(len(data[0])) if (i-1)%4 == 0])\n self.resistivity = res.T.reshape(len(res[0]),2,2)\n res_err = np.vstack([data[:,i] for i in range(len(data[0])) if (i-2)%4 == 0])\n self.resistivity_err = res_err.T.reshape(len(res_err[0]),2,2)\n \n phs = np.vstack([data[:,i] for i in range(len(data[0])) if (i-3)%4 == 0])\n self.phase = phs.T.reshape(len(phs[0]),2,2)\n phs_err = np.vstack([data[:,i] for i in range(len(data[0])) if (i-4)%4 == 0])\n self.phase_err = phs_err.T.reshape(len(phs_err[0]),2,2)", "title": "" }, { "docid": "35877cecfc58cbbc07d9dc94f46bc419", "score": "0.5046248", "text": "def run_reference() :\n db = get_db(0)\n db.put_int('adjoint', 0)\n db.put_int(\"store_angular_flux\", 1)\n \n mat = get_material()\n\n #--------------------------------------------------------------------------#\n # MODELS\n #--------------------------------------------------------------------------#\n phi = []\n numcells = []\n keff = []\n pin_power = []\n angularmoment = []\n nModels = len(cases)-1\n names = cases[nDLPcases:nModels]\n print names\n number = 0\n if nGroups == 238 :\n thermal_cutoff_group = 203\n elif nGroups == 44 :\n thermal_cutoff_group = 27\n for n in names :\n if n == '1-pin' :\n db.put_str(\"outer_solver\", \"GS\")\n db.put_str(\"bc_west\", \"periodic\")\n db.put_str(\"bc_east\", \"periodic\")\n else :\n db.put_str(\"outer_solver\", \"GMRES\")\n db.put_str(\"bc_west\", \"reflect\")\n db.put_str(\"bc_east\", \"reflect\") \n print n\n x = []\n mesh = get_mesh(number) \n number += 1 \n solver = Eigen1D(db, mat, mesh)\n solver.solve()\n if n == '10-pin' :\n keff = solver.state().eigenvalue()\n rates = ReactionRates(mat, mesh, solver.state())\n pin_power = rates.region_power(\"PINS\") \n ng = solver.material().number_groups()\n nc = solver.mesh().number_cells()\n no = solver.quadrature().number_octants()/2\n na = 
solver.quadrature().number_angles_octant()\n phig = np.zeros((ng,nc))\n x.append(mesh.dx(0) *0.5)\n for i in range(1,nc) :\n x.append(x[i-1]+mesh.dx(i-1))\n numcells.append(x)\n for g in range(0, ng) :\n for i in range(0, nc) :\n phig[g][i] = solver.state().phi(g)[i]\n np.savetxt(dir_name+'/refdata/'+n+'_mg_flux', phig)\n phi.append(phig)\n\n nOrder = 4\n angmom = np.zeros((nOrder,ng,nc))\n P = OrthogonalBasisParameters()\n P.order = nOrder\n P.size = na\n P.x.resize(P.size, 0.0)\n P.qw.resize(P.size, 0.0)\n P.lower_bound = 0.0\n P.upper_bound = 1.0\n P.orthonormal = False\n x = np.linspace(P.lower_bound, P.upper_bound, P.size)\n for g in range(ng) :\n for i in range(nc) :\n for o in range(no) :\n psi = np.zeros((na))\n for a in range(na) :\n psi[a] = solver.state().psi(g,o,a)[i]\n for a in range(na) :\n P.x[a] = solver.quadrature().mu(o,a)\n P.qw[a] = solver.quadrature().weight(a)\n ang = OrthogonalBasis.Create(\"jacobi\", P)\n angular = np.zeros(nOrder+1)\n y = vec_dbl(psi.tolist())\n z = vec_dbl(angular.tolist())\n ang.transform(y, z)\n for order in range(nOrder) : \n angmom[order][g][i] += z[order]\n for order in range(nOrder) :\n np.savetxt(dir_name+'/refdata/'+n+'_mg_moment'+str(order), angmom[order])\n angularmoment.append(angmom)\n \n #--------------------------------------------------------------------------#\n # PLOTS AND DATA\n #--------------------------------------------------------------------------#\n\n io = IO_HDF5('cross_section_libraries/'+str(test_problem)+'_'+str(nGroups)+'_library.h5')\n db = io.read_input()\n EG = db.get_vec_dbl('neutron_energy_bounds')\n energyrange = np.zeros((len(EG)-1))\n deltaE = np.zeros((len(EG)-1))\n for i in range(len(energyrange)):\n energyrange[i] = (EG[i]+EG[i+1])/2\n deltaE[i] = abs(EG[i+1] - EG[i])\n \n Eave = []\n Save = []\n for order in range(nOrder) :\n E = []\n S = []\n for c in range(len(names)) :\n E.append(np.mean(angularmoment[c][order], axis=0))\n S.append(np.mean(angularmoment[c][order], axis=1))\n Eave.append(E)\n Save.append(S)\n\n A = []\n # Data for Phi plots \n for a in range(len(phi)): \n y, A0 = common.barchart(EG, np.mean(phi[a], axis=1))\n A.append(A0)\n C = []\n for order in range(nOrder) :\n CC = []\n for core in range(len(names)) :\n y, B00 = common.barchart(EG, Save[order][core])\n CC.append(B00)\n C.append(CC)\n\n groups = range(0, nGroups)\n fontP = FontProperties()\n fontP.set_size('medium')\n plt.figure(0, figsize=(8, 6.5))\n plt.semilogy(groups, np.mean(phi[0], axis=1), 'k-', \\\n groups, np.mean(phi[1], axis=1), 'b-', \\\n groups, np.mean(phi[2], axis=1), 'g-', \\\n groups, np.mean(phi[3], axis=1), 'r-' )\n plt.xlabel('$g$')\n plt.ylabel('$\\phi_g$')\n leg = ['10-pin', 'UO$_2$', 'MOX', '1-pin']\n plt.legend(leg, loc = 0, prop=fontP)\n plt.grid(True)\n plt.tight_layout()\n plt.savefig(dir_name+'/reference_figures/'+str(nGroups)+'group_spectra.pdf')\n\n # Spectra Plot\n leg = ['10-pin', 'UO$_2$', 'MOX']\n plt.figure(1, figsize=(8, 6.5))\n plt.loglog(y, A[0], 'k-', \\\n y, A[1], 'r-', \\\n y, A[2], 'g-' )\n plt.xlabel('energy in $eV$')\n plt.ylabel('$\\phi_g$')\n plt.legend(leg, loc = 0, prop=fontP)\n plt.grid(True) \n plt.tight_layout()\n plt.savefig(dir_name+'/reference_figures/'+str(nGroups)+'group_spectra_energy.pdf') \n \n # Spatially Averaged Moment Plots\n # Moment 0 Plot\n plt.figure(2, figsize=(8, 6.5))\n plt.loglog(y, C[0][0], 'k-', \\\n y, C[0][1], 'r-', \\\n y, C[0][2], 'g-' )\n plt.xlabel('energy in $eV$')\n plt.ylabel('${moment0}_g$')\n plt.legend(leg, loc = 0, prop=fontP)\n plt.grid(True) \n 
plt.tight_layout()\n plt.savefig(dir_name+'/reference_figures/'+str(nGroups)+'monent0_spectra_energy.pdf')\n \n # Moment 1 Plot\n plt.figure(3, figsize=(8, 6.5))\n plt.loglog(y, C[1][0], 'k-', \\\n y, C[1][1], 'r-', \\\n y, C[1][2], 'g-' )\n plt.xlabel('energy in $eV$')\n plt.ylabel('${moment1}_g$')\n plt.legend(leg, loc = 0, prop=fontP)\n plt.grid(True)\n plt.tight_layout()\n plt.savefig(dir_name+'/reference_figures/'+str(nGroups)+'monent1_spectra_energy.pdf')\n \n # Moment 2 Plot\n plt.figure(4, figsize=(8, 6.5))\n plt.loglog(y, C[2][0], 'k-', \\\n y, C[2][1], 'r-', \\\n y, C[2][2], 'g-' )\n plt.xlabel('energy in $eV$')\n plt.ylabel('${moment2}_g$')\n plt.legend(leg, loc = 0, prop=fontP)\n plt.grid(True)\n plt.tight_layout()\n plt.savefig(dir_name+'/reference_figures/'+str(nGroups)+'monent2_spectra_energy.pdf')\n \n # Energy Averaged Plots\n x = numcells[0] \n \n leg = ['Fast Phi', 'Thermal Phi']\n plt.figure(5, figsize=(8, 6.5))\n plt.plot(x, np.mean(phi[0][:thermal_cutoff_group][0:len(x)],axis=0), 'k-', \\\n x, np.mean(phi[0][thermal_cutoff_group:][0:len(x)],axis=0), 'g-' )\n plt.xlabel('position in cm')\n plt.ylabel('$\\phi$')\n plt.legend(leg, loc = 0, prop=fontP)\n plt.grid(True)\n plt.tight_layout()\n plt.savefig(dir_name+'/reference_figures/spectra_full_energyaveraged.pdf')\n \n # Moment 0 Plot\n \n leg = ['$0^{th}$ moment', '$1^{st}$ moment', '$2^{nd}$ moment']\n plt.figure(6, figsize=(8, 6.5))\n plt.plot(x, Eave[0][0], 'k-', \\\n x, Eave[1][0], 'r-', \\\n x, Eave[2][0], 'g-' )\n plt.xlabel('position in cm')\n plt.ylabel('${moment0}_g$')\n plt.legend(leg, loc = 0, prop=fontP)\n plt.grid(True)\n plt.tight_layout()\n plt.savefig(dir_name+'/reference_figures/'+str(nGroups)+'monent_spectra_full.pdf')\n \n # Moment 1 Plot\n x = numcells[1]\n n = len(x)\n plt.figure(7, figsize=(8, 6.5))\n plt.plot(x, Eave[0][1], 'k-', \\\n x, Eave[1][1], 'r-', \\\n x, Eave[2][1], 'g-' )\n plt.xlabel('position in cm')\n plt.ylabel('${moment1}_g$')\n plt.legend(leg, loc = 0, prop=fontP)\n plt.grid(True)\n plt.tight_layout()\n plt.savefig(dir_name+'/reference_figures/'+str(nGroups)+'monent_spectra_uo2.pdf')\n \n # Moment 2 Plot\n x = numcells[2]\n n = len(x)\n plt.figure(8, figsize=(8, 6.5))\n plt.plot(x, Eave[0][2], 'k-', \\\n x, Eave[1][2], 'r-', \\\n x, Eave[2][2], 'g-' )\n plt.xlabel('position in cm')\n plt.ylabel('${moment2}_g$')\n plt.legend(leg, loc = 0, prop=fontP)\n plt.grid(True)\n plt.tight_layout()\n plt.savefig(dir_name+'/reference_figures/'+str(nGroups)+'monent_spectra_mox.pdf') \n \n data = {}\n data['pin_power'] = pin_power\n data['phi_full'] = np.mean(phi[0],axis=1)\n data['phi_uo2'] = np.mean(phi[1],axis=1)\n data['phi_mox'] = np.mean(phi[2],axis=1)\n data['keff'] = keff\n pickle.dump(data, open(dir_name+'/refdata/reference_data.p', 'wb'))", "title": "" }, { "docid": "1275eed1d2c98c29c58b5664e015cfc3", "score": "0.5036114", "text": "def previous_load_data(path=\"./data/cora/\", dataset=\"cora\" ,n_train = 140):\n print('Loading {} dataset...'.format(dataset))\n\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset), dtype=np.dtype(str))\n features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n labels = encode_onehot(idx_features_labels[:, -1])\n\n # build graph\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset), dtype=np.int32)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())), 
dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])), shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n # print(adj)\n # print(features)\n\n features = normalize_features(features)\n adj = normalize_adj(adj + sp.eye(adj.shape[0]))\n\n #idx_train = range(140)\n idx_train = range(n_train)\n idx_val = range(200, 500)\n idx_test = range(500, 1500)\n\n adj = torch.FloatTensor(np.array(adj.todense()))\n features = torch.FloatTensor(np.array(features.todense()))\n labels = torch.LongTensor(np.where(labels)[1])\n\n idx_train = torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n idx_test = torch.LongTensor(idx_test)\n\n return adj, features, labels, idx_train, idx_val, idx_test", "title": "" }, { "docid": "3ac936c39581019a922df685d063ee38", "score": "0.5005946", "text": "def within_reference(bampath, outpath,n, param):\n\n\tmin_isize, max_isize = param.lib_min, param.lib_max\n\n\tcorrelated_check = CorrelatedSample(n)\n\tbamfile = pysam.Samfile(bampath, 'rb')\n\toutfile = pysam.Samfile(outpath, 'wb', template=bamfile)\n\n\ti = 0\n\treads_rev = 0\n\treads_fwd = 0\n\tread_read1 = 0\n\tread_read2 = 0\n\tprinted_fwd = set()\n\n\tfor read, mate_pos in proper_read_isize(bamfile, min_isize, max_isize, param):\n\t\tread_pos = read.pos\n\n\t\t# remove unactive samples (read1 is more than n bp away)\n\t\tcorrelated_check.remove_inactive(read_pos)\n\n\t\tif not read.is_reverse:\n\t\t\tif correlated_check.is_correlated(mate_pos):\n\t\t\t\t#print 'correlated'\n\t\t\t\tcontinue\n\t\t\t\t# read is forward\n\t\t\telse:\n\t\t\t\t# add to sample neighborhood\n\t\t\t\tcorrelated_check.add_sample(read_pos, mate_pos)\n\t\t\t\t# print to filtered bamfile\n\t\t\t\toutfile.write(read)\n\t\t\t\tprinted_fwd.add(read.qname)\n\t\t\t\treads_fwd += 1\n\t\t\t\tif read.is_read1:\n\t\t\t\t\tread_read1 += 1\n\t\t\t\telif not read.is_read1:\n\t\t\t\t\tread_read2 += 1\n\n\t\t# read is reverse, both reads parsed\n\t\telse:\n\t\t\tif read.qname in printed_fwd:\n\t\t\t\toutfile.write(read)\n\t\t\t\tprinted_fwd.remove(read.qname)\n\t\t\t\treads_rev += 1\n\n\t\t\t\tif read.is_read1:\n\t\t\t\t\tread_read1 += 1\n\t\t\t\telif not read.is_read1:\n\t\t\t\t\tread_read2 += 1\n\t\t\t#print 'not!'\n\t\ti += 1\n\t\tif i % 10000 == 1:\n\t\t\tprint 'processing coordinate', read.pos, 'on ref:', read.tid\n\t\t\tprint reads_fwd,reads_rev,read_read1,read_read2\n\tprint reads_fwd,reads_rev,read_read1,read_read2\n\tpysam.index(outpath)\n\toutfile.close()", "title": "" }, { "docid": "9411e2a1cc4d5bca1be6b8ab8011a7b5", "score": "0.4994594", "text": "def example_gen_fixe_ref_patch(vol_names, vol_size, fid=None):\n ref_name = 'FTD096K7'\n ref_patch = 0\n dir_path = os.path.dirname(vol_names[0])\n part_names = [os.path.basename(file).split('_')[0] for file in vol_names]\n patch_indices = [(os.path.basename(file).split('_')[1]).split('.')[0] for file in vol_names]\n part_names = list(set(part_names) -set([ref_name]))\n patch_indices = list(set(patch_indices)-set([ref_patch]))\n print('part_name is ', part_names)\n while(True):\n name_idx = np.random.permutation(len(part_names))[0]\n patch_idx = np.random.permutation(len(patch_indices))[0]\n file1 = os.path.join(dir_path, part_names[name_idx] + '_' + str(ref_patch) + '.nii.gz')\n file2 = os.path.join(dir_path, ref_name + '_' + str(ref_patch) + '.nii.gz')\n print(file1)\n print(file2)\n if(fid is not None):\n 
fid.write(file1 + '\\n')\n fid.write(file2 + '\\n')\n fid.flush()\n X1 = nib.load(file1).get_data()\n X2 = nib.load(file2).get_data()\n X1, X2 = augment(X1, X2, vol_size)\n X1 = X1/(pow(2,16)-1.0)\n X2 = X2/(pow(2,16)-1.0)\n X1 = X1[np.newaxis, :,:,:, np.newaxis]\n X2 = X2[np.newaxis, :,:,:, np.newaxis]\n return_vals = (X1, X2, part_names[name_idx], ref_name)\n yield tuple(return_vals)", "title": "" }, { "docid": "6cce94b63d87ad7d17954326f77b8d2b", "score": "0.49934655", "text": "def transform_target_data(audiofile,analyze=0):\n target_file = DATA_PATH + audiofile[:-4] + \"REF.txt\"\n data = read_array(target_file,',')\n \n if analyze:\n pylab.subplot(211)\n pylab.plot(data[:,1])\n pylab.subplot(212)\n pylab.plot(data[:,2])\n \n pylab.figure()\n pylab.subplot(211)\n pylab.hist(data[:,1],bins=25)\n pylab.xlim(40,65)\n pylab.subplot(212)\n pylab.hist(data[:,2],bins=13)\n pylab.xlim(0,12)\n \n # get the unique element (nr. of different notes) of the piece\n midi_data = data[:,1]\n unique = list(set(midi_data))\n target = np.zeros((len(midi_data), len(unique)))\n target_chromas = np.zeros((len(midi_data), 13))\n for n in range( len(midi_data) ):\n ind = unique.index( midi_data[n] )\n target[n,ind] = 1\n target_chromas[n,data[n,2]+1] = 1\n \n if analyze:\n print \"pitch classes:\",len(unique)\n pylab.show()\n exit(0)\n \n savefile = OUTPUT_DIR + audiofile[:-4] + \"_STFT.dat\"\n data = shelve.open(savefile)\n lenge = data[\"features\"].shape[0]\n data[\"targets\"] = target[:lenge]\n data[\"target_midi\"] = midi_data[:lenge]\n data[\"target_chromas\"] = target_chromas[:lenge]\n data[\"pitch_classes\"] = unique\n data.close()", "title": "" }, { "docid": "4da9c11c649e92c83f1c50dbdfeff2ae", "score": "0.4990033", "text": "def testUpdateDataFile(self):\n self.lfh.write(\"\\nStarting %s %s\\n\" % (self.__class__.__name__,\n sys._getframe().f_code.co_name))\n try:\n # Create a initial data file --\n #\n myDataList=[]\n ofh = open(\"test-output-1.cif\", \"w\")\n curContainer=DataContainer(\"myblock\")\n aCat=DataCategory(\"pdbx_seqtool_mapping_ref\")\n aCat.appendAttribute(\"ordinal\")\n aCat.appendAttribute(\"entity_id\")\n aCat.appendAttribute(\"auth_mon_id\")\n aCat.appendAttribute(\"auth_mon_num\")\n aCat.appendAttribute(\"pdb_chain_id\")\n aCat.appendAttribute(\"ref_mon_id\")\n aCat.appendAttribute(\"ref_mon_num\") \n aCat.append((1,2,3,4,5,6,7))\n aCat.append((1,2,3,4,5,6,7))\n aCat.append((1,2,3,4,5,6,7))\n aCat.append((1,2,3,4,5,6,7))\n curContainer.append(aCat)\n myDataList.append(curContainer)\n pdbxW=PdbxWriter(ofh)\n pdbxW.write(myDataList)\n ofh.close()\n #\n # Read and update the data -\n # \n myDataList=[]\n ifh = open(\"test-output-1.cif\", \"r\")\n pRd=PdbxReader(ifh)\n pRd.read(myDataList)\n ifh.close()\n #\n myBlock=myDataList[0]\n myBlock.printIt()\n myCat=myBlock.getObj('pdbx_seqtool_mapping_ref')\n myCat.printIt()\n for iRow in xrange(0,myCat.getRowCount()):\n myCat.setValue('some value', 'ref_mon_id',iRow)\n myCat.setValue(100, 'ref_mon_num',iRow)\n ofh = open(\"test-output-2.cif\", \"w\") \n pdbxW=PdbxWriter(ofh)\n pdbxW.write(myDataList)\n ofh.close() \n \n except:\n traceback.print_exc(file=sys.stderr)\n self.fail()", "title": "" }, { "docid": "43f0ff94674c0493e95c1e1d5bd8e539", "score": "0.49866006", "text": "def load_sequential_aras_last(isPerson):\n #by Adele\n #print (\"module_path: \" + module_path )\n #/by Adele\n \n with open(\"E:\\Lessons_tutorials\\Behavioural user profile articles\\Datasets\\Aras\\House A\\CSV_Summery\\Sequential\\Day\\last\\Whole_data.csv\") as 
csv_file:\n \n data_file = csv.reader(csv_file)\n #target_names = np.array(temp[2:]) # the name of labels \n n_samples = 2068#sum(1 for row in data_file) \n n_features = 41\n data = np.empty((n_samples, n_features))\n target = np.empty((n_samples,), dtype=np.int)\n temp_data = np.empty(n_features+1)# copy whole data to it, then select all col except 20\n \n idx_IN_columns = [] \n for i, ir in enumerate(data_file):\n #print(i)\n temp_data = np.asarray(ir[:42], dtype=np.int)\n #print(temp_data)\n if isPerson == True:\n n_features = 40\n data = np.empty((n_samples, n_features))\n \n # in version 2 the activity col is removed too because it is not important, sensor data are required.\n idx_IN_columns = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39]#all of features except feature 40 (Person_ID\n target[i] = np.asarray(ir[40], dtype=np.int)# Person ID is the class\n else:\n idx_IN_columns = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40]#all of features except feature 41 (Activity_ID\n target[i] = np.asarray(ir[41], dtype=np.int)# Activity ID is the class\n \n \n extractedData = temp_data[idx_IN_columns]\n #print(extractedData)\n data[i] = np.asarray(extractedData , dtype=np.int)\n #print(target[i])\n \n \n #print(data)\n #print(target)\n return Bunch(data=data, target=target)\n # target_names=target_names)\n #feature_names=['sepal length (cm)', 'sepal width (cm)',\n # 'petal length (cm)', 'petal width (cm)'])", "title": "" }, { "docid": "761dc5c8f94d8c330bea3536353931b3", "score": "0.49820733", "text": "def flirt2aff_files(matfile, in_fname, ref_fname):\n mat = np.loadtxt(matfile)\n in_img = nib.load(in_fname)\n ref_img = nib.load(ref_fname)\n return flirt2aff(mat, in_img, ref_img)", "title": "" }, { "docid": "e5f27708ef324c06df58754c1e355448", "score": "0.49668267", "text": "def create_reference(dataset: tl.TimeSeriesData, timestep, batch_size):\n # creates reference for a certain timestep\n history_horizon = dataset.history_horizon\n forecast_horizon = dataset.forecast_horizon\n num_encoder_features = len(dataset.encoder_features)\n num_decoder_features = len(dataset.decoder_features)\n seed(1) # seed random number generator\n\n features1_references_np = np.zeros(shape=(batch_size, history_horizon, num_encoder_features))\n features2_references_np = np.zeros(shape=(batch_size, forecast_horizon, num_decoder_features))\n\n inputs1_np = dataset[timestep][0].cpu().numpy()\n inputs2_np = dataset[timestep][1].cpu().numpy()\n\n for x in range(num_encoder_features): # iterate through encoder features\n feature_x = inputs1_np[:, x]\n mu = 0\n sigma = abs(np.std(feature_x)) # 0.3 is chosen arbitrarily # hier np.std nehmen\n for j in range(batch_size):\n noise_feature1 = np.random.default_rng().normal(mu, sigma, history_horizon) # create white noise series\n features1_references_np[j, :, x] = noise_feature1 + feature_x\n\n for x in range(num_decoder_features): # iterate through decoder features\n feature_x = inputs2_np[:, x]\n mu = 0\n sigma = abs(np.std(feature_x)) # 0.3 is chosen arbitrarily\n for j in range(batch_size):\n noise_feature2 = np.random.default_rng().normal(mu, sigma, forecast_horizon)\n features2_references_np[j, :, x] = noise_feature2 + feature_x\n\n return torch.Tensor(features1_references_np).to(DEVICE), torch.Tensor(features2_references_np).to(DEVICE)", "title": "" }, { "docid": "8e444fc4c930b1f0719f8de41744aa07", "score": "0.49487188", "text": "def 
load_FRG(fpath):\n\n def readFRGFile(fpath):\n data = {}\n\n with open(fpath, \"r\") as f:\n ReadingHeader = True # check when header has been fully parsed\n BothDirections = False # sweep forward + reverse\n\n while ReadingHeader:\n line = f.readline()\n lineparts = line.split(\":\")\n\n if \"Data\" in lineparts[0]:\n ReadingHeader = False\n f.readline()\n f.readline()\n else:\n try:\n data[lineparts[0]] = float(lineparts[1])\n except:\n data[lineparts[0]] = lineparts[1][1:].replace(\"\\n\", \"\")\n\n vforward = []\n iforward = []\n timeforward = []\n if data[\"sweepDir\"] == \"Forward + Reverse\":\n BothDirections = True\n vreverse = []\n ireverse = []\n timereverse = []\n\n for line in f:\n lineparts = f.readline().split(\"\\t\")\n if len(lineparts) == 1:\n break\n vforward.append(lineparts[0])\n iforward.append(lineparts[1])\n timeforward.append(lineparts[2])\n if BothDirections:\n vreverse.append(lineparts[0])\n ireverse.append(lineparts[1])\n timereverse.append(lineparts[2])\n\n data[\"V\"] = np.array(vforward).astype(float)\n data[\"I\"] = np.array(iforward).astype(float)\n data[\"J\"] = data[\"I\"] / data[\"area_cm2\"]\n data[\"delay\"] = np.array(timeforward).astype(float)\n\n if BothDirections:\n data2 = data.copy()\n data2[\"sampleName\"] = data[\"sampleName\"] + \"_Reverse\"\n data[\"sampleName\"] = data[\"sampleName\"] + \"_Forward\"\n data2[\"V\"] = np.array(vreverse).astype(float)\n data2[\"I\"] = np.array(ireverse).astype(float)\n data2[\"J\"] = data2[\"I\"] / data2[\"area_cm2\"]\n data2[\"delay\"] = np.array(timereverse).astype(float)\n output = [data, data2]\n else:\n output = data\n\n return output\n\n fids = [os.path.join(fpath, x) for x in os.listdir(fpath)]\n\n alldata = {}\n for f in fids:\n output = readFRGFile(f)\n if type(output) == list:\n for each in output:\n alldata[each[\"sampleName\"]] = each\n else:\n alldata[output[\"sampleName\"]] = output\n return alldata", "title": "" }, { "docid": "60482db3aa202b94337a3464dd780a34", "score": "0.49388656", "text": "def test_reference_data():\n inputs = [\n [3735928559, 195939070, 229505742, 305419896],\n [3668361503, 4165561550, 1661411377, 3634257570],\n [164546577, 4166754639, 1765190214, 1303880213],\n [446610472, 3941463886, 522937693, 1882353782],\n [1864922766, 1719732118, 3882010307, 1776744564],\n [4141682960, 3310988675, 553637289, 902896340],\n [1134851934, 2352871630, 3699409824, 2648159817],\n [1240956131, 3107113773, 1283198141, 1924506131],\n [2669565031, 579818610, 3042504477, 2774880435],\n [2766103236, 2883057919, 4029656435, 862374500],\n ]\n outputs = [\n [3914649087, 576849849, 3593928901, 2229911004],\n [2240804226, 3691353228, 1365957195, 2654016646],\n [3562296087, 3191708229, 1147942216, 3726991905],\n [1403443605, 3591372999, 1291086759, 441919183],\n [1086200464, 2191331643, 560336446, 3658716651],\n [3249937430, 2346751812, 847844327, 2996632307],\n [2584285912, 4034195531, 3523502488, 169742686],\n [959045797, 3875435559, 1886309314, 359682705],\n [3978441347, 432478529, 3223635119, 138903045],\n [296367413, 4262059219, 13109864, 3283683422],\n ]\n outputs64 = [\n [2477551240072187391, 9577394838764454085],\n [15854241394484835714, 11398914698975566411],\n [13708282465491374871, 16007308345579681096],\n [15424829579845884309, 1898028439751125927],\n [9411697742461147792, 15714068361935982142],\n [10079222287618677782, 12870437757549876199],\n [17326737873898640088, 729039288628699544],\n [16644868984619524261, 1544825456798124994],\n [1857481142255628931, 596584038813451439],\n 
[18305404959516669237, 14103312907920476776],\n ]\n for seed, expected, expected64 in zip(inputs, outputs, outputs64):\n expected = np.array(expected, dtype=np.uint32)\n ss = SeedSequence(seed)\n state = ss.generate_state(len(expected))\n assert_array_equal(state, expected)\n state64 = ss.generate_state(len(expected64), dtype=np.uint64)\n assert_array_equal(state64, expected64)", "title": "" }, { "docid": "7ed377464398ed9f38ced6709e53852f", "score": "0.49329084", "text": "def loadReference(self, data, ref, otherref='', percentile=50):\n if ref == 'Mean':\n return data.raw.mean(0)\n elif ref == 'Percentile':\n return np.percentile(data.raw, percentile, axis=0)\n elif ref == 'Other':\n if otherref == '':\n raise RuntimeError('Specify a reference spectrum file')\n return correction.load_reference(data.wavenumber,\n matfilename=otherref)\n else:\n return correction.load_reference(data.wavenumber, what=ref.lower())", "title": "" }, { "docid": "2dbc92c4c161a4af001bf3fb02ba2a8a", "score": "0.49298394", "text": "def call_methylated_sites_pe(inputf, sample, reference_fasta,\n unmethylated_control = None,\n sig_cutoff=.01,num_procs = 1,\n num_upstr_bases=0,num_downstr_bases=2,\n generate_mpileup_file=True,\n compress_output=True,\n bgzip=False,\n path_to_bgzip=\"\",\n path_to_tabix=\"\",\n buffer_line_number=100000,\n min_mapq=30,\n min_cov=1,binom_test=True,\n path_to_samtools=\"\",\n remove_chr_prefix=True,\n add_snp_info=False,\n sort_mem=\"500M\",\n path_to_files=\"\",min_base_quality=1,\n keep_temp_files=False):\n\n #Flip the strand of read 2 and create a new bam file\n ##input: sample+\"_processed_reads_no_clonal.bam\"\n ##output: sample+\"_processed_reads_no_clonal_flipped.bam\"\n\n print_checkpoint(\"Begin flipping the strand of read 2\")\n flip_read2_strand(input_file = inputf,\n output_file = inputf+\".read2flipped.bam\",\n path_to_samtools=path_to_samtools)\n\n \n #Call methylated sites\n call_methylated_sites(inputf = inputf+\".read2flipped.bam\",\n sample = sample,\n reference_fasta = reference_fasta,\n unmethylated_control = unmethylated_control,\n sig_cutoff = sig_cutoff,\n num_procs = num_procs,\n num_upstr_bases=num_upstr_bases,\n num_downstr_bases=num_downstr_bases,\n generate_mpileup_file=generate_mpileup_file,\n compress_output=compress_output,\n bgzip=bgzip,\n path_to_bgzip=path_to_bgzip,\n path_to_tabix=path_to_tabix,\n buffer_line_number = buffer_line_number,\n min_mapq=min_mapq,\n min_cov = min_cov,\n binom_test = binom_test,\n path_to_samtools = path_to_samtools,\n sort_mem=sort_mem,\n path_to_files = path_to_files,\n min_base_quality = min_base_quality,\n remove_chr_prefix = remove_chr_prefix,\n add_snp_info = add_snp_info,\n keep_temp_files = keep_temp_files)\n\n #Remove intermediate bam file\n try:\n subprocess.check_call(shlex.split(\"rm -f \"+inputf+\".read2flipped.bam\"+\n \" \"+inputf+\".read2flipped.bam.bai\"))\n except:\n pass", "title": "" }, { "docid": "479c09ea885cbee278474efa3a423fcf", "score": "0.4925192", "text": "def createReference(self, data, params, outfile):\n params.scDo = False\n params.srDo = False\n wns = []\n ys = []\n try:\n for fi in range(len(data.filenames)):\n self.batchProgress.emit(fi, len(data.filenames))\n if not self.loadFile(data, fi):\n continue\n\n wn = data.wavenumber\n y = data.raw\n\n if params.scRef == 'Percentile':\n y = np.percentile(y, params.scRefPercentile, axis=0)[None, :]\n else:\n y = y.mean(0)[None, :]\n\n y = self.callACandSC(data, params, wn, y)\n wn, y = self.callSGFandSRandBC(params, wn, y)\n if params.normDo:\n y = 
normalization.normalize_spectra(\n params.normMethod, y, wn,\n wavenum=params.normWavenum)\n wns.append(wn)\n ys.append(y[0])\n\n # Do all images have the same wavenumbers?\n if all(np.array_equal(v, wns[0]) for v in wns):\n ys = np.median(ys, axis=0)\n else:\n w1 = min(v.min() for v in wns)\n w2 = max(v.max() for v in wns)\n maxres = max((len(v) - 1) / (v.max() - v.min()) for v in wns)\n wn = np.linspace(w1, w2, num=maxres * (w2 - w1) + 1)\n interpol = interp1d(np.concatenate(wns), np.concatenate(ys))\n ys = interpol(wn)\n\n ab = np.hstack((wn[:, None], ys[:, None]))\n scipy.io.savemat(outfile, {'AB': ab } )\n self.batchDone.emit(True)\n return\n\n except InterruptedError:\n self.stopped.emit()\n except Exception as e:\n traceback.print_exc()\n self.failed.emit(repr(e), traceback.format_exc())\n self.batchDone.emit(False)", "title": "" }, { "docid": "4b411971d81d0cbb79813e36b3eb97f3", "score": "0.49233758", "text": "def transform_target_data_old(audiofile,analyze=0):\n target_file = DATA_PATH + audiofile[:-4] + \"REF.txt\"\n data = read_array(target_file)\n \n # remove time axes and convert to midi\n data = data[:,1]\n midi_data = audiotools.freq_to_midi(data)\n midi_data = np.round(midi_data)\n \n if analyze:\n pylab.plot(midi_data)\n \n # get the unique element (nr. of different notes) of the piece\n unique = list(set(midi_data))\n target = np.zeros((len(midi_data), len(unique)))\n for n in range( len(midi_data) ):\n ind = unique.index( midi_data[n] )\n target[n,ind] = 1\n \n if analyze:\n print \"classes:\",len(unique)\n pylab.figure()\n pylab.psd(target.flatten())\n pylab.show()\n exit(0)\n \n savefile = OUTPUT_DIR + audiofile[:-4] + \"_STFT.dat\"\n data = shelve.open(savefile)\n lenge = data[\"features\"].shape[0]\n data[\"targets\"] = target[:lenge]\n data[\"target_midi\"] = midi_data[:lenge]\n data[\"pitch_classes\"] = unique\n data.close()", "title": "" }, { "docid": "3b2f592a92f23cc012b64b330d477b0f", "score": "0.49093008", "text": "def reference_parser(args, genome_chroms):\n global referenceFiles\n\n referenceFiles = os.path.join(args.dir, \"refAnnotation_\" + args.output_prefix + \".genePred\")\n print(\"**** Parsing Reference Transcriptome....\", file=sys.stdout)\n\n # don't check for exisitng reference file, because need to create a new one for exon and cds centric runs\n # if os.path.exists(referenceFiles):\n # print(\"{0} already exists. Using it.\".format(referenceFiles), file=sys.stdout)\n # else:\n ## gtf to genePred\n if not args.genename:\n subprocess.call([GTF2GENEPRED_PROG, args.annotation, referenceFiles, '-genePredExt', '-allErrors', '-ignoreGroupsWithoutExons'])\n else:\n subprocess.call([GTF2GENEPRED_PROG, args.annotation, referenceFiles, '-genePredExt', '-allErrors', '-ignoreGroupsWithoutExons', '-geneNameAsName2'])\n\n ## parse reference annotation\n # 1. ignore all miRNAs (< 200 bp)\n # 2. 
separately store single exon and multi-exon references\n refs_1exon_by_chr = defaultdict(lambda: IntervalTree()) #\n refs_exons_by_chr = defaultdict(lambda: IntervalTree())\n # store donors as the exon end (1-based) and acceptor as the exon start (0-based)\n # will convert the sets to sorted list later\n junctions_by_chr = defaultdict(lambda: {'donors': set(), 'acceptors': set(), 'da_pairs': set()})\n # dict of gene name --> set of junctions (don't need to record chromosome)\n junctions_by_gene = defaultdict(lambda: set())\n # dict of gene name --> list of known begins and ends (begin always < end, regardless of strand)\n known_5_3_by_gene = defaultdict(lambda: {'begin':set(), 'end': set()})\n\n ## dictionary of record.id (e.g., gencode enst) to genePred record objects\n ## need this for later computation of 5' and 3' overhangs for protein classification\n refDict = {}\n\n for r in genePredReader(referenceFiles):\n refDict[r.id] = r\n if r.length < args.min_ref_len and not args.is_fusion: continue # ignore miRNAs\n if r.exonCount == 1:\n refs_1exon_by_chr[r.chrom].insert(r.txStart, r.txEnd, r)\n known_5_3_by_gene[r.gene]['begin'].add(r.txStart)\n known_5_3_by_gene[r.gene]['end'].add(r.txEnd)\n else:\n refs_exons_by_chr[r.chrom].insert(r.txStart, r.txEnd, r)\n # only store junctions for multi-exon transcripts\n for d, a in r.junctions:\n junctions_by_chr[r.chrom]['donors'].add(d)\n junctions_by_chr[r.chrom]['acceptors'].add(a)\n junctions_by_chr[r.chrom]['da_pairs'].add((d,a))\n junctions_by_gene[r.gene].add((d,a))\n known_5_3_by_gene[r.gene]['begin'].add(r.txStart)\n known_5_3_by_gene[r.gene]['end'].add(r.txEnd)\n\n # check that all genes' chromosomes are in the genome file\n ref_chroms = set(refs_1exon_by_chr.keys()).union(list(refs_exons_by_chr.keys()))\n if genome_chroms is not None:\n diff = ref_chroms.difference(genome_chroms)\n if len(diff) > 0:\n print(\"WARNING: ref annotation contains chromosomes not in genome: {0}\\n\".format(\",\".join(diff)), file=sys.stderr)\n\n # convert the content of junctions_by_chr to sorted list\n for k in junctions_by_chr:\n junctions_by_chr[k]['donors'] = list(junctions_by_chr[k]['donors'])\n junctions_by_chr[k]['donors'].sort()\n junctions_by_chr[k]['acceptors'] = list(junctions_by_chr[k]['acceptors'])\n junctions_by_chr[k]['acceptors'].sort()\n junctions_by_chr[k]['da_pairs'] = list(junctions_by_chr[k]['da_pairs'])\n junctions_by_chr[k]['da_pairs'].sort()\n\n return dict(refs_1exon_by_chr), dict(refs_exons_by_chr), dict(junctions_by_chr), dict(junctions_by_gene), dict(known_5_3_by_gene), refDict", "title": "" }, { "docid": "995dd4b23722e85a8cc71ce8fbe7d0b4", "score": "0.49028236", "text": "def __init__(self, filename='as_R.UXD', echo=False,\r\n mode='pf', bgmode='manual', monitoring = False,\r\n sep = \"; (Data for Range number\",dfc=None):\r\n if mode == 'pf':\r\n print('')\r\n print('---------------------------')\r\n print('POLE FIGURE AQUISITION MODE')\r\n print('---------------------------\\n')\r\n elif mode =='df':\r\n print('')\r\n print('---------------------------')\r\n print('DEFOCUS DATA AQUSITION MODE')\r\n print('---------------------------\\n')\r\n\r\n blocks = make_blocks(filename=filename, sep = sep)\r\n self.blocks = blocks\r\n blocks = block_in(blocks)\r\n header = blocks[0]\r\n self.data_block = blocks[1:len(blocks)]\r\n\r\n # Trim the data_block neatly.\r\n for i in range(len(self.data_block)):\r\n self.data_block[i]=self.data_block[i][0:len(self.data_block[i])-1]\r\n\r\n print('** Total number of data blocks: ', 
len(self.data_block))\r\n\r\n self.th2s = []\r\n set_2thet = set()\r\n # print '%5s %5s %5s %5s'%('2th','khi','time','size')\r\n for i in range(len(self.data_block)):\r\n cb = self.data_block[i] #cb : current block\r\n info = self.block_info(block=cb, echo=echo)\r\n\r\n _2theta, _khi, _steptime, _stepsize = list(map(float,info[:4]))\r\n # print '%5.3f %5.1f %5.2i %5.2i'%(_2theta, _khi, _steptime, _stepsize)\r\n set_2thet.add(round(float(info[0]), 3))\r\n\r\n _2th_ = round(float(info[0]),3)\r\n if _2th_ in self.th2s: pass\r\n else: self.th2s.append(_2th_)\r\n\r\n # print '-----------'\r\n # raise IOError, 'debug'\r\n\r\n\r\n\r\n # th2 = set_2thet.copy()\r\n th2 = self.th2s\r\n self.listh2= self.th2s\r\n\r\n #print th2\"\r\n #if raw_input()=='q': raise IOError\"\r\n\r\n # print 'Kinds of _2theta are printed out'\r\n # for i in range(len(th2)):\r\n # print th2[i], '',\r\n # print\r\n\r\n set_2thet=list(set_2thet)\r\n set_2thet=np.array(set_2thet,dtype='float')\r\n set_2thet=np.sort(set_2thet)\r\n for i in range(len(set_2thet)):\r\n print('%2.2i'%i,set_2thet[i])\r\n\r\n # raise IOError, 'Debug: 2set_2thet'\r\n\r\n # while True:\r\n # try:\r\n # print set_2thet.pop(),' ',\r\n # except: break\r\n # print\r\n\r\n\r\n # self.listh2 = []\r\n # while True:\r\n # try: self.listh2.append(th2.pop())\r\n # except: break\r\n\r\n self.pfs = []\r\n for i in range(len(self.listh2)):\r\n self.pfs.append(self.lookfor(th2=self.listh2[i]))\r\n\r\n ###################################\r\n # polefigures and backgrounds #\r\n ###################################\r\n self.polefigures=[]\r\n self.backgrounds=[]\r\n # bg_determine_condition='digits'\r\n bg_determine_condition='short'\r\n\r\n for i in range(len(self.pfs)):\r\n if self.bg_or_pf(self.pfs[i], condition = bg_determine_condition)=='pf':\r\n self.polefigures.append(self.pfs[i])\r\n elif self.bg_or_pf(self.pfs[i], condition = bg_determine_condition)=='bg':\r\n self.backgrounds.append(self.pfs[i])\r\n\r\n print('\\n')\r\n for i in range(len(self.polefigures)):\r\n print('PF #',i+1)\r\n _2th, _st, _sz, d_alpha, d_khi, _khis = self.pf_info(self.polefigures[i])\r\n print('peak at Bragg the 2theta of ', round(_2th,3))\r\n print('delta alpha = ', d_alpha,' delta khi = ', d_khi, end=' ')\r\n print(' step time :', _st, end=' ')\r\n print(' step size :', _sz)\r\n\r\n\r\n print('\\n')\r\n for i in range(len(self.backgrounds)):\r\n print('BG #', i+1)\r\n _2th, _st, _sz, d_alpha, d_khis, _khis = self.pf_info(self.backgrounds[i])\r\n print('peak at Bragg the 2theta of ', round(_2th,3))\r\n if d_alpha !='unknown': print('delta alpha = ', d_alpha, end=' ')\r\n print(' delta khi = ', d_khi, end=' ')\r\n print(' step time :', _st, end=' ')\r\n print(' step size :', _sz)\r\n\r\n input(' Press Enter >>> ')\r\n\r\n self.__pf_selection__()\r\n\r\n input(\"Press enter if you'd like to proceed >> \")\r\n\r\n if os.name=='nt': os.system('cls')\r\n elif os.name=='posix': os.system('clear')\r\n\r\n print(\"\\n\\n***************************************************************\")\r\n print(\"d_alpha is given to backgrounds, that's most probably because\")\r\n print(\"the backgrounds are measured only at along a certain phi angle\")\r\n print(\"In other words, it's been partially measured.\")\r\n print(\"***************************************************************\")\r\n\r\n \"Combines a certain set of pole figure and backgrounds\"\r\n\r\n #----------------------------------------------------------\r\n # Recommends sets of polefigure and its background measure\r\n 
#----------------------------------------------------------\r\n self.__pf_bg_sets__(bgmode = bgmode)\r\n #access the combination set by self.combi_pf_bg\r\n\r\n \"\"\"\r\n Note that if bgmode is None, background is not subtracted\r\n in the final end. This Ipf = Ipf - dI is not performed in the\r\n next big loop below\r\n \"\"\"\r\n\r\n #----------------------------------------------------------\r\n # Core of this scripts\r\n # Outer-Loop over polefifures\r\n # Inner-Loop over its corresponding two backgrounds\r\n #----------------------------------------------------------\r\n\r\n INTENSITIES =[]\r\n\r\n if bgmode==None:\r\n for i in range(len(self.polefigures)):\r\n INTENSITIES.append([])\r\n for j in range(len(self.polefigures[i])):\r\n INTENSITIES[i].append([])\r\n C_pf = self.polefigures[i][j]\r\n Ipf = th2count(block = C_pf )[0] #intensity\r\n info_temp = self.block_info(C_pf, echo=False)\r\n #normalization by the step time\r\n for k in range(len(Ipf)):\r\n Ipf[k] = Ipf[k]/float(info_temp[2])\r\n INTENSITIES[i][j].append(Ipf[k])\r\n\r\n #monitoring upon polefigure\r\n if monitoring ==True:\r\n if os.name=='posix': os.system('clear')\r\n elif os.name=='nt': os.system('cls')\r\n print(\" You chose not to subtract the backgrounds\")\r\n print(\" Now you have %i polefigure(s) \"%(len(self.polefigures)))\r\n print(\" Do you like to plot pole figures ? \")\r\n if input( 'yes/no') == 'yes':\r\n for i in range(len(self.polefigures)):\r\n __pf_plot__(intensity = polefigures[i], ifig=6+i)\r\n else : pass\r\n else: pass\r\n\r\n elif bgmode!=None:\r\n for i in range(len(self.polefigures)):\r\n INTENSITIES.append([])\r\n \"On each polefigure set\"\r\n iask = True\r\n for j in range(len(self.polefigures[i])):\r\n INTENSITIES[i].append([])\r\n \" On each chi \"\r\n R_bg = self.backgrounds[self.combi_pf_bg[i][0]][j]\r\n L_bg = self.backgrounds[self.combi_pf_bg[i][1]][j]\r\n C_pf = self.polefigures[i][j]\r\n Ipf = th2count(block = C_pf)[0]\r\n Ibgl = th2count(block = R_bg)[0]\r\n Ibgr = th2count(block = L_bg)[0]\r\n info_temp = self.block_info(C_pf, echo=False)\r\n info_temp = list(map(float,info_temp[0:4]))\r\n pf_2th = info_temp[0]\r\n pf_khi = info_temp[1]\r\n pf_steptime = info_temp[2]\r\n pf_stepsize = info_temp[3]\r\n info_temp = self.block_info(L_bg, echo=False)\r\n info_temp = list(map(float,info_temp[0:4]))\r\n L_bg_2th = info_temp[0]\r\n L_bg_khi = info_temp[1]\r\n L_bg_steptime = info_temp[2]\r\n L_bg_stepsize = info_temp[3]\r\n info_temp = self.block_info(R_bg, echo=False)\r\n info_temp = list(map(float,info_temp[0:4]))\r\n R_bg_2th = info_temp[0]\r\n R_bg_khi = info_temp[1]\r\n R_bg_steptime = info_temp[2]\r\n R_bg_stepsize = info_temp[3]\r\n \"Normalize the intensity by its steptime\"\r\n for k in range(len(Ipf)): Ipf[k] =Ipf[k]/float(pf_steptime)\r\n for k in range(len(Ibgl)): Ibgl[k]=Ibgl[k]/float(L_bg_steptime)\r\n for k in range(len(Ibgr)): Ibgr[k]=Ibgr[k]/float(L_bg_steptime)\r\n\r\n if bgmode!=None:\r\n bglr_len = [len(Ibgl),len(Ibgr)]\r\n if any(bglr_len[k] !=len(Ipf) for k in range(2)):\r\n print('** Partial background measured **')\r\n pass\r\n\r\n elif bgmode==None: print('** No Background subtraction **')\r\n\r\n for k in range(len(Ipf)):\r\n \"\"\"\r\n If Ibgl and Ibgr were measured at a certain phi,\r\n at different phi then\r\n that certain phi is assumed to be that of the only measured\r\n \"\"\"\r\n try: cibgl = Ibgl[k]\r\n except IndexError: cibgl = Ibgl[0]\r\n try: cibgr = Ibgr[k]\r\n except IndexError: cibgr = Ibgr[0]\r\n slope = (cibgr - cibgl)/(R_bg_2th-L_bg_2th)\r\n 
dI = slope * (pf_2th - L_bg_2th) + cibgl\r\n Ipf[k] = Ipf[k] - dI\r\n if Ipf[k] < 0:\r\n if iask==True:\r\n print('Caution) Negative value from prior BG subtraction: ', end=' ')\r\n print('value = ', Ipf[k])\r\n print('Do you want to keep on going(yes)?, or', end=' ')\r\n print(\"Don't ask this to the end(ignore)\")\r\n ans = input('Type answer (yes/ignore) >> ')\r\n if len(ans)==0: pass\r\n elif ans =='yes': pass\r\n elif ans =='ignore': iask = False; pass\r\n elif ans =='no':\r\n print(\"\\n******************************************\")\r\n print(\"There's no 'no' answer here\")\r\n print(\"Negative intensity is physically non-sense\")\r\n print(\"The code raises an error\")\r\n print(\"******************************************\")\r\n input()\r\n raise IOError\r\n else: raise IOError\r\n Ipf[k] = 1\r\n elif iask == False:\r\n print(\"If negative intensity will be returned to be 1.\")\r\n Ipf[k] = 1\r\n INTENSITIES[i][j].append(Ipf[k])\r\n\r\n\r\n self.INTENSITIES = INTENSITIES\r\n if mode =='pf':\r\n ## find defocus files\r\n dff=[]\r\n if type(dfc)==type(None):\r\n dff = glob.glob('*.dfc')\r\n else:\r\n dff = dfc\r\n\r\n if len(dff)==0:\r\n print('You do not have any *.dfc file')\r\n else:\r\n print('*********************************************')\r\n print('%15s\\n'%('Available FILES and its _2THETA'))\r\n print('%3s %15s %6s %6s'%('ID','FILENAME','PEAK_AT','COMMENT'))\r\n for i in range(len(dff)):\r\n ff = open(dff[i],'r')\r\n lines = ff.readlines()\r\n try: float(lines[3].split('=')[1])\r\n except:\r\n print('Could not get %s file rightly.\\n'%(dff[i]))\r\n print('**************************************')\r\n print(\"Contents of the file is shown as below\\n\")\r\n if os.name =='posix': os.system('cat %s'%(dff[i]))\r\n elif os.name =='nt': os.system('type %s'%(dff[i]))\r\n print('\\n Please type enter to proceed'); input()\r\n pass\r\n else:\r\n _2th = float(lines[3].split('=')[1])\r\n comment = lines[2]\r\n ff.close()\r\n print('%3i %15s %5.3f %s'%(i, dff[i], _2th, comment))\r\n\r\n for i in range(len(self.INTENSITIES)):\r\n print(' Type the defocus correction file id (from 0)')\r\n print(' minus value (e.g. 
-1) will turn down ', end=' ')\r\n print('the defoucssing correction')\r\n id_dfc = input(' >>> ')\r\n id_dfc = int(id_dfc)\r\n if id_dfc<0: pass\r\n else: self.__dfc_correct__(filename=dff[id_dfc], pf_id=i)\r\n\r\n #--------------------------------------\r\n # Normalize the intensity so that max\r\n # intensity in the all pole figures in\r\n # the given file equals to 9999 (modified from 999 to 9999)\r\n #--------------------------------------\r\n self.__normalize___()\r\n\r\n #--------------------------------------\r\n # WRITING ACTIVITY TO POLE FIGURE FILE\r\n #--------------------------------------\r\n # if os.name=='nt': os.system('cls')\r\n # elif os.name=='posix': os.system('clear')\r\n\r\n print(\"############################\")\r\n print(\" WRITING ACTIVITY \")\r\n print(\"############################\\n\")\r\n print(\" available formats: epf(0), list(1), No file writing(-1)\")\r\n fileformat = input(\"Type format flag(0,1,-1)(default=0) >> \")\r\n if len(fileformat)==0: fileformat='epf'\r\n else:\r\n if fileformat =='0': fileformat='epf'\r\n elif fileformat =='1': fileformat='list'\r\n elif fileformat =='-1': fileformat=None\r\n else: print('Wrong fileformat input'); raise IOError\r\n if fileformat == None: pass\r\n else:\r\n print(\" Type the file name\")\r\n filename = input(\" >> \")\r\n self.write(filename=filename, mode=fileformat)\r\n\r\n elif mode=='df': #defocus correction curve making mode\r\n #--------------------------------\r\n # DEFOCUS CORRECTION CURVE FILE\r\n #--------------------------------\r\n print(\"\\n\\n ****************************************\")\r\n print(\" * Defocus correction curve file maker *\")\r\n print(\" ****************************************\\n\")\r\n for i in range(len(self.polefigures)):\r\n self.defc(pfid=i, filename='dfc_'+str(i)+'.dfc', mode='avg')", "title": "" }, { "docid": "a911d1d4ca26448aa9c288c526fb16a4", "score": "0.48980546", "text": "def load_sequential_aras_frequency_occur(isPerson):\n #by Adele\n #print (\"module_path: \" + module_path )\n #/by Adele\n \n with open(r\"E:\\Lessons_tutorials\\Behavioural user profile articles\\Datasets\\Aras\\House A\\CSV_Summery\\Sequential\\Day\\frequency_occur\\Whole_data.csv\") as csv_file:\n \n data_file = csv.reader(csv_file)\n #target_names = np.array(temp[2:]) # the name of labels \n n_samples = 2068#sum(1 for row in data_file) \n n_features = 41\n data = np.empty((n_samples, n_features))\n target = np.empty((n_samples,), dtype=np.int)\n temp_data = np.empty(n_features+1)# copy whole data to it, then select all col except 20\n \n idx_IN_columns = [] \n for i, ir in enumerate(data_file):\n #print(i)\n temp_data = np.asarray(ir[:42], dtype=np.int)\n #print(temp_data)\n if isPerson == True:\n n_features = 40\n data = np.empty((n_samples, n_features))\n \n # in version 2 the activity col is removed too because it is not important, sensor data are required.\n idx_IN_columns = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39]#all of features except feature 40 (Person_ID\n target[i] = np.asarray(ir[40], dtype=np.int)# Person ID is the class\n else:\n idx_IN_columns = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40]#all of features except feature 41 (Activity_ID\n target[i] = np.asarray(ir[41], dtype=np.int)# Activity ID is the class\n \n \n extractedData = temp_data[idx_IN_columns]\n #print(extractedData)\n data[i] = np.asarray(extractedData , dtype=np.int)\n 
print(target[i])\n \n return Bunch(data=data, target=target)\n # target_names=target_names)\n #feature_names=['sepal length (cm)', 'sepal width (cm)',\n # 'petal length (cm)', 'petal width (cm)'])", "title": "" }, { "docid": "632db86e8061ec1ef3f2d5c2ce481b9e", "score": "0.48885354", "text": "def __init__(self, reference_file):\n self.reference_results = np.load(open(reference_file, 'rb'))", "title": "" }, { "docid": "13f52f070f653d54098e1beda0a9fcbc", "score": "0.487911", "text": "def testReferenceReturned(self):\n h5f = h5py.File(self.h5_fname, \"r+\")\n\n # n-D\n a0 = h5f[\"my_array\"]\n self.aw.setArrayData(a0, copy=False)\n a1 = self.aw.getData(copy=False)\n self.assertIs(a0, a1)\n\n # 1D\n b0 = h5f[\"my_1D_array\"]\n self.aw.setArrayData(b0, copy=False)\n b1 = self.aw.getData(copy=False)\n self.assertIs(b0, b1)\n\n h5f.close()", "title": "" }, { "docid": "a71e49456ea0c7eb48c48f245608213c", "score": "0.48657942", "text": "def load_ref_hist_mappings(self):\n\n # DEBUG DEBUG DEBUG\n assert len(self.ref_hist_mappings) < 1, \\\n \"ERROR Should not be RE-loading \" \\\n \"reference histogram mappings!\"\n # DEBUG DEBUG DEBUG end\n\n self.logger.info(\"Loading reference histogram mappings \" \\\n \"from file `%s'\" % \\\n self.ref_hist_mappings_file_name)\n\n mappings_lines = None\n try:\n mappings_file = file(self.ref_hist_mappings_file_name, \"r\")\n mappings_lines = mappings_file.readlines()\n mappings_file.close()\n except IOError:\n msg = \"ERROR: Could not open reference histogram mapping \"\\\n \"file `%s'\" % self.ref_hist_mappings_file_name\n self.logger.fatal(msg)\n raise Error(msg)\n\n ##########\n\n # The format we expect is: two white-space separated pieces\n # per line. The first the dataset name for which the reference\n # should be used, the second one the name of the reference\n # histogram in the database.\n\n for mapping in mappings_lines:\n # Skip comment lines.\n if not mapping.startswith(\"#\"):\n mapping = mapping.strip()\n if len(mapping) > 0:\n mapping_pieces = mapping.split()\n if len(mapping_pieces) != 2:\n msg = \"ERROR: The reference histogram mapping \" \\\n \"file contains a line I don't \" \\\n \"understand:\\n %s\" % mapping\n self.logger.fatal(msg)\n raise Error(msg)\n dataset_name = mapping_pieces[0].strip()\n ref_hist_name = mapping_pieces[1].strip()\n # We don't want people to accidentally specify\n # multiple mappings for the same dataset. 
Just\n # don't accept those cases.\n if dataset_name in self.ref_hist_mappings:\n msg = \"ERROR: The reference histogram mapping \" \\\n \"file contains multiple mappings for \" \\\n \"dataset `%s'.\"\n self.logger.fatal(msg)\n raise Error(msg)\n\n # All is well that ends well.\n self.ref_hist_mappings[dataset_name] = ref_hist_name\n\n ##########\n\n self.logger.info(\" Successfully loaded %d mapping(s)\" % \\\n len(self.ref_hist_mappings))\n max_len = max([len(i) for i in self.ref_hist_mappings.keys()])\n for (map_from, map_to) in self.ref_hist_mappings.items():\n self.logger.info(\" %-*s -> %s\" % \\\n (max_len, map_from, map_to))\n\n # End of load_ref_hist_mappings.", "title": "" }, { "docid": "0d12cfe474accc1198daa4cb54faa3c2", "score": "0.4860401", "text": "def _readData(self):\n fh = pf.open(self.values['input'])\n self.data = fh[self.values['ext']].data\n self.hdu = fh[self.values['ext']].header\n self.log.info('Read data from {0:>s} extension {1:d}'.format(self.values['input'], self.values['ext']))\n self.log.debug('Read data dimensions are {0:d} times {1:d}'.format(*self.data.shape))\n\n if 'rue' in self.hdu['OVERSCA'] and self.data.shape[0] < 2500:\n #a single quadrant; over and prescans were simulated, need to be removed...\n self.log.info('Trying to remove pre- and overscan regions from the given in input data...')\n self.log.info('Quadrant is {0:d}'.format(self.hdu['QUADRANT']))\n\n if self.hdu['QUADRANT'] in (0, 2):\n self.data = self.data[:, self.hdu['PRESCANX']:-self.hdu['OVRSCANX']].copy()\n else:\n self.data = self.data[:, self.hdu['OVRSCANX']:-self.hdu['PRESCANX']].copy()", "title": "" }, { "docid": "5c5ac5c5bf1264493e432f358fae4a86", "score": "0.48594093", "text": "def calc_eff_mark2(vanad,backgr,norm_ref=\"bm3_counts\",bottom = 0, top = 127,\n low_frame=0,high_frame=967,eff_sig=10):\n\n import stat,datetime,time,sys\n starttime = time.time()\n omega = vanad[\"mom\"][0] # for reference\n takeoff = vanad[\"mtth\"][0]\n # TODO: intelligent determination of Wombat wavelength\n #crystal = AddCifMetadata.pick_hkl(omega-takeoff/2.0,\"335\") #post April 2009 used 335 only\n #\n # Get important information from the basic files\n #\n # Get file times from timestamps as older NeXuS files had bad values here\n #\n #wl = AddCifMetadata.calc_wavelength(crystal,takeoff)\n vtime = os.stat(vanad.location)[stat.ST_CTIME]\n vtime = datetime.datetime.fromtimestamp(vtime)\n vtime = vtime.strftime(\"%Y-%m-%dT%H:%M:%S%z\")\n btime = os.stat(backgr.location)[stat.ST_CTIME]\n btime = datetime.datetime.fromtimestamp(btime)\n btime = btime.strftime(\"%Y-%m-%dT%H:%M:%S%z\")\n # This step required to insert our metadata hooks into the dataset object\n AddCifMetadata.add_metadata_methods(vanad)\n AddCifMetadata.add_metadata_methods(backgr)\n # Fail early\n print 'Using %s and %s' % (str(vanad.location),str(backgr.location))\n # Subtract the background\n if norm_ref is not None:\n norm_target = reduction.applyNormalization(vanad,norm_ref,-1)\n # store for checking later\n #check_val = backgr[8,64,64]\n nn = reduction.applyNormalization(backgr,norm_ref,norm_target)\n # \n print 'Normalising background to %f' % norm_target\n pure_vanad = (vanad - backgr).get_reduced() #remove the annoying 2nd dimension\n # drop any frames that have been requested\n if low_frame != 0 or high_frame < len(pure_vanad):\n pure_vanad = pure_vanad[low_frame:high_frame]\n print 'Only using part of supplied data: %d to %d, new length %d' % (low_frame,high_frame,len(pure_vanad))\n stth = pure_vanad.stth #store for later\n # 
pure_vanad.copy_cif_metadata(vanad)\n #print 'Check: %f, %f -> %f' % (vanad[8,64,64],check_val,pure_vanad[8,64,64])\n nosteps = pure_vanad.shape[0]\n # generate a rough correction\n simple_vanad = pure_vanad.intg(axis=0) # sum over the detector step axis\n # calculate typical variability across the detector\n eff_array = array.zeros_like(simple_vanad)\n eff_array[simple_vanad > 10] = simple_vanad\n eff_array = eff_array*eff_array.size/eff_array.sum()\n eff_array[simple_vanad > 0] = 1.0/eff_array\n # apply this temporary correction to last frame which we expect to have the most\n # peaks, as no V peaks will be at low enough angle to fall off the\n # detector during scanning. If this assumption is incorrect, a more\n # rigourous routine could do this twice, for the first and last frames\n frame_last = (pure_vanad.storage[nosteps-1]*eff_array).intg(axis=0) #sum vertically\n print 'Final frame max, min values after correction: %f %f' % (max(frame_last),min(frame_last))# find the peaks, get a background too\n peak_list,back_lev = peak_find(frame_last,sig_val=eff_sig)\n # Prepare return information\n peak_pos = [(stth[nosteps-1]+a*0.125,b*0.125) for (a,b) in peak_list]\n info_list = \"List of peaks found and purged:\\n Position Purge range\"\n for pos,fwhm in peak_pos:\n info_list += \"%8.2f %8.2f\\n\" % (pos,fwhm)\n # Remove these peaks from all frames.\n # degrees. The step size is...\n step_size = (stth[nosteps-1]-stth[0])/(nosteps-1)\n print 'Found step size of %f' % step_size\n # Remove all peaks from the pure data\n purged = peak_scrub(pure_vanad,peak_list,step_size,start_at_end=True)\n # Get gain based on pixels above quarter background\n eff_array,eff_error,non_zero_contribs,fudge_map = nonzero_gain(purged,back_lev/(pure_vanad.shape[1]*4))\n final_map = Dataset(eff_array)\n final_map.var = eff_error\n return final_map,non_zero_contribs,fudge_map,frame_last #last frame, for reference\n #return {\"_[local]_efficiency_data\":eff_array.transpose(),\n # \"_[local]_efficiency_variance\":eff_error.transpose(),\n # \"contributors\":non_zero_contribs,\n # \"_[local]_efficiency_raw_data\":os.path.basename(str(vanad.location)),\n # \"_[local]_efficiency_raw_timestamp\":vtime,\n # \"_[local]_efficiency_background_data\":os.path.basename(str(backgr.location)),\n # \"_[local]_efficiency_background_timestamp\":btime,\n # \"_[local]_efficiency_determination_material\":\"Vanadium\",\n # \"_[local]_efficiency_determination_method\":\"From flood field produced by 9mm V rod\",\n # \"_[local]_efficiency_monochr_omega\":omega,\n # \"_pd_proc_info_data_reduction\":\n # \"Vanadium peaks at the following positions were purged:\\n Pos Range\\n \" + info_list\n # },non_zero_contribs", "title": "" }, { "docid": "adae34f92e6a88cade15a7c5ee1c0fb7", "score": "0.48560834", "text": "def get_ref_af_y ( af_file ):\n af = cst.AirfoilShape.from_txt_file(af_file)\n mod_af = cst.AirfoilShape.from_cst_parameters(af.cst().cst_lower, af.te_lower, af.cst().cst_upper, af.te_upper)\n return mod_af.yco", "title": "" }, { "docid": "f73e849bb7ef8116c29328de15473063", "score": "0.48479775", "text": "def __read_reference_solutions():\n exact = dict()\n for varid in ('psi', 'chi', 'vrt', 'div', 'uchi', 'vchi', 'upsi', 'vpsi',\n 'chigradu', 'chigradv', 'uwnd', 'vwnd'):\n try:\n filename = os.path.join(test_data_path(),\n '{!s}.ref.npy'.format(varid))\n exact[varid] = np.load(filename).squeeze()\n except IOError:\n raise IOError('required data file not found')\n return exact", "title": "" }, { "docid": "5bc9d8b60b8900b00e743c653a866b9e", 
"score": "0.48293543", "text": "def read(self, infname):\n if self.npts > 0:\n print ('*** receiver function data is already stored!')\n return False\n inarr \t\t = np.loadtxt(infname, dtype=np.float64)\n self.npts = inarr.shape[0] \n self.to = inarr[:,0]\n self.rfo = inarr[:,1]\n try:\n self.stdrfo = inarr[:,2]\n except IndexError:\n self.stdrfo = np.ones(self.npts, dtype = np.float64)*0.1\n self.fs = 1./(self.to[1] - self.to[0])\n return True", "title": "" }, { "docid": "050a5f5b3caf7cad312ee340ab96a563", "score": "0.48207533", "text": "def ff_data_augmentation(patch, ref):\n patch = patch.clone()\n temp = (torch.rand(len(ref)) + torch.rand(len(ref)) * (-1)).numpy()\n patch = torch.FloatTensor(patch.numpy() + ref * temp)\n return patch", "title": "" }, { "docid": "08571ea5d7dabe236c466967511b90cb", "score": "0.48010716", "text": "def test_reindex_against_reference(dials_regression: Path, tmp_path):\n data_dir = os.path.join(dials_regression, \"indexing_test_data\", \"i04_weak_data\")\n pickle_path = os.path.join(data_dir, \"indexed.pickle\")\n experiments_path = os.path.join(data_dir, \"experiments.json\")\n\n commands = [\n shutil.which(\"dials.reindex\"),\n pickle_path,\n experiments_path,\n \"change_of_basis_op=a,b,c\",\n \"space_group=P4\",\n \"output.reflections=P4.refl\",\n \"output.experiments=P4.expt\",\n ]\n\n result = subprocess.run(commands, cwd=tmp_path, capture_output=True)\n assert not result.returncode and not result.stderr\n assert (tmp_path / \"P4.refl\").is_file()\n assert (tmp_path / \"P4.expt\").is_file()\n new_experiments = load.experiment_list(tmp_path / \"P4.expt\", check_format=False)\n assert new_experiments[0].crystal.get_space_group().type().hall_symbol() == \" P 4\"\n\n # Now have something in P4, get another dataset in a different indexing scheme\n\n cb_op = sgtbx.change_of_basis_op(\"a,-b,-c\")\n commands = [\n shutil.which(\"dials.reindex\"),\n \"P4.refl\",\n \"P4.expt\",\n f\"change_of_basis_op={cb_op}\",\n \"output.experiments=P4_reindexed.expt\",\n \"output.reflections=P4_reindexed.refl\",\n ]\n result = subprocess.run(commands, cwd=tmp_path, capture_output=True)\n assert not result.returncode and not result.stderr\n\n # now run reference reindexing\n commands = [\n shutil.which(\"dials.reindex\"),\n \"P4.refl\",\n \"P4.expt\",\n \"reference.experiments=P4_reindexed.expt\",\n \"reference.reflections=P4_reindexed.refl\",\n ]\n result = subprocess.run(commands, cwd=tmp_path, capture_output=True)\n assert not result.returncode and not result.stderr\n\n # expect reindexed_reflections to be same as P4_reindexed, not P4_reflections\n reindexed_reflections = flex.reflection_table.from_file(tmp_path / \"reindexed.refl\")\n P4_reindexed = flex.reflection_table.from_file(tmp_path / \"P4_reindexed.refl\")\n P4_reflections = flex.reflection_table.from_file(tmp_path / \"P4.refl\")\n\n h1, k1, l1 = reindexed_reflections[\"miller_index\"].as_vec3_double().parts()\n h2, k2, l2 = P4_reindexed[\"miller_index\"].as_vec3_double().parts()\n h3, k3, l3 = P4_reflections[\"miller_index\"].as_vec3_double().parts()\n\n # hkl1 and hkl2 should be same, as should have been reindexed by against the\n # reference, with the program determining a reindexing operator of a,-b,-c\n assert list(h1) == pytest.approx(list(h2))\n assert list(l1) == pytest.approx(list(l2))\n assert list(k1) == pytest.approx(list(k2))\n # h1 and h3 should be same, but not l and k, as these dataset should differ\n # by an a twinning operator of a,-b,-c\n assert list(h1) == pytest.approx(list(h3))\n assert 
list(l1) != pytest.approx(list(l3))\n assert list(k1) != pytest.approx(list(k3))", "title": "" }, { "docid": "fcd9ae449be4d8904036adbd60cda4b1", "score": "0.48007515", "text": "def load_sequential_aras_occur(isPerson):\n #by Adele\n #print (\"module_path: \" + module_path )\n #/by Adele\n \n with open(\"E:\\Lessons_tutorials\\Behavioural user profile articles\\Datasets\\Aras\\House A\\CSV_Summery\\Sequential\\Day\\occur\\Whole_data.csv\") as csv_file:\n \n data_file = csv.reader(csv_file)\n #target_names = np.array(temp[2:]) # the name of labels \n n_samples = 2068#sum(1 for row in data_file) \n n_features = 41\n data = np.empty((n_samples, n_features))\n target = np.empty((n_samples,), dtype=np.int)\n temp_data = np.empty(n_features+1)# copy whole data to it, then select all col except 20\n \n idx_IN_columns = [] \n for i, ir in enumerate(data_file):\n #print(i)\n temp_data = np.asarray(ir[:42], dtype=np.int)\n #print(temp_data)\n if isPerson == True:\n n_features = 40\n data = np.empty((n_samples, n_features))\n \n # in version 2 the activity col is removed too because it is not important, sensor data are required.\n idx_IN_columns = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39]#all of features except feature 40 (Person_ID\n target[i] = np.asarray(ir[40], dtype=np.int)# Person ID is the class\n else:\n idx_IN_columns = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40]#all of features except feature 41 (Activity_ID\n target[i] = np.asarray(ir[41], dtype=np.int)# Activity ID is the class\n \n \n extractedData = temp_data[idx_IN_columns]\n #print(extractedData)\n data[i] = np.asarray(extractedData , dtype=np.int)\n print(target[i])\n \n return Bunch(data=data, target=target)\n # target_names=target_names)\n #feature_names=['sepal length (cm)', 'sepal width (cm)',\n # 'petal length (cm)', 'petal width (cm)'])", "title": "" }, { "docid": "d7d1e5810484b203cd09c731323929bc", "score": "0.47900528", "text": "def read_python_FFTfield(self):\n dcr=np.load(self.infile)\n Ng=dcr.shape[1]\n# print('ngrid='+str(Ng))\n if (Ng != self.Ngrid):\n print('Ngrid set='+str(self.Ngrid))\n print('Ngrid read='+str(Ng))\n exit()\n # Set NYQUIST to be real\n return dcr", "title": "" }, { "docid": "a993f7f5ad1271955e95a4737bd90ead", "score": "0.47891465", "text": "def read():\r\n print \"Reading S. cerevisiae FunCat annotated sequence data set (D1 FC seq) ...\"\r\n dir = dirname(dirname(abspath(__file__))) + sep + 'datasets' + \\\r\n sep + 'S_cerevisiae_FC' + sep + 'seq_yeast_FUN' + sep\r\n train_data = dir + 'seq_yeast_FUN.train.arff'\r\n valid_data = dir + 'seq_yeast_FUN.valid.arff'\r\n test_data = dir + 'seq_yeast_FUN.test.arff'\r\n print \" Reading S. cerevisiae FunCat annotated sequence (D1 FC seq) TRAIN set ...\"\r\n train, idx2attr, idx2class = transform_data(\r\n train_data, include_meta=True)\r\n print \" ... Finished.\"\r\n print \" Reading S. cerevisiae FunCat annotated sequence (D1 FC seq) VALIDATION set ...\"\r\n valid = transform_data(valid_data)\r\n print \" ... Finished.\"\r\n print \" Reading S. cerevisiae FunCat annotated sequence (D1 FC seq) TEST set ...\"\r\n test = transform_data(test_data)\r\n print \" ... Finished.\"\r\n print \" Joining S. cerevisiae FunCat annotated sequence (D1 FC seq) TEST and VALIDATION set ...\"\r\n tv_data = _join(train, valid)\r\n print \" ... Finished.\"\r\n print \"... 
Finished\"\r\n return tv_data, test, idx2attr, idx2class", "title": "" }, { "docid": "812d5fbad9eb70a716ab4b58f2a23cae", "score": "0.4787292", "text": "def getRef(fp, n_ref):\n\n refs = {}\n reads = {}\n cnt = 0\n for record in fasta.fasta_iter(fp):\n name = record['name']\n seq = record['sequence']\n if 'ReFeReNcE' in name:\n refs[name] = seq\n cnt += 1 \n if cnt >= n_ref:\n break\n\n if cnt < n_ref:\n print 'not enough ReFeReNcE seqs'\n sys.exit(1)\n\n template = refs.values()[0].upper() #use the first refSeq as template\n profile = []\n length = len(template)\n for i in range(length):\n if template[i] == 'N' or not template[i].isalpha():\n profile.append(0)\n else:\n profile.append(1)\n\n return name, template, profile #return the ref seq and its mask profile", "title": "" }, { "docid": "96d98dc265fa87cdefdbc6da9e3416d9", "score": "0.4785966", "text": "def map_isoforms_to_reference_transcripts(self):\n m5out = self.output_analysis_fn + \".blasr.out.m5\"\n cmd = 'blasr %s %s --bestn 1 -m 5 --out %s' % \\\n (self.isoseq_output_fa, self.reference_transcripts_fn, m5out)\n execute(cmd)\n return [r for r in BLASRM5Reader(m5out)]", "title": "" }, { "docid": "585aa88fd1cce6de7ba92541b433678e", "score": "0.47792578", "text": "def load_data(scenario):\n data_file = 'measurements_' + str(scenario) + '.data'\n ref_file = 'reference_' + str(scenario) + '.data'\n \n data = np.loadtxt(data_file,skiprows = 0)\n reference = np.loadtxt(ref_file,skiprows = 0)\n \n return (data,reference)", "title": "" }, { "docid": "22f9d5c4b308ce33d72cf660f99e4c55", "score": "0.47790813", "text": "def ref_delta_g_20():\n return np.load('tests/ref-data/spc2-fes1d-delta-gs-20.npy')", "title": "" }, { "docid": "c4c33a627258edf7ae2a7db14f1deb3d", "score": "0.4776031", "text": "def LoadFRG(fpath):\n\tdef readFRGFile(fpath):\n\t\tdata = {}\n\n\t\twith open(fpath, 'r') as f:\n\t\t\tReadingHeader = True \t#check when header has been fully parsed\n\t\t\tBothDirections = False\t#sweep forward + reverse\n\n\t\t\twhile ReadingHeader:\t\n\t\t\t\tline = f.readline()\n\t\t\t\tlineparts = line.split(':')\n\n\t\t\t\tif 'Data' in lineparts[0]:\n\t\t\t\t\tReadingHeader = False\n\t\t\t\t\tf.readline()\n\t\t\t\t\tf.readline()\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdata[lineparts[0]] = float(lineparts[1])\n\t\t\t\t\texcept:\n\t\t\t\t\t\tdata[lineparts[0]] = lineparts[1][1:].replace('\\n', '')\n\n\n\t\t\tvforward = []\n\t\t\tiforward = []\n\t\t\ttimeforward = []\t\t\t\n\t\t\tif data['sweepDir'] == 'Forward + Reverse':\n\t\t\t\tBothDirections = True\n\t\t\t\tvreverse = []\n\t\t\t\tireverse = []\n\t\t\t\ttimereverse = []\n\n\t\t\tfor line in f:\n\t\t\t\tlineparts = f.readline().split('\\t')\n\t\t\t\tif len(lineparts) == 1:\n\t\t\t\t\tbreak\n\t\t\t\tvforward.append(lineparts[0])\t\n\t\t\t\tiforward.append(lineparts[1])\n\t\t\t\ttimeforward.append(lineparts[2])\n\t\t\t\tif BothDirections:\n\t\t\t\t\tvreverse.append(lineparts[0])\t\n\t\t\t\t\tireverse.append(lineparts[1])\n\t\t\t\t\ttimereverse.append(lineparts[2])\n\n\t\t\tdata['V'] = np.array(vforward).astype(float)\n\t\t\tdata['I'] = np.array(iforward).astype(float)\n\t\t\tdata['J'] = data['I']/data['area_cm2']\n\t\t\tdata['delay'] = np.array(timeforward).astype(float)\n\n\t\t\tif BothDirections:\n\t\t\t\tdata2 = data.copy()\n\t\t\t\tdata2['sampleName'] = data['sampleName'] + '_Reverse'\n\t\t\t\tdata['sampleName'] = data['sampleName'] + '_Forward'\n\t\t\t\tdata2['V'] = np.array(vreverse).astype(float)\n\t\t\t\tdata2['I'] = np.array(ireverse).astype(float)\n\t\t\t\tdata2['J'] = 
data2['I']/data2['area_cm2']\n\t\t\t\tdata2['delay'] = np.array(timereverse).astype(float)\n\t\t\t\toutput = [data, data2]\n\t\t\telse:\n\t\t\t\toutput = data\n\n\t\treturn output\n\n\n\n\tfids = [os.path.join(fpath, x) for x in os.listdir(fpath)]\n\t\n\talldata = {}\n\tfor f in fids:\n\t\toutput = readFRGFile(f)\n\t\tif type(output) == list:\n\t\t\tfor each in output:\n\t\t\t\talldata[each['sampleName']] = each\n\t\telse:\n\t\t\talldata[output['sampleName']] = output\n\treturn alldata", "title": "" }, { "docid": "97e8bf36e1576f80ac6529d3e15e972c", "score": "0.4774368", "text": "def read_dat(filename):\n with open(filename, 'r') as dat_file:\n # reading in the header block\n is_header = True\n header_block = []\n while is_header:\n tmp_line = dat_file.readline()\n if tmp_line.find('#') == 0:\n header_block.append(tmp_line)\n eq_pos = tmp_line.find('=')\n if tmp_line.find(\"De\") != -1:\n De = float(tmp_line[eq_pos+1:])\n elif tmp_line.find(\"time\") != -1:\n time = float(tmp_line[eq_pos+1:])\n elif tmp_line.find(\"viscRat\") != -1:\n visc_rat = float(tmp_line[eq_pos+1:])\n elif tmp_line.find(\"volRat\") != -1:\n vol_rat = float(tmp_line[eq_pos+1:])\n elif tmp_line.find(\"deformRate\") != -1:\n deformRate = float(tmp_line[eq_pos+1:])\n elif tmp_line.find(\"tnum\") != -1:\n _tnum = float(tmp_line[eq_pos+1:])\n else:\n is_header = False\n # get the next two Tecplot lines and then go back one line\n header_block.append(tmp_line)\n last_pos = dat_file.tell()\n tmp_line = dat_file.readline()\n header_block.append(tmp_line)\n dat_file.seek(last_pos)\n\n try:\n reader = csv.reader(dat_file, delimiter=' ')\n type_line = next(reader)\n nvert = int(type_line[1][2:])\n nface = int(type_line[2][2:])\n all_data = [] # position + all other data\n f2v = [] # connectivity\n\n count = 0\n while count < nvert:\n lst = next(reader)\n all_data.append(lst)\n count += 1\n all_data = np.array(all_data, dtype=float)\n\n count = 0\n while count < nface:\n lst = next(reader)[0:3] # should just be 3 values\n f2v.append([int(i) for i in lst])\n count += 1\n f2v = np.array(f2v, dtype=int)\n\n except csv.Error as e:\n sys.exit('file %s, line %d: %s' % (filename, reader.line_num, e))\n\n try:\n params = {\"visc_rat\": visc_rat, \"vol_rat\": vol_rat, \"De\": De, \"deformRate\": deformRate,\n \"time\": time, \"nvert\": nvert, \"nface\": nface, \"header_block\": header_block}\n return (all_data, f2v, params)\n except NameError as e:\n print(\"One of the required variables was not instantiated: {}\".format(e))", "title": "" }, { "docid": "80db1bee8c85d44e696e1e75742402f6", "score": "0.47732306", "text": "def _read_calibration(self) -> None:\n coeff = self._read(_BME680_BME680_COEFF_ADDR1, 25)\n coeff += self._read(_BME680_BME680_COEFF_ADDR2, 16)\n\n coeff = list(struct.unpack(\"<hbBHhbBhhbbHhhBBBHbbbBbHhbb\", bytes(coeff[1:39])))\n # print(\"\\n\\n\",coeff)\n coeff = [float(i) for i in coeff]\n self._temp_calibration = [coeff[x] for x in [23, 0, 1]]\n self._pressure_calibration = [\n coeff[x] for x in [3, 4, 5, 7, 8, 10, 9, 12, 13, 14]\n ]\n self._humidity_calibration = [coeff[x] for x in [17, 16, 18, 19, 20, 21, 22]]\n self._gas_calibration = [coeff[x] for x in [25, 24, 26]]\n\n # flip around H1 & H2\n self._humidity_calibration[1] *= 16\n self._humidity_calibration[1] += self._humidity_calibration[0] % 16\n self._humidity_calibration[0] /= 16\n\n self._heat_range = (self._read_byte(0x02) & 0x30) / 16\n self._heat_val = self._read_byte(0x00)\n self._sw_err = (self._read_byte(0x04) & 0xF0) / 16", "title": "" }, { "docid": 
"7fb3407c0fe262c2529ac9875b47230a", "score": "0.47712365", "text": "def read_body_Ainc(f,verbose,glueball_corr) :\n for ibin in range(0, numbin):\n for iopA in range(0,numop) :\n for iblockA in range(0, nblock) :\n for iopB in range(0,numop) : \n for iblockB in range(0, nblock) :\n for t in range(0, Ntmax) :\n \n try:\n bbb = f.read(8)\n except IOError as e:\n print (\"I/O error({0}): {1}\".format(e.errno, e.strerror))\n except:\n print (\"Unexpected error:\", sys.exc_info()[0])\n raise\n\n vvv = struct.unpack('d',bbb)\n if verbose :\n print (t, vvv[0])\n \n glueball_corr[iblockA,iblockB,t, iopA,iopB, ibin] = \\\n glueball_corr[iblockA,iblockB,t, iopA,iopB, ibin] + vvv[0]\n\n\n\n f.close()", "title": "" }, { "docid": "f6cfbf516b9e176d0a45a4a8d03bccd5", "score": "0.47711852", "text": "def test_read_data():\n # generate data and save to a tempfile\n Time = np.linspace(0, 1800, num=1800)\n AltTime = Time\n y2 = 0.5 - 0.5 * np.tanh((Time-600)/100) # TGA result\n y = y2/y2[0]\n dy = 0.005-0.005 * np.tanh(Time/100 - 6)**2 # exact derivative of former\n ny = y + 0.1*np.random.randn(len(Time)) # noisy\n outdata = np.stack((Time, AltTime, y, dy, y2, ny), axis=1)\n # delete=False is needed so that the tempfile can be written to in windows\n # create working directory\n fp = open(\"output.csv\", mode=\"w+t\")\n # delete=False so we can close() the file and delete later\n\n np.savetxt(fp.name, outdata,\n delimiter=\",\",\n header=\"Time,AltTime,y,dy,y2,ny\",\n comments='')\n fp.close()\n fname = \"output.csv\"\n # line with only the required fields\n line = {\"fname\": fname,\n \"dep_col_name\": \"y\"}\n # populate rest of the fields\n line = _set_data_line_defaults(line, header=0)\n read_Time, read_y = read_data(**line)\n\n # Test normalization, gradient calculation and ind_col_name functionality\n line['normalize'] = True\n line['dep_col_name'] = 'y2'\n read_Time, norm_y2 = read_data(**line)\n\n # Test normalization, gradient calculation and ind_col_name functionality\n line['gradient'] = True\n line['normalize'] = True\n line['ind_col_name'] = 'AltTime'\n line['dep_col_name'] = 'y2'\n read_AltTime, calc_dy = read_data(**line)\n\n # delete file now that we don't need it anymore\n os.remove(fname)\n\n assert np.allclose(read_y, y)\n assert np.allclose(norm_y2, y)\n assert np.allclose(read_Time, Time)\n assert np.allclose(read_Time, read_AltTime)\n # rtol = 1e-5 fails\n assert np.allclose(dy, calc_dy, rtol=1e-4)", "title": "" }, { "docid": "b4fd7366f1fcf4fd0bdce957f61c242d", "score": "0.47710222", "text": "def genreg(masterphotref_fistar,\n outfile,\n xycols=(1,2)):\n\n # get the x and y coordinate columns from the source list (fistar)\n srcxy = np.genfromtxt(masterphotref_fistar,\n usecols=xycols,\n dtype='f8,f8',\n names=['x','y'])\n\n xsize = 2048.\n ysize = 2048.\n\n sel = (\n (srcxy['x']>xsize+1) | (srcxy['x']<0) |\n (srcxy['y']>ysize+1) | (srcxy['y']<0)\n )\n # in *rare* instances (i.e., this happened for one star, in one ccd, out\n # of the entire TESS cycle1-4 reduction), srcxy can be outside the bounds.\n # in such cases raise a warning and just \"mask\" those xy values to 1, since\n # less than ~10 sources will not affect the photref frame generation\n if len(srcxy[sel]) > 0:\n N_off_silicon = len(srcxy[sel])\n print(f\"WARNING! Got {N_off_silicon} sources in genreg from \"\n f\"{masterphotref_fistar}. 
Masking...\")\n if N_off_silicon > 10:\n msg = 'This should never happen'\n raise AssertionError(msg)\n srcxy[sel] = np.ones_like(srcxy[sel])\n\n # set up the grid (this weirdness is transcribed directly from Chelsea's\n # regslct.py) TODO: figure out WTF this does\n\n BX = 30; BY = 30\n mx = np.zeros(BX*BY)-1\n my = np.zeros(BX*BY)-1\n ma = np.zeros(BX*BY)\n bx = (srcxy['x']*BX/xsize).astype(int)\n by = (srcxy['y']*BY/ysize).astype(int)\n mx[by*bx+bx] = srcxy['x']\n my[by*bx+bx] = srcxy['y']\n\n outf = open(outfile,'wb')\n\n for i in range(int(BX*BY)):\n outf.write((\"%8.0f %8.0f %8.0f\\n\" % (mx[i],my[i],20)).encode(\"utf-8\"))\n\n outf.close()", "title": "" }, { "docid": "52f7bffc915699a950907ba7a5cf945d", "score": "0.4768592", "text": "def get_abi_ref(dataset, check=False, dtype=None):\n ref = dataset.Rad * dataset.kappa0\n if check:\n DQF = dataset.DQF\n ref[DQF < 0] = np.nan\n ref[DQF > 1] = np.nan\n if dtype is None:\n return ref\n else:\n return ref.astype(dtype)", "title": "" }, { "docid": "20f60d1ead558d432600bdd9b5112cf3", "score": "0.4765774", "text": "def __extract_cascadedata(self):\n \n return fromfile(self.fpath, dtype = int32)[:128*128*16*8].reshape(8,16,128,128)", "title": "" }, { "docid": "f600ce327e7bdaf8e39fa1145e1f617d", "score": "0.4762291", "text": "def get_altseq(ref_seq, ref_allele, var_allele, var_pos, strand, features_info, cds_info=None):\n if cds_info is None:\n cds_info = []\n alt_seq = \"\"\n if len(cds_info) == 2:\n start_coding_index = cds_info[0] - 1 # it should be index not pos\n stop_coding_index = cds_info[1] # get end position of the last cds\n else:\n start_coding_index = 0\n total_len = 0\n for x in features_info:\n total_len += x[1] - x[0] + 1\n stop_coding_index = total_len # the features are sorted by end therefroe the end pos of the last item is the last coding nc\n\n if strand == '-': # ge the correct orientation, because exons are oredered based on their position\n ref_seq = ref_seq[\n ::-1] # in order to calculate from the first base of the first feature (sorted by genomic coordinates)\n ref_allele = ref_allele.complement() # the reverse will be done on return\n var_allele = var_allele.complement() # the reverse will be done on return\n\n ref_seq = ref_seq[\n start_coding_index:stop_coding_index] # just keep the coding regions (mostly effective in case of protein-coding genes)\n nc_index = 0\n if len(ref_allele) == len(var_allele) or ref_allele[0] == var_allele[0]:\n for feature in features_info: # for every exon, cds or stop codon\n if var_pos in range(feature[0], feature[\n 1] + 1): # get index of the var relative to the position of the overlapping feature in the coding region\n var_index_in_cds = nc_index + (var_pos - feature[0])\n # modify the coding reference sequence accoding to the var_allele\n c = len(ref_allele)\n alt_seq = ref_seq[0:var_index_in_cds] + var_allele + ref_seq[\n var_index_in_cds + c::] # variant and ref strand??\n if strand == '-':\n return ref_seq[::-1], alt_seq[::-1]\n else:\n return ref_seq, alt_seq\n\n nc_index += (feature[1] - feature[0] + 1)\n\n return ref_seq, alt_seq", "title": "" }, { "docid": "6d2969e33e747376b22064309480b8c2", "score": "0.47605693", "text": "def load_bias(self):\n if not os.path.exists(self.data_file):\n return\n \n with h5.File(self.data_file, 'r') as hfile:\n if \"bias\" in hfile.keys():\n data = np.array(hfile[\"bias\"])\n self.bias.bias_array = data\n self.log(\"Bias loaded from {}\".format(self.data_file))\n\n # Subtract of the an overall constant\n self.bias.bias_array -= 
self.bias.bias_array[0]", "title": "" }, { "docid": "d344b939282e1ed8a445e60f6ccfbb8f", "score": "0.4756925", "text": "def test_fqe_data_set_wfn_data():\n test = fqe_data.FqeData(1, 1, 2)\n ref = numpy.random.rand(2, 2) + 1.j * numpy.random.rand(2, 2)\n test.set_wfn(strategy='from_data', raw_data=ref)\n assert numpy.allclose(test.coeff, ref)", "title": "" }, { "docid": "1663928531405a5099c5bb945235365a", "score": "0.4753721", "text": "def initcafg(self):\n\n #all available redshift for UVB \n avail_z=np.arange(0,10,0.05)\n\n #pick closest in redshift\n zindex=np.argmin(abs(avail_z-self.redshift))\n nameuvb='fg_uvb_dec11/fg_uvb_dec11_z_{}.dat'.format(avail_z[zindex])\n try:\n fgtab=np.loadtxt(nameuvb,dtype={'names': ('nu','f'),'formats':('f10','f10')})\n except:\n print('Could not read UVB file {}'.format(nameuvb))\n exit()\n\n #from Ry to Hz\n self.source['lgnu']=np.log10(fgtab['nu'])+np.log10(3.2898419499e15)\n\n #from (10^-21 erg s^-1 cm^-2 Hz^-1 sr^-1) to Log of erg/s/cm2/Hz\n self.source['lgfpiJ']=np.log10(fgtab['f'])-21+np.log10(4*np.pi)\n\n #now reverse freq to have high freq first and low freq last (as in HM12)\n #This is nothing special, but it is needed for how the writing procedure\n #fixes the boundary of the SED\n self.source['lgnu'] = self.source['lgnu'][::-1]\n self.source['lgfpiJ'] = self.source['lgfpiJ'][::-1]", "title": "" }, { "docid": "e0b3d05b5728a40d66074126cceee64d", "score": "0.47522852", "text": "def dtopo_tt3_v2(indir,outfile):\n from numpy import mod,zeros\n\n fid_data = indir+'/claw.data'\n\n fid=open(fid_data,'r')\n for i in range(15):\n line=fid.readline()\n\n line=fid.readline().split()\n t0 =float(line[0])\n\n for i in range(2):\n line=fid.readline()\n\n line=fid.readline().split()\n mt =int(line[0])\n\n line=fid.readline().split()\n tf =float(line[0])\n\n fid.close()\n\n dt =(tf-t0)/mt\n\n dir = indir\n\n infile=\"fort.qxxxx\"\n\n infile1=dir+\"/fort.q0000\"\n fid= open(infile1,'r')\n fout=open(outfile,'w')\n\n for i in range(2):\n line=fid.readline()\n\n line=fid.readline().split()\n mx = int(line[0])\n line=fid.readline().split()\n my = int(line[0])\n line=fid.readline().split()\n xlow = float(line[0])\n line=fid.readline().split()\n ylow = float(line[0])\n line=fid.readline().split()\n dx = float(line[0])\n line=fid.readline().split()\n dy = float(line[0])\n\n line=fid.readline()\n\n fout.write(\"%10d \\n\" % mx)\n fout.write(\"%10d \\n\" % my)\n fout.write(\"%10d \\n\" % mt)\n fout.write(\"%20.10e \\n\" % xlow)\n fout.write(\"%20.10e \\n\" % ylow)\n fout.write(\"%20.10e \\n\" % (t0+dt))\n fout.write(\"%20.10e \\n\" % dx)\n fout.write(\"%20.10e \\n\" % dy)\n fout.write(\"%20.10e \\n\" % dt)\n\n h_orig=zeros(mx*my)\n k=0\n for j in range(my):\n for i in range(mx):\n line=fid.readline().split()\n h_orig[k]=float(line[0])\n k+=1\n line=fid.readline()\n\n fid.close()\n\n infile1=infile\n dz=zeros(mx*my)\n\n for k in range(1,mt+1):\n for ipos in range(10,6,-1):\n idigit=mod(k,10)\n infile1=infile1[0:ipos-1]+str(idigit)+infile1[ipos:10]\n k=k/10\n\n fid1= open(dir+'/'+infile1,'r')\n print fid1\n\n for i in range(9):\n line=fid1.readline().split()\n \n k=0 \n for j in range(my):\n for i in range(mx):\n line1=fid1.readline().split()\n hnew=float(line1[0])\n dz[i+(my-j-1)*mx]=hnew-h_orig[k]\n k+=1\n line=fid1.readline()\n \n for j in range(my):\n for i in range(mx): \n k=i+j*mx\n \n fout.write(\"%20.10e \" % dz[k])\n\n fout.write(\"\\n\")\n\n fid1.close()\n\n fout.close()", "title": "" }, { "docid": "64b52f94534221feaa42782df1147aff", "score": "0.4752102", "text": 
"def load_sequential_aras_first(isPerson):\n #by Adele\n #print (\"module_path: \" + module_path )\n #/by Adele\n \n with open(\"E:\\Lessons_tutorials\\Behavioural user profile articles\\Datasets\\Aras\\House A\\CSV_Summery\\Sequential\\Day\\Whole_data.csv\") as csv_file:\n \n data_file = csv.reader(csv_file)\n #target_names = np.array(temp[2:]) # the name of labels \n n_samples = 2068#sum(1 for row in data_file) \n n_features = 41\n data = np.empty((n_samples, n_features))\n target = np.empty((n_samples,), dtype=np.int)\n temp_data = np.empty(n_features+1)# copy whole data to it, then select all col except 20\n \n idx_IN_columns = [] \n for i, ir in enumerate(data_file):\n #print(i)\n temp_data = np.asarray(ir[:42], dtype=np.int)\n #print(temp_data)\n if isPerson == True:\n n_features = 40\n data = np.empty((n_samples, n_features))\n \n # in version 2 the activity col is removed too because it is not important, sensor data are required.\n idx_IN_columns = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39]#all of features except feature 40 (Person_ID\n target[i] = np.asarray(ir[40], dtype=np.int)# Person ID is the class\n else:\n idx_IN_columns = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40]#all of features except feature 41 (Activity_ID\n target[i] = np.asarray(ir[41], dtype=np.int)# Activity ID is the class\n \n \n extractedData = temp_data[idx_IN_columns]\n #print(extractedData)\n data[i] = np.asarray(extractedData , dtype=np.int)\n #print(target[i])\n \n \n #print(data)\n #print(target)\n return Bunch(data=data, target=target)\n # target_names=target_names)\n #feature_names=['sepal length (cm)', 'sepal width (cm)',\n # 'petal length (cm)', 'petal width (cm)'])", "title": "" }, { "docid": "54b64006b59b81b898b6cc71ccf6d48d", "score": "0.47514185", "text": "def load_original_data():\n print(\"Loading original data, only happens the first time\")\n\n # Load dataframes\n drug_chemical_info_with_fp = pd.read_csv(\"Data/DrugComb/drug_chemical_info_with_fingerprints.csv\")\n drugcomb_scored = pd.read_csv(\"Data/DrugComb/drugcombs_scored.csv\")\n drug_protein_link = pd.read_csv(\"Data/DrugComb/drug_protein_links.tsv\", sep=\"\\t\")\n protein_protein_interactions = pd.read_csv(\"Data/DrugComb/protein_protein_links.txt\", sep=' ')\n\n ####################################################################################################################\n # Build node feature matrix\n ####################################################################################################################\n drug_chemical_info_with_fp['has_fp'] = drug_chemical_info_with_fp['fp0'].apply(lambda fp: fp != -1)\n drug_chemical_info_with_fp['is_drug'] = 1\n drug_chemical_info_with_fp = drug_chemical_info_with_fp.rename(columns={'drugName': 'name'})\n\n print(\"Number of drugs\", len(drug_chemical_info_with_fp))\n\n # Retrieve all protein names\n all_proteins = set(protein_protein_interactions['protein1']). 
\\\n union(set(protein_protein_interactions['protein2'])).union(set(drug_protein_link['protein']))\n protein_nodes = pd.DataFrame(all_proteins, columns=['name'])\n protein_nodes['is_drug'] = 0\n protein_nodes['has_fp'] = False\n\n print(\"Number of proteins\", len(protein_nodes))\n\n nodes = pd.concat((drug_chemical_info_with_fp, protein_nodes), ignore_index=True, sort=False)\n nodes = nodes.fillna(-1)\n x = nodes.drop(['cIds', 'drugNameOfficial', 'molecularWeight', 'smilesString', 'name'], axis=1)\n x = x.to_numpy().astype(np.int)\n\n ####################################################################################################################\n # Build edge index\n ####################################################################################################################\n\n # Dictionaries to retrieve indices\n ##################################################\n cid_to_idx_dict = {nodes.at[i, 'cIds']: i for i in range(len(nodes))}\n name_to_idx_dict = {nodes.at[i, 'name']: i for i in range(len(nodes))}\n\n # PPI\n ##################################################\n protein_protein_interactions['idx_prot1'] = protein_protein_interactions['protein1'].apply(\n lambda s: name_to_idx_dict[s])\n protein_protein_interactions['idx_prot2'] = protein_protein_interactions['protein2'].apply(\n lambda s: name_to_idx_dict[s])\n\n edge_index_ppi = protein_protein_interactions[['idx_prot1', 'idx_prot2']].to_numpy().T\n edge_attr_ppi = np.zeros((edge_index_ppi.shape[1], 4))\n\n print(\"Number of Prot-Prot Interactions\", edge_index_ppi.shape[1])\n\n # Drug Protein interaction\n ##################################################\n drug_protein_link['idx_chemical'] = drug_protein_link['chemical'].\\\n apply(lambda s: cid_to_idx_dict[s] if s in cid_to_idx_dict.keys() else -1)\n drug_protein_link['idx_prot'] = drug_protein_link['protein'].apply(lambda s: name_to_idx_dict[s])\n\n edge_index_dpi = drug_protein_link[drug_protein_link['idx_chemical'] != -1][['idx_chemical', 'idx_prot']].\\\n to_numpy().T\n edge_attr_dpi = np.zeros((edge_index_dpi.shape[1], 4))\n\n print(\"Number of Drug-Prot Interactions\", edge_index_dpi.shape[1])\n\n # Drug Synergy scores\n ##################################################\n drugcomb_scored['idx_Drug1'] = drugcomb_scored['Drug1'].apply(\n lambda s: name_to_idx_dict[s] if s in name_to_idx_dict.keys() else -1)\n drugcomb_scored['idx_Drug2'] = drugcomb_scored['Drug2'].apply(\n lambda s: name_to_idx_dict[s] if s in name_to_idx_dict.keys() else -1)\n\n # Remove measures that have been performed several times.\n drug_drug_edges = drugcomb_scored[drugcomb_scored[['idx_Drug1', 'idx_Drug2']].duplicated() == False]\n # Remove measures for which there is no information about one of the drugs\n drug_drug_edges = drug_drug_edges[drug_drug_edges['idx_Drug1'] != -1]\n drug_drug_edges = drug_drug_edges[drug_drug_edges['idx_Drug2'] != -1]\n\n edge_index_ddi = drug_drug_edges[['idx_Drug1', 'idx_Drug2']].to_numpy().T\n edge_attr_ddi = drug_drug_edges[['ZIP', 'Bliss', 'Loewe', 'HSA']].to_numpy()\n\n print(\"Number of Drug-Drug Interactions\", edge_index_ddi.shape[1])\n\n # Aggregate different edges\n ##################################################\n edge_index = np.concatenate((edge_index_ppi, edge_index_dpi, edge_index_ddi), axis=1)\n edge_attr = np.concatenate((edge_attr_ppi, edge_attr_dpi, edge_attr_ddi), axis=0)\n # Edges are directed, we need to feed them both ways\n edge_index = np.concatenate((edge_index, edge_index[::-1, :]), axis=1)\n edge_attr = 
np.concatenate((edge_attr, edge_attr), axis=0)\n\n data = Data(x=torch.tensor(x, dtype=torch.float),\n edge_index=torch.tensor(edge_index, dtype=torch.long),\n edge_attr=torch.tensor(edge_attr, dtype=torch.float))\n\n # Add drug-drug interaction informations as attribute to data, useful for training\n data.number_of_drugs = len(drug_chemical_info_with_fp)\n data.edge_index_ddi = torch.tensor(edge_index_ddi)\n data.edge_attr_ddi = torch.tensor(edge_attr_ddi)\n\n return data", "title": "" }, { "docid": "37da8b2ee508383391bce66308f27560", "score": "0.4750281", "text": "def TInReference(self, groundTruth, index, infer=False):\n i = self.readToRead[index]\n return groundTruth.MapPBISubreadToReference(\n i.tpbi.movie, i.tpbi.holeNumber,\n i.abs_tstart, i.abs_tend, infer)", "title": "" }, { "docid": "fd50618a92d363c291877803066875cb", "score": "0.47472703", "text": "def test_missing_references(self):\n buff = StringIO(\"\"\"0 @I1@ INDI\n1 FAMS @FmissingS@\n1 FAMC @FmissingC@\n0 @F1@ FAM\n1 WIFE @ImissingW@\n1 HUSB @ImissingH@\n1 CHIL @ImissingC@\"\"\")\n (ind, fam, warns) = parse_file(buff)\n self.assertTrue(len([x for x in warns if x.story == \"US26\"]) == 5)\n self.assertTrue(ind['@I1@'].spouse_family_ids == [])\n self.assertTrue(ind['@I1@'].spouse_families == [])\n self.assertTrue(ind['@I1@'].child_family_ids == [])\n self.assertTrue(ind['@I1@'].child_families == [])\n self.assertTrue(fam['@F1@'].husband_id == None)\n self.assertTrue(fam['@F1@'].husband == None)\n self.assertTrue(fam['@F1@'].wife_id == None)\n self.assertTrue(fam['@F1@'].wife == None)\n self.assertTrue(fam['@F1@'].children_id_list == [])\n self.assertTrue(fam['@F1@'].children_list == [])", "title": "" }, { "docid": "7fb5c516623d59d8620c3411eaa43fe6", "score": "0.47471806", "text": "def read_debt(self):\n debtfile = pd.read_csv(INPUTPATH + 'debt_financing.csv')\n debtfile.set_index('indcode', inplace=True)\n self.Deltas = copy.deepcopy(debtfile)", "title": "" }, { "docid": "dca9f1418a7412c5e3b895affd6eef9d", "score": "0.4747172", "text": "def load_data_single_mod(gen,model,scenario,Tanglob_idx=False,Tref_all=True,Tref_start='1870-01-01',Tref_end='1900-01-01',usr_time_res=\"ann\",var='tas'):\n # the dictionaries are NOT ordered properly + some other adjustments -> will need to be careful with my old scripts\n\n # see e-mail from Verena on 20191112 for additional infos how could read in several files at once with xarr\n # additionally: she transforms dataset into dataarray to make indexing easier -> for consistency reason with earlier\n # version of emulator (& thus to be able to reuse my scripts), I do not do this (fow now).\n \n\t# right now I keep reloading constants fields for each run I add -> does not really make sense. \n # Maybe add boolean to decide instead. 
however they are small & I have to read them in at some point anyways\n # -> maybe path of least resistence is to not care about it\n\tprint('start with model',model)\n\n\t# vars which used to be part of the inputs but did not really make sense as I employ the same ones all the time anyways (could be changed later if needed)\n\t#var='tas'\n\ttemp_res = usr_time_res # if not, reading the var file needs to be changed as time var is not named in the same way anymore\n\tspatial_res = 'g05'\n\n\n # load in the constants files\n\tdir_data = '/net/so4/landclim/snath/data/'\n\tfile_ls = 'interim_invariant_lsmask_regrid_g05.nc' # ERA-interim mask regridded by Richard from 73x144 to 72x144\n\tfile_srex = 'srex-region-masks_20120709.srex_mask_SREX_masks_all.50deg.time-invariant.nc'\n\tfile_srex_shape = 'referenceRegions.shp'\n\n\n\t#df_all_regs = geopandas.read_file(dir_data+file_srex_shape)\n\tsrex_names = ['ALA','CGI','WNA','CNA','ENA','CAM','AMZ','NEB','WSA','SSA','NEU','CEU','MED','SAH','WAF','EAF','SAF',\n 'NAS','WAS','CAS','TIB','EAS','SAS','SEA','NAU','SAU'] # SREX names ordered according to SREX mask I am \n # employing\n\t#df_srex = df_all_regs.loc[df_all_regs['LAB'].isin(srex_names)] # alternative indexing: search in column LAB for names\n\tsrex_raw = xr.open_mfdataset(dir_data+file_srex, combine='by_coords',decode_times=False) # srex_raw nrs from 1-26\n\t#df_srex=srex_raw \n\t#srex_raw[\"time\"]=pd.to_datetime(srex_raw.time.values)\n\tfrac_l = xr.open_mfdataset(dir_data+file_ls, combine='by_coords',decode_times=False) #land-sea mask of ERA-interim bilinearily interpolated \n\tlons, lats = np.meshgrid(frac_l.longitude.values,frac_l.latitude.values) # the lon, lat grid (just to derive weights) \n\t#frac_1[\"time\"]=pd.to_datetime(frac_1.time.values)\n\tfrac_l = frac_l.where(frac_l.latitude>-60,0) # remove Antarctica from frac_l field (ie set frac l to 0)\n\n\tidx_l=np.squeeze(frac_l.lsm.values)>0.0 # idex land #-> everything >0 I consider land\n \n\n\twgt = norm_cos_wgt(lats) # area weights of each grid point\n\twgt_l = wgt[idx_l] # area weights for land grid points\n\tlon_pc, lat_pc = mpu.infer_interval_breaks(frac_l.longitude, frac_l.latitude) # the lon / lat for the plotting with pcolormesh\n\tsrex=(np.squeeze(srex_raw.srex_mask.values)-1)[idx_l] # srex indices on land\n\n \n\ty={}\n\tT_ref = np.zeros([36,72])\n\trun_nrs={}\n\tif Tanglob_idx == True:\n\t\tTan_wgt_globmean = {}\n\tif gen == 5:\n\t\tdir_var='/net/so4/landclim/snath/DA/data/%s/regridded/'%model\n\t\trun_names_list=sorted(glob.glob(dir_var+var+'_'+temp_res+'_'+model+'_'+scenario+'_'+'r*i1p1'+'_'+spatial_res+'.nc'))\n # ATTENTION: are ordered but does not work for models with runs above digit 9 \n\t\t#index_tr = [i for i, s in enumerate(run_names_list) if 'r1i1p1' in s][0] # find training run \n\t\t#print(run_names_list)\n\t\t#run_names_list.insert(0, run_names_list.pop(index_tr)) # move training run to begin of list\n\n # exception for cmip5 GISS-E2-H_rcp85_r2i1p1 ie the strange run I excluded from ESD paper\n\t\tif '/net/atmos/data/cmip5-ng/tas/tas_%s_GISS-E2-H_rcp85_r2i1p1_g025.nc'%usr_time_res in run_names_list:\n\t\t\trun_names_list.remove('/net/atmos/data/cmip5-ng/tas/tas_%s_GISS-E2-H_rcp85_r2i1p1_g025.nc'%usr_time_res )\n \n # loop over all runs to obtain the absolute values \n\t\tprint(run_names_list)\n\t\tfor run_name in run_names_list:\n\n\t\t\tdata = xr.open_mfdataset(run_name,decode_times=False)\n\t\t\tif 
usr_time_res==\"ann\":\n\t\t\t\tdata=data.rename({'year':'time'})\n\t\t\t\tdata=data.rename({'longitude':'lon'})\n\t\t\t\tdata=data.rename({'latitude':'lat'}) \n\t\t\tdata[\"time\"]=cf_units.num2date(data.time.values, 'days since 1800-01-01 00:00:00', cf_units.CALENDAR_STANDARD)\n\t\t\tdata=data.sel(time=slice('1870-01-01', '2101-01-01'))#.roll(lon=72)\n # rename so it is consisten with cmip6 \n # roll so that it is on same grid as others (no longer Pacific centered) \n\n\t\t\t#print(data.time.values)\n\t\t\tdata = data.assign_coords(lon= (((data.lon + 180) % 360) - 180)) # assign_coords so same labels as others\n\t\t\trun=int(data.attrs['source_ensemble'].split('r')[1].split('i')[0]) # extract ens member\n\t\t\trun_nrs[run_name]=run\n \n\t\t\ty[run] = data.tas.values # still absolute values + still contains sea pixels\n\t\t\tT_ref += data.tas.sel(time=slice(Tref_start, Tref_end)).mean(dim='time').values*1.0/len(run_names_list) # sum up all ref climates\n \n \n\t\t\tif run==1 and Tref_all != True:\n\t\t\t\tT_ref_1=data.tas.sel(time=slice(Tref_start, Tref_end)).mean(dim='time').values\n\t\t\t\tprint('create ref for ',run_name)\n \n\t\t\tif Tanglob_idx == True:\n\t\t\t\tTan_wgt_globmean[run] = np.asarray([np.average(y[run][t],weights=wgt) for t in np.arange(y[run].shape[0])]) #area weighted but abs values \n \n \n\tif gen == 6:\n\t\tdir_var = '/net/so4/landclim/snath/DA/data/%s/regridded/'%model\n\t\trun_names_list=sorted(glob.glob(dir_var+var+'_'+temp_res+'_'+model+'_'+scenario+'_'+'r*i1p1f*'+'_'+spatial_res+'.nc'))\n # ATTENTION: are ordered but does not work for models with runs above digit 9\n # idea is: every ssp one needs a corresponding hist one (vice versa not the case)\n\t\t#index_tr = [i for i, s in enumerate(run_names_list) if 'r1i1p1' in s][0] # find training run \n\t\t#run_names_list.insert(0, run_names_list.pop(index_tr)) # move training run to begin of list\n\n\t\tif model == 'EC-Earth3': # temporary fix added on 20191118 because these runs are not available as historical ones\n\t\t\trun_names_list.remove('/net/cfc/cmip6/Next_Generation/tas/%s/g025/tas_%s_EC-Earth3_ssp585_r1i1p1f1_g025.nc'%(usr_time_res,usr_time_res))\n\t\t\t#run_names_list.remove('/net/cfc/cmip6/Next_Generation/tas/ann/g025/tas_ann_EC-Earth3_ssp585_r6i1p1f1_g025.nc') #6 removed because historical seems faulty (ie only ghg driven) on 20191119\n\t\t\t#run_names_list.remove('/net/cfc/cmip6/Next_Generation/tas/ann/g025/tas_ann_EC-Earth3_ssp585_r11i1p1f1_g025.nc')\n\t\t\t#run_names_list.remove('/net/cfc/cmip6/Next_Generation/tas/ann/g025/tas_ann_EC-Earth3_ssp585_r13i1p1f1_g025.nc')\n\t\t\t#run_names_list.remove('/net/cfc/cmip6/Next_Generation/tas/ann/g025/tas_ann_EC-Earth3_ssp585_r15i1p1f1_g025.nc')\n\t\t\tprint('ATTENTION: ssp realizations 1 because no historical available on 20191122 (open issue on ESGF).')\n\t\tprint(run_names_list)\n\t\tfor run_name in run_names_list:\n\t\t\t#run_name_ssp = run_name\n\t\t\t#run_name_hist = run_name.replace(scenario,'historical') \n\t\t\trun_name_hist = run_name \n\t\t\trun_name_ssp = dir_var+var+'_%s_'%(usr_time_res)+model+'_ssp370_r1i1p1f1_g05.nc' \n\t\t\tif model=='HadGEM3-GC31-LL':\n\t\t\t\trun_name_ssp = dir_var+var+'_%s_'%(usr_time_res)+model+'_ssp245_r1i1p1f3_g05.nc' \n\t\t\tif model=='CNRM-CM6-1':\n\t\t\t\trun_name_ssp = dir_var+var+'_%s_'%(usr_time_res) +model+'_ssp370_r1i1p1f2_g05.nc' \n\t\t\tif model=='GISS-E2-1-G':\n\t\t\t\trun_name_ssp = dir_var+var+'_%s_'%(usr_time_res) +model+'_ssp370_r6i1p1f2_g05.nc' \n\n\t\t\tdata = 
xr.open_mfdataset([run_name_hist,run_name_ssp],concat_dim='time').sel(time=slice('1944-01-01', '2019-01-01'))\n\t\t\t#data = data.assign_coords(lon= (((data.lon + 180) % 360) - 180)) # assign_coords so same labels as others\n\t\t\trun = int(run_name_hist.split('_')[4].split('r')[1].split('i')[0])#data.attrs['realization_index']\n\t\t\trun_nrs[run_name]=run\n \n\t\t\ty[run] = data[var].values # still absolute values + still contains sea pixels\n\t\t\tT_ref += data[var].sel(time=slice(Tref_start, Tref_end)).mean(dim='time').values*1.0/len(run_names_list) # sum up all ref climates\n \n\t\t\tif run==1 and Tref_all != True:\n\t\t\t\tT_ref_1=data[var].sel(time=slice(Tref_start, Tref_end)).mean(dim='time').values\n\t\t\t\tprint('create ref for ',run_name) \n\n\t\t\tif model=='CAMS-CSM1-0': # temporary (?) fix added on 20191119 because CAMS-CSM1-0 data are currently only available till 2099 instead of 2100\n\t\t\t\ty[run]=y[run][:-1]\n \n\t\t\tif Tanglob_idx == True:\n\t\t\t\tTan_wgt_globmean[run] = np.asarray([np.average(y[run][t],weights=wgt) for t in np.arange(y[run].shape[0])]) #area weighted but abs values \n\n # obtain the anomalies\n\tfor run_name in run_names_list:\n\t\trun = run_nrs[run_name]\n\t\tif Tref_all == True:\n\t\t\ty[run]=(y[run]-T_ref)\n\t\t\tif Tanglob_idx == True:\n\t\t\t\tTan_wgt_globmean[run]=Tan_wgt_globmean[run]-np.average(T_ref,weights=wgt)\n\t\telse:\n\t\t\ty[run]=y[run]\n\t\t\tif Tanglob_idx == True:\n\t\t\t\tTan_wgt_globmean[run]=Tan_wgt_globmean[run]-np.average(T_ref_1,weights=wgt) \n\ttime=data[\"time\"]\n\tif Tanglob_idx == False: \n\t\treturn y,time,srex,srex_names,lon_pc,lat_pc,idx_l,wgt_l\n\telse:\n\t\treturn y,time,srex,srex_names,lon_pc,lat_pc,idx_l,wgt_l,Tan_wgt_globmean", "title": "" }, { "docid": "6b94b9a51e916bbd19da92f1e67c74cd", "score": "0.47448468", "text": "def test_missing_reference_data():\n td = TorsionDriveData.from_qdata(\n dihedral=(6, 10, 11, 8), qdata_file=get_data(\"biphenyl_qdata.txt\")\n )\n del td.reference_data[0]\n with pytest.raises(MissingReferenceData):\n td.validate_angles()", "title": "" }, { "docid": "4b3ee2bbebe912e9550c038bebd0a40c", "score": "0.47353658", "text": "def get_forces_data(outcar_filename=\"OUTCAR\", convergence=None, warn=False):\n with open(outcar_filename, \"r\") as f:\n outcar = f.read()\n\n number_of_ions_re = re.compile(r\"NIONS = \\s+([\\d]+)\")\n try:\n number_of_ions = int(number_of_ions_re.findall(outcar)[0])\n except IndexError as exc:\n raise ValueError(\"Unable to read NIONS.\") from exc\n except:\n raise\n if not convergence:\n convergence_re = re.compile(r\"EDIFFG = -([\\d\\.E-]+)\")\n try:\n convergence = float(convergence_re.findall(outcar)[0])\n except IndexError as exc:\n raise ValueError(\"Unable to read EDIFFG.\") from exc\n except:\n raise\n\n # find force output block positions\n forces_block_start = []\n forces_header_re = re.compile(\"\\sPOSITION\\s+TOTAL-FORCE \\(eV/Angst\\)\")\n for i, line in enumerate(outcar.split(\"\\n\")):\n if forces_header_re.search(line):\n forces_block_start.append(i)\n if not forces_block_start:\n raise ValueError(\"No FORCES blocks found.\")\n # Ideally we want the most recent complete block of forces data.\n most_recent_block_start = forces_block_start[-1] + 2\n forces_lines = outcar.split(\"\\n\")[\n most_recent_block_start : most_recent_block_start + number_of_ions\n ]\n if not forces_block_is_well_formed(forces_lines):\n # If the most recent forces block is ill-formed, try to parse the previous block.\n if warn:\n print(\n \"The last FORCES block is not 
well-formed. Trying to parse the preceeding block.\"\n )\n next_most_recent_block_start = forces_block_start[-2] + 2\n forces_lines = outcar.split(\"\\n\")[\n next_most_recent_block_start : next_most_recent_block_start + number_of_ions\n ]\n if not forces_block_is_well_formed(forces_lines):\n # If the last two forces blocks are ill-formed, we assume the input file is mis-formatted.\n raise Exception(\n \"The last two FORCES blocks are not well-formed. Your input might be mis-formatted.\"\n )\n forces = []\n for line in forces_lines:\n forces.append([float(s) for s in line.split()[-3:]])\n forces_data = ForcesData(forces=forces, convergence=convergence)\n return forces_data", "title": "" }, { "docid": "3ad1cdc5e35c2eb3930375ee3ac5e721", "score": "0.4734519", "text": "def read_data(self, force_distances=False):\n import pandas as pd\n from io import StringIO\n\n fin = open(self._input_file)\n l = fin.readline()\n bytes_read = len(l)\n p = l.split()\n self.N = int(p[0])\n if len(p) > 1:\n self.fit_measure_ = float(p[1])\n\n l = [fin.readline() for _ in range(self.N)]\n bytes_read += sum(len(x) for x in l)\n\n self.sequences = pd.read_table(\n StringIO(u\"\".join(l)),\n nrows=self.N,\n sep=\"\\t\",\n header=None,\n engine=\"python\")\n\n assert self.N == len(self.sequences)\n\n log.info(\"Read %d sequences.\", self.N)\n\n if self.sequences.shape[1] > 1:\n log.info(\"Setting embedding from input data\")\n self.embedding = self.sequences.set_index(0)\n self.embedding.columns = self.coord_dims\n self.embedding.index.name = \"Sequence\"\n\n if (not hasattr(self, \"embedding\")) or force_distances:\n log.info(\"Memory usage %gMB\" % (util.memory_usage()))\n log.info(\"Reading distances.\")\n self.read_distances(fin)\n log.info(\"Memory usage %gMB\" % (util.memory_usage()))", "title": "" }, { "docid": "b5573c69ea9117cb6778fc2d0b4eaa0d", "score": "0.47334468", "text": "def _fix_georeferencing(self):\n logger.info(\"Fixing georeferencing information in %s\", self.ri_path)\n\n # Set the coordinate reference system for rasterio methods\n with xr.open_dataset(self.ri_path, engine=\"netcdf4\") as data:\n # data = xr.open_dataset(self.ri_path)\n data = set_georeferencing(\"ri\", data)\n\n # Change longitude range and rebuild this coordinate dataset\n lons = data[\"longitude\"].data - 360\n data = data.assign_coords({\"longitude\": lons})\n data[\"longitude\"].attrs = {\n \"units\": \"degrees_east\",\n \"long_name\": \"Longitude\",\n \"actual_range\": [\n data[\"longitude\"].data.min(),\n data[\"longitude\"].data.max()\n ]\n }\n\n # The default nan value (-9.96921e+36) throws overflow warnings\n navalue = -9999\n data[\"precip\"].data[np.isnan(data[\"precip\"].data)] = navalue\n data[\"precip\"].encoding[\"missing_value\"] = -9999\n data[\"precip\"].encoding[\"_FillValue\"] = -9999\n\n # Overwrite download path\n os.remove(self.ri_path)\n data.to_netcdf(self.ri_path)\n\n logger.info(\"Georeferencing information adjusted, overwriting %s\",\n self.ri_path)", "title": "" }, { "docid": "6171afdc51971ba49a671845fc2ecbd8", "score": "0.473311", "text": "def read_short_dat(filename):\n with open(filename, 'r') as dat_file:\n # reading in the header block\n is_header = True\n header_block = []\n while is_header:\n tmp_line = dat_file.readline()\n if tmp_line.find('#') == 0:\n header_block.append(tmp_line)\n else:\n is_header = False\n # get the next two Tecplot lines and then go back one line\n header_block.append(tmp_line)\n last_pos = dat_file.tell()\n tmp_line = dat_file.readline()\n 
header_block.append(tmp_line)\n dat_file.seek(last_pos)\n\n try:\n reader = csv.reader(dat_file, delimiter=' ')\n type_line = next(reader)\n nvert = int(type_line[1][2:])\n nface = int(type_line[2][2:])\n x_data = [] # position\n f2v = [] # connectivity\n\n count = 0\n while count < nvert:\n lst = next(reader)[0:3]\n x_data.append(lst)\n count += 1\n x_data = np.array(x_data, dtype=float)\n\n count = 0\n while count < nface:\n lst = next(reader)[0:3] # should just be 3 values\n f2v.append([int(i) for i in lst])\n count += 1\n f2v = np.array(f2v, dtype=int)\n\n except csv.Error as e:\n sys.exit('file %s, line %d: %s' % (filename, reader.line_num, e))\n\n try:\n params = {\"header_block\": header_block}\n return (x_data, f2v, params)\n except NameError as e:\n print(\"One of the required variables was not instantiated: {}\".format(e))", "title": "" }, { "docid": "cfc0aa800cc7109504f36afac087b470", "score": "0.47310045", "text": "def testNewreadoutlier(self):\n imageids, imsizes, phasecenters, masks, models, paramdic, newformat=self.imset.newreadoutlier(self.newoutlierfile)\n # the reference outlier file contains each field's paramaters per line\n print \"Using \", self.newoutlierfile\n f = open(self.newoutlierreffile,'r')\n lines=f.readlines()\n f.close()\n cnt = 0\n for elm in lines:\n if len(elm.split())!=0 and elm.split()[0]!='#' :\n cnt +=1\n print \"N fields=\",cnt\n #print \"imsizes=\",imsizes,\" phasecenters=\",phasecenters\n print \"len(imsizes)=\",len(imsizes), \" len(imageids)=\",len(imageids)\n if len(imsizes) == len(phasecenters) == len(imageids) == len(masks) == len(models) == cnt:\n self.res=True\n else:\n self.res=False\n self.assertTrue(self.res)", "title": "" }, { "docid": "e4740d61cc550ea7366ccd24ebe7e7a5", "score": "0.47260442", "text": "def _readData(self):\n raw = numpy.genfromtxt(self.filename, delimiter=[8]*10)\n\n # component 0\n dataStart = self._headerTotalLines\n acc0 = self._extract(raw, dataStart, self._dataNumLines)\n dataStart += self._dataNumLines\n if self._dataNumValues == 3:\n vel0 = self._extract(raw, dataStart, self._dataNumLines)\n dataStart += self._dataNumLines\n disp0 = self._extract(raw, dataStart, self._dataNumLines)\n dataStart += self._dataNumLines\n\n # component 1\n dataStart = self._headerTotalLines + \\\n 1*(self._headerTotalLines + self._dataNumValues*self._dataNumLines)\n acc1 = self._extract(raw, dataStart, self._dataNumLines)\n dataStart += self._dataNumLines\n if self._dataNumValues == 3:\n vel1 = self._extract(raw, dataStart, self._dataNumLines)\n dataStart += self._dataNumLines\n disp1 = self._extract(raw, dataStart, self._dataNumLines)\n dataStart += self._dataNumLines\n\n # component 2\n dataStart = self._headerTotalLines + \\\n 2*(self._headerTotalLines + self._dataNumValues*self._dataNumLines)\n acc2 = self._extract(raw, dataStart, self._dataNumLines)\n dataStart += self._dataNumLines\n if self._dataNumValues == 3:\n vel2 = self._extract(raw, dataStart, self._dataNumLines)\n dataStart += self._dataNumLines\n disp2 = self._extract(raw, dataStart, self._dataNumLines)\n dataStart += self._dataNumLines\n\n # Assemble components\n self.acc = numpy.array( (acc0, acc1, acc2) ).transpose()\n self.acc *= self.header['toSI']\n if self._dataNumValues == 3:\n self.vel = numpy.array( (vel0, vel1, vel2) ).transpose()\n self.vel *= self.header['toSI']\n self.disp = numpy.array( (disp0, disp1, disp2) ).transpose()\n self.disp *= self.header['toSI']\n\n # Rotate to E,N,U\n R = numpy.zeros((3, 3), dtype=numpy.float64)\n for i,az in 
enumerate(self.azimuth.values()):\n if az <= 360.0:\n azR = az / 180.0 * math.pi\n R[i,:] = (math.sin(azR), math.cos(azR), 0.0)\n else:\n R[i,:] = (0.0, 0.0, 1.0)\n self.acc = numpy.dot(self.acc, R)\n if not self.vel is None:\n self.vel = numpy.dot(self.vel, R)\n if not self.disp is None:\n self.disp = numpy.dot(self.disp, R)\n\n return", "title": "" }, { "docid": "1b439b80bb993bbdb8e1b5c1f2bf243b", "score": "0.47248137", "text": "def make_reference(self, reference_probes):\n self.refs = pd.DataFrame(reference_probes, columns=['sequence'])\n self.refs['name'] = [naming['reference'](i) for i, _ in self.refs.iterrows()]\n self.refs['color'] = 'tomato'\n self.refs['type'] = 'stellaris (sense) reference'\n self.refs = self.refs[benchling_headers]", "title": "" }, { "docid": "82005183b11a6e328c7802733a263954", "score": "0.47230077", "text": "def backward(self, top, propagate_down, bottom):\n\n batch=1\n index = 0\n level = 6\n dis = 7\n SepSize = batch * level\n self.ref= np.zeros(bottom[0].num,dtype=np.float32)\n\n for k in range(dis):\n for i in range(SepSize*k,SepSize*(k+1)-batch):\n for j in range(SepSize*k + int((i-SepSize*k)/batch+1)*batch,SepSize*(k+1)):\n\n\t\t if self.loss[index]>0:\n self.ref[i] += -1\n self.ref[j] += +1\n index +=1\n\n for k in range(dis):\n\t\tindex = k*SepSize\n\t\tself.ref[index] = (bottom[0].data[index] - bottom[1].data[index])*bottom[0].num /100\n\n for k in range(dis):\n\t\tif (bottom[0].data[(k+1)*SepSize-1] < 0):\n\t\t\tself.ref[(k+1)*SepSize-1] += bottom[0].data[(k+1)*SepSize-1]*bottom[0].num /100\n\n\tbottom[0].diff[...]= np.reshape(self.ref,(bottom[0].num,1))/bottom[0].num", "title": "" }, { "docid": "2d2c72529e014e69538255bd85bd9ad3", "score": "0.47196406", "text": "def fix_nonref_positions(in_file, ref_file):\n ignore_chrs = [\".\"]\n ref2bit = twobit.TwoBitFile(open(ref_file))\n out_file = in_file.replace(\"-raw.vcf\", \".vcf\")\n\n with open(in_file) as in_handle:\n with open(out_file, \"w\") as out_handle:\n for line in in_handle:\n if line.startswith(\"#\"):\n out_handle.write(line)\n else:\n parts = line.rstrip(\"\\r\\n\").split(\"\\t\")\n pos = int(parts[1])\n # handle chr/non-chr naming\n if parts[0] not in ref2bit.keys():\n parts[0] = parts[0].replace(\"chr\", \"\")\n ref_base = None\n if parts[0] not in ignore_chrs:\n try:\n ref_base = ref2bit[parts[0]].get(pos-1, pos).upper()\n except Exception, msg:\n # off the end of the chromosome\n if str(msg).startswith(\"end before start\"):\n print msg\n else:\n print parts\n raise\n parts = fix_vcf_line(parts, ref_base)\n if parts is not None:\n out_handle.write(\"\\t\".join(parts) + \"\\n\")\n return out_file", "title": "" }, { "docid": "c48fb2692c4c5e73cfff883b7a41c001", "score": "0.47189966", "text": "def load_data(self):\n fn = \"../data/redclump_sample_A_updatedvalues_only.txt\"\n print \"Reading %s ...\" % fn\n data = np.genfromtxt(fn)\n names = [\"ID\", \"distance\", \"Radius_gal\", \"Phi_gal\", \"z\", \"Teff\",\n \"logg\", \"[Fe/H]\", \"[alpha/Fe]\", \"age\", \"mass\"]\n print data[2]\n print data.shape\n print \"Read %s\" % fn\n ages = data[:, 9].flatten()\n sigma = 0.25 / np.log10(np.e) # MAGIC\n ivars = np.zeros_like(ages) + 1. 
/ (sigma * sigma)\n self.set_data(ages, ivars)", "title": "" }, { "docid": "ddf1ec23b0976a68cd2917aa6f6e2aec", "score": "0.4716494", "text": "def load(self):\n data = np.load(self.path)\n self.target = data['target']\n self.indices = list(data['indices'])\n self.ref = list(data['ref'])", "title": "" }, { "docid": "9190505865acffe7522fcfd3d4fab6fc", "score": "0.47135812", "text": "def get_reference_focus_data(self, reference_trajectory):\n\n # Reorder image.\n foc_X = self.foc_X.reshape((self.N), order=\"F\")\n foc_Y = self.foc_Y.reshape((self.N), order=\"F\")\n foc_Z = self.foc_Z.reshape((self.N), order=\"F\")\n image = np.asanyarray([foc_X, foc_Y, foc_Z])\n\n # Get local_look and local_squint for the distributed target.\n self.local_look_ref_traj, self.local_squint_ref_traj = Utils.get_angles_antenna_distributed_target(reference_trajectory, image)\n\n N = image.shape[1]\n NT = len(reference_trajectory.flight_x)\n\n # Compute distances between sensor and target.\n self.distances_ref_traj = np.sqrt( (np.full((N, NT), reference_trajectory.flight_x[:]) - np.full((N, NT), foc_X[:, None]) )**2 + \n (np.full((N, NT), reference_trajectory.flight_y[:]) - np.full((N, NT), foc_Y[:, None]) )**2 + \n (np.full((N, NT), reference_trajectory.flight_z[:]) - np.full((N, NT), foc_Z[:, None]) )**2)", "title": "" }, { "docid": "e5e87521336ac960999949ce42b6fb32", "score": "0.47075123", "text": "def read(self) -> None:\n (\n self.iteration,\n self.s,\n self.y,\n self.rho,\n self.r0,\n self.f0,\n self.e0,\n self.task,\n ) = self.load()\n self.load_restart = True", "title": "" }, { "docid": "863e2f9fb817febc0c63b3c844366811", "score": "0.47070032", "text": "def readCCout():\n\n inputName = 'ccinput.dat'\n ccextension = '_cc-fit.out'\n \n data = np.loadtxt(inputName+ccextension, skiprows = 13)\n \n return data", "title": "" }, { "docid": "dd0dc9a1497547a6da69c2a041d31210", "score": "0.47039145", "text": "def get_ref_step2(self):\n wrong_ref = self.check_for_wrong_ref() or self.wrong_ref_pp()\n\n # if self.ref: # probabaly should be calculated once\n # if self.author != \"תנך\" and self.ref.index.title in library.get_indexes_in_category('Tanakh'):\n # include_dependant = True\n # wrong_ref = True\n # elif self.author != \"תלמוד בבלי\" and self.ref.index.title in library.get_indexes_in_category('Bavli'):\n # include_dependant = False\n # wrong_ref = True\n\n if self.ref:\n if wrong_ref: # or (self.raw_ref and not self.ref and not self.raw_ref.is_sham):\n new_ref = None\n include_dependant = True\n if self.author == \"משנה תורה\":\n include_dependant = False\n look_here = self.get_index_options(include_dependant=include_dependant)\n if look_here:\n new_ref = self.get_new_ref_w_look_here(look_here)\n else: # couldn't find a indexs for this author\n parser.missing_authors.add(self.author)\n # if look_here and (self.index or wrong_ref):\n # # than try to get the true title from the cat from look_here\n # look_here_shared_title_word = '({})'.format(\n # '|'.join(list(set.intersection(*[set(x.title.split()) for x in look_here]))))\n # alt_ref_titles = map(lambda x: x['text'], self.ref.index.schema['titles'])\n # found_index = [ind for ind in look_here for tanakh_book in alt_ref_titles if\n # tanakh_book in re.sub(look_here_shared_title_word, '', ind.title).strip()]\n # if found_index:\n # if len(set(found_index)) > 1: # assert len(found_index) == 0\n # print \"more than one index option\" # todo: problem with אלשיך דברים and with books I, II\n # print found_index[0].title, found_index[-1].title\n # self.index = 
found_index[0]\n # try:\n # new_ref = Ref('{} {}'.format(self.index.title, re.sub(self.ref.index.title, '', self.ref.normal()).strip()))\n # print \"deleting wrong: {} found new Index: {} new ref: {}\".format(self.ref, self.index, new_ref)\n # self.ref = new_ref\n # except exceptions.InputError as e:\n # print \"inputError for this string {}, extracted from this rawref {}\".format('{} {}'.format(self.index.title, re.sub(self.ref.index.title, '', self.ref.normal()).strip()), self.raw_ref)\n # tanakh is wrong but can't find the index ex: רש\"ר הירש\n if self.index and not self.ref:\n ind = self.index\n #todo: look into the original catching see why it is not catching kalla. also see how to copy the intresting parts to here\n self.raw_ref.book = ind.title\n split_raw_text = re.sub('[\\(\\)]', '', self.raw_ref.rawText).split()\n self.raw_ref.section_level = split_raw_text\n\n pass\n if not self.index and not new_ref:# todo: it was an or why? turend into an and lets see if/what it breaks.\n self.change_look_here(author = 'תרגום יונתן', new_author = 'תרגום', category = 'Writings')\n if not self.index:\n # print \"deleting wrong: {} couldn't find an index\".format(self.ref)\n self.ref = None\n else: # not wrong_ref. and found a ref, might just be a correct ref. todo: how to test it is correct?\n pass\n # try to get the ref from the index via the titles found (without an index - ex. ילקוט שמעוני, דניאל תתרסה)\n # it is look at nodes names when we are not testing giving wrong Ref?\n elif hasattr(self, \"opt_titles\") and self.opt_titles:\n indexs = self.extract_cat(include_dependant=True)\n if indexs:\n indexs.extend(self.indexs)\n # if self.author == 'משנה תורה':\n # self.reduce_indexs_with_matcher()\n node_name = ''\n for opt_title in self.opt_titles:\n try:\n new_index = library.get_index(opt_title)\n if not indexs or new_index.is_complex(): # or library.get_title_node_dict('he')[opt_title] != library.get_title_node_dict('en')[new_index.title]\n self.index = new_index\n possible_nodes = new_index.all_titles('he')\n node_guess = intersect_list_string(possible_nodes, self.raw_ref.rawText)\n if not self.ref and node_guess:\n try:\n r = Ref(node_guess)\n if not self.check_for_wrong_ref(r):\n self.ref = r\n except InputError:\n self.ref = ''\n print('the node guess is not a Ref')\n elif node_guess:\n # then we should check witch is the better option\n # maybe using the length of matching words in regards to the titles\n ref_opt1 = self.ref.he_normal()\n ref_opt2 = node_guess\n sets = strings2sets(ref_opt1,ref_opt2, self.raw_ref.rawText)\n if abs(len(set[1])-len(set[2])) < abs(len(set[0])-len(set[2])):\n self.ref = Ref(node_guess)\n elif new_index not in indexs:\n intersected = self.intersected_indexes(new_index, indexs)\n if intersected:\n node_name = Ref(intersected[0].title).he_normal() # TODO: find cases with list longer than one and figure it out.\n self.index = intersected\n except (exceptions.BookNameError, TypeError):\n # print \"excepted {}\".format(opt_title)\n pass\n if node_name:\n try:\n new_string = re.sub(opt_title, node_name, self.text)\n new_string = re.sub('[()]', '', new_string) # because we are going to use Ref rather than library.get_refs_in_string() that needes the parentheses\n self.ref = Ref(new_string)\n except exceptions.InputError:\n try:\n self.ref = Ref(','.join(new_string.split(',')[0:2]))\n except exceptions.InputError:\n # try again without the name of the node, but with the index recognizing that the node is in that index\n try:\n new_string = re.sub(opt_title, \"\", 
new_string)\n self.ref = Ref(new_string)\n except exceptions.InputError:\n print(\"we tried\")\n self.ref = None\n elif (self.indexs or self.index) and self.raw_ref and not self.raw_ref.is_sham:\n self.ref_opt = []\n if not self.indexs:\n self.indexs = [self.index]\n for ind in self.indexs:\n new_index = ind if isinstance(ind, Index) else library.get_index(ind)\n self.index = new_index\n possible_nodes = new_index.all_titles('he')\n ns_titles_and_refs = []\n if hasattr(new_index, 'alt_structs'):\n ns=[]\n ns_titles_and_refs = dict()\n [ns.extend(alt['nodes']) for alt in new_index.alt_structs.values()]\n if any('titles' in nsone for nsone in ns): # 'titles' in ns[0].keys():\n ns_titles_and_refs = dict([(x['titles'][1]['text'], x['wholeRef']) for x\n in ns if ('wholeRef' in x and 'titles' in x)])\n if any('sharedTitle' in nsone for nsone in ns): # todo: old code: 'sharedTitle' in ns[0].keys():\n ns_titles_and_refs.update(dict([(Term().load_by_title(x['sharedTitle']).get_primary_title('he')\n, x['wholeRef']) for x in ns if 'sharedTitle' in x]))\n possible_nodes.extend(ns_titles_and_refs)\n node_guess = intersect_list_string(possible_nodes, self.raw_ref.rawText)\n if not self.ref and node_guess:\n try:\n if ns_titles_and_refs:\n try:\n r = Ref(ns_titles_and_refs.get(node_guess, ''))\n r.normal()\n except (InputError, AttributeError):\n r = Ref(node_guess)\n else:\n r = Ref(node_guess)\n if not self.check_for_wrong_ref(r):\n self.ref_opt.append(r)\n except InputError:\n # self.ref_opt.append('')\n print('the node guess is not a Ref')\n if self.ref_opt:\n if len(self.ref_opt) == 1:\n r = self.ref_opt[0]\n #self.ref = r\n if not r.sections:\n sections = ' '.join([sect for sect in re.sub('[(,)]', ' ', self.raw_ref.rawText).split() if is_hebrew_number(sect)]) #todo: maybe bad huristic and should wait to do this on the PM level?\n if sections:\n try:\n self.ref = Ref(r.he_normal() + ' ' + sections)\n except InputError as e:\n pass\n return\n self.ref = r\n else: # more than one option, we need to choose the better one.\n he_options = [r.he_normal().replace(\"ן\", \"ם\") for r in self.ref_opt]\n try:\n better = intersect_list_string(he_options, re.sub('[()]', '', self.raw_ref.rawText), ref_opt = True)\n r = Ref(better)\n if not r.sections:\n sections = ' '.join(\n [sect for sect in re.sub('[(,)]', ' ', self.raw_ref.rawText).split() if\n is_hebrew_number(\n sect)]) # todo: maybe bad huristic and should wait to do this on the PM level?\n if sections:\n try:\n self.ref = Ref(r.he_normal() + ' ' + sections)\n except (AttributeError, InputError):\n pass\n return\n self.ref = r\n except InputError:\n print(\"more than one option to guess from\")\n\n\n # elif node_guess:\n # # then we should check witch is the better option\n # # maybe using the length of matching words in regards to the titles\n # ref_opt1 = self.ref.he_normal()\n # ref_opt2 = node_guess\n # # sets = strings2sets(ref_opt1, ref_opt2, self.raw_ref.rawText)\n # if abs(len(set[1]) - len(set[2])) < abs(len(set[0]) - len(set[2])):\n # self.ref = Ref(node_guess)", "title": "" }, { "docid": "8c9cf859a5192a7066c7d24f18e992df", "score": "0.47011754", "text": "def test_readwriteread():\n uv_in = UVCal()\n uv_out = UVCal()\n testfile = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.fitsA')\n write_file = os.path.join(DATA_PATH, 'test/outtest_omnical.fits')\n uv_in.read_calfits(testfile)\n uv_in.write_calfits(write_file, clobber=True)\n uv_out.read_calfits(write_file)\n nt.assert_equal(uv_in, uv_out)\n\n # test without freq_range parameter\n 
uv_in.freq_range = None\n uv_in.write_calfits(write_file, clobber=True)\n uv_out.read_calfits(write_file)\n nt.assert_equal(uv_in, uv_out)", "title": "" }, { "docid": "ddf3d1076918f601a7b704a10561fda6", "score": "0.46959445", "text": "def gfalternate_updatedict(cf,ds_tower,ds_alt):\n if \"alternate\" not in dir(ds_tower): return\n section = \"Drivers\"\n series_list = cf[section].keys()\n for series in series_list:\n if \"GapFillFromAlternate\" not in cf[section][series].keys(): continue\n # name of alternate output series in ds\n output_list = cf[section][series][\"GapFillFromAlternate\"].keys()\n # loop over the outputs listed in the control file\n for output in output_list:\n if output not in ds_tower.alternate.keys(): ds_tower.alternate[output] = {}\n ds_tower.alternate[output][\"label_tower\"] = series\n # source name\n ds_tower.alternate[output][\"source\"] = cf[section][series][\"GapFillFromAlternate\"][output][\"source\"]\n # site name\n ds_tower.alternate[output][\"site_name\"] = ds_tower.globalattributes[\"site_name\"]\n # alternate data file name\n # first, look in the [Files] section for a generic file name\n file_list = cf[\"Files\"].keys()\n lower_file_list = [item.lower() for item in file_list]\n if ds_tower.alternate[output][\"source\"].lower() in lower_file_list:\n # found a generic file name\n i = lower_file_list.index(ds_tower.alternate[output][\"source\"].lower())\n ds_tower.alternate[output][\"file_name\"] = cf[\"Files\"][file_list[i]]\n else:\n # no generic file name found, look for a file name in the variable section\n ds_tower.alternate[output][\"file_name\"] = cf[section][series][\"GapFillFromAlternate\"][output][\"file_name\"]\n # if the file has not already been read, do it now\n if ds_tower.alternate[output][\"file_name\"] not in ds_alt:\n ds_alt[ds_tower.alternate[output][\"file_name\"]] = qcio.nc_read_series(ds_tower.alternate[output][\"file_name\"])\n # get the type of fit\n ds_tower.alternate[output][\"fit_type\"] = \"OLS\"\n if \"fit\" in cf[section][series][\"GapFillFromAlternate\"][output]:\n if cf[section][series][\"GapFillFromAlternate\"][output][\"fit\"].lower() in [\"ols\",\"ols_thru0\",\"mrev\",\"replace\",\"rma\",\"odr\"]:\n ds_tower.alternate[output][\"fit_type\"] = cf[section][series][\"GapFillFromAlternate\"][output][\"fit\"]\n else:\n log.info(\"gfAlternate: unrecognised fit option for series \"+output)\n # force the fit through the origin\n #ds_tower.alternate[output][\"thru0\"] = \"no\"\n #if \"thru0\" in cf[section][series][\"GapFillFromAlternate\"][output]:\n #if cf[section][series][\"GapFillFromAlternate\"][output][\"thru0\"].lower() in [\"yes\",\"true\"]:\n #ds_tower.alternate[output][\"thru0\"] = \"yes\"\n #else:\n #log.info(\"gfAlternate: unrecognised thru0 option for series \"+output)\n # correct for lag?\n ds_tower.alternate[output][\"lag\"] = \"yes\"\n if \"lag\" in cf[section][series][\"GapFillFromAlternate\"][output]:\n if cf[section][series][\"GapFillFromAlternate\"][output][\"lag\"].lower() in [\"no\",\"false\"]:\n ds_tower.alternate[output][\"lag\"] = \"no\"\n else:\n log.info(\"gfAlternate: unrecognised lag option for series \"+output)\n # alternate data variable name if different from name used in control file\n if \"alternate_name\" in cf[section][series][\"GapFillFromAlternate\"][output]:\n ds_tower.alternate[output][\"alternate_name\"] = cf[section][series][\"GapFillFromAlternate\"][output][\"alternate_name\"]\n else:\n ds_tower.alternate[output][\"alternate_name\"] = series\n # results of best fit for plotting later on\n if 
\"results\" not in ds_tower.alternate[output].keys():\n ds_tower.alternate[output][\"results\"] = {\"startdate\":[],\"enddate\":[],\"No. points\":[],\"r\":[],\n \"Bias\":[],\"RMSE\":[],\"Frac Bias\":[],\"NMSE\":[],\n \"Avg (tower)\":[],\"Avg (alternate)\":[],\n \"Var (tower)\":[],\"Var (alternate)\":[],\"Var ratio\":[],\n \"Lag (uncorrected)\":[],\"Lag (corrected)\":[],\n \"Slope\":[],\"Offset\":[]}\n # create an empty series in ds if the alternate output series doesn't exist yet\n if output not in ds_tower.series.keys():\n data,flag,attr = qcutils.MakeEmptySeries(ds_tower,output)\n qcutils.CreateSeries(ds_tower,output,data,Flag=flag,Attr=attr)", "title": "" }, { "docid": "a1b7d82507b0ec6bd0d3e84537d53d50", "score": "0.46922886", "text": "def load_chain(outfile):\n chain = np.genfromtxt(outfile)\n return reshape_chain(chain)", "title": "" } ]