code | docstring | _id |
---|---|---|
class GSPAuction(Auction): <NEW_LINE> <INDENT> def __init__(self, candidates=[], n_winners=1): <NEW_LINE> <INDENT> Auction.__init__(self, candidates) <NEW_LINE> self.n_winners = min(n_winners, len(candidates)) <NEW_LINE> self.candidates = sorted(self.candidates, key=lambda x: -x['bid'] * x['quality_score']) <NEW_LINE> <DEDENT> def GetWinners(self, extended=False): <NEW_LINE> <INDENT> self.candidates.append(GSPCandidate(0, 0)) <NEW_LINE> self.candidates.append(GSPCandidate(0, 0)) <NEW_LINE> for i in range(self.n_winners): <NEW_LINE> <INDENT> self.winners.append(GSPWinner( self.candidates[i], self.candidates[i + 1]['quality_score'] * self.candidates[i + 1]['bid'] / self.candidates[i]['quality_score'])) <NEW_LINE> <DEDENT> ret_winners = self.winners + ([self.candidates[self.n_winners]] if extended else []) <NEW_LINE> self.candidates = self.candidates[:-2] <NEW_LINE> return ret_winners | This is the most fundamental GSP auction.
Given the number of winners: n_winners
1. rank candidates by bid * quality_score
2. select the top n_winners as winners.
3. the price of each winner is bid' * quality_score' / quality_score,
where bid' and quality_score' are for the next candidate. | 6259905507d97122c42181f0 |
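
The pricing rule in this row's docstring is easy to check in isolation. Below is a minimal sketch of generalized second-price pricing mirroring steps 1-3 above; it is independent of the `Auction`/`GSPCandidate` classes (not shown in this row), so the dict-based candidate format is an assumption.

```python
def gsp_prices(candidates, n_winners):
    """Rank by bid * quality_score; winner i pays the rank score of
    candidate i + 1 divided by winner i's own quality score."""
    ranked = sorted(candidates, key=lambda c: -c['bid'] * c['quality_score'])
    ranked.append({'bid': 0.0, 'quality_score': 0.0})  # sentinel: last winner pays 0
    winners = []
    for i in range(min(n_winners, len(ranked) - 1)):
        nxt = ranked[i + 1]
        price = nxt['bid'] * nxt['quality_score'] / ranked[i]['quality_score']
        winners.append((ranked[i], price))
    return winners
```

For example, with candidates `{'bid': 2.0, 'quality_score': 0.9}` and `{'bid': 1.5, 'quality_score': 1.0}` and one winner, the first candidate wins (rank score 1.8 vs 1.5) and pays 1.5 * 1.0 / 0.9 ≈ 1.67.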
class ConnectionFailureToRemoteLibvirtInstance(MCVirtException): <NEW_LINE> <INDENT> pass | Connection failure whilst attempting to obtain a remote libvirt connection. | 62599055cc0a2c111447c532 |
class PyFenicsDolfinx(PythonPackage): <NEW_LINE> <INDENT> homepage = "https://github.com/FEniCS/dolfinx" <NEW_LINE> url = "https://github.com/FEniCS/dolfinx/archive/0.1.0.tar.gz" <NEW_LINE> git = "https://github.com/FEniCS/dolfinx.git" <NEW_LINE> maintainers = ["js947", "chrisrichardson", "garth-wells"] <NEW_LINE> version("main", branch="main") <NEW_LINE> version("0.1.0", sha256="0269379769b5b6d4d1864ded64402ecaea08054c2a5793c8685ea15a59af5e33") <NEW_LINE> depends_on("[email protected]:", type="build") <NEW_LINE> depends_on("hdf5", type="build") <NEW_LINE> depends_on("pkgconfig", type=("build", "run")) <NEW_LINE> depends_on('[email protected]:', type=('build', 'run')) <NEW_LINE> depends_on("py-setuptools", type="build") <NEW_LINE> depends_on("fenics-dolfinx@main", when="@main") <NEW_LINE> depends_on("[email protected]", when="@0.1.0") <NEW_LINE> depends_on("fenics-basix@main", type=("build", "link"), when="@main") <NEW_LINE> depends_on("[email protected]", type=("build", "link"), when="@0.1.0") <NEW_LINE> depends_on("py-mpi4py", type=("build", "run")) <NEW_LINE> depends_on("py-petsc4py", type=("build", "run")) <NEW_LINE> depends_on("[email protected]:2.7.99", type=("build", "run")) <NEW_LINE> depends_on("[email protected]:", type="build") <NEW_LINE> depends_on("py-fenics-ffcx@main", type="run", when="@main") <NEW_LINE> depends_on("[email protected]", type="run", when="@0.1.0") <NEW_LINE> depends_on("py-fenics-ufl@main", type="run", when="@main") <NEW_LINE> depends_on("[email protected]", type="run", when="@0.1.0") <NEW_LINE> depends_on("py-cffi", type="run") <NEW_LINE> depends_on("py-numpy", type="run") <NEW_LINE> phases = ['build_ext', 'build', 'install'] <NEW_LINE> build_directory = 'python' | Python interface library to Next generation FEniCS problem solving
environment | 6259905594891a1f408ba199 |
class Dialog1b(QDialog): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> QDialog.__init__(self) <NEW_LINE> self.setWindowTitle("tile stitching") <NEW_LINE> btn_start = QPushButton("&start") <NEW_LINE> btn_start.clicked.connect(self.start_stitching) <NEW_LINE> l1 = QLabel('grid size x') <NEW_LINE> self.grid_x = QLineEdit() <NEW_LINE> l2 = QLabel('grid size y') <NEW_LINE> self.grid_y = QLineEdit() <NEW_LINE> l3 = QLabel('overlap') <NEW_LINE> self.overlap = QLineEdit() <NEW_LINE> l4 = QLabel('number of z planes') <NEW_LINE> self.z_planes = QLineEdit() <NEW_LINE> l7 = QLabel('image suffix') <NEW_LINE> self.suffix = QLineEdit() <NEW_LINE> self.suffix.setText('_ORG.tif') <NEW_LINE> vbox = QVBoxLayout() <NEW_LINE> l8 = QLabel('DatasetID') <NEW_LINE> self.dID = QLineEdit() <NEW_LINE> self.cb = QtWidgets.QCheckBox('upload to omero') <NEW_LINE> hbox = QHBoxLayout() <NEW_LINE> for w in [self.cb, l8, self.dID]: <NEW_LINE> <INDENT> hbox.addWidget(w) <NEW_LINE> <DEDENT> for w in [l7, self.suffix, l1, self.grid_x, l2, self.grid_y, l3, self.overlap, l4, self.z_planes, btn_start]: <NEW_LINE> <INDENT> vbox.addWidget(w) <NEW_LINE> <DEDENT> vbox.addLayout(hbox) <NEW_LINE> self.setLayout(vbox) <NEW_LINE> <DEDENT> def start_stitching(self): <NEW_LINE> <INDENT> file_choices = "TXT (*.txt)|*.txt" <NEW_LINE> input_file = QFileDialog.getOpenFileName(self, 'open input file', '', file_choices) <NEW_LINE> with open(input_file[0]) as f: <NEW_LINE> <INDENT> lines = f.readlines() <NEW_LINE> for l in lines: <NEW_LINE> <INDENT> text = l.split('|') <NEW_LINE> prefix = text[0] <NEW_LINE> directory = text[1] <NEW_LINE> timepoints = text[2].rstrip() <NEW_LINE> outfile = directory+'//'+prefix+'_stitched.tif' <NEW_LINE> zplanes = self.z_planes.text() <NEW_LINE> overlap = self.overlap.text() <NEW_LINE> gridx = self.grid_x.text() <NEW_LINE> gridy = self.grid_y.text() <NEW_LINE> suffix = self.suffix.text() <NEW_LINE> arguments = directory+'#'+prefix+'#'+zplanes+'#'+overlap+'#'+timepoints+'#'+gridx+'#'+gridy+'#'+suffix+'#'+outfile <NEW_LINE> print(arguments) <NEW_LINE> os.system(fiji+' -macro '+ stitching_macro + ' ' + arguments) <NEW_LINE> did = self.dID.text() <NEW_LINE> if self.cb.isChecked(): <NEW_LINE> <INDENT> os.system(fiji+' -macro '+ omero_upload_macro + ' '+outfile+'#'+did) <NEW_LINE> <DEDENT> self.close() | Dialog1b(QDialog)
asks for metadata for the stitching process
offers to upload the results to OMERO under a given dataset ID | 6259905523e79379d538da42 |
class Ship(Card): <NEW_LINE> <INDENT> def __init__(self, data): <NEW_LINE> <INDENT> Card.__init__(self, data) <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return 'Ship({})'.format(self.name) <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return 'Ship({})'.format( self.name.__repr__(), ) | Represents a ship card. | 6259905507f4c71912bb0980 |
class Command(BaseCommand): <NEW_LINE> <INDENT> def handle(self, *args, **options): <NEW_LINE> <INDENT> load_status() | Loads data into newly created database | 625990554e4d56256637394d |
class TestPullGpupsSparse(unittest.TestCase): <NEW_LINE> <INDENT> def test_static_graph(self): <NEW_LINE> <INDENT> startup_program = fluid.Program() <NEW_LINE> train_program = fluid.Program() <NEW_LINE> slots = [] <NEW_LINE> with fluid.program_guard(train_program, startup_program): <NEW_LINE> <INDENT> l = fluid.layers.data( name='input', shape=[1], dtype="int64", lod_level=1) <NEW_LINE> slots.append(l) <NEW_LINE> output = _pull_gpups_sparse( slots, size=[11], is_distributed=True, is_sparse=True) <NEW_LINE> cost = paddle.fluid.layers.mean(output) <NEW_LINE> sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) <NEW_LINE> sgd_optimizer.minimize(cost, train_program) <NEW_LINE> block = train_program.global_block() <NEW_LINE> place = fluid.CPUPlace() <NEW_LINE> if fluid.core.is_compiled_with_cuda(): <NEW_LINE> <INDENT> place = fluid.CUDAPlace(0) <NEW_LINE> <DEDENT> exe = fluid.Executor(place) <NEW_LINE> exe.run(startup_program) <NEW_LINE> img = np.array([1]).astype(np.int64) <NEW_LINE> res = exe.run(train_program, feed={'input': img}, fetch_list=[output]) | Test PullGpupsSparse op. | 62599055baa26c4b54d507ea |
class SumInventory(SumBase): <NEW_LINE> <INDENT> __intypes__ = [inventory.Inventory] <NEW_LINE> def update(self, store, context): <NEW_LINE> <INDENT> value = self.eval_args(context)[0] <NEW_LINE> store[self.handle].add_inventory(value) | Calculate the sum of the inventories. The result is an Inventory. | 62599055adb09d7d5dc0bab1 |
class Recipient(NamedTuple): <NEW_LINE> <INDENT> messenger: str <NEW_LINE> user: TypeUser <NEW_LINE> address: str | Class is used to represent a message recipient. | 625990558e71fb1e983bd010 |
class mnist_co(Dataset): <NEW_LINE> <INDENT> def __init__(self, data_path, nt, num_datapoints=None): <NEW_LINE> <INDENT> self.data_path = data_path <NEW_LINE> self.num_totalpoints = len(fnmatch.filter(os.listdir(self.data_path), '*.hkl')) <NEW_LINE> if num_datapoints is not None: <NEW_LINE> <INDENT> self.num_datapoints = num_datapoints <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.num_datapoints = self.num_totalpoints <NEW_LINE> <DEDENT> assert num_datapoints <= self.num_totalpoints <NEW_LINE> self.nt = nt <NEW_LINE> self.indices = np.arange(self.num_datapoints) <NEW_LINE> <DEDENT> def __len__(self): <NEW_LINE> <INDENT> return self.num_datapoints <NEW_LINE> <DEDENT> def __getitem__(self, idx): <NEW_LINE> <INDENT> clip_path = self.data_path + '/' + str(idx) + '.hkl' <NEW_LINE> f = open(clip_path, 'r') <NEW_LINE> storage_dict = hkl.load(f) <NEW_LINE> f.close() <NEW_LINE> return storage_dict['videos'].astype(np.float32)[:self.nt] <NEW_LINE> <DEDENT> def shuffle(self): <NEW_LINE> <INDENT> self.indices = np.random.permutation(self.indices) | Assumes that there is a root directory which contains each individual video
saved as 1.hkl, 2.hkl....N.hkl
Clip IDs may be provided or picked at random given the number to be picked | 6259905532920d7e50bc7595 |
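
A hedged usage sketch of the dataset above: the directory path and sizes are placeholders, and it assumes each `<idx>.hkl` file holds a dict with a `'videos'` array, as `__getitem__` expects.

```python
from torch.utils.data import DataLoader

# Placeholder path and sizes; each <idx>.hkl must contain {'videos': ndarray}.
dataset = mnist_co(data_path='data/mnist_clips', nt=10, num_datapoints=100)
loader = DataLoader(dataset, batch_size=8)

for batch in loader:
    print(batch.shape)  # (8, 10, H, W) float32 clips, truncated to nt frames
    break
```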
class IImport(IPythonNode): <NEW_LINE> <INDENT> fromimport = Attribute("The module name from import or None") <NEW_LINE> names = Attribute(u"List of tuples containing (importname, asname)") | Import line.
| 62599055dd821e528d6da423 |
class BlenderVRConfigFileOperator(Operator): <NEW_LINE> <INDENT> bl_label = "Manipulate BlenderVR Configuration File" <NEW_LINE> bl_idname = 'bvr.configfile' <NEW_LINE> bl_options = {'REGISTER'} <NEW_LINE> action = bpy.props.StringProperty(options={'HIDDEN'}, default="undefined") <NEW_LINE> def execute(self, context): <NEW_LINE> <INDENT> act = self.action <NEW_LINE> if act == "new": <NEW_LINE> <INDENT> return self.execute_new(context) <NEW_LINE> <DEDENT> elif act == "load": <NEW_LINE> <INDENT> return self.execute_load(context) <NEW_LINE> <DEDENT> elif act : <NEW_LINE> <INDENT> self.report({'ERROR'}, 'action "{}" (in configfile) not defined yet'.format(act)) <NEW_LINE> return {'CANCELLED'} <NEW_LINE> <DEDENT> <DEDENT> def execute_new(self, context): <NEW_LINE> <INDENT> self.report({'ERROR'}, 'currently unimplemented') <NEW_LINE> return {'CANCELLED'} <NEW_LINE> return {'FINISHED'} <NEW_LINE> <DEDENT> def execute_load(self, context): <NEW_LINE> <INDENT> global console <NEW_LINE> scene = context.scene <NEW_LINE> props = scene.blendervr <NEW_LINE> pickle_profile = osp.abspath(props.profile_file) <NEW_LINE> xml_config = osp.abspath(props.config_file_path) <NEW_LINE> if console is None: <NEW_LINE> <INDENT> console = bvrconsole.BVRConsoleControler(pickle_profile) <NEW_LINE> console.start() <NEW_LINE> <DEDENT> logger.info("Loading XML VR device configuration file %s", xml_config) <NEW_LINE> console.profile.setValue(['config', 'file'], xml_config) <NEW_LINE> props.status_loaded_config_file = console.load_configuration_file() <NEW_LINE> if not props.status_loaded_config_file: <NEW_LINE> <INDENT> self.report({'ERROR'}, 'VR system configuration file load fail.') <NEW_LINE> return {'CANCELLED'} <NEW_LINE> <DEDENT> print(console._screenSets.items()) <NEW_LINE> bvrprops.current_screens.clear() <NEW_LINE> currentScreenSet = console.profile.getValue(['screen', 'set']) <NEW_LINE> possibleScreenSets = list(OrderedDict(sorted(console._screenSets.items())).keys()) <NEW_LINE> self.report({'INFO'}, 'VR system configuration file loaded.') <NEW_LINE> return {'FINISHED'} | Load/reload/create BlenderVR configuration file.
When the file is loaded, its available screen sets are used to
refresh the tool UI (list of screen sets). | 625990554a966d76dd5f0438 |
class Source(BaseModel): <NEW_LINE> <INDENT> originAbsenceDetection: bool = False | Models a source. | 62599055d7e4931a7ef3d5c5 |
class ComponentPattern(object): <NEW_LINE> <INDENT> _components = None <NEW_LINE> @staticmethod <NEW_LINE> def guess_components(): <NEW_LINE> <INDENT> if not settings.env_root: <NEW_LINE> <INDENT> return () <NEW_LINE> <DEDENT> comp = [] <NEW_LINE> for base in os.listdir(settings.cases_dir): <NEW_LINE> <INDENT> full = os.path.join(settings.cases_dir, base) <NEW_LINE> if os.path.isdir(full): <NEW_LINE> <INDENT> comp.append(base) <NEW_LINE> <DEDENT> <DEDENT> return set(comp) <NEW_LINE> <DEDENT> @classmethod <NEW_LINE> def is_component(cls, comp): <NEW_LINE> <INDENT> if cls._components is None: <NEW_LINE> <INDENT> cls._components = cls.guess_components() <NEW_LINE> <DEDENT> return comp in cls._components <NEW_LINE> <DEDENT> def load(self, comp): <NEW_LINE> <INDENT> if self.is_component(comp): <NEW_LINE> <INDENT> return os.path.join(settings.cases_dir, comp) | tests from a component name | 6259905545492302aabfda1f |
class ShiningLaser(Ability): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self.name = 'Shining Laser' <NEW_LINE> self.element = Elements.LIGHT <NEW_LINE> self.mana_cost = 7 <NEW_LINE> self.attack_power = 20 <NEW_LINE> self.category = Category.MAGIC <NEW_LINE> <DEDENT> @property <NEW_LINE> def base_cast_time(self) -> int: <NEW_LINE> <INDENT> return 1 <NEW_LINE> <DEDENT> @staticmethod <NEW_LINE> def get_casting_message(elemental_name: str) -> str: <NEW_LINE> <INDENT> return f"{WARNING} {elemental_name} is shining mightily!!" | A charge ability that takes one turn to activate. | 6259905599cbb53fe6832427 |
class SubFormatSSR_Transition(FixedTransition): <NEW_LINE> <INDENT> command_attributes = {'tlmsid': 'OFMTSSSR'} <NEW_LINE> state_keys = ['subformat'] <NEW_LINE> transition_key = 'subformat' <NEW_LINE> transition_val = 'SSR' | Transition to telemetry SSR subformat | 625990553eb6a72ae038bba8 |
class Solution: <NEW_LINE> <INDENT> def sortColors2(self, colors, k): <NEW_LINE> <INDENT> self.quickSort(0, len(colors) - 1, colors) <NEW_LINE> <DEDENT> def quickSort(self, start, end, colors): <NEW_LINE> <INDENT> if start >= end: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> pivot = colors[start + (end - start) // 2] <NEW_LINE> left = start <NEW_LINE> right = end <NEW_LINE> while left <= right: <NEW_LINE> <INDENT> while left <= right and colors[left] < pivot: <NEW_LINE> <INDENT> left += 1 <NEW_LINE> <DEDENT> while left <= right and colors[right] > pivot: <NEW_LINE> <INDENT> right -= 1 <NEW_LINE> <DEDENT> if left <= right: <NEW_LINE> <INDENT> colors[left], colors[right] = colors[right], colors[left] <NEW_LINE> left += 1 <NEW_LINE> right -= 1 <NEW_LINE> <DEDENT> <DEDENT> self.quickSort(start, right, colors) <NEW_LINE> self.quickSort(left, end, colors) | @param colors: A list of integer
@param k: An integer
@return: nothing | 62599055e64d504609df9e74 |
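
A quick usage check of the quicksort-based `sortColors2` above; note the sort is in place and the `k` parameter (the number of colors) is unused by this implementation.

```python
colors = [3, 2, 2, 1, 4]
Solution().sortColors2(colors, k=4)
print(colors)  # [1, 2, 2, 3, 4]
```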
@base.ReleaseTracks(base.ReleaseTrack.GA) <NEW_LINE> class Create(base.CreateCommand): <NEW_LINE> <INDENT> @staticmethod <NEW_LINE> def Args(parser): <NEW_LINE> <INDENT> _Args(parser) <NEW_LINE> flags.AddClusterAutoscalingFlags(parser, hidden=True) <NEW_LINE> flags.AddLocalSSDFlag(parser, suppressed=True) <NEW_LINE> flags.AddPreemptibleFlag(parser, for_node_pool=True, suppressed=True) <NEW_LINE> flags.AddEnableAutoRepairFlag(parser, for_node_pool=True, suppressed=True) <NEW_LINE> flags.AddEnableAutoUpgradeFlag(parser, for_node_pool=True, suppressed=True) <NEW_LINE> flags.AddServiceAccountFlag(parser, suppressed=True) <NEW_LINE> flags.AddOldNodePoolScopesFlag(parser) <NEW_LINE> flags.AddNodeTaintsFlag(parser, for_node_pool=True, hidden=True) <NEW_LINE> <DEDENT> def ParseCreateNodePoolOptions(self, args): <NEW_LINE> <INDENT> return ParseCreateNodePoolOptionsBase(args) <NEW_LINE> <DEDENT> def Run(self, args): <NEW_LINE> <INDENT> adapter = self.context['api_adapter'] <NEW_LINE> location_get = self.context['location_get'] <NEW_LINE> location = location_get(args) <NEW_LINE> if not args.scopes: <NEW_LINE> <INDENT> args.scopes = [] <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> if not args.scopes: <NEW_LINE> <INDENT> args.scopes = [] <NEW_LINE> <DEDENT> pool_ref = adapter.ParseNodePool(args.name, location) <NEW_LINE> options = self.ParseCreateNodePoolOptions(args) <NEW_LINE> if options.enable_autorepair is not None: <NEW_LINE> <INDENT> log.status.Print( messages.AutoUpdateUpgradeRepairMessage(options.enable_autorepair, 'autorepair')) <NEW_LINE> <DEDENT> if options.enable_autoupgrade is not None: <NEW_LINE> <INDENT> log.status.Print( messages.AutoUpdateUpgradeRepairMessage(options.enable_autoupgrade, 'autoupgrade')) <NEW_LINE> <DEDENT> operation_ref = adapter.CreateNodePool(pool_ref, options) <NEW_LINE> adapter.WaitForOperation( operation_ref, 'Creating node pool {0}'.format(pool_ref.nodePoolId), timeout_s=args.timeout) <NEW_LINE> pool = adapter.GetNodePool(pool_ref) <NEW_LINE> <DEDENT> except apitools_exceptions.HttpError as error: <NEW_LINE> <INDENT> raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT) <NEW_LINE> <DEDENT> log.CreatedResource(pool_ref) <NEW_LINE> return [pool] | Create a node pool in a running cluster. | 6259905516aa5153ce401a2d |
class DeclineTaskView(TaskApprovePermitMixin, UpdateView): <NEW_LINE> <INDENT> model = Task <NEW_LINE> form_class = DeclineTaskForm <NEW_LINE> template_name = 'task_management/task_decline_form.html' <NEW_LINE> def post(self, request, *args, **kwargs): <NEW_LINE> <INDENT> result = super(DeclineTaskView, self).post(request, *args, **kwargs) <NEW_LINE> TaskActionLog.log(self.request.user, 'decline task', self.object) <NEW_LINE> send_message(self.request.user, 'decline task', self.object) <NEW_LINE> return result | View for decline task | 62599055e76e3b2f99fd9f47 |
class InvalidRequest(AnalytixError): <NEW_LINE> <INDENT> pass | Exception thrown when a request to be made to the YouTube
Analytics API is not valid. | 62599055adb09d7d5dc0bab3 |
class Enviroment: <NEW_LINE> <INDENT> def __init__(self, name,probDistro, paramsDistro=[],desc=None,dicObj={}): <NEW_LINE> <INDENT> self.name=name <NEW_LINE> self.probDistro=probDistro <NEW_LINE> self.paramsDistro=paramsDistro <NEW_LINE> self.desc=desc <NEW_LINE> self.dicObj=cp.deepcopy(dicObj) <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return self.name +': ' + self.desc <NEW_LINE> <DEDENT> def __cmp__(self, other): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def applyDistro(self, num=1): <NEW_LINE> <INDENT> return self.probDistro(*self.paramsDistro,num) <NEW_LINE> <DEDENT> def addObj(self,obj, prob, num=1): <NEW_LINE> <INDENT> self.dicObj[obj.fClass]=cp.deepcopy((prob,obj, num)) <NEW_LINE> sorted(self.dicObj[obj.fClass], reverse=True) <NEW_LINE> <DEDENT> def createObj(self, name, desc,fClass, prob,num=1): <NEW_LINE> <INDENT> self.dicObj[fClass]=(prob,Foundable(name,desc,fClass),num) <NEW_LINE> sorted(self.dicObj[fClass], reverse=True) <NEW_LINE> <DEDENT> def rmObj(self,Obj): <NEW_LINE> <INDENT> L=self.dicObj[Obj.fClass] <NEW_LINE> for i in range(len(L)): <NEW_LINE> <INDENT> if L[i][1]==Obj: <NEW_LINE> <INDENT> L.remove(L[i]) <NEW_LINE> break | Environment with a probability distribution and a dictionary of findable objects keyed by class.
| 6259905563d6d428bbee3d1c |
class RegressionDataset(StructuredDataset): <NEW_LINE> <INDENT> def __init__(self, df, dep_var_name, protected_attribute_names, privileged_classes, instance_weights_name='', categorical_features=[], na_values=[], custom_preprocessing=None, metadata=None): <NEW_LINE> <INDENT> if custom_preprocessing: <NEW_LINE> <INDENT> df = custom_preprocessing(df) <NEW_LINE> <DEDENT> dropped = df.dropna() <NEW_LINE> count = df.shape[0] - dropped.shape[0] <NEW_LINE> if count > 0: <NEW_LINE> <INDENT> warning("Missing Data: {} rows removed from {}.".format(count, type(self).__name__)) <NEW_LINE> <DEDENT> df = dropped <NEW_LINE> df = pd.get_dummies(df, columns=categorical_features, prefix_sep='=') <NEW_LINE> privileged_protected_attributes = [] <NEW_LINE> unprivileged_protected_attributes = [] <NEW_LINE> for attr, vals in zip(protected_attribute_names, privileged_classes): <NEW_LINE> <INDENT> privileged_values = [1.] <NEW_LINE> unprivileged_values = [0.] <NEW_LINE> if callable(vals): <NEW_LINE> <INDENT> df[attr] = df[attr].apply(vals) <NEW_LINE> <DEDENT> elif np.issubdtype(df[attr].dtype, np.number): <NEW_LINE> <INDENT> privileged_values = vals <NEW_LINE> unprivileged_values = list(set(df[attr]).difference(vals)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> priv = np.logical_or.reduce(np.equal.outer(vals, df[attr].to_numpy())) <NEW_LINE> df.loc[priv, attr] = privileged_values[0] <NEW_LINE> df.loc[~priv, attr] = unprivileged_values[0] <NEW_LINE> <DEDENT> privileged_protected_attributes.append( np.array(privileged_values, dtype=np.float64)) <NEW_LINE> unprivileged_protected_attributes.append( np.array(unprivileged_values, dtype=np.float64)) <NEW_LINE> <DEDENT> df = pd.DataFrame(MinMaxScaler().fit_transform(df.values), columns=list(df), index=df.index) <NEW_LINE> super(RegressionDataset, self).__init__(df=df, label_names=[dep_var_name], protected_attribute_names=protected_attribute_names, privileged_protected_attributes=privileged_protected_attributes, unprivileged_protected_attributes=unprivileged_protected_attributes, instance_weights_name=instance_weights_name, scores_names=[], metadata=metadata) | Base class for regression datasets. | 62599055004d5f362081fa91 |
class Users(AbstractUser): <NEW_LINE> <INDENT> mobile = models.CharField(max_length=11, unique=True, verbose_name='手机号') <NEW_LINE> email_active = models.BooleanField(default=False, verbose_name="邮箱验证状态") <NEW_LINE> default_address = models.ForeignKey('Address', related_name='users', null=True, blank=True, on_delete=models.SET_NULL, verbose_name='默认地址') <NEW_LINE> class Meta: <NEW_LINE> <INDENT> db_table = "tb_users" <NEW_LINE> verbose_name = "用户" <NEW_LINE> verbose_name_plural = verbose_name | Custom user model class: inherits the built-in user model and adds custom fields | 62599055d7e4931a7ef3d5c7 |
class MEUnit(HybridBlock): <NEW_LINE> <INDENT> def __init__(self, in_channels, out_channels, side_channels, groups, downsample, ignore_group, **kwargs): <NEW_LINE> <INDENT> super(MEUnit, self).__init__(**kwargs) <NEW_LINE> self.downsample = downsample <NEW_LINE> mid_channels = out_channels // 4 <NEW_LINE> if downsample: <NEW_LINE> <INDENT> out_channels -= in_channels <NEW_LINE> <DEDENT> with self.name_scope(): <NEW_LINE> <INDENT> self.compress_conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels, groups=(1 if ignore_group else groups)) <NEW_LINE> self.compress_bn1 = nn.BatchNorm(in_channels=mid_channels) <NEW_LINE> self.c_shuffle = ChannelShuffle( channels=mid_channels, groups=groups) <NEW_LINE> self.dw_conv2 = depthwise_conv3x3( channels=mid_channels, strides=(2 if self.downsample else 1)) <NEW_LINE> self.dw_bn2 = nn.BatchNorm(in_channels=mid_channels) <NEW_LINE> self.expand_conv3 = conv1x1( in_channels=mid_channels, out_channels=out_channels, groups=groups) <NEW_LINE> self.expand_bn3 = nn.BatchNorm(in_channels=out_channels) <NEW_LINE> if downsample: <NEW_LINE> <INDENT> self.avgpool = nn.AvgPool2D(pool_size=3, strides=2, padding=1) <NEW_LINE> <DEDENT> self.activ = nn.Activation("relu") <NEW_LINE> self.s_merge_conv = conv1x1( in_channels=mid_channels, out_channels=side_channels) <NEW_LINE> self.s_merge_bn = nn.BatchNorm(in_channels=side_channels) <NEW_LINE> self.s_conv = conv3x3( in_channels=side_channels, out_channels=side_channels, strides=(2 if self.downsample else 1)) <NEW_LINE> self.s_conv_bn = nn.BatchNorm(in_channels=side_channels) <NEW_LINE> self.s_evolve_conv = conv1x1( in_channels=side_channels, out_channels=mid_channels) <NEW_LINE> self.s_evolve_bn = nn.BatchNorm(in_channels=mid_channels) <NEW_LINE> <DEDENT> <DEDENT> def hybrid_forward(self, F, x): <NEW_LINE> <INDENT> identity = x <NEW_LINE> x = self.compress_conv1(x) <NEW_LINE> x = self.compress_bn1(x) <NEW_LINE> x = self.activ(x) <NEW_LINE> x = self.c_shuffle(x) <NEW_LINE> y = self.s_merge_conv(x) <NEW_LINE> y = self.s_merge_bn(y) <NEW_LINE> y = self.activ(y) <NEW_LINE> x = self.dw_conv2(x) <NEW_LINE> x = self.dw_bn2(x) <NEW_LINE> y = self.s_conv(y) <NEW_LINE> y = self.s_conv_bn(y) <NEW_LINE> y = self.activ(y) <NEW_LINE> y = self.s_evolve_conv(y) <NEW_LINE> y = self.s_evolve_bn(y) <NEW_LINE> y = F.sigmoid(y) <NEW_LINE> x = x * y <NEW_LINE> x = self.expand_conv3(x) <NEW_LINE> x = self.expand_bn3(x) <NEW_LINE> if self.downsample: <NEW_LINE> <INDENT> identity = self.avgpool(identity) <NEW_LINE> x = F.concat(x, identity, dim=1) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> x = x + identity <NEW_LINE> <DEDENT> x = self.activ(x) <NEW_LINE> return x | MENet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
side_channels : int
Number of side channels.
groups : int
Number of groups in convolution layers.
downsample : bool
Whether to downsample.
ignore_group : bool
Whether to ignore the group value in the first convolution layer. | 62599055379a373c97d9a56e |
class DescribeTrainingJobsRequest(AbstractModel): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> self.Offset = None <NEW_LINE> self.Limit = None <NEW_LINE> self.CreationTimeAfter = None <NEW_LINE> self.CreationTimeBefore = None <NEW_LINE> self.NameContains = None <NEW_LINE> self.StatusEquals = None <NEW_LINE> self.Filters = None <NEW_LINE> <DEDENT> def _deserialize(self, params): <NEW_LINE> <INDENT> self.Offset = params.get("Offset") <NEW_LINE> self.Limit = params.get("Limit") <NEW_LINE> self.CreationTimeAfter = params.get("CreationTimeAfter") <NEW_LINE> self.CreationTimeBefore = params.get("CreationTimeBefore") <NEW_LINE> self.NameContains = params.get("NameContains") <NEW_LINE> self.StatusEquals = params.get("StatusEquals") <NEW_LINE> if params.get("Filters") is not None: <NEW_LINE> <INDENT> self.Filters = [] <NEW_LINE> for item in params.get("Filters"): <NEW_LINE> <INDENT> obj = Filter() <NEW_LINE> obj._deserialize(item) <NEW_LINE> self.Filters.append(obj) <NEW_LINE> <DEDENT> <DEDENT> member_set = set(params.keys()) <NEW_LINE> for name, value in vars(self).items(): <NEW_LINE> <INDENT> if name in member_set: <NEW_LINE> <INDENT> member_set.remove(name) <NEW_LINE> <DEDENT> <DEDENT> if len(member_set) > 0: <NEW_LINE> <INDENT> warnings.warn("%s fields are useless." % ",".join(member_set)) | DescribeTrainingJobs request parameter structure
| 6259905555399d3f05627a69 |
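
A sketch of how such a generated request object is typically filled; the field values here are placeholders. `_deserialize` copies the known keys onto the instance and warns about any leftovers.

```python
req = DescribeTrainingJobsRequest()
req._deserialize({"Offset": 0, "Limit": 20, "StatusEquals": "Completed"})
print(req.Limit)    # 20
print(req.Filters)  # None (no "Filters" key was supplied)
```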
class V1EnvVarSource(object): <NEW_LINE> <INDENT> def __init__(self, config_map_key_ref=None, field_ref=None, resource_field_ref=None, secret_key_ref=None): <NEW_LINE> <INDENT> self.swagger_types = { 'config_map_key_ref': 'V1ConfigMapKeySelector', 'field_ref': 'V1ObjectFieldSelector', 'resource_field_ref': 'V1ResourceFieldSelector', 'secret_key_ref': 'V1SecretKeySelector' } <NEW_LINE> self.attribute_map = { 'config_map_key_ref': 'configMapKeyRef', 'field_ref': 'fieldRef', 'resource_field_ref': 'resourceFieldRef', 'secret_key_ref': 'secretKeyRef' } <NEW_LINE> self._config_map_key_ref = config_map_key_ref <NEW_LINE> self._field_ref = field_ref <NEW_LINE> self._resource_field_ref = resource_field_ref <NEW_LINE> self._secret_key_ref = secret_key_ref <NEW_LINE> <DEDENT> @property <NEW_LINE> def config_map_key_ref(self): <NEW_LINE> <INDENT> return self._config_map_key_ref <NEW_LINE> <DEDENT> @config_map_key_ref.setter <NEW_LINE> def config_map_key_ref(self, config_map_key_ref): <NEW_LINE> <INDENT> self._config_map_key_ref = config_map_key_ref <NEW_LINE> <DEDENT> @property <NEW_LINE> def field_ref(self): <NEW_LINE> <INDENT> return self._field_ref <NEW_LINE> <DEDENT> @field_ref.setter <NEW_LINE> def field_ref(self, field_ref): <NEW_LINE> <INDENT> self._field_ref = field_ref <NEW_LINE> <DEDENT> @property <NEW_LINE> def resource_field_ref(self): <NEW_LINE> <INDENT> return self._resource_field_ref <NEW_LINE> <DEDENT> @resource_field_ref.setter <NEW_LINE> def resource_field_ref(self, resource_field_ref): <NEW_LINE> <INDENT> self._resource_field_ref = resource_field_ref <NEW_LINE> <DEDENT> @property <NEW_LINE> def secret_key_ref(self): <NEW_LINE> <INDENT> return self._secret_key_ref <NEW_LINE> <DEDENT> @secret_key_ref.setter <NEW_LINE> def secret_key_ref(self, secret_key_ref): <NEW_LINE> <INDENT> self._secret_key_ref = secret_key_ref <NEW_LINE> <DEDENT> def to_dict(self): <NEW_LINE> <INDENT> result = {} <NEW_LINE> for attr, _ in iteritems(self.swagger_types): <NEW_LINE> <INDENT> value = getattr(self, attr) <NEW_LINE> if isinstance(value, list): <NEW_LINE> <INDENT> result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) <NEW_LINE> <DEDENT> elif hasattr(value, "to_dict"): <NEW_LINE> <INDENT> result[attr] = value.to_dict() <NEW_LINE> <DEDENT> elif isinstance(value, dict): <NEW_LINE> <INDENT> result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> result[attr] = value <NEW_LINE> <DEDENT> <DEDENT> return result <NEW_LINE> <DEDENT> def to_str(self): <NEW_LINE> <INDENT> return pformat(self.to_dict()) <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return self.to_str() <NEW_LINE> <DEDENT> def __eq__(self, other): <NEW_LINE> <INDENT> return self.__dict__ == other.__dict__ <NEW_LINE> <DEDENT> def __ne__(self, other): <NEW_LINE> <INDENT> return not self == other | NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually. | 62599055009cb60464d02a7f |
class BasicNetworkSwitch(IPMixin, PortMixin, Device): <NEW_LINE> <INDENT> _clusto_type = 'networkswitch' <NEW_LINE> _driver_name = 'basicnetworkswitch' <NEW_LINE> _portmeta = {'pwr-nema-5' : {'numports':1}, 'nic-eth' : {'numports':24}} | Basic network switch driver | 62599055be8e80087fbc05cb |
class SiteDetail(generics.RetrieveAPIView): <NEW_LINE> <INDENT> serializer_class = serializers.SiteSerializer <NEW_LINE> lookup_field = 'domain' <NEW_LINE> def get_queryset(self): <NEW_LINE> <INDENT> domain = self.kwargs['domain'] <NEW_LINE> return Site.objects.filter(domain=domain) | Individual SecureTheNews site and its latest scan results | 6259905507d97122c42181f5 |
class DeclareMethodTicketFieldValidityRuleTests(TestBase): <NEW_LINE> <INDENT> pass | The client MUST provide a valid access ticket giving "active" access to
the realm in which the exchange exists or will be created, or "passive"
access if the if-exists flag is set.
The client creates an access ticket with the wrong access rights and attempts to use
it in this method. | 6259905516aa5153ce401a2e |
class StanzaHandled(Exception): <NEW_LINE> <INDENT> pass | StanzaHandled: An Exception which means that the stanza was handled.
If a stanza handler doesn't raise this (or a StanzaError), then the
stanza wasn't handled, and the dispatcher goes on to try some more
dispatchers. | 62599055e64d504609df9e75 |
class Schedule(EventBinder): <NEW_LINE> <INDENT> def __init__(self, schedule): <NEW_LINE> <INDENT> self.schedule = schedule <NEW_LINE> <DEDENT> def bind(self, callback): <NEW_LINE> <INDENT> self.schedule.do(functools.partial(callback, None)) <NEW_LINE> <DEDENT> def get_filter(self): <NEW_LINE> <INDENT> raise NotImplementedError("get_filter() is not supported on Schedule") | A class for binding rules to cron-like schedules.
This uses the schedule module to provide human-readable scheduling
syntax. | 625990553c8af77a43b689e5 |
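
A hypothetical binding using the `schedule` package the docstring refers to; note that `bind()` partially applies `None`, so the callback receives `None` as its event argument.

```python
import time
import schedule

binder = Schedule(schedule.every(5).seconds)  # a schedule.Job: run every 5 seconds
binder.bind(lambda event: print("rule fired, event =", event))

while True:
    schedule.run_pending()  # executes any due jobs
    time.sleep(1)
```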
class ValidateAlembic(api.InstancePlugin): <NEW_LINE> <INDENT> families = ["alembic"] <NEW_LINE> order = inventory.get_order(__file__, "ValidateAlembic") <NEW_LINE> label = "Alembic" <NEW_LINE> actions = [RepairAlembic] <NEW_LINE> optional = True <NEW_LINE> hosts = ["houdini"] <NEW_LINE> def process(self, instance): <NEW_LINE> <INDENT> msg = "Partition mode is not correct. Expected \"Use Combination of " <NEW_LINE> msg += "Transform/Shape Node\"" <NEW_LINE> assert instance[0].parm("partition_mode").eval() == 4, msg <NEW_LINE> msg = "Collapse mode is not correct. Expected \"Collapse Non-Animating" <NEW_LINE> msg += " Identity Objects\"" <NEW_LINE> assert instance[0].parm("collapse").eval() == 1, msg | Validates Alembic settings | 62599055baa26c4b54d507ee |
class SNSRegionConfig(RegionConfig): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> self.topics = {} <NEW_LINE> self.topics_count = 0 <NEW_LINE> <DEDENT> def parse_subscription(self, params, region, subscription): <NEW_LINE> <INDENT> topic_arn = subscription.pop('TopicArn') <NEW_LINE> topic_name = topic_arn.split(':')[-1] <NEW_LINE> if topic_name in self.topics: <NEW_LINE> <INDENT> topic = self.topics[topic_name] <NEW_LINE> manage_dictionary(topic['subscriptions'], 'protocol', {}) <NEW_LINE> protocol = subscription.pop('Protocol') <NEW_LINE> manage_dictionary(topic['subscriptions']['protocol'], protocol, []) <NEW_LINE> topic['subscriptions']['protocol'][protocol].append(subscription) <NEW_LINE> topic['subscriptions_count'] += 1 <NEW_LINE> <DEDENT> <DEDENT> def parse_topic(self, params, region, topic): <NEW_LINE> <INDENT> topic['arn'] = topic.pop('TopicArn') <NEW_LINE> topic['name'] = topic['arn'].split(':')[-1] <NEW_LINE> (prefix, partition, service, region, account, name) = topic['arn'].split(':') <NEW_LINE> api_client = api_clients[region] <NEW_LINE> attributes = api_client.get_topic_attributes(TopicArn=topic['arn'])['Attributes'] <NEW_LINE> for k in ['Owner', 'DisplayName']: <NEW_LINE> <INDENT> topic[k] = attributes[k] if k in attributes else None <NEW_LINE> <DEDENT> for k in ['Policy', 'DeliveryPolicy', 'EffectiveDeliveryPolicy']: <NEW_LINE> <INDENT> topic[k] = json.loads(attributes[k]) if k in attributes else None <NEW_LINE> <DEDENT> topic['name'] = topic['arn'].split(':')[-1] <NEW_LINE> manage_dictionary(topic, 'subscriptions', {}) <NEW_LINE> manage_dictionary(topic, 'subscriptions_count', 0) <NEW_LINE> self.topics[topic['name']] = topic | SNS configuration for a single AWS region
:ivar topics: Dictionary of topics [name]
:ivar topics_count: Number of topics in the region | 62599055adb09d7d5dc0bab5 |
class VmMigrateView(View): <NEW_LINE> <INDENT> def get(self, request, *args, **kwargs): <NEW_LINE> <INDENT> vm_uuid = kwargs.get('vm_uuid', '') <NEW_LINE> vm_manager = VmManager() <NEW_LINE> vm = vm_manager.get_vm_by_uuid(vm_uuid=vm_uuid, related_fields=( 'host', 'host__group', 'host__group__center', 'image', 'mac_ip')) <NEW_LINE> if not vm: <NEW_LINE> <INDENT> return render(request, 'error.html', {'errors': ['云主机不存在']}) <NEW_LINE> <DEDENT> hosts = HostManager().get_hosts_by_group_id(group_id=vm.host.group_id) <NEW_LINE> return render(request, 'vm_migrate.html', context={'vm': vm, 'hosts': hosts}) | VM migration class-based view | 625990558e71fb1e983bd014 |
class Config: <NEW_LINE> <INDENT> SITECONFFILE = '/etc/codebayrc.py' <NEW_LINE> USERCONFFILE = '.codebayrc.py' <NEW_LINE> def loadConfigFile(self, filename): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> f = open(filename) <NEW_LINE> <DEDENT> except IOError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> f.close() <NEW_LINE> execfile(filename) <NEW_LINE> <DEDENT> <DEDENT> def loadStandardConfig(self): <NEW_LINE> <INDENT> self.loadConfigFile(self.SITECONFFILE) <NEW_LINE> if 'HOME' in os.environ: <NEW_LINE> <INDENT> home = os.environ['HOME'] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> home = os.path.expanduser("~") <NEW_LINE> <DEDENT> userconf = os.path.join(home, self.USERCONFFILE) <NEW_LINE> self.loadConfigFile(userconf) <NEW_LINE> <DEDENT> def add(self, name, default): <NEW_LINE> <INDENT> if not hasattr(self, name): <NEW_LINE> <INDENT> setattr(self, name, default) <NEW_LINE> <DEDENT> return getattr(self, name) <NEW_LINE> <DEDENT> def set(self, name, value): <NEW_LINE> <INDENT> setattr(self, name, value) <NEW_LINE> return getattr(self, name) <NEW_LINE> <DEDENT> def get(self, name): <NEW_LINE> <INDENT> return getattr(self, name) | Configuration holder class. | 625990550a50d4780f706864 |
class ArrayOfEnum(ARRAY): <NEW_LINE> <INDENT> def bind_expression(self, bindvalue): <NEW_LINE> <INDENT> return cast(bindvalue, self) <NEW_LINE> <DEDENT> def result_processor(self, dialect, coltype): <NEW_LINE> <INDENT> super_rp = super(ArrayOfEnum, self).result_processor( dialect, coltype) <NEW_LINE> def handle_raw_string(value): <NEW_LINE> <INDENT> inner = re.match(r"^{(.*)}$", value).group(1) <NEW_LINE> return inner.split(",") <NEW_LINE> <DEDENT> def process(value): <NEW_LINE> <INDENT> if value is None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> return super_rp(handle_raw_string(value)) <NEW_LINE> <DEDENT> return process | Helper class to support arrays of enums in PostgreSQL | 62599055b830903b9686ef23 |
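
A sketch of the intended use, modeled on the SQLAlchemy recipe this helper follows: wrapping a PostgreSQL ENUM so arrays of it bind and parse correctly. Table and enum names are placeholders.

```python
from sqlalchemy import Column, Integer, MetaData, Table
from sqlalchemy.dialects.postgresql import ENUM

my_enum = ENUM('one', 'two', 'three', name='my_enum')  # placeholder values
metadata = MetaData()

data = Table(
    'data', metadata,
    Column('id', Integer, primary_key=True),
    # Plain ARRAY(ENUM) cannot round-trip on PostgreSQL; ArrayOfEnum adds
    # the bind-side cast and the raw-string result parsing.
    Column('values', ArrayOfEnum(my_enum)),
)
```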
class Delete(base.DeleteCommand): <NEW_LINE> <INDENT> @staticmethod <NEW_LINE> def Args(parser): <NEW_LINE> <INDENT> parser.add_argument('sink_name', help='The name of the sink to delete.') <NEW_LINE> util.AddNonProjectArgs(parser, 'Delete a sink') <NEW_LINE> parser.display_info.AddCacheUpdater(None) <NEW_LINE> <DEDENT> def Run(self, args): <NEW_LINE> <INDENT> sink_ref = util.GetSinkReference(args.sink_name, args) <NEW_LINE> sink_resource = util.CreateResourceName(util.GetParentFromArgs(args), 'sinks', sink_ref.sinksId) <NEW_LINE> console_io.PromptContinue('Really delete sink [%s]?' % sink_ref.sinksId, cancel_on_no=True) <NEW_LINE> util.GetClient().projects_sinks.Delete( util.GetMessages().LoggingProjectsSinksDeleteRequest( sinkName=sink_resource)) <NEW_LINE> log.DeletedResource(sink_ref) | Deletes a sink.
Deletes a sink and halts the export of log entries associated with that sink.
Deleting a sink does not affect log entries already exported through
the deleted sink, and will not affect other sinks that are exporting
the same log(s). | 6259905545492302aabfda22 |
class State: <NEW_LINE> <INDENT> def __init__(self, tokenize=defaults.TOKENIZE, diff=defaults.DIFF, revert_radius=reverts.defaults.RADIUS, revert_detector=None): <NEW_LINE> <INDENT> self.tokenize = tokenize <NEW_LINE> self.diff = diff <NEW_LINE> if revert_detector is None: <NEW_LINE> <INDENT> self.revert_detector = reverts.Detector(int(revert_radius)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.revert_detector = revert_detector <NEW_LINE> <DEDENT> self.last = None <NEW_LINE> <DEDENT> def process(self, text, revision=None, checksum=None): <NEW_LINE> <INDENT> if checksum is None: <NEW_LINE> <INDENT> checksum = sha1(bytes(text, 'utf8')).hexdigest() <NEW_LINE> <DEDENT> version = Version() <NEW_LINE> revert = self.revert_detector.process(checksum, version) <NEW_LINE> if revert is not None: <NEW_LINE> <INDENT> tokens_added = Tokens() <NEW_LINE> tokens_removed = Tokens() <NEW_LINE> _, _, reverted_to = revert <NEW_LINE> version.tokens = reverted_to.tokens <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if self.last is None: <NEW_LINE> <INDENT> version.tokens = Tokens(Token(t) for t in self.tokenize(text)) <NEW_LINE> tokens_added = version.tokens <NEW_LINE> tokens_removed = Tokens() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> version.tokens, tokens_added, tokens_removed = self.last.tokens.compare(self.tokenize(text), self.diff) <NEW_LINE> <DEDENT> <DEDENT> version.tokens.persist(revision) <NEW_LINE> self.last = version <NEW_LINE> return version.tokens, tokens_added, tokens_removed | Represents the state of word persistence in a page.
See `<https://meta.wikimedia.org/wiki/Research:Content_persistence>`_
:Parameters:
tokenize : function( `str` ) --> list( `str` )
A tokenizing function
diff : function(list( `str` ), list( `str` )) --> list( `ops` )
A function to perform a difference between token lists
revert_radius : int
the maximum revision distance that a revert can span.
revert_detector : :class:`mw.lib.reverts.Detector`
a revert detector to start process with
:Example:
>>> from pprint import pprint
>>> from mw.lib import persistence
>>>
>>> state = persistence.State()
>>>
>>> pprint(state.process("Apples are red.", revision=1))
([Token(text='Apples', revisions=[1]),
Token(text=' ', revisions=[1]),
Token(text='are', revisions=[1]),
Token(text=' ', revisions=[1]),
Token(text='red', revisions=[1]),
Token(text='.', revisions=[1])],
[Token(text='Apples', revisions=[1]),
Token(text=' ', revisions=[1]),
Token(text='are', revisions=[1]),
Token(text=' ', revisions=[1]),
Token(text='red', revisions=[1]),
Token(text='.', revisions=[1])],
[])
>>> pprint(state.process("Apples are blue.", revision=2))
([Token(text='Apples', revisions=[1, 2]),
Token(text=' ', revisions=[1, 2]),
Token(text='are', revisions=[1, 2]),
Token(text=' ', revisions=[1, 2]),
Token(text='blue', revisions=[2]),
Token(text='.', revisions=[1, 2])],
[Token(text='blue', revisions=[2])],
[Token(text='red', revisions=[1])])
>>> pprint(state.process("Apples are red.", revision=3)) # A revert!
([Token(text='Apples', revisions=[1, 2, 3]),
Token(text=' ', revisions=[1, 2, 3]),
Token(text='are', revisions=[1, 2, 3]),
Token(text=' ', revisions=[1, 2, 3]),
Token(text='red', revisions=[1, 3]),
Token(text='.', revisions=[1, 2, 3])],
[],
[]) | 625990553617ad0b5ee07693 |
class Solution: <NEW_LINE> <INDENT> def sortList(self, head): <NEW_LINE> <INDENT> if not head or not head.next: <NEW_LINE> <INDENT> return head <NEW_LINE> <DEDENT> middle = self.findMiddle(head) <NEW_LINE> right = self.sortList(middle.next) <NEW_LINE> middle.next = None <NEW_LINE> left = self.sortList(head) <NEW_LINE> return self.merge(left, right) <NEW_LINE> <DEDENT> def findMiddle(self, head): <NEW_LINE> <INDENT> if not head: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> slow = head <NEW_LINE> fast = head.next <NEW_LINE> while fast and fast.next: <NEW_LINE> <INDENT> slow = slow.next <NEW_LINE> fast = fast.next.next <NEW_LINE> <DEDENT> return slow <NEW_LINE> <DEDENT> def merge(self, l1, l2): <NEW_LINE> <INDENT> if not l1: <NEW_LINE> <INDENT> return l2 <NEW_LINE> <DEDENT> result = ListNode(-1) <NEW_LINE> curr = result <NEW_LINE> while l1 and l2: <NEW_LINE> <INDENT> if l1.val < l2.val: <NEW_LINE> <INDENT> curr.next = l1 <NEW_LINE> l1 = l1.next <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> curr.next = l2 <NEW_LINE> l2 = l2.next <NEW_LINE> <DEDENT> curr = curr.next <NEW_LINE> <DEDENT> curr.next = l1 if l1 else l2 <NEW_LINE> return result.next | @param head: The first node of the linked list.
@return: You should return the head of the sorted linked list,
using constant space complexity. | 62599055d7e4931a7ef3d5c9 |
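
A round trip through the merge sort above (with the swapped `findMiddle`/`sortList` calls corrected); the `ListNode` definition is a minimal stand-in matching the `val`/`next` layout the methods assume.

```python
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None

head = ListNode(3)
head.next = ListNode(1)
head.next.next = ListNode(2)

node = Solution().sortList(head)
while node:
    print(node.val)  # prints 1, 2, 3
    node = node.next
```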
class RebootSignal(NTCError): <NEW_LINE> <INDENT> pass | Error for sending reboot signal. | 62599055fff4ab517ebced6e |
class PostCommentDetailView(generics.RetrieveUpdateDestroyAPIView): <NEW_LINE> <INDENT> queryset = Comment.objects.all() <NEW_LINE> serializer_class = serializers.CommentSerializer <NEW_LINE> def get_object(self): <NEW_LINE> <INDENT> post_id = self.kwargs.get('post_id') <NEW_LINE> comment_id = self.kwargs.get('pk') <NEW_LINE> return Comment.objects.get(post=post_id, id=comment_id) <NEW_LINE> <DEDENT> def get(self, request, *args, **kwargs): <NEW_LINE> <INDENT> super(PostCommentDetailView, self).retrieve(request, args, kwargs) <NEW_LINE> instance = self.get_object() <NEW_LINE> serializer = self.get_serializer(instance) <NEW_LINE> data = serializer.data <NEW_LINE> response = {"status_code": status.HTTP_200_OK, "message": "Successfully retrieved", "result": data} <NEW_LINE> return Response(response) | Get specific comment on specific post | 6259905515baa723494634de |
class DistMatrix(): <NEW_LINE> <INDENT> def __init__(self, data, row_items=None, col_items=None): <NEW_LINE> <INDENT> self.dim = data.shape <NEW_LINE> self.X = np.array(data) <NEW_LINE> self.row_items = row_items <NEW_LINE> self.col_items = col_items <NEW_LINE> <DEDENT> def get_KNN(self, i, k): <NEW_LINE> <INDENT> idxs = np.argsort(self.X[i, :])[:k] <NEW_LINE> return self.X[:, idxs] <NEW_LINE> <DEDENT> def invert(self, typ): <NEW_LINE> <INDENT> if typ == 0: <NEW_LINE> <INDENT> return -self.X <NEW_LINE> <DEDENT> elif typ == 1: <NEW_LINE> <INDENT> return 1.-self.X <NEW_LINE> <DEDENT> elif typ == 2: <NEW_LINE> <INDENT> return 1./self.X <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError('Unknown option for typ of matrix inversion.') | Distance matrix.
.. attribute:: dim
Matrix dimension.
.. attribute:: X
Matrix data.
.. attribute:: row_items
Items corresponding to matrix rows.
.. attribute:: col_items
Items corresponding to matrix columns. | 6259905599cbb53fe683242b |
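
A small usage sketch of `DistMatrix` on a symmetric distance matrix; the values are arbitrary. `invert(1)` maps distances in [0, 1] to similarities.

```python
import numpy as np

X = np.array([[0.0, 0.2, 0.9],
              [0.2, 0.0, 0.5],
              [0.9, 0.5, 0.0]])
dm = DistMatrix(X, row_items=['a', 'b', 'c'], col_items=['a', 'b', 'c'])

print(dm.get_KNN(0, k=2))  # columns of the 2 items nearest to item 0
print(dm.invert(1))        # similarity matrix: 1 - X
```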
class DragDropListbox(tkinter.Listbox): <NEW_LINE> <INDENT> def __init__(self, master, **kw): <NEW_LINE> <INDENT> kw["selectmode"] = tkinter.SINGLE <NEW_LINE> tkinter.Listbox.__init__(self, master, kw) <NEW_LINE> self.bind("<Button-1>", self.setCurrent) <NEW_LINE> self.bind("<B1-Motion>", self.shiftSelection) <NEW_LINE> self.curIndex = None <NEW_LINE> <DEDENT> def setCurrent(self, event): <NEW_LINE> <INDENT> self.curIndex = self.nearest(event.y) <NEW_LINE> <DEDENT> def shiftSelection(self, event): <NEW_LINE> <INDENT> i = self.nearest(event.y) <NEW_LINE> if i < self.curIndex: <NEW_LINE> <INDENT> x = self.get(i) <NEW_LINE> self.delete(i) <NEW_LINE> self.insert(i + 1, x) <NEW_LINE> self.curIndex = i <NEW_LINE> <DEDENT> elif i > self.curIndex: <NEW_LINE> <INDENT> x = self.get(i) <NEW_LINE> self.delete(i) <NEW_LINE> self.insert(i - 1, x) <NEW_LINE> self.curIndex = i <NEW_LINE> <DEDENT> <DEDENT> def get_list_items(self): <NEW_LINE> <INDENT> return super().get(0, tkinter.END) | A Tkinter listbox with drag'n'drop reordering of entries. | 6259905573bcbd0ca4bcb7dd |
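
A minimal window exercising the listbox above: populate it, reorder entries by dragging with the left mouse button, and read the order back with `get_list_items()`.

```python
import tkinter

root = tkinter.Tk()
box = DragDropListbox(root)
for item in ("alpha", "beta", "gamma"):
    box.insert(tkinter.END, item)
box.pack(fill=tkinter.BOTH, expand=True)

# box.get_list_items() returns the current order as a tuple at any time.
root.mainloop()
```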
class FPN(SegmentationModel): <NEW_LINE> <INDENT> def __init__( self, encoder_name: str = "resnet34", encoder_depth: int = 5, encoder_weights: Optional[str] = "imagenet", decoder_pyramid_channels: int = 256, decoder_segmentation_channels: int = 128, decoder_merge_policy: str = "add", decoder_dropout: float = 0.2, in_channels: int = 3, classes: int = 1, activation: Optional[str] = None, upsampling: int = 4, aux_params: Optional[dict] = None, ): <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self.encoder = get_encoder( encoder_name, in_channels=in_channels, depth=encoder_depth, weights=encoder_weights, ) <NEW_LINE> self.decoder = FPNDecoder( encoder_channels=self.encoder.out_channels, encoder_depth=encoder_depth, pyramid_channels=decoder_pyramid_channels, segmentation_channels=decoder_segmentation_channels, dropout=decoder_dropout, merge_policy=decoder_merge_policy, ) <NEW_LINE> self.segmentation_head = SegmentationHead( in_channels=self.decoder.out_channels, out_channels=classes, activation=activation, kernel_size=1, upsampling=upsampling, ) <NEW_LINE> if aux_params is not None: <NEW_LINE> <INDENT> self.classification_head = ClassificationHead( in_channels=self.encoder.out_channels[-1], **aux_params ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.classification_head = None <NEW_LINE> <DEDENT> self.name = "fpn-{}".format(encoder_name) <NEW_LINE> self.initialize() | FPN_ is a fully convolution neural network for image semantic segmentation
Args:
encoder_name: name of classification model (without last dense layers) used as feature
extractor to build segmentation model.
encoder_depth: number of stages used in decoder, larger depth - more features are generated.
e.g. for depth=3 encoder will generate list of features with following spatial shapes
[(H,W), (H/2, W/2), (H/4, W/4), (H/8, W/8)], so in general the deepest feature will have
spatial resolution (H/(2^depth), W/(2^depth)]
encoder_weights: one of ``None`` (random initialization), ``imagenet`` (pre-training on ImageNet).
decoder_pyramid_channels: a number of convolution filters in Feature Pyramid of FPN_.
decoder_segmentation_channels: a number of convolution filters in segmentation head of FPN_.
decoder_merge_policy: determines how to merge outputs inside FPN.
One of [``add``, ``cat``]
decoder_dropout: spatial dropout rate in range (0, 1).
in_channels: number of input channels for model, default is 3.
classes: a number of classes for output (output shape - ``(batch, classes, h, w)``).
activation (str, callable): activation function used in ``.predict(x)`` method for inference.
One of [``sigmoid``, ``softmax2d``, callable, None]
upsampling: optional, final upsampling factor
(default is 4 to preserve input -> output spatial shape identity)
aux_params: if specified model will have additional classification auxiliary output
build on top of encoder, supported params:
- classes (int): number of classes
- pooling (str): one of 'max', 'avg'. Default is 'avg'.
- dropout (float): dropout factor in [0, 1)
- activation (str): activation function to apply "sigmoid"/"softmax" (could be None to return logits)
Returns:
``torch.nn.Module``: **FPN**
.. _FPN:
http://presentations.cocodataset.org/COCO17-Stuff-FAIR.pdf | 62599055cad5886f8bdc5b26 |
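
A smoke test of the model above, assuming the encoder/decoder helpers it imports (segmentation_models_pytorch style) are available; weights are disabled to avoid a download, and the input sides must be divisible by 32.

```python
import torch

model = FPN(encoder_name="resnet34", encoder_weights=None,
            in_channels=3, classes=21)
x = torch.randn(2, 3, 256, 256)

mask = model(x)
print(mask.shape)  # expected: torch.Size([2, 21, 256, 256])
```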
class MultiStepRelaxFWWorkflow(AbstractFWWorkflow): <NEW_LINE> <INDENT> def __init__(self, structure, pseudos, ksampling=1000, relax_algo="atoms_only", accuracy="normal", spin_mode="polarized", smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None, autoparal=False, max_restart=10, folder=None, **extra_abivars): <NEW_LINE> <INDENT> task = MultiStepRelaxStrategyFireTask(structure=structure, pseudos=pseudos, ksampling=ksampling, relax_algo=relax_algo, accuracy=accuracy, spin_mode=spin_mode, smearing=smearing, charge=charge, scf_algorithm=scf_algorithm, deps={}, additional_steps=max_restart, **extra_abivars) <NEW_LINE> spec = {} <NEW_LINE> if folder: <NEW_LINE> <INDENT> spec['_launch_dir'] = os.path.join(folder, 'relax') <NEW_LINE> <DEDENT> fw = Firework(task, spec=spec) <NEW_LINE> if autoparal: <NEW_LINE> <INDENT> autoparal_fw = self.create_autoparal_fw(task, spec['_launch_dir']) <NEW_LINE> self.wf = Workflow([autoparal_fw, fw], {autoparal_fw: [fw]}) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.wf = Workflow([fw]) | Workflow for structural relaxations performed in a multi-step procedure.
Using a small maximum number of relaxation steps, the overall process is
automatically split into as many FWs as needed. | 6259905599cbb53fe683242c |
class ResourceGroup(MyDocument): <NEW_LINE> <INDENT> __collection__ = 'resource_group' <NEW_LINE> __lazy__ = False <NEW_LINE> name = StringField() | Resource package group; different package groups have different quotas | 625990550fa83653e46f6430 |
class APNs(object): <NEW_LINE> <INDENT> def __init__(self, use_sandbox=False, cert_file=None, key_file=None, enhanced=False): <NEW_LINE> <INDENT> super(APNs, self).__init__() <NEW_LINE> self.use_sandbox = use_sandbox <NEW_LINE> self.cert_file = cert_file <NEW_LINE> self.key_file = key_file <NEW_LINE> self._feedback_connection = None <NEW_LINE> self._gateway_connection = None <NEW_LINE> self.enhanced = enhanced <NEW_LINE> <DEDENT> @staticmethod <NEW_LINE> def packed_uchar(num): <NEW_LINE> <INDENT> return struct.pack('>B', num) <NEW_LINE> <DEDENT> @staticmethod <NEW_LINE> def packed_ushort_big_endian(num): <NEW_LINE> <INDENT> return struct.pack('>H', num) <NEW_LINE> <DEDENT> @staticmethod <NEW_LINE> def unpacked_ushort_big_endian(bytes_value): <NEW_LINE> <INDENT> return struct.unpack('>H', bytes_value)[0] <NEW_LINE> <DEDENT> @staticmethod <NEW_LINE> def packed_uint_big_endian(num): <NEW_LINE> <INDENT> return struct.pack('>I', num) <NEW_LINE> <DEDENT> @staticmethod <NEW_LINE> def unpacked_uint_big_endian(bytes_value): <NEW_LINE> <INDENT> return struct.unpack('>I', bytes_value)[0] <NEW_LINE> <DEDENT> @staticmethod <NEW_LINE> def unpacked_char_big_endian(bytes_value): <NEW_LINE> <INDENT> return struct.unpack('c', bytes_value)[0] <NEW_LINE> <DEDENT> @property <NEW_LINE> def feedback_server(self): <NEW_LINE> <INDENT> if not self._feedback_connection: <NEW_LINE> <INDENT> self._feedback_connection = FeedbackConnection( use_sandbox=self.use_sandbox, cert_file=self.cert_file, key_file=self.key_file, ) <NEW_LINE> <DEDENT> return self._feedback_connection <NEW_LINE> <DEDENT> @property <NEW_LINE> def gateway_server(self): <NEW_LINE> <INDENT> if not self._gateway_connection: <NEW_LINE> <INDENT> self._gateway_connection = GatewayConnection( use_sandbox=self.use_sandbox, cert_file=self.cert_file, key_file=self.key_file, enhanced=self.enhanced, ) <NEW_LINE> <DEDENT> return self._gateway_connection | A class representing an Apple Push Notification service connection | 625990563c8af77a43b689e6 |
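
The static helpers on `APNs` are thin wrappers over `struct` in big-endian (network) byte order and can be checked without any connection:

```python
raw = APNs.packed_uint_big_endian(3600)
print(raw)                                  # b'\x00\x00\x0e\x10'
print(APNs.unpacked_uint_big_endian(raw))   # 3600
print(APNs.packed_ushort_big_endian(258))   # b'\x01\x02'
```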
class SimulatedAnnealingAcceptanceFunction(AbstractAcceptanceFunction): <NEW_LINE> <INDENT> def __init__(self, diff_multiplier=1, multiplier=1): <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self._diff_multiplier = diff_multiplier <NEW_LINE> self._multiplier = multiplier <NEW_LINE> <DEDENT> def accept(self, delta_value, temperature): <NEW_LINE> <INDENT> probability = self._multiplier * math.exp(-(self._diff_multiplier * delta_value) / temperature) <NEW_LINE> random_number = random.random() <NEW_LINE> return probability > random_number | An acceptance function for simulated annealing.
A smaller quality value is assumed to be better than a bigger one.
Parameters
----------
diff_multiplier : int or float, optional
The delta_value will be multiplied by this multiplier. A bigger value
leads to more solutions being accepted, while a smaller value will
lead to less solutions being accepted.
Positive values will initialise the class for minimising, negative values
will initialise the function for maximising.
The default value is 1.
multiplier : int or float, optional
Will be multiplied with the whole probability. Must be positive.
Should be in the interval ]0,1]. This multiplier can be used to
decrease the odds of values being accepted. While it is possible to use
a multiplier greater than 1, this might cause weird behaviour.
The default value is 1.
Attributes
----------
_diff_multiplier : int or float
The delta_value will be multiplied by this multiplier.
_multiplier : int or float
Is multiplied with the whole probability.
Examples
--------
A simple example of the default behaviour, delta (200) and
temperature (1000) are arbitrarily chosen, they have little meaning in
this example:
.. doctest::
>>> import random
>>> from lclpy.localsearch.acceptance.simulated_annealing_acceptance_function \
... import SimulatedAnnealingAcceptanceFunction
... # set seed of random
... # this isn't needed, it's only used to make sure the results will
... # always be the same.
>>> random.seed(2)
... # init
>>> test = SimulatedAnnealingAcceptanceFunction()
... # tests
>>> test.accept(200, 1000)
False
>>> test.accept(200, 1000)
False
>>> test.accept(200, 1000)
True
>>> test.accept(200, 1000)
True
>>> test.accept(200, 1000)
False | 625990567d847024c075d927 |
class ServicesRecordFile: <NEW_LINE> <INDENT> def __init__(self, filename): <NEW_LINE> <INDENT> self.servicesfile = filename <NEW_LINE> <DEDENT> def write(self, services): <NEW_LINE> <INDENT> f=open(self.servicesfile, 'w') <NEW_LINE> f.write("<services>") <NEW_LINE> for service in services: <NEW_LINE> <INDENT> f.write('<service>' + xml.sax.saxutils.escape(service.name) + '</service>') <NEW_LINE> <DEDENT> f.write("</services>") <NEW_LINE> f.close() <NEW_LINE> <DEDENT> def read(self): <NEW_LINE> <INDENT> parser = xml.sax.make_parser() <NEW_LINE> parser.setFeature(xml.sax.handler.feature_namespaces, 0) <NEW_LINE> handler = _ServicesRecordFileParser() <NEW_LINE> parser.setContentHandler(handler) <NEW_LINE> parser.parse(self.servicesfile) <NEW_LINE> return handler.services | A snap service record file; contains the list of configured services to restore | 625990563539df3088ecd7f3 |
class UpdateNetworkProfile(neutronV20.UpdateCommand): <NEW_LINE> <INDENT> resource = RESOURCE <NEW_LINE> def add_known_arguments(self, parser): <NEW_LINE> <INDENT> parser.add_argument("--remove-tenant", action='append', dest='remove_tenants', help=_("Remove tenant from the network profile. " "You can repeat this option.")) <NEW_LINE> parser.add_argument("--add-tenant", action='append', dest='add_tenants', help=_("Add tenant to the network profile. " "You can repeat this option.")) <NEW_LINE> <DEDENT> def args2body(self, parsed_args): <NEW_LINE> <INDENT> body = {} <NEW_LINE> neutronV20.update_dict(parsed_args, body, ['remove_tenants', 'add_tenants']) <NEW_LINE> return {'network_profile': body} | Update network profile's information. | 6259905671ff763f4b5e8cfd |
class Elevation(IntEnum): <NEW_LINE> <INDENT> ALL, BOT_MODERATORS, GUILD_OWNERS, BOT_OWNERS = range(4) | Basic permission elevation levels. | 62599056d53ae8145f9199b1 |
class Timeout(Publisher, DirectPublisherMixin): <NEW_LINE> <INDENT> def __init__(self, scheduler, timeout_thunk): <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self.scheduler = scheduler <NEW_LINE> self.timeout_thunk = timeout_thunk <NEW_LINE> self.cancel = None <NEW_LINE> <DEDENT> def start(self, interval): <NEW_LINE> <INDENT> if self.cancel: <NEW_LINE> <INDENT> self.cancel() <NEW_LINE> <DEDENT> self.cancel = self.scheduler.schedule_later_one_time(self, interval) <NEW_LINE> <DEDENT> def clear(self): <NEW_LINE> <INDENT> if self.cancel: <NEW_LINE> <INDENT> self.cancel() <NEW_LINE> self.cancel = None <NEW_LINE> <DEDENT> <DEDENT> def _observe(self): <NEW_LINE> <INDENT> self.cancel = None <NEW_LINE> self._dispatch_next(self.timeout_thunk()) <NEW_LINE> return True | A publisher that can schedule timeouts for itself. When a
timeout occurs, an event is published on the default topic.
The timeout_thunk is called to get the actual event. | 6259905655399d3f05627a6d |
class ConsoleTerminateEvent(ConsoleEvent): <NEW_LINE> <INDENT> def __init__(self, command: Optional["Command"], io: IO, exit_code: int) -> None: <NEW_LINE> <INDENT> super().__init__(command, io) <NEW_LINE> self._exit_code = exit_code <NEW_LINE> <DEDENT> @property <NEW_LINE> def exit_code(self) -> int: <NEW_LINE> <INDENT> return self._exit_code <NEW_LINE> <DEDENT> def set_exit_code(self, exit_code: int) -> None: <NEW_LINE> <INDENT> self._exit_code = exit_code | An event triggered by after the execution of a command. | 62599056a17c0f6771d5d648 |
class TransactionsMixin(object): <NEW_LINE> <INDENT> pass | Redis Transaction Commands Mixin | 625990564e4d562566373956 |
class ToTensorTransform(BaseTransform): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> super(ToTensorTransform, self).__init__() <NEW_LINE> self.name = "To-Tensor" <NEW_LINE> if torch.cuda.is_available(): <NEW_LINE> <INDENT> self.device = torch.device('cuda:1') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.device = torch.device('cpu') <NEW_LINE> <DEDENT> <DEDENT> def __call__(self, train_list, rest_list, **kwargs): <NEW_LINE> <INDENT> def to_tensor(data): <NEW_LINE> <INDENT> x, y = data.x, data.y <NEW_LINE> x = torch.from_numpy(x).float().to(self.device) <NEW_LINE> y = torch.from_numpy(np.array([y])).long().to(self.device) <NEW_LINE> return RawData.create_from_ref(data, x=x, y=y) <NEW_LINE> <DEDENT> return self.transform(to_tensor, train_list, rest_list) <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return '{}()'.format(self.__class__.__name__) | Transforms data to pytorch-tensors. | 6259905615baa723494634e1 |
class InvenioWebSubmitAdminWarningNoUpdate(Exception): <NEW_LINE> <INDENT> pass | Exception used when no update was made as a result of an action | 62599056a219f33f346c7d53
class VOCSegmentation(Dataset): <NEW_LINE> <INDENT> NUM_CLASSES = 21 <NEW_LINE> def __init__(self, args, base_dir='datasets/VOCdevkit/VOC2012/', split='train'): <NEW_LINE> <INDENT> super(VOCSegmentation, self).__init__() <NEW_LINE> self.base_dir = base_dir <NEW_LINE> self.image_dir = os.path.join(self.base_dir, 'JPEGImages') <NEW_LINE> self.cat_dir = os.path.join(self.base_dir, 'SegmentationClass') <NEW_LINE> self.args = args <NEW_LINE> splits_dir = os.path.join(self.base_dir, 'ImageSets', 'Segmentation') <NEW_LINE> self.split = [split] <NEW_LINE> self.im_ids = [] <NEW_LINE> self.images = [] <NEW_LINE> self.categories = [] <NEW_LINE> for splt in self.split: <NEW_LINE> <INDENT> with open(os.path.join(os.path.join(splits_dir, splt + '.txt')), "r") as f: <NEW_LINE> <INDENT> lines = f.read().splitlines() <NEW_LINE> <DEDENT> for ii, line in enumerate(lines): <NEW_LINE> <INDENT> image = os.path.join(self.image_dir, line + ".jpg") <NEW_LINE> cat = os.path.join(self.cat_dir, line + ".png") <NEW_LINE> assert os.path.isfile(image) <NEW_LINE> assert os.path.isfile(cat) <NEW_LINE> self.im_ids.append(line) <NEW_LINE> self.images.append(image) <NEW_LINE> self.categories.append(cat) <NEW_LINE> <DEDENT> <DEDENT> assert (len(self.images) == len(self.categories)) <NEW_LINE> print('Number of images in {}: {:d}'.format(split, len(self.images))) <NEW_LINE> <DEDENT> def __len__(self): <NEW_LINE> <INDENT> return len(self.images) <NEW_LINE> <DEDENT> def __getitem__(self, index): <NEW_LINE> <INDENT> image = Image.open(self.images[index]).convert('RGB') <NEW_LINE> label = Image.open(self.categories[index]) <NEW_LINE> sample = {'image': image, 'label': label} <NEW_LINE> for split in self.split: <NEW_LINE> <INDENT> if split == "train": <NEW_LINE> <INDENT> return self.transform_train(sample) <NEW_LINE> <DEDENT> elif split == 'val': <NEW_LINE> <INDENT> return self.transform_val(sample) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> def transform_train(self, sample): <NEW_LINE> <INDENT> composed_transforms = transforms.Compose([ tr.RandomHorizontalFlip(), tr.RandomSizedCrop(self.args.crop_size), tr.ToTensor(), tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]) <NEW_LINE> sample = composed_transforms(sample) <NEW_LINE> return sample <NEW_LINE> <DEDENT> def transform_val(self, sample): <NEW_LINE> <INDENT> composed_transforms = transforms.Compose([ tr.CenterCrop(self.args.crop_size), tr.ToTensor(), tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]) <NEW_LINE> return composed_transforms(sample) <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return 'VOC2012(split=' + str(self.split) + ')' | PascalVoc dataset | 625990560a50d4780f706866 |
class UserOwnContributedProjectAPIView(ProjectAPIView): <NEW_LINE> <INDENT> serializer_class = ProjectInlineUserSerializer <NEW_LINE> permission_classes = [permissions.IsAuthenticated] <NEW_LINE> search_fields = ('name',) <NEW_LINE> ordering_fields = ('name', 'project_type', 'timestamp') <NEW_LINE> filter_fields = ('project_type', 'status') <NEW_LINE> def get_queryset(self, *args, **kwargs): <NEW_LINE> <INDENT> user_id = self.request.user.id <NEW_LINE> if user_id is None: <NEW_LINE> <INDENT> return Project.objects.none() <NEW_LINE> <DEDENT> user = User.objects.get(id=user_id) <NEW_LINE> projects = user.contributed_projects.filter(status__in=['answering', 'checking', 'completed']) <NEW_LINE> return projects <NEW_LINE> <DEDENT> def post(self, request, *args, **kwargs): <NEW_LINE> <INDENT> return Response({"detail": "Not allowed here"}, status=400) | get:
[Contributed tasks] Get the list of tasks the current user participates in.
post:
There is no post method. | 62599056d6c5a102081e366d
class AbstractSymbol: <NEW_LINE> <INDENT> def __init__(self, id, project): <NEW_LINE> <INDENT> self.id = id <NEW_LINE> self.project = project <NEW_LINE> <DEDENT> def __hash__(self): <NEW_LINE> <INDENT> return self.id <NEW_LINE> <DEDENT> def __eq__(self, other): <NEW_LINE> <INDENT> return self.id == other.id | Base class for all other classes in this file. | 6259905682261d6c52730971 |
@dataclass <NEW_LINE> class NodeShallowing(NodeMove): <NEW_LINE> <INDENT> _inherited_slots: ClassVar[List[str]] = [] <NEW_LINE> class_class_uri: ClassVar[URIRef] = KGCL.NodeShallowing <NEW_LINE> class_class_curie: ClassVar[str] = "kgcl:NodeShallowing" <NEW_LINE> class_name: ClassVar[str] = "node shallowing" <NEW_LINE> class_model_uri: ClassVar[URIRef] = KGCL.NodeShallowing <NEW_LINE> id: Union[str, NodeShallowingId] = None <NEW_LINE> change_description: Optional[str] = None <NEW_LINE> def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]): <NEW_LINE> <INDENT> if self._is_empty(self.id): <NEW_LINE> <INDENT> self.MissingRequiredField("id") <NEW_LINE> <DEDENT> if not isinstance(self.id, NodeShallowingId): <NEW_LINE> <INDENT> self.id = NodeShallowingId(self.id) <NEW_LINE> <DEDENT> if self.change_description is not None and not isinstance( self.change_description, str ): <NEW_LINE> <INDENT> self.change_description = str(self.change_description) <NEW_LINE> <DEDENT> super().__post_init__(**kwargs) | The opposite of node deepening. | 625990568e7ae83300eea5dd |
class AuthorRegistration(APIView): <NEW_LINE> <INDENT> parser_classes = (MultiPartParser, FormParser, JSONParser) <NEW_LINE> def post(self, request): <NEW_LINE> <INDENT> serializer = RegistrationSerializer(data = request.DATA) <NEW_LINE> if serializer.is_valid(raise_exception = True): <NEW_LINE> <INDENT> author = serializer.create(serializer.validated_data) <NEW_LINE> token, created = Token.objects.get_or_create(user=author.user) <NEW_LINE> serializer = AuthorSerializer(author, context={'request': request}) <NEW_LINE> return Response(status=status.HTTP_201_CREATED) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return Response(serializer._errors, status=status.HTTP_400_BAD_REQUEST) | Takes incoming JSON, validates it and builds a Author/User Model | 62599056e5267d203ee6ce3e |
@implementer(IActivity) <NEW_LINE> class Activity(Item): <NEW_LINE> <INDENT> pass | Item Subclass for Activity
| 62599056009cb60464d02a84 |
class QuantumLinuxBridgeVIFDriver(vif.VIFDriver): <NEW_LINE> <INDENT> def get_bridge_name(self, network_id): <NEW_LINE> <INDENT> return ("brq" + network_id)[:LINUX_DEV_LEN] <NEW_LINE> <DEDENT> def get_dev_name(self, iface_id): <NEW_LINE> <INDENT> return ("tap" + iface_id)[:LINUX_DEV_LEN] <NEW_LINE> <DEDENT> def plug(self, instance, vif): <NEW_LINE> <INDENT> network, mapping = vif <NEW_LINE> iface_id = mapping['vif_uuid'] <NEW_LINE> dev = self.get_dev_name(iface_id) <NEW_LINE> bridge = self.get_bridge_name(network['id']) <NEW_LINE> linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(bridge, None, filtering=False) <NEW_LINE> conf = vconfig.LibvirtConfigGuestInterface() <NEW_LINE> conf.target_dev = dev <NEW_LINE> conf.net_type = "bridge" <NEW_LINE> conf.mac_addr = mapping['mac'] <NEW_LINE> conf.source_dev = bridge <NEW_LINE> if CONF.libvirt_use_virtio_for_bridges: <NEW_LINE> <INDENT> conf.model = "virtio" <NEW_LINE> <DEDENT> return conf <NEW_LINE> <DEDENT> def unplug(self, instance, vif): <NEW_LINE> <INDENT> pass | VIF driver for Linux Bridge when running Quantum. | 6259905699cbb53fe683242f |
class Migration(migrations.Migration): <NEW_LINE> <INDENT> dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('ws', '0035_bump_car_year_validator'), ] <NEW_LINE> operations = [ migrations.AddField( model_name='participant', name='temp_user', field=models.ForeignKey( default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, ), ), migrations.RunPython( copy_user_id, reverse_code=do_nothing, ), migrations.AlterField( model_name='participant', name='temp_user', field=models.OneToOneField( on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL ), ), migrations.RemoveField( model_name='participant', name='user_id', ), migrations.RenameField( model_name='participant', old_name='temp_user', new_name='user', ), ] | A lousy migration which is just working around Django's ORM.
The whole point of this migration is to add a FK constraint to `user_id`.
In raw SQL, it would be as simple as adding the constraint, since the
column is already not nullable. However, I cannot find a way to get Django
to recognize that `user_id` (an IntegerField) can become a ForeignKey.
The approach below is not great. I could use raw SQL to get the job done,
but Django's `makemigrations` will still think there are changes that
need to be applied. This lets me avoid using `--fake` trickery.
This just takes advantage of the fact that the participants table is
quite small (~6,000), and we don't get a ton of new signups. | 6259905601c39578d7f141df |
class IconWidget(QWidget): <NEW_LINE> <INDENT> def __init__(self, parent=None, icon=QIcon(), iconSize=QSize(), **kwargs): <NEW_LINE> <INDENT> sizePolicy = kwargs.pop("sizePolicy", QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)) <NEW_LINE> super().__init__(parent, **kwargs) <NEW_LINE> self.__icon = QIcon(icon) <NEW_LINE> self.__iconSize = QSize(iconSize) <NEW_LINE> self.setSizePolicy(sizePolicy) <NEW_LINE> <DEDENT> def setIcon(self, icon): <NEW_LINE> <INDENT> if self.__icon != icon: <NEW_LINE> <INDENT> self.__icon = QIcon(icon) <NEW_LINE> self.updateGeometry() <NEW_LINE> self.update() <NEW_LINE> <DEDENT> <DEDENT> def icon(self): <NEW_LINE> <INDENT> return QIcon(self.__icon) <NEW_LINE> <DEDENT> def iconSize(self): <NEW_LINE> <INDENT> if not self.__iconSize.isValid(): <NEW_LINE> <INDENT> size = self.style().pixelMetric(QStyle.PM_ButtonIconSize) <NEW_LINE> return QSize(size, size) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return QSize(self.__iconSize) <NEW_LINE> <DEDENT> <DEDENT> def setIconSize(self, iconSize): <NEW_LINE> <INDENT> if self.__iconSize != iconSize: <NEW_LINE> <INDENT> self.__iconSize = QSize(iconSize) <NEW_LINE> self.updateGeometry() <NEW_LINE> self.update() <NEW_LINE> <DEDENT> <DEDENT> def sizeHint(self): <NEW_LINE> <INDENT> sh = self.iconSize() <NEW_LINE> m = self.contentsMargins() <NEW_LINE> return QSize(sh.width() + m.left() + m.right(), sh.height() + m.top() + m.bottom()) <NEW_LINE> <DEDENT> def paintEvent(self, event): <NEW_LINE> <INDENT> painter = QStylePainter(self) <NEW_LINE> opt = QStyleOption() <NEW_LINE> opt.initFrom(self) <NEW_LINE> painter.drawPrimitive(QStyle.PE_Widget, opt) <NEW_LINE> if not self.__icon.isNull(): <NEW_LINE> <INDENT> rect = self.contentsRect() <NEW_LINE> if opt.state & QStyle.State_Active: <NEW_LINE> <INDENT> mode = QIcon.Active <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> mode = QIcon.Disabled <NEW_LINE> <DEDENT> self.__icon.paint(painter, rect, Qt.AlignCenter, mode, QIcon.Off) <NEW_LINE> <DEDENT> painter.end() | A widget displaying a `QIcon`
class DescribeWeeklyReportNonlocalLoginPlacesResponse(AbstractModel): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> self.WeeklyReportNonlocalLoginPlaces = None <NEW_LINE> self.TotalCount = None <NEW_LINE> self.RequestId = None <NEW_LINE> <DEDENT> def _deserialize(self, params): <NEW_LINE> <INDENT> if params.get("WeeklyReportNonlocalLoginPlaces") is not None: <NEW_LINE> <INDENT> self.WeeklyReportNonlocalLoginPlaces = [] <NEW_LINE> for item in params.get("WeeklyReportNonlocalLoginPlaces"): <NEW_LINE> <INDENT> obj = WeeklyReportNonlocalLoginPlace() <NEW_LINE> obj._deserialize(item) <NEW_LINE> self.WeeklyReportNonlocalLoginPlaces.append(obj) <NEW_LINE> <DEDENT> <DEDENT> self.TotalCount = params.get("TotalCount") <NEW_LINE> self.RequestId = params.get("RequestId") | DescribeWeeklyReportNonlocalLoginPlaces response structure.
| 62599056a17c0f6771d5d649 |
class MultiGraphConvLayer(nn.Module): <NEW_LINE> <INDENT> def __init__(self, mem_dim, layers, heads, dropout): <NEW_LINE> <INDENT> super(MultiGraphConvLayer, self).__init__() <NEW_LINE> self.mem_dim = mem_dim <NEW_LINE> self.layers = layers <NEW_LINE> self.head_dim = self.mem_dim // self.layers <NEW_LINE> self.heads = heads <NEW_LINE> self.gcn_drop = nn.Dropout(dropout) <NEW_LINE> self.Linear = nn.Linear(self.mem_dim * self.heads, self.mem_dim) <NEW_LINE> self.weight_list = nn.ModuleList() <NEW_LINE> for i in range(self.heads): <NEW_LINE> <INDENT> for j in range(self.layers): <NEW_LINE> <INDENT> self.weight_list.append(nn.Linear(self.mem_dim + self.head_dim * j, self.head_dim)) <NEW_LINE> <DEDENT> <DEDENT> self.weight_list = self.weight_list.cuda() <NEW_LINE> self.Linear = self.Linear.cuda() <NEW_LINE> <DEDENT> def forward(self, adj_list, gcn_inputs): <NEW_LINE> <INDENT> multi_head_list = [] <NEW_LINE> for i in range(self.heads): <NEW_LINE> <INDENT> adj = adj_list[i] <NEW_LINE> denom = adj.sum(2).unsqueeze(2) + 1 <NEW_LINE> outputs = gcn_inputs <NEW_LINE> cache_list = [outputs] <NEW_LINE> output_list = [] <NEW_LINE> for l in range(self.layers): <NEW_LINE> <INDENT> index = i * self.layers + l <NEW_LINE> Ax = adj.bmm(outputs) <NEW_LINE> AxW = self.weight_list[index](Ax) <NEW_LINE> AxW = AxW + self.weight_list[index](outputs) <NEW_LINE> AxW = AxW / denom <NEW_LINE> gAxW = F.relu(AxW) <NEW_LINE> cache_list.append(gAxW) <NEW_LINE> outputs = torch.cat(cache_list, dim=2) <NEW_LINE> output_list.append(self.gcn_drop(gAxW)) <NEW_LINE> <DEDENT> gcn_ouputs = torch.cat(output_list, dim=2) <NEW_LINE> gcn_ouputs = gcn_ouputs + gcn_inputs <NEW_LINE> multi_head_list.append(gcn_ouputs) <NEW_LINE> <DEDENT> final_output = torch.cat(multi_head_list, dim=2) <NEW_LINE> out = self.Linear(final_output) <NEW_LINE> return out | A GCN module operated on dependency graphs. | 6259905694891a1f408ba19e |
class TestV1Job(unittest.TestCase): <NEW_LINE> <INDENT> def setUp(self): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def tearDown(self): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def testV1Job(self): <NEW_LINE> <INDENT> pass | V1Job unit test stubs | 625990560fa83653e46f6435 |
class NumericScalarArg(Argument): <NEW_LINE> <INDENT> def get_arg(self): <NEW_LINE> <INDENT> if isinstance(self.default, Missing): <NEW_LINE> <INDENT> if self.required is True: <NEW_LINE> <INDENT> arg = ' = '.join([self.new_name_converted, '0']) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> arg = ' = '.join([self.new_name_converted, str(None)]) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> arg = ' = '.join([self.new_name_converted, str(self.default)]) <NEW_LINE> <DEDENT> return arg <NEW_LINE> <DEDENT> def get_body(self): <NEW_LINE> <INDENT> template = "if {name_converted} is not None:\n {inout}['{" "name}'] = try_set(\nobj={name_converted},\n" "none_acceptable={none_acceptable},\n" "is_of_type=numbers.Real" <NEW_LINE> body = template.format( inout=self.inout, name=self.name, name_converted=self.name_converted, none_acceptable=not self.required) <NEW_LINE> if not isinstance(self.range, Missing): <NEW_LINE> <INDENT> sorted_range = json.dumps( self.range, sort_keys=True).replace( '"', "'") <NEW_LINE> range_check = ", valid_range={0}".format(sorted_range) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> range_check = "" <NEW_LINE> <DEDENT> return body + range_check + ")" <NEW_LINE> <DEDENT> @property <NEW_LINE> def type_python(self): <NEW_LINE> <INDENT> return "numbers.Real" | argument of numeric scalar type | 6259905663b5f9789fe866c2 |
class DirectoryError(Exception): <NEW_LINE> <INDENT> def __init__(self, message, directory): <NEW_LINE> <INDENT> self._message = message <NEW_LINE> self._directory = directory <NEW_LINE> <DEDENT> @property <NEW_LINE> def message(self): <NEW_LINE> <INDENT> return self._message <NEW_LINE> <DEDENT> @property <NEW_LINE> def directory(self): <NEW_LINE> <INDENT> return self._directory | This error is thrown when the upload_status module cannot access a directory | 62599056435de62698e9d353 |
class Negative(Function): <NEW_LINE> <INDENT> def forward(self, x): <NEW_LINE> <INDENT> x = self._convert2tensor(x) <NEW_LINE> self.x = x <NEW_LINE> if isinstance(self.x, Constant): <NEW_LINE> <INDENT> return Constant(-x.value) <NEW_LINE> <DEDENT> return Tensor(-x.value, function=self) <NEW_LINE> <DEDENT> def backward(self, delta): <NEW_LINE> <INDENT> dx = -delta <NEW_LINE> self.x.backward(dx) | element-wise negative
y = -x | 625990568e71fb1e983bd019 |
class AnagraficaStatiArticoliFilter(AnagraficaFilter): <NEW_LINE> <INDENT> def __init__(self, anagrafica): <NEW_LINE> <INDENT> AnagraficaFilter.__init__(self, anagrafica, 'anagrafica_stati_articoli_filter_table') <NEW_LINE> self._widgetFirstFocus = self.denominazione_filter_entry <NEW_LINE> <DEDENT> def draw(self): <NEW_LINE> <INDENT> treeview = self._anagrafica.anagrafica_filter_treeview <NEW_LINE> renderer = gtk.CellRendererText() <NEW_LINE> column = gtk.TreeViewColumn('Descrizione', renderer, text=1) <NEW_LINE> column.set_sizing(GTK_COLUMN_GROWN_ONLY) <NEW_LINE> column.set_clickable(True) <NEW_LINE> column.connect("clicked", self._changeOrderBy, 'denominazione') <NEW_LINE> column.set_resizable(True) <NEW_LINE> column.set_expand(False) <NEW_LINE> treeview.append_column(column) <NEW_LINE> treeview.set_search_column(1) <NEW_LINE> self.refresh() <NEW_LINE> <DEDENT> def clear(self): <NEW_LINE> <INDENT> self.denominazione_filter_entry.set_text('') <NEW_LINE> self.refresh() <NEW_LINE> <DEDENT> def refresh(self): <NEW_LINE> <INDENT> denominazione = prepareFilterString(self.denominazione_filter_entry.get_text()) <NEW_LINE> self.numRecords = StatoArticolo().count(denominazione=denominazione) <NEW_LINE> self._refreshPageCount() <NEW_LINE> stas = StatoArticolo().select(denominazione=denominazione, orderBy=self.orderBy, offset=self.offset, batchSize=self.batchSize) <NEW_LINE> model = gtk.ListStore(object, str) <NEW_LINE> for s in stas: <NEW_LINE> <INDENT> model.append((s, (s.denominazione or ''))) <NEW_LINE> <DEDENT> self._anagrafica.anagrafica_filter_treeview.set_model(model) | Filter for searching the article-status registry | 62599056e64d504609df9e78
class ArticleViewSet(viewsets.ModelViewSet): <NEW_LINE> <INDENT> queryset = Article.objects.all() <NEW_LINE> serializer_class = ArticleSerializer <NEW_LINE> permission_classes = (TopicsAppPermission,) | API endpoint for articles | 6259905663d6d428bbee3d24 |
@dataclass(frozen=True) <NEW_LINE> class GetUpdates(Request): <NEW_LINE> <INDENT> offset: Optional[int] = None <NEW_LINE> limit: Optional[int] = None <NEW_LINE> timeout: Optional[int] = None <NEW_LINE> allowed_updates: Optional[List[UpdateType]] = None <NEW_LINE> def parse_result(self, data) -> List['api.Update']: <NEW_LINE> <INDENT> assert isinstance(data, list), "Should be list." <NEW_LINE> return [api.Update.parse(r) for r in data] <NEW_LINE> <DEDENT> async def send(self) -> List['api.Update']: <NEW_LINE> <INDENT> res = await context.bot.send(self) <NEW_LINE> return res.result | Represents GetUpdates request object:
https://core.telegram.org/bots/api#getupdates | 625990561f037a2d8b9e5314 |
class DdosCustomPolicy(Resource): <NEW_LINE> <INDENT> _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, 'etag': {'readonly': True}, 'resource_guid': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'public_ip_addresses': {'readonly': True}, } <NEW_LINE> _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'etag': {'key': 'etag', 'type': 'str'}, 'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'public_ip_addresses': {'key': 'properties.publicIPAddresses', 'type': '[SubResource]'}, 'protocol_custom_settings': {'key': 'properties.protocolCustomSettings', 'type': '[ProtocolCustomSettingsFormat]'}, } <NEW_LINE> def __init__( self, *, id: Optional[str] = None, location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, protocol_custom_settings: Optional[List["ProtocolCustomSettingsFormat"]] = None, **kwargs ): <NEW_LINE> <INDENT> super(DdosCustomPolicy, self).__init__(id=id, location=location, tags=tags, **kwargs) <NEW_LINE> self.etag = None <NEW_LINE> self.resource_guid = None <NEW_LINE> self.provisioning_state = None <NEW_LINE> self.public_ip_addresses = None <NEW_LINE> self.protocol_custom_settings = protocol_custom_settings | A DDoS custom policy in a resource group.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar etag: A unique read-only string that changes whenever the resource is updated.
:vartype etag: str
:ivar resource_guid: The resource GUID property of the DDoS custom policy resource. It uniquely
identifies the resource, even if the user changes its name or migrate the resource across
subscriptions or resource groups.
:vartype resource_guid: str
:ivar provisioning_state: The provisioning state of the DDoS custom policy resource. Possible
values include: "Succeeded", "Updating", "Deleting", "Failed".
:vartype provisioning_state: str or ~azure.mgmt.network.v2020_06_01.models.ProvisioningState
:ivar public_ip_addresses: The list of public IPs associated with the DDoS custom policy
resource. This list is read-only.
:vartype public_ip_addresses: list[~azure.mgmt.network.v2020_06_01.models.SubResource]
:param protocol_custom_settings: The protocol-specific DDoS policy customization parameters.
:type protocol_custom_settings:
list[~azure.mgmt.network.v2020_06_01.models.ProtocolCustomSettingsFormat] | 6259905632920d7e50bc759b |
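A hedged construction sketch for the model above; the field values are illustrative, not taken from any real subscription, and read-only fields stay None until the service populates them:

    policy = DdosCustomPolicy(
        location='westeurope',
        tags={'env': 'test'},
        protocol_custom_settings=None,  # or a list of ProtocolCustomSettingsFormat
    )
    assert policy.etag is None  # read-only; set by the service on a GET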
class ThiefBlueprint(Blueprint): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> super().__init__("thief", 2, 20) <NEW_LINE> self.add_component(Health, 5) <NEW_LINE> self.add_component(Movable, 2) <NEW_LINE> self.add_component(Collecter, 2, 50, ResourceCategory.MINERAL) | Used to create a copy of a thief (the Rebels' collector unit) | 62599056dc8b845886d54b16
class Answer(TranslationMixin): <NEW_LINE> <INDENT> tracker = FieldTracker() <NEW_LINE> question = models.ForeignKey(WorksheetQuestion) <NEW_LINE> sequence_number = models.IntegerField( help_text=_( 'Used to order the answers for a question into the correct ' 'sequence.'), blank=False, null=False, default=0, ) <NEW_LINE> is_correct = models.BooleanField( help_text=_('Is this answer correct?'), blank=False, null=False, ) <NEW_LINE> answer = models.CharField( help_text=_('Answer.'), blank=False, null=False, max_length=200, ) <NEW_LINE> answer_explanation = models.TextField( help_text=_('Answer explanation. Markdown is supported.'), blank=True, null=False, ) <NEW_LINE> class Meta: <NEW_LINE> <INDENT> app_label = 'lesson' <NEW_LINE> ordering = ['question', 'sequence_number'] <NEW_LINE> <DEDENT> def save(self, *args, **kwargs): <NEW_LINE> <INDENT> if not self.pk: <NEW_LINE> <INDENT> max_number = Answer.objects.all(). filter(question=self.question).aggregate( models.Max('sequence_number')) <NEW_LINE> max_number = max_number['sequence_number__max'] <NEW_LINE> if max_number is not None: <NEW_LINE> <INDENT> self.sequence_number = max_number + 1 <NEW_LINE> <DEDENT> <DEDENT> super(Answer, self).save(*args, **kwargs) <NEW_LINE> <DEDENT> def __unicode__(self): <NEW_LINE> <INDENT> return self.answer | Answer lesson model.
Each lesson can have zero or more questions associated with it. The
question will have one or more answers, and each answer carries an
indication of whether it is correct. One question could have
more than one correct answer (or none at all). | 62599056435de62698e9d354
class WindChillMinFormula(object): <NEW_LINE> <INDENT> def __init__(self, index): <NEW_LINE> <INDENT> self.index = index <NEW_LINE> <DEDENT> index = None <NEW_LINE> min_windchill = None <NEW_LINE> def append(self, sample): <NEW_LINE> <INDENT> value_temp = sample[self.index[0]] <NEW_LINE> value_wind = sample[self.index[1]] <NEW_LINE> if value_temp is not None and value_wind is not None: <NEW_LINE> <INDENT> sample_windchill = meteo.WindChill(value_temp, meteo.msToKmh(value_wind)) <NEW_LINE> if sample_windchill is not None: <NEW_LINE> <INDENT> if self.min_windchill is None or self.min_windchill > sample_windchill: <NEW_LINE> <INDENT> self.min_windchill = sample_windchill <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> def value(self): <NEW_LINE> <INDENT> return self.min_windchill | Minimum wind chill temperature. Requires temperature and wind speed (sampled in m/s, converted to km/h internally). | 62599056d7e4931a7ef3d5cf
class MPPT(object): <NEW_LINE> <INDENT> def __init__(self, system, name): <NEW_LINE> <INDENT> self._algebs.extend(['pwa']) <NEW_LINE> self._fnamey.extend(['P_w^{opt}']) <NEW_LINE> <DEDENT> def init1(self, dae): <NEW_LINE> <INDENT> dae.y[self.pwa] = mmax(mmin(2 * dae.x[self.omega_m] - 1, 1), 0) <NEW_LINE> <DEDENT> def gcall(self, dae): <NEW_LINE> <INDENT> dae.g[self.pwa] = mmax(mmin(2 * dae.x[self.omega_m] - 1, 1), 0) - dae.y[self.pwa] <NEW_LINE> dae.hard_limit(self.pwa, 0, 1) <NEW_LINE> <DEDENT> def gycall(self, dae): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def jac0(self, dae): <NEW_LINE> <INDENT> dae.add_jac(Gy0, -1, self.pwa, self.pwa) <NEW_LINE> dae.add_jac(Gx0, 2, self.pwa, self.omega_m) <NEW_LINE> dae.add_jac(Gy0, 1e-6, self.pwa, self.pwa) | MPPT control algorithm | 625990564a966d76dd5f0442 |
class SearchClient(object): <NEW_LINE> <INDENT> @property <NEW_LINE> def supported_sizes(self): <NEW_LINE> <INDENT> return self._supported_sizes_map.keys() <NEW_LINE> <DEDENT> @property <NEW_LINE> def supported_styles(self): <NEW_LINE> <INDENT> return self._supported_styles_map.keys() <NEW_LINE> <DEDENT> def _size_to_native_size(self, size): <NEW_LINE> <INDENT> if size == '': <NEW_LINE> <INDENT> return size <NEW_LINE> <DEDENT> if size not in self._supported_sizes_map: <NEW_LINE> <INDENT> raise ValueError("Unsupported size '%s'" % size) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self._supported_sizes_map[size] <NEW_LINE> <DEDENT> <DEDENT> def _style_to_native_style(self, style): <NEW_LINE> <INDENT> if style == '': <NEW_LINE> <INDENT> return style <NEW_LINE> <DEDENT> if style not in self._supported_styles_map: <NEW_LINE> <INDENT> raise ValueError("Unsupported style '%s'" % style) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self._supported_styles_map[style] <NEW_LINE> <DEDENT> <DEDENT> def _fetch_results(self, query, num_results, aux_params={}, headers={}): <NEW_LINE> <INDENT> if self.async_query: <NEW_LINE> <INDENT> jobs = [gevent.spawn(self._fetch_results_from_offset, query, result_offset, aux_params=aux_params, headers=headers, num_results=num_results) for result_offset in range(0, num_results, self._results_per_req)] <NEW_LINE> gevent.joinall(jobs, timeout=self.timeout) <NEW_LINE> results = [] <NEW_LINE> for job in jobs: <NEW_LINE> <INDENT> if job.value: <NEW_LINE> <INDENT> results.extend(job.value) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> results = [] <NEW_LINE> for result_offset in range(0, num_results, self._results_per_req): <NEW_LINE> <INDENT> results.extend(self._fetch_results_from_offset(query, result_offset, aux_params=aux_params, headers=headers, num_results=num_results)) <NEW_LINE> <DEDENT> <DEDENT> if not results: <NEW_LINE> <INDENT> raise QueryException("No image URLs could be retrieved") <NEW_LINE> <DEDENT> return results | Base class for all search clients, providing utility methods common
to all classes. Requires the subclass to define the following:
PROPERTIES:
+ _supported_sizes_map (Dict)
mapping of external supported sizes to internal (API) supported sizes
e.g. {'large': 'xxlarge', 'medium': 'xlarge|large|medium', 'small': 'small'}
+ _supported_styles_map (Dict)
mapping of external supported styles to internal (API) supported styles
e.g. {'photo': 'Photo', 'graphics': 'Graphics'}
+ _results_per_req (Integer)
maximum number of queries to make per request, normally defined by the API
+ async_query (Bool)
make queries asynchronously or not
+ timeout (Float)
timeout in seconds for HTTP requests
METHODS:
+ def _fetch_results_from_offset(self, query, result_offset,
aux_params={}, headers={},
num_results=-1)
this method should return a set of results given an offset from the
first result and a count of results to return | 62599056e5267d203ee6ce40 |
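A minimal sketch of a concrete subclass satisfying the contract above; the client name and mappings are hypothetical, and _fetch_results_from_offset returns canned URLs instead of calling a real API:

    class DummySearchClient(SearchClient):
        _supported_sizes_map = {'large': 'xxlarge', 'small': 'small'}
        _supported_styles_map = {'photo': 'Photo'}
        _results_per_req = 50   # per-request cap of the (assumed) backend API
        async_query = False     # fetch result pages sequentially
        timeout = 5.0           # seconds per HTTP request

        def _fetch_results_from_offset(self, query, result_offset,
                                       aux_params={}, headers={},
                                       num_results=-1):
            # Stand-in for a real HTTP call: fabricate image URLs.
            count = min(self._results_per_req, num_results - result_offset)
            return ['http://example.com/%s/%d.jpg' % (query, result_offset + i)
                    for i in range(count)]

    urls = DummySearchClient()._fetch_results('kitten', 10)  # 10 fake URLs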
class SelectorDIC(ModelSelector): <NEW_LINE> <INDENT> def select(self): <NEW_LINE> <INDENT> warnings.filterwarnings("ignore", category=DeprecationWarning) <NEW_LINE> DIC_scores = [] <NEW_LINE> results = [] <NEW_LINE> antiRes = [] <NEW_LINE> try: <NEW_LINE> <INDENT> for n_component in self.n_components: <NEW_LINE> <INDENT> model = self.base_model(n_component) <NEW_LINE> logL = model.score(self.X, self.lengths) <NEW_LINE> antiLogL = np.mean( [ model.score(*self.hwords[word]) for word in self.words if word != self.this_word ] ) <NEW_LINE> DIC = logL - antiLogL <NEW_LINE> DIC_scores.append(DIC) <NEW_LINE> results.append(logL) <NEW_LINE> antiRes.append(antiLogL) <NEW_LINE> <DEDENT> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> best_component = self.n_components[np.argmax(DIC_scores)] if DIC_scores else self.n_constant <NEW_LINE> return self.base_model(best_component) | select best model based on Discriminative Information Criterion
Biem, Alain. "A model selection criterion for classification: Application to hmm topology optimization."
Document Analysis and Recognition, 2003. Proceedings. Seventh International Conference on. IEEE, 2003.
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.58.6208&rep=rep1&type=pdf
https://pdfs.semanticscholar.org/ed3d/7c4a5f607201f3848d4c02dd9ba17c791fc2.pdf
DIC = log(P(X(i))) - (1/(M-1)) * SUM(log(P(X(all but i)))) | 62599056097d151d1a2c25bd
class BugFreeLoginOrCondition(unittest.TestCase): <NEW_LINE> <INDENT> def setUp(self): <NEW_LINE> <INDENT> self.executable_path = chrome_driver <NEW_LINE> self.driver = webdriver.Chrome(executable_path=self.executable_path) <NEW_LINE> self.url = "http://localhost/bugfree" <NEW_LINE> open_url(self.driver, self.url) <NEW_LINE> login_bugfree(self.driver, "admin", "123456") <NEW_LINE> <DEDENT> def test_bugfree_bug1(self): <NEW_LINE> <INDENT> driver = self.driver <NEW_LINE> driver.find_element_by_id("CustomSetLink").click() <NEW_LINE> time.sleep(3) <NEW_LINE> get_screenshot_immediately(driver) <NEW_LINE> driver.find_element_by_xpath(".//*[@id='CustomSetTable']/tbody/tr[2]/td/input[1]").click() <NEW_LINE> <DEDENT> def test_bugfree_bug2(self): <NEW_LINE> <INDENT> driver = self.driver <NEW_LINE> driver.find_element_by_link_text("Case").click() <NEW_LINE> Select(driver.find_element_by_id("BugFreeQuery_andor1")).select_by_visible_text(u"或者") <NEW_LINE> self.driver.find_element_by_id("SaveQuery").click() <NEW_LINE> self.driver.find_element_by_id("dialogQueryTitle").send_keys("test_%s" % generate_random_num(1, 99)) <NEW_LINE> time.sleep(2) <NEW_LINE> get_screenshot_immediately(driver) <NEW_LINE> self.driver.find_element_by_name("yt0").click() <NEW_LINE> driver.close() <NEW_LINE> <DEDENT> def test_bugfree_bug3(self): <NEW_LINE> <INDENT> driver = self.driver <NEW_LINE> driver.find_element_by_xpath(".//*[@id='SearchConditionRow0']/td[7]/a/img").click() <NEW_LINE> time.sleep(2) <NEW_LINE> get_screenshot_immediately(driver) <NEW_LINE> driver.find_element_by_xpath(".//*[@id='SearchConditionRow1']/td[7]/a[2]/img").click() <NEW_LINE> time.sleep(2) <NEW_LINE> get_screenshot_immediately(driver) <NEW_LINE> <DEDENT> def tearDown(self): <NEW_LINE> <INDENT> self.driver.quit() | Query with an OR condition after logging in | 6259905607f4c71912bb098c
class RepresentationMaker(object): <NEW_LINE> <INDENT> def __init__(self, dw_conn, scope): <NEW_LINE> <INDENT> self.dw_conn = dw_conn <NEW_LINE> self.scope = scope <NEW_LINE> self.dim_reps = [] <NEW_LINE> self.fts_reps = [] <NEW_LINE> <DEDENT> def check_table_type(self, table, typelist): <NEW_LINE> <INDENT> for table_type in typelist: <NEW_LINE> <INDENT> if isinstance(table, table_type): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> return False <NEW_LINE> <DEDENT> def run(self): <NEW_LINE> <INDENT> pygrametl = self.scope['pygrametl'] <NEW_LINE> tables = pygrametl._alltables <NEW_LINE> for table in tables: <NEW_LINE> <INDENT> if self.check_table_type(table, DIM_CLASSES): <NEW_LINE> <INDENT> if isinstance(table, TypeOneSlowlyChangingDimension): <NEW_LINE> <INDENT> dim = SCDType1DimRepresentation(table, self.dw_conn) <NEW_LINE> <DEDENT> elif isinstance(table, SlowlyChangingDimension): <NEW_LINE> <INDENT> dim = SCDType2DimRepresentation(table, self.dw_conn) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dim = DimRepresentation(table, self.dw_conn) <NEW_LINE> <DEDENT> self.dim_reps.append(dim) <NEW_LINE> <DEDENT> elif self.check_table_type(table, FT_CLASSES): <NEW_LINE> <INDENT> ft = FTRepresentation(table, self.dw_conn) <NEW_LINE> self.fts_reps.append(ft) <NEW_LINE> <DEDENT> <DEDENT> snowflakes = [] <NEW_LINE> for x, value in self.scope.items(): <NEW_LINE> <INDENT> if isinstance(value, SnowflakedDimension): <NEW_LINE> <INDENT> snowflakes.append(value) <NEW_LINE> <DEDENT> <DEDENT> dw_rep = DWRepresentation(self.dim_reps, self.dw_conn, self.fts_reps, snowflakes) <NEW_LINE> pygrametl._alltables.clear() <NEW_LINE> return dw_rep | Creates a DWRepresentation object from an associated program scope | 62599056a17c0f6771d5d64a |
class BadObjectType(ODBError): <NEW_LINE> <INDENT> pass | The object had an unsupported type | 625990567047854f46340911 |
class Demo(ModuleMixin): <NEW_LINE> <INDENT> order = 1 <NEW_LINE> label = 'Introduction' <NEW_LINE> verbose_name = '我的桌面' <NEW_LINE> icon = '<i class="material-icons">account_balance</i>' <NEW_LINE> @property <NEW_LINE> def urls(self): <NEW_LINE> <INDENT> index_view = generic.TemplateView.as_view(template_name='demo/index.html') <NEW_LINE> return frontend.ModuleURLResolver( '^', [url('^$', index_view, name="index")], module=self, app_name='demo', namespace='demo') <NEW_LINE> <DEDENT> def index_url(self): <NEW_LINE> <INDENT> return '/' <NEW_LINE> <DEDENT> def installed(self): <NEW_LINE> <INDENT> return True | Home page module | 62599056460517430c432afa |
class home(mainHandler): <NEW_LINE> <INDENT> def get(self, **kwargs): <NEW_LINE> <INDENT> user = users.get_current_user() <NEW_LINE> if user: <NEW_LINE> <INDENT> username = user.nickname() <NEW_LINE> loginLink = users.create_login_url(self.request.uri) <NEW_LINE> logoutLink = users.create_logout_url(self.request.uri) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> username = None <NEW_LINE> loginLink = users.create_login_url(self.request.uri) <NEW_LINE> logoutLink = "/" <NEW_LINE> <DEDENT> comments_n = recentComments() <NEW_LINE> self.renderPage("version0.html", username = username, loginLink = loginLink, logoutLink = logoutLink, comments = comments_n) <NEW_LINE> <DEDENT> def post(self, **kwargs): <NEW_LINE> <INDENT> name = str(self.request.get('dname')) <NEW_LINE> text = str(self.request.get('comment')) <NEW_LINE> if name and text: <NEW_LINE> <INDENT> c = Comment(text = text, user = name) <NEW_LINE> c.put() <NEW_LINE> sleep(0.1) <NEW_LINE> recentComments(True) <NEW_LINE> <DEDENT> self.redirect('/') | Handler class for home | 62599056d99f1b3c44d06bf2 |
class Stack(object): <NEW_LINE> <INDENT> def __init__(self, depth): <NEW_LINE> <INDENT> self.depth = depth <NEW_LINE> self.items = [] <NEW_LINE> <DEDENT> def push(self, item): <NEW_LINE> <INDENT> if len(self.items) < self.depth: <NEW_LINE> <INDENT> self.items.append(item) <NEW_LINE> <DEDENT> <DEDENT> def pop(self): <NEW_LINE> <INDENT> return self.items.pop() <NEW_LINE> <DEDENT> def is_empty(self): <NEW_LINE> <INDENT> return not self.items | LIFO | 6259905623e79379d538da4e
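A hedged usage sketch of the bounded LIFO stack above (the filled-in method bodies are an assumption; the original row contained only stubs):

    s = Stack(depth=2)
    s.push('a')
    s.push('b')
    s.push('c')            # silently dropped: the stack is at its depth limit
    assert s.pop() == 'b'
    assert s.pop() == 'a'
    assert s.is_empty()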
class PhotoopException(PydlException): <NEW_LINE> <INDENT> pass | Exceptions raised by :mod:`pydl.photoop` that don't fit into a
standard exception class like :exc:`ValueError`. | 625990564428ac0f6e659a8c |
class _ExecutionContext(object): <NEW_LINE> <INDENT> def __init__( self, watermarks, keyed_states): <NEW_LINE> <INDENT> self.watermarks = watermarks <NEW_LINE> self.keyed_states = keyed_states <NEW_LINE> self._step_context = None <NEW_LINE> <DEDENT> def get_step_context(self): <NEW_LINE> <INDENT> if not self._step_context: <NEW_LINE> <INDENT> self._step_context = DirectStepContext(self.keyed_states) <NEW_LINE> <DEDENT> return self._step_context <NEW_LINE> <DEDENT> def reset(self): <NEW_LINE> <INDENT> self._step_context = None | Contains the context for the execution of a single PTransform.
It holds the watermarks for that transform, as well as keyed states. | 62599056097d151d1a2c25be |
class GeneralConfig(ro.ReadOnly): <NEW_LINE> <INDENT> RANDOM_SEED = 0 <NEW_LINE> LOGGING_LEVEL = logging.INFO | Container for general configurations. | 625990568e71fb1e983bd01c |
class SnapshotPatch(Model): <NEW_LINE> <INDENT> _attribute_map = { 'tags': {'key': 'tags', 'type': 'object'}, } <NEW_LINE> def __init__(self, **kwargs): <NEW_LINE> <INDENT> super(SnapshotPatch, self).__init__(**kwargs) <NEW_LINE> self.tags = kwargs.get('tags', None) | Snapshot patch.
:param tags: Resource tags
:type tags: object | 62599056b5575c28eb713775 |
class ReprMixin(SuperBase): <NEW_LINE> <INDENT> __slots__ = () <NEW_LINE> __repr_slots__ = () <NEW_LINE> @repr_compat <NEW_LINE> def __repr__(self): <NEW_LINE> <INDENT> attr_names = [] <NEW_LINE> attr_names_set = set() <NEW_LINE> for parent_class in self.__class__.__mro__: <NEW_LINE> <INDENT> if hasattr(parent_class, "__repr_slots__"): <NEW_LINE> <INDENT> attr_names.extend((attrname for attrname in parent_class.__repr_slots__ if attrname not in attr_names_set)) <NEW_LINE> attr_names_set.update(parent_class.__repr_slots__) <NEW_LINE> <DEDENT> elif hasattr(parent_class, "__slots__"): <NEW_LINE> <INDENT> attr_names.extend((attrname for attrname in parent_class.__slots__ if attrname not in attr_names_set)) <NEW_LINE> attr_names_set.update(parent_class.__slots__) <NEW_LINE> <DEDENT> <DEDENT> items = [] <NEW_LINE> for attrname in attr_names: <NEW_LINE> <INDENT> attr_value = self <NEW_LINE> try: <NEW_LINE> <INDENT> for subattr in attrname.split('.'): <NEW_LINE> <INDENT> attr_value = getattr(attr_value, subattr) <NEW_LINE> <DEDENT> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> attr_value = ':unset:' <NEW_LINE> <DEDENT> items.append((attrname, repr(attr_value))) <NEW_LINE> <DEDENT> attr_listing = ', '.join(("%s=%s" % attr_descr for attr_descr in items)) <NEW_LINE> return "%s(%s)" % (self.__class__.__name__, attr_listing) | A :term:`mixin` class inherited in front of/instead of object which gives
any object a decent __repr__ function. The object can additionally define a list in one of
__slots__ or __repr_slots__ which gives attributes for this repr function to report.
If __repr_slots__ is given, it will take precedence over any __slots__. For extended inheritance
hierarchies, the attributes are stacked, with superclass attributes listed before subclass
attributes.
.. attribute:: __repr_slots__
This is a list of the attribute names for the repr to display. One should be placed in
any subclass using this mixin that needs specific members displayed.
.. automethod:: __repr__ | 62599056b830903b9686ef27 |
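A hedged usage sketch (assumes ReprMixin and its SuperBase/repr_compat dependencies are importable); a subclass only has to list the attributes it wants reported:

    class Point(ReprMixin):
        __repr_slots__ = ('x', 'y')

        def __init__(self, x, y):
            self.x = x
            self.y = y

    # __repr__ walks the MRO collecting __repr_slots__ entries:
    # repr(Point(1, 2)) -> 'Point(x=1, y=2)'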
class GsStatusLaunchMergeToolCommand(TextCommand, GitCommand): <NEW_LINE> <INDENT> def run(self, edit): <NEW_LINE> <INDENT> interface = ui.get_interface(self.view.id()) <NEW_LINE> valid_ranges = (interface.get_view_regions("unstaged_files") + interface.get_view_regions("untracked_files") + interface.get_view_regions("merge_conflicts") + interface.get_view_regions("staged_files")) <NEW_LINE> lines = util.view.get_lines_from_regions( self.view, self.view.sel(), valid_ranges=valid_ranges ) <NEW_LINE> file_paths = tuple(line[4:].strip() for line in lines if line) <NEW_LINE> if len(file_paths) > 1: <NEW_LINE> <INDENT> sublime.error_message("You can only launch merge tool for a single file at a time.") <NEW_LINE> return <NEW_LINE> <DEDENT> sublime.set_timeout_async(lambda: self.launch_tool_for_file(file_paths[0]), 0) | Launch external merge tool for selected file. | 62599056dd821e528d6da429 |
class RandomAllyCast(Component): <NEW_LINE> <INDENT> def get_targets(self, selected, caster, players, monsters): <NEW_LINE> <INDENT> if isinstance(caster, type(players[0])): <NEW_LINE> <INDENT> return [random.choice([player for player in players if not player.fallen])] <NEW_LINE> <DEDENT> return [random.choice([monster for monster in monsters if not monster.fallen])] | Returns a random ally for casting a move | 625990564a966d76dd5f0444
class PhonemeList(MutableSequence): <NEW_LINE> <INDENT> def __init__(self, blocks: Union[Phoneme, Iterable[Phoneme]]): <NEW_LINE> <INDENT> if isinstance(blocks, Phoneme): <NEW_LINE> <INDENT> self._pho_list = [blocks] <NEW_LINE> <DEDENT> elif isinstance(blocks, Iterable): <NEW_LINE> <INDENT> self._pho_list = list(blocks) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError(f"Expecting a list of blocks or a phonemes, " f"got {str(type(blocks))}") <NEW_LINE> <DEDENT> <DEDENT> @classmethod <NEW_LINE> def from_pho_str(cls, pho_str_list: str): <NEW_LINE> <INDENT> return cls([Phoneme.from_str(pho_str) for pho_str in pho_str_list.split("\n") if pho_str.strip()]) <NEW_LINE> <DEDENT> def __len__(self) -> int: <NEW_LINE> <INDENT> return len(self._pho_list) <NEW_LINE> <DEDENT> def __delitem__(self, index: int): <NEW_LINE> <INDENT> del self._pho_list[index] <NEW_LINE> <DEDENT> def insert(self, index, value: Phoneme): <NEW_LINE> <INDENT> assert isinstance(value, Phoneme) <NEW_LINE> self._pho_list.insert(index, value) <NEW_LINE> <DEDENT> def append(self, value: Phoneme): <NEW_LINE> <INDENT> assert isinstance(value, Phoneme) <NEW_LINE> self._pho_list.append(value) <NEW_LINE> <DEDENT> def __setitem__(self, index: int, value: Phoneme): <NEW_LINE> <INDENT> assert isinstance(value, Phoneme) <NEW_LINE> self._pho_list[index] = value <NEW_LINE> <DEDENT> def __getitem__(self, index: int) -> Phoneme: <NEW_LINE> <INDENT> return self._pho_list[index] <NEW_LINE> <DEDENT> def __iter__(self) -> Iterable[Phoneme]: <NEW_LINE> <INDENT> return iter(self._pho_list) <NEW_LINE> <DEDENT> def __add__(self, other: 'PhonemeList'): <NEW_LINE> <INDENT> assert self.__class__ == other.__class__ <NEW_LINE> return PhonemeList(list(self._pho_list) + list(other._pho_list)) <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return "\n".join([str(phoneme) for phoneme in self]) <NEW_LINE> <DEDENT> @property <NEW_LINE> def phonemes_str(self): <NEW_LINE> <INDENT> return "".join([str(phoneme.name) for phoneme in self]) | A list of phonemes. Can be printed into a .pho string formatted file | 625990568e7ae83300eea5e1 |
class LSTM(nn.Module): <NEW_LINE> <INDENT> def __init__(self, params): <NEW_LINE> <INDENT> super(LSTM, self).__init__() <NEW_LINE> self.hidden_dim = params['nhid'] <NEW_LINE> self.n_layers = params['nlayers'] <NEW_LINE> self.batch = params['batch'] <NEW_LINE> self.seq = params['seq'] <NEW_LINE> self.dropout = params['dropout'] <NEW_LINE> alphabet_size = self.output_size = params['alphabet_size'] <NEW_LINE> self.lstm = nn.LSTM(alphabet_size, self.hidden_dim, self.n_layers, batch_first=True, dropout=self.dropout) <NEW_LINE> self.h2O = nn.Linear(self.hidden_dim, self.output_size) <NEW_LINE> self.hidden = self.init_hidden(params['type']) <NEW_LINE> <DEDENT> def init_hidden(self, type): <NEW_LINE> <INDENT> h_0 = Variable( torch.zeros(self.n_layers, self.batch, self.hidden_dim).type(type)) <NEW_LINE> c_0 = Variable( torch.zeros(self.n_layers, self.batch, self.hidden_dim).type(type)) <NEW_LINE> return h_0, c_0 <NEW_LINE> <DEDENT> def count_parameters(self): <NEW_LINE> <INDENT> return sum(p.numel() for p in self.parameters() if p.requires_grad) <NEW_LINE> <DEDENT> def forward(self, sequence): <NEW_LINE> <INDENT> out = sequence.contiguous().view(self.batch, self.seq, -1) <NEW_LINE> lstm_out, self.hidden = self.lstm(out, self.hidden) <NEW_LINE> out = self.h2O(lstm_out) <NEW_LINE> return out.view(-1, self.output_size) <NEW_LINE> <DEDENT> def gen_text(self, out, ix2char, nchars, t=0.5): <NEW_LINE> <INDENT> string = '' <NEW_LINE> self.eval() <NEW_LINE> while len(string) < nchars: <NEW_LINE> <INDENT> out = self(out) <NEW_LINE> _, idxs = out.max(1) <NEW_LINE> soft_out = F.softmax(out / t, dim=1) <NEW_LINE> p = soft_out.data.cpu().numpy() <NEW_LINE> for j in range(soft_out.size()[0]): <NEW_LINE> <INDENT> idxs[j] = np.random.choice(out.size()[1], p=p[j]) <NEW_LINE> string += ix2char[idxs[j].data[0]] <NEW_LINE> <DEDENT> <DEDENT> return string | LSTM neural network
Args:
params (dict): holds the program hyperparameters | 625990562ae34c7f260ac63a |
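A hedged example of the params dict the constructor reads (keys taken from the code above; values are illustrative):

    import torch

    params = {
        'nhid': 256,                 # hidden dimension
        'nlayers': 2,                # stacked LSTM layers
        'batch': 32,                 # batch size
        'seq': 100,                  # sequence length
        'dropout': 0.5,
        'alphabet_size': 65,         # one-hot input/output size
        'type': torch.FloatTensor,   # tensor type for the initial hidden state
    }
    model = LSTM(params)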
class Scope(object): <NEW_LINE> <INDENT> next_id = 0 <NEW_LINE> def __init__(self, name, parent=None): <NEW_LINE> <INDENT> self.id = Scope.next_id <NEW_LINE> Scope.next_id += 1 <NEW_LINE> self.name = name <NEW_LINE> self.parent = parent <NEW_LINE> self.children = [] <NEW_LINE> self.symbols = {} <NEW_LINE> self.typenames = set() <NEW_LINE> <DEDENT> @property <NEW_LINE> def qualified_name(self): <NEW_LINE> <INDENT> if self.parent is None: <NEW_LINE> <INDENT> return self.name <NEW_LINE> <DEDENT> return self.parent.qualified_name + '.' + self.name <NEW_LINE> <DEDENT> def defining_scope(self, name): <NEW_LINE> <INDENT> if name in self.symbols: <NEW_LINE> <INDENT> return self <NEW_LINE> <DEDENT> if self.parent: <NEW_LINE> <INDENT> return self.parent.defining_scope(name) <NEW_LINE> <DEDENT> return None <NEW_LINE> <DEDENT> def add(self, symbol): <NEW_LINE> <INDENT> if symbol.name in self.symbols: <NEW_LINE> <INDENT> self.symbols[symbol.name].append(symbol) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.symbols[symbol.name] = [symbol] <NEW_LINE> <DEDENT> <DEDENT> def get(self, name, default=None): <NEW_LINE> <INDENT> if name in self.symbols: <NEW_LINE> <INDENT> return self[name] <NEW_LINE> <DEDENT> return default <NEW_LINE> <DEDENT> def getlist(self, name): <NEW_LINE> <INDENT> return self.symbols[name] <NEW_LINE> <DEDENT> def __iter__(self): <NEW_LINE> <INDENT> return iter(self.symbols) <NEW_LINE> <DEDENT> def __contains__(self, name): <NEW_LINE> <INDENT> return name in self.symbols <NEW_LINE> <DEDENT> def __getitem__(self, name): <NEW_LINE> <INDENT> return self.symbols[name][0] <NEW_LINE> <DEDENT> def __hash__(self): <NEW_LINE> <INDENT> return hash(self.id) <NEW_LINE> <DEDENT> def __eq__(self, other): <NEW_LINE> <INDENT> return self.id == other.id <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return self.qualified_name | A scope works like a dictionary of symbols, but one that can store multiple
values for the same symbol name. This is necessary because some symbols
may be overloaded.
Internally, all values for a key are stored as a list, but the standard
dict access method (`__getitem__`) will only return the first value for a
key. To get the full list of all values for a symbol name, use the list
methods. | 6259905629b78933be26ab6e |
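A hedged usage sketch; the Sym stand-in is hypothetical, since Scope only needs objects carrying a name attribute:

    class Sym(object):
        def __init__(self, name):
            self.name = name

    root = Scope('global')
    fn = Scope('foo', parent=root)
    root.add(Sym('print'))
    fn.add(Sym('x'))
    fn.add(Sym('x'))                       # overload: stored under the same key

    assert fn.qualified_name == 'global.foo'
    assert len(fn.getlist('x')) == 2       # both overloads are kept
    assert fn.defining_scope('print') is root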
class King(Piece): <NEW_LINE> <INDENT> name = 'King' <NEW_LINE> value = 1000 <NEW_LINE> symbol = 'K' <NEW_LINE> max_moves = 1 <NEW_LINE> number = 1 <NEW_LINE> def __init__(self, colour=PLAIN): <NEW_LINE> <INDENT> super(King, self).__init__(colour=colour) <NEW_LINE> self.can_castle = True <NEW_LINE> <DEDENT> def possibleMoves(self, start): <NEW_LINE> <INDENT> row, col = position(start) <NEW_LINE> directions = [] <NEW_LINE> if row < 7: <NEW_LINE> <INDENT> if col > 0: <NEW_LINE> <INDENT> directions.append([squarify(row+1, col-1)]) <NEW_LINE> <DEDENT> directions.append([squarify(row+1, col)]) <NEW_LINE> if col < 7: <NEW_LINE> <INDENT> directions.append([squarify(row+1, col+1)]) <NEW_LINE> <DEDENT> <DEDENT> if col < 7: <NEW_LINE> <INDENT> directions.append([squarify(row, col+1)]) <NEW_LINE> <DEDENT> if row > 0: <NEW_LINE> <INDENT> if col < 7: <NEW_LINE> <INDENT> directions.append([squarify(row-1, col+1)]) <NEW_LINE> <DEDENT> directions.append([squarify(row-1, col)]) <NEW_LINE> if col > 0: <NEW_LINE> <INDENT> directions.append([squarify(row-1, col-1)]) <NEW_LINE> <DEDENT> <DEDENT> if col > 0: <NEW_LINE> <INDENT> directions.append([squarify(row, col-1)]) <NEW_LINE> <DEDENT> return directions <NEW_LINE> <DEDENT> def castlingMoves(self, start): <NEW_LINE> <INDENT> directions = [] <NEW_LINE> if self.can_castle: <NEW_LINE> <INDENT> if self.colour == WHITE and start == 'E1': <NEW_LINE> <INDENT> directions.append('C1') <NEW_LINE> directions.append('G1') <NEW_LINE> <DEDENT> elif self.colour == BLACK and start == 'E8': <NEW_LINE> <INDENT> directions.append('C8') <NEW_LINE> directions.append('G8') <NEW_LINE> <DEDENT> <DEDENT> return directions <NEW_LINE> <DEDENT> def afterMove(self, **kwargs): <NEW_LINE> <INDENT> self.can_castle = False | The King piece. Do not lose this one! ;-)
>>> piece = King(WHITE)
>>> str(piece)
'K'
>>> print piece
K
>>> start = 'E1'
>>> piece.possibleMoves(start)
[['D2'], ['E2'], ['F2'], ['F1'], ['D1']]
>>> piece.castlingMoves(start)
['C1', 'G1']
After the King has moved once, it should not have the castling
moves anymore.
>>> piece.afterMove()
>>> piece.castlingMoves(start)
[]
>>> start = 'E3'
>>> wanted = ('D4', 'E4', 'F4', 'F3', 'F2', 'E2', 'D2', 'D3')
>>> _wantedMovesPossible(piece, start, wanted)
True
>>> unwanted = ('E1', 'E5', 'G3')
>>> _unwantedMovesNotPossible(piece, start, unwanted)
True
>>> start = 'B1'
>>> piece.possibleMoves(start)
[['A2'], ['B2'], ['C2'], ['C1'], ['A1']] | 6259905607f4c71912bb098e |
@dataclass(frozen=True) <NEW_LINE> class Ping(Event): <NEW_LINE> <INDENT> payload: bytes = b"" <NEW_LINE> def response(self) -> "Pong": <NEW_LINE> <INDENT> return Pong(payload=self.payload) | The Ping event can be sent to trigger a ping frame and is fired
when a Ping is received.
**wsproto does not automatically send a pong response to a ping event.** To
comply with the RFC you MUST send a pong even as soon as is practical. The
:meth:`response` method provides a suitable event for this purpose.
Fields:
.. attribute:: payload
An optional payload to emit with the ping frame. | 62599056009cb60464d02a88 |
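A hedged sketch of the pong reply the note above requires, assuming a wsproto-style connection ws (with receive_data/events/send) and a connected socket sock:

    data = sock.recv(4096)
    ws.receive_data(data)
    for event in ws.events():
        if isinstance(event, Ping):
            # response() builds a Pong echoing the same payload.
            sock.sendall(ws.send(event.response()))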
class AccountRetencionSoportada(ModelSQL, ModelView): <NEW_LINE> <INDENT> __name__ = 'account.retencion.soportada' <NEW_LINE> name = fields.Char('Number', required=True) <NEW_LINE> amount = fields.Numeric('Amount', digits=(16, 2), required=True) <NEW_LINE> date = fields.Date('Date', required=True) <NEW_LINE> tax = fields.Many2One('account.retencion', 'Tax', domain=[('type', '=', 'soportada')]) <NEW_LINE> voucher = fields.Many2One('account.voucher', 'Voucher') <NEW_LINE> party = fields.Many2One('party.party', 'Party') <NEW_LINE> @staticmethod <NEW_LINE> def default_amount(): <NEW_LINE> <INDENT> return Decimal('0.00') <NEW_LINE> <DEDENT> @staticmethod <NEW_LINE> def default_date(): <NEW_LINE> <INDENT> Date = Pool().get('ir.date') <NEW_LINE> return Date.today() | Account Retencion Soportada | 62599056be8e80087fbc05d6 |