Columns:
  max_stars_count (int64): 301 to 224k
  text (string): lengths 6 to 1.05M
  token_count (int64): 3 to 727k
715
<filename>Mathematics/NChooseK/python/NChooseK.py
def bin_coeff(n, k):
    # Binomial coefficient C(n, k) with exact integer arithmetic; multiplying
    # before dividing keeps each intermediate an integer, so // never truncates.
    ans = 1
    if k > (n - k):
        k = n - k
    for i in range(k):
        ans = ans * (n - i) // (i + 1)
    return ans
95
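A quick spot-check of the routine above, assuming the exact-integer form shown there: Python's math.comb (3.8+) computes the same binomial coefficient, so the two should agree on every input.

import math
import random

def bin_coeff(n, k):
    # Same routine as the row above, exact integer arithmetic throughout.
    ans = 1
    if k > (n - k):
        k = n - k
    for i in range(k):
        ans = ans * (n - i) // (i + 1)
    return ans

# Compare against math.comb on random inputs.
for _ in range(1000):
    n = random.randint(0, 80)
    k = random.randint(0, n)
    assert bin_coeff(n, k) == math.comb(n, k), (n, k)
print("bin_coeff agrees with math.comb")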
1,036
/* #undef AC_APPLE_UNIVERSAL_BUILD */ /* #undef CONFIG_BLKID_DEBUG */ #define CONFIG_BUILD_FINDFS 1 /* #undef CONFIG_DEVELOPER_FEATURES */ /* #undef CONFIG_JBD_DEBUG */ #define CONFIG_MMP 1 /* #undef CONFIG_TDB */ /* #undef CONFIG_TESTIO_DEBUG */ /* #undef DISABLE_BACKTRACE */ #define ENABLE_BMAP_STATS 1 /* #undef ENABLE_BMAP_STATS_OPS */ /* #undef ENABLE_NLS */ /* #undef HAVE_ADD_KEY */ /* #undef HAVE_ATTR_XATTR_H */ /* #undef HAVE_BACKTRACE */ /* #undef HAVE_BLKID_PROBE_ENABLE_PARTITIONS */ /* #undef HAVE_BLKID_PROBE_GET_TOPOLOGY */ /* #undef HAVE_BLKID_TOPOLOGY_GET_DAX */ /* #undef HAVE_CFLOCALECOPYPREFERREDLANGUAGES */ /* #undef HAVE_CFPREFERENCESCOPYAPPVALUE */ /* #undef HAVE_CHFLAGS */ /* #undef HAVE_DCGETTEXT */ #define HAVE_DIRENT_H 1 #define HAVE_DLOPEN 1 /* #undef HAVE_DOPRNT */ #define HAVE_ERRNO_H 1 /* #undef HAVE_EXECINFO_H */ #define HAVE_EXT2_IOCTLS 1 /* #undef HAVE_FADVISE64 */ #define HAVE_FALLOCATE 1 #define HAVE_FALLOCATE64 1 #define HAVE_FCHOWN 1 #define HAVE_FCNTL 1 #define HAVE_FDATASYNC 1 #define HAVE_FSTAT64 1 #define HAVE_FSYNC 1 #define HAVE_FTRUNCATE64 1 /* #undef HAVE_FUSE_H */ #define HAVE_FUTIMES 1 #define HAVE_GETCWD 1 #define HAVE_GETDTABLESIZE 1 #define HAVE_GETHOSTNAME 1 /* #undef HAVE_GETMNTINFO */ #define HAVE_GETOPT_H 1 #define HAVE_GETPWUID_R 1 #define HAVE_GETRLIMIT 1 #define HAVE_GETRUSAGE 1 /* #undef HAVE_GETTEXT */ /* #undef HAVE_ICONV */ #define HAVE_INTPTR_T 1 #define HAVE_INTTYPES_H 1 #define HAVE_JRAND48 1 /* #undef HAVE_KEYCTL */ #define HAVE_LINUX_FALLOC_H 1 #define HAVE_LINUX_FD_H 1 #define HAVE_LINUX_FSMAP_H 1 #define HAVE_LINUX_LOOP_H 1 #define HAVE_LINUX_MAJOR_H 1 #define HAVE_LINUX_TYPES_H 1 #define HAVE_LLISTXATTR 1 /* #undef HAVE_LLSEEK */ /* #undef HAVE_LLSEEK_PROTOTYPE */ #define HAVE_LSEEK64 1 #define HAVE_LSEEK64_PROTOTYPE 1 /* #undef HAVE_MAGIC_H */ /* #undef HAVE_MALLINFO */ #define HAVE_MALLOC_H 1 #define HAVE_MBSTOWCS 1 #define HAVE_MEMALIGN 1 #define HAVE_MEMORY_H 1 #define HAVE_MEMPCPY 1 #define HAVE_MMAP 1 #define HAVE_MNTENT_H 1 #define HAVE_MOUNT_NODEV 1 #define HAVE_MOUNT_NOSUID 1 #define HAVE_MSYNC 1 #define HAVE_NANOSLEEP 1 #define HAVE_NETINET_IN_H 1 /* #undef HAVE_NET_IF_DL_H */ #define HAVE_NET_IF_H 1 #define HAVE_OPEN64 1 #define HAVE_OPTRESET 1 #define HAVE_PATHCONF 1 #define HAVE_PATHS_H 1 #define HAVE_POSIX_FADVISE 1 #define HAVE_POSIX_FADVISE64 1 #define HAVE_POSIX_MEMALIGN 1 #define HAVE_PRCTL 1 #define HAVE_PREAD 1 #define HAVE_PREAD64 1 #define HAVE_PTHREAD 1 #define HAVE_PTHREAD_H 1 #define HAVE_PTHREAD_PRIO_INHERIT 1 #define HAVE_PWRITE 1 #define HAVE_PWRITE64 1 #define HAVE_RECLEN_DIRENT 1 /* #undef HAVE_SA_LEN */ #define HAVE_SECURE_GETENV 1 #define HAVE_SEMAPHORE_H 1 /* #undef HAVE_SEM_INIT */ #define HAVE_SETJMP_H 1 #define HAVE_SETMNTENT 1 #define HAVE_SETRESGID 1 #define HAVE_SETRESUID 1 #define HAVE_SIGNAL_H 1 #define HAVE_SNPRINTF 1 #define HAVE_SRANDOM 1 /* #undef HAVE_STAT_FLAGS */ #define HAVE_STDARG_H 1 #define HAVE_STDINT_H 1 #define HAVE_STDLIB_H 1 #define HAVE_STPCPY 1 #define HAVE_STRCASECMP 1 #define HAVE_STRDUP 1 #define HAVE_STRINGS_H 1 #define HAVE_STRING_H 1 #define HAVE_STRNLEN 1 #define HAVE_STRPTIME 1 #define HAVE_STRTOULL 1 #define HAVE_STRUCT_STAT_ST_ATIM 1 #define HAVE_SYNC_FILE_RANGE 1 #define HAVE_SYSCONF 1 /* #undef HAVE_SYS_ACL_H */ /* #undef HAVE_SYS_DISKLABEL_H */ /* #undef HAVE_SYS_DISK_H */ #define HAVE_SYS_FILE_H 1 #define HAVE_SYS_IOCTL_H 1 /* #undef HAVE_SYS_KEY_H */ /* #undef HAVE_SYS_MKDEV_H */ #define HAVE_SYS_MMAN_H 1 #define HAVE_SYS_MOUNT_H 1 #define 
HAVE_SYS_PRCTL_H 1 #define HAVE_SYS_RESOURCE_H 1 #define HAVE_SYS_SELECT_H 1 #define HAVE_SYS_SOCKET_H 1 /* #undef HAVE_SYS_SOCKIO_H */ #define HAVE_SYS_STAT_H 1 #define HAVE_SYS_SYSCALL_H 1 #define HAVE_SYS_SYSMACROS_H 1 #define HAVE_SYS_TIME_H 1 #define HAVE_SYS_TYPES_H 1 #define HAVE_SYS_UN_H 1 #define HAVE_SYS_WAIT_H 1 #define HAVE_SYS_XATTR_H 1 #define HAVE_TERMIOS_H 1 /* #undef HAVE_TERMIO_H */ #define HAVE_TYPE_SSIZE_T 1 #define HAVE_UNISTD_H 1 #define HAVE_USLEEP 1 #define HAVE_UTIME 1 #define HAVE_UTIMES 1 #define HAVE_UTIME_H 1 #define HAVE_VALLOC 1 #define HAVE_VPRINTF 1 /* #undef HAVE___SECURE_GETENV */ #define PACKAGE "e2fsprogs" #define PACKAGE_BUGREPORT "" #define PACKAGE_NAME "" #define PACKAGE_STRING "" #define PACKAGE_TARNAME "" #define PACKAGE_URL "" #define PACKAGE_VERSION "" /* #undef PTHREAD_CREATE_JOINABLE */ #define SIZEOF_INT 4 /* probe SIZEOF_LONG */ #define SIZEOF_LONG_LONG 8 #define SIZEOF_OFF_T 8 #define SIZEOF_SHORT 2 /* probe SIZEOF_TIME_T */ #define STDC_HEADERS 1 #ifdef HAVE__THREAD_LOCAL # define TLS _Thread_local #endif #ifndef _ALL_SOURCE # define _ALL_SOURCE 1 #endif #ifndef _GNU_SOURCE # define _GNU_SOURCE 1 #endif #ifndef _POSIX_PTHREAD_SEMANTICS # define _POSIX_PTHREAD_SEMANTICS 1 #endif #ifndef _TANDEM_SOURCE # define _TANDEM_SOURCE 1 #endif #ifndef __EXTENSIONS__ # define __EXTENSIONS__ 1 #endif /* #undef USE_UUIDD */ #define VERSION "0.14.1" #if defined AC_APPLE_UNIVERSAL_BUILD # if defined __BIG_ENDIAN__ # define WORDS_BIGENDIAN 1 # endif #else # ifndef WORDS_BIGENDIAN /* # undef WORDS_BIGENDIAN */ # endif #endif /* #undef _INTL_REDIRECT_MACROS */ /* #undef _MINIX */ /* #undef _POSIX_1_SOURCE */ /* #undef _POSIX_SOURCE */ #define LOCALEDIR "/share/locale" #define ROOT_SYSCONFDIR "/etc"
2,320
2,659
<gh_stars>1000+ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright 2021 4Paradigm # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys def collect(file_name): info = {} cases = [] for line in open(file_name): parts = [_.strip() for _ in line.split(" ") if _.strip() != ""] if len(parts) < 4: continue try: case_name = parts[0] time = float(parts[1]) cpu_time = float(parts[3]) cases.append(case_name) info[case_name] = (time, cpu_time) except: print("Skip line: \"" + line.strip() + "\"") return info, cases def compare(base_file, cur_file): html = """ <html><head> <meta charset="utf-8"> <style type="text/css"> th { border: 1px solid black; text-align: center; padding: 3px; background-color: green; color: white;} td { border: 1px solid black; text-align: center; padding: 3px; } .posrate { color: green; } .negrate { color: red; } </style> </head><body> <table border="1"> <tr> <th>Case</th> <th>总耗时</th> <th>基线总耗时</th> <th>总耗时变化</th> <th>CPU耗时</th> <th>基线CPU耗时</th> <th>CPU耗时变化</th> </tr> ${CASES} </table> </body></html> """ rows = [] base_dict, base_cases = collect(base_file) cur_dict, _ = collect(cur_file) for case_name in base_cases: if not case_name in cur_dict: continue (total_time, cpu_time) = cur_dict[case_name] (base_total_time, base_cpu_time) = base_dict[case_name] total_time_rate = float(total_time - base_total_time) / base_total_time cpu_time_rate = float(cpu_time - base_cpu_time) / base_cpu_time def create_rate_str(rate): rate = rate * 100 if rate > 5: return "<div class=\"negrate\">+%.1f%%</div>" % rate elif rate < -5: return "<div class=\"posrate\">%.1f%%</div>" % rate else: return "%.1f" % rate row = """<tr> <td>%s</td> <td>%d</td> <td>%d</td> <td>%s</td> <td>%d</td> <td>%d</td> <td>%s</td> </tr>""" % (case_name, total_time, base_total_time, create_rate_str(total_time_rate), cpu_time, base_cpu_time, create_rate_str(cpu_time_rate)) rows.append(row) html = html.replace("${CASES}", "\n".join(rows)) with open("./benchmark_compare.html", "w") as f: f.write(html) if __name__ == "__main__": compare(sys.argv[1], sys.argv[2])
1,749
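The collect() routine in the script above expects whitespace-separated benchmark lines with the case name in the first field, wall time in the second, and CPU time in the fourth, which matches Google Benchmark's console output. A minimal, self-contained sketch of the same parse on a made-up sample line (the case name and numbers are hypothetical):

def collect_lines(lines):
    # Mirror of collect() above: first field = case name,
    # second = wall time, fourth = CPU time; shorter lines are skipped.
    info, cases = {}, []
    for line in lines:
        parts = line.split()
        if len(parts) < 4:
            continue
        try:
            name, time, cpu_time = parts[0], float(parts[1]), float(parts[3])
        except ValueError:
            print('Skip line: "%s"' % line.strip())
            continue
        cases.append(name)
        info[name] = (time, cpu_time)
    return info, cases

# Hypothetical sample in Google Benchmark console format.
sample = ["BM_EngineSimpleSelectVar/100   1200 ns   1180 ns   593"]
print(collect_lines(sample))
# ({'BM_EngineSimpleSelectVar/100': (1200.0, 1180.0)}, ['BM_EngineSimpleSelectVar/100'])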
815
<filename>Qt/ApplicationComponents/pqResetDefaultSettingsReaction.h /*========================================================================= Program: ParaView Module: pqResetDefaultSettingsReaction.h Copyright (c) 2005,2006 Sandia Corporation, Kitware Inc. All rights reserved. ParaView is a free software; you can redistribute it and/or modify it under the terms of the ParaView license version 1.2. See License_v1.2.txt for the full ParaView license. A copy of this license can be obtained by contacting Kitware Inc. 28 Corporate Drive Clifton Park, NY 12065 USA THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ========================================================================*/ #ifndef pqResetDefaultSettingsReaction_h #define pqResetDefaultSettingsReaction_h #include "pqReaction.h" #include <QStringList> /** * @class pqResetDefaultSettingsReaction * @brief reaction to restore user settings to default. * @ingroup Reactions * * pqResetDefaultSettingsReaction can restore user settings to default. It * pops up a prompt indicating whether the user wants to generate backups for * settings being restored. If so, backups are generated. */ class PQAPPLICATIONCOMPONENTS_EXPORT pqResetDefaultSettingsReaction : public pqReaction { Q_OBJECT typedef pqReaction Superclass; public: pqResetDefaultSettingsReaction(QAction* parent); ~pqResetDefaultSettingsReaction() override; /** * Reset to default settings. Application must be restarted for the changes to * take effect. */ virtual void resetSettingsToDefault(); protected: /** * Called when the action is triggered. */ void onTriggered() override { this->resetSettingsToDefault(); } virtual void clearSettings(); virtual QStringList backupSettings(); private: Q_DISABLE_COPY(pqResetDefaultSettingsReaction) }; #endif
765
3,850
/**
 * Copyright (C) 2010-2016 eBusiness Information, Excilys Group
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed To in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package org.androidannotations.preference;

import org.androidannotations.annotations.EFragment;
import org.androidannotations.annotations.PreferenceByKey;
import org.androidannotations.annotations.PreferenceChange;
import org.androidannotations.annotations.PreferenceClick;
import org.androidannotations.annotations.PreferenceScreen;

import android.support.v7.preference.Preference;
import android.support.v7.preference.PreferenceFragmentCompat;
import android.support.v7.preference.SwitchPreference;

@PreferenceScreen(R.xml.preferences)
@EFragment
public class SupportV7SettingsFragment extends PreferenceFragmentCompat {

    @PreferenceByKey(R.string.myKey)
    SwitchPreference pref;

    @PreferenceByKey(R.string.myKey)
    Preference pref2;

    @PreferenceChange(R.string.myKey)
    void prefChanged(Preference pref) {
    }

    @PreferenceClick(R.string.myKey)
    void prefClicked(SwitchPreference pref) {
    }
}
437
801
<gh_stars>100-1000 from numpy import dot from neupy.utils import format_data from neupy.exceptions import NotTrained from neupy.core.properties import BoundedProperty from neupy.algorithms.base import BaseSkeleton from .utils import pdf_between_data __all__ = ('GRNN',) class GRNN(BaseSkeleton): """ Generalized Regression Neural Network (GRNN). Network applies only to the regression problems. Parameters ---------- std : float Standard deviation for PDF function. If your input features have high values than standard deviation should also be high. For instance, if input features from range ``[0, 20]`` that standard deviation should be also a big value like ``10`` or ``15``. Small values will lead to bad prediction. {Verbose.verbose} Notes ----- - GRNN Network is sensitive for cases when one input feature has higher values than the other one. Input data has to be normalized before training. - Standard deviation has to match the range of the input features Check ``std`` parameter description for more information. - The bigger training dataset the slower prediction. Algorithm is much more efficient for small datasets. - Network uses lazy learning which mean that network doesn't need iterative training. It just stores parameters and use them to make a predictions. Methods ------- train(X_train, y_train, copy=True) Network just stores all the information about the data and use it for the prediction. Parameter ``copy`` copies input data before saving it inside the network. predict(X) Return prediction per each sample in the ``X``. {BaseSkeleton.fit} Examples -------- >>> import numpy as np >>> from sklearn import datasets, preprocessing >>> from sklearn.model_selection import train_test_split >>> from neupy import algorithms >>> >>> dataset = datasets.load_diabetes() >>> x_train, x_test, y_train, y_test = train_test_split( ... preprocessing.minmax_scale(dataset.data), ... preprocessing.minmax_scale(dataset.target.reshape(-1, 1)), ... test_size=0.3, ... ) >>> >>> nw = algorithms.GRNN(std=0.1, verbose=False) >>> nw.train(x_train, y_train) >>> >>> y_predicted = nw.predict(x_test) >>> mse = np.mean((y_predicted - y_test) ** 2) >>> mse 0.05280970704568171 """ std = BoundedProperty(minval=0) def __init__(self, std, verbose=False): self.std = std self.X_train = None self.y_train = None super(GRNN, self).__init__(verbose=verbose) def train(self, X_train, y_train, copy=True): """ Trains network. PNN doesn't actually train, it just stores input data and use it for prediction. Parameters ---------- X_train : array-like (n_samples, n_features) y_train : array-like (n_samples,) Target variable should be vector or matrix with one feature column. copy : bool If value equal to ``True`` than input matrices will be copied. Defaults to ``True``. Raises ------ ValueError In case if something is wrong with input data. """ X_train = format_data(X_train, copy=copy) y_train = format_data(y_train, copy=copy) if y_train.shape[1] != 1: raise ValueError("Target value must be one dimensional array") self.X_train = X_train self.y_train = y_train if X_train.shape[0] != y_train.shape[0]: raise ValueError("Number of samples in the input and target " "datasets are different") def predict(self, X): """ Make a prediction from the input data. Parameters ---------- X : array-like (n_samples, n_features) Raises ------ ValueError In case if something is wrong with input data. Returns ------- array-like (n_samples,) """ if self.X_train is None: raise NotTrained( "Cannot make a prediction. 
Network hasn't been trained yet") X = format_data(X) if X.shape[1] != self.X_train.shape[1]: raise ValueError( "Input data must contain {0} features, got {1}" "".format(self.X_train.shape[1], X.shape[1])) ratios = pdf_between_data(self.X_train, X, self.std) return (dot(self.y_train.T, ratios) / ratios.sum(axis=0)).T
1,877
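GRNN.predict() above is a kernel-weighted (Nadaraya-Watson) average: each training target is weighted by a Gaussian kernel of the distance between the query and the corresponding training input, with std controlling the kernel width. A minimal numpy sketch of that formula, assuming a plain Gaussian kernel (neupy's pdf_between_data may normalize differently; constant factors cancel in the ratio anyway):

import numpy as np

def grnn_predict(X_train, y_train, X, std):
    # Pairwise squared Euclidean distances between queries and training points.
    d2 = ((X[:, None, :] - X_train[None, :, :]) ** 2).sum(axis=-1)
    # Gaussian kernel weights, shape (n_queries, n_train).
    w = np.exp(-d2 / (2.0 * std ** 2))
    # Kernel-weighted average of the training targets.
    return (w @ y_train.ravel()) / w.sum(axis=1)

X_train = np.array([[0.0], [1.0], [2.0]])
y_train = np.array([0.0, 1.0, 4.0])
print(grnn_predict(X_train, y_train, np.array([[1.5]]), std=0.5))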
311
<filename>FetLife/fetlife/src/main/java/com/bitlove/fetlife/view/adapter/RelationsRecyclerAdapter.java package com.bitlove.fetlife.view.adapter; import com.bitlove.fetlife.FetLifeApplication; import com.bitlove.fetlife.model.pojos.fetlife.db.RelationReference; import com.bitlove.fetlife.model.pojos.fetlife.db.RelationReference_Table; import com.bitlove.fetlife.model.pojos.fetlife.dbjson.Member; import com.bitlove.fetlife.model.pojos.fetlife.dbjson.Member_Table; import com.bitlove.fetlife.util.ServerIdUtil; import com.raizlabs.android.dbflow.annotation.Collate; import com.raizlabs.android.dbflow.sql.language.OrderBy; import com.raizlabs.android.dbflow.sql.language.Select; import java.text.Collator; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.List; public class RelationsRecyclerAdapter extends MembersRecyclerAdapter { private String memberId; private final int relationType; public RelationsRecyclerAdapter(String memberId, int relationType, FetLifeApplication fetLifeApplication) { super(fetLifeApplication); this.memberId = memberId; this.relationType = relationType; loadItems(); } protected void loadItems() { //TODO: think of moving to separate thread with specific DB executor try { if (ServerIdUtil.isServerId(memberId)) { if (ServerIdUtil.containsServerId(memberId)) { memberId = ServerIdUtil.getLocalId(memberId); } else { return; } } List<RelationReference> relationReferences = new Select().from(RelationReference.class).where(RelationReference_Table.userId.is(memberId)).and(RelationReference_Table.relationType.is(relationType)).orderBy(OrderBy.fromProperty(RelationReference_Table.nickname).ascending().collate(Collate.UNICODE)).queryList(); List<String> relationIds = new ArrayList<>(); for (RelationReference relationReference : relationReferences) { relationIds.add(relationReference.getId()); } itemList = new Select().from(Member.class).where(Member_Table.id.in(relationIds)).orderBy(OrderBy.fromProperty(Member_Table.nickname).ascending().collate(Collate.UNICODE)).queryList(); final Collator coll = Collator.getInstance(); coll.setStrength(Collator.IDENTICAL); Collections.sort(itemList, new Comparator<Member>() { @Override public int compare(Member member, Member member2) { //Workaround to match with DB sorting String nickname1 = member.getNickname().replaceAll("_","z"); String nickname2 = member2.getNickname().replaceAll("_","z"); return coll.compare(nickname1,nickname2); } }); } catch (Throwable t) { itemList = new ArrayList<>(); } } }
1,223
323
<filename>examples/solv/repoinfo_cache.h
struct repoinfo;
struct stat;

extern void set_userhome(void);
extern char *calc_cachepath(Repo *repo, const char *repoext, int forcesystemloc);
extern void calc_cookie_fp(FILE *fp, Id chktype, unsigned char *out);
extern void calc_cookie_stat(struct stat *stb, Id chktype, unsigned char *cookie, unsigned char *out);
extern int usecachedrepo(struct repoinfo *cinfo, const char *repoext, int mark);
extern void writecachedrepo(struct repoinfo *cinfo, const char *repoext, Repodata *repodata);
184
743
<reponame>riag23/AdaptiveCards<filename>source/uwp/AdaptiveCardTestApp/Expected/v1.3.Tests.LongLabelsToJson.json {"actions":[{"title":"OK","type":"Action.Submit"}],"body":[{"choices":[{"title":"Option 1","value":"1"},{"title":"Option 2","value":"2"}],"id":"choiceSet","label":"Longer label to verify label wrapping for an Input.ChoiceSet. If that was not long enough, then this should make it long enough. Did you know that honeybees can flap their wings 200 times every second?","style":"Compact","type":"Input.ChoiceSet"},{"id":"toggle","label":"Longer label to verify label wrapping for an Input.Toggle. If that was not long enough, then this should make it long enough. Did you know octopuses have three hearts?","title":"I accept the terms and agreements","type":"Input.Toggle","value":"false"},{"id":"time","label":"Longer label to verify label wrapping for an Input.Time. If that was not long enough, then this should make it long enough. Did you know bats always turn left when leaving a cave?","type":"Input.Time"},{"id":"text","label":"Longer label to verify label wrapping for an Input.Text. If that was not long enough, then this should make it long enough. Did you know that elephants are the only animal that cannot jump?","style":"text","type":"Input.Text"},{"id":"textWithInline","inlineAction":{"iconUrl":"http://adaptivecards.io/content/cats/1.png","title":"Inline Action","type":"Action.Submit"},"label":"Longer label to verify label wrapping for an Input.Text. If that was not long enough, then this should make it long enough. Did you know that wild dolphins call each other by name?","style":"text","type":"Input.Text"},{"id":"number","label":"Longer label to verify label wrapping for an Input.Number. If that was not long enough, then this should make it long enough. Did you know that slugs have four noses?","type":"Input.Number"},{"id":"date","label":"Longer label to verify label wrapping for an Input.Date. If that was not long enough, then this should make it long enough. Did you know a snail can sleep for three years?","type":"Input.Date"}],"type":"AdaptiveCard","version":"1.0"}
533
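The expected-output card above attaches a long label to every input element. A quick way to inspect it, assuming the JSON has been saved locally under the file name from the row above (hypothetical local path):

import json

# Hypothetical local copy of the expected-output file named in the row above.
with open("v1.3.Tests.LongLabelsToJson.json") as f:
    card = json.load(f)

# List each input element's type, id, and label length.
for element in card["body"]:
    print(element["type"], element["id"], len(element.get("label", "")))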
1,405
<gh_stars>1000+ package com.android.internal.telephony; import android.os.Binder; import android.os.IBinder; import android.os.IInterface; import android.os.Parcel; public interface ITelephony extends IInterface { public abstract class Stub extends Binder implements ITelephony { class Proxy implements ITelephony { /* renamed from: a reason: collision with root package name */ private IBinder f35a; @Override // com.android.internal.telephony.ITelephony public void answerRingingCall() { Parcel obtain = Parcel.obtain(); Parcel obtain2 = Parcel.obtain(); try { obtain.writeInterfaceToken("com.android.internal.telephony.ITelephony"); this.f35a.transact(1, obtain, obtain2, 0); obtain2.readException(); } finally { obtain2.recycle(); obtain.recycle(); } } public IBinder asBinder() { return this.f35a; } @Override // com.android.internal.telephony.ITelephony public boolean endCall() { boolean z = false; Parcel obtain = Parcel.obtain(); Parcel obtain2 = Parcel.obtain(); try { obtain.writeInterfaceToken("com.android.internal.telephony.ITelephony"); this.f35a.transact(3, obtain, obtain2, 0); obtain2.readException(); if (obtain2.readInt() != 0) { z = true; } return z; } finally { obtain2.recycle(); obtain.recycle(); } } @Override // com.android.internal.telephony.ITelephony public void silenceRinger() { Parcel obtain = Parcel.obtain(); Parcel obtain2 = Parcel.obtain(); try { obtain.writeInterfaceToken("com.android.internal.telephony.ITelephony"); this.f35a.transact(2, obtain, obtain2, 0); obtain2.readException(); } finally { obtain2.recycle(); obtain.recycle(); } } } public Stub() { attachInterface(this, "com.android.internal.telephony.ITelephony"); } @Override // android.os.Binder public boolean onTransact(int i, Parcel parcel, Parcel parcel2, int i2) { switch (i) { case 1: parcel.enforceInterface("com.android.internal.telephony.ITelephony"); answerRingingCall(); parcel2.writeNoException(); return true; case 2: parcel.enforceInterface("com.android.internal.telephony.ITelephony"); silenceRinger(); parcel2.writeNoException(); return true; case 3: parcel.enforceInterface("com.android.internal.telephony.ITelephony"); boolean endCall = endCall(); parcel2.writeNoException(); parcel2.writeInt(endCall ? 1 : 0); return true; case 1598968902: parcel2.writeString("com.android.internal.telephony.ITelephony"); return true; default: return super.onTransact(i, parcel, parcel2, i2); } } } void answerRingingCall(); boolean endCall(); void silenceRinger(); }
2,040
327
// # thread local #############################################################
#pragma once

#ifdef __GNUC__
# define THREAD_LOCAL __thread
#elif defined _MSC_VER || defined __INTEL_COMPILER
# define THREAD_LOCAL __declspec(thread)
#elif __STDC_VERSION__ >= 201112L
# define THREAD_LOCAL _Thread_local
#else
# define THREAD_LOCAL
#endif
118
602
<filename>infrastructure/src/main/java/org/corfudb/infrastructure/management/CompleteGraphAdvisor.java package org.corfudb.infrastructure.management; import com.google.common.collect.ImmutableList; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; import org.corfudb.infrastructure.management.failuredetector.ClusterGraph; import org.corfudb.protocols.wireprotocol.ClusterState; import org.corfudb.protocols.wireprotocol.failuredetector.NodeRank; import java.util.List; import java.util.Optional; /** * This class is an implementation of {@link ClusterType} where the ideal state * of the Corfu cluster is a fully connected network (i.e. complete graph) in which there is an * active link amongst all nodes in the cluster. Failed and Healed nodes are recommended so that * the cluster remains fully connected. * <p> * Created by <NAME> on 10/27/18. */ @Slf4j public class CompleteGraphAdvisor implements ClusterAdvisor { private static final ClusterType CLUSTER_TYPE = ClusterType.COMPLETE_GRAPH; private final String localEndpoint; public CompleteGraphAdvisor(@NonNull String localEndpoint) { this.localEndpoint = localEndpoint; } @Override public ClusterType getType() { return CLUSTER_TYPE; } /** * Provides list of servers from a given layout(epoch) that this implementation of * COMPLETE_GRAPH algorithm has determined as failed. The implementation of the * algorithm in this method is an approach by executing the following steps: * <p> * The failed node is the recommendation of this strategy which their removal * from cluster will lead to a fully connected cluster. * <p> * To find a failed node: * - get decision maker * - find a failed node. Check if decision maker is not equal to failed node and failed node is not in unresponsive * servers already. * * @param clusterState represents the state of connectivity amongst the Corfu cluster * nodes from a node's perspective. * @return a server considered as failed according to the underlying strategy. */ @Override public Optional<NodeRank> failedServer(ClusterState clusterState) { log.trace("Detecting failed nodes for: ClusterState= {}", clusterState); ClusterGraph graph = ClusterGraph.toClusterGraph(clusterState); ClusterGraph symmetric = graph.toSymmetric(); Optional<NodeRank> maybeDecisionMaker = symmetric.getDecisionMaker(); if (!maybeDecisionMaker.isPresent()) { log.error("Decision maker not found for graph: {}", symmetric.toJson()); return Optional.empty(); } NodeRank decisionMaker = maybeDecisionMaker.get(); if (!decisionMaker.is(localEndpoint)) { String message = "The node can't be a decision maker, skip operation. Decision maker node is: {}"; log.trace(message, decisionMaker); return Optional.empty(); } Optional<NodeRank> maybeFailedNode = symmetric.findFailedNode(); if (!maybeFailedNode.isPresent()) { return Optional.empty(); } NodeRank failedNode = maybeFailedNode.get(); if (decisionMaker.equals(failedNode)) { log.error("Decision maker and failed node are same node: {}", decisionMaker); return Optional.empty(); } ImmutableList<String> unresponsiveNodes = clusterState.getUnresponsiveNodes(); if (unresponsiveNodes.contains(failedNode.getEndpoint())) { log.trace("Failed node already in the list of unresponsive nodes: {}", unresponsiveNodes); return Optional.empty(); } log.debug("Failed node found: {}", failedNode); return Optional.of(failedNode); } /** * Provide a server considered to have healed in the Corfu cluster according to * the COMPLETE_GRAPH implementation of algorithm for * {@link ClusterType}. * <p> * The node can heal only itself. 
The node responsible only for itself, can't heal other nodes. * It simplifies healing algorithm and guaranties that if node became available it mark itself as a responsible * node in the layout. It helps us to simplify analysis/debugging process and brings simple and reliable algorithm * for healing process. * * @param clusterState represents the state of connectivity amongst the Corfu cluster * nodes from a node's perspective. * @return a {@link List} of servers considered as healed according to the underlying * {@link ClusterType}. */ @Override public Optional<NodeRank> healedServer(ClusterState clusterState) { log.trace("Detecting the healed nodes for: ClusterState: {}", clusterState); ImmutableList<String> unresponsiveNodes = clusterState.getUnresponsiveNodes(); if (unresponsiveNodes.isEmpty()) { log.trace("All nodes responsive. Nothing to heal"); return Optional.empty(); } if (!unresponsiveNodes.contains(localEndpoint)) { log.trace("Local node is responsive. Nothing to heal"); return Optional.empty(); } //Transform a ClusterState to the ClusterGraph and make it symmetric (symmetric failures) ClusterGraph symmetricGraph = ClusterGraph.toClusterGraph(clusterState).toSymmetric(); //See if local node is healed. return symmetricGraph.findFullyConnectedNode(localEndpoint); } /** * Returns a new cluster graph from the cluster state * * @param clusterState a cluster state * @return a transformed cluster graph */ @Override public ClusterGraph getGraph(ClusterState clusterState) { return ClusterGraph.toClusterGraph(clusterState).toSymmetric(); } }
2,035
1,452
<reponame>daniu22/Cheetah-Software #include "vn/protocol/spi.h" #include <string.h> #include "vn/util.h" #define UNUSED(x) (void)(sizeof(x)) VnError VnSpi_genGenericCommand( char cmdId, char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { size_t i; if (*size < 1 || *size < desiredLength) return E_BUFFER_TOO_SMALL; buffer[0] = cmdId; for (i = 1; i < desiredLength; i++) buffer[i] = 0x00; *responseSize = 2; *size = desiredLength > 1 ? desiredLength : 1; return E_NONE; } VnError VnSpi_genWriteSettings( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { return VnSpi_genGenericCommand( 3, buffer, size, desiredLength, responseSize); } VnError VnSpi_genRestorFactorySettings( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { return VnSpi_genGenericCommand( 4, buffer, size, desiredLength, responseSize); } VnError VnSpi_genTare( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { return VnSpi_genGenericCommand( 5, buffer, size, desiredLength, responseSize); } VnError VnSpi_genReset( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { return VnSpi_genGenericCommand( 6, buffer, size, desiredLength, responseSize); } VnError VnSpi_genRead( char* buffer, size_t* size, uint8_t regId, size_t desiredLength) { size_t i; if (*size < 4 || *size < desiredLength) return E_BUFFER_TOO_SMALL; buffer[0] = 0x01; buffer[1] = regId; buffer[2] = 0x00; buffer[3] = 0x00; for (i = 4; i < desiredLength; i++) buffer[i] = 0x00; *size = desiredLength > 3 ? desiredLength : 3; return E_NONE; } VnError VnSpi_parseUserTag( const char* response, char* tag, size_t tagLength) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; if (tagLength < strlen(pos) + 1) return E_BUFFER_TOO_SMALL; #if defined(_MSC_VER) /* Disable warnings regarding using strcpy_s since this * function's signature does not provide us with information * about the length of 'out'. */ #pragma warning(push) #pragma warning(disable:4996) #endif strcpy(tag, pos); #if defined(_MSC_VER) #pragma warning(pop) #endif return E_NONE; } VnError VnSpi_parseModelNumber( const char* response, char* productName, size_t productNameLength) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; if (productNameLength < strlen(pos) + 1) return E_BUFFER_TOO_SMALL; #if defined(_MSC_VER) /* Disable warnings regarding using strcpy_s since this * function's signature does not provide us with information * about the length of 'out'. 
*/ #pragma warning(push) #pragma warning(disable:4996) #endif strcpy(productName, pos); #if defined(_MSC_VER) #pragma warning(pop) #endif return E_NONE; } VnError VnSpi_parseHardwareRevision( const char* response, uint32_t* revision) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *revision = VnUtil_extractUint32(pos); pos += sizeof(uint32_t); return E_NONE; } VnError VnSpi_parseSerialNumber( const char* response, uint32_t* serialNum) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *serialNum = VnUtil_extractUint32(pos); pos += sizeof(uint32_t); return E_NONE; } VnError VnSpi_parseFirmwareVersion( const char* response, char* firmwareVersion, size_t firmwareVersionLength) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; if (firmwareVersionLength < strlen(pos) + 1) return E_BUFFER_TOO_SMALL; #if defined(_MSC_VER) /* Disable warnings regarding using strcpy_s since this * function's signature does not provide us with information * about the length of 'out'. */ #pragma warning(push) #pragma warning(disable:4996) #endif strcpy(firmwareVersion, pos); #if defined(_MSC_VER) #pragma warning(pop) #endif return E_NONE; } VnError VnSpi_parseSerialBaudRate( const char* response, uint32_t* baudrate) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *baudrate = VnUtil_extractUint32(pos); pos += sizeof(uint32_t); return E_NONE; } VnError VnSpi_parseAsyncDataOutputType( const char* response, uint32_t* ador) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *ador = VnUtil_extractUint32(pos); pos += sizeof(uint32_t); return E_NONE; } VnError VnSpi_parseAsyncDataOutputFrequency( const char* response, uint32_t* adof) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *adof = VnUtil_extractUint32(pos); pos += sizeof(uint32_t); return E_NONE; } VnError VnSpi_parseYawPitchRoll( const char* response, vec3f* yawPitchRoll) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *yawPitchRoll = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseAttitudeQuaternion( const char* response, vec4f* quat) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *quat = VnUtil_extractVec4f(pos); pos += 4 * sizeof(float); return E_NONE; } VnError VnSpi_parseQuaternionMagneticAccelerationAndAngularRates( const char* response, vec4f* quat, vec3f* mag, vec3f* accel, vec3f* gyro) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *quat = VnUtil_extractVec4f(pos); pos += 4 * sizeof(float); *mag = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *accel = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *gyro = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseMagneticMeasurements( const char* response, vec3f* mag) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *mag = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseAccelerationMeasurements( const char* response, vec3f* accel) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *accel = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError 
VnSpi_parseAngularRateMeasurements( const char* response, vec3f* gyro) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *gyro = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseMagneticAccelerationAndAngularRates( const char* response, vec3f* mag, vec3f* accel, vec3f* gyro) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *mag = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *accel = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *gyro = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseMagneticAndGravityReferenceVectors( const char* response, vec3f* magRef, vec3f* accRef) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *magRef = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *accRef = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseFilterMeasurementsVarianceParameters( const char* response, float* angularWalkVariance, vec3f* angularRateVariance, vec3f* magneticVariance, vec3f* accelerationVariance) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *angularWalkVariance = VnUtil_extractFloat(pos); pos += sizeof(float); *angularRateVariance = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *magneticVariance = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *accelerationVariance = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseMagnetometerCompensation( const char* response, mat3f* c, vec3f* b) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *c = VnUtil_extractMat3f(pos); pos += 9 * sizeof(float); *b = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseFilterActiveTuningParameters( const char* response, float* magneticDisturbanceGain, float* accelerationDisturbanceGain, float* magneticDisturbanceMemory, float* accelerationDisturbanceMemory) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *magneticDisturbanceGain = VnUtil_extractFloat(pos); pos += sizeof(float); *accelerationDisturbanceGain = VnUtil_extractFloat(pos); pos += sizeof(float); *magneticDisturbanceMemory = VnUtil_extractFloat(pos); pos += sizeof(float); *accelerationDisturbanceMemory = VnUtil_extractFloat(pos); pos += sizeof(float); return E_NONE; } VnError VnSpi_parseAccelerationCompensation( const char* response, mat3f* c, vec3f* b) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *c = VnUtil_extractMat3f(pos); pos += 9 * sizeof(float); *b = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseReferenceFrameRotation( const char* response, mat3f* c) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *c = VnUtil_extractMat3f(pos); pos += 9 * sizeof(float); return E_NONE; } VnError VnSpi_parseYawPitchRollMagneticAccelerationAndAngularRates( const char* response, vec3f* yawPitchRoll, vec3f* mag, vec3f* accel, vec3f* gyro) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *yawPitchRoll = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *mag = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *accel = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *gyro = 
VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseCommunicationProtocolControl( const char* response, uint8_t* serialCount, uint8_t* serialStatus, uint8_t* spiCount, uint8_t* spiStatus, uint8_t* serialChecksum, uint8_t* spiChecksum, uint8_t* errorMode) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *serialCount = (uint8_t) *pos; pos += sizeof(uint8_t); *serialStatus = (uint8_t) *pos; pos += sizeof(uint8_t); *spiCount = (uint8_t) *pos; pos += sizeof(uint8_t); *spiStatus = (uint8_t) *pos; pos += sizeof(uint8_t); *serialChecksum = (uint8_t) *pos; pos += sizeof(uint8_t); *spiChecksum = (uint8_t) *pos; pos += sizeof(uint8_t); *errorMode = (uint8_t) *pos; pos += sizeof(uint8_t); return E_NONE; } VnError VnSpi_parseSynchronizationControl( const char* response, uint8_t* syncInMode, uint8_t* syncInEdge, uint16_t* syncInSkipFactor, uint32_t* reserved1, uint8_t* syncOutMode, uint8_t* syncOutPolarity, uint16_t* syncOutSkipFactor, uint32_t* syncOutPulseWidth, uint32_t* reserved2) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *syncInMode = (uint8_t) *pos; pos += sizeof(uint8_t); *syncInEdge = (uint8_t) *pos; pos += sizeof(uint8_t); *syncInSkipFactor = VnUtil_extractUint16(pos); pos += sizeof(uint16_t); *reserved1 = VnUtil_extractUint32(pos); pos += sizeof(uint32_t); *syncOutMode = (uint8_t) *pos; pos += sizeof(uint8_t); *syncOutPolarity = (uint8_t) *pos; pos += sizeof(uint8_t); *syncOutSkipFactor = VnUtil_extractUint16(pos); pos += sizeof(uint16_t); *syncOutPulseWidth = VnUtil_extractUint32(pos); pos += sizeof(uint32_t); *reserved2 = VnUtil_extractUint32(pos); pos += sizeof(uint32_t); return E_NONE; } VnError VnSpi_parseSynchronizationStatus( const char* response, uint32_t* syncInCount, uint32_t* syncInTime, uint32_t* syncOutCount) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *syncInCount = VnUtil_extractUint32(pos); pos += sizeof(uint32_t); *syncInTime = VnUtil_extractUint32(pos); pos += sizeof(uint32_t); *syncOutCount = VnUtil_extractUint32(pos); pos += sizeof(uint32_t); return E_NONE; } VnError VnSpi_parseFilterBasicControl( const char* response, uint8_t* magMode, uint8_t* extMagMode, uint8_t* extAccMode, uint8_t* extGyroMode, vec3f* gyroLimit) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *magMode = (uint8_t) *pos; pos += sizeof(uint8_t); *extMagMode = (uint8_t) *pos; pos += sizeof(uint8_t); *extAccMode = (uint8_t) *pos; pos += sizeof(uint8_t); *extGyroMode = (uint8_t) *pos; pos += sizeof(uint8_t); *gyroLimit = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseVpeBasicControl( const char* response, uint8_t* enable, uint8_t* headingMode, uint8_t* filteringMode, uint8_t* tuningMode) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *enable = (uint8_t) *pos; pos += sizeof(uint8_t); *headingMode = (uint8_t) *pos; pos += sizeof(uint8_t); *filteringMode = (uint8_t) *pos; pos += sizeof(uint8_t); *tuningMode = (uint8_t) *pos; pos += sizeof(uint8_t); return E_NONE; } VnError VnSpi_parseVpeMagnetometerBasicTuning( const char* response, vec3f* baseTuning, vec3f* adaptiveTuning, vec3f* adaptiveFiltering) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *baseTuning = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *adaptiveTuning = 
VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *adaptiveFiltering = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseVpeMagnetometerAdvancedTuning( const char* response, vec3f* minFiltering, vec3f* maxFiltering, float* maxAdaptRate, float* disturbanceWindow, float* maxTuning) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *minFiltering = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *maxFiltering = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *maxAdaptRate = VnUtil_extractFloat(pos); pos += sizeof(float); *disturbanceWindow = VnUtil_extractFloat(pos); pos += sizeof(float); *maxTuning = VnUtil_extractFloat(pos); pos += sizeof(float); return E_NONE; } VnError VnSpi_parseVpeAccelerometerBasicTuning( const char* response, vec3f* baseTuning, vec3f* adaptiveTuning, vec3f* adaptiveFiltering) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *baseTuning = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *adaptiveTuning = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *adaptiveFiltering = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseVpeAccelerometerAdvancedTuning( const char* response, vec3f* minFiltering, vec3f* maxFiltering, float* maxAdaptRate, float* disturbanceWindow, float* maxTuning) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *minFiltering = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *maxFiltering = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *maxAdaptRate = VnUtil_extractFloat(pos); pos += sizeof(float); *disturbanceWindow = VnUtil_extractFloat(pos); pos += sizeof(float); *maxTuning = VnUtil_extractFloat(pos); pos += sizeof(float); return E_NONE; } VnError VnSpi_parseVpeGyroBasicTuning( const char* response, vec3f* angularWalkVariance, vec3f* baseTuning, vec3f* adaptiveTuning) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *angularWalkVariance = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *baseTuning = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *adaptiveTuning = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseFilterStartupGyroBias( const char* response, vec3f* bias) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *bias = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseMagnetometerCalibrationControl( const char* response, uint8_t* hsiMode, uint8_t* hsiOutput, uint8_t* convergeRate) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *hsiMode = (uint8_t) *pos; pos += sizeof(uint8_t); *hsiOutput = (uint8_t) *pos; pos += sizeof(uint8_t); *convergeRate = (uint8_t) *pos; pos += sizeof(uint8_t); return E_NONE; } VnError VnSpi_parseCalculatedMagnetometerCalibration( const char* response, mat3f* c, vec3f* b) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *c = VnUtil_extractMat3f(pos); pos += 9 * sizeof(float); *b = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseIndoorHeadingModeControl( const char* response, float* maxRateError, uint8_t* reserved1) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *maxRateError = VnUtil_extractFloat(pos); pos += sizeof(float); *reserved1 = 
(uint8_t) *pos; pos += sizeof(uint8_t); return E_NONE; } VnError VnSpi_parseVelocityCompensationMeasurement( const char* response, vec3f* velocity) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *velocity = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseVelocityCompensationControl( const char* response, uint8_t* mode, float* velocityTuning, float* rateTuning) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *mode = (uint8_t) *pos; pos += sizeof(uint8_t); *velocityTuning = VnUtil_extractFloat(pos); pos += sizeof(float); *rateTuning = VnUtil_extractFloat(pos); pos += sizeof(float); return E_NONE; } VnError VnSpi_parseVelocityCompensationStatus( const char* response, float* x, float* xDot, vec3f* accelOffset, vec3f* omega) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *x = VnUtil_extractFloat(pos); pos += sizeof(float); *xDot = VnUtil_extractFloat(pos); pos += sizeof(float); *accelOffset = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *omega = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseImuMeasurements( const char* response, vec3f* mag, vec3f* accel, vec3f* gyro, float* temp, float* pressure) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *mag = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *accel = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *gyro = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *temp = VnUtil_extractFloat(pos); pos += sizeof(float); *pressure = VnUtil_extractFloat(pos); pos += sizeof(float); return E_NONE; } VnError VnSpi_parseGpsConfiguration( const char* response, uint8_t* mode, uint8_t* ppsSource, uint8_t* reserved1, uint8_t* reserved2, uint8_t* reserved3) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *mode = (uint8_t) *pos; pos += sizeof(uint8_t); *ppsSource = (uint8_t) *pos; pos += sizeof(uint8_t); *reserved1 = (uint8_t) *pos; pos += sizeof(uint8_t); *reserved2 = (uint8_t) *pos; pos += sizeof(uint8_t); *reserved3 = (uint8_t) *pos; pos += sizeof(uint8_t); return E_NONE; } VnError VnSpi_parseGpsAntennaOffset( const char* response, vec3f* position) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *position = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseGpsSolutionLla( const char* response, double* time, uint16_t* week, uint8_t* gpsFix, uint8_t* numSats, vec3d* lla, vec3f* nedVel, vec3f* nedAcc, float* speedAcc, float* timeAcc) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *time = VnUtil_extractDouble(pos); pos += sizeof(double); *week = VnUtil_extractUint16(pos); pos += sizeof(uint16_t); *gpsFix = (uint8_t) *pos; pos += sizeof(uint8_t); *numSats = (uint8_t) *pos; pos += sizeof(uint8_t); pos += 4; *lla = VnUtil_extractVec3d(pos); pos += 3 * sizeof(double); *nedVel = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *nedAcc = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *speedAcc = VnUtil_extractFloat(pos); pos += sizeof(float); *timeAcc = VnUtil_extractFloat(pos); pos += sizeof(float); return E_NONE; } VnError VnSpi_parseGpsSolutionEcef( const char* response, double* tow, uint16_t* week, uint8_t* gpsFix, uint8_t* numSats, vec3d* position, vec3f* velocity, vec3f* posAcc, float* 
speedAcc, float* timeAcc) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *tow = VnUtil_extractDouble(pos); pos += sizeof(double); *week = VnUtil_extractUint16(pos); pos += sizeof(uint16_t); *gpsFix = (uint8_t) *pos; pos += sizeof(uint8_t); *numSats = (uint8_t) *pos; pos += sizeof(uint8_t); pos += 4; *position = VnUtil_extractVec3d(pos); pos += 3 * sizeof(double); *velocity = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *posAcc = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *speedAcc = VnUtil_extractFloat(pos); pos += sizeof(float); *timeAcc = VnUtil_extractFloat(pos); pos += sizeof(float); return E_NONE; } VnError VnSpi_parseInsSolutionLla( const char* response, double* time, uint16_t* week, uint16_t* status, vec3f* yawPitchRoll, vec3d* position, vec3f* nedVel, float* attUncertainty, float* posUncertainty, float* velUncertainty) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *time = VnUtil_extractDouble(pos); pos += sizeof(double); *week = VnUtil_extractUint16(pos); pos += sizeof(uint16_t); /* Use this cast to avoid a compile warning. */ UNUSED(status); *yawPitchRoll = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *position = VnUtil_extractVec3d(pos); pos += 3 * sizeof(double); *nedVel = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *attUncertainty = VnUtil_extractFloat(pos); pos += sizeof(float); *posUncertainty = VnUtil_extractFloat(pos); pos += sizeof(float); *velUncertainty = VnUtil_extractFloat(pos); pos += sizeof(float); return E_NONE; } VnError VnSpi_parseInsSolutionEcef( const char* response, double* time, uint16_t* week, uint16_t* status, vec3f* yawPitchRoll, vec3d* position, vec3f* velocity, float* attUncertainty, float* posUncertainty, float* velUncertainty) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *time = VnUtil_extractDouble(pos); pos += sizeof(double); *week = VnUtil_extractUint16(pos); pos += sizeof(uint16_t); /* Use this cast to avoid a compile warning. 
*/ UNUSED(status); *yawPitchRoll = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *position = VnUtil_extractVec3d(pos); pos += 3 * sizeof(double); *velocity = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *attUncertainty = VnUtil_extractFloat(pos); pos += sizeof(float); *posUncertainty = VnUtil_extractFloat(pos); pos += sizeof(float); *velUncertainty = VnUtil_extractFloat(pos); pos += sizeof(float); return E_NONE; } VnError VnSpi_parseInsBasicConfiguration( const char* response, uint8_t* scenario, uint8_t* ahrsAiding, uint8_t* estBaseline, uint8_t* resv2) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *scenario = (uint8_t) *pos; pos += sizeof(uint8_t); *ahrsAiding = (uint8_t) *pos; pos += sizeof(uint8_t); *estBaseline = (uint8_t) *pos; pos += sizeof(uint8_t); *resv2 = (uint8_t) *pos; pos += sizeof(uint8_t); return E_NONE; } VnError VnSpi_parseInsAdvancedConfiguration( const char* response, uint8_t* useMag, uint8_t* usePres, uint8_t* posAtt, uint8_t* velAtt, uint8_t* velBias, uint8_t* useFoam, uint8_t* gpsCovType, uint8_t* velCount, float* velInit, float* moveOrigin, float* gpsTimeout, float* deltaLimitPos, float* deltaLimitVel, float* minPosUncertainty, float* minVelUncertainty) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *useMag = (uint8_t) *pos; pos += sizeof(uint8_t); *usePres = (uint8_t) *pos; pos += sizeof(uint8_t); *posAtt = (uint8_t) *pos; pos += sizeof(uint8_t); *velAtt = (uint8_t) *pos; pos += sizeof(uint8_t); *velBias = (uint8_t) *pos; pos += sizeof(uint8_t); *useFoam = (uint8_t) *pos; pos += sizeof(uint8_t); *gpsCovType = (uint8_t) *pos; pos += sizeof(uint8_t); *velCount = (uint8_t) *pos; pos += sizeof(uint8_t); *velInit = VnUtil_extractFloat(pos); pos += sizeof(float); *moveOrigin = VnUtil_extractFloat(pos); pos += sizeof(float); *gpsTimeout = VnUtil_extractFloat(pos); pos += sizeof(float); *deltaLimitPos = VnUtil_extractFloat(pos); pos += sizeof(float); *deltaLimitVel = VnUtil_extractFloat(pos); pos += sizeof(float); *minPosUncertainty = VnUtil_extractFloat(pos); pos += sizeof(float); *minVelUncertainty = VnUtil_extractFloat(pos); pos += sizeof(float); return E_NONE; } VnError VnSpi_parseInsStateLla( const char* response, vec3f* yawPitchRoll, vec3d* position, vec3f* velocity, vec3f* accel, vec3f* angularRate) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *yawPitchRoll = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *position = VnUtil_extractVec3d(pos); pos += 3 * sizeof(double); *velocity = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *accel = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *angularRate = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseInsStateEcef( const char* response, vec3f* yawPitchRoll, vec3d* position, vec3f* velocity, vec3f* accel, vec3f* angularRate) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *yawPitchRoll = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *position = VnUtil_extractVec3d(pos); pos += 3 * sizeof(double); *velocity = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *accel = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *angularRate = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseStartupFilterBiasEstimate( const char* response, vec3f* gyroBias, vec3f* accelBias, float* pressureBias) { const char* pos = response + 3; if 
(*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *gyroBias = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *accelBias = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *pressureBias = VnUtil_extractFloat(pos); pos += sizeof(float); return E_NONE; } VnError VnSpi_parseDeltaThetaAndDeltaVelocity( const char* response, float* deltaTime, vec3f* deltaTheta, vec3f* deltaVelocity) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *deltaTime = VnUtil_extractFloat(pos); pos += sizeof(float); *deltaTheta = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *deltaVelocity = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseDeltaThetaAndDeltaVelocityConfiguration( const char* response, uint8_t* integrationFrame, uint8_t* gyroCompensation, uint8_t* accelCompensation, uint8_t* reserved1, uint16_t* reserved2) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *integrationFrame = (uint8_t) *pos; pos += sizeof(uint8_t); *gyroCompensation = (uint8_t) *pos; pos += sizeof(uint8_t); *accelCompensation = (uint8_t) *pos; pos += sizeof(uint8_t); *reserved1 = (uint8_t) *pos; pos += sizeof(uint8_t); *reserved2 = VnUtil_extractUint16(pos); pos += sizeof(uint16_t); return E_NONE; } VnError VnSpi_parseReferenceVectorConfiguration( const char* response, uint8_t* useMagModel, uint8_t* useGravityModel, uint8_t* resv1, uint8_t* resv2, uint32_t* recalcThreshold, float* year, vec3d* position) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *useMagModel = (uint8_t) *pos; pos += sizeof(uint8_t); *useGravityModel = (uint8_t) *pos; pos += sizeof(uint8_t); *resv1 = (uint8_t) *pos; pos += sizeof(uint8_t); *resv2 = (uint8_t) *pos; pos += sizeof(uint8_t); *recalcThreshold = VnUtil_extractUint32(pos); pos += sizeof(uint32_t); *year = VnUtil_extractFloat(pos); pos += sizeof(float); pos += 4; *position = VnUtil_extractVec3d(pos); pos += 3 * sizeof(double); return E_NONE; } VnError VnSpi_parseGyroCompensation( const char* response, mat3f* c, vec3f* b) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *c = VnUtil_extractMat3f(pos); pos += 9 * sizeof(float); *b = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseImuFilteringConfiguration( const char* response, uint16_t* magWindowSize, uint16_t* accelWindowSize, uint16_t* gyroWindowSize, uint16_t* tempWindowSize, uint16_t* presWindowSize, uint8_t* magFilterMode, uint8_t* accelFilterMode, uint8_t* gyroFilterMode, uint8_t* tempFilterMode, uint8_t* presFilterMode) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *magWindowSize = VnUtil_extractUint16(pos); pos += sizeof(uint16_t); *accelWindowSize = VnUtil_extractUint16(pos); pos += sizeof(uint16_t); *gyroWindowSize = VnUtil_extractUint16(pos); pos += sizeof(uint16_t); *tempWindowSize = VnUtil_extractUint16(pos); pos += sizeof(uint16_t); *presWindowSize = VnUtil_extractUint16(pos); pos += sizeof(uint16_t); *magFilterMode = (uint8_t) *pos; pos += sizeof(uint8_t); *accelFilterMode = (uint8_t) *pos; pos += sizeof(uint8_t); *gyroFilterMode = (uint8_t) *pos; pos += sizeof(uint8_t); *tempFilterMode = (uint8_t) *pos; pos += sizeof(uint8_t); *presFilterMode = (uint8_t) *pos; pos += sizeof(uint8_t); return E_NONE; } VnError VnSpi_parseGpsCompassBaseline( const char* response, vec3f* position, vec3f* uncertainty) { const char* pos = 
response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *position = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *uncertainty = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseGpsCompassEstimatedBaseline( const char* response, uint8_t* estBaselineUsed, uint8_t* resv, uint16_t* numMeas, vec3f* position, vec3f* uncertainty) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *estBaselineUsed = (uint8_t) *pos; pos += sizeof(uint8_t); *resv = (uint8_t) *pos; pos += sizeof(uint8_t); *numMeas = VnUtil_extractUint16(pos); pos += sizeof(uint16_t); *position = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *uncertainty = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseImuRateConfiguration( const char* response, uint16_t* imuRate, uint16_t* navDivisor, float* filterTargetRate, float* filterMinRate) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *imuRate = VnUtil_extractUint16(pos); pos += sizeof(uint16_t); *navDivisor = VnUtil_extractUint16(pos); pos += sizeof(uint16_t); *filterTargetRate = VnUtil_extractFloat(pos); pos += sizeof(float); *filterMinRate = VnUtil_extractFloat(pos); pos += sizeof(float); return E_NONE; } VnError VnSpi_parseYawPitchRollTrueBodyAccelerationAndAngularRates( const char* response, vec3f* yawPitchRoll, vec3f* bodyAccel, vec3f* gyro) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *yawPitchRoll = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *bodyAccel = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *gyro = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_parseYawPitchRollTrueInertialAccelerationAndAngularRates( const char* response, vec3f* yawPitchRoll, vec3f* inertialAccel, vec3f* gyro) { const char* pos = response + 3; if (*pos != 0) return *pos + E_SENSOR_HARD_FAULT - 1; pos++; *yawPitchRoll = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *inertialAccel = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); *gyro = VnUtil_extractVec3f(pos); pos += 3 * sizeof(float); return E_NONE; } VnError VnSpi_genReadUserTag(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 4; return VnSpi_genRead(buffer, size, 0, desiredLength); } VnError VnSpi_genReadModelNumber(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 4; return VnSpi_genRead(buffer, size, 1, desiredLength); } VnError VnSpi_genReadHardwareRevision(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 8; return VnSpi_genRead(buffer, size, 2, desiredLength); } VnError VnSpi_genReadSerialNumber(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 8; return VnSpi_genRead(buffer, size, 3, desiredLength); } VnError VnSpi_genReadFirmwareVersion(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 4; return VnSpi_genRead(buffer, size, 4, desiredLength); } VnError VnSpi_genReadSerialBaudRate(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 9; return VnSpi_genRead(buffer, size, 5, desiredLength); } VnError VnSpi_genReadAsyncDataOutputType(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 9; return VnSpi_genRead(buffer, size, 6, desiredLength); } VnError 
VnSpi_genReadAsyncDataOutputFrequency(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 9; return VnSpi_genRead(buffer, size, 7, desiredLength); } VnError VnSpi_genReadYawPitchRoll(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 16; return VnSpi_genRead(buffer, size, 8, desiredLength); } VnError VnSpi_genReadAttitudeQuaternion(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 20; return VnSpi_genRead(buffer, size, 9, desiredLength); } VnError VnSpi_genReadQuaternionMagneticAccelerationAndAngularRates(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 56; return VnSpi_genRead(buffer, size, 15, desiredLength); } VnError VnSpi_genReadMagneticMeasurements(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 16; return VnSpi_genRead(buffer, size, 17, desiredLength); } VnError VnSpi_genReadAccelerationMeasurements(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 16; return VnSpi_genRead(buffer, size, 18, desiredLength); } VnError VnSpi_genReadAngularRateMeasurements(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 16; return VnSpi_genRead(buffer, size, 19, desiredLength); } VnError VnSpi_genReadMagneticAccelerationAndAngularRates(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 40; return VnSpi_genRead(buffer, size, 20, desiredLength); } VnError VnSpi_genReadMagneticAndGravityReferenceVectors(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 28; return VnSpi_genRead(buffer, size, 21, desiredLength); } VnError VnSpi_genReadMagnetometerCompensation(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 52; return VnSpi_genRead(buffer, size, 23, desiredLength); } VnError VnSpi_genReadAccelerationCompensation(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 52; return VnSpi_genRead(buffer, size, 25, desiredLength); } VnError VnSpi_genReadReferenceFrameRotation(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 40; return VnSpi_genRead(buffer, size, 26, desiredLength); } VnError VnSpi_genReadYawPitchRollMagneticAccelerationAndAngularRates(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 52; return VnSpi_genRead(buffer, size, 27, desiredLength); } VnError VnSpi_genReadCommunicationProtocolControl(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 11; return VnSpi_genRead(buffer, size, 30, desiredLength); } VnError VnSpi_genReadSynchronizationControl(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 24; return VnSpi_genRead(buffer, size, 32, desiredLength); } VnError VnSpi_genReadSynchronizationStatus(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 16; return VnSpi_genRead(buffer, size, 33, desiredLength); } VnError VnSpi_genReadVpeBasicControl(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 8; return VnSpi_genRead(buffer, size, 35, desiredLength); } VnError VnSpi_genReadVpeMagnetometerBasicTuning(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 40; return VnSpi_genRead(buffer, size, 36, 
desiredLength); } VnError VnSpi_genReadVpeAccelerometerBasicTuning(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 40; return VnSpi_genRead(buffer, size, 38, desiredLength); } VnError VnSpi_genReadMagnetometerCalibrationControl(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 7; return VnSpi_genRead(buffer, size, 44, desiredLength); } VnError VnSpi_genReadCalculatedMagnetometerCalibration(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 52; return VnSpi_genRead(buffer, size, 47, desiredLength); } VnError VnSpi_genReadVelocityCompensationMeasurement(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 16; return VnSpi_genRead(buffer, size, 50, desiredLength); } VnError VnSpi_genReadVelocityCompensationControl(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 13; return VnSpi_genRead(buffer, size, 51, desiredLength); } VnError VnSpi_genReadImuMeasurements(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 48; return VnSpi_genRead(buffer, size, 54, desiredLength); } VnError VnSpi_genReadGpsConfiguration(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 9; return VnSpi_genRead(buffer, size, 55, desiredLength); } VnError VnSpi_genReadGpsAntennaOffset(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 16; return VnSpi_genRead(buffer, size, 57, desiredLength); } VnError VnSpi_genReadGpsSolutionLla(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 76; return VnSpi_genRead(buffer, size, 58, desiredLength); } VnError VnSpi_genReadGpsSolutionEcef(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 76; return VnSpi_genRead(buffer, size, 59, desiredLength); } VnError VnSpi_genReadInsSolutionLla(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 76; return VnSpi_genRead(buffer, size, 63, desiredLength); } VnError VnSpi_genReadInsSolutionEcef(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 76; return VnSpi_genRead(buffer, size, 64, desiredLength); } VnError VnSpi_genReadInsBasicConfiguration(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 8; return VnSpi_genRead(buffer, size, 67, desiredLength); } VnError VnSpi_genReadInsStateLla(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 76; return VnSpi_genRead(buffer, size, 72, desiredLength); } VnError VnSpi_genReadInsStateEcef(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 76; return VnSpi_genRead(buffer, size, 73, desiredLength); } VnError VnSpi_genReadStartupFilterBiasEstimate(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 32; return VnSpi_genRead(buffer, size, 74, desiredLength); } VnError VnSpi_genReadDeltaThetaAndDeltaVelocity(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 32; return VnSpi_genRead(buffer, size, 80, desiredLength); } VnError VnSpi_genReadDeltaThetaAndDeltaVelocityConfiguration(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 10; return VnSpi_genRead(buffer, size, 82, desiredLength); } VnError 
VnSpi_genReadReferenceVectorConfiguration(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 44; return VnSpi_genRead(buffer, size, 83, desiredLength); } VnError VnSpi_genReadGyroCompensation(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 52; return VnSpi_genRead(buffer, size, 84, desiredLength); } VnError VnSpi_genReadImuFilteringConfiguration(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 19; return VnSpi_genRead(buffer, size, 85, desiredLength); } VnError VnSpi_genReadGpsCompassBaseline(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 28; return VnSpi_genRead(buffer, size, 93, desiredLength); } VnError VnSpi_genReadGpsCompassEstimatedBaseline(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 32; return VnSpi_genRead(buffer, size, 97, desiredLength); } VnError VnSpi_genReadYawPitchRollTrueBodyAccelerationAndAngularRates(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 40; return VnSpi_genRead(buffer, size, 239, desiredLength); } VnError VnSpi_genReadYawPitchRollTrueInertialAccelerationAndAngularRates(char* buffer, size_t* size, size_t desiredLength, size_t* responseSize) { *responseSize = 40; return VnSpi_genRead(buffer, size, 240, desiredLength); } VnError VnSpi_genWriteUserTag( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, char* tag) { char* pos = buffer; if (*size < 4 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 4; *pos++ = 2; *pos++ = 0; *pos++ = 0; *pos++ = 0; memcpy(pos, &tag, strlen(tag)); pos += strlen(tag); return E_NONE; } VnError VnSpi_genWriteSerialBaudRate( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, uint32_t baudrate) { char* pos = buffer; if (*size < 9 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 9; *pos++ = 2; *pos++ = 5; *pos++ = 0; *pos++ = 0; baudrate = htos32(baudrate); memcpy(pos, &baudrate, sizeof(uint32_t)); pos += sizeof(uint32_t); return E_NONE; } VnError VnSpi_genWriteAsyncDataOutputType( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, uint32_t ador) { char* pos = buffer; if (*size < 9 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 9; *pos++ = 2; *pos++ = 6; *pos++ = 0; *pos++ = 0; ador = htos32(ador); memcpy(pos, &ador, sizeof(uint32_t)); pos += sizeof(uint32_t); return E_NONE; } VnError VnSpi_genWriteAsyncDataOutputFrequency( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, uint32_t adof) { char* pos = buffer; if (*size < 9 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 9; *pos++ = 2; *pos++ = 7; *pos++ = 0; *pos++ = 0; adof = htos32(adof); memcpy(pos, &adof, sizeof(uint32_t)); pos += sizeof(uint32_t); return E_NONE; } VnError VnSpi_genWriteMagneticAndGravityReferenceVectors( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, vec3f magRef, vec3f accRef) { char* pos = buffer; if (*size < 28 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 28; *pos++ = 2; *pos++ = 21; *pos++ = 0; *pos++ = 0; magRef.c[0] = htosf4(magRef.c[0]); magRef.c[1] = htosf4(magRef.c[1]); magRef.c[2] = htosf4(magRef.c[2]); memcpy(pos, &magRef, sizeof(vec3f)); pos += sizeof(vec3f); accRef.c[0] = htosf4(accRef.c[0]); accRef.c[1] = htosf4(accRef.c[1]); accRef.c[2] = htosf4(accRef.c[2]); memcpy(pos, &accRef, 
sizeof(vec3f)); pos += sizeof(vec3f); return E_NONE; } VnError VnSpi_genWriteMagnetometerCompensation( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, mat3f c, vec3f b) { char* pos = buffer; if (*size < 52 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 52; *pos++ = 2; *pos++ = 23; *pos++ = 0; *pos++ = 0; c.e[0] = htosf4(c.e[0]); c.e[1] = htosf4(c.e[1]); c.e[2] = htosf4(c.e[2]); c.e[3] = htosf4(c.e[3]); c.e[4] = htosf4(c.e[4]); c.e[5] = htosf4(c.e[5]); c.e[6] = htosf4(c.e[6]); c.e[7] = htosf4(c.e[7]); c.e[8] = htosf4(c.e[8]); memcpy(pos, &c, sizeof(mat3f)); pos += sizeof(mat3f); b.c[0] = htosf4(b.c[0]); b.c[1] = htosf4(b.c[1]); b.c[2] = htosf4(b.c[2]); memcpy(pos, &b, sizeof(vec3f)); pos += sizeof(vec3f); return E_NONE; } VnError VnSpi_genWriteAccelerationCompensation( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, mat3f c, vec3f b) { char* pos = buffer; if (*size < 52 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 52; *pos++ = 2; *pos++ = 25; *pos++ = 0; *pos++ = 0; c.e[0] = htosf4(c.e[0]); c.e[1] = htosf4(c.e[1]); c.e[2] = htosf4(c.e[2]); c.e[3] = htosf4(c.e[3]); c.e[4] = htosf4(c.e[4]); c.e[5] = htosf4(c.e[5]); c.e[6] = htosf4(c.e[6]); c.e[7] = htosf4(c.e[7]); c.e[8] = htosf4(c.e[8]); memcpy(pos, &c, sizeof(mat3f)); pos += sizeof(mat3f); b.c[0] = htosf4(b.c[0]); b.c[1] = htosf4(b.c[1]); b.c[2] = htosf4(b.c[2]); memcpy(pos, &b, sizeof(vec3f)); pos += sizeof(vec3f); return E_NONE; } VnError VnSpi_genWriteReferenceFrameRotation( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, mat3f c) { char* pos = buffer; if (*size < 40 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 40; *pos++ = 2; *pos++ = 26; *pos++ = 0; *pos++ = 0; c.e[0] = htosf4(c.e[0]); c.e[1] = htosf4(c.e[1]); c.e[2] = htosf4(c.e[2]); c.e[3] = htosf4(c.e[3]); c.e[4] = htosf4(c.e[4]); c.e[5] = htosf4(c.e[5]); c.e[6] = htosf4(c.e[6]); c.e[7] = htosf4(c.e[7]); c.e[8] = htosf4(c.e[8]); memcpy(pos, &c, sizeof(mat3f)); pos += sizeof(mat3f); return E_NONE; } VnError VnSpi_genWriteCommunicationProtocolControl( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, uint8_t serialCount, uint8_t serialStatus, uint8_t spiCount, uint8_t spiStatus, uint8_t serialChecksum, uint8_t spiChecksum, uint8_t errorMode) { char* pos = buffer; if (*size < 11 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 11; *pos++ = 2; *pos++ = 30; *pos++ = 0; *pos++ = 0; *pos++ = serialCount; *pos++ = serialStatus; *pos++ = spiCount; *pos++ = spiStatus; *pos++ = serialChecksum; *pos++ = spiChecksum; *pos++ = errorMode; return E_NONE; } VnError VnSpi_genWriteSynchronizationControl( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, uint8_t syncInMode, uint8_t syncInEdge, uint16_t syncInSkipFactor, uint32_t reserved1, uint8_t syncOutMode, uint8_t syncOutPolarity, uint16_t syncOutSkipFactor, uint32_t syncOutPulseWidth, uint32_t reserved2) { char* pos = buffer; if (*size < 24 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 24; *pos++ = 2; *pos++ = 32; *pos++ = 0; *pos++ = 0; *pos++ = syncInMode; *pos++ = syncInEdge; syncInSkipFactor = htos16(syncInSkipFactor); memcpy(pos, &syncInSkipFactor, sizeof(uint16_t)); pos += sizeof(uint16_t); reserved1 = htos32(reserved1); memcpy(pos, &reserved1, sizeof(uint32_t)); pos += sizeof(uint32_t); *pos++ = syncOutMode; *pos++ = syncOutPolarity; syncOutSkipFactor = htos16(syncOutSkipFactor); memcpy(pos, &syncOutSkipFactor, 
sizeof(uint16_t)); pos += sizeof(uint16_t); syncOutPulseWidth = htos32(syncOutPulseWidth); memcpy(pos, &syncOutPulseWidth, sizeof(uint32_t)); pos += sizeof(uint32_t); reserved2 = htos32(reserved2); memcpy(pos, &reserved2, sizeof(uint32_t)); pos += sizeof(uint32_t); return E_NONE; } VnError VnSpi_genWriteSynchronizationStatus( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, uint32_t syncInCount, uint32_t syncInTime, uint32_t syncOutCount) { char* pos = buffer; if (*size < 16 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 16; *pos++ = 2; *pos++ = 33; *pos++ = 0; *pos++ = 0; syncInCount = htos32(syncInCount); memcpy(pos, &syncInCount, sizeof(uint32_t)); pos += sizeof(uint32_t); syncInTime = htos32(syncInTime); memcpy(pos, &syncInTime, sizeof(uint32_t)); pos += sizeof(uint32_t); syncOutCount = htos32(syncOutCount); memcpy(pos, &syncOutCount, sizeof(uint32_t)); pos += sizeof(uint32_t); return E_NONE; } VnError VnSpi_genWriteVpeBasicControl( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, uint8_t enable, uint8_t headingMode, uint8_t filteringMode, uint8_t tuningMode) { char* pos = buffer; if (*size < 8 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 8; *pos++ = 2; *pos++ = 35; *pos++ = 0; *pos++ = 0; *pos++ = enable; *pos++ = headingMode; *pos++ = filteringMode; *pos++ = tuningMode; return E_NONE; } VnError VnSpi_genWriteVpeMagnetometerBasicTuning( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, vec3f baseTuning, vec3f adaptiveTuning, vec3f adaptiveFiltering) { char* pos = buffer; if (*size < 40 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 40; *pos++ = 2; *pos++ = 36; *pos++ = 0; *pos++ = 0; baseTuning.c[0] = htosf4(baseTuning.c[0]); baseTuning.c[1] = htosf4(baseTuning.c[1]); baseTuning.c[2] = htosf4(baseTuning.c[2]); memcpy(pos, &baseTuning, sizeof(vec3f)); pos += sizeof(vec3f); adaptiveTuning.c[0] = htosf4(adaptiveTuning.c[0]); adaptiveTuning.c[1] = htosf4(adaptiveTuning.c[1]); adaptiveTuning.c[2] = htosf4(adaptiveTuning.c[2]); memcpy(pos, &adaptiveTuning, sizeof(vec3f)); pos += sizeof(vec3f); adaptiveFiltering.c[0] = htosf4(adaptiveFiltering.c[0]); adaptiveFiltering.c[1] = htosf4(adaptiveFiltering.c[1]); adaptiveFiltering.c[2] = htosf4(adaptiveFiltering.c[2]); memcpy(pos, &adaptiveFiltering, sizeof(vec3f)); pos += sizeof(vec3f); return E_NONE; } VnError VnSpi_genWriteVpeAccelerometerBasicTuning( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, vec3f baseTuning, vec3f adaptiveTuning, vec3f adaptiveFiltering) { char* pos = buffer; if (*size < 40 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 40; *pos++ = 2; *pos++ = 38; *pos++ = 0; *pos++ = 0; baseTuning.c[0] = htosf4(baseTuning.c[0]); baseTuning.c[1] = htosf4(baseTuning.c[1]); baseTuning.c[2] = htosf4(baseTuning.c[2]); memcpy(pos, &baseTuning, sizeof(vec3f)); pos += sizeof(vec3f); adaptiveTuning.c[0] = htosf4(adaptiveTuning.c[0]); adaptiveTuning.c[1] = htosf4(adaptiveTuning.c[1]); adaptiveTuning.c[2] = htosf4(adaptiveTuning.c[2]); memcpy(pos, &adaptiveTuning, sizeof(vec3f)); pos += sizeof(vec3f); adaptiveFiltering.c[0] = htosf4(adaptiveFiltering.c[0]); adaptiveFiltering.c[1] = htosf4(adaptiveFiltering.c[1]); adaptiveFiltering.c[2] = htosf4(adaptiveFiltering.c[2]); memcpy(pos, &adaptiveFiltering, sizeof(vec3f)); pos += sizeof(vec3f); return E_NONE; } VnError VnSpi_genWriteMagnetometerCalibrationControl( char* buffer, size_t* size, size_t desiredLength, size_t* 
responseSize, uint8_t hsiMode, uint8_t hsiOutput, uint8_t convergeRate) { char* pos = buffer; if (*size < 7 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 7; *pos++ = 2; *pos++ = 44; *pos++ = 0; *pos++ = 0; *pos++ = hsiMode; *pos++ = hsiOutput; *pos++ = convergeRate; return E_NONE; } VnError VnSpi_genWriteVelocityCompensationMeasurement( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, vec3f velocity) { char* pos = buffer; if (*size < 16 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 16; *pos++ = 2; *pos++ = 50; *pos++ = 0; *pos++ = 0; velocity.c[0] = htosf4(velocity.c[0]); velocity.c[1] = htosf4(velocity.c[1]); velocity.c[2] = htosf4(velocity.c[2]); memcpy(pos, &velocity, sizeof(vec3f)); pos += sizeof(vec3f); return E_NONE; } VnError VnSpi_genWriteVelocityCompensationControl( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, uint8_t mode, float velocityTuning, float rateTuning) { char* pos = buffer; if (*size < 13 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 13; *pos++ = 2; *pos++ = 51; *pos++ = 0; *pos++ = 0; *pos++ = mode; velocityTuning = htosf4(velocityTuning); memcpy(pos, &velocityTuning, sizeof(float)); pos += sizeof(float); rateTuning = htosf4(rateTuning); memcpy(pos, &rateTuning, sizeof(float)); pos += sizeof(float); return E_NONE; } VnError VnSpi_genWriteGpsConfiguration( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, uint8_t mode, uint8_t ppsSource, uint8_t reserved1, uint8_t reserved2, uint8_t reserved3) { char* pos = buffer; if (*size < 9 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 9; *pos++ = 2; *pos++ = 55; *pos++ = 0; *pos++ = 0; *pos++ = mode; *pos++ = ppsSource; *pos++ = reserved1; *pos++ = reserved2; *pos++ = reserved3; return E_NONE; } VnError VnSpi_genWriteGpsAntennaOffset( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, vec3f position) { char* pos = buffer; if (*size < 16 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 16; *pos++ = 2; *pos++ = 57; *pos++ = 0; *pos++ = 0; position.c[0] = htosf4(position.c[0]); position.c[1] = htosf4(position.c[1]); position.c[2] = htosf4(position.c[2]); memcpy(pos, &position, sizeof(vec3f)); pos += sizeof(vec3f); return E_NONE; } VnError VnSpi_genWriteInsBasicConfiguration( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, uint8_t scenario, uint8_t ahrsAiding, uint8_t estBaseline, uint8_t resv2) { char* pos = buffer; if (*size < 8 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 8; *pos++ = 2; *pos++ = 67; *pos++ = 0; *pos++ = 0; *pos++ = scenario; *pos++ = ahrsAiding; *pos++ = estBaseline; *pos++ = resv2; return E_NONE; } VnError VnSpi_genWriteStartupFilterBiasEstimate( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, vec3f gyroBias, vec3f accelBias, float pressureBias) { char* pos = buffer; if (*size < 32 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 32; *pos++ = 2; *pos++ = 74; *pos++ = 0; *pos++ = 0; gyroBias.c[0] = htosf4(gyroBias.c[0]); gyroBias.c[1] = htosf4(gyroBias.c[1]); gyroBias.c[2] = htosf4(gyroBias.c[2]); memcpy(pos, &gyroBias, sizeof(vec3f)); pos += sizeof(vec3f); accelBias.c[0] = htosf4(accelBias.c[0]); accelBias.c[1] = htosf4(accelBias.c[1]); accelBias.c[2] = htosf4(accelBias.c[2]); memcpy(pos, &accelBias, sizeof(vec3f)); pos += sizeof(vec3f); pressureBias = htosf4(pressureBias); memcpy(pos, &pressureBias, sizeof(float)); pos += 
sizeof(float); return E_NONE; } VnError VnSpi_genWriteDeltaThetaAndDeltaVelocityConfiguration( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, uint8_t integrationFrame, uint8_t gyroCompensation, uint8_t accelCompensation, uint8_t reserved1, uint16_t reserved2) { char* pos = buffer; if (*size < 10 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 10; *pos++ = 2; *pos++ = 82; *pos++ = 0; *pos++ = 0; *pos++ = integrationFrame; *pos++ = gyroCompensation; *pos++ = accelCompensation; *pos++ = reserved1; reserved2 = htos16(reserved2); memcpy(pos, &reserved2, sizeof(uint16_t)); pos += sizeof(uint16_t); return E_NONE; } VnError VnSpi_genWriteReferenceVectorConfiguration( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, uint8_t useMagModel, uint8_t useGravityModel, uint8_t resv1, uint8_t resv2, uint32_t recalcThreshold, float year, vec3d position) { char* pos = buffer; if (*size < 44 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 44; *pos++ = 2; *pos++ = 83; *pos++ = 0; *pos++ = 0; *pos++ = useMagModel; *pos++ = useGravityModel; *pos++ = resv1; *pos++ = resv2; recalcThreshold = htos32(recalcThreshold); memcpy(pos, &recalcThreshold, sizeof(uint32_t)); pos += sizeof(uint32_t); year = htosf4(year); memcpy(pos, &year, sizeof(float)); pos += sizeof(float); pos += 4; position.c[0] = htosf8(position.c[0]); position.c[1] = htosf8(position.c[1]); position.c[2] = htosf8(position.c[2]); memcpy(pos, &position, sizeof(vec3d)); pos += sizeof(vec3d); return E_NONE; } VnError VnSpi_genWriteGyroCompensation( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, mat3f c, vec3f b) { char* pos = buffer; if (*size < 52 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 52; *pos++ = 2; *pos++ = 84; *pos++ = 0; *pos++ = 0; c.e[0] = htosf4(c.e[0]); c.e[1] = htosf4(c.e[1]); c.e[2] = htosf4(c.e[2]); c.e[3] = htosf4(c.e[3]); c.e[4] = htosf4(c.e[4]); c.e[5] = htosf4(c.e[5]); c.e[6] = htosf4(c.e[6]); c.e[7] = htosf4(c.e[7]); c.e[8] = htosf4(c.e[8]); memcpy(pos, &c, sizeof(mat3f)); pos += sizeof(mat3f); b.c[0] = htosf4(b.c[0]); b.c[1] = htosf4(b.c[1]); b.c[2] = htosf4(b.c[2]); memcpy(pos, &b, sizeof(vec3f)); pos += sizeof(vec3f); return E_NONE; } VnError VnSpi_genWriteImuFilteringConfiguration( char* buffer, size_t* size, size_t desiredLength, size_t* responseSize, uint16_t magWindowSize, uint16_t accelWindowSize, uint16_t gyroWindowSize, uint16_t tempWindowSize, uint16_t presWindowSize, uint8_t magFilterMode, uint8_t accelFilterMode, uint8_t gyroFilterMode, uint8_t tempFilterMode, uint8_t presFilterMode) { char* pos = buffer; if (*size < 19 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 19; *pos++ = 2; *pos++ = 85; *pos++ = 0; *pos++ = 0; magWindowSize = htos16(magWindowSize); memcpy(pos, &magWindowSize, sizeof(uint16_t)); pos += sizeof(uint16_t); accelWindowSize = htos16(accelWindowSize); memcpy(pos, &accelWindowSize, sizeof(uint16_t)); pos += sizeof(uint16_t); gyroWindowSize = htos16(gyroWindowSize); memcpy(pos, &gyroWindowSize, sizeof(uint16_t)); pos += sizeof(uint16_t); tempWindowSize = htos16(tempWindowSize); memcpy(pos, &tempWindowSize, sizeof(uint16_t)); pos += sizeof(uint16_t); presWindowSize = htos16(presWindowSize); memcpy(pos, &presWindowSize, sizeof(uint16_t)); pos += sizeof(uint16_t); *pos++ = magFilterMode; *pos++ = accelFilterMode; *pos++ = gyroFilterMode; *pos++ = tempFilterMode; *pos++ = presFilterMode; return E_NONE; } VnError VnSpi_genWriteGpsCompassBaseline( char* 
buffer, size_t* size, size_t desiredLength, size_t* responseSize, vec3f position, vec3f uncertainty) { char* pos = buffer; if (*size < 28 || *size < desiredLength) return E_BUFFER_TOO_SMALL; *responseSize = 28; *pos++ = 2; *pos++ = 93; *pos++ = 0; *pos++ = 0; position.c[0] = htosf4(position.c[0]); position.c[1] = htosf4(position.c[1]); position.c[2] = htosf4(position.c[2]); memcpy(pos, &position, sizeof(vec3f)); pos += sizeof(vec3f); uncertainty.c[0] = htosf4(uncertainty.c[0]); uncertainty.c[1] = htosf4(uncertainty.c[1]); uncertainty.c[2] = htosf4(uncertainty.c[2]); memcpy(pos, &uncertainty, sizeof(vec3f)); pos += sizeof(vec3f); return E_NONE; }
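/*
 * A minimal usage sketch (not part of the original library) showing how a gen/parse pair
 * above is meant to be combined: build the read-register request, clock it out over SPI,
 * then run a second transaction to obtain the response and hand it to the matching parse
 * routine.  The spi_transfer() routine, the 128-byte buffers, and the assumption that the
 * response is clocked out on the following transaction are illustrative only; the real
 * transport and transaction framing are platform specific.
 */
#include <string.h>

extern void spi_transfer(const char* tx, char* rx, size_t len); /* hypothetical platform SPI routine */

VnError exampleReadInsStateLla(
    vec3f* yawPitchRoll, vec3d* position, vec3f* velocity, vec3f* accel, vec3f* angularRate)
{
  char request[128];
  char response[128];
  size_t size = sizeof(request);
  size_t responseSize = 0;
  VnError error;

  /* Build the read request for the INS state (LLA) register. */
  memset(request, 0, sizeof(request));
  error = VnSpi_genReadInsStateLla(request, &size, 0, &responseSize);
  if (error != E_NONE)
    return error;

  /* First transaction sends the request; the response is assumed to arrive on the next one. */
  spi_transfer(request, response, responseSize);
  memset(request, 0, responseSize);
  spi_transfer(request, response, responseSize);

  /* Unpack the response header and payload with the matching parse routine. */
  return VnSpi_parseInsStateLla(response, yawPitchRoll, position, velocity, accel, angularRate);
}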
30,301
1,209
/* analog_value.c output analog values Hello World for ATTINY84 Controller Universal 8bit Graphics Library Copyright (c) 2012, <EMAIL> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "u8g.h" #if defined(__AVR__) #include <avr/interrupt.h> #include <avr/io.h> #endif /* Software SPI: uint8_t u8g_InitSPI(u8g_t *u8g, u8g_dev_t *dev, uint8_t sck, uint8_t mosi, uint8_t cs, uint8_t a0, uint8_t reset); Hardware SPI: uint8_t u8g_InitHWSPI(u8g_t *u8g, u8g_dev_t *dev, uint8_t cs, uint8_t a0, uint8_t reset); Parallel Interface: uint8_t u8g_Init8Bit(u8g_t *u8g, u8g_dev_t *dev, uint8_t d0, uint8_t d1, uint8_t d2, uint8_t d3, uint8_t d4, uint8_t d5, uint8_t d6, uint8_t d7, uint8_t en, uint8_t cs1, uint8_t cs2, uint8_t di, uint8_t rw, uint8_t reset); Visit http://code.google.com/p/u8glib/wiki/device for a list of valid devices (second argument of the constructor). The following examples will use the dogm132 device: u8g_dev_st7565_dogm132_sw_spi Note: The device must match the setup: For example, do not use a sw_spi device with u8g_InitHWSPI(). 
*/ u8g_t u8g; void sys_init(void) { #if defined(__AVR__) /* select minimal prescaler (max system speed) */ CLKPR = 0x80; CLKPR = 0x00; #endif } #define ADC_PRESCALAR 0x07 /* Notes - Internet: ADCH is stable after ADEN = 0 (shutdown) - Internet: Discard first conversion result */ /* measure voltage difference between ADC0 (positive, PORT A/Pin 0) and ADC1 (negative, PORT A/Pin 1) gain_bit == 0: 1x gain_bit == 1: 20x */ uint16_t sys_diff_adc_2_3(uint8_t gain_bit) { uint16_t l, h; /* datasheet recomends to turn off ADC for differntial measurement first */ ADCSRA = 0x00 | ADC_PRESCALAR; /* turn off ADC */ /* use PA2 and PA3 as input */ DDRA &= ~_BV(2); DDRA &= ~_BV(3); /* enable, but do not start ADC (ADEN, Bit 7) */ /* clear the interrupt indicator flag (ADIF, Bit 4) */ ADCSRA = 0x90 | ADC_PRESCALAR; ADMUX = 16 | gain_bit; /* enable bipolar mode, voltage diff may be higher or lower, result is signed */ ADCSRB = 0x080; /* enable and start conversion */ ADCSRA = 0xc0|ADC_PRESCALAR; /* wait for conversion to be finished (ADIF, Bit 4) */ while ( (ADCSRA & _BV(4)) == 0 ) ; /* return 8 bit result */ l = ADCL; h = ADCH; /* save some power */ ADCSRA = 0x00 | ADC_PRESCALAR; /* turn off ADC */ return (h<<8) | l ; } /* read from adc7 (Port A, Pin 7) */ uint16_t sys_adc7(void) { uint16_t l, h; /* turn off ADC to force long conversion */ ADCSRA = 0x00 | ADC_PRESCALAR; /* turn off ADC */ /* use PA7 as input pin for the ADC */ DDRA &= ~_BV(7); /* enable, but do not start ADC (ADEN, Bit 7) */ /* clear the interrupt indicator flag (ADIF, Bit 4) */ ADCSRA = 0x90 | ADC_PRESCALAR; /* ADC 7, wait for conversion finished*/ //while( ADCSRA & _BV(6) ) // ; ADMUX = 7; /* default operation */ ADCSRB = 0x0; /* enable and start conversion, maximum prescalar */ ADCSRA = 0xc0|ADC_PRESCALAR; /* wait for conversion to be finished (ADIF, Bit 4) */ while ( (ADCSRA & _BV(4)) == 0 ) ; /* return 8 bit result */ l = ADCL; h = ADCH; /* save some power */ ADCSRA = 0x00 | ADC_PRESCALAR; /* turn off ADC */ return (h<<8) | l ; } void u8g_setup(void) { /* Test Envionment: ATTINY84 and DOGM132 CS: PORTB, Bit 0 A0: PORTB, Bit 1 SCK: PORTA, Bit 4 MOSI: PORTA, Bit 5 */ u8g_InitSPI(&u8g, &u8g_dev_st7565_dogm132_sw_spi, PN(0, 4) , PN(0, 5), PN(1, 0), PN(1, 1), U8G_PIN_NONE); } uint16_t adc_val1 = 0; uint16_t adc_val2 = 0; uint8_t sign; uint16_t val; int16_t sval; int16_t min = 2000; int16_t max = -2000; void draw_signed(uint8_t x, uint8_t y, int16_t v) { if ( v < 0 ) { u8g_DrawStr(&u8g, x, y, "-"); u8g_DrawStr(&u8g, x+8, y, u8g_u16toa(-v, 3)); } else { u8g_DrawStr(&u8g, x, y, "+"); u8g_DrawStr(&u8g, x+8, y, u8g_u16toa(v, 3)); } } void draw(void) { u8g_SetFont(&u8g, u8g_font_fixed_v0r); //u8g_SetFont(&u8g, u8g_font_7x13r); u8g_DrawStr(&u8g, 0, 10, "Analog Values"); u8g_DrawStr(&u8g, 0, 20, u8g_u16toa(adc_val1, 4)); u8g_DrawStr(&u8g, 40, 20, u8g_u16toa(adc_val2, 4)); u8g_DrawStr(&u8g, 80, 20, sign == 0 ? "+" : "-"); u8g_DrawStr(&u8g, 80+8, 20, u8g_u16toa(val, 3)); draw_signed(0, 30, sval); draw_signed(40, 30, min); draw_signed(80, 30, max); } int main(void) { uint8_t i; sys_init(); u8g_setup(); for(;;) { /* why do we need two conversons??? 
see notes above */ for( i = 0; i < 100; i++ ) { adc_val1 = sys_adc7(); //adc_val1 = sys_adc7(); adc_val2 = sys_diff_adc_2_3(1); //adc_val2 = sys_diff_adc_2_3(1); sign = 0; val = adc_val2; if ( val >= 512 ) { sign = 1; val = 1024-val ; } sval = val; if ( sign != 0 ) sval = -sval; if ( min > sval ) min = sval; if ( max < sval ) max = sval; } u8g_FirstPage(&u8g); do { draw(); } while ( u8g_NextPage(&u8g) ); u8g_Delay(10); } }
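/*
 * A small helper sketch (not from the original example) spelling out the arithmetic that the
 * main loop above applies to the bipolar differential result of sys_diff_adc_2_3(): the 10-bit
 * value is two's complement (-512..+511), and the input voltage follows from the selected gain
 * and the ADC reference.  VREF_MV and the use of VCC as reference are assumptions for
 * illustration only.
 */
#include <stdint.h>

#define VREF_MV 3300L  /* assumed ADC reference voltage in millivolts */

/* Convert the raw 10-bit bipolar result to a signed value in the range -512..+511. */
static int16_t adc_bipolar_to_signed(uint16_t raw)
{
  raw &= 0x03ff;                                   /* keep the 10 result bits */
  return (raw & 0x0200) ? (int16_t)(raw - 1024) : (int16_t)raw;
}

/* Approximate differential input in millivolts for gain 1 or 20
   (gain_bit == 0 selects 1x, gain_bit == 1 selects 20x). */
static int32_t adc_bipolar_to_mv(uint16_t raw, uint8_t gain)
{
  return ((int32_t)adc_bipolar_to_signed(raw) * VREF_MV) / (512L * gain);
}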
2,870
550
<reponame>RalfRalf/java-sdk import com.qiniu.common.QiniuException; import com.qiniu.http.Response; import com.qiniu.storage.BucketManager; import com.qiniu.util.Auth; import com.qiniu.common.Zone; import com.qiniu.storage.Configuration; public class FetchDemo { public static void main(String args[]) { //Set the AK and SK of the account to operate with String ACCESS_KEY = "Access_Key"; String SECRET_KEY = "Secret_Key"; Auth auth = Auth.create(ACCESS_KEY, SECRET_KEY); Zone z = Zone.zone0(); Configuration c = new Configuration(z); //Instantiate a BucketManager object BucketManager bucketManager = new BucketManager(auth, c); //Bucket and key under which the fetched file will be saved String bucket = "yourbucket"; String key = "yourkey"; //URL to fetch String url = "url"; try { //Call the fetch method to fetch the file bucketManager.fetch(url, bucket, key); } catch (QiniuException e) { //Capture the exception information Response r = e.response; System.out.println(r.toString()); } } }
559
854
__________________________________________________________________________________________________ 0ms class Solution { public List<List<Integer>> combinationSum(int[] candidates, int target) { List<List<Integer>> ans = new LinkedList<List<Integer>>(); if (candidates == null || candidates.length == 0) { return ans; } int[] visited = new int[candidates.length]; bt(candidates, visited, 0, target, ans); return ans; } public void bt(int[] candidates, int[] visited, int start, int target, List<List<Integer>> ans) { if (target == 0) { List<Integer> combination = new LinkedList<>(); for (int i = 0; i < visited.length; i++) { for (int j = 0; j < visited[i]; j++) { combination.add(candidates[i]); } } ans.add(combination); return; } for (int i = start; i < candidates.length; i++) { if (target - candidates[i] >= 0) { visited[i]++; bt(candidates, visited, i, target - candidates[i], ans); visited[i]--; } } } } __________________________________________________________________________________________________ 2ms class Solution { public List<List<Integer>> combinationSum(int[] candidates, int target) { Arrays.sort(candidates); List<List<Integer>> res = new ArrayList<>(); cal(candidates, target, res, new ArrayList<>(), 0); return res; } public void cal(int[] ll, int target, List<List<Integer>> res, List<Integer> row, int k){ for(int i = k; i < ll.length; i++){ if(target == ll[i]){ row.add(ll[i]); res.add(new ArrayList<>(row)); row.remove(row.size()-1); break; } else if(target < ll[i]) { break; } else { row.add(ll[i]); cal(ll, target - ll[i], res, row, i); row.remove(row.size()-1); } } } } __________________________________________________________________________________________________ 5ms class Solution { public List<List<Integer>> combinationSum(int[] candidates, int target) { Arrays.sort(candidates); List<List<Integer>> res = new ArrayList<List<Integer>>(); Stack<Integer> temp = new Stack<Integer>(); int tempCount = 0; int used = -1; combine(res, candidates, target, temp, tempCount, used); return res; } public void combine(List<List<Integer>> res, int[] candidates, int target, Stack<Integer> temp, int tempCount, int used){ for(int i = 0; i < candidates.length; i++){ if(i < used){ continue; } if(tempCount + candidates[i] == target){ temp.push(candidates[i]); List<Integer> tempRes = new ArrayList<Integer>(); tempRes.addAll(temp); res.add(tempRes); temp.pop(); return; }else if(tempCount + candidates[i] > target){ return; }else{ temp.push(candidates[i]); combine(res, candidates, target, temp, tempCount+candidates[i], i); temp.pop(); } } } } __________________________________________________________________________________________________ 35752 kb class Solution { public List<List<Integer>> combinationSum(int[] candidates, int target) { List<List<Integer>> res = new ArrayList<>(); List<List<Integer>> [] dp = new List[target + 1]; dp[0] = new ArrayList<>(); for(int i = 1; i <= target; i++) { dp[i] = new ArrayList<>(); for(int coin: candidates) { if(coin > i) continue; if(coin == i) { dp[i].add(Arrays.asList(coin)); } else { for(List<Integer> list: dp[i - coin]) { if(coin >= list.get(list.size() - 1)) { List<Integer> tmp = new ArrayList<>(list); tmp.add(coin); dp[i].add(tmp); } } } } } return dp[target]; } } __________________________________________________________________________________________________ 35816 kb class Solution { public List<List<Integer>> combinationSum(int[] candidates, int target) { List<List<Integer>> res = new LinkedList<>(); List<Integer> permutes = new LinkedList<>(); dfs(res, 
permutes,candidates, target,0); return res; } public void dfs(List<List<Integer>> res, List<Integer> permutes, int[] candidates, int target,int start){ if(target == 0 ){ res.add(new LinkedList<>(permutes)); return; } for(int i = start;i<candidates.length;i++){ if(candidates[i] <= target){ permutes.add(candidates[i]); dfs(res, permutes,candidates,target-candidates[i],i); permutes.remove(permutes.size()-1); } } } } __________________________________________________________________________________________________
2,540
568
<reponame>arthurrib/nfe<gh_stars>100-1000 package com.fincatto.documentofiscal.nfe400.classes.nota; import org.junit.Assert; import org.junit.Test; public class NFIndicadorPresencaCompradorTest { @Test public void deveObterTipoApartirDoSeuCodigo() { Assert.assertEquals(NFIndicadorPresencaComprador.NAO_APLICA, NFIndicadorPresencaComprador.valueOfCodigo("0")); Assert.assertEquals(NFIndicadorPresencaComprador.OPERACAO_PRESENCIAL, NFIndicadorPresencaComprador.valueOfCodigo("1")); Assert.assertEquals(NFIndicadorPresencaComprador.OPERACAO_NAO_PRESENCIAL_INTERNET, NFIndicadorPresencaComprador.valueOfCodigo("2")); Assert.assertEquals(NFIndicadorPresencaComprador.OPERACAO_NAO_PRESENCIAL_TELEATENDIMENTO, NFIndicadorPresencaComprador.valueOfCodigo("3")); Assert.assertEquals(NFIndicadorPresencaComprador.NFCE_EM_OPERACAO_COM_ENTREGA_DOMICILIO, NFIndicadorPresencaComprador.valueOfCodigo("4")); Assert.assertEquals(NFIndicadorPresencaComprador.OPERACAO_PRESENCIAL_FORA_ESTABELECIMENTO, NFIndicadorPresencaComprador.valueOfCodigo("5")); Assert.assertEquals(NFIndicadorPresencaComprador.OPERACAO_NAO_PRESENCIAL_OUTROS, NFIndicadorPresencaComprador.valueOfCodigo("9")); } @Test public void deveRepresentarOCodigoCorretamente() { Assert.assertEquals("0", NFIndicadorPresencaComprador.NAO_APLICA.getCodigo()); Assert.assertEquals("1", NFIndicadorPresencaComprador.OPERACAO_PRESENCIAL.getCodigo()); Assert.assertEquals("2", NFIndicadorPresencaComprador.OPERACAO_NAO_PRESENCIAL_INTERNET.getCodigo()); Assert.assertEquals("3", NFIndicadorPresencaComprador.OPERACAO_NAO_PRESENCIAL_TELEATENDIMENTO.getCodigo()); Assert.assertEquals("4", NFIndicadorPresencaComprador.NFCE_EM_OPERACAO_COM_ENTREGA_DOMICILIO.getCodigo()); Assert.assertEquals("5", NFIndicadorPresencaComprador.OPERACAO_PRESENCIAL_FORA_ESTABELECIMENTO.getCodigo()); Assert.assertEquals("9", NFIndicadorPresencaComprador.OPERACAO_NAO_PRESENCIAL_OUTROS.getCodigo()); } }
925
376
from . import BaseActor from lib.utils.box_ops import box_cxcywh_to_xyxy, box_xywh_to_xyxy import torch from lib.utils.merge import get_qkv, merge_template_search import torch.nn as nn import torch.nn.functional as F from torch.nn.functional import l1_loss class STARKLightningXtrtdistillActor(BaseActor): """ Actor for training the STARK-S and STARK-ST(Stage1)""" def __init__(self, net, objective, loss_weight, settings, net_teacher): super().__init__(net, objective) self.net_teacher = net_teacher self.loss_weight = loss_weight self.settings = settings self.bs = self.settings.batchsize # batch size if "KL" in self.settings.distill_loss_type: print("Distill model with KL Loss") self.distill_loss_kl = nn.KLDivLoss(reduction="batchmean") if "L1" in self.settings.distill_loss_type: print("Distill model with L1 Loss") def __call__(self, data): """ args: data - The input data, should contain the fields 'template', 'search', 'gt_bbox'. template_images: (N_t, batch, 3, H, W) search_images: (N_s, batch, 3, H, W) returns: loss - the training loss status - dict containing detailed losses """ # forward student out_dict = self.forward_pass(self.net, data) # forward teacher out_dict_teacher = self.forward_pass_teacher(self.net_teacher, data, True, True) # process the groundtruth gt_bboxes = data['search_anno'] # (batch, 4) (x1,y1,w,h) # compute losses loss, status = self.compute_losses(out_dict, out_dict_teacher, gt_bboxes[0]) return loss, status def forward_pass(self, net, data): feat_dict_list = [] # process the templates for i in range(self.settings.num_template): template_img_i = data['template_images'][i].view(-1, *data['template_images'].shape[2:]) # (batch, 3, 128, 128) template_att_i = data['template_att'][i].view(-1, *data['template_att'].shape[2:]) # (batch, 128, 128) feat_dict_list.append(net(img=template_img_i, mask=template_att_i, mode='backbone', zx="template%d" % i)) # process the search regions (t-th frame) search_img = data['search_images'].view(-1, *data['search_images'].shape[2:]) # (batch, 3, 320, 320) search_att = data['search_att'].view(-1, *data['search_att'].shape[2:]) # (batch, 320, 320) feat_dict_list.append(net(img=search_img, mask=search_att, mode='backbone', zx="search")) # run the transformer and compute losses q, k, v, key_padding_mask = get_qkv(feat_dict_list) # for student network, here we output the original logits without softmax out_dict, _, _ = net(q=q, k=k, v=v, key_padding_mask=key_padding_mask, mode="transformer", softmax=False) # out_dict: (B, N, C), outputs_coord: (1, B, N, C), target_query: (1, B, N, C) return out_dict def forward_pass_teacher(self, net, data, run_box_head, run_cls_head): feat_dict_list = [] # process the templates for i in range(self.settings.num_template): template_img_i = data['template_images'][i].view(-1, *data['template_images'].shape[2:]) # (batch, 3, 128, 128) template_att_i = data['template_att'][i].view(-1, *data['template_att'].shape[2:]) # (batch, 128, 128) feat_dict_list.append(net(img=template_img_i, mask=template_att_i, mode='backbone', zx="template%d" % i)) # process the search regions (t-th frame) search_img = data['search_images'].view(-1, *data['search_images'].shape[2:]) # (batch, 3, 320, 320) search_att = data['search_att'].view(-1, *data['search_att'].shape[2:]) # (batch, 320, 320) feat_dict_list.append(net(img=search_img, mask=search_att, mode='backbone', zx="search")) # run the transformer and compute losses seq_dict = merge_template_search(feat_dict_list) out_dict, _, _ = net(seq_dict=seq_dict, mode="transformer", 
run_box_head=run_box_head, run_cls_head=run_cls_head) # out_dict: (B, N, C), outputs_coord: (1, B, N, C), target_query: (1, B, N, C) return out_dict def compute_losses(self, out_dict, out_dict_teacher, gt_bbox, return_status=True): pred_boxes = out_dict["pred_boxes"] pred_boxes_teacher = out_dict_teacher["pred_boxes"] # Get boxes if torch.isnan(pred_boxes).any(): raise ValueError("Network outputs is NAN! Stop Training") pred_boxes_vec = box_cxcywh_to_xyxy(pred_boxes) # (B,4) (x1,y1,x2,y2) pred_boxes_vec_teacher = box_cxcywh_to_xyxy(pred_boxes_teacher) # (B,4) (x1,y1,x2,y2) gt_boxes_vec = box_xywh_to_xyxy(gt_bbox).clamp(min=0.0, max=1.0) # (B,4) # compute giou and iou try: giou_loss, iou = self.objective['giou'](pred_boxes_vec, gt_boxes_vec) # (B,4) (B,4) except: giou_loss, iou = torch.tensor(0.0).cuda(), torch.tensor(0.0).cuda() try: _, iou_teacher = self.objective['giou'](pred_boxes_vec_teacher, gt_boxes_vec) except: iou_teacher = torch.tensor(0.0).cuda() # compute l1 loss l1_loss = self.objective['l1'](pred_boxes_vec, gt_boxes_vec) # (BN,4) (BN,4) # compute distillation loss dis_loss_l1, dis_loss_kl = self.compute_distill_losses(out_dict, out_dict_teacher) # weighted sum loss = self.loss_weight['giou'] * giou_loss + self.loss_weight['l1'] * l1_loss + \ self.loss_weight['l1'] * dis_loss_l1 + self.loss_weight['l1'] * dis_loss_kl if return_status: # status for log mean_iou = iou.detach().mean() mean_iou_teacher = iou_teacher.detach().mean() status = {"Loss/total": loss.item(), "Loss/giou": giou_loss.item(), "Loss/l1": l1_loss.item(), "Loss/distill_l1": dis_loss_l1.item(), "Loss/distill_kl": dis_loss_kl.item(), "IoU": mean_iou.item(), "IoU_teacher": mean_iou_teacher.item()} return loss, status else: return loss def compute_distill_losses(self, out_dict, out_dict_t): ptl, pbr = out_dict["prob_tl"], out_dict["prob_br"] ptl_t, pbr_t = out_dict_t["prob_tl"], out_dict_t["prob_br"] dis_loss_l1, dis_loss_kl = torch.tensor(0.0).cuda(), torch.tensor(0.0).cuda() if "KL" in self.settings.distill_loss_type: dis_loss_kl_tl = self.distill_loss_kl(F.log_softmax(ptl, dim=1), ptl_t) dis_loss_kl_br = self.distill_loss_kl(F.log_softmax(pbr, dim=1), pbr_t) dis_loss_kl = (dis_loss_kl_tl + dis_loss_kl_br) / 2 if "L1" in self.settings.distill_loss_type: dis_loss_l1_tl = l1_loss(F.softmax(ptl), ptl_t, reduction="sum") / self.bs dis_loss_l1_br = l1_loss(F.softmax(pbr), pbr_t, reduction="sum") / self.bs dis_loss_l1 = (dis_loss_l1_tl + dis_loss_l1_br) / 2 return dis_loss_l1, dis_loss_kl
3,679
381
<gh_stars>100-1000 package org.apache.helix.taskexecution; /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ import java.util.Map; import java.util.Set; import org.apache.helix.HelixManager; public class ReportTask extends Task { public ReportTask(String id, Set<String> parentIds, HelixManager helixManager, TaskResultStore resultStore) { super(id, parentIds, helixManager, resultStore); } @Override protected void executeImpl(String resourceName, int numPartitions, int partitionNum) throws Exception { System.out.println("Running reports task"); System.out.println("Impression counts per country"); printCounts(FilterTask.FILTERED_IMPRESSIONS + "_country_counts"); System.out.println("Click counts per country"); printCounts(JoinTask.JOINED_CLICKS + "_country_counts"); System.out.println("Impression counts per gender"); printCounts(FilterTask.FILTERED_IMPRESSIONS + "_gender_counts"); System.out.println("Click counts per gender"); printCounts(JoinTask.JOINED_CLICKS + "_gender_counts"); } private void printCounts(String tableName) throws Exception { Map<String, String> counts = resultStore.hgetAll(tableName); System.out.println(counts); } }
591
1,350
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Code generated by Microsoft (R) AutoRest Code Generator. package com.azure.resourcemanager.customerinsights.fluent.models; import com.azure.core.annotation.Fluent; import com.azure.core.annotation.JsonFlatten; import com.azure.core.management.ProxyResource; import com.azure.core.util.logging.ClientLogger; import com.azure.resourcemanager.customerinsights.models.ConnectorStates; import com.azure.resourcemanager.customerinsights.models.ConnectorTypes; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; import java.time.OffsetDateTime; import java.util.Map; /** The connector resource format. */ @JsonFlatten @Fluent public class ConnectorResourceFormatInner extends ProxyResource { @JsonIgnore private final ClientLogger logger = new ClientLogger(ConnectorResourceFormatInner.class); /* * ID of the connector. */ @JsonProperty(value = "properties.connectorId", access = JsonProperty.Access.WRITE_ONLY) private Integer connectorId; /* * Name of the connector. */ @JsonProperty(value = "properties.connectorName") private String connectorName; /* * Type of connector. */ @JsonProperty(value = "properties.connectorType") private ConnectorTypes connectorType; /* * Display name of the connector. */ @JsonProperty(value = "properties.displayName") private String displayName; /* * Description of the connector. */ @JsonProperty(value = "properties.description") private String description; /* * The connector properties. */ @JsonProperty(value = "properties.connectorProperties") private Map<String, Object> connectorProperties; /* * The created time. */ @JsonProperty(value = "properties.created", access = JsonProperty.Access.WRITE_ONLY) private OffsetDateTime created; /* * The last modified time. */ @JsonProperty(value = "properties.lastModified", access = JsonProperty.Access.WRITE_ONLY) private OffsetDateTime lastModified; /* * State of connector. */ @JsonProperty(value = "properties.state", access = JsonProperty.Access.WRITE_ONLY) private ConnectorStates state; /* * The hub name. */ @JsonProperty(value = "properties.tenantId", access = JsonProperty.Access.WRITE_ONLY) private String tenantId; /* * If this is an internal connector. */ @JsonProperty(value = "properties.isInternal") private Boolean isInternal; /** * Get the connectorId property: ID of the connector. * * @return the connectorId value. */ public Integer connectorId() { return this.connectorId; } /** * Get the connectorName property: Name of the connector. * * @return the connectorName value. */ public String connectorName() { return this.connectorName; } /** * Set the connectorName property: Name of the connector. * * @param connectorName the connectorName value to set. * @return the ConnectorResourceFormatInner object itself. */ public ConnectorResourceFormatInner withConnectorName(String connectorName) { this.connectorName = connectorName; return this; } /** * Get the connectorType property: Type of connector. * * @return the connectorType value. */ public ConnectorTypes connectorType() { return this.connectorType; } /** * Set the connectorType property: Type of connector. * * @param connectorType the connectorType value to set. * @return the ConnectorResourceFormatInner object itself. */ public ConnectorResourceFormatInner withConnectorType(ConnectorTypes connectorType) { this.connectorType = connectorType; return this; } /** * Get the displayName property: Display name of the connector. 
* * @return the displayName value. */ public String displayName() { return this.displayName; } /** * Set the displayName property: Display name of the connector. * * @param displayName the displayName value to set. * @return the ConnectorResourceFormatInner object itself. */ public ConnectorResourceFormatInner withDisplayName(String displayName) { this.displayName = displayName; return this; } /** * Get the description property: Description of the connector. * * @return the description value. */ public String description() { return this.description; } /** * Set the description property: Description of the connector. * * @param description the description value to set. * @return the ConnectorResourceFormatInner object itself. */ public ConnectorResourceFormatInner withDescription(String description) { this.description = description; return this; } /** * Get the connectorProperties property: The connector properties. * * @return the connectorProperties value. */ public Map<String, Object> connectorProperties() { return this.connectorProperties; } /** * Set the connectorProperties property: The connector properties. * * @param connectorProperties the connectorProperties value to set. * @return the ConnectorResourceFormatInner object itself. */ public ConnectorResourceFormatInner withConnectorProperties(Map<String, Object> connectorProperties) { this.connectorProperties = connectorProperties; return this; } /** * Get the created property: The created time. * * @return the created value. */ public OffsetDateTime created() { return this.created; } /** * Get the lastModified property: The last modified time. * * @return the lastModified value. */ public OffsetDateTime lastModified() { return this.lastModified; } /** * Get the state property: State of connector. * * @return the state value. */ public ConnectorStates state() { return this.state; } /** * Get the tenantId property: The hub name. * * @return the tenantId value. */ public String tenantId() { return this.tenantId; } /** * Get the isInternal property: If this is an internal connector. * * @return the isInternal value. */ public Boolean isInternal() { return this.isInternal; } /** * Set the isInternal property: If this is an internal connector. * * @param isInternal the isInternal value to set. * @return the ConnectorResourceFormatInner object itself. */ public ConnectorResourceFormatInner withIsInternal(Boolean isInternal) { this.isInternal = isInternal; return this; } /** * Validates the instance. * * @throws IllegalArgumentException thrown if the instance is not valid. */ public void validate() { } }
2,543
930
<filename>Linkedin_Video_Downloader/script.py # ALL Imports import tkinter as tk import requests as req import html import time from tkinter.ttk import * from threading import Thread import queue from queue import Empty def Invalid_Url(): """ Sets Status bar label to error message """ Status["text"] = "Invalid URL..." Status["fg"] = "red" def Download_vid(): # Validates Link and download Video global Url_Val url=Url_Val.get() Status["text"]="Downloading" Status["fg"]="green" # Validating Input if not "linkedin.com/posts" in url: Invalid_Url() return response =req.get(url) if not response.status_code == 200: Invalid_Url() return htmlsource = response.text sources = html.unescape(htmlsource).split() for source in sources: if "dms.licdn.com" in source: videourl = source.split(',')[0].split('"src":')[1][1:-1] start_downloading() download_thread=VideoDownload(videourl) download_thread.start() monitor(download_thread) break class VideoDownload(Thread): def __init__(self, url): super().__init__() self.url = url def run(self): """ download video""" # save the picture to a file block_size = 1024 # 1kB r = req.get(self.url, stream=True) total_size = int(r.headers.get("content-length")) with open('video.mp4', 'wb') as file: totaldata=0; for data in r.iter_content(block_size): totaldata+=len(data) per_downloaded=totaldata*100/total_size queue.put(per_downloaded) bar['value'] = per_downloaded file.write(data) time.sleep(0.01) file.close() print("Download Finished") print("Download Complete !!!") Status["text"] = "Finished!!" Status["fg"] = "green" #start download def start_downloading(): bar["value"]=0; def monitor( download_thread): """ Monitor the download thread """ if download_thread.is_alive(): try: bar["value"]=queue.get(0) ld_window.after(10, lambda: monitor(download_thread)) except Empty: pass # GUI ld_window=tk.Tk() ld_window.title("Linkedin Video Downloader") ld_window.geometry("400x300") # Label for URL Input input_label= tk.Label(ld_window,text="Enter Linkedin Video URL:") input_label.pack() queue=queue.Queue() # Input of URL Url_Val = tk.StringVar() Url_Input = tk.Entry(ld_window, textvariable=Url_Val, font=("Calibri", 9)) Url_Input.place( x=25,y=50, width=350) # Button for Download Download_button = tk.Button(ld_window, text="Download", font=("Calibri", 9), command=Download_vid) Download_button.place(x=100, y=100, width=200) # Progress Bar bar = Progressbar(ld_window, length=350, style='grey.Horizontal.TProgressbar',mode='determinate') bar.place(y=200,width=350,x=25) # Text for Status of Downloading Status = tk.Label(ld_window, text="Hello!! :D", fg="blue", font=("Calibri", 9), bd=1, relief=tk.SUNKEN, anchor=tk.W, padx=3) Status.pack(side=tk.BOTTOM, fill=tk.X) ld_window.mainloop()
1,408
711
package com.java110.store.listener; import com.alibaba.fastjson.JSONObject; import com.java110.utils.constant.ResponseConstant; import com.java110.utils.constant.StatusConstant; import com.java110.utils.exception.ListenerExecuteException; import com.java110.entity.center.Business; import com.java110.core.event.service.AbstractBusinessServiceDataFlowListener; import com.java110.store.dao.IStoreServiceDao; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.HashMap; import java.util.List; import java.util.Map; /** * * 商户 服务侦听 父类 * Created by wuxw on 2018/7/4. */ public abstract class AbstractStoreBusinessServiceDataFlowListener extends AbstractBusinessServiceDataFlowListener{ private final static Logger logger = LoggerFactory.getLogger(AbstractStoreBusinessServiceDataFlowListener.class); /** * 获取 DAO工具类 * @return */ public abstract IStoreServiceDao getStoreServiceDaoImpl(); /** * 刷新 businessStoreInfo 数据 * 主要将 数据库 中字段和 接口传递字段建立关系 * @param businessStoreInfo */ protected void flushBusinessStoreInfo(Map businessStoreInfo,String statusCd){ businessStoreInfo.put("newBId",businessStoreInfo.get("b_id")); businessStoreInfo.put("storeId",businessStoreInfo.get("store_id")); businessStoreInfo.put("userId",businessStoreInfo.get("user_id")); businessStoreInfo.put("storeTypeCd",businessStoreInfo.get("store_type_cd")); businessStoreInfo.put("nearbyLandmarks",businessStoreInfo.get("nearby_landmarks")); businessStoreInfo.put("mapX",businessStoreInfo.get("map_x")); businessStoreInfo.put("mapY",businessStoreInfo.get("map_y")); businessStoreInfo.put("statusCd", statusCd); } /** 刷新 businessStoreAttr 数据 * 主要将 数据库 中字段和 接口传递字段建立关系 * @param businessStoreAttr * @param statusCd */ protected void flushBusinessStoreAttr(Map businessStoreAttr,String statusCd){ businessStoreAttr.put("attrId",businessStoreAttr.get("attr_id")); businessStoreAttr.put("specCd",businessStoreAttr.get("spec_cd")); businessStoreAttr.put("storeId",businessStoreAttr.get("store_id")); businessStoreAttr.put("newBId",businessStoreAttr.get("b_id")); businessStoreAttr.put("statusCd",statusCd); } /** * 刷新 businessStorePhoto 数据 * @param businessStorePhoto * @param statusCd */ protected void flushBusinessStorePhoto(Map businessStorePhoto,String statusCd){ businessStorePhoto.put("storeId",businessStorePhoto.get("store_id")); businessStorePhoto.put("storePhotoId",businessStorePhoto.get("store_photo_id")); businessStorePhoto.put("storePhotoTypeCd",businessStorePhoto.get("store_photo_type_cd")); businessStorePhoto.put("newBId",businessStorePhoto.get("b_id")); businessStorePhoto.put("statusCd",statusCd); } /** * 刷新 businessStoreCerdentials 数据 * @param businessStoreCerdentials * @param statusCd */ protected void flushBusinessStoreCredentials(Map businessStoreCerdentials ,String statusCd){ businessStoreCerdentials.put("storeId",businessStoreCerdentials.get("store_id")); businessStoreCerdentials.put("storeCerdentialsId",businessStoreCerdentials.get("store_cerdentials_id")); businessStoreCerdentials.put("credentialsCd",businessStoreCerdentials.get("credentials_cd")); businessStoreCerdentials.put("validityPeriod",businessStoreCerdentials.get("validity_period")); businessStoreCerdentials.put("positivePhoto",businessStoreCerdentials.get("positive_photo")); businessStoreCerdentials.put("negativePhoto",businessStoreCerdentials.get("negative_photo")); businessStoreCerdentials.put("newBId",businessStoreCerdentials.get("b_id")); businessStoreCerdentials.put("statusCd",statusCd); } /** * 刷新 businessMemberStore 数据 * 主要将 数据库 中字段和 接口传递字段建立关系 * @param 
businessMemberStore */ protected void flushBusinessMemberStore(Map businessMemberStore,String statusCd){ businessMemberStore.put("newBId",businessMemberStore.get("b_id")); businessMemberStore.put("storeId",businessMemberStore.get("store_id")); businessMemberStore.put("memberStoreId",businessMemberStore.get("member_store_id")); businessMemberStore.put("memberId",businessMemberStore.get("member_id")); businessMemberStore.put("statusCd", statusCd); } /** * 当修改数据时,查询instance表中的数据 自动保存删除数据到business中 * @param businessStore 商户信息 */ protected void autoSaveDelBusinessStore(Business business, JSONObject businessStore){ //自动插入DEL Map info = new HashMap(); info.put("storeId",businessStore.getString("storeId")); info.put("statusCd",StatusConstant.STATUS_CD_VALID); Map currentStoreInfo = getStoreServiceDaoImpl().getStoreInfo(info); if(currentStoreInfo == null || currentStoreInfo.isEmpty()){ throw new ListenerExecuteException(ResponseConstant.RESULT_PARAM_ERROR,"未找到需要修改数据信息,入参错误或数据有问题,请检查"+info); } currentStoreInfo.put("bId",business.getbId()); currentStoreInfo.put("storeId",currentStoreInfo.get("store_id")); currentStoreInfo.put("userId",currentStoreInfo.get("user_id")); currentStoreInfo.put("storeTypeCd",currentStoreInfo.get("store_type_cd")); currentStoreInfo.put("nearbyLandmarks",currentStoreInfo.get("nearby_landmarks")); currentStoreInfo.put("mapX",currentStoreInfo.get("map_x")); currentStoreInfo.put("mapY",currentStoreInfo.get("map_y")); currentStoreInfo.put("operate",StatusConstant.OPERATE_DEL); getStoreServiceDaoImpl().saveBusinessStoreInfo(currentStoreInfo); for (Object key : currentStoreInfo.keySet()) { if (businessStore.get(key) == null) { businessStore.put(key.toString(), currentStoreInfo.get(key)); } } } /** * 当修改数据时,查询instance表中的数据 自动保存删除数据到business中 * @param business 当前业务 * @param storeAttr 商户属性 */ protected void autoSaveDelBusinessStoreAttr(Business business, JSONObject storeAttr){ Map info = new HashMap(); info.put("attrId",storeAttr.getString("attrId")); info.put("storeId",storeAttr.getString("storeId")); info.put("statusCd",StatusConstant.STATUS_CD_VALID); List<Map> currentStoreAttrs = getStoreServiceDaoImpl().getStoreAttrs(info); if(currentStoreAttrs == null || currentStoreAttrs.size() != 1){ throw new ListenerExecuteException(ResponseConstant.RESULT_PARAM_ERROR,"未找到需要修改数据信息,入参错误或数据有问题,请检查"+info); } Map currentStoreAttr = currentStoreAttrs.get(0); currentStoreAttr.put("bId",business.getbId()); currentStoreAttr.put("attrId",currentStoreAttr.get("attr_id")); currentStoreAttr.put("storeId",currentStoreAttr.get("store_id")); currentStoreAttr.put("specCd",currentStoreAttr.get("spec_cd")); currentStoreAttr.put("operate",StatusConstant.OPERATE_DEL); getStoreServiceDaoImpl().saveBusinessStoreAttr(currentStoreAttr); for (Object key : currentStoreAttr.keySet()) { if (storeAttr.get(key) == null) { storeAttr.put(key.toString(), currentStoreAttr.get(key)); } } } /** * 当修改数据时,查询instance表中的数据 自动保存删除数据到business中 * @param business * @param businessStorePhoto 商户照片 */ protected void autoSaveDelBusinessStorePhoto(Business business,JSONObject businessStorePhoto){ Map info = new HashMap(); info.put("storePhotoId",businessStorePhoto.getString("storePhotoId")); info.put("storeId",businessStorePhoto.getString("storeId")); info.put("statusCd",StatusConstant.STATUS_CD_VALID); List<Map> currentStorePhotos = getStoreServiceDaoImpl().getStorePhoto(info); if(currentStorePhotos == null || currentStorePhotos.size() != 1){ throw new 
ListenerExecuteException(ResponseConstant.RESULT_PARAM_ERROR,"未找到需要修改数据信息,入参错误或数据有问题,请检查"+info); } Map currentStorePhoto = currentStorePhotos.get(0); currentStorePhoto.put("bId",business.getbId()); currentStorePhoto.put("storePhotoId",currentStorePhoto.get("store_photo_id")); currentStorePhoto.put("storeId",currentStorePhoto.get("store_id")); currentStorePhoto.put("storePhotoTypeCd",currentStorePhoto.get("store_photo_type_cd")); currentStorePhoto.put("operate",StatusConstant.OPERATE_DEL); getStoreServiceDaoImpl().saveBusinessStorePhoto(currentStorePhoto); for (Object key : currentStorePhoto.keySet()) { if (businessStorePhoto.get(key) == null) { businessStorePhoto.put(key.toString(), currentStorePhoto.get(key)); } } } /** * 当修改数据时,查询instance表中的数据 自动保存删除数据到business中 * @param business * @param businessStoreCerdentials 商户证件 */ protected void autoSaveDelBusinessStoreCerdentials(Business business,JSONObject businessStoreCerdentials){ Map info = new HashMap(); info.put("storeCerdentialsId",businessStoreCerdentials.getString("storeCerdentialsId")); info.put("storeId",businessStoreCerdentials.getString("storeId")); info.put("statusCd",StatusConstant.STATUS_CD_VALID); List<Map> currentStoreCerdentailses = getStoreServiceDaoImpl().getStoreCerdentials(info); if(currentStoreCerdentailses == null || currentStoreCerdentailses.size() != 1){ throw new ListenerExecuteException(ResponseConstant.RESULT_PARAM_ERROR,"未找到需要修改数据信息,入参错误或数据有问题,请检查"+info); } Map currentStoreCerdentials = currentStoreCerdentailses.get(0); currentStoreCerdentials.put("bId",business.getbId()); currentStoreCerdentials.put("storeCerdentialsId",currentStoreCerdentials.get("store_cerdentials_id")); currentStoreCerdentials.put("storeId",currentStoreCerdentials.get("store_id")); currentStoreCerdentials.put("credentialsCd",currentStoreCerdentials.get("credentials_cd")); currentStoreCerdentials.put("validityPeriod",currentStoreCerdentials.get("validity_period")); currentStoreCerdentials.put("positivePhoto",currentStoreCerdentials.get("positive_photo")); currentStoreCerdentials.put("negativePhoto",currentStoreCerdentials.get("negative_photo")); currentStoreCerdentials.put("operate",StatusConstant.OPERATE_DEL); getStoreServiceDaoImpl().saveBusinessStoreCerdentials(currentStoreCerdentials); } /** * 当修改数据时,查询instance表中的数据 自动保存删除数据到business中 * @param businessMemberStore 商户信息 */ protected void autoSaveDelBusinessMemberStore(Business business, JSONObject businessMemberStore){ //自动插入DEL Map info = new HashMap(); info.put("memberStoreId",businessMemberStore.getString("memberStoreId")); info.put("statusCd",StatusConstant.STATUS_CD_VALID); Map currentMemberStore = getStoreServiceDaoImpl().getMemberStore(info); if(currentMemberStore == null || currentMemberStore.isEmpty()){ throw new ListenerExecuteException(ResponseConstant.RESULT_PARAM_ERROR,"未找到需要修改数据信息,入参错误或数据有问题,请检查"+info); } currentMemberStore.put("bId",business.getbId()); currentMemberStore.put("storeId",currentMemberStore.get("store_id")); currentMemberStore.put("memberStoreId",currentMemberStore.get("member_store_id")); currentMemberStore.put("memberId",currentMemberStore.get("member_id")); currentMemberStore.put("operate",StatusConstant.OPERATE_DEL); getStoreServiceDaoImpl().saveBusinessStoreInfo(currentMemberStore); } /** * 当修改数据时,查询instance表中的数据 自动保存删除数据到business中 * @param businessStoreUser 商户信息 */ protected void autoSaveDelBusinessStoreUser(Business business, JSONObject businessStoreUser){ //自动插入DEL Map info = new HashMap(); info.put("storeId",businessStoreUser.getString("storeId")); 
info.put("userId",businessStoreUser.getString("userId")); info.put("statusCd",StatusConstant.STATUS_CD_VALID); Map currentStoreUser = getStoreServiceDaoImpl().getStoreUser(info).get(0); if(currentStoreUser == null || currentStoreUser.isEmpty()){ throw new ListenerExecuteException(ResponseConstant.RESULT_PARAM_ERROR,"未找到需要修改数据信息,入参错误或数据有问题,请检查"+info); } currentStoreUser.put("bId",business.getbId()); currentStoreUser.put("storeUserId", currentStoreUser.get("store_user_id")); currentStoreUser.put("storeId",currentStoreUser.get("store_id")); currentStoreUser.put("userId",currentStoreUser.get("user_id")); currentStoreUser.put("relCd",currentStoreUser.get("rel_cd")); currentStoreUser.put("operate",StatusConstant.OPERATE_DEL); getStoreServiceDaoImpl().saveBusinessStoreUser(currentStoreUser); } /** * 刷新 businessMemberStore 数据 * 主要将 数据库 中字段和 接口传递字段建立关系 * @param businessMemberStore */ protected void flushBusinessStoreUser(Map businessMemberStore,String statusCd){ businessMemberStore.put("newBId",businessMemberStore.get("b_id")); businessMemberStore.put("storeUserId", businessMemberStore.get("store_user_id")); businessMemberStore.put("storeId",businessMemberStore.get("store_id")); businessMemberStore.put("userId",businessMemberStore.get("user_id")); businessMemberStore.put("relCd",businessMemberStore.get("rel_cd")); businessMemberStore.put("statusCd", statusCd); } }
6,024
892
{ "schema_version": "1.2.0", "id": "GHSA-hh7m-rx4f-4vpv", "modified": "2021-01-11T20:32:44Z", "published": "2021-01-11T20:38:28Z", "aliases": [ "CVE-2021-21241" ], "summary": "CSRF can expose users authentication token", "details": "### Issue\nThe /login and /change endpoints can return the authenticated user's authentication token in response to a GET request. Since GET requests aren't protected with a CSRF token, this could lead to a malicious 3rd party site acquiring the authentication token.\n\n### Patches\nVersion 3.4.5 and soon to be released 4.0.0 are patched.\n\n### Workarounds\nIf you aren't using authentication tokens - you can set the SECURITY_TOKEN_MAX_AGE to \"0\" (seconds) which should make the token unusable.\n\n### References\nNone", "severity": [ ], "affected": [ { "package": { "ecosystem": "PyPI", "name": "Flask-Security-Too" }, "ranges": [ { "type": "ECOSYSTEM", "events": [ { "introduced": "3.3.0" }, { "fixed": "3.4.5" } ] } ] } ], "references": [ { "type": "WEB", "url": "https://github.com/Flask-Middleware/flask-security/security/advisories/GHSA-hh7m-rx4f-4vpv" }, { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2021-21241" }, { "type": "WEB", "url": "https://github.com/Flask-Middleware/flask-security/pull/422" }, { "type": "WEB", "url": "https://github.com/Flask-Middleware/flask-security/commit/61d313150b5f620d0b800896c4f2199005e84b1f" }, { "type": "WEB", "url": "https://github.com/Flask-Middleware/flask-security/commit/6d50ee9169acf813257c37b75babe9c28e83542a" }, { "type": "WEB", "url": "https://github.com/Flask-Middleware/flask-security/releases/tag/3.4.5" }, { "type": "WEB", "url": "https://pypi.org/project/Flask-Security-Too" } ], "database_specific": { "cwe_ids": [ "CWE-352" ], "severity": "HIGH", "github_reviewed": true } }
1,045
2,577
/* * Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH * under one or more contributor license agreements. See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. Camunda licenses this file to you under the Apache License, * Version 2.0; you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.camunda.bpm.model.cmmn.impl.instance; import static org.camunda.bpm.model.cmmn.impl.CmmnModelConstants.CMMN11_NS; import static org.camunda.bpm.model.cmmn.impl.CmmnModelConstants.CMMN_ELEMENT_PLAN_FRAGMENT; import java.util.Collection; import org.camunda.bpm.model.cmmn.instance.PlanFragment; import org.camunda.bpm.model.cmmn.instance.PlanItem; import org.camunda.bpm.model.cmmn.instance.PlanItemDefinition; import org.camunda.bpm.model.cmmn.instance.Sentry; import org.camunda.bpm.model.xml.ModelBuilder; import org.camunda.bpm.model.xml.impl.instance.ModelTypeInstanceContext; import org.camunda.bpm.model.xml.type.ModelElementTypeBuilder; import org.camunda.bpm.model.xml.type.ModelElementTypeBuilder.ModelTypeInstanceProvider; import org.camunda.bpm.model.xml.type.child.ChildElementCollection; import org.camunda.bpm.model.xml.type.child.SequenceBuilder; /** * @author <NAME> * */ public class PlanFragmentImpl extends PlanItemDefinitionImpl implements PlanFragment { protected static ChildElementCollection<PlanItem> planItemCollection; protected static ChildElementCollection<Sentry> sentryCollection; public PlanFragmentImpl(ModelTypeInstanceContext instanceContext) { super(instanceContext); } public Collection<PlanItem> getPlanItems() { return planItemCollection.get(this); } public Collection<Sentry> getSentrys() { return sentryCollection.get(this); } public static void registerType(ModelBuilder modelBuilder) { ModelElementTypeBuilder typeBuilder = modelBuilder.defineType(PlanFragment.class, CMMN_ELEMENT_PLAN_FRAGMENT) .namespaceUri(CMMN11_NS) .extendsType(PlanItemDefinition.class) .instanceProvider(new ModelTypeInstanceProvider<PlanFragment>() { public PlanFragment newInstance(ModelTypeInstanceContext instanceContext) { return new PlanFragmentImpl(instanceContext); } }); SequenceBuilder sequenceBuilder = typeBuilder.sequence(); planItemCollection = sequenceBuilder.elementCollection(PlanItem.class) .build(); sentryCollection = sequenceBuilder.elementCollection(Sentry.class) .build(); typeBuilder.build(); } }
935
17,703
#include <string>

#include "envoy/http/filter.h"
#include "envoy/registry/registry.h"

#include "source/extensions/filters/http/common/factory_base.h"
#include "source/extensions/filters/http/common/pass_through_filter.h"

#include "test/integration/filters/set_is_terminal_filter_config.pb.h"
#include "test/integration/filters/set_is_terminal_filter_config.pb.validate.h"

#include "absl/strings/match.h"

namespace Envoy {

// A test filter that controls whether it is a terminal filter through its protobuf config.
class SetIsTerminalFilter : public Http::PassThroughFilter {};

class SetIsTerminalFilterFactory
    : public Extensions::HttpFilters::Common::FactoryBase<
          test::integration::filters::SetIsTerminalFilterConfig> {
public:
  SetIsTerminalFilterFactory() : FactoryBase("set-is-terminal-filter") {}

private:
  Http::FilterFactoryCb createFilterFactoryFromProtoTyped(
      const test::integration::filters::SetIsTerminalFilterConfig&, const std::string&,
      Server::Configuration::FactoryContext&) override {
    return [](Http::FilterChainFactoryCallbacks& callbacks) -> void {
      callbacks.addStreamFilter(std::make_shared<SetIsTerminalFilter>());
    };
  }

  bool isTerminalFilterByProtoTyped(
      const test::integration::filters::SetIsTerminalFilterConfig& proto_config,
      Server::Configuration::FactoryContext&) override {
    return proto_config.is_terminal_filter();
  }
};

REGISTER_FACTORY(SetIsTerminalFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory);

} // namespace Envoy
594
348
{"nom":"Saint-Agnan-sur-Sarthe","circ":"1ère circonscription","dpt":"Orne","inscrits":77,"abs":28,"votants":49,"blancs":6,"nuls":1,"exp":42,"res":[{"nuance":"SOC","nom":"M. <NAME>","voix":21},{"nuance":"DVD","nom":"<NAME>","voix":21}]}
98
348
<filename>docs/data/leg-t2/051/05103644.json {"nom":"Vincelles","circ":"3ème circonscription","dpt":"Marne","inscrits":256,"abs":139,"votants":117,"blancs":10,"nuls":2,"exp":105,"res":[{"nuance":"REM","nom":"<NAME>","voix":57},{"nuance":"FN","nom":"<NAME>","voix":48}]}
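The two election records above share a small fixed schema: registered voters (inscrits), abstentions (abs), ballots cast (votants), blank and void ballots (blancs, nuls), expressed votes (exp), and a per-candidate result list (res). The short sketch below only illustrates reading that structure; the candidate names "A" and "B" are hypothetical placeholders, since the originals are anonymized.

# Illustrative sketch of reading one record of this schema (candidate names are placeholders).
import json

record = json.loads(
    '{"nom":"Vincelles","circ":"3ème circonscription","dpt":"Marne",'
    '"inscrits":256,"abs":139,"votants":117,"blancs":10,"nuls":2,"exp":105,'
    '"res":[{"nuance":"REM","nom":"A","voix":57},{"nuance":"FN","nom":"B","voix":48}]}'
)

turnout = record["votants"] / record["inscrits"]      # 117 / 256 ≈ 45.7 %
print(f"turnout {turnout:.1%}")
for candidate in record["res"]:
    share = candidate["voix"] / record["exp"]         # share of expressed votes
    print(candidate["nuance"], f"{share:.1%}")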
110
368
/* * UtilPdu.h * * Created on: 2013-8-27 * Author: <EMAIL> */ #ifndef UTILPDU_H_ #define UTILPDU_H_ #include "ostype.h" #include <set> #include <map> #include <list> #include <string> using namespace std; #ifdef WIN32 #ifdef BUILD_PDU #define DLL_MODIFIER __declspec(dllexport) #else #define DLL_MODIFIER __declspec(dllimport) #endif #else #define DLL_MODIFIER #endif class DLL_MODIFIER CSimpleBuffer { public: CSimpleBuffer(); ~CSimpleBuffer(); uchar_t* GetBuffer() { return m_buffer; } uint32_t GetAllocSize() { return m_alloc_size; } uint32_t GetWriteOffset() { return m_write_offset; } void IncWriteOffset(uint32_t len) { m_write_offset += len; } void Extend(uint32_t len); uint32_t Write(void* buf, uint32_t len); uint32_t Read(void* buf, uint32_t len); private: uchar_t* m_buffer; uint32_t m_alloc_size; uint32_t m_write_offset; }; class CByteStream { public: CByteStream(uchar_t* buf, uint32_t len); CByteStream(CSimpleBuffer* pSimpBuf, uint32_t pos); ~CByteStream() {} unsigned char* GetBuf() { return m_pSimpBuf ? m_pSimpBuf->GetBuffer() : m_pBuf; } uint32_t GetPos() { return m_pos; } uint32_t GetLen() { return m_len; } void Skip(uint32_t len) { m_pos += len; } static int16_t ReadInt16(uchar_t* buf); static uint16_t ReadUint16(uchar_t* buf); static int32_t ReadInt32(uchar_t* buf); static uint32_t ReadUint32(uchar_t* buf); static void WriteInt16(uchar_t* buf, int16_t data); static void WriteUint16(uchar_t* buf, uint16_t data); static void WriteInt32(uchar_t* buf, int32_t data); static void WriteUint32(uchar_t* buf, uint32_t data); void operator << (int8_t data); void operator << (uint8_t data); void operator << (int16_t data); void operator << (uint16_t data); void operator << (int32_t data); void operator << (uint32_t data); void operator >> (int8_t& data); void operator >> (uint8_t& data); void operator >> (int16_t& data); void operator >> (uint16_t& data); void operator >> (int32_t& data); void operator >> (uint32_t& data); void WriteString(const char* str); void WriteString(const char* str, uint32_t len); char* ReadString(uint32_t& len); void WriteData(uchar_t* data, uint32_t len); uchar_t* ReadData(uint32_t& len); private: void _WriteByte(void* buf, uint32_t len); void _ReadByte(void* buf, uint32_t len); private: CSimpleBuffer* m_pSimpBuf; uchar_t* m_pBuf; uint32_t m_len; uint32_t m_pos; }; #define ERROR_CODE_PARSE_FAILED 1 #define ERROR_CODE_WRONG_SERVICE_ID 2 #define ERROR_CODE_WRONG_COMMAND_ID 3 #define ERROR_CODE_ALLOC_FAILED 4 class CPduException { public: CPduException(uint32_t module_id, uint32_t command_id, uint32_t error_code, const char* error_msg) { m_module_id = module_id; m_command_id = command_id; m_error_code = error_code; m_error_msg = error_msg; } virtual ~CPduException() {} uint32_t GetModuleId() { return m_module_id; } uint32_t GetCommandId() { return m_command_id; } uint32_t GetErrorCode() { return m_error_code; } char* GetErrorMsg() { return (char*)m_error_msg.c_str(); } private: uint32_t m_module_id; uint32_t m_command_id; uint32_t m_error_code; string m_error_msg; }; char* idtourl(uint32_t id); uint32_t urltoid(const char* url); #endif /* UTILPDU_H_ */
1,381
5,169
<gh_stars>1000+ { "name": "TinyKit", "version": "0.8.0", "license": "MIT", "summary": "TinyKit provides practical functionalities that will help us to build apps much more quickly.", "homepage": "https://github.com/royhsu/tiny-core", "authors": { "<NAME>": "<EMAIL>" }, "source": { "git": "https://github.com/royhsu/tiny-kit.git", "tag": "0.8.0" }, "frameworks": "Foundation", "source_files": "Sources/*.swift", "ios": { "source_files": "Sources/iOS/*.swift" }, "platforms": { "ios": "9.0" }, "swift_version": "4.0" }
241
1,687
<reponame>agnes-yang/LeetCode-Solutions-in-Good-Style<gh_stars>1000+
import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Set;

public class Solution {

    public int[] topKFrequent(int[] nums, int k) {
        int len = nums.length;
        if (len == 0) {
            return new int[0];
        }

        Map<Integer, Integer> freqs = new HashMap<>();
        for (int num : nums) {
            freqs.put(num, freqs.getOrDefault(num, 0) + 1);
        }

        // Min-heap ordered by ascending frequency, so the least frequent element sits on top
        PriorityQueue<int[]> minHeap = new PriorityQueue<>(len, Comparator.comparingInt(o -> o[1]));
        Set<Map.Entry<Integer, Integer>> entries = freqs.entrySet();
        for (Map.Entry<Integer, Integer> entry : entries) {
            Integer num = entry.getKey();
            Integer freq = entry.getValue();
            if (minHeap.size() == k) {
                // Replace the current minimum only if this element is more frequent
                if (freq > minHeap.peek()[1]) {
                    minHeap.poll();
                    minHeap.add(new int[]{num, freq});
                }
            } else {
                minHeap.add(new int[]{num, freq});
            }
        }

        int[] res = new int[k];
        for (int i = 0; i < k; i++) {
            res[i] = minHeap.poll()[0];
        }
        return res;
    }
}
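The same idea — count frequencies, then keep only the k most frequent entries in a size-bounded min-heap — can be sketched compactly in Python for reference. This is an illustration of the technique, not part of the original solution.

# Illustrative sketch of the same bounded min-heap approach (not part of the original solution).
import heapq
from collections import Counter

def top_k_frequent(nums, k):
    freqs = Counter(nums)          # element -> frequency
    heap = []                      # min-heap of (frequency, element), size kept <= k
    for num, freq in freqs.items():
        heapq.heappush(heap, (freq, num))
        if len(heap) > k:
            heapq.heappop(heap)    # drop the least frequent of the k+1 candidates
    return [num for _, num in heap]

print(top_k_frequent([1, 1, 1, 2, 2, 3], 2))   # e.g. [2, 1]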
723
10,608
<gh_stars>1000+ {"default": {"description": "A dataset adopting the FEVER methodology that consists of 1,535 real-world claims regarding climate-change collected on the internet. Each claim is accompanied by five manually annotated evidence sentences retrieved from the English Wikipedia that support, refute or do not give enough information to validate the claim totalling in 7,675 claim-evidence pairs. The dataset features challenging claims that relate multiple facets and disputed cases of claims where both supporting and refuting evidence are present.\n", "citation": "@misc{diggelmann2020climatefever,\n title={CLIMATE-FEVER: A Dataset for Verification of Real-World Climate Claims},\n author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},\n year={2020},\n eprint={2012.00614},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "http://climatefever.ai", "license": "", "features": {"claim_id": {"dtype": "string", "id": null, "_type": "Value"}, "claim": {"dtype": "string", "id": null, "_type": "Value"}, "claim_label": {"num_classes": 4, "names": ["SUPPORTS", "REFUTES", "NOT_ENOUGH_INFO", "DISPUTED"], "names_file": null, "id": null, "_type": "ClassLabel"}, "evidences": [{"evidence_id": {"dtype": "string", "id": null, "_type": "Value"}, "evidence_label": {"num_classes": 3, "names": ["SUPPORTS", "REFUTES", "NOT_ENOUGH_INFO"], "names_file": null, "id": null, "_type": "ClassLabel"}, "article": {"dtype": "string", "id": null, "_type": "Value"}, "evidence": {"dtype": "string", "id": null, "_type": "Value"}, "entropy": {"dtype": "float32", "id": null, "_type": "Value"}, "votes": [{"dtype": "string", "id": null, "_type": "Value"}]}]}, "post_processed": null, "supervised_keys": null, "builder_name": "climate_fever", "config_name": "default", "version": {"version_str": "1.0.1", "description": null, "major": 1, "minor": 0, "patch": 1}, "splits": {"test": {"name": "test", "num_bytes": 2429272, "num_examples": 1535, "dataset_name": "climate_fever"}}, "download_checksums": {"https://github.com/tdiggelm/climate-fever-dataset/archive/1.0.1.zip": {"num_bytes": 687133, "checksum": "ee059c9fb35d25ca03505e9ba476a9f511a1345f52c3b2d8d1851edd6b90c38e"}}, "download_size": 687133, "post_processing_size": null, "dataset_size": 2429272, "size_in_bytes": 3116405}}
784
22,481
<reponame>andersop91/core """Support for Plum Lightpad lights.""" from __future__ import annotations import asyncio from plumlightpad import Plum from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_HS_COLOR, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, LightEntity, ) from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.entity import DeviceInfo from homeassistant.helpers.entity_platform import AddEntitiesCallback import homeassistant.util.color as color_util from .const import DOMAIN async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up Plum Lightpad dimmer lights and glow rings.""" plum: Plum = hass.data[DOMAIN][entry.entry_id] def setup_entities(device) -> None: entities = [] if "lpid" in device: lightpad = plum.get_lightpad(device["lpid"]) entities.append(GlowRing(lightpad=lightpad)) if "llid" in device: logical_load = plum.get_load(device["llid"]) entities.append(PlumLight(load=logical_load)) if entities: async_add_entities(entities) async def new_load(device): setup_entities(device) async def new_lightpad(device): setup_entities(device) device_web_session = async_get_clientsession(hass, verify_ssl=False) asyncio.create_task( plum.discover( hass.loop, loadListener=new_load, lightpadListener=new_lightpad, websession=device_web_session, ) ) class PlumLight(LightEntity): """Representation of a Plum Lightpad dimmer.""" def __init__(self, load): """Initialize the light.""" self._load = load self._brightness = load.level async def async_added_to_hass(self): """Subscribe to dimmerchange events.""" self._load.add_event_listener("dimmerchange", self.dimmerchange) def dimmerchange(self, event): """Change event handler updating the brightness.""" self._brightness = event["level"] self.schedule_update_ha_state() @property def should_poll(self): """No polling needed.""" return False @property def unique_id(self): """Combine logical load ID with .light to guarantee it is unique.""" return f"{self._load.llid}.light" @property def name(self): """Return the name of the switch if any.""" return self._load.name @property def device_info(self) -> DeviceInfo: """Return the device info.""" return DeviceInfo( identifiers={(DOMAIN, self.unique_id)}, manufacturer="Plum", model="Dimmer", name=self.name, ) @property def brightness(self) -> int: """Return the brightness of this switch between 0..255.""" return self._brightness @property def is_on(self) -> bool: """Return true if light is on.""" return self._brightness > 0 @property def supported_features(self): """Flag supported features.""" if self._load.dimmable: return SUPPORT_BRIGHTNESS return 0 async def async_turn_on(self, **kwargs): """Turn the light on.""" if ATTR_BRIGHTNESS in kwargs: await self._load.turn_on(kwargs[ATTR_BRIGHTNESS]) else: await self._load.turn_on() async def async_turn_off(self, **kwargs): """Turn the light off.""" await self._load.turn_off() class GlowRing(LightEntity): """Representation of a Plum Lightpad dimmer glow ring.""" def __init__(self, lightpad): """Initialize the light.""" self._lightpad = lightpad self._name = f"{lightpad.friendly_name} Glow Ring" self._state = lightpad.glow_enabled self._glow_intensity = lightpad.glow_intensity self._red = lightpad.glow_color["red"] self._green = lightpad.glow_color["green"] self._blue = lightpad.glow_color["blue"] async def async_added_to_hass(self): """Subscribe to 
configchange events.""" self._lightpad.add_event_listener("configchange", self.configchange_event) def configchange_event(self, event): """Handle Configuration change event.""" config = event["changes"] self._state = config["glowEnabled"] self._glow_intensity = config["glowIntensity"] self._red = config["glowColor"]["red"] self._green = config["glowColor"]["green"] self._blue = config["glowColor"]["blue"] self.schedule_update_ha_state() @property def hs_color(self): """Return the hue and saturation color value [float, float].""" return color_util.color_RGB_to_hs(self._red, self._green, self._blue) @property def should_poll(self): """No polling needed.""" return False @property def unique_id(self): """Combine LightPad ID with .glow to guarantee it is unique.""" return f"{self._lightpad.lpid}.glow" @property def name(self): """Return the name of the switch if any.""" return self._name @property def device_info(self) -> DeviceInfo: """Return the device info.""" return DeviceInfo( identifiers={(DOMAIN, self.unique_id)}, manufacturer="Plum", model="Glow Ring", name=self.name, ) @property def brightness(self) -> int: """Return the brightness of this switch between 0..255.""" return min(max(int(round(self._glow_intensity * 255, 0)), 0), 255) @property def glow_intensity(self): """Brightness in float form.""" return self._glow_intensity @property def is_on(self) -> bool: """Return true if light is on.""" return self._state @property def icon(self): """Return the crop-portrait icon representing the glow ring.""" return "mdi:crop-portrait" @property def supported_features(self): """Flag supported features.""" return SUPPORT_BRIGHTNESS | SUPPORT_COLOR async def async_turn_on(self, **kwargs): """Turn the light on.""" if ATTR_BRIGHTNESS in kwargs: brightness_pct = kwargs[ATTR_BRIGHTNESS] / 255.0 await self._lightpad.set_config({"glowIntensity": brightness_pct}) elif ATTR_HS_COLOR in kwargs: hs_color = kwargs[ATTR_HS_COLOR] red, green, blue = color_util.color_hs_to_RGB(*hs_color) await self._lightpad.set_glow_color(red, green, blue, 0) else: await self._lightpad.set_config({"glowEnabled": True}) async def async_turn_off(self, **kwargs): """Turn the light off.""" if ATTR_BRIGHTNESS in kwargs: brightness_pct = kwargs[ATTR_BRIGHTNESS] / 255.0 await self._lightpad.set_config({"glowIntensity": brightness_pct}) else: await self._lightpad.set_config({"glowEnabled": False})
3,025
4,111
<reponame>ThanoshanMV/shiro /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.shiro.session.mgt.quartz; import org.quartz.Job; import org.quartz.JobDataMap; import org.quartz.JobExecutionContext; import org.quartz.JobExecutionException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.shiro.session.mgt.ValidatingSessionManager; /** * A quartz job that basically just calls the {@link org.apache.shiro.session.mgt.ValidatingSessionManager#validateSessions()} * method on a configured session manager. The session manager will automatically be injected by the * superclass if it is in the job data map or the scheduler map. * * @since 0.1 */ public class QuartzSessionValidationJob implements Job { /*-------------------------------------------- | C O N S T A N T S | ============================================*/ /** * Key used to store the session manager in the job data map for this job. */ static final String SESSION_MANAGER_KEY = "sessionManager"; /*-------------------------------------------- | I N S T A N C E V A R I A B L E S | ============================================*/ private static final Logger log = LoggerFactory.getLogger(QuartzSessionValidationJob.class); /*-------------------------------------------- | C O N S T R U C T O R S | ============================================*/ /*-------------------------------------------- | A C C E S S O R S / M O D I F I E R S | ============================================*/ /*-------------------------------------------- | M E T H O D S | ============================================*/ /** * Called when the job is executed by quartz. This method delegates to the * <tt>validateSessions()</tt> method on the associated session manager. * * @param context the Quartz job execution context for this execution. */ public void execute(JobExecutionContext context) throws JobExecutionException { JobDataMap jobDataMap = context.getMergedJobDataMap(); ValidatingSessionManager sessionManager = (ValidatingSessionManager) jobDataMap.get(SESSION_MANAGER_KEY); if (log.isDebugEnabled()) { log.debug("Executing session validation Quartz job..."); } sessionManager.validateSessions(); if (log.isDebugEnabled()) { log.debug("Session validation Quartz job complete."); } } }
1,033
878
<filename>pandashells/bin/p_regress.py #! /usr/bin/env python # standard library imports import sys import argparse import textwrap import importlib from pandashells.lib import module_checker_lib, arg_lib # import required dependencies module_checker_lib.check_for_modules(['pandas', 'statsmodels', 'scipy']) from pandashells.lib import io_lib import scipy as scp # NOQA import statsmodels.formula.api as sm # this silly function helps use side_effect in mocking tests def get_module(name): # pragma nocover return importlib.import_module(name) def main(): msg = textwrap.dedent( """ Performs (multivariable) linear regression. The fitting model is specified using the R-like, patsy syntax. Input is from stdin and output is either fitting information or the input data with columns added for the fit and residuals. ----------------------------------------------------------------------- Examples: * Fit a line to the sea-level data p.example_data -d sealevel | p.regress -m 'sealevel_mm ~ year' * Fit a trend plus annual cycle to sealevel data p.example_data -d sealevel \\ | p.df 'df["sin"] = np.sin(2 * np.pi * df.year)' \\ | p.df 'df["cos"] = np.cos(2 * np.pi * df.year)' \\ | p.regress -m 'sealevel_mm ~ year + cos + sin' * Examine residual ECDF of trend plus annual fit p.example_data -d sealevel \\ | p.df 'df["sin"] = np.sin(2 * np.pi * df.year)' \\ | p.df 'df["cos"] = np.cos(2 * np.pi * df.year)' \\ | p.regress -m 'sealevel_mm ~ year + cos + sin' --fit \\ | p.cdf -c 'resid_' --title 'ECDF of trend + annual' * Detrend sealevel data to more clearly reveal oscillations p.example_data -d sealevel \\ | p.regress -m 'sealevel_mm ~ year' --fit \\ | p.plot -x year -y resid_ --ylabel 'Trend removed (mm)' \\ --title 'Global Sea Surface Height' * Set origin of sealevel data to 0 and regress with no intercept p.example_data -d sealevel\\ | p.df 'df["year"] = df.year - df.year.iloc[0]'\\ 'df["sealevel_mm"] = df.sealevel_mm - df.sealevel_mm.iloc[0]'\\ | p.regress -m 'sealevel_mm ~ year - 1' --fit\\ | p.plot -x year -y sealevel_mm fit_ --style '.' 
'-'\\ --alpha .2 1 --legend best --title 'Force Zero Intercept' ----------------------------------------------------------------------- """ ) # read command line arguments parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=msg) arg_lib.add_args(parser, 'io_in', 'io_out') # specify columns to histogram parser.add_argument("-m", "--model", type=str, nargs=1, required=True, help="The model expressed in patsy syntax") msg = "Return input with fit and residual appended" parser.add_argument("--fit", action="store_true", dest='retfit', default=False, help=msg) parser.add_argument("--plot", action="store_true", default=False, help="Make residual plots") # parse arguments args = parser.parse_args() # get the input dataframe df = io_lib.df_from_input(args) # fit the model and add fit, resid columns result = sm.ols(formula=args.model[0], data=df).fit() df['fit_'] = result.fittedvalues df['resid_'] = result.resid # add and output the fit results if requested if args.retfit: io_lib.df_to_output(args, df) return # print the fit summary sys.stdout.write('\n{}\n'.format(result.summary())) sys.stdout.flush() # do plots if requested if args.plot: module_checker_lib.check_for_modules(['matplotlib', 'seaborn']) plot_lib = get_module('pandashells.lib.plot_lib') mpl = get_module('matplotlib') pl = get_module('pylab') sns = get_module('seaborn') pl.subplot(211) pl.plot(df.fit_, df.resid_, '.', alpha=.5) pl.xlabel('Fit') pl.ylabel('Residual') pl.title(args.model[0]) pl.subplot(212) sns.distplot(df.resid_, bins=50) pl.xlabel('Residual with R^2 = {:0.4f}'.format(result.rsquared)) pl.ylabel('Counts') # annoying issue with osx backend forces if statement here if mpl.get_backend().lower() in ['agg', 'macosx']: pl.gcf().set_tight_layout(True) else: pl.gcf().tight_layout() plot_lib.show(args) if __name__ == '__main__': # pragma: no cover main()
2,147
301
<gh_stars>100-1000 /****************************************************************** * * Copyright 2017 Samsung Electronics All Rights Reserved. * * * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * ******************************************************************/ #include "CSCsdkCloudHelper.h" #include "CSCsdkUtilityHelper.h" int CSCsdkCloudHelper::s_isCbInvoked = CALLBACK_NOT_INVOKED; std::string CSCsdkCloudHelper::s_cborFilePath = CLOUD_ACL_CONTROLLER_DAT; std::string CSCsdkCloudHelper::s_uid = ""; std::string CSCsdkCloudHelper::s_accesstoken = ""; std::string CSCsdkCloudHelper::s_refreshToken = ""; std::string CSCsdkCloudHelper::s_groupId = ""; std::string CSCsdkCloudHelper::s_aclId = ""; std::string CSCsdkCloudHelper::s_aceid = "fdc36aa0-59f8-4cb7-b457-0f51becf9899"; std::string CSCsdkCloudHelper::s_subjectuuid = "72616e64-5069-6e44-6576-557569643030"; std::string CSCsdkCloudHelper::s_href = "/a/light/0"; std::string CSCsdkCloudHelper::s_deviceId = "99999999-0000-0000-0000-0001200"; std::string CSCsdkCloudHelper::s_groupPolicy = ""; OCPersistentStorage CSCsdkCloudHelper::s_pst = { 0, 0, 0, 0, 0 }; int CSCsdkCloudHelper::waitCallbackRet() { IOTIVITYTEST_LOG(DEBUG, "Waiting for Callback to be invoked"); for (int i = 0; CALLBACK_TIMEOUT > i; ++i) { if (CALLBACK_INVOKED == s_isCbInvoked) { return CALLBACK_INVOKED; } sleep (DELAY_SHORT); printf("Second Elapsed : %d seconds\n", i); if (OC_STACK_OK != OCProcess()) { printf("OCStack process error\n"); return CALLBACK_NOT_INVOKED; } } return CALLBACK_NOT_INVOKED; } void CSCsdkCloudHelper::printRepresentation(OCRepresentation rep) { for (auto itr = rep.begin(); itr != rep.end(); ++itr) { cout << "\t" << itr->attrname() << ":\t" << itr->getValueToString() << endl; if (itr->attrname().compare("accesstoken") == 0) { s_accesstoken = itr->getValueToString(); } if (itr->attrname().compare("uid") == 0) { s_uid = itr->getValueToString(); } if (itr->attrname().compare("gid") == 0) { s_groupId = itr->getValueToString(); } if (itr->attrname().compare("aclid") == 0) { s_aclId = itr->getValueToString(); } if (itr->attrname().compare("aceid") == 0) { s_aceid = itr->getValueToString(); } if (itr->attrname().compare("href") == 0) { s_href = itr->getValueToString(); } if (itr->attrname().compare("subjectuuid") == 0) { s_subjectuuid = itr->getValueToString(); } if (itr->attrname().compare("di") == 0) { s_deviceId = itr->getValueToString(); } if (itr->attrname().compare("gp") == 0) { s_groupPolicy = itr->getValueToString(); } if (itr->type() == AttributeType::Vector) { switch (itr->base_type()) { case AttributeType::OCRepresentation: for (auto itr2 : (*itr).getValue< vector< OCRepresentation > >()) { printRepresentation(itr2); } break; case AttributeType::Integer: for (auto itr2 : (*itr).getValue< vector< int > >()) { cout << "\t\t" << itr2 << endl; } break; case AttributeType::String: for (auto itr2 : (*itr).getValue< vector< string > >()) { cout << "\t\t" << itr2 << endl; } break; default: cout << "Unhandled base type " << itr->base_type() << endl; break; } } else if 
(itr->type() == AttributeType::OCRepresentation) { printRepresentation((*itr).getValue< OCRepresentation >()); } } } FILE* CSCsdkCloudHelper::controleeOpen(const char *path, const char *mode) { if (0 == strncmp(path, OC_SECURITY_DB_DAT_FILE_NAME, strlen(OC_SECURITY_DB_DAT_FILE_NAME))) { return fopen(path, mode); } else { return fopen(s_cborFilePath.c_str(), mode); } } CSCsdkCloudHelper::CSCsdkCloudHelper() { __FUNC_IN__ m_failureMessage = ""; m_expiresin = 0; m_accesstoken = ""; m_redirecturi = ""; m_refreshtoken = ""; m_sid = ""; m_tokentype = ""; m_uid = ""; } bool CSCsdkCloudHelper::initCloudACLClient() { __FUNC_IN__ CSCsdkCloudHelper::s_pst.open = CSCsdkCloudHelper::controleeOpen; CSCsdkCloudHelper::s_pst.read = fread; CSCsdkCloudHelper::s_pst.write = fwrite; CSCsdkCloudHelper::s_pst.close = fclose; CSCsdkCloudHelper::s_pst.unlink = unlink; if (OC_STACK_OK != OCRegisterPersistentStorageHandler(&CSCsdkCloudHelper::s_pst)) { IOTIVITYTEST_LOG(ERROR, "[CSHelper] OCRegisterPersistentStorageHandler error"); return false; } // initialize OC stack and provisioning manager if (OC_STACK_OK != OCInit(NULL, 0, OC_CLIENT_SERVER)) { IOTIVITYTEST_LOG(ERROR, "[CSHelper] OCStack init error"); return false; } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::initCloudACLClient(string filePath) { __FUNC_IN__ s_cborFilePath = filePath; CSCsdkCloudHelper::s_pst.open = CSCsdkCloudHelper::controleeOpen; CSCsdkCloudHelper::s_pst.read = fread; CSCsdkCloudHelper::s_pst.write = fwrite; CSCsdkCloudHelper::s_pst.close = fclose; CSCsdkCloudHelper::s_pst.unlink = unlink; if (OC_STACK_OK != OCRegisterPersistentStorageHandler(&CSCsdkCloudHelper::s_pst)) { IOTIVITYTEST_LOG(ERROR, "[CSHelper] OCRegisterPersistentStorageHandler error"); return false; } // initialize OC stack and provisioning manager if (OC_STACK_OK != OCInit(NULL, 0, OC_CLIENT_SERVER)) { IOTIVITYTEST_LOG(ERROR, "[CSHelper] OCStack init error"); return false; } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::signUp(OCAccountManager::Ptr accountMgr, std::string authprovider, std::string authCode, PostCallback cloudConnectHandler, std::string &uid, std::string &devAccessToken, std::string &devRefreshToken, OCStackResult expectedResult) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = accountMgr->signUp(authprovider, authCode, cloudConnectHandler); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] signUp returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { IOTIVITYTEST_LOG(ERROR, "[Cloud] CALLBACK_NOT_INVOKED"); return false; } } uid = s_uid; devAccessToken = s_accesstoken; OC_UNUSED(devRefreshToken); //devRefreshToken = s_refreshToken; /* Will be enabled if a cloud Service that returns refresh token is available*/ __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::signIn(OCAccountManager::Ptr accountMgr, const std::string& userUuid, const std::string& accesstoken, PostCallback cloudConnectHandler, OCStackResult expectedResult) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = accountMgr->signIn(userUuid, accesstoken, cloudConnectHandler); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] signIn returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result) { if 
(CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { IOTIVITYTEST_LOG(ERROR, "[Cloud] CALLBACK_NOT_INVOKED"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::signOut(OCAccountManager::Ptr accountMgr, const std::string accessToken, PostCallback cloudConnectHandler, OCStackResult expectedResult) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = accountMgr->signOut(accessToken, cloudConnectHandler); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] signIn returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { IOTIVITYTEST_LOG(ERROR, "[Cloud] CALLBACK_NOT_INVOKED"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::provisionTrustCertChain(void *ctx, OicSecCredType_t type, uint16_t credId, const OCProvisionDev_t *selectedDeviceInfo, OCProvisionResultCB resultCallback, OCStackResult expectedResult) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCProvisionTrustCertChain(ctx, type, credId, selectedDeviceInfo, resultCallback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] provisionTrustCertChain returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { IOTIVITYTEST_LOG(ERROR, "[Cloud] CALLBACK_NOT_INVOKED"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::readFile(const char *name, OCByteString *out) { FILE *file = NULL; int length = 0; uint8_t *buffer = NULL; bool result = false; size_t count, realCount; //Open file file = fopen(name, "rb"); if (!file) { OIC_LOG_V(ERROR, TAG, "Unable to open file %s", name); return result; } //Get file length if (fseek(file, 0, SEEK_END)) { OIC_LOG(ERROR, TAG, "Failed to SEEK_END"); goto exit; } length = ftell(file); if (length < 0) { OIC_LOG(ERROR, TAG, "Failed to ftell"); goto exit; } if (fseek(file, 0, SEEK_SET)) { OIC_LOG(ERROR, TAG, "Failed to SEEK_SET"); goto exit; } //Allocate memory buffer = (uint8_t *) malloc(length); if (!buffer) { OIC_LOG(ERROR, TAG, "Failed to allocate buffer"); goto exit; } //Read file contents into buffer count = 1; realCount = fread(buffer, length, count, file); if (realCount != count) { OIC_LOG_V(ERROR, TAG, "Read %d bytes %zu times instead of %zu", length, realCount, count); goto exit; } out->bytes = buffer; out->len = length; result = true; exit: fclose(file); return result; } bool CSCsdkCloudHelper::saveTrustCertChain(uint8_t *trustCertChain, size_t chainSize, OicEncodingType_t encodingType, uint16_t *credId, OCStackResult expectedResult) { __FUNC_IN__ OIC_LOG_BUFFER(DEBUG, "CLOUD ACL", trustCertChain, chainSize); s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCSaveTrustCertChain(trustCertChain, chainSize, encodingType, credId); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCSaveTrustCertChain returns %s", CommonUtil::getOCStackResult(result)); IOTIVITYTEST_LOG(INFO, "CredId of Saved Trust Cert. 
Chain into Cred of SVR : %d", *credId); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudCertificateIssueRequest(void* ctx, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudCertificateIssueRequest(ctx, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudCertificateIssueRequest returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudGetCRL(void* ctx, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudGetCRL(ctx, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudGetCRL returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudPostCRL(void* ctx, const char *thisUpdate, const char *nextUpdate, const OCByteString *crl, const stringArray_t *serialNumbers, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudPostCRL(ctx, thisUpdate, nextUpdate, crl, serialNumbers, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudPostCRL returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclIdCreate(void* ctx, const char *ownerId, const char *deviceId, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudAclIdCreate(ctx, ownerId, deviceId, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclIdCreate returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclIdDelete(void* ctx, 
const char *aclId, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudAclIdDelete(ctx, aclId, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclIdDelete returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudGetAclIdByDevice(void* ctx, const char *deviceId, const char *endPoint, OCCloudResponseCB callback, std::string &devAclID, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ OC_UNUSED(devAclID); s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudGetAclIdByDevice(ctx, deviceId, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] cloudAclIdGetByDevice returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclIndividualGetInfo(void* ctx, const char *aclId, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudAclIndividualGetInfo(ctx, aclId, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclIndividualGetInfo returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclIndividualAclUpdate(void* ctx, const char *aclId, cloudAce_t *aces, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; //IoTivity OCStackResult result = OCCloudAclIndividualAclUpdate(ctx, aclId, aces, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclIndividualUpdateAce returns %s", CommonUtil::getOCStackResult(result)); //Convergence // OCStackResult result = OCCloudAclIndividualUpdateAce(ctx, aclId, aces, endPoint, callback); // IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclIndividualUpdateAce returns %s", // CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool 
CSCsdkCloudHelper::cloudAclCreateGroup(void* ctx, const char *groupType, const char *groupMasterId, const char *endPoint, OCCloudResponseCB callback, std::string &groupId, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudAclCreateGroup(ctx, groupType, groupMasterId, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclCreateGroup returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } groupId = s_groupId; } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclFindMyGroup(void* ctx, const char *memberId, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudAclFindMyGroup(ctx, memberId, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclFindMyGroup returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclDeleteGroup(void* ctx, const char *groupId, const char *groupMasterId, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudAclDeleteGroup(ctx, groupId, groupMasterId, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclDeleteGroup returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclJoinToInvitedGroup(void* ctx, const char *groupId, char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudAclJoinToInvitedGroup(ctx, groupId, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclDeleteGroup returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclObserveGroup(void* ctx, const char *groupId, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { 
__FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudAclObserveGroup(ctx, groupId, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclObserveGroup returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclShareDeviceIntoGroup(void* ctx, const char *groupId, const stringArray_t *memberIds, const stringArray_t *deviceIds, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudAclShareDeviceIntoGroup(ctx, groupId, memberIds, deviceIds, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclShareDeviceIntoGroup returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclDeleteDeviceFromGroup(void* ctx, const char *groupId, const stringArray_t *memberIds, const stringArray_t *deviceIds, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudAclDeleteDeviceFromGroup(ctx, groupId, memberIds, deviceIds, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclShareDeviceIntoGroup returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclGroupGetInfo(void* ctx, const char *groupId, const char *memberId, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudAclGroupGetInfo(ctx, groupId, memberId, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclGroupGetInfo returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclIndividualAceUpdate(void* ctx, const char *aclId, char *aceId, const cloudAce_t *aces, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { 
__FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudAclIndividualAceUpdate(ctx, aclId, aceId, aces, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclIndividualAceUpdate returns %s", CommonUtil::getOCStackResult(result)); // OCStackResult result = OCCloudAclIndividualUpdate(ctx, aclId, aceId, aces, endPoint, // callback); // IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclIndividualUpdate returns %s", // CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclAcesDelete(void* ctx, const char *aclId, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; // IoTivity OCStackResult result = OCCloudAclAcesDelete(ctx, aclId, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclAcesDelete returns %s", CommonUtil::getOCStackResult(result)); // Convergence // OCStackResult result = OCCloudAclIndividualDelete(ctx, aclId, endPoint, callback); // IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclIndividualDelete returns %s", // CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclIndividualAceDelete(void* ctx, const char *aclId, const char *aceId, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; //IoTivity OCStackResult result = OCCloudAclIndividualAceDelete(ctx, aclId, aceId, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclIndividualAceDelete returns %s", CommonUtil::getOCStackResult(result)); //Convergence // OCStackResult result = OCCloudAclIndividualDeleteAce(ctx, aclId, aceId, endPoint, callback); // IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclIndividualDeleteAce returns %s", // CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclInviteUser(void* ctx, const char *userId, const stringArray_t *groupIds, const stringArray_t *memberIds, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudAclInviteUser(ctx, userId, groupIds, memberIds, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclInviteUser returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { 
m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclGetInvitation(void* ctx, const char *userId, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudAclGetInvitation(ctx, userId, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclGetInvitation returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclDeleteInvitation(void* ctx, const char *userId, const char *groupId, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudAclDeleteInvitation(ctx, userId, groupId, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclDeleteInvitation returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclCancelInvitation(void* ctx, const char *userId, const char *groupId, const char *memberId, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; OCStackResult result = OCCloudAclCancelInvitation(ctx, userId, groupId, memberId, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclCancelInvitation returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } bool CSCsdkCloudHelper::cloudAclPolicyCheck(void* ctx, const char *subjectId, const char *deviceId, const char *method, const char *uri, const char *endPoint, OCCloudResponseCB callback, OCStackResult expectedResult, bool checkCallback) { __FUNC_IN__ s_isCbInvoked = CALLBACK_NOT_INVOKED; IOTIVITYTEST_LOG(INFO, "subjectId = %s", subjectId); IOTIVITYTEST_LOG(INFO, "deviceId = %s", deviceId); IOTIVITYTEST_LOG(INFO, "method = %s", method); IOTIVITYTEST_LOG(INFO, "uri = %s", uri); OCStackResult result = OCCloudAclPolicyCheck(ctx, subjectId, deviceId, method, uri, endPoint, callback); IOTIVITYTEST_LOG(INFO, "[Cloud Acl] OCCloudAclPolicyCheck 
returns %s", CommonUtil::getOCStackResult(result)); if (expectedResult != result) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage(result, expectedResult); return false; } if (OC_STACK_OK == result && checkCallback == true) { if (CALLBACK_NOT_INVOKED == CSCsdkCloudHelper::waitCallbackRet()) { m_failureMessage = CSCsdkUtilityHelper::setFailureMessage( "[Cloud] Callback Not Invoked"); return false; } } __FUNC_OUT__ return true; } void CSCsdkCloudHelper::provisionResultCB(void* ctx, int nOfRes, OCProvisionResult_t* arr, bool hasError) { if (!hasError) { IOTIVITYTEST_LOG(INFO, "Provision SUCCEEDED - ctx: %s\n", (char* ) ctx); } else { IOTIVITYTEST_LOG(ERROR, "Provision Failed - ctx: %s", (char* ) ctx); CSCsdkUtilityHelper::printResultList((const OCProvisionResult_t*) arr, nOfRes); } s_isCbInvoked = true; } void CSCsdkCloudHelper::cloudResponseCB(void* ctx, OCClientResponse* response, void* data) { __FUNC_IN__ OC_UNUSED(data); IOTIVITYTEST_LOG(INFO, "%s: Received result = %d for ctx : %s", __func__, response->result, (char* )ctx); if (OC_STACK_OK == response->result || OC_STACK_RESOURCE_CHANGED == response->result || OC_STACK_DUPLICATE_REQUEST == response->result) { printRepresentation(parseOCClientResponse(response)); } if (OC_STACK_RESOURCE_CHANGED >= response->result || OC_STACK_DUPLICATE_REQUEST == response->result) { s_isCbInvoked = true; } } void CSCsdkCloudHelper::aclResponseCB(void* ctx, OCClientResponse* response, void* data) { __FUNC_IN__ OC_UNUSED(data); IOTIVITYTEST_LOG(INFO, "%s: Received result = %d for ctx : %s", __func__, response->result, (char* )ctx); if (OC_STACK_OK == response->result || OC_STACK_RESOURCE_CHANGED == response->result || OC_STACK_DUPLICATE_REQUEST == response->result) { printRepresentation(parseOCClientResponse(response)); } if (OC_STACK_RESOURCE_CHANGED >= response->result || OC_STACK_DUPLICATE_REQUEST == response->result) { s_isCbInvoked = true; } __FUNC_OUT__ } void CSCsdkCloudHelper::createGroupResponseCB(void* ctx, OCClientResponse* response, void* data) { __FUNC_IN__ OC_UNUSED(data); IOTIVITYTEST_LOG(INFO, "%s: Received result = %d for ctx : %s", __func__, response->result, (char* )ctx); if (response->result == OC_STACK_OK || response->result == OC_STACK_RESOURCE_CHANGED) { printRepresentation(parseOCClientResponse(response)); } if (response->result <= OC_STACK_RESOURCE_CHANGED) { s_isCbInvoked = true; } __FUNC_OUT__ } void CSCsdkCloudHelper::handleLoginoutCB(const HeaderOptions &, const OCRepresentation &rep, const int ecode) { IOTIVITYTEST_LOG(INFO, "Auth response received code: %d", ecode); if (rep.getPayload() != NULL) { printRepresentation(rep); } s_isCbInvoked = CALLBACK_INVOKED; } void CSCsdkCloudHelper::onPublish(const OCRepresentation &, const int &eCode) { IOTIVITYTEST_LOG(DEBUG, "Publish resource response received code: %d", eCode); s_isCbInvoked = CALLBACK_INVOKED; } OCRepresentation CSCsdkCloudHelper::parseOCClientResponse(OCClientResponse* clientResponse) { __FUNC_OUT__ if (nullptr == clientResponse) { IOTIVITYTEST_LOG(ERROR, "clientResponse is NULL"); return OCRepresentation(); } if (nullptr == clientResponse->payload || PAYLOAD_TYPE_REPRESENTATION != clientResponse->payload->type) { IOTIVITYTEST_LOG(ERROR, "clientResponse->payload is NULL"); return OCRepresentation(); } if (PAYLOAD_TYPE_REPRESENTATION != clientResponse->payload->type) { IOTIVITYTEST_LOG(ERROR, "clientResponse is not of PAYLOAD_TYPE_REPRESENTATION"); return OCRepresentation(); } MessageContainer oc; oc.setPayload(clientResponse->payload); std::vector< 
OCRepresentation >::const_iterator it = oc.representations().begin(); if (it == oc.representations().end()) { return OCRepresentation(); } // first one is considered the root, everything else is considered a child of this one. OCRepresentation root = *it; root.setDevAddr(clientResponse->devAddr); root.setUri(clientResponse->resourceUri); ++it; std::for_each(it, oc.representations().end(), [&root](const OCRepresentation& repItr) { root.addChild(repItr);}); __FUNC_OUT__ return root; } std::string CSCsdkCloudHelper::getFailureMessage() { return m_failureMessage; }
18,368
5,534
package com.alibaba.dubbo.rpc.benchmark;

/**
 * Echo implementation of DemoService used by the RPC benchmark:
 * sendRequest simply returns the request object it receives.
 *
 * @author tony.chenl
 */
public class DemoServiceImpl implements DemoService {

    ResponseObject responseObject = new ResponseObject(100);

    public Object sendRequest(Object request) {
        return request;
    }
}
98
2,338
#define __CLC_FUNCTION atomic_max
#include <clc/atomic/atomic_decl.inc>
28
2,833
<filename>netmiko/no_enable.py<gh_stars>1000+
from typing import Optional
import re


class NoEnable:
    """
    Class for platforms that have no enable mode.

    Netmiko translates the meaning of "enable" mode to be a proxy for "can go into config
    mode". In other words, that you ultimately have privileges to execute configuration
    changes.

    The expectation on platforms that have no method for elevating privileges is that the
    standard default privileges allow configuration changes.

    Consequently check_enable_mode returns True by default for platforms that don't
    explicitly support enable mode.
    """

    def check_enable_mode(self, check_string: str = "") -> bool:
        return True

    def enable(
        self,
        cmd: str = "",
        pattern: str = "",
        enable_pattern: Optional[str] = None,
        re_flags: int = re.IGNORECASE,
    ) -> str:
        return ""

    def exit_enable_mode(self, exit_command: str = "") -> str:
        return ""
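A minimal usage sketch for the mixin above, assuming only that netmiko.no_enable is importable as the file header suggests; FakeBaseConnection and FakeDriver are hypothetical stand-ins for a real Netmiko connection class and platform driver, used just to show that NoEnable short-circuits the privilege-escalation methods via the MRO:

from netmiko.no_enable import NoEnable


class FakeBaseConnection:
    """Illustrative stand-in for a real Netmiko connection class."""

    def check_enable_mode(self, check_string: str = "") -> bool:
        raise NotImplementedError("a real platform would probe the prompt here")


class FakeDriver(NoEnable, FakeBaseConnection):
    """Hypothetical driver for a platform without privilege escalation."""


driver = FakeDriver()
assert driver.check_enable_mode() is True   # NoEnable wins in the MRO
assert driver.enable() == ""                # elevating privileges is a no-op
assert driver.exit_enable_mode() == ""

Listing NoEnable before the base connection class is what makes its no-op methods take precedence.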
335
863
<filename>app/src/main/java/fr/free/nrw/commons/wikidata/WikidataEditListenerImpl.java
package fr.free.nrw.commons.wikidata;

/**
 * Listener for wikidata edits
 */
public class WikidataEditListenerImpl extends WikidataEditListener {

    public WikidataEditListenerImpl() {
    }

    /**
     * Fired when wikidata P18 edit is successful. If there's an active listener, then it is fired
     */
    @Override
    public void onSuccessfulWikidataEdit() {
        if (wikidataP18EditListener != null) {
            wikidataP18EditListener.onWikidataEditSuccessful();
        }
    }
}
223
506
<filename>codeforces/1040/a.py
#!/usr/bin/env python3
# https://codeforces.com/contest/1040/problem/A

n, a, b = map(int, input().split())
p = [a, b, min(a, b)]
c = [int(x) for x in input().split()]

s = 0
d = n // 2
if n % 2:
    s += p[c[d]] if c[d] == 2 else 0
    j = d + 1
else:
    j = d
i = d - 1
while i >= 0:
    if c[i] == c[j]:
        if c[i] == 2:
            s += 2 * p[c[i]]
    elif c[j] == 2:
        s += p[c[i]]
    elif c[i] == 2:
        s += p[c[j]]
    else:
        s = -1
        break
    i -= 1
    j += 1
print(s)
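The script pairs mirrored positions and only spends money on entries marked 2 (suit not bought yet). Below is the same pairing logic restated as a pure function with one sanity check; the sample values (n=5, a=100, b=1, colours 0 1 2 1 2, expected answer 101) are assumed from the linked problem statement rather than taken from this file:

def min_cost(n, a, b, c):
    """Minimum cost to make the colour sequence a palindrome (-1 if impossible).

    a and b are the prices of colours 0 and 1; an entry of 2 means "not bought yet".
    """
    cheapest = min(a, b)
    price = [a, b, cheapest]
    total = 0
    if n % 2 and c[n // 2] == 2:
        total += cheapest              # lone middle position: take the cheaper colour
    for i in range(n // 2):
        x, y = c[i], c[n - 1 - i]
        if x == 2 and y == 2:
            total += 2 * cheapest      # both free: buy two suits of the cheaper colour
        elif x == 2:
            total += price[y]          # must match the suit already bought on the right
        elif y == 2:
            total += price[x]          # must match the suit already bought on the left
        elif x != y:
            return -1                  # both fixed and different: no palindrome possible
    return total


# Assumed sample: 5 dancers, colour 0 costs 100, colour 1 costs 1, colours 0 1 2 1 2.
assert min_cost(5, 100, 1, [0, 1, 2, 1, 2]) == 101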
287
465
<filename>src/main/java/com/ansel/controller/GoodsBillController.java package com.ansel.controller; import java.util.List; import java.util.Map; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.domain.Page; import org.springframework.data.domain.PageRequest; import org.springframework.data.domain.Pageable; import org.springframework.web.bind.annotation.ControllerAdvice; import org.springframework.web.bind.annotation.CrossOrigin; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; import com.ansel.bean.CargoReceiptDetail; import com.ansel.bean.GoodsBill; import com.ansel.bean.GoodsBillEvent; import com.ansel.service.IGoodsBillService; import com.ansel.util.Result; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; @RestController @CrossOrigin @Api(value = "接货 Controller") @ControllerAdvice @RequestMapping(value = "/goodsBill") public class GoodsBillController extends ReturnType { @Autowired private IGoodsBillService goodsBillService; /** * 填写一份货运单合同 */ @ApiOperation(value = "添加货运单", notes = "添加一个新货运单") @RequestMapping(value = "/add", method = RequestMethod.POST, produces = "application/json") public Map<?, ?> addGoodsBill(GoodsBill goodsBill) { return goodsBillService.save(goodsBill); } /** * 添加货物 */ @RequestMapping(value = "/addGoods/{goodsBillDetailId}", method = RequestMethod.POST, produces = "application/json") public String addGoods(@PathVariable("goodsBillDetailId") String goodsBillDetailId, CargoReceiptDetail cargoReceiptDetail) { boolean flag = false; flag = goodsBillService.saveGoods(goodsBillDetailId, cargoReceiptDetail); if (!flag) { return ERROR; } return SUCCESS; } /** * 查询所有运单 */ @RequestMapping(value = "/selectByEvent", method = RequestMethod.GET) public Result selectAllGoodsBills(@RequestParam("pageNum") int pageNum, @RequestParam("limit") int limit) { Pageable pageable = PageRequest.of(pageNum-1, limit); Page<GoodsBillEvent> page = goodsBillService.selectAllGoogsBillByPage(pageable); Result result = new Result(200, "SUCCESS", (int) page.getTotalElements(), page.getContent()); return result; } /** * 查询运单状态 */ @RequestMapping(value = "/selectByEvent/{eventName}", method = RequestMethod.GET) public Result selectGoodsBillByEvent(@PathVariable("eventName") String eventName, @RequestParam("pageNum") int pageNum, @RequestParam("limit") int limit) { Pageable pageable = PageRequest.of(pageNum-1, limit); Page<GoodsBillEvent> page = goodsBillService.selectGoodsBillByEvent(eventName, pageable); Result result = new Result(200, "SUCCESS", (int) page.getTotalElements(), page.getContent()); return result; } /** * 通过id查询单个货运单 */ @RequestMapping(value = "/selectByCode/{goodsBillCode}", method = RequestMethod.GET) public GoodsBill selectGoodsBillByCode(@PathVariable("goodsBillCode") String goodsBillCode) { GoodsBill goodsBill = goodsBillService.selectByGoodsBillCode(goodsBillCode); return goodsBill; } /** * 修改货运单 */ @RequestMapping(value = "/updateByCode/{goodsBillCode}", method = RequestMethod.PUT) public String updateGoodsBill(GoodsBill goodsBill, @PathVariable("goodsBillCode") String goodsBillCode) { boolean flag = false; flag = goodsBillService.update(goodsBill); if (!flag) { return ERROR; } return SUCCESS; } /** * 删除货运单 */ @RequestMapping(value = 
"/deleteByCode/{goodsBillCode}", method = RequestMethod.PUT) public String deleteGoodsBill(@PathVariable("goodsBillCode") String goodsBillCode) { boolean flag = false; flag = goodsBillService.delete(goodsBillCode); if (!flag) { return ERROR; } return SUCCESS; } @ApiOperation(value = "获取一个用户的待收货物") @RequestMapping(value = "/findWait/{customerCode}", method = RequestMethod.GET) public Result findWaitReceived(@PathVariable("customerCode") String customerCode) { List<GoodsBill> list = goodsBillService.findWaitReceive(customerCode); Result result = new Result(200, "SUCCESS", list.size(), list); return result; } @ApiOperation(value = "获取所有未发过 {提货 | 到货 | 中转 | 已提 | 代收} 回告的运单") @RequestMapping(value = "/findInform/{billType}", method = RequestMethod.GET) public Result findInform(@PathVariable("billType") String billType, @RequestParam("pageNum") int pageNum, @RequestParam("limit") int limit) { Pageable pageable = PageRequest.of(pageNum-1, limit); Page<GoodsBill> page = goodsBillService.findInformGet(billType, pageable); Result result = new Result(200, "SUCCESS", (int) page.getTotalElements(), page.getContent()); return result; } @ApiOperation(value = "获取所有已发过 {提货 | 到货 | 中转 | 已提 | 代收} 回告的运单") @RequestMapping(value = "/findOldInform/{type}", method = RequestMethod.GET) public Result findOldInform(@PathVariable("type") String type, @RequestParam("pageNum") int pageNum, @RequestParam("limit") int limit) { Pageable pageable = PageRequest.of(pageNum-1, limit); Page<GoodsBill> page = goodsBillService.findOldInform(type, pageable); Result result = new Result(200, "SUCCESS", (int) page.getTotalElements(), page.getContent()); return result; } @ApiOperation(value = "获取已提货的运单") @RequestMapping(value = "/findAllGot", method = RequestMethod.GET) public Result findAllGot(@RequestParam("pageNum") int pageNum, @RequestParam("limit") int limit) { Pageable pageable = PageRequest.of(pageNum-1, limit); Page<GoodsBill> page = goodsBillService.findAllGot(pageable); Result result = new Result(200, "SUCCESS", (int) page.getTotalElements(), page.getContent()); return result; } }
2,210
2,601
<filename>include/premake/src/host/os_listWindowsRegistry.c /** * \file os_reg.c * \brief Returns true if the given file exists on the file system. * \author Copyright (c) 2002-2016 <NAME> and the Premake project */ #include "premake.h" #if PLATFORM_WINDOWS typedef struct RegNodeInfo { const char * name; const char * value; DWORD valueSize; DWORD type; } RegNodeInfo; typedef void (*ListCallback)(const RegNodeInfo * info, void * user); extern HKEY getRegistryKey(const char** path); static const char* getTypeString(DWORD type) { switch (type) { case REG_NONE: return "REG_NONE"; case REG_SZ: return "REG_SZ"; case REG_EXPAND_SZ: return "REG_EXPAND_SZ"; case REG_BINARY: return "REG_BINARY"; case REG_DWORD: return "REG_DWORD"; case REG_DWORD_BIG_ENDIAN: return "REG_DWORD_BIG_ENDIAN"; case REG_LINK: return "REG_LINK"; case REG_MULTI_SZ: return "REG_MULTI_SZ"; case REG_RESOURCE_LIST: return "REG_RESOURCE_LIST"; case REG_FULL_RESOURCE_DESCRIPTOR: return "REG_FULL_RESOURCE_DESCRIPTOR"; case REG_RESOURCE_REQUIREMENTS_LIST: return "REG_RESOURCE_REQUIREMENTS_LIST"; case REG_QWORD: return "REG_QWORD"; default: return NULL; } } static HKEY openKey(const char *path) { HKEY key, subkey; // check string if (path == NULL) return NULL; // get HKEY key = getRegistryKey(&path); if (key == NULL) return NULL; // skip the initial path separator if (path[0] == '\\') path++; // open the key for reading if (RegOpenKeyExA(key, path, 0, KEY_READ, &subkey) != ERROR_SUCCESS) subkey = NULL; return subkey; } static int listNodes(HKEY key, ListCallback callback, void * user) { RegNodeInfo node; DWORD maxSubkeyLength; DWORD maxValueLength; DWORD maxNameLength; DWORD numSubkeys; DWORD numValues; DWORD length; DWORD index; char* name; char* value; int ok; if (key == NULL || callback == NULL) return 0; // Initialize node structure node.value = NULL; node.valueSize = 0; node.type = REG_NONE; // Fetch info about key content if (RegQueryInfoKeyA(key, NULL, NULL, NULL, &numSubkeys, &maxSubkeyLength, NULL, &numValues, &maxNameLength, &maxValueLength, NULL, NULL) != ERROR_SUCCESS) return 0; // Allocate name and value buffers if (maxSubkeyLength > maxNameLength) maxNameLength = maxSubkeyLength; maxNameLength++; maxValueLength++; name = (char*)malloc((size_t)maxNameLength); value = (char*)malloc((size_t)maxValueLength + 1); // Iterate over subkeys ok = 1; node.name = name; for (index = 0; index < numSubkeys; index++) { length = maxNameLength; if (RegEnumKeyExA(key, index, name, &length, NULL, NULL, NULL, NULL) != ERROR_SUCCESS) { ok = 0; break; } callback(&node, user); } // Iterate over values if (ok) { node.value = value; for (index = 0; index < numValues; index++) { length = maxNameLength; node.valueSize = maxValueLength; if (RegEnumValueA(key, index, name, &length, NULL, &node.type, (LPBYTE)value, &node.valueSize) != ERROR_SUCCESS) { ok = 0; break; } // Ensure proper termination of strings (two terminators for the REG_MULTI_SZ) value[node.valueSize] = '\0'; value[node.valueSize + 1] = '\0'; callback(&node, user); } } // Free buffers free(name); free(value); return ok; } static void listCallback(const RegNodeInfo* info, void* user) { lua_State* L = (lua_State*)user; const char* typeString; // Insert key into the result table (keys are represented as empty tables) if (info->value == NULL) { lua_createtable(L, 0, 0); lua_setfield(L, -2, info->name); return; } // Values are represented as tables containing "type" and "value" records typeString = getTypeString(info->type); lua_createtable(L, 0, 2); lua_pushstring(L, typeString ? 
typeString : "Unknown"); lua_setfield(L, -2, "type"); switch (info->type) { // Binary encoded values -> size defined string case REG_NONE: case REG_BINARY: case REG_RESOURCE_LIST: case REG_FULL_RESOURCE_DESCRIPTOR: case REG_RESOURCE_REQUIREMENTS_LIST: { lua_pushlstring(L, info->value, info->valueSize); break; } // String encoded values -> zero terminated string case REG_SZ: case REG_EXPAND_SZ: case REG_LINK: { lua_pushstring(L, info->value); break; } // Numbers case REG_DWORD: { lua_pushinteger(L, *(DWORD32*)info->value); break; } case REG_DWORD_BIG_ENDIAN: { lua_pushinteger(L, (info->value[3] << 0) | (info->value[2] << 8) | (info->value[1] << 16) | (info->value[0] << 24)); break; } case REG_QWORD: { lua_pushinteger(L, *(DWORD64*)info->value); break; } // Multiple strings case REG_MULTI_SZ: { DWORD i, j, k; lua_newtable(L); for (i = j = 0, k = 1; i < info->valueSize; i++) { if (info->value[i] != 0) continue; if (i == j) break; lua_pushlstring(L, &info->value[j], i - j); lua_rawseti(L, -2, k); j = i + 1; k++; } break; } // Unknown field -> nil default: { lua_pushnil(L); break; } } lua_setfield(L, -2, "value"); // Complete the value subtable lua_setfield(L, -2, info->name); } int os_listWindowsRegistry(lua_State* L) { HKEY key = openKey(luaL_checkstring(L, 1)); if (key == NULL) { lua_pushnil(L); return 1; } lua_newtable(L); if (!listNodes(key, listCallback, (void *)L)) { // Discard table in case of fault and push nil instead lua_pop(L, 1); lua_pushnil(L); } RegCloseKey(key); return 1; } #else int os_listWindowsRegistry(lua_State* L) { lua_pushnil(L); return 1; } #endif
2,495
1,909
package org.knowm.xchange.okex.v5.service; import java.io.IOException; import java.util.List; import org.knowm.xchange.client.ResilienceRegistries; import org.knowm.xchange.dto.account.AccountInfo; import org.knowm.xchange.okex.v5.OkexAdapters; import org.knowm.xchange.okex.v5.OkexExchange; import org.knowm.xchange.okex.v5.dto.OkexResponse; import org.knowm.xchange.okex.v5.dto.account.OkexAssetBalance; import org.knowm.xchange.okex.v5.dto.account.OkexWalletBalance; import org.knowm.xchange.service.account.AccountService; /** Author: <NAME> (<EMAIL>) Created: 08-06-2021 */ public class OkexAccountService extends OkexAccountServiceRaw implements AccountService { public OkexAccountService(OkexExchange exchange, ResilienceRegistries resilienceRegistries) { super(exchange, resilienceRegistries); } public AccountInfo getAccountInfo() throws IOException { // null to get assets (with non-zero balance), remaining balance, and available amount in the // account. OkexResponse<List<OkexWalletBalance>> tradingBalances = getWalletBalances(null); OkexResponse<List<OkexAssetBalance>> assetBalances = getAssetBalances(null); return new AccountInfo( OkexAdapters.adaptOkexBalances(tradingBalances.getData()), OkexAdapters.adaptOkexAssetBalances(assetBalances.getData())); } }
468
1,133
<gh_stars>1000+ #!/usr/bin/env python3 # # Author: <NAME> # Copyright 2015-present, NASA-JPL/Caltech # import os import glob import datetime import numpy as np import isce, isceobj, stdproc from isceobj.Util.Poly2D import Poly2D from isceobj.Location.Offset import OffsetField, Offset from isceobj.Alos2Proc.Alos2ProcPublic import readOffset from isceobj.Alos2Proc.runSwathOffset import swathOffset from contrib.alos2proc.alos2proc import rg_filter from StackPulic import loadTrack from StackPulic import saveTrack from StackPulic import subbandParameters from StackPulic import stackDateStatistics from StackPulic import acquisitionModesAlos2 def cmdLineParse(): ''' command line parser. ''' import sys import argparse parser = argparse.ArgumentParser(description='resample data to a common grid') parser.add_argument('-idir', dest='idir', type=str, required=True, help = 'input directory where data of each date (YYMMDD) is located. only folders are recognized') parser.add_argument('-odir', dest='odir', type=str, required=True, help = 'output directory where resampled version of each date is output') parser.add_argument('-ref_date', dest='ref_date', type=str, required=True, help = 'reference date. format: YYMMDD') parser.add_argument('-sec_date', dest='sec_date', type=str, nargs='+', default=[], help = 'a number of secondary dates seperated by blanks, can also include ref_date. format: YYMMDD YYMMDD YYMMDD. If provided, only resample these dates') parser.add_argument('-ref_frame', dest='ref_frame', type=str, default=None, help = 'frame number of the swath whose grid is used as reference. e.g. 2800. default: first frame') parser.add_argument('-ref_swath', dest='ref_swath', type=int, default=None, help = 'swath number of the swath whose grid is used as reference. e.g. 1. default: first swath') parser.add_argument('-nrlks1', dest='nrlks1', type=int, default=1, help = 'range offsets between swaths/frames should be integer multiples of -nrlks1. default: 1 ') parser.add_argument('-nalks1', dest='nalks1', type=int, default=14, help = 'azimuth offsets between swaths/frames should be integer multiples of -nalks1. 
default: 14') parser.add_argument('-subband', dest='subband', action='store_true', default=False, help='create and resample subband SLCs') if len(sys.argv) <= 1: print('') parser.print_help() sys.exit(1) else: return parser.parse_args() if __name__ == '__main__': inps = cmdLineParse() #get user parameters from input idir = inps.idir odir = inps.odir dateReference = inps.ref_date dateSecondary = inps.sec_date frameReference = inps.ref_frame swathReference = inps.ref_swath nRange = inps.nrlks1 nAzimuth = inps.nalks1 subbandFlag = inps.subband ####################################################### DEBUG=False spotlightModes, stripmapModes, scansarNominalModes, scansarWideModes, scansarModes = acquisitionModesAlos2() #get date statistics dateDirs, dates, frames, swaths, dateIndexReference = stackDateStatistics(idir, dateReference) ndate = len(dates) nframe = len(frames) nswath = len(swaths) if frameReference is None: frameReference = frames[0] else: if frameReference not in frames: raise Exception('specified -ref_frame {} not in frame list {}'.format(frameReference, frames)) if swathReference is None: swathReference = swaths[0] else: if swathReference not in swaths: raise Exception('specified -ref_swath {} not in swath list {}'.format(swathReference, swaths)) #find frame and swath indexes of reference swath frameReferenceIndex = frames.index(frameReference) swathReferenceIndex = swaths.index(swathReference) print('resampling all frames and swaths to frame: {} (index: {}) swath: {} (index {})'.format( frameReference, frameReferenceIndex, swathReference, swathReferenceIndex)) #read swath offsets and save in 2-d lists swathRangeOffsetGeometrical = [] swathAzimuthOffsetGeometrical = [] swathRangeOffsetMatching = [] swathAzimuthOffsetMatching = [] for i, frameNumber in enumerate(frames): swathRangeOffsetGeometrical0 = [] swathAzimuthOffsetGeometrical0 = [] swathRangeOffsetMatching0 = [] swathAzimuthOffsetMatching0 = [] if nswath >= 2: frameDir = 'f{}_{}'.format(i+1, frameNumber) with open(os.path.join(idir, dateReference, frameDir, 'mosaic/swath_offset.txt'), 'r') as f: lines = f.readlines() for linex in lines: if 'range offset' in linex: swathRangeOffsetGeometrical0.append(float(linex.split()[3])) swathRangeOffsetMatching0.append(float(linex.split()[4])) if 'azimuth offset' in linex: swathAzimuthOffsetGeometrical0.append(float(linex.split()[3])) swathAzimuthOffsetMatching0.append(float(linex.split()[4])) else: swathRangeOffsetGeometrical0.append(0.0) swathRangeOffsetMatching0.append(0.0) swathAzimuthOffsetGeometrical0.append(0.0) swathAzimuthOffsetMatching0.append(0.0) swathRangeOffsetGeometrical.append(swathRangeOffsetGeometrical0) swathAzimuthOffsetGeometrical.append(swathAzimuthOffsetGeometrical0) swathRangeOffsetMatching.append(swathRangeOffsetMatching0) swathAzimuthOffsetMatching.append(swathAzimuthOffsetMatching0) #read frame offsets and save in 1-d list frameRangeOffsetGeometrical = [] frameAzimuthOffsetGeometrical = [] frameRangeOffsetMatching = [] frameAzimuthOffsetMatching = [] if nframe >= 2: with open(os.path.join(idir, dateReference, 'insar/frame_offset.txt'), 'r') as f: lines = f.readlines() for linex in lines: if 'range offset' in linex: frameRangeOffsetGeometrical.append(float(linex.split()[3])) frameRangeOffsetMatching.append(float(linex.split()[4])) if 'azimuth offset' in linex: frameAzimuthOffsetGeometrical.append(float(linex.split()[3])) frameAzimuthOffsetMatching.append(float(linex.split()[4])) else: frameRangeOffsetGeometrical.append(0.0) frameRangeOffsetMatching.append(0.0) 
frameAzimuthOffsetGeometrical.append(0.0) frameAzimuthOffsetMatching.append(0.0) #compute accurate starting range and sensing start using offset file for reference date #swath offset is computed between adjacent swaths within a frame, offset unit: first swath sample size #frame offset is computed between first swaths of adjacent frames, offset unit: first swath sample size startingRangeAll = [[None for j in range(nswath)] for i in range(nframe)] sensingStartAll = [[None for j in range(nswath)] for i in range(nframe)] trackReference = loadTrack(dateDirs[dateIndexReference], dates[dateIndexReference]) for i, frameNumber in enumerate(frames): #startingRange and sensingStart of first swath of current frame # for i1 in range(i+1): # startingRangeFirst = trackReference.frames[0].swaths[0].startingRange - \ # frameRangeOffsetMatching[i1] * trackReference.frames[0].swaths[0].rangePixelSize # sensingStartFirst = trackReference.frames[0].swaths[0].sensingStart - \ # datetime.timedelta(seconds = frameAzimuthOffsetMatching[i1] * trackReference.frames[0].swaths[0].azimuthLineInterval) startingRangeFirst = trackReference.frames[0].swaths[0].startingRange - \ sum(frameRangeOffsetMatching[0:i+1]) * trackReference.frames[0].swaths[0].rangePixelSize sensingStartFirst = trackReference.frames[0].swaths[0].sensingStart - \ datetime.timedelta(seconds = sum(frameAzimuthOffsetMatching[0:i+1]) * trackReference.frames[0].swaths[0].azimuthLineInterval) #startingRange and sensingStart of each swath of current frame for j, swathNumber in enumerate(range(swaths[0], swaths[-1] + 1)): # for j1 in range(j+1): # startingRangeAll[i][j] = startingRangeFirst - \ # swathRangeOffsetMatching[i][j1] * trackReference.frames[i].swaths[0].rangePixelSize # sensingStartAll[i][j] = sensingStartFirst - \ # datetime.timedelta(seconds = swathAzimuthOffsetMatching[i][j1] * trackReference.frames[i].swaths[0].azimuthLineInterval) startingRangeAll[i][j] = startingRangeFirst - \ sum(swathRangeOffsetMatching[i][0:j+1]) * trackReference.frames[i].swaths[0].rangePixelSize sensingStartAll[i][j] = sensingStartFirst - \ datetime.timedelta(seconds = sum(swathAzimuthOffsetMatching[i][0:j+1]) * trackReference.frames[i].swaths[0].azimuthLineInterval) #check computation result if DEBUG: for i, frameNumber in enumerate(frames): for j, swathNumber in enumerate(range(swaths[0], swaths[-1] + 1)): print(i, j, (trackReference.frames[i].swaths[j].startingRange-startingRangeAll[i][j])/trackReference.frames[0].swaths[0].rangePixelSize, (trackReference.frames[i].swaths[j].sensingStart-sensingStartAll[i][j]).total_seconds()/trackReference.frames[0].swaths[0].azimuthLineInterval) #update startingRange and sensingStart of reference track for i, frameNumber in enumerate(frames): for j, swathNumber in enumerate(range(swaths[0], swaths[-1] + 1)): trackReference.frames[i].swaths[j].startingRange = startingRangeAll[i][j] trackReference.frames[i].swaths[j].sensingStart = sensingStartAll[i][j] ##find minimum startingRange and sensingStart startingRangeMinimum = trackReference.frames[0].swaths[0].startingRange sensingStartMinimum = trackReference.frames[0].swaths[0].sensingStart for i, frameNumber in enumerate(frames): for j, swathNumber in enumerate(range(swaths[0], swaths[-1] + 1)): if trackReference.frames[i].swaths[j].startingRange < startingRangeMinimum: startingRangeMinimum = trackReference.frames[i].swaths[j].startingRange if trackReference.frames[i].swaths[j].sensingStart < sensingStartMinimum: sensingStartMinimum = trackReference.frames[i].swaths[j].sensingStart 
print('startingRangeMinimum (m): {}'.format(startingRangeMinimum)) print('sensingStartMinimum: {}'.format(sensingStartMinimum)) #adjust each swath of each frame to minimum startingRange and sensingStart #load reference track again for saving track parameters of resampled trackReferenceResampled = loadTrack(dateDirs[dateIndexReference], dates[dateIndexReference]) for i, frameNumber in enumerate(frames): for j, swathNumber in enumerate(range(swaths[0], swaths[-1] + 1)): #current swath swathReference = trackReference.frames[i].swaths[j] #swath of reference sample size swathReferenceReference = trackReference.frames[frameReferenceIndex].swaths[swathReferenceIndex] #current swath resampled swathReferenceResampled = trackReferenceResampled.frames[i].swaths[j] #update startingRange and sensingStart offsetRange = (swathReference.startingRange - startingRangeMinimum) / (swathReferenceReference.rangePixelSize*nRange) offsetAzimuth = (swathReference.sensingStart - sensingStartMinimum).total_seconds() / (swathReferenceReference.azimuthLineInterval*nAzimuth) swathReferenceResampled.startingRange = startingRangeMinimum + round(offsetRange) * (swathReferenceReference.rangePixelSize*nRange) swathReferenceResampled.sensingStart = sensingStartMinimum + datetime.timedelta(seconds = round(offsetAzimuth) * (swathReferenceReference.azimuthLineInterval*nAzimuth)) #update other parameters swathReferenceResampled.numberOfSamples = round(swathReference.numberOfSamples * swathReference.rangePixelSize / swathReferenceReference.rangePixelSize) swathReferenceResampled.numberOfLines = round(swathReference.numberOfLines * swathReference.azimuthLineInterval / swathReferenceReference.azimuthLineInterval) swathReferenceResampled.rangeSamplingRate = swathReferenceReference.rangeSamplingRate swathReferenceResampled.rangePixelSize = swathReferenceReference.rangePixelSize swathReferenceResampled.prf = swathReferenceReference.prf swathReferenceResampled.azimuthPixelSize = swathReferenceReference.azimuthPixelSize swathReferenceResampled.azimuthLineInterval = swathReferenceReference.azimuthLineInterval #should also update dopplerVsPixel, azimuthFmrateVsPixel? #if hasattr(swathReference, 'burstLength'): if swathReference.burstLength is not None: swathReferenceResampled.burstLength *= (swathReference.burstLength * swathReference.azimuthLineInterval / swathReferenceReference.azimuthLineInterval) #if hasattr(swathReference, 'burstCycleLength'): if swathReference.burstCycleLength is not None: swathReferenceResampled.burstCycleLength *= (swathReference.burstCycleLength * swathReference.azimuthLineInterval / swathReferenceReference.azimuthLineInterval) #no need to update parameters for ScanSAR burst-by-burst processing, since we are not doing such burst-by-burst processing. 
#resample each date os.makedirs(odir, exist_ok=True) os.chdir(odir) for idate in range(ndate): if dateSecondary != []: if dates[idate] not in dateSecondary: continue os.makedirs(dates[idate], exist_ok=True) os.chdir(dates[idate]) trackSecondary = loadTrack(dateDirs[idate], dates[idate]) for i, frameNumber in enumerate(frames): frameDir = 'f{}_{}'.format(i+1, frameNumber) os.makedirs(frameDir, exist_ok=True) os.chdir(frameDir) for j, swathNumber in enumerate(range(swaths[0], swaths[-1] + 1)): swathDir = 's{}'.format(swathNumber) os.makedirs(swathDir, exist_ok=True) os.chdir(swathDir) #current swath swathReference = trackReference.frames[i].swaths[j] #swath of reference sample size swathReferenceReference = trackReference.frames[frameReferenceIndex].swaths[swathReferenceIndex] #current swath resampled swathReferenceResampled = trackReferenceResampled.frames[i].swaths[j] #current swath to be resampled swathSecondary = trackSecondary.frames[i].swaths[j] #current slc to be processed slc = os.path.join(dateDirs[idate], frameDir, swathDir, dates[idate]+'.slc') #0. create subband SLCs if subbandFlag: subbandRadarWavelength, subbandBandWidth, subbandFrequencyCenter, subbandPrefix = subbandParameters(trackReference) slcLower = dates[idate]+'_{}_tmp.slc'.format(subbandPrefix[0]) slcUpper = dates[idate]+'_{}_tmp.slc'.format(subbandPrefix[1]) rg_filter(slc, 2, [slcLower, slcUpper], subbandBandWidth, subbandFrequencyCenter, 257, 2048, 0.1, 0, 0.0) slcList = [slc, slcLower, slcUpper] slcListResampled = [dates[idate]+'.slc', dates[idate]+'_{}.slc'.format(subbandPrefix[0]), dates[idate]+'_{}.slc'.format(subbandPrefix[1])] slcListRemoved = [slcLower, slcUpper] else: slcList = [slc] slcListResampled = [dates[idate]+'.slc'] slcListRemoved = [] #1. compute offset polynomial if idate == dateIndexReference: rangePoly = Poly2D() rangePoly.initPoly(rangeOrder=1,azimuthOrder=0,coeffs=[[ (swathReferenceResampled.startingRange - swathReference.startingRange) / swathReference.rangePixelSize, swathReferenceResampled.rangePixelSize / swathReference.rangePixelSize - 1.0]]) azimuthPoly = Poly2D() azimuthPoly.initPoly(rangeOrder=0,azimuthOrder=1,coeffs=[ [(swathReferenceResampled.sensingStart - swathReference.sensingStart).total_seconds() / swathReference.azimuthLineInterval], [swathReferenceResampled.azimuthLineInterval / swathReference.azimuthLineInterval - 1.0]]) if DEBUG: print() print('rangePoly.getCoeffs(): {}'.format(rangePoly.getCoeffs())) print('azimuthPoly.getCoeffs(): {}'.format(azimuthPoly.getCoeffs())) print('rangePoly._meanRange: {}'.format(rangePoly._meanRange)) print('rangePoly._normRange: {}'.format(rangePoly._normRange)) print('rangePoly._meanAzimuth: {}'.format(rangePoly._meanAzimuth)) print('rangePoly._normAzimuth: {}'.format(rangePoly._normAzimuth)) print('azimuthPoly._meanRange: {}'.format(azimuthPoly._meanRange)) print('azimuthPoly._normRange: {}'.format(azimuthPoly._normRange)) print('azimuthPoly._meanAzimuth: {}'.format(azimuthPoly._meanAzimuth)) print('azimuthPoly._normAzimuth: {}'.format(azimuthPoly._normAzimuth)) print() else: offsets = readOffset(os.path.join(dateDirs[idate], frameDir, swathDir, 'cull.off')) # x1 x2 x3 # y1 y2 y3 #create new offset field to save offsets: swathReferenceResampled --> swathReference --> swathSecondary offsetsUpdated = OffsetField() for offset in offsets: offsetUpdate = Offset() x1 = offset.x * swathReference.rangePixelSize / swathReferenceResampled.rangePixelSize + \ (swathReference.startingRange - swathReferenceResampled.startingRange) / 
swathReferenceResampled.rangePixelSize y1 = offset.y * swathReference.azimuthLineInterval / swathReferenceResampled.azimuthLineInterval + \ (swathReference.sensingStart - swathReferenceResampled.sensingStart).total_seconds() / swathReferenceResampled.azimuthLineInterval x3 = offset.x + offset.dx y3 = offset.y + offset.dy dx = x3 - x1 dy = y3 - y1 offsetUpdate.setCoordinate(x1, y1) offsetUpdate.setOffset(dx, dy) offsetUpdate.setSignalToNoise(offset.snr) offsetUpdate.setCovariance(offset.sigmax, offset.sigmay, offset.sigmaxy) offsetsUpdated.addOffset(offsetUpdate) azimuthPoly, rangePoly = offsetsUpdated.getFitPolynomials(rangeOrder=2,azimuthOrder=2,maxOrder=True, usenumpy=False) #check polynomial accuracy if DEBUG: print() print(' x y dx dy dx(poly) dy(poly) dx - dx(poly) dy - dy(poly)') print('==============================================================================================================') for offset in offsetsUpdated: print('%11.3f %11.3f %11.3f %11.3f %11.3f %11.3f %11.3f %11.3f'%(offset.x, offset.y, offset.dx, offset.dy, rangePoly(offset.y, offset.x), azimuthPoly(offset.y, offset.x), offset.dx - rangePoly(offset.y, offset.x), offset.dy - azimuthPoly(offset.y, offset.x))) print() if DEBUG: print() print('rangePoly.getCoeffs(): {}'.format(rangePoly.getCoeffs())) print('azimuthPoly.getCoeffs(): {}'.format(azimuthPoly.getCoeffs())) print('rangePoly._meanRange: {}'.format(rangePoly._meanRange)) print('rangePoly._normRange: {}'.format(rangePoly._normRange)) print('rangePoly._meanAzimuth: {}'.format(rangePoly._meanAzimuth)) print('rangePoly._normAzimuth: {}'.format(rangePoly._normAzimuth)) print('azimuthPoly._meanRange: {}'.format(azimuthPoly._meanRange)) print('azimuthPoly._normRange: {}'.format(azimuthPoly._normRange)) print('azimuthPoly._meanAzimuth: {}'.format(azimuthPoly._meanAzimuth)) print('azimuthPoly._normAzimuth: {}'.format(azimuthPoly._normAzimuth)) print() #2. carrier phase dpoly = Poly2D() order = len(swathSecondary.dopplerVsPixel) - 1 coeffs = [2*np.pi*val*swathSecondary.azimuthLineInterval for val in swathSecondary.dopplerVsPixel] dpoly.initPoly(rangeOrder=order, azimuthOrder=0) dpoly.setCoeffs([coeffs]) #azCarrPoly = Poly2D() #azCarrPoly.initPoly(rangeOrder=0,azimuthOrder=0,coeffs=[[0.]]) #3. resample images #checked: offset computation results using azimuthPoly/rangePoly and in resamp_slc.f90 #checked: no flattenning #checked: no reading of range and azimuth images #checked: range/azimuth carrier values: 0, 0 #checked: doppler no problem # but doppler is computed using reference's coordinate in: # isce/components/stdproc/stdproc/resamp_slc/src/resamp_slc.f90 # I have fixed it. for slcInput, slcOutput in zip(slcList, slcListResampled): inimg = isceobj.createSlcImage() inimg.load(slcInput + '.xml') inimg.filename = slcInput inimg.extraFilename = slcInput+'.vrt' inimg.setAccessMode('READ') rObj = stdproc.createResamp_slc() #the following two items are actually not used, since we are not flattenning? #but need to set these otherwise the program complains rObj.slantRangePixelSpacing = swathSecondary.rangePixelSize rObj.radarWavelength = trackSecondary.radarWavelength #rObj.azimuthCarrierPoly = azCarrPoly rObj.dopplerPoly = dpoly rObj.azimuthOffsetsPoly = azimuthPoly rObj.rangeOffsetsPoly = rangePoly rObj.imageIn = inimg ####Setting reference values #the following four items are actually not used, since we are not flattenning? 
#but need to set these otherwise the program complains rObj.startingRange = swathSecondary.startingRange rObj.referenceSlantRangePixelSpacing = swathReferenceResampled.rangePixelSize rObj.referenceStartingRange = swathReferenceResampled.startingRange rObj.referenceWavelength = trackReferenceResampled.radarWavelength width = swathReferenceResampled.numberOfSamples length = swathReferenceResampled.numberOfLines imgOut = isceobj.createSlcImage() imgOut.setWidth(width) imgOut.filename = slcOutput imgOut.setAccessMode('write') rObj.outputWidth = width rObj.outputLines = length #rObj.residualRangeImage = rngImg #rObj.residualAzimuthImage = aziImg rObj.resamp_slc(imageOut=imgOut) imgOut.renderHdr() for x in slcListRemoved: os.remove(x) os.remove(x + '.vrt') os.remove(x + '.xml') os.chdir('../') os.chdir('../') os.chdir('../') #dump resampled reference paramter files, only do this when reference is resampled dumpFlag = True if dateSecondary != []: if dates[dateIndexReference] not in dateSecondary: dumpFlag = False if dumpFlag: #we are still in directory 'odir' os.chdir(dates[dateIndexReference]) saveTrack(trackReferenceResampled, dates[dateIndexReference])
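The starting-range and sensing-start bookkeeping in the script above boils down to subtracting a running sum of matched offsets, frame-wise first and then swath-wise, each scaled by the first swath's range pixel size. The sketch below isolates just that arithmetic; the function name, the single shared pixel size, and all numeric values are illustrative assumptions, not part of the ISCE/ALOS-2 API:

def adjusted_starting_range(base_range, pixel_size, frame_offsets, swath_offsets, i, j):
    """Starting range of frame i, swath j from cumulative matched offsets.

    frame_offsets holds range offsets between first swaths of adjacent frames;
    swath_offsets[i] holds offsets between adjacent swaths inside frame i.
    Offsets are in units of the first swath's range pixel size, as in the script.
    """
    frame_shift = sum(frame_offsets[:i + 1])     # walk down to frame i
    swath_shift = sum(swath_offsets[i][:j + 1])  # then across to swath j
    return base_range - (frame_shift + swath_shift) * pixel_size


# Made-up numbers purely to show the mechanics.
base = 750_000.0          # starting range of frame 0, swath 0, in metres (illustrative)
pix = 4.29                # range pixel size of the first swath, in metres (illustrative)
frame_off = [0.0, 12.5]   # first entry is zero: frame 0 has no offset from itself
swath_off = [[0.0, 3.0], [0.0, 2.0]]
print(adjusted_starting_range(base, pix, frame_off, swath_off, 1, 1))

The sensing-start adjustment in the script follows the same pattern, with azimuth line interval in place of range pixel size and a timedelta in place of the metre shift.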
11,856
1,350
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Code generated by Microsoft (R) AutoRest Code Generator. package com.azure.resourcemanager.resourcegraph.models; import com.azure.core.annotation.Fluent; import com.azure.core.util.logging.ClientLogger; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; /** A request to compute additional statistics (facets) over the query results. */ @Fluent public final class FacetRequest { @JsonIgnore private final ClientLogger logger = new ClientLogger(FacetRequest.class); /* * The column or list of columns to summarize by */ @JsonProperty(value = "expression", required = true) private String expression; /* * The options for facet evaluation */ @JsonProperty(value = "options") private FacetRequestOptions options; /** * Get the expression property: The column or list of columns to summarize by. * * @return the expression value. */ public String expression() { return this.expression; } /** * Set the expression property: The column or list of columns to summarize by. * * @param expression the expression value to set. * @return the FacetRequest object itself. */ public FacetRequest withExpression(String expression) { this.expression = expression; return this; } /** * Get the options property: The options for facet evaluation. * * @return the options value. */ public FacetRequestOptions options() { return this.options; } /** * Set the options property: The options for facet evaluation. * * @param options the options value to set. * @return the FacetRequest object itself. */ public FacetRequest withOptions(FacetRequestOptions options) { this.options = options; return this; } /** * Validates the instance. * * @throws IllegalArgumentException thrown if the instance is not valid. */ public void validate() { if (expression() == null) { throw logger .logExceptionAsError( new IllegalArgumentException("Missing required property expression in model FacetRequest")); } if (options() != null) { options().validate(); } } }
863
1,264
/* * Copyright 2018-2021 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.data.mongodb.core.validation; import org.bson.Document; import org.springframework.data.mongodb.core.query.Criteria; import org.springframework.data.mongodb.core.query.CriteriaDefinition; import org.springframework.data.mongodb.core.query.SerializationUtils; import org.springframework.util.Assert; import org.springframework.util.ObjectUtils; /** * {@link Validator} implementation based on {@link CriteriaDefinition query expressions}. * * @author <NAME> * @author <NAME> * @since 2.1 * @see Criteria * @see <a href="https://docs.mongodb.com/manual/core/schema-validation/#query-expressions">Schema Validation</a> */ class CriteriaValidator implements Validator { private final CriteriaDefinition criteria; private CriteriaValidator(CriteriaDefinition criteria) { this.criteria = criteria; } /** * Creates a new {@link Validator} object, which is basically setup of query operators, based on a * {@link CriteriaDefinition} instance. * * @param criteria the criteria to build the {@code validator} from. Must not be {@literal null}. * @return new instance of {@link CriteriaValidator}. * @throws IllegalArgumentException when criteria is {@literal null}. */ static CriteriaValidator of(CriteriaDefinition criteria) { Assert.notNull(criteria, "Criteria must not be null!"); return new CriteriaValidator(criteria); } /* * (non-Javadoc) * @see org.springframework.data.mongodb.core.validation.Validator#toDocument() */ @Override public Document toDocument() { return criteria.getCriteriaObject(); } /* * (non-Javadoc) * @see java.lang.Object#toString() */ @Override public String toString() { return SerializationUtils.serializeToJsonSafely(toDocument()); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; CriteriaValidator that = (CriteriaValidator) o; return ObjectUtils.nullSafeEquals(criteria, that.criteria); } @Override public int hashCode() { return ObjectUtils.nullSafeHashCode(criteria); } }
844
679
<gh_stars>100-1000 /************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ #include "precompiled_sd.hxx" #include "AllMasterPagesSelector.hxx" #include "PreviewValueSet.hxx" #include "ViewShellBase.hxx" #include "SidebarShellManager.hxx" #include "MasterPageContainer.hxx" #include "MasterPageDescriptor.hxx" #include "app.hrc" #include "helpids.h" #include <tools/link.hxx> #include <set> namespace { using namespace sd::sidebar; int GetURLPriority (const SharedMasterPageDescriptor& rpDescriptor) { int nPriority (0); switch (rpDescriptor->GetURLClassification()) { case MasterPageDescriptor::URLCLASS_USER: nPriority = 0; break; case MasterPageDescriptor::URLCLASS_LAYOUT: nPriority = 1; break; case MasterPageDescriptor::URLCLASS_PRESENTATION: nPriority = 2; break; case MasterPageDescriptor::URLCLASS_OTHER: nPriority = 3; break; case MasterPageDescriptor::URLCLASS_UNKNOWN: nPriority = 4; break; default: case MasterPageDescriptor::URLCLASS_UNDETERMINED: nPriority = 5; break; } return nPriority; } class MasterPageDescriptorOrder { public: bool operator() ( const SharedMasterPageDescriptor& rp1, const SharedMasterPageDescriptor& rp2) { if (rp1->meOrigin == MasterPageContainer::DEFAULT) return true; else if (rp2->meOrigin == MasterPageContainer::DEFAULT) return false; else if (rp1->GetURLClassification() == rp2->GetURLClassification()) return rp1->mnTemplateIndex < rp2->mnTemplateIndex; else return GetURLPriority(rp1) < GetURLPriority(rp2); } }; } // end of anonymous namespace namespace sd { namespace sidebar { class AllMasterPagesSelector::SortedMasterPageDescriptorList : public ::std::set<SharedMasterPageDescriptor,MasterPageDescriptorOrder> { public: SortedMasterPageDescriptorList (void) {} }; MasterPagesSelector* AllMasterPagesSelector::Create ( ::Window* pParent, ViewShellBase& rViewShellBase, const cssu::Reference<css::ui::XSidebar>& rxSidebar) { SdDrawDocument* pDocument = rViewShellBase.GetDocument(); if (pDocument == NULL) return NULL; ::boost::shared_ptr<MasterPageContainer> pContainer (new MasterPageContainer()); MasterPagesSelector* pSelector( new AllMasterPagesSelector ( pParent, *pDocument, rViewShellBase, pContainer, rxSidebar)); pSelector->LateInit(); pSelector->SetHelpId(HID_SD_TASK_PANE_PREVIEW_ALL); return pSelector; } AllMasterPagesSelector::AllMasterPagesSelector ( ::Window* pParent, SdDrawDocument& rDocument, ViewShellBase& rBase, const ::boost::shared_ptr<MasterPageContainer>& rpContainer, const cssu::Reference<css::ui::XSidebar>& rxSidebar) : MasterPagesSelector(pParent, rDocument, rBase, rpContainer, rxSidebar), mpSortedMasterPages(new SortedMasterPageDescriptorList()) { MasterPagesSelector::Fill(); } 
AllMasterPagesSelector::~AllMasterPagesSelector (void) { } void AllMasterPagesSelector::Fill (ItemList& rItemList) { if (mpSortedMasterPages->empty()) UpdateMasterPageList(); UpdatePageSet(rItemList); } void AllMasterPagesSelector::NotifyContainerChangeEvent ( const MasterPageContainerChangeEvent& rEvent) { switch (rEvent.meEventType) { case MasterPageContainerChangeEvent::CHILD_ADDED: AddItem(rEvent.maChildToken); MasterPagesSelector::Fill(); break; case MasterPageContainerChangeEvent::INDEX_CHANGED: case MasterPageContainerChangeEvent::INDEXES_CHANGED: mpSortedMasterPages->clear(); MasterPagesSelector::Fill(); break; default: MasterPagesSelector::NotifyContainerChangeEvent(rEvent); break; } } void AllMasterPagesSelector::UpdateMasterPageList (void) { mpSortedMasterPages->clear(); int nTokenCount = mpContainer->GetTokenCount(); for (int i=0; i<nTokenCount; i++) AddItem(mpContainer->GetTokenForIndex(i)); } void AllMasterPagesSelector::AddItem (MasterPageContainer::Token aToken) { switch (mpContainer->GetOriginForToken(aToken)) { case MasterPageContainer::DEFAULT: case MasterPageContainer::TEMPLATE: // Templates are added only when coming from the // MasterPageContainerFiller so that they have an id which // defines their place in the list. Templates (pre) loaded from // RecentlyUsedMasterPages are ignored (they will be loaded // later by the MasterPageContainerFiller.) if (mpContainer->GetTemplateIndexForToken(aToken) >= 0) mpSortedMasterPages->insert(mpContainer->GetDescriptorForToken(aToken)); break; default: break; } } void AllMasterPagesSelector::UpdatePageSet (ItemList& rItemList) { SortedMasterPageDescriptorList::const_iterator iDescriptor; SortedMasterPageDescriptorList::const_iterator iEnd (mpSortedMasterPages->end()); for (iDescriptor=mpSortedMasterPages->begin(); iDescriptor!=iEnd; ++iDescriptor) rItemList.push_back((*iDescriptor)->maToken); } void AllMasterPagesSelector::GetState (SfxItemSet& rItemSet) { // MasterPagesSelector::GetState(rItemSet); if (rItemSet.GetItemState(SID_TP_EDIT_MASTER) == SFX_ITEM_AVAILABLE) rItemSet.DisableItem(SID_TP_EDIT_MASTER); } } } // end of namespace sd::sidebar
2,467
854
<reponame>mongodb/morphia
package dev.morphia.annotations;

import dev.morphia.annotations.internal.MorphiaExperimental;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

/**
 * Defines a name for a constructor parameter.
 *
 * @morphia.experimental
 */
@Target(ElementType.PARAMETER)
@Retention(RetentionPolicy.RUNTIME)
@MorphiaExperimental
public @interface Name {
    /**
     * @return the field name for the parameter
     * @see Property
     */
    String value();
}
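A hypothetical entity showing the intended use (the class and field names are invented here, not taken from the Morphia sources): @Name ties a constructor parameter to the mapped field name when the entity is instantiated through that constructor.

// Hedged sketch only; @Entity is dev.morphia.annotations.Entity.
@Entity
public class Book {
    private final String title;

    public Book(@Name("title") String title) {
        this.title = title;
    }
}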
195
348
<gh_stars>100-1000 {"nom":"Braux-Sainte-Cohière","circ":"4ème circonscription","dpt":"Marne","inscrits":90,"abs":26,"votants":64,"blancs":3,"nuls":2,"exp":59,"res":[{"nuance":"LR","nom":"<NAME>","voix":36},{"nuance":"FN","nom":"<NAME>","voix":23}]}
105
1,755
<gh_stars>1000+ #define PJ_LIB__ #include <projects.h> #include <string.h> struct pj_opaque { struct PJconsts *link; double lamp; double cphip, sphip; }; PROJ_HEAD(ob_tran, "General Oblique Transformation") "\n\tMisc Sph" "\n\to_proj= plus parameters for projection" "\n\to_lat_p= o_lon_p= (new pole) or" "\n\to_alpha= o_lon_c= o_lat_c= or" "\n\to_lon_1= o_lat_1= o_lon_2= o_lat_2="; #define TOL 1e-10 static XY o_forward(LP lp, PJ *P) { /* spheroid */ struct pj_opaque *Q = P->opaque; double coslam, sinphi, cosphi; coslam = cos(lp.lam); sinphi = sin(lp.phi); cosphi = cos(lp.phi); lp.lam = adjlon(aatan2(cosphi * sin(lp.lam), Q->sphip * cosphi * coslam + Q->cphip * sinphi) + Q->lamp); lp.phi = aasin(P->ctx,Q->sphip * sinphi - Q->cphip * cosphi * coslam); return Q->link->fwd(lp, Q->link); } static XY t_forward(LP lp, PJ *P) { /* spheroid */ struct pj_opaque *Q = P->opaque; double cosphi, coslam; cosphi = cos(lp.phi); coslam = cos(lp.lam); lp.lam = adjlon(aatan2(cosphi * sin(lp.lam), sin(lp.phi)) + Q->lamp); lp.phi = aasin(P->ctx, - cosphi * coslam); return Q->link->fwd(lp, Q->link); } static LP o_inverse(XY xy, PJ *P) { /* spheroid */ LP lp = {0.0,0.0}; struct pj_opaque *Q = P->opaque; double coslam, sinphi, cosphi; lp = Q->link->inv(xy, Q->link); if (lp.lam != HUGE_VAL) { coslam = cos(lp.lam -= Q->lamp); sinphi = sin(lp.phi); cosphi = cos(lp.phi); lp.phi = aasin(P->ctx,Q->sphip * sinphi + Q->cphip * cosphi * coslam); lp.lam = aatan2(cosphi * sin(lp.lam), Q->sphip * cosphi * coslam - Q->cphip * sinphi); } return lp; } static LP t_inverse(XY xy, PJ *P) { /* spheroid */ LP lp = {0.0,0.0}; struct pj_opaque *Q = P->opaque; double cosphi, t; lp = Q->link->inv(xy, Q->link); if (lp.lam != HUGE_VAL) { cosphi = cos(lp.phi); t = lp.lam - Q->lamp; lp.lam = aatan2(cosphi * sin(t), - sin(lp.phi)); lp.phi = aasin(P->ctx,cosphi * cos(t)); } return lp; } static void *freeup_new (PJ *P) { /* Destructor */ if (0==P) return 0; if (0==P->opaque) return pj_dealloc (P); if (P->opaque->link) return pj_dealloc (P->opaque->link); pj_dealloc (P->opaque); return pj_dealloc(P); } static void freeup (PJ *P) { freeup_new (P); return; } PJ *PROJECTION(ob_tran) { int i; double phip; char *name, *s; struct pj_opaque *Q = pj_calloc (1, sizeof (struct pj_opaque)); if (0==Q) return freeup_new (P); P->opaque = Q; /* get name of projection to be translated */ if (!(name = pj_param(P->ctx, P->params, "so_proj").s)) E_ERROR(-26); for (i = 0; (s = pj_list[i].id) && strcmp(name, s) ; ++i) ; if (!s || !(Q->link = (*pj_list[i].proj)(0))) E_ERROR(-37); /* copy existing header into new */ P->es = 0.; /* force to spherical */ Q->link->params = P->params; Q->link->ctx = P->ctx; Q->link->over = P->over; Q->link->geoc = P->geoc; Q->link->a = P->a; Q->link->es = P->es; Q->link->ra = P->ra; Q->link->lam0 = P->lam0; Q->link->phi0 = P->phi0; Q->link->x0 = P->x0; Q->link->y0 = P->y0; Q->link->k0 = P->k0; /* force spherical earth */ Q->link->one_es = Q->link->rone_es = 1.; Q->link->es = Q->link->e = 0.; if (!(Q->link = pj_list[i].proj(Q->link))) { return freeup_new(P); } if (pj_param(P->ctx, P->params, "to_alpha").i) { double lamc, phic, alpha; lamc = pj_param(P->ctx, P->params, "ro_lon_c").f; phic = pj_param(P->ctx, P->params, "ro_lat_c").f; alpha = pj_param(P->ctx, P->params, "ro_alpha").f; /* if (fabs(phic) <= TOL || fabs(fabs(phic) - HALFPI) <= TOL || fabs(fabs(alpha) - HALFPI) <= TOL) */ if (fabs(fabs(phic) - M_HALFPI) <= TOL) E_ERROR(-32); Q->lamp = lamc + aatan2(-cos(alpha), -sin(alpha) * sin(phic)); phip = aasin(P->ctx,cos(phic) * sin(alpha)); 
} else if (pj_param(P->ctx, P->params, "to_lat_p").i) { /* specified new pole */ Q->lamp = pj_param(P->ctx, P->params, "ro_lon_p").f; phip = pj_param(P->ctx, P->params, "ro_lat_p").f; } else { /* specified new "equator" points */ double lam1, lam2, phi1, phi2, con; lam1 = pj_param(P->ctx, P->params, "ro_lon_1").f; phi1 = pj_param(P->ctx, P->params, "ro_lat_1").f; lam2 = pj_param(P->ctx, P->params, "ro_lon_2").f; phi2 = pj_param(P->ctx, P->params, "ro_lat_2").f; if (fabs(phi1 - phi2) <= TOL || (con = fabs(phi1)) <= TOL || fabs(con - M_HALFPI) <= TOL || fabs(fabs(phi2) - M_HALFPI) <= TOL) E_ERROR(-33); Q->lamp = atan2(cos(phi1) * sin(phi2) * cos(lam1) - sin(phi1) * cos(phi2) * cos(lam2), sin(phi1) * cos(phi2) * sin(lam2) - cos(phi1) * sin(phi2) * sin(lam1)); phip = atan(-cos(Q->lamp - lam1) / tan(phi1)); } if (fabs(phip) > TOL) { /* oblique */ Q->cphip = cos(phip); Q->sphip = sin(phip); P->fwd = o_forward; P->inv = Q->link->inv ? o_inverse : 0; } else { /* transverse */ P->fwd = t_forward; P->inv = Q->link->inv ? t_inverse : 0; } return P; } #ifndef PJ_SELFTEST int pj_ob_tran_selftest (void) {return 0;} #else int pj_ob_tran_selftest (void) { double tolerance_lp = 1e-10; double tolerance_xy = 1e-7; char s_args[] = {"+proj=ob_tran +a=6400000 +o_proj=latlon +o_lon_p=20 +o_lat_p=20 +lon_0=180"}; LP fwd_in[] = { { 2, 1}, { 2,-1}, {-2, 1}, {-2,-1} }; XY s_fwd_expect[] = { {-2.6856872138416592, 1.2374302350496296}, {-2.6954069748943286, 1.2026833954513816}, {-2.8993663925401947, 1.2374302350496296}, {-2.8896466314875244, 1.2026833954513816}, }; XY inv_in[] = { { 200, 100}, { 200,-100}, {-200, 100}, {-200,-100} }; LP s_inv_expect[] = { { 121.5518748407577, -2.5361001573966084}, { 63.261184340201858, 17.585319578673531}, {-141.10073322351622, 26.091712304855108}, {-65.862385598848391, 51.830295078417215}, }; return pj_generic_selftest (0, s_args, tolerance_xy, tolerance_lp, 4, 4, fwd_in, 0, s_fwd_expect, inv_in, 0, s_inv_expect); } #endif
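A minimal driver sketch using the classic proj.4 public API (pj_init_plus/pj_fwd from proj_api.h, which is distinct from the internal projects.h used above). The parameter string is the one exercised by the selftest; the input coordinates are illustrative.

#include <proj_api.h>
#include <stdio.h>

int main(void) {
    /* Oblique transformation of a plain lat/lon grid with the pole moved to (20E, 20N). */
    projPJ pj = pj_init_plus(
        "+proj=ob_tran +a=6400000 +o_proj=latlon +o_lon_p=20 +o_lat_p=20 +lon_0=180");
    if (!pj) return 1;
    projUV in;
    in.u = 2 * DEG_TO_RAD;   /* longitude in radians */
    in.v = 1 * DEG_TO_RAD;   /* latitude in radians */
    projUV out = pj_fwd(in, pj);
    printf("%f %f\n", out.u, out.v);
    pj_free(pj);
    return 0;
}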
3,505
614
[ { "home": "Portugal", "away": "France", "homescore": "1", "awayscore": "0" }, { "home": "Lin<NAME>", "away": "Celtic" }, { "home": "FK Qarabag", "away": "F91 Dudelange" }, { "home": "Alashkert FC", "away": "<NAME>" }, { "home": "T.N.S.", "away": "Apoel Nicosia" }, { "home": "Car<NAME>", "away": "West Ham United" }, { "home": "Germany U19", "away": "Italy U19", "homescore": "0", "awayscore": "1" }, { "home": "Portugal U19", "away": "Austria U19", "homescore": "1", "awayscore": "1" }, { "home": "FC Groningen", "away": "Queens Park Rangers", "homescore": "3", "awayscore": "1" }, { "home": "Malmo FF", "away": "Orebro SK", "homescore": "1", "awayscore": "0" }, { "home": "Cruzeiro", "away": "Atletico Paranaense", "homescore": "0", "awayscore": "3" }, { "home": "Deportes Quindío", "away": "Deportivo Pereira" }, { "home": "La Bocana", "away": "Comerciantes Unidos", "homescore": "2", "awayscore": "0" }, { "home": "UTC", "away": "Unión Comercio", "homescore": "1", "awayscore": "1" } ]
428
2,151
// Copyright (c) 2016 The WebM project authors. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
#include "src/unknown_parser.h"

#include <cassert>
#include <cstdint>

#include "webm/element.h"
#include "webm/reader.h"
#include "webm/status.h"

namespace webm {

Status UnknownParser::Init(const ElementMetadata& metadata,
                           std::uint64_t max_size) {
  assert(metadata.size == kUnknownElementSize || metadata.size <= max_size);

  if (metadata.size == kUnknownElementSize) {
    return Status(Status::kIndefiniteUnknownElement);
  }

  metadata_ = metadata;
  bytes_remaining_ = metadata.size;
  return Status(Status::kOkCompleted);
}

Status UnknownParser::Feed(Callback* callback, Reader* reader,
                           std::uint64_t* num_bytes_read) {
  assert(callback != nullptr);
  assert(reader != nullptr);
  assert(num_bytes_read != nullptr);

  const std::uint64_t original_bytes_remaining = bytes_remaining_;

  const Status status =
      callback->OnUnknownElement(metadata_, reader, &bytes_remaining_);

  assert(bytes_remaining_ <= original_bytes_remaining);
  *num_bytes_read = original_bytes_remaining - bytes_remaining_;

  return status;
}

}  // namespace webm
498
3,100
/****************************************************************************** * * Project: NTF Translator * Purpose: NTFCodeList class implementation. * Author: <NAME>, <EMAIL> * ****************************************************************************** * Copyright (c) 2001, <NAME> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. ****************************************************************************/ #include <algorithm> #include <stdarg.h> #include "ntf.h" #include "cpl_conv.h" #include "cpl_string.h" CPL_CVSID("$Id$") /************************************************************************/ /* NTFCodeList */ /************************************************************************/ NTFCodeList::NTFCodeList( NTFRecord * poRecord ) : nNumCode(std::max(0, atoi(poRecord->GetField(20,22)))), papszCodeVal(static_cast<char **>(CPLMalloc(sizeof(char*) * nNumCode))), papszCodeDes(static_cast<char **>(CPLMalloc(sizeof(char*) * nNumCode))) { CPLAssert( EQUAL(poRecord->GetField(1,2),"42") ); snprintf( szValType, sizeof(szValType), "%s", poRecord->GetField(13,14) ); snprintf( szFInter, sizeof(szFInter), "%s", poRecord->GetField(15,19) ); const int nRecordLen = poRecord->GetLength(); const char *pszText = poRecord->GetData() + 22; int iThisField = 0; for( ; nRecordLen > 22 && *pszText != '\0' && iThisField < nNumCode; iThisField++ ) { char szVal[128] = {}; int iLen = 0; while( iLen < static_cast<int>(sizeof(szVal)) - 1 && *pszText != '\\' && *pszText != '\0' ) { szVal[iLen++] = *(pszText++); } szVal[iLen] = '\0'; if( *pszText == '\\' ) pszText++; iLen = 0; char szDes[128] = {}; while( iLen < static_cast<int>(sizeof(szDes)) - 1 && *pszText != '\\' && *pszText != '\0' ) { szDes[iLen++] = *(pszText++); } szDes[iLen] = '\0'; if( *pszText == '\\' ) pszText++; papszCodeVal[iThisField] = CPLStrdup(szVal); papszCodeDes[iThisField] = CPLStrdup(szDes); } if( iThisField < nNumCode ) { nNumCode = iThisField; CPLDebug( "NTF", "Didn't get all the expected fields from a CODELIST." 
); } } /************************************************************************/ /* ~NTFCodeList() */ /************************************************************************/ NTFCodeList::~NTFCodeList() { for( int i = 0; i < nNumCode; i++ ) { CPLFree( papszCodeVal[i] ); CPLFree( papszCodeDes[i] ); } CPLFree( papszCodeVal ); CPLFree( papszCodeDes ); } /************************************************************************/ /* Lookup() */ /************************************************************************/ const char *NTFCodeList::Lookup( const char * pszCode ) { for( int i = 0; i < nNumCode; i++ ) { if( EQUAL(pszCode,papszCodeVal[i]) ) return papszCodeDes[i]; } return nullptr; }
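A small usage sketch for the class above (the caller and the code value "A" are hypothetical; in practice the NTFCodeList comes from the NTF reader that parsed a type-42 record): Lookup() returns the description for a code value, or nullptr when the code is unknown.

/* Hedged sketch only. */
const char *pszDesc = poCodeList->Lookup("A");
if (pszDesc != nullptr)
    CPLDebug("NTF", "Code A means: %s", pszDesc);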
1,717
5,250
<filename>modules/flowable-dmn-rest/src/main/java/org/flowable/dmn/rest/service/api/repository/DecisionTableResourceDataResource.java /* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.flowable.dmn.rest.service.api.repository; import javax.servlet.http.HttpServletResponse; import org.flowable.dmn.api.DmnDecision; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; import io.swagger.annotations.ApiResponse; import io.swagger.annotations.ApiResponses; import io.swagger.annotations.Authorization; /** * @author <NAME> * * @deprecated use {@link DecisionResourceDataResource} instead. */ @Deprecated @RestController @Api(tags = { "Decision Tables" }, description = "Manage Decision Tables", authorizations = { @Authorization(value = "basicAuth") }) public class DecisionTableResourceDataResource extends BaseDecisionResource { @ApiOperation(value = "Get a decision table resource content", tags = { "Decision Tables" }) @ApiResponses(value = { @ApiResponse(code = 200, message = "Indicates both decision table and resource have been found and the resource data has been returned."), @ApiResponse(code = 404, message = "Indicates the requested decision table was not found or there is no resource with the given id present in the decision table. The status-description contains additional information.") }) @GetMapping(value = "/dmn-repository/decision-tables/{decisionTableId}/resourcedata", produces = "application/json") @ResponseBody public byte[] getDecisionTableResource(@ApiParam(name = "decisionTableId") @PathVariable String decisionTableId, HttpServletResponse response) { DmnDecision decisionTable = getDecisionFromRequest(decisionTableId); return getDeploymentResourceData(decisionTable.getDeploymentId(), decisionTable.getResourceName(), response); } }
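For orientation, a hedged example call against the endpoint declared above. The host, context path, credentials, and decision table id are placeholders and are not taken from this file; only the /dmn-repository/decision-tables/{decisionTableId}/resourcedata path is.

curl -u someuser:somepassword \
  'http://localhost:8080/your-dmn-rest-context/dmn-repository/decision-tables/DT-1234/resourcedata'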
791
1,345
<reponame>MaksHess/napari<gh_stars>1000+
"""Napari Configuration.
"""
import os

from ._octree import get_octree_config


def _set(env_var: str) -> bool:
    """Return True if the env variable is set and non-zero.

    Returns
    -------
    bool
        True if the env var was set to a non-zero value.
    """
    return os.getenv(env_var) not in [None, "0"]


"""
Experimental Features

Async Loading
-------------
Image layers will use the ChunkLoader to load data instead of loading the
data directly. Image layers will not call np.asarray() in the GUI thread.
The ChunkLoader will call np.asarray() in a worker thread. That means any
IO or computation done as part of the load will not block the GUI thread.

Set NAPARI_ASYNC=1 to turn on async loading with default settings.

Octree Rendering
----------------
Image layers use an octree for rendering. The octree organizes the image
into chunks/tiles. Only a subset of those chunks/tiles are loaded and
drawn at a time. Octree rendering is a work in progress.

Enable it one of two ways:

1) Set NAPARI_OCTREE=1 to enable octree rendering with defaults.
2) Set NAPARI_OCTREE=/tmp/config.json to use a config file.

See napari/utils/_octree.py for the config file format.

Shared Memory Server
--------------------
Experimental shared memory service. Only enabled if NAPARI_MON is set to
the path of a config file. See this PR for more info:
https://github.com/napari/napari/pull/1909.
"""

# Config for async/octree. If octree_config['octree']['enabled'] is False
# only async is enabled, not the octree.
octree_config = get_octree_config()

# Shorthand for async loading with or without an octree.
async_loading = octree_config is not None

# Shorthand for async with an octree.
async_octree = octree_config and octree_config['octree']['enabled']

# Shared Memory Server
monitor = _set("NAPARI_MON")

"""
Other Config Options
"""

# Added this temporarily for octree debugging. The welcome visual causes
# breakpoints to hit in image visual code. It's easier if we don't show it.
allow_welcome_visual = True
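A short usage sketch (the module import path is assumed from the repository layout; the environment variable must be set before the module is imported, since the flags are read at import time):

# Hedged example: turn on experimental async loading, then check the shorthands.
import os
os.environ["NAPARI_ASYNC"] = "1"

from napari.utils import config  # path assumed; this is the module above

print(config.async_loading)  # expected True once an async/octree config is returned
print(config.async_octree)   # True only when the octree itself is enabled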
629
32,544
<reponame>DBatOWL/tutorials
package com.baeldung.boot.jsp.exception;

import com.baeldung.boot.jsp.dto.Book;
import lombok.Getter;

@Getter
public class DuplicateBookException extends RuntimeException {

    private final Book book;

    public DuplicateBookException(Book book) {
        this.book = book;
    }
}
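A hypothetical handler sketch showing how the carried Book might be surfaced to a view (the controller-advice wiring, the Model attribute name, and the view name are assumptions, not part of this file):

// Hedged sketch; @ExceptionHandler and Model come from Spring MVC,
// getBook() is generated by Lombok's @Getter on the exception above.
@ExceptionHandler(DuplicateBookException.class)
public String handleDuplicateBook(DuplicateBookException ex, Model model) {
    model.addAttribute("book", ex.getBook());
    return "error-book-duplicate";  // illustrative view name
}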
119
303
<filename>www/drag/metadata/9/9285.json {"id":9285,"line-1":"Beyneu District","line-2":"Kazakhstan","attribution":"©2016 Cnes/Spot Image, DigitalGlobe, Landsat","url":"https://www.google.com/maps/@45.950195,53.291759,14z/data=!3m1!1e3"}
98
1,602
<filename>test/parallel/taskCompare/elliot/ompTaskSpawn.h<gh_stars>1000+
#include <stdint.h>

#ifdef _OPENMP
#include <omp.h>
#endif

static void ompTaskSpawn(int64_t trials, int64_t numTasks, int64_t runSerial) {
  int i, j;
#ifdef _OPENMP
  if (runSerial) {
    omp_set_num_threads(1);
  } else {
    omp_set_num_threads(numTasks);
  }
#endif
  for (i=0; i<trials; i++) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (j=0; j<numTasks; j++) {
    }
  }
}
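A minimal driver sketch for the helper above (the include name follows the file's own name; the argument values are arbitrary and purely illustrative):

#include "ompTaskSpawn.h"

int main(void) {
    /* 1000 trials, 4 tasks per trial, run in parallel rather than serial. */
    ompTaskSpawn(1000, 4, 0);
    return 0;
}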
227
2,287
<filename>v2ex-iOS/Views/V2TopicReplyCell.h<gh_stars>1000+
//
//  V2TopicReplyCell.h
//  v2ex-iOS
//
//  Created by Singro on 3/20/14.
//  Copyright (c) 2014 Singro. All rights reserved.
//

#import <UIKit/UIKit.h>

@interface V2TopicReplyCell : UITableViewCell

@property (nonatomic, strong) V2ReplyModel *model;
@property (nonatomic, strong) V2ReplyModel *selectedReplyModel;

@property (nonatomic, assign) UINavigationController *navi;
@property (nonatomic, assign) V2ReplyList *replyList;

@property (nonatomic, copy) void (^longPressedBlock)();
@property (nonatomic, copy) void (^reloadCellBlock)();

+ (CGFloat)getCellHeightWithReplyModel:(V2ReplyModel *)model;

@end

static NSString * const kSelectMemberNotification = @"SelectMemberNotification";
263
746
<gh_stars>100-1000 package org.protege.editor.owl.ui.ontology.imports.missing; import org.protege.editor.core.ui.wizard.Wizard; import org.protege.editor.owl.OWLEditorKit; import java.awt.*; /** * Author: <NAME><br> * The University Of Manchester<br> * Medical Informatics Group<br> * Date: 17-Oct-2006<br><br> * <EMAIL><br> * www.cs.man.ac.uk/~horridgm<br><br> */ public class MissingImportWizard extends Wizard { public MissingImportWizard(Frame frame, OWLEditorKit owlEditorKit) { super(frame); setTitle("Resolve missing import wizard"); registerWizardPanel(ResolutionTypePanel.ID, new ResolutionTypePanel(owlEditorKit)); registerWizardPanel(SpecifyFilePathPanel.ID, new SpecifyFilePathPanel(owlEditorKit)); registerWizardPanel(CopyOptionPanel.ID, new CopyOptionPanel(owlEditorKit)); setCurrentPanel(ResolutionTypePanel.ID); } public static void main(String[] args) { MissingImportWizard w = new MissingImportWizard(null, null); w.showModalDialog(); } }
392
857
<gh_stars>100-1000 # Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license. # See LICENSE in the project root for license information. from falcon import HTTPError, HTTPBadRequest from ujson import dumps as json_dumps import time from ...auth import login_required, check_calendar_auth_by_id from ... import db, constants from ...utils import load_json_body, user_in_team, create_notification, create_audit from ...constants import EVENT_SUBSTITUTED @login_required def on_post(req, resp): """ Override/substitute existing events. For example, if the current on-call is unexpectedly busy from 3-4, another user can override that event for that time period and take over the shift. Override may delete or edit existing events, and may create new events. The API's response contains the information for all undeleted events that were passed in the event_ids param, along with the events created by the override. Params: - **start**: Start time for the event substitution - **end**: End time for event substitution - **event_ids**: List of event ids to override - **user**: User who will be taking over **Example request:** .. sourcecode:: http POST api/v0/events/override HTTP/1.1 Content-Type: application/json { "start": 1493677400, "end": 1493678400, "event_ids": [1], "user": "jdoe" } **Example response:** .. sourcecode:: http HTTP/1.1 200 OK Content-Type: application/json [ { "end": 1493678400, "full_name": "<NAME>", "id": 3, "role": "primary", "start": 1493677400, "team": "team-foo", "user": "jdoe" } ] """ data = load_json_body(req) event_ids = data['event_ids'] start = data['start'] end = data['end'] user = data['user'] get_events_query = '''SELECT `start`, `end`, `id`, `schedule_id`, `user_id`, `role_id`, `team_id` FROM `event` WHERE `id` IN %s''' insert_event_query = 'INSERT INTO `event`(`start`, `end`, `user_id`, `team_id`, `role_id`)' \ 'VALUES (%(start)s, %(end)s, %(user_id)s, %(team_id)s, %(role_id)s)' event_return_query = '''SELECT `event`.`start`, `event`.`end`, `event`.`id`, `role`.`name` AS `role`, `team`.`name` AS `team`, `user`.`name` AS `user`, `user`.`full_name` FROM `event` JOIN `role` ON `event`.`role_id` = `role`.`id` JOIN `team` ON `event`.`team_id` = `team`.`id` JOIN `user` ON `event`.`user_id` = `user`.`id` WHERE `event`.`id` IN %s''' connection = db.connect() cursor = connection.cursor(db.DictCursor) try: cursor.execute(get_events_query, (event_ids,)) events = cursor.fetchall() now = time.time() cursor.execute('SELECT `id` FROM `user` WHERE `name` = %s', user) user_id = cursor.fetchone() if not (events and user_id): raise HTTPBadRequest('Invalid name or list of events') else: user_id = user_id['id'] team_id = events[0]['team_id'] check_calendar_auth_by_id(team_id, req) # Check that events are not in the past if start < now - constants.GRACE_PERIOD: raise HTTPBadRequest('Invalid override request', 'Cannot edit events in the past') # Check that events are from the same team if any([ev['team_id'] != team_id for ev in events]): raise HTTPBadRequest('Invalid override request', 'Events must be from the same team') # Check override user's membership in the team if not user_in_team(cursor, user_id, team_id): raise HTTPBadRequest('Invalid override request', 'Substituting user must be part of the team') # Check events have the same role if len(set([ev['role_id'] for ev in events])) > 1: raise HTTPBadRequest('Invalid override request', 'events must have the same role') # Check events have same user if len(set([ev['user_id'] for ev in events])) > 1: 
raise HTTPBadRequest('Invalid override request', 'events must have the same role') edit_start = [] edit_end = [] delete = [] split = [] events = sorted(events, key=lambda x: x['start']) # Truncate start/end if needed start = max(events[0]['start'], start) end = min(max(e['end'] for e in events), end) for idx, e in enumerate(events): # Check for consecutive events if idx != 0 and e['start'] != events[idx - 1]['end']: raise HTTPBadRequest('Invalid override request', 'events must be consecutive') # Sort events into lists according to how they need to be edited if start <= e['start'] and end >= e['end']: delete.append(e) elif start > e['start'] and start < e['end'] <= end: edit_end.append(e) elif start <= e['start'] < end and end < e['end']: edit_start.append(e) elif start > e['start'] and end < e['end']: split.append(e) else: raise HTTPBadRequest('Invalid override request', 'events must overlap with override time range') # Edit events if edit_start: ids = [e['id'] for e in edit_start] cursor.execute('UPDATE `event` SET `start` = %s WHERE `id` IN %s', (end, ids)) if edit_end: ids = [e['id'] for e in edit_end] cursor.execute('UPDATE `event` SET `end` = %s WHERE `id` IN %s', (start, ids)) if delete: ids = [e['id'] for e in delete] cursor.execute('DELETE FROM `event` WHERE `id` IN %s', (ids,)) if split: create = [] for e in split: left_event = e.copy() right_event = e.copy() left_event['end'] = start right_event['start'] = end create.append(left_event) create.append(right_event) ids = [] # Create left/right events for e in create: cursor.execute(insert_event_query, e) ids.append(cursor.lastrowid) event_ids.append(cursor.lastrowid) # Delete the split event ids = [e['id'] for e in split] cursor.execute('DELETE FROM `event` WHERE `id` IN %s', (ids,)) # Insert new override event override_event = { 'start': start, 'end': end, 'role_id': events[0]['role_id'], 'team_id': events[0]['team_id'], 'user_id': user_id } cursor.execute('''INSERT INTO `event`(`start`, `end`, `user_id`, `team_id`, `role_id`) VALUES (%(start)s, %(end)s, %(user_id)s, %(team_id)s, %(role_id)s)''', override_event) event_ids.append(cursor.lastrowid) cursor.execute(event_return_query, (event_ids,)) ret_data = cursor.fetchall() cursor.execute('SELECT full_name, id FROM user WHERE id IN %s', ((user_id, events[0]['user_id']),)) full_names = {row['id']: row['full_name'] for row in cursor} context = {'full_name_0': full_names[user_id], 'full_name_1': full_names[events[0]['user_id']], 'role': ret_data[0]['role'], 'team': ret_data[0]['team']} create_notification(context, events[0]['team_id'], [events[0]['role_id']], EVENT_SUBSTITUTED, [user_id, events[0]['user_id']], cursor, start_time=start, end_time=end) create_audit({'new_events': ret_data, 'request_body': data}, ret_data[0]['team'], EVENT_SUBSTITUTED, req, cursor) resp.body = json_dumps(ret_data) except HTTPError: raise else: connection.commit() finally: cursor.close() connection.close()
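The docstring above already shows the request body; for completeness, the same call as a hedged curl example (the host is a placeholder, and the endpoint requires an authenticated session per the login_required decorator):

curl -X POST 'https://oncall.example.com/api/v0/events/override' \
  -H 'Content-Type: application/json' \
  -d '{"start": 1493677400, "end": 1493678400, "event_ids": [1], "user": "jdoe"}'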
3,861
7,150
<gh_stars>1000+
#include <stdio.h>

/* Returns the first index whose value is >= key (lower bound). */
int binary_search_first(int *a, int length, int key) {
    int low = 0;
    int high = length - 1;
    int mid = 0;
    while (low < high) {
        mid = (low + high) / 2;
        if (a[mid] >= key) { /* search the left half */
            high = mid;
        } else {
            low = mid + 1;
        }
    }
    return high;
}

/* Returns the last index whose value is <= key (upper bound). The midpoint is
 * rounded up ((low+high+1)/2) so the loop cannot get stuck when low == mid. */
int binary_search_last(int *a, int length, int key) {
    int low = 0;
    int high = length - 1;
    int mid = 0;
    while (low < high) {
        mid = (low + high + 1) / 2;
        if (a[mid] <= key) { /* search the right half */
            low = mid;
        } else {
            high = mid - 1;
        }
    }
    return low;
}

int element_appear_times(int *a, int length, int key) {
    printf("(%d) first appear at index %d\n", key, binary_search_first(a, length, key));
    printf("(%d) last appear at index %d\n", key, binary_search_last(a, length, key));
    return binary_search_last(a, length, key) - binary_search_first(a, length, key) + 1;
}

int main() {
    int a[] = {2,3,4,4,4,4,4,5,5,7,7,11,11,11,11,11,32,32,54};
    printf("(2)appear times = %d\n", element_appear_times(a, sizeof(a)/sizeof(int), 2));
    printf("(4)appear times = %d\n", element_appear_times(a, sizeof(a)/sizeof(int), 4));
    printf("(11)appear times = %d\n", element_appear_times(a, sizeof(a)/sizeof(int), 11));
    printf("(36)appear times = %d\n", element_appear_times(a, sizeof(a)/sizeof(int), 36));
    printf("(54)appear times = %d\n", element_appear_times(a, sizeof(a)/sizeof(int), 54));
    return 0;
}
633
2,151
<reponame>caokun8008/ckeos
//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

// UNSUPPORTED: c++98, c++03

// <experimental/memory_resource>

// memory_resource::do_allocate(size_t, size_t);          /* protected */
// memory_resource::do_deallocate(void*, size_t, size_t); /* protected */
// memory_resource::do_is_equal(memory_resource const&);  /* protected */

#include <experimental/memory_resource>

namespace ex = std::experimental::pmr;

int main() {
    ex::memory_resource *m = ex::new_delete_resource();
    m->do_allocate(0, 0);            // expected-error{{'do_allocate' is a protected member}}
    m->do_deallocate(nullptr, 0, 0); // expected-error{{'do_deallocate' is a protected member}}
    m->do_is_equal(*m);              // expected-error{{'do_is_equal' is a protected member}}
}
350
777
<reponame>google-ar/chromium<gh_stars>100-1000 // Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "net/extras/sqlite/sqlite_channel_id_store.h" #include <memory> #include <set> #include <utility> #include <vector> #include "base/bind.h" #include "base/files/file_path.h" #include "base/files/file_util.h" #include "base/location.h" #include "base/logging.h" #include "base/macros.h" #include "base/metrics/histogram_macros.h" #include "base/sequenced_task_runner.h" #include "base/strings/string_util.h" #include "crypto/ec_private_key.h" #include "net/cert/asn1_util.h" #include "net/cert/x509_certificate.h" #include "net/cookies/cookie_util.h" #include "net/ssl/channel_id_service.h" #include "net/ssl/ssl_client_cert_type.h" #include "sql/error_delegate_util.h" #include "sql/meta_table.h" #include "sql/statement.h" #include "sql/transaction.h" #include "url/gurl.h" namespace { // Version number of the database. const int kCurrentVersionNumber = 5; const int kCompatibleVersionNumber = 5; } // namespace namespace net { // This class is designed to be shared between any calling threads and the // background task runner. It batches operations and commits them on a timer. class SQLiteChannelIDStore::Backend : public base::RefCountedThreadSafe<SQLiteChannelIDStore::Backend> { public: Backend( const base::FilePath& path, const scoped_refptr<base::SequencedTaskRunner>& background_task_runner) : path_(path), num_pending_(0), force_keep_session_state_(false), background_task_runner_(background_task_runner), corruption_detected_(false) {} // Creates or loads the SQLite database. void Load(const LoadedCallback& loaded_callback); // Batch a channel ID addition. void AddChannelID(const DefaultChannelIDStore::ChannelID& channel_id); // Batch a channel ID deletion. void DeleteChannelID(const DefaultChannelIDStore::ChannelID& channel_id); // Post background delete of all channel ids for |server_identifiers|. void DeleteAllInList(const std::list<std::string>& server_identifiers); // Commit any pending operations and close the database. This must be called // before the object is destructed. void Close(); void SetForceKeepSessionState(); private: friend class base::RefCountedThreadSafe<SQLiteChannelIDStore::Backend>; // You should call Close() before destructing this object. virtual ~Backend() { DCHECK(!db_.get()) << "Close should have already been called."; DCHECK_EQ(0u, num_pending_); DCHECK(pending_.empty()); } void LoadInBackground( std::vector<std::unique_ptr<DefaultChannelIDStore::ChannelID>>* channel_ids); // Database upgrade statements. bool EnsureDatabaseVersion(); class PendingOperation { public: enum OperationType { CHANNEL_ID_ADD, CHANNEL_ID_DELETE }; PendingOperation(OperationType op, const DefaultChannelIDStore::ChannelID& channel_id) : op_(op), channel_id_(channel_id) {} OperationType op() const { return op_; } const DefaultChannelIDStore::ChannelID& channel_id() const { return channel_id_; } private: OperationType op_; DefaultChannelIDStore::ChannelID channel_id_; }; private: // Batch a channel id operation (add or delete). void BatchOperation(PendingOperation::OperationType op, const DefaultChannelIDStore::ChannelID& channel_id); // Prunes the list of pending operations to remove any operations for an // identifier in |server_identifiers|. void PrunePendingOperationsForDeletes( const std::list<std::string>& server_identifiers); // Commit our pending operations to the database. 
void Commit(); // Close() executed on the background task runner. void InternalBackgroundClose(); void BackgroundDeleteAllInList( const std::list<std::string>& server_identifiers); void DatabaseErrorCallback(int error, sql::Statement* stmt); void KillDatabase(); const base::FilePath path_; std::unique_ptr<sql::Connection> db_; sql::MetaTable meta_table_; typedef std::list<PendingOperation*> PendingOperationsList; PendingOperationsList pending_; PendingOperationsList::size_type num_pending_; // True if the persistent store should skip clear on exit rules. bool force_keep_session_state_; // Guard |pending_|, |num_pending_| and |force_keep_session_state_|. base::Lock lock_; scoped_refptr<base::SequencedTaskRunner> background_task_runner_; // Indicates if the kill-database callback has been scheduled. bool corruption_detected_; DISALLOW_COPY_AND_ASSIGN(Backend); }; void SQLiteChannelIDStore::Backend::Load( const LoadedCallback& loaded_callback) { // This function should be called only once per instance. DCHECK(!db_.get()); std::unique_ptr< std::vector<std::unique_ptr<DefaultChannelIDStore::ChannelID>>> channel_ids( new std::vector<std::unique_ptr<DefaultChannelIDStore::ChannelID>>()); std::vector<std::unique_ptr<DefaultChannelIDStore::ChannelID>>* channel_ids_ptr = channel_ids.get(); background_task_runner_->PostTaskAndReply( FROM_HERE, base::Bind(&Backend::LoadInBackground, this, channel_ids_ptr), base::Bind(loaded_callback, base::Passed(&channel_ids))); } void SQLiteChannelIDStore::Backend::LoadInBackground( std::vector<std::unique_ptr<DefaultChannelIDStore::ChannelID>>* channel_ids) { DCHECK(background_task_runner_->RunsTasksOnCurrentThread()); // This method should be called only once per instance. DCHECK(!db_.get()); base::TimeTicks start = base::TimeTicks::Now(); // Ensure the parent directory for storing certs is created before reading // from it. const base::FilePath dir = path_.DirName(); if (!base::PathExists(dir) && !base::CreateDirectory(dir)) return; int64_t db_size = 0; if (base::GetFileSize(path_, &db_size)) UMA_HISTOGRAM_COUNTS("DomainBoundCerts.DBSizeInKB", db_size / 1024); db_.reset(new sql::Connection); db_->set_histogram_tag("DomainBoundCerts"); // Unretained to avoid a ref loop with db_. db_->set_error_callback( base::Bind(&SQLiteChannelIDStore::Backend::DatabaseErrorCallback, base::Unretained(this))); if (!db_->Open(path_)) { NOTREACHED() << "Unable to open cert DB."; if (corruption_detected_) KillDatabase(); db_.reset(); return; } if (!EnsureDatabaseVersion()) { NOTREACHED() << "Unable to open cert DB."; if (corruption_detected_) KillDatabase(); meta_table_.Reset(); db_.reset(); return; } db_->Preload(); // Slurp all the certs into the out-vector. 
sql::Statement smt(db_->GetUniqueStatement( "SELECT host, private_key, public_key, creation_time FROM channel_id")); if (!smt.is_valid()) { if (corruption_detected_) KillDatabase(); meta_table_.Reset(); db_.reset(); return; } while (smt.Step()) { std::vector<uint8_t> private_key_from_db, public_key_from_db; smt.ColumnBlobAsVector(1, &private_key_from_db); smt.ColumnBlobAsVector(2, &public_key_from_db); std::unique_ptr<crypto::ECPrivateKey> key( crypto::ECPrivateKey::CreateFromEncryptedPrivateKeyInfo( private_key_from_db, public_key_from_db)); if (!key) continue; std::unique_ptr<DefaultChannelIDStore::ChannelID> channel_id( new DefaultChannelIDStore::ChannelID( smt.ColumnString(0), // host base::Time::FromInternalValue(smt.ColumnInt64(3)), std::move(key))); channel_ids->push_back(std::move(channel_id)); } UMA_HISTOGRAM_COUNTS_10000( "DomainBoundCerts.DBLoadedCount", static_cast<base::HistogramBase::Sample>(channel_ids->size())); base::TimeDelta load_time = base::TimeTicks::Now() - start; UMA_HISTOGRAM_CUSTOM_TIMES("DomainBoundCerts.DBLoadTime", load_time, base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(1), 50); DVLOG(1) << "loaded " << channel_ids->size() << " in " << load_time.InMilliseconds() << " ms"; } bool SQLiteChannelIDStore::Backend::EnsureDatabaseVersion() { // Version check. if (!meta_table_.Init( db_.get(), kCurrentVersionNumber, kCompatibleVersionNumber)) { return false; } if (meta_table_.GetCompatibleVersionNumber() > kCurrentVersionNumber) { LOG(WARNING) << "Server bound cert database is too new."; return false; } int cur_version = meta_table_.GetVersionNumber(); sql::Transaction transaction(db_.get()); if (!transaction.Begin()) return false; // Create new table if it doesn't already exist if (!db_->DoesTableExist("channel_id")) { if (!db_->Execute( "CREATE TABLE channel_id (" "host TEXT NOT NULL UNIQUE PRIMARY KEY," "private_key BLOB NOT NULL," "public_key BLOB NOT NULL," "creation_time INTEGER)")) { return false; } } // Migrate from previous versions to new version if possible if (cur_version >= 2 && cur_version <= 4) { sql::Statement statement(db_->GetUniqueStatement( "SELECT origin, cert, private_key, cert_type FROM origin_bound_certs")); sql::Statement insert_statement(db_->GetUniqueStatement( "INSERT INTO channel_id (host, private_key, public_key, creation_time) " "VALUES (?, ?, ?, ?)")); if (!statement.is_valid() || !insert_statement.is_valid()) { LOG(WARNING) << "Unable to update server bound cert database to " << "version 5."; return false; } while (statement.Step()) { if (statement.ColumnInt64(3) != CLIENT_CERT_ECDSA_SIGN) continue; std::string origin = statement.ColumnString(0); std::string cert_from_db; statement.ColumnBlobAsString(1, &cert_from_db); std::string private_key; statement.ColumnBlobAsString(2, &private_key); // Parse the cert and extract the real value and then update the DB. 
scoped_refptr<X509Certificate> cert(X509Certificate::CreateFromBytes( cert_from_db.data(), static_cast<int>(cert_from_db.size()))); if (cert.get()) { insert_statement.Reset(true); insert_statement.BindString(0, origin); insert_statement.BindBlob(1, private_key.data(), static_cast<int>(private_key.size())); base::StringPiece spki; if (!asn1::ExtractSPKIFromDERCert(cert_from_db, &spki)) { LOG(WARNING) << "Unable to extract SPKI from cert when migrating " "channel id database to version 5."; return false; } insert_statement.BindBlob(2, spki.data(), static_cast<int>(spki.size())); insert_statement.BindInt64(3, cert->valid_start().ToInternalValue()); if (!insert_statement.Run()) { LOG(WARNING) << "Unable to update channel id database to " << "version 5."; return false; } } else { // If there's a cert we can't parse, just leave it. It'll get replaced // with a new one if we ever try to use it. LOG(WARNING) << "Error parsing cert for database upgrade for origin " << statement.ColumnString(0); } } } if (cur_version < kCurrentVersionNumber) { sql::Statement statement( db_->GetUniqueStatement("DROP TABLE origin_bound_certs")); if (!statement.Run()) { LOG(WARNING) << "Error dropping old origin_bound_certs table"; return false; } meta_table_.SetVersionNumber(kCurrentVersionNumber); meta_table_.SetCompatibleVersionNumber(kCompatibleVersionNumber); } transaction.Commit(); // Put future migration cases here. return true; } void SQLiteChannelIDStore::Backend::DatabaseErrorCallback( int error, sql::Statement* stmt) { DCHECK(background_task_runner_->RunsTasksOnCurrentThread()); if (!sql::IsErrorCatastrophic(error)) return; // TODO(shess): Running KillDatabase() multiple times should be // safe. if (corruption_detected_) return; corruption_detected_ = true; // TODO(shess): Consider just calling RazeAndClose() immediately. // db_ may not be safe to reset at this point, but RazeAndClose() // would cause the stack to unwind safely with errors. background_task_runner_->PostTask(FROM_HERE, base::Bind(&Backend::KillDatabase, this)); } void SQLiteChannelIDStore::Backend::KillDatabase() { DCHECK(background_task_runner_->RunsTasksOnCurrentThread()); if (db_) { // This Backend will now be in-memory only. In a future run the database // will be recreated. Hopefully things go better then! bool success = db_->RazeAndClose(); UMA_HISTOGRAM_BOOLEAN("DomainBoundCerts.KillDatabaseResult", success); meta_table_.Reset(); db_.reset(); } } void SQLiteChannelIDStore::Backend::AddChannelID( const DefaultChannelIDStore::ChannelID& channel_id) { BatchOperation(PendingOperation::CHANNEL_ID_ADD, channel_id); } void SQLiteChannelIDStore::Backend::DeleteChannelID( const DefaultChannelIDStore::ChannelID& channel_id) { BatchOperation(PendingOperation::CHANNEL_ID_DELETE, channel_id); } void SQLiteChannelIDStore::Backend::DeleteAllInList( const std::list<std::string>& server_identifiers) { if (server_identifiers.empty()) return; // Perform deletion on background task runner. background_task_runner_->PostTask( FROM_HERE, base::Bind( &Backend::BackgroundDeleteAllInList, this, server_identifiers)); } void SQLiteChannelIDStore::Backend::BatchOperation( PendingOperation::OperationType op, const DefaultChannelIDStore::ChannelID& channel_id) { // Commit every 30 seconds. static const int kCommitIntervalMs = 30 * 1000; // Commit right away if we have more than 512 outstanding operations. static const size_t kCommitAfterBatchSize = 512; // We do a full copy of the cert here, and hopefully just here. 
std::unique_ptr<PendingOperation> po(new PendingOperation(op, channel_id)); PendingOperationsList::size_type num_pending; { base::AutoLock locked(lock_); pending_.push_back(po.release()); num_pending = ++num_pending_; } if (num_pending == 1) { // We've gotten our first entry for this batch, fire off the timer. background_task_runner_->PostDelayedTask( FROM_HERE, base::Bind(&Backend::Commit, this), base::TimeDelta::FromMilliseconds(kCommitIntervalMs)); } else if (num_pending == kCommitAfterBatchSize) { // We've reached a big enough batch, fire off a commit now. background_task_runner_->PostTask(FROM_HERE, base::Bind(&Backend::Commit, this)); } } void SQLiteChannelIDStore::Backend::PrunePendingOperationsForDeletes( const std::list<std::string>& server_identifiers) { DCHECK(background_task_runner_->RunsTasksOnCurrentThread()); base::AutoLock locked(lock_); for (PendingOperationsList::iterator it = pending_.begin(); it != pending_.end();) { bool remove = std::find(server_identifiers.begin(), server_identifiers.end(), (*it)->channel_id().server_identifier()) != server_identifiers.end(); if (remove) { std::unique_ptr<PendingOperation> po(*it); it = pending_.erase(it); --num_pending_; } else { ++it; } } } void SQLiteChannelIDStore::Backend::Commit() { DCHECK(background_task_runner_->RunsTasksOnCurrentThread()); PendingOperationsList ops; { base::AutoLock locked(lock_); pending_.swap(ops); num_pending_ = 0; } // Maybe an old timer fired or we are already Close()'ed. if (!db_.get() || ops.empty()) return; sql::Statement add_statement(db_->GetCachedStatement( SQL_FROM_HERE, "INSERT INTO channel_id (host, private_key, public_key, " "creation_time) VALUES (?,?,?,?)")); if (!add_statement.is_valid()) return; sql::Statement del_statement(db_->GetCachedStatement( SQL_FROM_HERE, "DELETE FROM channel_id WHERE host=?")); if (!del_statement.is_valid()) return; sql::Transaction transaction(db_.get()); if (!transaction.Begin()) return; for (PendingOperationsList::iterator it = ops.begin(); it != ops.end(); ++it) { // Free the certs as we commit them to the database. std::unique_ptr<PendingOperation> po(*it); switch (po->op()) { case PendingOperation::CHANNEL_ID_ADD: { add_statement.Reset(true); add_statement.BindString(0, po->channel_id().server_identifier()); std::vector<uint8_t> private_key, public_key; if (!po->channel_id().key()->ExportEncryptedPrivateKey(&private_key)) continue; if (!po->channel_id().key()->ExportPublicKey(&public_key)) continue; add_statement.BindBlob( 1, private_key.data(), static_cast<int>(private_key.size())); add_statement.BindBlob(2, public_key.data(), static_cast<int>(public_key.size())); add_statement.BindInt64( 3, po->channel_id().creation_time().ToInternalValue()); if (!add_statement.Run()) NOTREACHED() << "Could not add a server bound cert to the DB."; break; } case PendingOperation::CHANNEL_ID_DELETE: del_statement.Reset(true); del_statement.BindString(0, po->channel_id().server_identifier()); if (!del_statement.Run()) NOTREACHED() << "Could not delete a server bound cert from the DB."; break; default: NOTREACHED(); break; } } transaction.Commit(); } // Fire off a close message to the background task runner. We could still have a // pending commit timer that will be holding a reference on us, but if/when // this fires we will already have been cleaned up and it will be ignored. void SQLiteChannelIDStore::Backend::Close() { // Must close the backend on the background task runner. 
background_task_runner_->PostTask( FROM_HERE, base::Bind(&Backend::InternalBackgroundClose, this)); } void SQLiteChannelIDStore::Backend::InternalBackgroundClose() { DCHECK(background_task_runner_->RunsTasksOnCurrentThread()); // Commit any pending operations Commit(); db_.reset(); } void SQLiteChannelIDStore::Backend::BackgroundDeleteAllInList( const std::list<std::string>& server_identifiers) { DCHECK(background_task_runner_->RunsTasksOnCurrentThread()); if (!db_.get()) return; PrunePendingOperationsForDeletes(server_identifiers); sql::Statement del_smt(db_->GetCachedStatement( SQL_FROM_HERE, "DELETE FROM channel_id WHERE host=?")); if (!del_smt.is_valid()) { LOG(WARNING) << "Unable to delete channel ids."; return; } sql::Transaction transaction(db_.get()); if (!transaction.Begin()) { LOG(WARNING) << "Unable to delete channel ids."; return; } for (std::list<std::string>::const_iterator it = server_identifiers.begin(); it != server_identifiers.end(); ++it) { del_smt.Reset(true); del_smt.BindString(0, *it); if (!del_smt.Run()) NOTREACHED() << "Could not delete a channel id from the DB."; } if (!transaction.Commit()) LOG(WARNING) << "Unable to delete channel ids."; } void SQLiteChannelIDStore::Backend::SetForceKeepSessionState() { base::AutoLock locked(lock_); force_keep_session_state_ = true; } SQLiteChannelIDStore::SQLiteChannelIDStore( const base::FilePath& path, const scoped_refptr<base::SequencedTaskRunner>& background_task_runner) : backend_(new Backend(path, background_task_runner)) { } void SQLiteChannelIDStore::Load(const LoadedCallback& loaded_callback) { backend_->Load(loaded_callback); } void SQLiteChannelIDStore::AddChannelID( const DefaultChannelIDStore::ChannelID& channel_id) { backend_->AddChannelID(channel_id); } void SQLiteChannelIDStore::DeleteChannelID( const DefaultChannelIDStore::ChannelID& channel_id) { backend_->DeleteChannelID(channel_id); } void SQLiteChannelIDStore::DeleteAllInList( const std::list<std::string>& server_identifiers) { backend_->DeleteAllInList(server_identifiers); } void SQLiteChannelIDStore::SetForceKeepSessionState() { backend_->SetForceKeepSessionState(); } SQLiteChannelIDStore::~SQLiteChannelIDStore() { backend_->Close(); // We release our reference to the Backend, though it will probably still have // a reference if the background task runner has not run Close() yet. } } // namespace net
7,749
310
<gh_stars>100-1000 { "name": "X-A1", "description": "A 16.3 megapixel digital camera.", "url": "http://www.fujifilm.com/products/digital_cameras/x/fujifilm_x_a1/" }
77
514
<reponame>srinivas32/mle<gh_stars>100-1000
#include "test.h"

char *str = "hello\nworld";

void test(buffer_t *buf, mark_t *cur) {
    mark_move_beginning(cur);

    mark_move_col(cur, 5);
    ASSERT("col1", 5, cur->col);

    mark_move_col(cur, 6);
    ASSERT("oob", 5, cur->col);
}
134
1,362
# coding=utf-8 r""" This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """ from tests import IntegrationTestCase from tests.holodeck import Request from twilio.base.exceptions import TwilioException from twilio.http.response import Response class EvaluationTestCase(IntegrationTestCase): def test_create_request(self): self.holodeck.mock(Response(500, '')) with self.assertRaises(TwilioException): self.client.numbers.v2.regulatory_compliance \ .bundles("BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .evaluations.create() self.holodeck.assert_has_request(Request( 'post', 'https://numbers.twilio.com/v2/RegulatoryCompliance/Bundles/BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Evaluations', )) def test_create_response(self): self.holodeck.mock(Response( 201, ''' { "sid": "ELaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "account_sid": "AC<KEY>", "regulation_sid": "RNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "bundle_sid": "BUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "status": "noncompliant", "date_created": "2020-04-28T18:14:01Z", "url": "https://numbers.twilio.com/v2/RegulatoryCompliance/Bundles/BUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Evaluations/ELaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "results": [ { "friendly_name": "Business", "object_type": "business", "passed": false, "failure_reason": "A Business End-User is missing. Please add one to the regulatory bundle.", "error_code": 22214, "valid": [], "invalid": [ { "friendly_name": "Business Name", "object_field": "business_name", "failure_reason": "The Business Name is missing. Please enter in a Business Name on the Business information.", "error_code": 22215 }, { "friendly_name": "Business Registration Number", "object_field": "business_registration_number", "failure_reason": "The Business Registration Number is missing. Please enter in a Business Registration Number on the Business information.", "error_code": 22215 }, { "friendly_name": "<NAME>", "object_field": "first_name", "failure_reason": "The First Name is missing. Please enter in a First Name on the Business information.", "error_code": 22215 }, { "friendly_name": "<NAME>", "object_field": "last_name", "failure_reason": "The Last Name is missing. Please enter in a Last Name on the Business information.", "error_code": 22215 } ], "requirement_friendly_name": "Business", "requirement_name": "business_info" }, { "friendly_name": "Excerpt from the commercial register (Extrait K-bis) showing name of Authorized Representative", "object_type": "commercial_registrar_excerpt", "passed": false, "failure_reason": "An Excerpt from the commercial register (Extrait K-bis) showing name of Authorized Representative is missing. Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [ { "friendly_name": "Business Name", "object_field": "business_name", "failure_reason": "The Business Name is missing. Or, it does not match the Business Name you entered within Business information. Please enter in the Business Name shown on the Excerpt from the commercial register (Extrait K-bis) showing name of Authorized Representative or make sure both Business Name fields use the same exact inputs.", "error_code": 22217 } ], "requirement_friendly_name": "Business Name", "requirement_name": "business_name_info" }, { "friendly_name": "Excerpt from the commercial register showing French address", "object_type": "commercial_registrar_excerpt", "passed": false, "failure_reason": "An Excerpt from the commercial register showing French address is missing. 
Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [ { "friendly_name": "Address sid(s)", "object_field": "address_sids", "failure_reason": "The Address is missing. Please enter in the address shown on the Excerpt from the commercial register showing French address.", "error_code": 22219 } ], "requirement_friendly_name": "Business Address (Proof of Address)", "requirement_name": "business_address_proof_info" }, { "friendly_name": "Excerpt from the commercial register (Extrait K-bis)", "object_type": "commercial_registrar_excerpt", "passed": false, "failure_reason": "An Excerpt from the commercial register (Extrait K-bis) is missing. Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [ { "friendly_name": "Document Number", "object_field": "document_number", "failure_reason": "The Document Number is missing. Please enter in the Document Number shown on the Excerpt from the commercial register (Extrait K-bis).", "error_code": 22217 } ], "requirement_friendly_name": "Business Registration Number", "requirement_name": "business_reg_no_info" }, { "friendly_name": "Government-issued ID", "object_type": "government_issued_document", "passed": false, "failure_reason": "A Government-issued ID is missing. Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [ { "friendly_name": "<NAME>", "object_field": "first_name", "failure_reason": "The First Name is missing. Or, it does not match the First Name you entered within Business information. Please enter in the First Name shown on the Government-issued ID or make sure both First Name fields use the same exact inputs.", "error_code": 22217 }, { "friendly_name": "<NAME>", "object_field": "last_name", "failure_reason": "The Last Name is missing. Or, it does not match the Last Name you entered within Business information. Please enter in the Last Name shown on the Government-issued ID or make sure both Last Name fields use the same exact inputs.", "error_code": 22217 } ], "requirement_friendly_name": "Name of Authorized Representative", "requirement_name": "name_of_auth_rep_info" }, { "friendly_name": "Executed Copy of Power of Attorney", "object_type": "power_of_attorney", "passed": false, "failure_reason": "An Executed Copy of Power of Attorney is missing. Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [], "requirement_friendly_name": "Power of Attorney", "requirement_name": "power_of_attorney_info" }, { "friendly_name": "Government-issued ID", "object_type": "government_issued_document", "passed": false, "failure_reason": "A Government-issued ID is missing. 
Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [ { "friendly_name": "<NAME>", "object_field": "first_name", "failure_reason": "The First Name is missing on the Governnment-Issued ID.", "error_code": 22217 }, { "friendly_name": "<NAME>", "object_field": "last_name", "failure_reason": "The Last Name is missing on the Government-issued ID", "error_code": 22217 } ], "requirement_friendly_name": "Name of Person granted the Power of Attorney", "requirement_name": "name_in_power_of_attorney_info" } ] } ''' )) actual = self.client.numbers.v2.regulatory_compliance \ .bundles("BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .evaluations.create() self.assertIsNotNone(actual) def test_list_request(self): self.holodeck.mock(Response(500, '')) with self.assertRaises(TwilioException): self.client.numbers.v2.regulatory_compliance \ .bundles("BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .evaluations.list() self.holodeck.assert_has_request(Request( 'get', 'https://numbers.twilio.com/v2/RegulatoryCompliance/Bundles/BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Evaluations', )) def test_read_empty_response(self): self.holodeck.mock(Response( 200, ''' { "results": [], "meta": { "page": 0, "page_size": 50, "first_page_url": "https://numbers.twilio.com/v2/RegulatoryCompliance/Bundles/BUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Evaluations?PageSize=50&Page=0", "previous_page_url": null, "url": "https://numbers.twilio.com/v2/RegulatoryCompliance/Bundles/BUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Evaluations?PageSize=50&Page=0", "next_page_url": null, "key": "results" } } ''' )) actual = self.client.numbers.v2.regulatory_compliance \ .bundles("BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .evaluations.list() self.assertIsNotNone(actual) def test_read_full_response(self): self.holodeck.mock(Response( 200, ''' { "results": [ { "sid": "ELaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "account_sid": "<KEY>", "regulation_sid": "RNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "bundle_sid": "BUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "status": "noncompliant", "date_created": "2020-04-28T18:14:01Z", "url": "https://numbers.twilio.com/v2/RegulatoryCompliance/Bundles/BUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Evaluations/ELaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "results": [ { "friendly_name": "Business", "object_type": "business", "passed": false, "failure_reason": "A Business End-User is missing. Please add one to the regulatory bundle.", "error_code": 22214, "valid": [], "invalid": [ { "friendly_name": "Business Name", "object_field": "business_name", "failure_reason": "The Business Name is missing. Please enter in a Business Name on the Business information.", "error_code": 22215 }, { "friendly_name": "Business Registration Number", "object_field": "business_registration_number", "failure_reason": "The Business Registration Number is missing. Please enter in a Business Registration Number on the Business information.", "error_code": 22215 }, { "friendly_name": "First Name", "object_field": "first_name", "failure_reason": "The First Name is missing. Please enter in a First Name on the Business information.", "error_code": 22215 }, { "friendly_name": "Last Name", "object_field": "last_name", "failure_reason": "The Last Name is missing. 
Please enter in a Last Name on the Business information.", "error_code": 22215 } ], "requirement_friendly_name": "Business", "requirement_name": "business_info" }, { "friendly_name": "Excerpt from the commercial register (Extrait K-bis) showing name of Authorized Representative", "object_type": "commercial_registrar_excerpt", "passed": false, "failure_reason": "An Excerpt from the commercial register (Extrait K-bis) showing name of Authorized Representative is missing. Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [ { "friendly_name": "Business Name", "object_field": "business_name", "failure_reason": "The Business Name is missing. Or, it does not match the Business Name you entered within Business information. Please enter in the Business Name shown on the Excerpt from the commercial register (Extrait K-bis) showing name of Authorized Representative or make sure both Business Name fields use the same exact inputs.", "error_code": 22217 } ], "requirement_friendly_name": "Business Name", "requirement_name": "business_name_info" }, { "friendly_name": "Excerpt from the commercial register showing French address", "object_type": "commercial_registrar_excerpt", "passed": false, "failure_reason": "An Excerpt from the commercial register showing French address is missing. Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [ { "friendly_name": "Address sid(s)", "object_field": "address_sids", "failure_reason": "The Address is missing. Please enter in the address shown on the Excerpt from the commercial register showing French address.", "error_code": 22219 } ], "requirement_friendly_name": "Business Address (Proof of Address)", "requirement_name": "business_address_proof_info" }, { "friendly_name": "Excerpt from the commercial register (Extrait K-bis)", "object_type": "commercial_registrar_excerpt", "passed": false, "failure_reason": "An Excerpt from the commercial register (Extrait K-bis) is missing. Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [ { "friendly_name": "Document Number", "object_field": "document_number", "failure_reason": "The Document Number is missing. Please enter in the Document Number shown on the Excerpt from the commercial register (Extrait K-bis).", "error_code": 22217 } ], "requirement_friendly_name": "Business Registration Number", "requirement_name": "business_reg_no_info" }, { "friendly_name": "Government-issued ID", "object_type": "government_issued_document", "passed": false, "failure_reason": "A Government-issued ID is missing. Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [ { "friendly_name": "<NAME>", "object_field": "first_name", "failure_reason": "The First Name is missing. Or, it does not match the First Name you entered within Business information. Please enter in the First Name shown on the Government-issued ID or make sure both First Name fields use the same exact inputs.", "error_code": 22217 }, { "friendly_name": "<NAME>", "object_field": "last_name", "failure_reason": "The Last Name is missing. Or, it does not match the Last Name you entered within Business information. 
Please enter in the Last Name shown on the Government-issued ID or make sure both Last Name fields use the same exact inputs.", "error_code": 22217 } ], "requirement_friendly_name": "Name of Authorized Representative", "requirement_name": "name_of_auth_rep_info" }, { "friendly_name": "Executed Copy of Power of Attorney", "object_type": "power_of_attorney", "passed": false, "failure_reason": "An Executed Copy of Power of Attorney is missing. Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [], "requirement_friendly_name": "Power of Attorney", "requirement_name": "power_of_attorney_info" }, { "friendly_name": "Government-issued ID", "object_type": "government_issued_document", "passed": false, "failure_reason": "A Government-issued ID is missing. Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [ { "friendly_name": "<NAME>", "object_field": "first_name", "failure_reason": "The First Name is missing on the Governnment-Issued ID.", "error_code": 22217 }, { "friendly_name": "<NAME>", "object_field": "last_name", "failure_reason": "The Last Name is missing on the Government-issued ID", "error_code": 22217 } ], "requirement_friendly_name": "Name of Person granted the Power of Attorney", "requirement_name": "name_in_power_of_attorney_info" } ] } ], "meta": { "page": 0, "page_size": 50, "first_page_url": "https://numbers.twilio.com/v2/RegulatoryCompliance/Bundles/BUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Evaluations?PageSize=50&Page=0", "previous_page_url": null, "url": "https://numbers.twilio.com/v2/RegulatoryCompliance/Bundles/BUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Evaluations?PageSize=50&Page=0", "next_page_url": null, "key": "results" } } ''' )) actual = self.client.numbers.v2.regulatory_compliance \ .bundles("BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .evaluations.list() self.assertIsNotNone(actual) def test_fetch_request(self): self.holodeck.mock(Response(500, '')) with self.assertRaises(TwilioException): self.client.numbers.v2.regulatory_compliance \ .bundles("BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .evaluations("ELXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch() self.holodeck.assert_has_request(Request( 'get', 'https://numbers.twilio.com/v2/RegulatoryCompliance/Bundles/BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Evaluations/ELXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', )) def test_fetch_response(self): self.holodeck.mock(Response( 200, ''' { "sid": "ELaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "regulation_sid": "RNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "bundle_sid": "BUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "status": "noncompliant", "date_created": "2020-04-28T18:14:01Z", "url": "https://numbers.twilio.com/v2/RegulatoryCompliance/Bundles/BUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Evaluations/ELaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "results": [ { "friendly_name": "Business", "object_type": "business", "passed": false, "failure_reason": "A Business End-User is missing. Please add one to the regulatory bundle.", "error_code": 22214, "valid": [], "invalid": [ { "friendly_name": "Business Name", "object_field": "business_name", "failure_reason": "The Business Name is missing. Please enter in a Business Name on the Business information.", "error_code": 22215 }, { "friendly_name": "Business Registration Number", "object_field": "business_registration_number", "failure_reason": "The Business Registration Number is missing. 
Please enter in a Business Registration Number on the Business information.", "error_code": 22215 }, { "friendly_name": "First Name", "object_field": "first_name", "failure_reason": "The First Name is missing. Please enter in a First Name on the Business information.", "error_code": 22215 }, { "friendly_name": "Last Name", "object_field": "last_name", "failure_reason": "The Last Name is missing. Please enter in a Last Name on the Business information.", "error_code": 22215 } ], "requirement_friendly_name": "Business", "requirement_name": "business_info" }, { "friendly_name": "Excerpt from the commercial register (Extrait K-bis) showing name of Authorized Representative", "object_type": "commercial_registrar_excerpt", "passed": false, "failure_reason": "An Excerpt from the commercial register (Extrait K-bis) showing name of Authorized Representative is missing. Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [ { "friendly_name": "Business Name", "object_field": "business_name", "failure_reason": "The Business Name is missing. Or, it does not match the Business Name you entered within Business information. Please enter in the Business Name shown on the Excerpt from the commercial register (Extrait K-bis) showing name of Authorized Representative or make sure both Business Name fields use the same exact inputs.", "error_code": 22217 } ], "requirement_friendly_name": "Business Name", "requirement_name": "business_name_info" }, { "friendly_name": "Excerpt from the commercial register showing French address", "object_type": "commercial_registrar_excerpt", "passed": false, "failure_reason": "An Excerpt from the commercial register showing French address is missing. Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [ { "friendly_name": "Address sid(s)", "object_field": "address_sids", "failure_reason": "The Address is missing. Please enter in the address shown on the Excerpt from the commercial register showing French address.", "error_code": 22219 } ], "requirement_friendly_name": "Business Address (Proof of Address)", "requirement_name": "business_address_proof_info" }, { "friendly_name": "Excerpt from the commercial register (Extrait K-bis)", "object_type": "commercial_registrar_excerpt", "passed": false, "failure_reason": "An Excerpt from the commercial register (Extrait K-bis) is missing. Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [ { "friendly_name": "Document Number", "object_field": "document_number", "failure_reason": "The Document Number is missing. Please enter in the Document Number shown on the Excerpt from the commercial register (Extrait K-bis).", "error_code": 22217 } ], "requirement_friendly_name": "Business Registration Number", "requirement_name": "business_reg_no_info" }, { "friendly_name": "Government-issued ID", "object_type": "government_issued_document", "passed": false, "failure_reason": "A Government-issued ID is missing. Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [ { "friendly_name": "<NAME>", "object_field": "first_name", "failure_reason": "The First Name is missing. Or, it does not match the First Name you entered within Business information. Please enter in the First Name shown on the Government-issued ID or make sure both First Name fields use the same exact inputs.", "error_code": 22217 }, { "friendly_name": "<NAME>", "object_field": "last_name", "failure_reason": "The Last Name is missing. 
Or, it does not match the Last Name you entered within Business information. Please enter in the Last Name shown on the Government-issued ID or make sure both Last Name fields use the same exact inputs.", "error_code": 22217 } ], "requirement_friendly_name": "Name of Authorized Representative", "requirement_name": "name_of_auth_rep_info" }, { "friendly_name": "Executed Copy of Power of Attorney", "object_type": "power_of_attorney", "passed": false, "failure_reason": "An Executed Copy of Power of Attorney is missing. Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [], "requirement_friendly_name": "Power of Attorney", "requirement_name": "power_of_attorney_info" }, { "friendly_name": "Government-issued ID", "object_type": "government_issued_document", "passed": false, "failure_reason": "A Government-issued ID is missing. Please add one to the regulatory bundle.", "error_code": 22216, "valid": [], "invalid": [ { "friendly_name": "<NAME>", "object_field": "first_name", "failure_reason": "The First Name is missing on the Governnment-Issued ID.", "error_code": 22217 }, { "friendly_name": "<NAME>", "object_field": "last_name", "failure_reason": "The Last Name is missing on the Government-issued ID", "error_code": 22217 } ], "requirement_friendly_name": "Name of Person granted the Power of Attorney", "requirement_name": "name_in_power_of_attorney_info" } ] } ''' )) actual = self.client.numbers.v2.regulatory_compliance \ .bundles("BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \ .evaluations("ELXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch() self.assertIsNotNone(actual)
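The mocked requests above exercise the Evaluations subresource of a Regulatory Compliance Bundle. As a rough illustration of the same calls outside the Holodeck test harness, the following sketch uses the public twilio-python client; the credentials and SIDs are placeholders, not real values, and error handling is omitted.

# Hypothetical usage sketch (not part of the test file above); replace the
# placeholder SIDs and auth token with real values before running.
from twilio.rest import Client

client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
bundle_sid = "BUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"

# Trigger a fresh evaluation of the bundle against its regulation.
evaluation = client.numbers.v2.regulatory_compliance \
    .bundles(bundle_sid) \
    .evaluations.create()

# Review past evaluations and see why the bundle is noncompliant.
for past in client.numbers.v2.regulatory_compliance \
        .bundles(bundle_sid).evaluations.list():
    print(past.sid, past.status)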
21,148
621
<reponame>fengjixuchui/VivienneVMM<filename>VivienneTests/test_token_parser.cpp #include <Windows.h> #include <cstdio> #include "test_util.h" #include "..\common\arch_x64.h" #include "..\common\driver_io_types.h" #include "..\VivienneCL\token_parser.h" //============================================================================= // Token Parser Test Cases //============================================================================= // // TestParseMemoryDescriptionToken // _Check_return_ static BOOL TestParseMemoryDescriptionToken( _In_z_ PCSTR pszToken ) { MEMORY_DATA_TYPE MemoryDataType = MDT_BYTE; CEC_MEMORY_DESCRIPTION MemoryDescription = {}; BOOL status = TRUE; status = ParseMemoryDescriptionToken( pszToken, MemoryDataType, &MemoryDescription); if (!status) { goto exit; } exit: return status; } #define ASSERT_VALID_MEMORY_DESCRIPTION(pszToken) \ if (!TestParseMemoryDescriptionToken(pszToken)) \ { \ FAIL_TEST("ParseMemoryDescriptionToken failed (v): '%s'", pszToken);\ } #define ASSERT_INVALID_MEMORY_DESCRIPTION(pszToken) \ if (TestParseMemoryDescriptionToken(pszToken)) \ { \ FAIL_TEST("ParseMemoryDescriptionToken failed (inv): '%s'", pszToken);\ } // // TestVirtualAddressMemoryDescriptions // static VOID TestVirtualAddressMemoryDescriptions() { // // Valid // ASSERT_VALID_MEMORY_DESCRIPTION("14FF20"); ASSERT_VALID_MEMORY_DESCRIPTION("0x14FF20"); ASSERT_VALID_MEMORY_DESCRIPTION("FFFFFFFF"); ASSERT_VALID_MEMORY_DESCRIPTION("0xFFFFFFFF"); // // Invalid // ASSERT_INVALID_MEMORY_DESCRIPTION("raxf"); ASSERT_INVALID_MEMORY_DESCRIPTION("0xzzZ"); } // // TestIndirectAddressMemoryDescriptions // static VOID TestIndirectAddressMemoryDescriptions() { // // BASE // // Valid ASSERT_VALID_MEMORY_DESCRIPTION("rbx"); // Invalid ASSERT_INVALID_MEMORY_DESCRIPTION(" rbx "); ASSERT_INVALID_MEMORY_DESCRIPTION("rc"); // // BASE +- DISPLACEMENT // // Valid ASSERT_VALID_MEMORY_DESCRIPTION("rip+1"); ASSERT_VALID_MEMORY_DESCRIPTION("rip+956759"); ASSERT_VALID_MEMORY_DESCRIPTION("rip+0x956759"); ASSERT_VALID_MEMORY_DESCRIPTION("rip+DEEEED"); ASSERT_VALID_MEMORY_DESCRIPTION("rip+0xDEEEED"); ASSERT_VALID_MEMORY_DESCRIPTION("rip+ab12FFF3cd"); ASSERT_VALID_MEMORY_DESCRIPTION("rip+0xab12FFF3cd"); ASSERT_VALID_MEMORY_DESCRIPTION("rax-1"); ASSERT_VALID_MEMORY_DESCRIPTION("rax-123123132"); ASSERT_VALID_MEMORY_DESCRIPTION("rax-0x123123132"); ASSERT_VALID_MEMORY_DESCRIPTION("rax-AAbb"); ASSERT_VALID_MEMORY_DESCRIPTION("rax-0xAAbb"); ASSERT_VALID_MEMORY_DESCRIPTION("rax-1100FfFf"); ASSERT_VALID_MEMORY_DESCRIPTION("rax-0x1100FfFf"); // Invalid ASSERT_INVALID_MEMORY_DESCRIPTION("rax + 1"); ASSERT_INVALID_MEMORY_DESCRIPTION("rax - 1"); ASSERT_INVALID_MEMORY_DESCRIPTION("rax-f+f"); // // BASE + INDEX // // Valid ASSERT_VALID_MEMORY_DESCRIPTION("rip+rip"); ASSERT_VALID_MEMORY_DESCRIPTION("rip+rax"); // Invalid ASSERT_INVALID_MEMORY_DESCRIPTION("rip-rip"); ASSERT_INVALID_MEMORY_DESCRIPTION("rip-rax"); // // BASE + INDEX +- DISPLACEMENT // // Valid ASSERT_VALID_MEMORY_DESCRIPTION("rbx+rsi+1"); ASSERT_VALID_MEMORY_DESCRIPTION("rbx+rsi+323520"); ASSERT_VALID_MEMORY_DESCRIPTION("rbx+rsi+0x323520"); ASSERT_VALID_MEMORY_DESCRIPTION("rbx+rsi+AAAA"); ASSERT_VALID_MEMORY_DESCRIPTION("rbx+rsi+0xAAAA"); ASSERT_VALID_MEMORY_DESCRIPTION("rbx+rsi+5AA3AA"); ASSERT_VALID_MEMORY_DESCRIPTION("rbx+rsi+0x5AA3AA"); ASSERT_VALID_MEMORY_DESCRIPTION("rdi+rsi-1"); ASSERT_VALID_MEMORY_DESCRIPTION("rdi+rsi-100101"); ASSERT_VALID_MEMORY_DESCRIPTION("rdi+rsi-0x100101"); ASSERT_VALID_MEMORY_DESCRIPTION("rdi+rsi-ABCDEF"); 
ASSERT_VALID_MEMORY_DESCRIPTION("rdi+rsi-0xABCDEF"); ASSERT_VALID_MEMORY_DESCRIPTION("rdi+rsi-111ABCDEF"); ASSERT_VALID_MEMORY_DESCRIPTION("rdi+rsi-1110xABCDEF"); // Invalid ASSERT_INVALID_MEMORY_DESCRIPTION("rdi+rsi+rax"); ASSERT_INVALID_MEMORY_DESCRIPTION("rdi+rsi-rax"); ASSERT_INVALID_MEMORY_DESCRIPTION("rdi+1-2"); // // INDEX * SCALE_FACTOR // // Valid ASSERT_VALID_MEMORY_DESCRIPTION("rsi*1"); ASSERT_VALID_MEMORY_DESCRIPTION("rsi*2"); ASSERT_VALID_MEMORY_DESCRIPTION("rsi*4"); ASSERT_VALID_MEMORY_DESCRIPTION("rsi*8"); // Invalid ASSERT_INVALID_MEMORY_DESCRIPTION("rsi*0"); ASSERT_INVALID_MEMORY_DESCRIPTION("rsi*3"); ASSERT_INVALID_MEMORY_DESCRIPTION("rsi*16"); ASSERT_INVALID_MEMORY_DESCRIPTION("rsi*+1"); ASSERT_INVALID_MEMORY_DESCRIPTION("rsi*-1"); ASSERT_INVALID_MEMORY_DESCRIPTION("rsi*+2"); ASSERT_INVALID_MEMORY_DESCRIPTION("rsi*-2"); // // INDEX * SCALE_FACTOR +- DISPLACEMENT // // Valid ASSERT_VALID_MEMORY_DESCRIPTION("rsp*2+1"); ASSERT_VALID_MEMORY_DESCRIPTION("rsp*2+86821"); ASSERT_VALID_MEMORY_DESCRIPTION("rsp*2+0x86821"); ASSERT_VALID_MEMORY_DESCRIPTION("rsp*2+FFFF"); ASSERT_VALID_MEMORY_DESCRIPTION("rsp*2+0xFFFF"); ASSERT_VALID_MEMORY_DESCRIPTION("rsp*2+1FF531FF"); ASSERT_VALID_MEMORY_DESCRIPTION("rsp*2+0x1FF531FF"); ASSERT_VALID_MEMORY_DESCRIPTION("rdx*2-1"); ASSERT_VALID_MEMORY_DESCRIPTION("rdx*2-12151"); ASSERT_VALID_MEMORY_DESCRIPTION("rdx*2-0x12151"); ASSERT_VALID_MEMORY_DESCRIPTION("rdx*2-ABaCE"); ASSERT_VALID_MEMORY_DESCRIPTION("rdx*2-0xABaCE"); ASSERT_VALID_MEMORY_DESCRIPTION("rdx*2-382ff2aB"); ASSERT_VALID_MEMORY_DESCRIPTION("rdx*2-0x382ff2aB"); // Invalid ASSERT_INVALID_MEMORY_DESCRIPTION("rdx*2+rax"); ASSERT_INVALID_MEMORY_DESCRIPTION("rdx*2-rax"); ASSERT_INVALID_MEMORY_DESCRIPTION("rdx*2+0xABaCE+rax"); // // BASE + INDEX * SCALE_FACTOR +- DISPLACEMENT // // Valid ASSERT_VALID_MEMORY_DESCRIPTION("rbx+rdi*8+1"); ASSERT_VALID_MEMORY_DESCRIPTION("rbx+rdi*8+323520"); ASSERT_VALID_MEMORY_DESCRIPTION("rbx+rdi*8+0x323520"); ASSERT_VALID_MEMORY_DESCRIPTION("rbx+rdi*8+0EEEEEF"); ASSERT_VALID_MEMORY_DESCRIPTION("rbx+rdi*8+0xEEEEEF"); ASSERT_VALID_MEMORY_DESCRIPTION("rbx+rdi*8+EEEEEF333"); ASSERT_VALID_MEMORY_DESCRIPTION("rbx+rdi*8+0xEEEEEF333"); ASSERT_VALID_MEMORY_DESCRIPTION("rcx+rbp*8-1"); ASSERT_VALID_MEMORY_DESCRIPTION("rcx+rbp*8-323520"); ASSERT_VALID_MEMORY_DESCRIPTION("rcx+rbp*8-0x323520"); ASSERT_VALID_MEMORY_DESCRIPTION("rcx+rbp*8-BBDDFF"); ASSERT_VALID_MEMORY_DESCRIPTION("rcx+rbp*8-0xBBDDFF"); ASSERT_VALID_MEMORY_DESCRIPTION("rcx+rbp*8-B2BDDF3F"); ASSERT_VALID_MEMORY_DESCRIPTION("rcx+rbp*8-0xB2BDDF3F"); // Invalid ASSERT_INVALID_MEMORY_DESCRIPTION("rsp+rdx*2+rax"); ASSERT_INVALID_MEMORY_DESCRIPTION("rsp-rdx*2+rax"); ASSERT_INVALID_MEMORY_DESCRIPTION("rsp+rdx*2+0xABaCE+rax"); ASSERT_INVALID_MEMORY_DESCRIPTION("rsp-rdx*2+0xABaCE+rax"); } //============================================================================= // Test Interface //============================================================================= // // TestTokenParser // VOID TestTokenParser() { PRINT_TEST_HEADER; TestVirtualAddressMemoryDescriptions(); TestIndirectAddressMemoryDescriptions(); PRINT_TEST_FOOTER; }
3,564
22,453
<gh_stars>1000+ from __future__ import print_function, division import numpy as np from scipy.stats import chi2, multivariate_normal from mlfromscratch.utils import mean_squared_error, train_test_split, polynomial_features class BayesianRegression(object): """Bayesian regression model. If poly_degree is specified the features will be transformed to with a polynomial basis function, which allows for polynomial regression. Assumes Normal prior and likelihood for the weights and scaled inverse chi-squared prior and likelihood for the variance of the weights. Parameters: ----------- n_draws: float The number of simulated draws from the posterior of the parameters. mu0: array The mean values of the prior Normal distribution of the parameters. omega0: array The precision matrix of the prior Normal distribution of the parameters. nu0: float The degrees of freedom of the prior scaled inverse chi squared distribution. sigma_sq0: float The scale parameter of the prior scaled inverse chi squared distribution. poly_degree: int The polynomial degree that the features should be transformed to. Allows for polynomial regression. cred_int: float The credible interval (ETI in this impl.). 95 => 95% credible interval of the posterior of the parameters. Reference: https://github.com/mattiasvillani/BayesLearnCourse/raw/master/Slides/BayesLearnL5.pdf """ def __init__(self, n_draws, mu0, omega0, nu0, sigma_sq0, poly_degree=0, cred_int=95): self.w = None self.n_draws = n_draws self.poly_degree = poly_degree self.cred_int = cred_int # Prior parameters self.mu0 = mu0 self.omega0 = omega0 self.nu0 = nu0 self.sigma_sq0 = sigma_sq0 # Allows for simulation from the scaled inverse chi squared # distribution. Assumes the variance is distributed according to # this distribution. # Reference: # https://en.wikipedia.org/wiki/Scaled_inverse_chi-squared_distribution def _draw_scaled_inv_chi_sq(self, n, df, scale): X = chi2.rvs(size=n, df=df) sigma_sq = df * scale / X return sigma_sq def fit(self, X, y): # If polynomial transformation if self.poly_degree: X = polynomial_features(X, degree=self.poly_degree) n_samples, n_features = np.shape(X) X_X = X.T.dot(X) # Least squares approximate of beta beta_hat = np.linalg.pinv(X_X).dot(X.T).dot(y) # The posterior parameters can be determined analytically since we assume # conjugate priors for the likelihoods. 
# Normal prior / likelihood => Normal posterior mu_n = np.linalg.pinv(X_X + self.omega0).dot(X_X.dot(beta_hat)+self.omega0.dot(self.mu0)) omega_n = X_X + self.omega0 # Scaled inverse chi-squared prior / likelihood => Scaled inverse chi-squared posterior nu_n = self.nu0 + n_samples sigma_sq_n = (1.0/nu_n)*(self.nu0*self.sigma_sq0 + \ (y.T.dot(y) + self.mu0.T.dot(self.omega0).dot(self.mu0) - mu_n.T.dot(omega_n.dot(mu_n)))) # Simulate parameter values for n_draws beta_draws = np.empty((self.n_draws, n_features)) for i in range(self.n_draws): sigma_sq = self._draw_scaled_inv_chi_sq(n=1, df=nu_n, scale=sigma_sq_n) beta = multivariate_normal.rvs(size=1, mean=mu_n[:,0], cov=sigma_sq*np.linalg.pinv(omega_n)) # Save parameter draws beta_draws[i, :] = beta # Select the mean of the simulated variables as the ones used to make predictions self.w = np.mean(beta_draws, axis=0) # Lower and upper boundary of the credible interval l_eti = 50 - self.cred_int/2 u_eti = 50 + self.cred_int/2 self.eti = np.array([[np.percentile(beta_draws[:,i], q=l_eti), np.percentile(beta_draws[:,i], q=u_eti)] \ for i in range(n_features)]) def predict(self, X, eti=False): # If polynomial transformation if self.poly_degree: X = polynomial_features(X, degree=self.poly_degree) y_pred = X.dot(self.w) # If the lower and upper boundaries for the 95% # equal tail interval should be returned if eti: lower_w = self.eti[:, 0] upper_w = self.eti[:, 1] y_lower_pred = X.dot(lower_w) y_upper_pred = X.dot(upper_w) return y_pred, y_lower_pred, y_upper_pred return y_pred
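A minimal usage sketch for the class above (not part of the original module), assuming the mlfromscratch package that provides these imports is installed; the synthetic data and prior values below are illustrative only.

# Illustrative usage only: the data and priors are made up for this sketch.
import numpy as np

np.random.seed(0)

# Synthetic linear data y = 1 + 3*x + noise, with an explicit bias column.
n_samples = 200
x = np.random.uniform(-1, 1, n_samples)
X = np.column_stack([np.ones(n_samples), x])        # shape (n_samples, 2)
y = (1.0 + 3.0 * x
     + np.random.normal(scale=0.3, size=n_samples)).reshape(-1, 1)

n_features = X.shape[1]
model = BayesianRegression(
    n_draws=500,
    mu0=np.zeros((n_features, 1)),        # vague Normal prior mean
    omega0=np.diag([1e-4] * n_features),  # near-zero prior precision
    nu0=1,
    sigma_sq0=1.0,
    poly_degree=0,                        # keep X as-is; bias column added above
    cred_int=95)
model.fit(X, y)

y_pred, y_lower, y_upper = model.predict(X, eti=True)  # point estimate + 95% ETI bounds
print(model.w)  # posterior-mean weights, roughly [1, 3]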
1,951
1,562
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gflags/gflags.h> #include <cmath> #include <cstdio> #include <cstdlib> #include <cstring> #include <ctime> #include <glog/logging.h> #include "pffft_sapi.sapi.h" // NOLINT(build/include) #include "sandboxed_api/util/flag.h" #include "sandboxed_api/vars.h" ABSL_DECLARE_FLAG(string, sandbox2_danger_danger_permit_all); ABSL_DECLARE_FLAG(string, sandbox2_danger_danger_permit_all_and_log); class PffftSapiSandbox : public PffftSandbox { public: std::unique_ptr<sandbox2::Policy> ModifyPolicy(sandbox2::PolicyBuilder*) { return sandbox2::PolicyBuilder() .AllowStaticStartup() .AllowOpen() .AllowRead() .AllowWrite() .AllowSystemMalloc() .AllowExit() .AllowSyscalls({ __NR_futex, __NR_close, __NR_getrusage, }) .BuildOrDie(); } }; // The output_format flag determines whether the output shows information in detail // or not. By default, the flag is set to 0, meaning an elaborate display // (see ShowOutput method). static bool ValidateFlag(const char* flagname, int32_t value) { if (value >= 0 && value < 32768) { return true; } LOG(ERROR) << "Invalid value for --" << flagname << "."; return false; } DEFINE_int32(output_format, 0, "Value to specify the output format."); DEFINE_validator(output_format, &ValidateFlag); double UclockSec() { return static_cast<double>(clock()) / CLOCKS_PER_SEC; } void ShowOutput(const char* name, int n, int complex, float flops, float t0, float t1, int max_iter) { float mflops = flops / 1e6 / (t1 - t0 + 1e-16); if (FLAGS_output_format) { if (flops != -1) { printf("|%9.0f ", mflops); } else { printf("| n/a "); } } else if (flops != -1) { printf("n=%5d, %s %16s : %6.0f MFlops [t=%6.0f ns, %d runs]\n", n, (complex ? "CPLX" : "REAL"), name, mflops, (t1 - t0) / 2 / max_iter * 1e9, max_iter); } fflush(stdout); } absl::Status PffftMain() { LOG(INFO) << "Initializing sandbox...\n"; PffftSapiSandbox sandbox; SAPI_RETURN_IF_ERROR(sandbox.Init()); PffftApi api(&sandbox); // kTransformSizes is a vector keeping the values over which n iterates, its // value representing the input length. More concretely, n is the number of data // points the calculation covers (determining its accuracy). To show the // performance of Fast Fourier Transforms, the program tests // various values of n. constexpr int kTransformSizes[] = { 64, 96, 128, 160, 192, 256, 384, 5 * 96, 512, 5 * 128, 3 * 256, 800, 1024, 2048, 2400, 4096, 8192, 9 * 1024, 16384, 32768}; for (int complex : {0, 1}) { for (int n : kTransformSizes) { const int n_float = n * (complex ?
2 : 1); int n_bytes = n_float * sizeof(float); std::vector<float> work(2 * n_float + 15, 0.0); sapi::v::Array<float> work_array(&work[0], work.size()); std::vector<float> x(n_bytes, 0.0); sapi::v::Array<float> x_array(&x[0], x.size()); std::vector<float> y(n_bytes, 0.0); sapi::v::Array<float> y_array(&y[0], y.size()); std::vector<float> z(n_bytes, 0.0); sapi::v::Array<float> z_array(&z[0], z.size()); double t0; double t1; double flops; int max_iter = 5120000 / n * 4; for (int k = 0; k < n_float; ++k) { x[k] = 0; } // FFTPack benchmark { // SIMD_SZ == 4 (returning value of pffft_simd_size()) int simd_size_iter = max_iter / 4; if (simd_size_iter == 0) simd_size_iter = 1; if (complex) { SAPI_RETURN_IF_ERROR(api.cffti(n, work_array.PtrBoth())) } else { SAPI_RETURN_IF_ERROR(api.rffti(n, work_array.PtrBoth())); } t0 = UclockSec(); for (int iter = 0; iter < simd_size_iter; ++iter) { if (complex) { SAPI_RETURN_IF_ERROR( api.cfftf(n, x_array.PtrBoth(), work_array.PtrBoth())); SAPI_RETURN_IF_ERROR( api.cfftb(n, x_array.PtrBoth(), work_array.PtrBoth())); } else { SAPI_RETURN_IF_ERROR( api.rfftf(n, x_array.PtrBoth(), work_array.PtrBoth())); SAPI_RETURN_IF_ERROR( api.rfftb(n, x_array.PtrBoth(), work_array.PtrBoth())); } } t1 = UclockSec(); flops = (simd_size_iter * 2) * ((complex ? 5 : 2.5) * static_cast<double>(n) * log(static_cast<double>(n)) / M_LN2); ShowOutput("FFTPack", n, complex, flops, t0, t1, simd_size_iter); } // PFFFT benchmark { SAPI_ASSIGN_OR_RETURN( PFFFT_Setup * s, api.pffft_new_setup(n, complex ? PFFFT_COMPLEX : PFFFT_REAL)); sapi::v::RemotePtr s_reg(s); t0 = UclockSec(); for (int iter = 0; iter < max_iter; ++iter) { SAPI_RETURN_IF_ERROR( api.pffft_transform(&s_reg, x_array.PtrBoth(), z_array.PtrBoth(), y_array.PtrBoth(), PFFFT_FORWARD)); SAPI_RETURN_IF_ERROR( api.pffft_transform(&s_reg, x_array.PtrBoth(), z_array.PtrBoth(), y_array.PtrBoth(), PFFFT_FORWARD)); } t1 = UclockSec(); SAPI_RETURN_IF_ERROR(api.pffft_destroy_setup(&s_reg)); flops = (max_iter * 2) * ((complex ? 5 : 2.5) * static_cast<double>(n) * log(static_cast<double>(n)) / M_LN2); ShowOutput("PFFFT", n, complex, flops, t0, t1, max_iter); LOG(INFO) << "n = " << n << " SUCCESSFULLY"; } } } return absl::OkStatus(); } int main(int argc, char* argv[]) { // Initialize Google's logging library. google::InitGoogleLogging(argv[0]); gflags::ParseCommandLineFlags(&argc, &argv, true); if (absl::Status status = PffftMain(); !status.ok()) { LOG(ERROR) << "Initialization failed: " << status.ToString(); return EXIT_FAILURE; } return EXIT_SUCCESS; }
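For reference, the MFlops figures printed by ShowOutput above come from an assumed cost of 5*n*log2(n) operations per complex transform and 2.5*n*log2(n) per real one, with two transforms per timed iteration. A small Python sketch of the same arithmetic (the sample timing is invented) for sanity-checking reported numbers:

# Mirrors the flops/MFlops arithmetic in ShowOutput()/PffftMain() above.
import math

def estimated_mflops(n, iterations, elapsed_seconds, is_complex):
    per_transform = (5.0 if is_complex else 2.5) * n * math.log2(n)
    flops = (iterations * 2) * per_transform  # two transforms per iteration
    return flops / 1e6 / (elapsed_seconds + 1e-16)

# Example: a complex 1024-point transform, 20000 iterations timed at 0.5 s.
print(round(estimated_mflops(1024, 20000, 0.5, True)))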
3,047
582
/******************************************************************************* * Copyright (c) 2003, 2010 IBM Corporation and others. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: * IBM Corporation - initial API and implementation *******************************************************************************/ package org.eclipse.draw2d.graph; import org.eclipse.draw2d.geometry.Insets; /** * For INTERNAL use only. * * @author hudsonr * @since 2.1.2 */ class SubgraphBoundary extends Node { /** * constant indicating TOP. */ public static final int TOP = 0; /** * constant indicating LEFT. */ public static final int LEFT = 1; /** * constant indicating BOTTOM. */ public static final int BOTTOM = 2; /** * constant indicating RIGHT. */ public static final int RIGHT = 3; /** * Constructs a new boundary. * * @param s * the subgraph * @param p * the padding * @param side * which side */ public SubgraphBoundary(Subgraph s, Insets p, int side) { super(null, s); this.width = s.width; this.height = s.height; this.padding = new Insets(); switch (side) { case LEFT: width = s.insets.left; y = s.y; padding.left = p.left; padding.right = s.innerPadding.left; padding.top = padding.bottom = 0; setParent(s.getParent()); data = "left(" + s + ")"; //$NON-NLS-1$ //$NON-NLS-2$ break; case RIGHT: width = s.insets.right; y = s.y; padding.right = p.right; padding.left = s.innerPadding.right; padding.top = padding.bottom = 0; setParent(s.getParent()); data = "right(" + s + ")"; //$NON-NLS-1$ //$NON-NLS-2$ break; case TOP: height = s.insets.top; // $TODO width of head/tail should be 0 width = 5; padding.top = p.top; padding.bottom = s.innerPadding.top; padding.left = padding.right = 0; data = "top(" + s + ")"; //$NON-NLS-1$ //$NON-NLS-2$ break; case BOTTOM: height = s.insets.bottom; // $TODO width of head/tail should be 0 width = 5; padding.top = s.innerPadding.bottom; padding.bottom = p.bottom; padding.left = padding.right = 0; data = "bottom(" + s + ")"; //$NON-NLS-1$ //$NON-NLS-2$ break; } } }
1,328
1,272
/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0/ * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #ifndef ALEXA_CLIENT_SDK_AVSCOMMON_UTILS_INCLUDE_AVSCOMMON_UTILS_MACADDRESSSTRING_H_ #define ALEXA_CLIENT_SDK_AVSCOMMON_UTILS_INCLUDE_AVSCOMMON_UTILS_MACADDRESSSTRING_H_ #include <memory> #include <string> namespace alexaClientSDK { namespace avsCommon { namespace utils { /** * A class used to validate a MAC address string before construction. */ class MacAddressString { public: /// Default copy-constructor so objects can be passed by value. MacAddressString(const MacAddressString&) = default; /** * Factory that validates the MAC address before constructing the actual object. * * @param macAddress The user-supplied MAC address. * @return nullptr if the input MAC address is illegal, otherwise a unique_ptr to a MacAddressString object that can * be used to get the desired string. */ static std::unique_ptr<MacAddressString> create(const std::string& macAddress); /** * Returns the MAC address. * * @return The MAC address. */ std::string getString() const; /** * Utility function to truncate a valid MAC address. The first 4 octets are X'd out. * * @return The truncated MAC address. */ std::string getTruncatedString() const; private: /// The constructor will only be called with a legal macAddress input. We don't check here because this function is /// private and is only called from the public create(...) factory method. explicit MacAddressString(const std::string& macAddress); /// A well-formed MAC address string. const std::string m_macAddress; }; } // namespace utils } // namespace avsCommon } // namespace alexaClientSDK #endif // ALEXA_CLIENT_SDK_AVSCOMMON_UTILS_INCLUDE_AVSCOMMON_UTILS_MACADDRESSSTRING_H_
753
733
# coding=utf-8 # Copyright 2019 The SEED Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Implements a generalized onpolicy loss.""" import abc import inspect import gin from seed_rl.agents.policy_gradient.modules import logging_module import tensorflow as tf @gin.configurable class GeneralizedOnPolicyLoss(tf.Module, logging_module.LoggingModule): """TensorFlow module implementing the generalized onpolicy loss.""" def __init__(self, agent, reward_normalizer, parametric_action_distribution, advantage_estimator, policy_loss, discount_factor, regularizer=None, max_abs_reward=None, handle_abandoned_episodes_properly=True, huber_delta=None, value_ppo_style_clip_eps=None, baseline_cost=1., include_regularization_in_returns=False, frame_skip=1, reward_scaling=1.0): """Creates a GeneralizedOnPolicyLoss.""" self._agent = agent self._reward_normalizer = reward_normalizer self._parametric_action_distribution = parametric_action_distribution self._advantage_estimator = advantage_estimator self._policy_loss = policy_loss self._regularizer = regularizer self._max_abs_reward = max_abs_reward self._reward_scaling = reward_scaling self._baseline_cost = baseline_cost # Provided here so that it is shared. self._discount_factor = discount_factor self._frame_skip = frame_skip self._handle_abandoned_episodes_properly = handle_abandoned_episodes_properly self._value_ppo_style_clip_eps = value_ppo_style_clip_eps self._include_regularization_in_returns = include_regularization_in_returns if huber_delta is not None: self.v_loss_fn = tf.keras.losses.Huber( delta=huber_delta, reduction=tf.keras.losses.Reduction.NONE) else: self.v_loss_fn = tf.keras.losses.MeanSquaredError( reduction=tf.keras.losses.Reduction.NONE) def init(self): for module in self.submodules: if hasattr(module, 'init'): if not inspect.signature(module.init).parameters: module.init() def compute_advantages(self, agent_state, prev_actions, env_outputs, agent_outputs, return_learner_outputs=False): # Extract rewards and done information. rewards, done, _, abandoned, _ = tf.nest.map_structure(lambda t: t[1:], env_outputs) if self._max_abs_reward is not None: rewards = tf.clip_by_value(rewards, -self._max_abs_reward, self._max_abs_reward) rewards *= self._reward_scaling # Compute the outputs of the neural networks on the learner. learner_outputs, _ = self._agent((prev_actions, env_outputs), agent_state, unroll=True, is_training=True) # At this point, we have unroll length + 1 steps. The last step is only used # as bootstrap value, so it's removed. agent_outputs = tf.nest.map_structure(lambda t: t[:-1], agent_outputs) learner_v = learner_outputs.baseline # current value function learner_outputs = tf.nest.map_structure(lambda t: t[:-1], learner_outputs) target_action_log_probs = self._parametric_action_distribution( learner_outputs.policy_logits).log_prob(agent_outputs.action) behaviour_action_log_probs = self._parametric_action_distribution( agent_outputs.policy_logits).log_prob(agent_outputs.action) # Compute the advantages. 
if self._reward_normalizer: corrected_predictions = self._reward_normalizer.correct_prediction( learner_v) unnormalized_predictions = self._reward_normalizer.unnormalize_prediction( corrected_predictions) else: corrected_predictions = learner_v unnormalized_predictions = learner_v if not self._handle_abandoned_episodes_properly: abandoned = tf.zeros_like(abandoned) done_terminated = tf.logical_and(done, ~abandoned) done_abandoned = tf.logical_and(done, abandoned) if self._include_regularization_in_returns and self._regularizer: additional_rewards, _ = self._regularizer( self._parametric_action_distribution, learner_outputs.policy_logits, agent_outputs.policy_logits, agent_outputs.action, with_logging=False) assert rewards.shape == additional_rewards.shape rewards += additional_rewards # tf.math.pow does not work on TPU so we compute it manually. adjusted_discount_factor = 1. for _ in range(self._frame_skip): adjusted_discount_factor *= self._discount_factor vs, advantages = self._advantage_estimator( unnormalized_predictions, rewards, done_terminated, done_abandoned, adjusted_discount_factor, target_action_log_probs, behaviour_action_log_probs) if self._reward_normalizer: normalized_targets = self._reward_normalizer.normalize_target(vs) normalized_advantages = self._reward_normalizer.normalize_advantage( advantages) self._reward_normalizer.update_normalization_statistics(vs) else: normalized_targets = vs normalized_advantages = advantages outputs = (normalized_targets, normalized_advantages) if return_learner_outputs: outputs += (learner_outputs,) return outputs def __call__(self, agent_state, prev_actions, env_outputs, agent_outputs, normalized_targets=None, normalized_advantages=None): """Computes the loss.""" if normalized_targets is None: normalized_targets, normalized_advantages, learner_outputs = \ self.compute_advantages( agent_state, prev_actions, env_outputs, agent_outputs, return_learner_outputs=True) # The last timestep is only used for computing advantages so we # remove it here. agent_state, prev_actions, env_outputs, agent_outputs = \ tf.nest.map_structure( lambda t: t[:-1], (agent_state, prev_actions, env_outputs, agent_outputs)) else: # Advantages are already precomputed. learner_outputs, _ = self._agent((prev_actions, env_outputs), agent_state, unroll=True, is_training=True) target_action_log_probs = self._parametric_action_distribution( learner_outputs.policy_logits).log_prob(agent_outputs.action) behaviour_action_log_probs = self._parametric_action_distribution( agent_outputs.policy_logits).log_prob(agent_outputs.action) # Compute the advantages. if self._reward_normalizer: corrected_predictions = self._reward_normalizer.correct_prediction( learner_outputs.baseline) old_corrected_predictions = self._reward_normalizer.correct_prediction( agent_outputs.baseline) else: corrected_predictions = learner_outputs.baseline old_corrected_predictions = agent_outputs.baseline # Compute the advantage-based loss. 
policy_loss = tf.reduce_mean( self._policy_loss( normalized_advantages, target_action_log_probs, behaviour_action_log_probs, actions=agent_outputs.action, target_logits=learner_outputs.policy_logits, behaviour_logits=agent_outputs.policy_logits, parametric_action_distribution=self._parametric_action_distribution) ) # Value function loss v_error = normalized_targets - corrected_predictions self.log('GeneralizedOnPolicyLoss/V_error', v_error) self.log('GeneralizedOnPolicyLoss/abs_V_error', tf.abs(v_error)) self.log('GeneralizedOnPolicyLoss/corrected_predictions', corrected_predictions) # Huber loss reduces the last dimension so we add a dummy one here. normalized_targets = normalized_targets[..., tf.newaxis] corrected_predictions = corrected_predictions[..., tf.newaxis] v_loss = self.v_loss_fn(normalized_targets, corrected_predictions) # PPO-style value loss clipping if self._value_ppo_style_clip_eps is not None: old_corrected_predictions = old_corrected_predictions[..., tf.newaxis] clipped_corrected_predictions = tf.clip_by_value( corrected_predictions, old_corrected_predictions - self._value_ppo_style_clip_eps, old_corrected_predictions + self._value_ppo_style_clip_eps) clipped_v_loss = self.v_loss_fn(normalized_targets, clipped_corrected_predictions) v_loss = tf.maximum(v_loss, clipped_v_loss) v_loss = tf.reduce_mean(v_loss) # Compute the regularization loss. if self._regularizer: per_step_regularization, regularization_loss = self._regularizer( self._parametric_action_distribution, learner_outputs.policy_logits, agent_outputs.policy_logits, agent_outputs.action) if not self._include_regularization_in_returns: regularization_loss += tf.reduce_mean(per_step_regularization) else: regularization_loss = 0. total_loss = policy_loss + self._baseline_cost*v_loss + regularization_loss return total_loss class PolicyLoss(tf.Module, metaclass=abc.ABCMeta): """Abstract base class for policy losses.""" @abc.abstractmethod def __call__(self, advantages, target_action_log_probs, behaviour_action_log_probs): r"""Computes policy loss. Args: advantages: A float32 tensor of shape [T, B] of advantages. target_action_log_probs: A float32 tensor of shape [T, B] with log-probabilities of taking the action by the current policy behaviour_action_log_probs: A float32 tensor of shape [T, B] with log-probabilities of taking the action by the behavioural policy Returns: A float32 tensor of shape [T, B] with the policy loss. """ raise NotImplementedError('`__call__()` is not implemented!') class RegularizationLoss(tf.Module, metaclass=abc.ABCMeta): """Abstract base class for policy losses.""" @abc.abstractmethod def __call__(self, parametric_action_distribution, target_action_logits, behaviour_action_logits, actions): r"""Computes regularization loss. Args: parametric_action_distribution: Parametric action distribution. target_action_logits: A float32 tensor of shape [T, B, A] with the logits of the target policy. behaviour_action_logits: A float32 tensor of shape [T, B, A] with the logits of the behavioural policy. actions: A float32 tensor of shape [T, B, A] with the actions taken by the behaviour policy. Returns: A float32 tensor of shape [T, B] with the regularization loss. """ raise NotImplementedError('`__call__()` is not implemented!')
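PolicyLoss and RegularizationLoss above only pin down the call contract; concrete losses are supplied elsewhere in the package. Purely as an illustration of that contract (per-step log-probabilities and advantages in, a [T, B] loss out), here is a hedged sketch of a PPO-style clipped-surrogate subclass; it is not the actual SEED RL implementation, and the default clipping constant is an arbitrary example value.

class ClippedSurrogatePolicyLoss(PolicyLoss):
  """Illustrative PPO-style clipped surrogate loss (sketch, not SEED RL code)."""

  def __init__(self, epsilon=0.2):
    super().__init__()
    self._epsilon = epsilon

  def __call__(self, advantages, target_action_log_probs,
               behaviour_action_log_probs, **unused_kwargs):
    # Importance ratio between the current (target) and behaviour policies.
    ratio = tf.exp(target_action_log_probs - behaviour_action_log_probs)
    clipped_ratio = tf.clip_by_value(ratio, 1.0 - self._epsilon,
                                     1.0 + self._epsilon)
    advantages = tf.stop_gradient(advantages)
    # Negative clipped surrogate objective, shape [T, B].
    return -tf.minimum(ratio * advantages, clipped_ratio * advantages)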
4,618
5,133
<filename>processor/src/test/java/org/mapstruct/ap/test/bugs/_543/SourceUtil.java /* * Copyright MapStruct Authors. * * Licensed under the Apache License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0 */ package org.mapstruct.ap.test.bugs._543; import org.mapstruct.ap.test.bugs._543.dto.Source; import org.mapstruct.ap.test.bugs._543.dto.Target; /** * @author <NAME> */ public class SourceUtil { private SourceUtil() { } public static Target from(Source source) { if ( source == null ) { return null; } return new Target( source.getString() ); } }
246
576
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.commons.collections4.bloomfilter.hasher; /** * An instance of HashFunctionIdentity that is suitable for deserializing * HashFunctionIdentity data from a stream or any other situation where the * hash function is not available but the identity of the function is required. * * @since 4.5 */ public final class HashFunctionIdentityImpl implements HashFunctionIdentity { private final String name; private final String provider; private final Signedness signedness; private final ProcessType process; private final long signature; /** * Creates a copy of the HashFunctionIdentity. * @param identity the identity to copy. */ public HashFunctionIdentityImpl(final HashFunctionIdentity identity) { this.name = identity.getName(); this.provider = identity.getProvider(); this.signedness = identity.getSignedness(); this.process = identity.getProcessType(); this.signature = identity.getSignature(); } /** * Creates a HashFunctionIdentity from component values. * @param provider the name of the provider. * @param name the name of the hash function. * @param signedness the signedness of the hash function. * @param process the process type of the hash function. * @param signature the signature for the hash function. */ public HashFunctionIdentityImpl(final String provider, final String name, final Signedness signedness, final ProcessType process, final long signature) { this.name = name; this.provider = provider; this.signedness = signedness; this.process = process; this.signature = signature; } @Override public String getName() { return name; } @Override public ProcessType getProcessType() { return process; } @Override public String getProvider() { return provider; } @Override public long getSignature() { return signature; } @Override public Signedness getSignedness() { return signedness; } }
896
3,133
{ "kind": "CustomResourceDefinition", "apiVersion": "apiextensions.k8s.io/v1beta1", "metadata": { "name": "gcpsamples.gcp.stacks.crossplane.io", "selfLink": "/apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions/gcpsamples.gcp.stacks.crossplane.io", "uid": "c0bbac74-acab-4620-b628-1d5f91b19040", "resourceVersion": "5567", "generation": 1, "creationTimestamp": "2020-04-20T17:27:56Z", "annotations": { "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"apiextensions.k8s.io/v1beta1\",\"kind\":\"CustomResourceDefinition\",\"metadata\":{\"annotations\":{},\"creationTimestamp\":\"2020-04-20T16:57:37Z\",\"generation\":1,\"name\":\"gcpsamples.gcp.stacks.crossplane.io\",\"resourceVersion\":\"549\",\"selfLink\":\"/apis/apiextensions.k8s.io/v1/customresourcedefinitions/gcpsamples.gcp.stacks.crossplane.io\",\"uid\":\"db5f4321-3226-44b0-8247-66fd7ef59dc8\"},\"spec\":{\"conversion\":{\"strategy\":\"None\"},\"group\":\"gcp.stacks.crossplane.io\",\"names\":{\"kind\":\"GCPSample\",\"listKind\":\"GCPSampleList\",\"plural\":\"gcpsamples\",\"singular\":\"gcpsample\"},\"preserveUnknownFields\":true,\"scope\":\"Cluster\",\"versions\":[{\"name\":\"v1alpha1\",\"served\":true,\"storage\":true}]},\"status\":{\"acceptedNames\":{\"kind\":\"GCPSample\",\"listKind\":\"GCPSampleList\",\"plural\":\"gcpsamples\",\"singular\":\"gcpsample\"},\"conditions\":[{\"lastTransitionTime\":\"2020-04-20T16:57:37Z\",\"message\":\"no conflicts found\",\"reason\":\"NoConflicts\",\"status\":\"True\",\"type\":\"NamesAccepted\"},{\"lastTransitionTime\":\"2020-04-20T16:57:37Z\",\"message\":\"the initial names have been accepted\",\"reason\":\"InitialNamesAccepted\",\"status\":\"True\",\"type\":\"Established\"}],\"storedVersions\":[\"v1alpha1\"]}}\n" } }, "spec": { "group": "gcp.stacks.crossplane.io", "version": "v1alpha1", "names": { "plural": "gcpsamples", "singular": "gcpsample", "kind": "GCPSample", "listKind": "GCPSampleList" }, "scope": "Cluster", "versions": [ { "name": "v1alpha1", "served": true, "storage": true } ], "conversion": { "strategy": "None" }, "preserveUnknownFields": true }, "status": { "conditions": [ { "type": "NamesAccepted", "status": "True", "lastTransitionTime": "2020-04-20T17:27:56Z", "reason": "NoConflicts", "message": "no conflicts found" }, { "type": "Established", "status": "True", "lastTransitionTime": "2020-04-20T17:27:56Z", "reason": "InitialNamesAccepted", "message": "the initial names have been accepted" } ], "acceptedNames": { "plural": "gcpsamples", "singular": "gcpsample", "kind": "GCPSample", "listKind": "GCPSampleList" }, "storedVersions": [ "v1alpha1" ] } }
1,259
3,976
import xlwt with open('numbers.txt', 'r', encoding='utf-8') as f: data = f.read() _numbers = eval(data) numbers = list() for i in range(3): numbers.extend(_numbers[i]) row = len(numbers)//len(_numbers) file = xlwt.Workbook() table = file.add_sheet('numbers') for i in range(len(numbers)): table.write(i // row, i % row, numbers[i]) file.save('numbers.xls')
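The script above expects numbers.txt to contain a Python-literal nested list with at least three sub-lists; it flattens the first three and writes them into a grid whose row width is the flattened length divided by the number of sub-lists in the file. A hypothetical companion snippet that produces an input file in that format is sketched below. Since the input is passed to eval(), ast.literal_eval would be the safer choice for untrusted files.

# Hypothetical helper: writes a numbers.txt in the shape the script above expects.
# The values are arbitrary sample data.
sample = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 10, 11, 12],
]
with open('numbers.txt', 'w', encoding='utf-8') as f:
    f.write(repr(sample))
# With this input the script writes a 3 x 4 grid to numbers.xls
# (row width = 12 // 3 = 4).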
184
981
<gh_stars>100-1000 /******************************************************************* * File automatically generated by rebuild_wrappers.py (v2.1.0.16) * *******************************************************************/ #ifndef __wrappedlibxxf86vmDEFS_H_ #define __wrappedlibxxf86vmDEFS_H_ #endif // __wrappedlibxxf86vmDEFS_H_
94
6,304
<reponame>travisleithead/skia<gh_stars>1000+ /* * Copyright 2018 Google Inc. * * Use of this source code is governed by a BSD-style license that can be * found in the LICENSE file. */ #ifndef DDLTileHelper_DEFINED #define DDLTileHelper_DEFINED #include "include/core/SkDeferredDisplayList.h" #include "include/core/SkRect.h" #include "include/core/SkRefCnt.h" #include "include/core/SkSpan.h" #include "include/core/SkSurfaceCharacterization.h" class DDLPromiseImageHelper; class PromiseImageCallbackContext; class SkCanvas; class SkData; class SkDeferredDisplayListRecorder; class SkPicture; class SkSurface; class SkSurfaceCharacterization; class SkTaskGroup; class DDLTileHelper { public: // The TileData class encapsulates the information and behavior of a single tile when // rendering with DDLs. class TileData { public: TileData(); ~TileData(); bool initialized() const { return fID != -1; } void init(int id, GrDirectContext*, const SkSurfaceCharacterization& dstChar, const SkIRect& clip, const SkIRect& paddingOutsets); // Create the DDL for this tile (i.e., fill in 'fDisplayList'). void createDDL(const SkPicture*); void dropDDL() { fDisplayList.reset(); } // Precompile all the programs required to draw this tile's DDL void precompile(GrDirectContext*); // Just draw the re-inflated per-tile SKP directly into this tile w/o going through a DDL // first. This is used for determining the overhead of using DDLs (i.e., it replaces // a 'createDDL' and 'draw' pair. void drawSKPDirectly(GrDirectContext*, const SkPicture*); // Replay the recorded DDL into the tile surface - filling in 'fBackendTexture'. void draw(GrDirectContext*); void reset(); int id() const { return fID; } SkIRect clipRect() const { return fClip; } SkISize paddedRectSize() const { return { fClip.width() + fPaddingOutsets.fLeft + fPaddingOutsets.fRight, fClip.height() + fPaddingOutsets.fTop + fPaddingOutsets.fBottom }; } SkIVector padOffset() const { return { fPaddingOutsets.fLeft, fPaddingOutsets.fTop }; } SkDeferredDisplayList* ddl() { return fDisplayList.get(); } sk_sp<SkImage> makePromiseImageForDst(sk_sp<GrContextThreadSafeProxy>); void dropCallbackContext() { fCallbackContext.reset(); } static void CreateBackendTexture(GrDirectContext*, TileData*); static void DeleteBackendTexture(GrDirectContext*, TileData*); private: sk_sp<SkSurface> makeWrappedTileDest(GrRecordingContext* context); sk_sp<PromiseImageCallbackContext> refCallbackContext() { return fCallbackContext; } int fID = -1; SkIRect fClip; // in the device space of the final SkSurface SkIRect fPaddingOutsets; // random padding for the output surface SkSurfaceCharacterization fPlaybackChar; // characterization for the tile's dst surface // The callback context holds (via its SkPromiseImageTexture) the backend texture // that is both wrapped in 'fTileSurface' and backs this tile's promise image // (i.e., the one returned by 'makePromiseImage'). sk_sp<PromiseImageCallbackContext> fCallbackContext; // 'fTileSurface' wraps the backend texture in 'fCallbackContext' and must exist until // after 'fDisplayList' has been flushed (bc it owns the proxy the DDL's destination // trampoline points at). 
// TODO: fix the ref-order so we don't need 'fTileSurface' here sk_sp<SkSurface> fTileSurface; sk_sp<SkDeferredDisplayList> fDisplayList; }; DDLTileHelper(GrDirectContext*, const SkSurfaceCharacterization& dstChar, const SkIRect& viewport, int numXDivisions, int numYDivisions, bool addRandomPaddingToDst); void kickOffThreadedWork(SkTaskGroup* recordingTaskGroup, SkTaskGroup* gpuTaskGroup, GrDirectContext*, SkPicture*); void createDDLsInParallel(SkPicture*); // Create the DDL that will compose all the tile images into a final result. void createComposeDDL(); const sk_sp<SkDeferredDisplayList>& composeDDL() const { return fComposeDDL; } // For each tile, create its DDL and then draw it - all on a single thread. This is to allow // comparison w/ just drawing the SKP directly (i.e., drawAllTilesDirectly). The // DDL creations and draws are interleaved to prevent starvation of the GPU. // Note: this is somewhat of a misuse/pessimistic-use of DDLs since they are supposed to // be created on a separate thread. void interleaveDDLCreationAndDraw(GrDirectContext*, SkPicture*); // This draws all the per-tile SKPs directly into all of the tiles w/o converting them to // DDLs first - all on a single thread. void drawAllTilesDirectly(GrDirectContext*, SkPicture*); void dropCallbackContexts(); void resetAllTiles(); int numTiles() const { return fNumXDivisions * fNumYDivisions; } void createBackendTextures(SkTaskGroup*, GrDirectContext*); void deleteBackendTextures(SkTaskGroup*, GrDirectContext*); private: int fNumXDivisions; // number of tiles horizontally int fNumYDivisions; // number of tiles vertically SkAutoTArray<TileData> fTiles; // 'fNumXDivisions' x 'fNumYDivisions' sk_sp<SkDeferredDisplayList> fComposeDDL; const SkSurfaceCharacterization fDstCharacterization; }; #endif
2,357
5,964
<reponame>wenfeifei/miniblink49 // Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef net_FixedReceivedData_h #define net_FixedReceivedData_h #include <vector> #include "net/RequestPeer.h" namespace net { class FixedReceivedData final : public RequestPeer::ThreadSafeReceivedData { public: FixedReceivedData(const char* data, size_t length, int encodedLength); explicit FixedReceivedData(ReceivedData* data); FixedReceivedData(const std::vector<char>& data, int encodedLength); ~FixedReceivedData() override; const char* payload() const override; int length() const override; int encodedLength() const override; private: std::vector<char> data_; int m_encodedLength; DISALLOW_COPY_AND_ASSIGN(FixedReceivedData); }; } // namespace net #endif // net_FixedReceivedData_h
310
348
{"nom":"Saint-Etienne","circ":"2ème circonscription","dpt":"Loire","inscrits":53195,"abs":30005,"votants":23190,"blancs":236,"nuls":90,"exp":22864,"res":[{"nuance":"REM","nom":"<NAME>","voix":8860},{"nuance":"FI","nom":"Mme <NAME>","voix":3516},{"nuance":"LR","nom":"Mme <NAME>","voix":3249},{"nuance":"FN","nom":"<NAME>","voix":3119},{"nuance":"ECO","nom":"M. <NAME>","voix":2124},{"nuance":"COM","nom":"Mme <NAME>","voix":907},{"nuance":"DIV","nom":"M. <NAME>","voix":347},{"nuance":"ECO","nom":"Mme <NAME>","voix":344},{"nuance":"ECO","nom":"Mme <NAME>","voix":211},{"nuance":"EXG","nom":"<NAME>","voix":187}]}
246
793
#include <stdlib.h> #include <check.h> #include "csptr/smart_ptr.h" #include "test.h" #include "utils.h" const struct meta m = {1, 2, 3}; Suite *master_suite(void) { Suite *s = suite_create("master"); suite_add_tcase(s, make_test_case("misc", misc_tests)); suite_add_tcase(s, make_test_case("scalar", scalar_tests)); suite_add_tcase(s, make_test_case("array", array_tests)); suite_add_tcase(s, make_test_case("shared", shared_tests)); return s; } int main(void) { SRunner *sr = srunner_create(master_suite()); srunner_run_all(sr, CK_NORMAL); int number_failed = srunner_ntests_failed(sr); srunner_free(sr); return (number_failed == 0) ? EXIT_SUCCESS : EXIT_FAILURE; }
304
487
<filename>test/gtest/complex_bitwise.cpp #include <boost/algorithm/string/replace.hpp> #include <boost/optional.hpp> #include "gtest/gtest.h" #include "ir/ir.h" #include "helpers.h" #include "lib/log.h" #include "lib/sourceCodeBuilder.h" #include "frontends/common/parseInput.h" #include "frontends/common/resolveReferences/referenceMap.h" #include "frontends/p4/toP4/toP4.h" #include "frontends/p4/typeChecking/typeChecker.h" #include "frontends/p4/typeMap.h" #include "midend/simplifyBitwise.h" using namespace P4; namespace Test { namespace { boost::optional<FrontendTestCase> createSimplifyBitwiseTestCase(const std::string &ingressSource) { std::string source = P4_SOURCE(P4Headers::V1MODEL, R"( header H { bit<32> f1; bit<32> f2; bit<32> f3; } struct Headers { H h; } struct Metadata { } parser parse(packet_in packet, out Headers headers, inout Metadata meta, inout standard_metadata_t sm) { state start { packet.extract(headers.h); transition accept; } } control verifyChecksum(inout Headers headers, inout Metadata meta) { apply { } } control ingress(inout Headers headers, inout Metadata meta, inout standard_metadata_t sm) { apply { %INGRESS% } } control egress(inout Headers headers, inout Metadata meta, inout standard_metadata_t sm) { apply { } } control computeChecksum(inout Headers headers, inout Metadata meta) { apply { } } control deparse(packet_out packet, in Headers headers) { apply { packet.emit(headers.h); } } V1Switch(parse(), verifyChecksum(), ingress(), egress(), computeChecksum(), deparse()) main; )"); boost::replace_first(source, "%INGRESS%", ingressSource); return FrontendTestCase::create(source, CompilerOptions::FrontendVersion::P4_16); } class CountAssignmentStatements : public Inspector { int _as_total = 0; bool preorder(const IR::AssignmentStatement *) { _as_total++; return true; } public: int as_total() { return _as_total; } }; } // namespace class SimplifyBitwiseTest : public P4CTest { }; TEST_F(SimplifyBitwiseTest, SimpleSplit) { auto test = createSimplifyBitwiseTestCase(P4_SOURCE(R"( headers.h.f1 = headers.h.f2 & 0xffff | headers.h.f1 & 0xffff0000; )")); ReferenceMap refMap; TypeMap typeMap; CountAssignmentStatements cas; Util::SourceCodeBuilder builder; ToP4 dump(builder, false); PassManager quick_midend = { new TypeChecking(&refMap, &typeMap, true), new SimplifyBitwise, &cas, &dump }; test->program->apply(quick_midend); EXPECT_EQ(2, cas.as_total()); std::string program_string = builder.toString(); std::string value1 = "headers.h.f1[15:0] = headers.h.f2[15:0]"; std::string value2 = "headers.h.f1[31:16] = headers.h.f1[31:16]"; EXPECT_FALSE(program_string.find(value1) == std::string::npos); EXPECT_FALSE(program_string.find(value2) == std::string::npos); } TEST_F(SimplifyBitwiseTest, ManySplit) { auto test = createSimplifyBitwiseTestCase(P4_SOURCE(R"( headers.h.f1 = headers.h.f2 & 0x55555555 | headers.h.f1 & 0xaaaaaaaa; )")); ReferenceMap refMap; TypeMap typeMap; CountAssignmentStatements cas; Util::SourceCodeBuilder builder; ToP4 dump(builder, false); PassManager quick_midend = { new TypeChecking(&refMap, &typeMap, true), new SimplifyBitwise, &cas, &dump }; test->program->apply(quick_midend); EXPECT_EQ(32, cas.as_total()); std::string program_string = builder.toString(); for (int i = 0; i < 32; i += 2) { std::string value1 = "headers.h.f1[" + std::to_string(i) + ":" + std::to_string(i) + "] = headers.h.f2[" + std::to_string(i) + ":" + std::to_string(i) + "]"; std::string value2 = "headers.h.f1[" + std::to_string(i+1) + ":" + std::to_string(i+1) + "] = headers.h.f1[" + 
std::to_string(i+1) + ":" + std::to_string(i+1) + "]"; EXPECT_FALSE(program_string.find(value1) == std::string::npos); EXPECT_FALSE(program_string.find(value2) == std::string::npos); } } TEST_F(SimplifyBitwiseTest, SplitWithZero) { auto test = createSimplifyBitwiseTestCase(P4_SOURCE(R"( headers.h.f1 = headers.h.f2 & 0xff | headers.h.f3 & 0xff000000; )")); ReferenceMap refMap; TypeMap typeMap; CountAssignmentStatements cas; Util::SourceCodeBuilder builder; ToP4 dump(builder, false); PassManager quick_midend = { new TypeChecking(&refMap, &typeMap, true), new SimplifyBitwise, &cas, &dump }; test->program->apply(quick_midend); EXPECT_EQ(3, cas.as_total()); std::string program_string = builder.toString(); std::string value1 = "headers.h.f1[7:0] = headers.h.f2[7:0]"; std::string value2 = "headers.h.f1[31:24] = headers.h.f3[31:24]"; std::string value3 = "headers.h.f1[23:8] = 16w0"; EXPECT_FALSE(program_string.find(value1) == std::string::npos); EXPECT_FALSE(program_string.find(value2) == std::string::npos); EXPECT_FALSE(program_string.find(value3) == std::string::npos); } } // namespace Test
2,288
1,305
<reponame>suxingjie99/JavaSource<filename>src/org/example/source/java/security/cert/CRLSelector.java<gh_stars>1000+ /* * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. * * * * * * * * * * * * * * * * * * * * */ package java.security.cert; /** * A selector that defines a set of criteria for selecting {@code CRL}s. * Classes that implement this interface are often used to specify * which {@code CRL}s should be retrieved from a {@code CertStore}. * <p> * <b>Concurrent Access</b> * <p> * Unless otherwise specified, the methods defined in this interface are not * thread-safe. Multiple threads that need to access a single * object concurrently should synchronize amongst themselves and * provide the necessary locking. Multiple threads each manipulating * separate objects need not synchronize. * * @see CRL * @see CertStore * @see CertStore#getCRLs * * @author <NAME> * @since 1.4 */ public interface CRLSelector extends Cloneable { /** * Decides whether a {@code CRL} should be selected. * * @param crl the {@code CRL} to be checked * @return {@code true} if the {@code CRL} should be selected, * {@code false} otherwise */ boolean match(CRL crl); /** * Makes a copy of this {@code CRLSelector}. Changes to the * copy will not affect the original and vice versa. * * @return a copy of this {@code CRLSelector} */ Object clone(); }
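The interface above only specifies match() and clone(), so a small custom selector may make the contract concrete. This is a minimal sketch, not JDK code: the class name IssuerCRLSelector and its constructor are invented here; only CRLSelector, CRL, X509CRL and X500Principal come from the platform, and in practice such an object would typically be handed to CertStore.getCRLs().

import java.security.cert.CRL;
import java.security.cert.CRLSelector;
import java.security.cert.X509CRL;
import javax.security.auth.x500.X500Principal;

// Hypothetical selector: accepts only X.509 CRLs issued by a single DN.
public class IssuerCRLSelector implements CRLSelector {
    private final X500Principal issuer;

    public IssuerCRLSelector(X500Principal issuer) {
        this.issuer = issuer;
    }

    @Override
    public boolean match(CRL crl) {
        // Non-X.509 CRLs carry no issuer we can compare against, so reject them.
        if (!(crl instanceof X509CRL)) {
            return false;
        }
        return issuer.equals(((X509CRL) crl).getIssuerX500Principal());
    }

    @Override
    public Object clone() {
        // The selector is immutable, so an equivalent new instance is a valid copy.
        return new IssuerCRLSelector(issuer);
    }
}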
539
473
<filename>front-end/qemu-2.3/target-moxie/machine.h extern const VMStateDescription vmstate_moxie_cpu;
41
455
/* -*- mode: c; c-file-style: "linux" -*- */ /* Run a micro benchmark on ip6_route_output(). * * It creates a /sys/kernel/kbench directory. By default, a scan is * done in 2000::/3 (considered as a linear space). * * The module only acts on the initial network namespace. * * Copyright (C) 2017 <NAME> * Based on https://git.kernel.org/pub/scm/linux/kernel/git/davem/net_test_tools.git/tree/kbench_mod.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define pr_fmt(fmt) "kbench: " fmt #include <linux/kernel.h> #include <linux/version.h> #include <linux/module.h> #include <linux/inet.h> #include <linux/sort.h> #include <linux/netdevice.h> #include <linux/mutex.h> #include <net/ip6_route.h> #include <net/ip6_fib.h> #include <linux/timex.h> #define DEFAULT_WARMUP_COUNT 100000 #define DEFAULT_LOOP_COUNT 5000 #define DEFAULT_MAX_LOOP_COUNT 1000000 #define DEFAULT_OIF 0 #define DEFAULT_IIF 0 #define DEFAULT_MARK 0x00000000 #define DEFAULT_LABEL 0 #define DEFAULT_DST_IPADDR_S { .s6_addr = {0x20} } #define DEFAULT_DST_IPADDR_E { .s6_addr = {0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} } #define DEFAULT_SRC_IPADDR { .s6_addr32 = {} } #define HIST_BUCKETS 15 #define HIST_WIDTH 50 static unsigned long warmup_count = DEFAULT_WARMUP_COUNT; static unsigned long loop_count = DEFAULT_LOOP_COUNT; static unsigned long max_loop_count = DEFAULT_MAX_LOOP_COUNT; static int flow_oif = DEFAULT_OIF; static int flow_iif = DEFAULT_IIF; static u32 flow_label = DEFAULT_LABEL; static u32 flow_mark = DEFAULT_MARK; static struct in6_addr flow_dst_ipaddr_s = DEFAULT_DST_IPADDR_S; static struct in6_addr flow_dst_ipaddr_e = DEFAULT_DST_IPADDR_E; static struct in6_addr flow_src_ipaddr = DEFAULT_SRC_IPADDR; static DEFINE_MUTEX(kb_lock); /* Compatibility with older kernel versions */ #ifndef __ATTR_RW # define __ATTR_RW(_name) __ATTR(_name, \ (S_IWUSR | S_IRUGO), \ _name##_show, _name##_store) #endif /* Helpers */ static int compare(const void *lhs, const void *rhs) { unsigned long long lhs_integer = *(const unsigned long long *)(lhs); unsigned long long rhs_integer = *(const unsigned long long *)(rhs); if (lhs_integer < rhs_integer) return -1; if (lhs_integer > rhs_integer) return 1; return 0; } static unsigned long long percentile(int p, unsigned long long *sorted, unsigned int count) { int index = p * count / 100; int index2 = index + 1; if (p * count % 100 == 0) return sorted[index]; if (index2 >= count) index2 = index - 1; if (index2 < 0) index2 = index; return (sorted[index] + sorted[index+1]) / 2; } static unsigned long long mad(unsigned long long *sorted, unsigned long long median, unsigned count) { unsigned long long *dmedian = kmalloc(sizeof(unsigned long long) * count, GFP_KERNEL); unsigned long long res; unsigned i; for (i = 0; i < count; i++) { if (sorted[i] > median) dmedian[i] = sorted[i] - median; else dmedian[i] = median - sorted[i]; } sort(dmedian, count, sizeof(unsigned long long), compare, NULL); res = percentile(50, dmedian, count); kfree(dmedian); return res; } static void lcg32(unsigned long *cur) { *cur = *cur * 1664525 + 1013904223; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0) #ifdef CONFIG_IPV6_SUBTREES #define FWS_INIT FWS_S #else #define FWS_INIT FWS_L #endif static void collect_depth(struct fib6_node *root, unsigned long *avgdepth, unsigned 
long *maxdepth) { unsigned long totdepth, depth; unsigned int count; struct fib6_node *fn, *pn, *node; #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0) struct fib6_info *leaf; #else struct rt6_info *leaf; #endif enum fib6_walk_state state; for (node = root, leaf = NULL, depth = 0, totdepth = 0, count = 0, *maxdepth = 0, *avgdepth = 0, state = FWS_INIT;;) { fn = node; if (!fn) goto end; switch (state) { #ifdef CONFIG_IPV6_SUBTREES case FWS_S: if (FIB6_SUBTREE(fn)) { node = FIB6_SUBTREE(fn); continue; } state = FWS_L; #endif case FWS_L: if (fn->left) { node = fn->left; depth++; state = FWS_INIT; continue; } state = FWS_R; case FWS_R: if (fn->right) { node = fn->right; depth++; state = FWS_INIT; continue; } state = FWS_C; leaf = fn->leaf; case FWS_C: if (leaf && fn->fn_flags & RTN_RTINFO) { totdepth += depth; count++; if (depth > *maxdepth) *maxdepth = depth; leaf = NULL; continue; } state = FWS_U; case FWS_U: if (fn == root) { goto end; } pn = fn->parent; node = pn; depth--; #ifdef CONFIG_IPV6_SUBTREES if (FIB6_SUBTREE(pn) == fn) { WARN_ON(!(fn->fn_flags & RTN_ROOT)); state = FWS_L; continue; } #endif if (pn->left == fn) { state = FWS_R; continue; } if (pn->right == fn) { state = FWS_C; leaf = node->leaf; continue; } } } end: if (count > 0) *avgdepth = totdepth*10 / count; } #endif /* Benchmark */ static int do_bench(char *buf, int verbose) { unsigned long long *results; unsigned long long t1, t2, average; unsigned long i, j, total, count, count2, carry; bool scan; unsigned long rnd = 0; struct flowi6 fl6; struct in6_addr delta = {}; results = kmalloc(sizeof(*results) * loop_count, GFP_KERNEL); if (!results) return scnprintf(buf, PAGE_SIZE, "msg=\"no memory\"\n"); mutex_lock(&kb_lock); memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = flow_oif; fl6.flowi6_iif = flow_iif; fl6.flowi6_mark = flow_mark; fl6.flowi6_proto = NEXTHDR_TCP; fl6.flowlabel = flow_label; memcpy(&fl6.daddr, &flow_dst_ipaddr_s, sizeof(flow_dst_ipaddr_s)); memcpy(&fl6.saddr, &flow_src_ipaddr, sizeof(flow_src_ipaddr)); for (i = 0, carry = 0; i < 4; i++) { if ((unsigned long long)ntohl(flow_dst_ipaddr_s.s6_addr32[3 - i]) + carry <= ntohl(flow_dst_ipaddr_e.s6_addr32[3 - i])) { delta.s6_addr32[3-i] = ntohl(flow_dst_ipaddr_e.s6_addr32[3 - i]) - ntohl(flow_dst_ipaddr_s.s6_addr32[3 - i]) - carry; carry = 0; } else { delta.s6_addr32[3-i] = ntohl(flow_dst_ipaddr_s.s6_addr32[3 - i]) + carry - ntohl(flow_dst_ipaddr_e.s6_addr32[3 - i]); carry = 1; } } if (carry == 0 && (delta.s6_addr32[0] != 0 || delta.s6_addr32[1] != 0 || delta.s6_addr32[2] != 0 || delta.s6_addr32[3] != 0)) { unsigned long rem; unsigned long long quo; scan = true; for (i = 0, rem = 0; i < 4; i++) { quo = delta.s6_addr32[i] + ((unsigned long long)rem << 32); rem = quo % loop_count; quo /= loop_count; delta.s6_addr32[i] = quo; } if (delta.s6_addr32[0] == 0 && delta.s6_addr32[1] == 0 && delta.s6_addr32[2] == 0 && delta.s6_addr32[3] == 0) delta.s6_addr32[3] = 1; for (i = 0; i < 4; i++) delta.s6_addr32[i] = htonl(delta.s6_addr32[i]); } for (i = 0; i < warmup_count; i++) { struct dst_entry *dst = ip6_route_output(&init_net, NULL, &fl6); if (dst->error && dst->error != -ENETUNREACH) { dst_release(dst); kfree(results); return scnprintf(buf, PAGE_SIZE, "err=%d msg=\"lookup error\"\n", dst->error); } dst_release(dst); } average = 0; for (i = total = 0; i < max_loop_count; i++) { struct dst_entry *dst; if (total >= loop_count) break; if (scan) { for (j = 0, carry = 0; j < 4; j++) { carry = ((unsigned long long)ntohl(fl6.daddr.s6_addr32[3-j]) + ntohl(delta.s6_addr32[3-j]) + carry > 
ULONG_MAX); fl6.daddr.s6_addr32[3-j] = htonl(ntohl(fl6.daddr.s6_addr32[3-j]) + ntohl(delta.s6_addr32[3-j]) + carry); } if (ntohl(fl6.daddr.s6_addr32[0]) > ntohl(flow_dst_ipaddr_e.s6_addr32[0]) || (ntohl(fl6.daddr.s6_addr32[0]) == ntohl(flow_dst_ipaddr_e.s6_addr32[0]) && (ntohl(fl6.daddr.s6_addr32[1]) > ntohl(flow_dst_ipaddr_e.s6_addr32[1]) || (ntohl(fl6.daddr.s6_addr32[1]) == ntohl(flow_dst_ipaddr_e.s6_addr32[1]) && (ntohl(fl6.daddr.s6_addr32[2]) > ntohl(flow_dst_ipaddr_e.s6_addr32[2]) || (ntohl(fl6.daddr.s6_addr32[2]) == ntohl(flow_dst_ipaddr_e.s6_addr32[2]) && ntohl(fl6.daddr.s6_addr32[3]) > ntohl(flow_dst_ipaddr_e.s6_addr32[3]))))))) { memcpy(&fl6.daddr, &flow_dst_ipaddr_s, sizeof(flow_dst_ipaddr_s)); /* Add a bit of (reproducible) * randomness to the first step to * avoid using the same routes. */ for (j = 0, carry = 0; j < 4; j++) { unsigned long add = ntohl(delta.s6_addr32[3-j]); lcg32(&rnd); add &= rnd; carry = ((unsigned long long)ntohl(fl6.daddr.s6_addr32[3-j]) + add + carry > ULONG_MAX); fl6.daddr.s6_addr32[3-j] = htonl(ntohl(fl6.daddr.s6_addr32[3-j]) + add + carry); } schedule(); } } /* Could use sched_clock() to get a number of * nanoseconds instead. This would be the one used for * ftrace. get_cycles() use RDTSC behind the scene and * this instruction is virtualized with low-overhead * (see cpu_has_vmx_rdtscp() for support, which can be * checked with the following command-line: `sudo * rdmsr 0x0000048b` (which is * MSR_IA32_VMX_PROCBASED_CTLS2), and it should have * 0x8 bit set (which is SECONDARY_EXEC_RDTSCP) in the * high word. For example, if we have 0x7cff00000000, * high word is 0x7cff, so 0x8 bit is set and it's * OK. */ t1 = get_cycles(); dst = ip6_route_output(&init_net, NULL, &fl6); t2 = get_cycles(); if (dst->error == -ENETUNREACH) { printk_ratelimited(KERN_WARNING "not reachable\n"); dst_release(dst); continue; } dst_release(dst); results[total] = t2 - t1; average += results[total]; total++; } mutex_unlock(&kb_lock); /* Compute statistics */ sort(results, total, sizeof(*results), compare, NULL); if (total == 0) { scnprintf(buf, PAGE_SIZE, "msg=\"no match\"\n"); } else { unsigned long long p95 = percentile(95, results, total); unsigned long long p90 = percentile(90, results, total); unsigned long long p50 = percentile(50, results, total); average /= total; scnprintf(buf, PAGE_SIZE, "min=%llu max=%llu count=%lu average=%llu 95th=%llu 90th=%llu 50th=%llu mad=%llu\n", results[0], results[total - 1], total, average, p95, p90, p50, mad(results, p50, total)); #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0) do { unsigned long avgdepth, maxdepth; struct fib6_table *table = init_net.ipv6.fib6_main_tbl; #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0) spin_lock_bh(&table->tb6_lock); #else read_lock_bh(&table->tb6_lock); #endif collect_depth(&table->tb6_root, &avgdepth, &maxdepth); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0) spin_unlock_bh(&table->tb6_lock); #else read_unlock_bh(&table->tb6_lock); #endif scnprintf(buf + strnlen(buf, PAGE_SIZE), PAGE_SIZE - strnlen(buf, PAGE_SIZE), "table=%u avgdepth=%lu.%lu maxdepth=%lu\n", table->tb6_id, avgdepth/10, avgdepth%10, maxdepth); } while(0); #endif if (verbose) { /* Display an histogram */ unsigned long long share = (p95 - results[0]) / HIST_BUCKETS; unsigned long long start = results[0]; int order = (ilog2(share) * 3 + 5) / 10; char *hist_buf = buf + strnlen(buf, PAGE_SIZE); if (order <= 0) order = 1; for (i = order, order = 1; i > 1; i--) { order *= 10; } share = share/order * order; if (share <= 0) share = 1; start = 
start/order * order; hist_buf += scnprintf(hist_buf, buf + PAGE_SIZE - hist_buf, " %8s │", "value"); hist_buf += scnprintf(hist_buf, buf + PAGE_SIZE - hist_buf, "%*s┊%*s", HIST_WIDTH/2, "", HIST_WIDTH/2-1, ""); hist_buf += scnprintf(hist_buf, buf + PAGE_SIZE - hist_buf, " %8s\n", "count"); for (i = 0, count = 0, count2 = 0;;) { if (i < total && results[i] < start + share) { count++; count2++; i++; continue; } hist_buf += scnprintf(hist_buf, buf + PAGE_SIZE - hist_buf, " %8llu │", start); for (j = 0; j < count * HIST_WIDTH / total; j++) hist_buf += scnprintf(hist_buf, buf + PAGE_SIZE - hist_buf, "▒"); for (; j < count2 * HIST_WIDTH / total; j++) hist_buf += scnprintf(hist_buf, buf + PAGE_SIZE - hist_buf, "░"); hist_buf += scnprintf(hist_buf, buf + PAGE_SIZE - hist_buf, "%*s %8lu\n", (int)(HIST_WIDTH - count2 * HIST_WIDTH / total), "", count); count = 0; start += share; if (i >= total) break; if (results[i] > p95) break; if (hist_buf >= buf + PAGE_SIZE - HIST_WIDTH - 20) break; } } } kfree(results); return strnlen(buf, PAGE_SIZE); } /* Sysfs attributes */ static ssize_t warmup_count_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t res; mutex_lock(&kb_lock); res = scnprintf(buf, PAGE_SIZE, "%lu\n", warmup_count); mutex_unlock(&kb_lock); return res; } static ssize_t warmup_count_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned long val; int err = kstrtoul(buf, 0, &val); if (err < 0) return err; if (val < 1) return -EINVAL; mutex_lock(&kb_lock); warmup_count = val; mutex_unlock(&kb_lock); return count; } static ssize_t loop_count_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t res; mutex_lock(&kb_lock); res = scnprintf(buf, PAGE_SIZE, "%lu\n", loop_count); mutex_unlock(&kb_lock); return res; } static ssize_t loop_count_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned long val; int err = kstrtoul(buf, 0, &val); if (err < 0) return err; if (val < 1) return -EINVAL; mutex_lock(&kb_lock); loop_count = val; mutex_unlock(&kb_lock); return count; } static ssize_t max_loop_count_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t res; mutex_lock(&kb_lock); res = scnprintf(buf, PAGE_SIZE, "%lu\n", max_loop_count); mutex_unlock(&kb_lock); return res; } static ssize_t max_loop_count_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned long val; int err = kstrtoul(buf, 0, &val); if (err < 0) return err; if (val < 1) return -EINVAL; mutex_lock(&kb_lock); max_loop_count = val; mutex_unlock(&kb_lock); return count; } static ssize_t flow_oif_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t res; struct net_device *dev; mutex_lock(&kb_lock); dev = dev_get_by_index(&init_net, flow_oif); if (!dev) res = scnprintf(buf, PAGE_SIZE, "%d\n", flow_oif); else res = scnprintf(buf, PAGE_SIZE, "%s\n", dev->name); mutex_unlock(&kb_lock); return res; } static ssize_t flow_oif_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int val; int err = kstrtoint(buf, 0, &val); if (err < 0) { struct net_device *dev; char ifname[IFNAMSIZ] = {0, }; sscanf(buf, "%15s", ifname); dev = dev_get_by_name(&init_net, ifname); if (!dev) return -ENODEV; mutex_lock(&kb_lock); flow_oif = dev->ifindex; mutex_unlock(&kb_lock); return count; } if (val < 0) return -EINVAL; mutex_lock(&kb_lock); flow_oif = val; mutex_unlock(&kb_lock); return count; } static 
ssize_t flow_iif_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t res; struct net_device *dev; mutex_lock(&kb_lock); dev = dev_get_by_index(&init_net, flow_iif); if (!dev) res = scnprintf(buf, PAGE_SIZE, "%d\n", flow_iif); else res = scnprintf(buf, PAGE_SIZE, "%s\n", dev->name); mutex_unlock(&kb_lock); return res; } static ssize_t flow_iif_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int val; int err = kstrtoint(buf, 0, &val); if (err < 0) { struct net_device *dev; char ifname[IFNAMSIZ] = {0, }; sscanf(buf, "%15s", ifname); dev = dev_get_by_name(&init_net, ifname); if (!dev) return -ENODEV; mutex_lock(&kb_lock); flow_iif = dev->ifindex; mutex_unlock(&kb_lock); return count; } if (val < 0) return -EINVAL; mutex_lock(&kb_lock); flow_iif = val; mutex_unlock(&kb_lock); return count; } static ssize_t flow_label_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t res; mutex_lock(&kb_lock); res = scnprintf(buf, PAGE_SIZE, "0x%08x\n", (u32)flow_label); mutex_unlock(&kb_lock); return res; } static ssize_t flow_label_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { u32 val; int err = kstrtou32(buf, 0, &val); if (err < 0) return err; mutex_lock(&kb_lock); flow_label = val; mutex_unlock(&kb_lock); return count; } static ssize_t flow_mark_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t res; mutex_lock(&kb_lock); res = scnprintf(buf, PAGE_SIZE, "0x%08x\n", flow_mark); mutex_unlock(&kb_lock); return res; } static ssize_t flow_mark_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { u32 val; int err = kstrtou32(buf, 0, &val); if (err < 0) return err; mutex_lock(&kb_lock); flow_mark = val; mutex_unlock(&kb_lock); return count; } static ssize_t flow_dst_ipaddr_s_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t res; mutex_lock(&kb_lock); res = scnprintf(buf, PAGE_SIZE, "%pI6c\n", &flow_dst_ipaddr_s); mutex_unlock(&kb_lock); return res; } static ssize_t flow_dst_ipaddr_s_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { const char *end; struct in6_addr parsed; if (!in6_pton(buf, count, parsed.s6_addr, -1, &end) || (*end != '\0' && *end != '\n')) return -EINVAL; mutex_lock(&kb_lock); memcpy(&flow_dst_ipaddr_s, &parsed, sizeof(parsed)); mutex_unlock(&kb_lock); return count; } static ssize_t flow_dst_ipaddr_e_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t res; mutex_lock(&kb_lock); res = scnprintf(buf, PAGE_SIZE, "%pI6c\n", &flow_dst_ipaddr_e); mutex_unlock(&kb_lock); return res; } static ssize_t flow_dst_ipaddr_e_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { const char *end; struct in6_addr parsed; if (!in6_pton(buf, count, parsed.s6_addr, -1, &end) || (*end != '\0' && *end != '\n')) return -EINVAL; mutex_lock(&kb_lock); memcpy(&flow_dst_ipaddr_e, &parsed, sizeof(parsed)); mutex_unlock(&kb_lock); return count; } static ssize_t flow_src_ipaddr_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t res; mutex_lock(&kb_lock); res = scnprintf(buf, PAGE_SIZE, "%pI6c\n", &flow_src_ipaddr); mutex_unlock(&kb_lock); return res; } static ssize_t flow_src_ipaddr_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { const char *end; struct in6_addr parsed; if (!in6_pton(buf, count, parsed.s6_addr, -1, &end) || (*end != '\0' 
&& *end != '\n')) return -EINVAL; mutex_lock(&kb_lock); memcpy(&flow_src_ipaddr, &parsed, sizeof(parsed)); mutex_unlock(&kb_lock); return count; } static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return do_bench(buf, 0); } static ssize_t run_verbose_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return do_bench(buf, 1); } static struct kobj_attribute warmup_count_attr = __ATTR_RW(warmup_count); static struct kobj_attribute loop_count_attr = __ATTR_RW(loop_count); static struct kobj_attribute max_loop_count_attr = __ATTR_RW(max_loop_count); static struct kobj_attribute flow_oif_attr = __ATTR_RW(flow_oif); static struct kobj_attribute flow_iif_attr = __ATTR_RW(flow_iif); static struct kobj_attribute flow_label_attr = __ATTR_RW(flow_label); static struct kobj_attribute flow_mark_attr = __ATTR_RW(flow_mark); static struct kobj_attribute flow_dst_ipaddr_s_attr = __ATTR_RW(flow_dst_ipaddr_s); static struct kobj_attribute flow_dst_ipaddr_e_attr = __ATTR_RW(flow_dst_ipaddr_e); static struct kobj_attribute flow_src_ipaddr_attr = __ATTR_RW(flow_src_ipaddr); static struct kobj_attribute run_attr = __ATTR_RO(run); static struct kobj_attribute run_verbose_attr = __ATTR_RO(run_verbose); static struct attribute *bench_attributes[] = { &warmup_count_attr.attr, &loop_count_attr.attr, &max_loop_count_attr.attr, &flow_oif_attr.attr, &flow_iif_attr.attr, &flow_label_attr.attr, &flow_mark_attr.attr, &flow_dst_ipaddr_s_attr.attr, &flow_dst_ipaddr_e_attr.attr, &flow_src_ipaddr_attr.attr, &run_attr.attr, &run_verbose_attr.attr, NULL }; static struct attribute_group bench_attr_group = { .attrs = bench_attributes, }; static struct kobject *bench_kobj; int init_module(void) { int rc; bench_kobj = kobject_create_and_add("kbench", kernel_kobj); if (!bench_kobj) return -ENOMEM; rc = sysfs_create_group(bench_kobj, &bench_attr_group); if (rc) { kobject_put(bench_kobj); return rc; } return 0; } void cleanup_module(void) { sysfs_remove_group(bench_kobj, &bench_attr_group); kobject_put(bench_kobj); } MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Micro-benchmark for fib_lookup()");
9,895
619
<reponame>moredu/upm /* * Author: <NAME> <<EMAIL>> * Copyright (c) 2016 Intel Corporation. * * This program and the accompanying materials are made available under the * terms of the The MIT License which is available at * https://opensource.org/licenses/MIT. * * SPDX-License-Identifier: MIT */ #include <iostream> #include <stdexcept> #include "upm_string_parser.hpp" #include "bh1750.hpp" using namespace upm; using namespace std; BH1750::BH1750(int bus, int addr, BH1750_OPMODES_T mode) : m_bh1750(bh1750_init(bus, addr, mode)) { if (!m_bh1750) throw std::runtime_error(string(__FUNCTION__) + ": bh1750_init() failed"); } BH1750::BH1750(std::string initStr) : mraaIo(initStr) { mraa_io_descriptor* descs = mraaIo.getMraaDescriptors(); std::vector<std::string> upmTokens; if(!mraaIo.getLeftoverStr().empty()) { upmTokens = UpmStringParser::parse(mraaIo.getLeftoverStr()); } // make sure MRAA is initialized int mraa_rv; if ((mraa_rv = mraa_init()) != MRAA_SUCCESS) { throw std::runtime_error(std::string(__FUNCTION__) + ": mraa_init() failed"); } m_bh1750 = (bh1750_context)malloc(sizeof(struct _bh1750_context)); if(!m_bh1750) throw std::runtime_error(std::string(__FUNCTION__) + ": bh1750_init() failed"); m_bh1750->is_continuous = false; // init the i2c context if(!descs->i2cs) { throw std::runtime_error(std::string(__FUNCTION__) + ": mraa_i2c_init() failed"); } else { if( !(m_bh1750->i2c = descs->i2cs[0]) ) { free(m_bh1750); throw std::runtime_error(std::string(__FUNCTION__) + ": mraa_i2c_init() failed"); } } for (std::string tok : upmTokens) { if(tok.substr(0, 5) == "mode:") { BH1750_OPMODES_T mode = (BH1750_OPMODES_T)std::stoi(tok.substr(5), nullptr, 0); if(bh1750_set_opmode(m_bh1750, mode) != UPM_SUCCESS) { bh1750_close(m_bh1750); throw std::runtime_error(std::string(__FUNCTION__) + ": bh1750_init() failed"); } } if(tok.substr(0, 8) == "powerUp:") { powerUp(); } if(tok.substr(0, 10) == "powerDown:") { powerDown(); } if(tok.substr(0, 12) == "sendCommand:") { uint8_t mode = (uint8_t)std::stoul(tok.substr(12), nullptr, 0); sendCommand(mode); } } } BH1750::~BH1750() { bh1750_close(m_bh1750); } void BH1750::reset() { bh1750_reset(m_bh1750); } float BH1750::getLux() { float lux = 0.0; if (bh1750_get_lux(m_bh1750, &lux) != UPM_SUCCESS) throw std::runtime_error(string(__FUNCTION__) + ": bh1750_get_lux() failed"); return lux; } float BH1750::getLuminance() { return getLux(); } void BH1750::powerUp() { if (bh1750_power_up(m_bh1750) != UPM_SUCCESS) throw std::runtime_error(string(__FUNCTION__) + ": bh1750_power_up() failed"); } void BH1750::powerDown() { if (bh1750_power_down(m_bh1750) != UPM_SUCCESS) throw std::runtime_error(string(__FUNCTION__) + ": bh1750_power_down() failed"); } void BH1750::setOpmode(BH1750_OPMODES_T mode) { if (bh1750_set_opmode(m_bh1750, mode) != UPM_SUCCESS) throw std::runtime_error(string(__FUNCTION__) + ": bh1750_set_opmode() failed"); } void BH1750::sendCommand(uint8_t cmd) { if (bh1750_send_command(m_bh1750, cmd) != UPM_SUCCESS) throw std::runtime_error(string(__FUNCTION__) + ": bh1750_send_command() failed"); } uint16_t BH1750::readData() { uint16_t data = 0; if (bh1750_read_data(m_bh1750, &data) != UPM_SUCCESS) throw std::runtime_error(string(__FUNCTION__) + ": bh1750_read_data() failed"); return data; }
2,023
405
<reponame>zishuimuyu/jeewx package org.jeecgframework.poi.excel.annotation; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** * Marks the id used when exporting to Excel * @author jueyue * */ @Retention(RetentionPolicy.RUNTIME) @Target({ java.lang.annotation.ElementType.TYPE }) public @interface ExcelTarget { /** * Defines the Excel export ID used to restrict which fields are exported */ public String id(); }
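As a usage sketch (the bean below is hypothetical and not part of this repository), the annotation is applied at class level and its id is what the export code references when selecting the fields belonging to that target; the field-level annotations from the same package that would normally accompany it are omitted because their signature is not shown in this file.

import org.jeecgframework.poi.excel.annotation.ExcelTarget;

// Hypothetical export bean: "courseEntity" is the id that export calls would reference.
@ExcelTarget(id = "courseEntity")
public class CourseEntity {
    private String courseName;
    private Integer studentCount;

    public String getCourseName() { return courseName; }
    public void setCourseName(String courseName) { this.courseName = courseName; }
    public Integer getStudentCount() { return studentCount; }
    public void setStudentCount(Integer studentCount) { this.studentCount = studentCount; }
}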
190
1,444
<reponame>GabrielSturtevant/mage package mage.cards.t; import java.util.UUID; import mage.abilities.costs.AlternativeCostSourceAbility; import mage.abilities.costs.common.ReturnToHandChosenControlledPermanentCost; import mage.abilities.effects.common.CounterTargetEffect; import mage.cards.CardImpl; import mage.cards.CardSetInfo; import mage.constants.CardType; import mage.constants.SubType; import mage.filter.common.FilterControlledLandPermanent; import mage.target.TargetSpell; import mage.target.common.TargetControlledPermanent; /** * * @author markedagain */ public final class Thwart extends CardImpl { private static final FilterControlledLandPermanent filter = new FilterControlledLandPermanent("Islands"); static{ filter.add(SubType.ISLAND.getPredicate()); } public Thwart(UUID ownerId, CardSetInfo setInfo) { super(ownerId,setInfo,new CardType[]{CardType.INSTANT},"{2}{U}{U}"); // You may return three Islands you control to their owner's hand rather than pay Thwart's mana cost. AlternativeCostSourceAbility ability; ability = new AlternativeCostSourceAbility(new ReturnToHandChosenControlledPermanentCost(new TargetControlledPermanent(3, 3, filter, true))); this.addAbility(ability); // Counter target spell. this.getSpellAbility().addEffect(new CounterTargetEffect()); this.getSpellAbility().addTarget(new TargetSpell()); } private Thwart(final Thwart card) { super(card); } @Override public Thwart copy() { return new Thwart(this); } }
542
711
<gh_stars>100-1000 package com.java110.code.util; import java.io.BufferedReader; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.URL; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.Date; public class FileUtilBase { public static void save(byte[] body, File file) throws IOException { writeToFile(body, file); } public static void save(byte[] body, String fileName, String filePath) throws IOException{ writeToFile(body, fileName, filePath); }
/*************************************************************************** * Delete the specified directory tree * * @param filePath */ protected static void deleteFileDirectory(String filePath) { /** * Construct a File object from the directory path to be deleted * */ File file = new File(filePath); File[] fileList = file.listFiles(); /** * Initialize the sub-directory path */ String dirPath = null; if (fileList != null) for (int i = 0; i < fileList.length; i++) { /** * If it is a file, delete it */ if (fileList[i].isFile()) fileList[i].delete(); /** * If it is a directory, delete all files under it first and then delete the directory itself */ if (fileList[i].isDirectory()) { dirPath = fileList[i].getPath(); // recursively delete all files under the given directory deleteFileDirectory(dirPath); } } /** * Delete the given root directory */ file.delete(); }
/** * Read a file into a byte[] * * @param fileName * @return * @throws FileNotFoundException */ protected static byte[] getFileByte(String fileName, boolean isDelete) throws FileNotFoundException { FileInputStream fileInputStream = new FileInputStream(fileName); byte[] buffer = getFileByte(fileInputStream); if (isDelete) { new File(fileName).delete(); } return buffer; }
/** * Write a byte[] to a file * * @param buffer * @param file * @throws IOException */ protected static File writeToFile(byte[] buffer, String fileName, String filePath) throws IOException { File dir = new File(filePath); if (!dir.exists()) { dir.mkdirs(); } String abPath = filePath.concat(fileName); File file = new File(abPath); if (!file.exists()) { file.createNewFile(); } FileOutputStream out = new FileOutputStream(file); out.write(buffer); out.close(); return file; }
/** * Write a byte[] to a file * * @param buffer * @param file * @throws IOException */ protected static File writeToFile(byte[] buffer, File file) throws IOException { FileOutputStream out = new FileOutputStream(file); out.write(buffer); out.close(); return file; }
/** * Read the data behind a URL into a byte[] * * @param url * @return * @throws IOException */ protected static byte[] getFileByte(URL url) throws IOException { if (url != null) { return getFileByte(url.openStream()); } else { return null; } }
/** * Read a byte[] from an InputStream * * @param in * @return */ protected static byte[] getFileByte(InputStream in) { ByteArrayOutputStream out = new ByteArrayOutputStream(4096); try { copy(in, out); } catch (IOException e) { e.printStackTrace(); } return out.toByteArray(); }
protected static void copy(InputStream in, OutputStream out) throws IOException { try { byte[] buffer = new byte[4096]; int nrOfBytes = -1; while ((nrOfBytes = in.read(buffer)) != -1) { out.write(buffer, 0, nrOfBytes); } out.flush(); } catch (IOException e) { } finally { try { if (in != null) { in.close(); } } catch (IOException ex) { } try { if (out != null) { out.close(); } } catch (IOException ex) { } } }
/** * Extract the file name from a path, including the extension * @param fileName * @return */ public static String getFileNameSuff(String fileName) { return fileName.substring(fileName.lastIndexOf("/")+1); }
/** * Take the name starting after the last slash in the path, up to (but not including) the extension * @param fileName * @return */ public static String getFileName(String fileName) { int pos = fileName.lastIndexOf("."); if(pos==-1){ return fileName; } return fileName.substring(fileName.lastIndexOf("/")+1,pos); }
/** * Copy a file * * @param f1 * source file * @param f2 * destination file * @return * @throws Exception */ public static long copyfile(File f1, File f2) throws Exception { mkdirs(f2.getParent()); if (f2.exists() && f2.isFile()) { f2.delete(); } System.out.println("Adding: "+f2.getAbsolutePath()); long time = new Date().getTime(); int length = 2097152; FileInputStream in = new FileInputStream(f1); FileOutputStream out = new FileOutputStream(f2); FileChannel inC = in.getChannel(); FileChannel outC = out.getChannel(); ByteBuffer b = null; while (true) { if (inC.position() == inC.size()) { inC.close(); outC.close(); return new Date().getTime() - time; } if ((inC.size() - inC.position()) < length) { length = (int) (inC.size() - inC.position()); } else length = 2097152; b = ByteBuffer.allocateDirect(length); inC.read(b); b.flip(); outC.write(b); outC.force(false); } } public static long copyfile(String filePath1, String filePath2) throws Exception { File f1=new File(filePath1); File f2=new File(filePath2); return copyfile(f1,f2); }
/** * Create a directory and its parent directories * @param file */ public static void mkdirs(File file){ if(file.isFile()){ file=new File(file.getParent()); } if(!file.exists()){ file.mkdirs(); } } /** * Create a directory and its parent directories * @param filePath */ public static void mkdirs(String filePath){ File file = new File(filePath); mkdirs(file); }
/** * Create the parent directories first, then create the file * @param file * @throws IOException */ public static void createrFile(File file) throws IOException{ mkdirs(file.getParent()); // check whether the parent directory exists if(!file.exists()){ file.createNewFile(); } }
/** * Write bytes into a file * @param buffer * @param file * @return * @throws IOException */ public static File writeNewFile(byte[] buffer, File file) throws IOException{ return writeToFile(buffer, file); }
/** * Delete a file * @param file the File object */ public static void deleteFile(File file) { System.out.println("Deleting file: "+file.getAbsolutePath()); file.delete(); } /** * Delete a file * @param filePath */ public static void deleteFile(String filePath) { deleteFile(new File(filePath)); }
/** * Create the parent directories, * delete file2 if it already exists, and copy file1 to file2 * @param file1 source file * @param file2 destination file * @return * @throws Exception */ public static void deleteAndCopyFile(File file1, File file2) throws Exception { mkdirs(file2.getParent()); if (file2.exists() && file2.isFile()) { file2.delete(); } if(file1.exists() && file1.isFile()){ copyfile(file1, file2); file1.delete(); } }
/** * Create the parent directories, * delete file2 if it already exists, and copy file1 to file2 * * @param file1Path * source file path * @param file2Path * destination file path * @return * @throws Exception */ public static void deleteAndCopyFile(String file1Path, String file2Path) throws Exception { File file1 = new File(file1Path); File file2 = new File(file2Path); deleteAndCopyFile(file1, file2); }
/** * Get the file extension, including the dot * @param fileName * @return */ public static String getExtention(String fileName) { int pos = fileName.lastIndexOf("."); if(pos+1<fileName.length()){ return fileName.substring(pos); } return ""; } /** * Get the file extension, without the dot * @param fileName * @return */ public static String getFileType(String fileName) { int pos = fileName.lastIndexOf("."); if(pos+1<fileName.length()){ return fileName.substring(pos+1); } return ""; }
/** * Get the file content from a path * @param filePath * @return */ public static String getFileContent(String filePath) { File file = new File(filePath); return getFileContent(file); } /** * Get the file content from a File object * @param file * @return */ public static String getFileContent(File file) { String htmlCode = ""; try { String encoding = "UTF-8"; if (file.isFile() && file.exists()) { // check that the file exists BufferedReader bufferedReader = new BufferedReader(new UnicodeReader(new FileInputStream(file), encoding)); String lineTxt = null; while ((lineTxt = bufferedReader.readLine()) != null) { htmlCode += lineTxt; } bufferedReader.close(); } else { System.out.println("Cannot find the specified file"); } } catch (Exception e) { System.out.println("Error reading the file content"); e.printStackTrace(); } return htmlCode; }
public static void upload(String filePath, String saveAsFileName,File upload) throws Exception { if (upload != null) { if (!filePath.equals("")) { File file = new File(filePath); if (!file.exists()) { file.mkdirs(); } } filePath =filePath.concat(saveAsFileName); File imageFile = new File(filePath); deleteAndCopyFile(upload, imageFile); } }
/** * Convert a file size in bytes to a human-readable string * @param filesize * @return */ public static String convertfilesize(long filesize) { String strunit="bytes"; String straftercomma=""; int intdivisor=1; if(filesize>=1024*1024) { strunit = "mb"; intdivisor=1024*1024; } else if(filesize>=1024) { strunit = "kb"; intdivisor=1024; } if(intdivisor==1) return filesize + " " + strunit; straftercomma = "" + 100 * (filesize % intdivisor) / intdivisor ; if(straftercomma.isEmpty()) straftercomma="0"; return filesize / intdivisor + "." + straftercomma + " " + strunit; } }
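A short, hypothetical caller exercising a few of the public static helpers defined above; the file paths are placeholders and the demo class is not part of the original package.

import java.io.File;

import com.java110.code.util.FileUtilBase;

public class FileUtilBaseDemo {
    public static void main(String[] args) throws Exception {
        // copyfile() creates the destination directory tree and returns the elapsed milliseconds.
        long millis = FileUtilBase.copyfile("/tmp/in/report.txt", "/tmp/out/report.txt");
        System.out.println("copy took " + millis + " ms");

        // Read the copied file back as one string and split the path into name and extension.
        String content = FileUtilBase.getFileContent("/tmp/out/report.txt");
        System.out.println(content.length());
        System.out.println(FileUtilBase.getFileName("/tmp/out/report.txt")); // "report"
        System.out.println(FileUtilBase.getFileType("/tmp/out/report.txt")); // "txt"

        // Human-readable size of the source file.
        System.out.println(FileUtilBase.convertfilesize(new File("/tmp/in/report.txt").length()));
    }
}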
4,496
2,591
package liquibase.command; /** * Builder for configuring {@link CommandStep} settings, such as {@link CommandArgumentDefinition}s and {@link CommandResultDefinition}s */ public class CommandBuilder { private final String[][] commandNames; /** * Creates a builder for the given command name */ public CommandBuilder(String[]... commandNames) { this.commandNames = commandNames; } /** * Starts the building of a new {@link CommandArgumentDefinition}. */ public <DataType> CommandArgumentDefinition.Building<DataType> argument(String name, Class<DataType> type) { return new CommandArgumentDefinition.Building<>(commandNames, new CommandArgumentDefinition<>(name, type)); } /** * Starts the building of a new {@link CommandResultDefinition}. */ public <DataType> CommandResultDefinition.Building<DataType> result(String name, Class<DataType> type) { return new CommandResultDefinition.Building<>(new CommandResultDefinition<>(name, type)); } }
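A sketch of how a command step might use this builder to declare its arguments and results. The command name, the argument and result names, and the required()/build() calls on the Building helpers are assumptions based on typical Liquibase usage; only the CommandBuilder constructor and its argument()/result() methods appear in the file above.

import liquibase.command.CommandArgumentDefinition;
import liquibase.command.CommandBuilder;
import liquibase.command.CommandResultDefinition;

public class ExampleCommandStep { // a real step would also implement CommandStep
    public static final String[] COMMAND_NAME = {"exampleCommand"};

    public static final CommandArgumentDefinition<String> URL_ARG;
    public static final CommandResultDefinition<Integer> CHANGES_APPLIED_RESULT;

    static {
        CommandBuilder builder = new CommandBuilder(COMMAND_NAME);
        // required() and build() are assumed to exist on the Building helpers returned here.
        URL_ARG = builder.argument("url", String.class).required().build();
        CHANGES_APPLIED_RESULT = builder.result("changesApplied", Integer.class).build();
    }
}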
323
2,293
<gh_stars>1000+ BANDS = ["Metallica", "Black Sabbath", "Iron Maiden"]
26
532
/******************************************************************************* TOOLS.C Author: <NAME> Date: 8-DEC-88 Copyright (c) 1992-5 MusculoGraphics, Inc. All rights reserved. Portions of this source code are copyrighted by MusculoGraphics, Inc. Description: This file contains a bunch of generic tool routines that are used in a wide range of circumstances. Routines: finddistance : computes distance between two points, spans ref frames get_path_between_frames : finds the path betwen any two ref frames find_joint_between_frames : finds the joint spanning two ref frames findvelocity : finds the velocity of a musculotendon unit evaluate : finds the current value of a reference equation (dof) error : handles error messages setgencoord : sets value of a gencoord, marks joint matrices dirty member : checks an int array for a specified member change_filename_suffix : puts a new suffix on a filename message : prints a message to stdout gencoord_in_path : checks if a gencoord is used in a chain of joints print_4x4matrix : prints a 4x4 matrix to stdout register_tool : records tool info in array when tool is created strcat3 : concatenates three strings together mstrcpy : like strcpy, but mallocs space for the copy first draw_title_area : draws the title area at the top of each tool window check_title_area : checks if user hit a button in the title area print_time : prints the current time to stdio (for benchmarking) make_time_string : converts current time into readable string convert_string : converts special chars in a string to underscores set_viewport : sets the viewport and resets MODELVIEW and PROJECTION mats *******************************************************************************/ #include "universal.h" #include <stdarg.h> #include <errno.h> #include <ctype.h> #include <io.h> #define _POSIX_ 1 #include <fcntl.h> #include <direct.h> #include "globals.h" #include "functions.h" #include "normio.h" /*************** DEFINES (for this file only) *********************************/ #define BETA_VERSION 0 /*************** STATIC GLOBAL VARIABLES (for this file only) *****************/ static int last_index = -1; char* keys[] = { "null_key","null_key","null_key","null_key","null_key","null_key","null_key","null_key", "backspace_key","tab_key","null_key","null_key","null_key","return_key","enter_key", "null_key","null_key","null_key","null_key","null_key","null_key","null_key","null_key", "null_key","null_key","null_key","null_key","escape_key","null_key","null_key", "null_key","null_key","space_key","bang_key","double_quote_key","pound_sign_key", "dollar_key","percent_key","ampersand_key","single_quote_key", "left_paren_key","right_paren_key","asterisk_key","plus_key", "comma_key","dash_key","period_key","slash_key","zero_key","one_key","two_key","three_key", "four_key","five_key","six_key","seven_key","eight_key","nine_key","colon_key","semicolon_key", "less_than_key","equals_key","greater_than_key","question_mark_key","at_sign_key", "A_key","B_key","C_key","D_key","E_key","F_key","G_key","H_key","I_key","J_key","K_key", "L_key","M_key","N_key","O_key","P_key","Q_key","R_key","S_key","T_key","U_key","V_key", "W_key","X_key","Y_key","Z_key","left_bracket_key","backslash_key", "right_bracket_key","carat_key","underscore_key","back_quote_key", "a_key","b_key","c_key","d_key","e_key","f_key","g_key","h_key","i_key", "j_key","k_key","l_key","m_key","n_key","o_key","p_key","q_key","r_key","s_key", "t_key","u_key","v_key","w_key","x_key","y_key","z_key","left_brace_key", 
"vertical_bar_key","right_brace_key","tilde_key","delete_key", "leftmouse_button","middlemouse_button","rightmouse_button", "mouse_motion","window_shut","window_quit","input_change","depth_change","window_thaw", "window_freeze","f1_key","f2_key","f3_key","f4_key","f5_key","f6_key","f7_key", "f8_key","f9_key","f10_key","f11_key","f12_key","left_arrow_key","up_arrow_key", "right_arrow_key","down_arrow_key","page_up_key","page_down_key","home_key", "end_key","insert_key","shift_key","control_key","alt_ket","caps_lock_key"}; /*************** GLOBAL VARIABLES (used in only a few files) ******************/ /*************** EXTERNED VARIABLES (declared in another file) ****************/ #if ! ENGINE extern HWND __mdiClientHwnd; // Windows handle to main SIMM window #endif /*************** PROTOTYPES for STATIC FUNCTIONS (for this file only) *********/ static SBoolean verify_date(int day, int month, int year); unsigned sysid(unsigned char id[16]); #if OPENSMAC #undef ENGINE #define ENGINE 1 #endif /* MAKE_STRING_LOWER_CASE: */ void make_string_lower_case(char str_buffer[]) { char c; int curpos = 0; while ((c=str_buffer[curpos++]) != STRING_TERMINATOR) if (c >= 65 && c <= 91) str_buffer[curpos-1] += 32; } /* CALC_VECTOR_LENGTH: This routine calculates the length of a vector. The * vector is defined by two points, which can be in different body segment * reference frames. */ double calc_vector_length(ModelStruct* ms, double p1[], int frame1, double p2[], int frame2) { double ans, x, p3[3]; p3[0] = p1[0]; p3[1] = p1[1]; p3[2] = p1[2]; // Convert the start point into the end point's frame. if (frame1 != frame2) convert(ms, p3, frame1, frame2); x = ((p2[0]-p3[0])*(p2[0]-p3[0]) + (p2[1]-p3[1])*(p2[1]-p3[1]) + (p2[2]-p3[2])*(p2[2]-p3[2])); ans = sqrt(x); return ans; } /* GET_PATH_BETWEEN_FRAMES: This routine returns the path between two body * segment reference frames. The path consists of the list of body segments * in between the two specified frames. */ int* get_path_between_frames(ModelStruct* ms, int frame1, int frame2) { return (ms->pathptrs[ms->numsegments*frame1+frame2]); } /* FIND_JOINT_BETWEEN_FRAMES: This routine finds the joint which spans the * two specified body segment reference frames. It also indicates whether * the joint is traversed forwards or backwards to get from the from_frame * to the to_frame. If the reference frames are not adjacent, it returns NULL. */ int find_joint_between_frames(ModelStruct* ms, int from_frame, int to_frame, Direction* dir) { int i; for (i=0; i<ms->numjoints; i++) { if (ms->joint[i].from == from_frame && ms->joint[i].to == to_frame) { *dir = FORWARD; return i; } if (ms->joint[i].from == to_frame && ms->joint[i].to == from_frame) { *dir = INVERSE; return i; } } return ZERO; } #if ! CORTEX_PLUGIN && ! OPENSMAC && ! SIMMTOOPENSIM #endif /* EVALUATE_DOF: This routine calculates the current value of a dof. It stores * the value inside the dof structure, and also returns the value. If the dof * is a constant, then the value is already stored in var->value. */ double evaluate_dof(ModelStruct* ms, DofStruct* var) { if (var->type == constant_dof) return var->value; else { var->value = interpolate_function(var->gencoord->value, var->function, zeroth, 1.0, 1.0); return var->value; } } #if ! ENGINE void deiconify_message_window () { #if ! 
NO_GUI int i, savedID = glutGetWindow(); for (i = 0; i < root.numwindows; i++) if (root.window[i].win_parameters->id == root.messages.window_id) break; if (i == root.numwindows) return; glutSetWindow(root.messages.window_id); if (i < root.numwindows && root.window[i].state == WINDOW_ICONIC) glutShowWindow(); else glutPopWindow(); glutSetWindow(savedID); #endif } #endif /* ENGINE */ /* ERROR: this routine prints an error message depending on a string * and error status that are passed in. */ void error(ErrorAction action, char str_buffer[]) { #if ! ENGINE && ! CORTEX_PLUGIN deiconify_message_window(); #endif if (str_buffer != NULL) message(str_buffer,HIGHLIGHT_TEXT,DEFAULT_MESSAGE_X_OFFSET); if (action == recover) message("Attempting to recover.",0,DEFAULT_MESSAGE_X_OFFSET); else if (action == abort_action) message("Action cancelled.",0,DEFAULT_MESSAGE_X_OFFSET); else if (action == exit_program) { putchar('\a'); /* was ringbell() */ fflush(stdout); message("Program terminated.",HIGHLIGHT_TEXT,DEFAULT_MESSAGE_X_OFFSET); #if ! NO_GUI fprintf(stderr,"Fatal Error.\nProgram terminated."); #endif exit(0); } } int find_next_active_field(Form* form, int current_field, TextFieldAction tfa) { int field, increment; if (tfa == goto_next_field) increment = 1; else if (tfa == goto_previous_field) increment = form->numoptions - 1; else return current_field; field = (current_field+increment) % form->numoptions; while (field != current_field) { if ((form->option[field].active == yes) && (form->option[field].editable == yes)) break; field = (field+increment) % form->numoptions; } return field; } /* SET_GENCOORD_VALUE: this is an important little routine. It should be the * ONLY way that the value of a generalized coordinate is changed. * It sets the value, and then marks some conversion matrices invalid * so that they will not be used again without being recalculated. * The matrices which use the dof in question, as stored in the jointnum[] * array for that dof, are marked. */ #if OPENSMAC int set_gencoord_value(ModelStruct* ms, GeneralizedCoord* gencoord, double value, SBoolean solveLoopsAndConstraints) { int i; SBoolean solveLoops, solveConstraints, sol; /* check whether the gencoord value has changed. If not, don't bother * updating anything. */ if ((DABS(value - gencoord->value) <= gencoord->tolerance)) return 0; if (gencoord->type == rotation_gencoord) checkGencoordRange(gencoord, &value); if (gencoord->clamped == yes) { if (value < gencoord->range.start) value = gencoord->range.start; else if (value > gencoord->range.end) value = gencoord->range.end; } /* Resolve any closed loops in the model, then update the gencoord value * (which may have been changed to close the loops). If any other * gencoord values are changed to close loops, resolveClosedLoops takes * care of changing their values. If the solver could not find a valid * solution, the gencoord is not updated and the configuration does not * change. 
*/ if (solveLoopsAndConstraints == yes) { sol = solveLCAffectedByGC(ms, gencoord, &value); ms->constraintsOK = sol;// && ms->constraintsOK; ms->loopsOK = sol;// && ms->loopsOK; gencoord->value = value; if (sol == no) return 0; } else { /* loops and constraints are not solved, copy new value into gencoord */ gencoord->value = value; } for (i=0; i<gencoord->numjoints; i++) invalidate_joint_matrix(ms, &ms->joint[gencoord->jointnum[i]]); /* if the gencoord being changed is a translational dof, then we need to * invalidate the current bounds of the scene to prevent the model from * sliding behind the far clipping plane. -- added KMS 10/7/99 */ if (gencoord->type == translation_gencoord) ms->max_diagonal_needs_recalc = yes; return 1; } #else int set_gencoord_value(ModelStruct* ms, GeneralizedCoord* gencoord, double value, SBoolean solveLoopsAndConstraints) { int i; SBoolean sol; int genc = getGencoordIndex(ms, gencoord); #if ! OPENSMAC /* check whether the gencoord value has changed. If not, don't bother * updating anything. Also check the value in the model viewer window * to see whether that needs updating */ if ((DABS(value - gencoord->value) <= gencoord->tolerance) #if ! CORTEX_PLUGIN && (DABS(value - ms->gencslider.sl[genc].value) <= gencoord->tolerance) #endif ) { return 0; } #endif #if ! ENGINE && ! CORTEX_PLUGIN if (gencoord->type == rotation_gencoord) checkGencoordRange(gencoord, &value); ms->gencform.option[genc].use_alternate_colors = no; ms->gc_chpanel.checkbox[genc].use_alternate_colors = no; #endif if (gencoord->clamped == yes) // if ((gencoord->clamped == yes) && (solveLoopsAndConstraints == yes)) //added dkb apr 16 2003 { // if the value in the motion file for a clamped gencoord is outside the gencoord range, // set the value to the closest range point // DKB TODO: set some kind of flag, colour etc? to let user know what is happening if (value < gencoord->range.start) { #if ! CORTEX_PLUGIN //ms->gencform.option[genc].use_alternate_colors = yes; ///dkb jul 2008 ms->gc_chpanel.checkbox[genc].use_alternate_colors = yes; ///dkb jul 2008 #endif value = gencoord->range.start; } else if (value > gencoord->range.end) { #if ! CORTEX_PLUGIN // ms->gencform.option[genc].use_alternate_colors = yes; ///dkb jul 2008 ms->gc_chpanel.checkbox[genc].use_alternate_colors = yes; #endif value = gencoord->range.end; } } else { #if ! ENGINE && ! CORTEX_PLUGIN if (value < gencoord->range.start || value > gencoord->range.end) ms->gencform.option[genc].use_alternate_colors = yes; #endif } #if ! ENGINE /* Resolve any closed loops in the model, then update the gencoord value * (which may have been changed to close the loops). If any other * gencoord values are changed to close loops, resolveClosedLoops takes * care of changing their values. If the solver could not find a valid * solution, the gencoord is not updated and the configuration does not * change. */ if (solveLoopsAndConstraints == yes) { sol = solveLCAffectedByGC(ms, gencoord, &value); ms->constraintsOK = sol;// && ms->constraintsOK; ms->loopsOK = sol;// && ms->loopsOK; gencoord->value = value; if (sol == no) return 0; } else { /* loops and constraints are not solved, copy new value into gencoord */ gencoord->value = value; } #if ! 
CORTEX_PLUGIN if (value < gencoord->range.start || value > gencoord->range.end) ms->gencform.option[genc].use_alternate_colors = yes; ms->gencslider.sl[genc].value = value; storeDoubleInForm(&ms->gencform.option[genc], gencoord->value, 3); #endif for (i=0; i<gencoord->numjoints; i++) invalidate_joint_matrix(ms, &ms->joint[gencoord->jointnum[i]]); /* hack so that ground-reaction forces are shown only during a motion */ ms->dis.applied_motion = NULL; /* if the gencoord being changed is a translational dof, then we need to * invalidate the current bounds of the scene to prevent the model from * sliding behind the far clipping plane. -- added KMS 10/7/99 */ if (gencoord->type == translation_gencoord) ms->max_diagonal_needs_recalc = yes; #endif return 1; } #endif /* OPENSMAC */ void set_gencoord_velocity(ModelStruct* ms, GeneralizedCoord* gencoord, double value) { gencoord->velocity = value; } char* get_suffix(char str[]) { int cp = 0; cp = strlen(str) - 1; while (cp >= 0 && str[cp] != '.') cp--; if (cp == 0) return NULL; return &str[cp+1]; } /* CHANGE_FILENAME_SUFFIX: this routine changes the suffix of a file name. * It scans the name for a "." (starting from the end) and assumes that * everything after the "." is the suffix which is to be changed. Examples: * input = "foo.bar", suffix = "tree" --------> output = "foo.tree" * input = "foo.foo", suffix = "bar" ---------> output = "foo.bar" * input = "foo", suffix = "bar" -------------> output = "foo.bar" * input = "foo.bar.tree", suffix = "rock" ---> output = "foo.bar.rock" */ void change_filename_suffix(const char input[], char output[], const char suffix[], int outputSize) { int cp; cp = strlen(input) - 1; while (input[cp] != '.' && cp > 0) cp--; if (cp == 0) { if (suffix) sprintf(output, "%s.%s", input, suffix); else strcpy(output, input); } else { if (suffix) { strncpy(output, input, cp + 1); output[cp + 1] = STRING_TERMINATOR; strcat(output, suffix); } else { strncpy(output, input, cp); output[cp] = STRING_TERMINATOR; } } } #if ENGINE || CORTEX_PLUGIN #define NO_GUI 1 #endif void message(char message_str[], int format, int xoffset) { #if OPENSMAC static int overwritable = 0; if (overwritable || (format & OVERWRITE_LAST_LINE)) { add_line_to_converter_dialog(message_str, 1); } else { add_line_to_converter_dialog(message_str, 0); } if (format & OVERWRITABLE) overwritable = 1; else overwritable = 0; #elif NO_GUI printf("%s\n", message_str); fflush(stdout); #else int i, nl, winid; HelpStruct* hp; #if 0 printf("%s\n", message_str); fflush(stdout); #endif /* If there are no lines malloced in the message structure, just print * the message to stdout and return. */ if (root.messages.line == NULL) { printf("%s\n", message_str); return; } winid = glutGetWindow(); hp = &root.messages; /* If the last line in the list is overwritable, or if the incoming * line is OVERWRITE_LAST_LINE, overwrite the last line with the * incoming one. 
*/ if (hp->num_lines > 0 && ((hp->line[hp->num_lines - 1].format & OVERWRITABLE) || (format & OVERWRITE_LAST_LINE))) { hp->num_lines--; FREE_IFNOTNULL(hp->line[hp->num_lines].text); } if (hp->num_lines < hp->num_lines_malloced) { nl = hp->num_lines; mstrcpy(&hp->line[nl].text,message_str); hp->line[nl].format = format; hp->line[nl].xoffset = xoffset; hp->num_lines++; if (hp->num_lines <= hp->lines_per_page) hp->sl.thumb_thickness = -1; else hp->sl.thumb_thickness = hp->lines_per_page* (hp->sl.shaft.y2-hp->sl.shaft.y1)/hp->num_lines; } else { FREE_IFNOTNULL(hp->line[0].text); for (i=0; i<hp->num_lines_malloced; i++) { hp->line[i].text = hp->line[i+1].text; hp->line[i].format = hp->line[i+1].format; hp->line[i].xoffset = hp->line[i+1].xoffset; } nl = hp->num_lines_malloced - 1; mstrcpy(&hp->line[nl].text,message_str); hp->line[nl].format = format; hp->line[nl].xoffset = xoffset; } hp->sl.max_value = hp->num_lines*20.0; hp->sl.value = hp->sl.min_value; hp->starting_line = HELP_WINDOW_TEXT_Y_SPACING * _MAX(0,hp->num_lines-hp->lines_per_page); draw_message_window(NULL,NULL); glutSetWindow(winid); #endif /* ! NO_GUI */ } /* ------------------------------------------------------------------------- simm_printf - this routine provides printf-style output to the simm message window. The 'hilite_text' parameter specifies whether the text is displayed normal or hilited. NOTE: this routine will buffer text until an end-of-line character is detected. This allows you to build a single line message via multiple calls to this routine. However, this means that you MUST TERMINATE EACH MESSAGE LINE WITH A '\n' CHARACTER FOR THE LINE TO BE SENT TO THE MESSAGE WINDOW. ---------------------------------------------------------------------------- */ public int simm_printf (SBoolean hilite_text, const char* format, ...) { static char sMessageBuf[CHARBUFFER]; va_list ap; int n, simmMsgFormat = 0; va_start(ap, format); n = vsprintf(msg, format, ap); va_end(ap); #if ! ENGINE && ! CORTEX_PLUGIN if (hilite_text) { simmMsgFormat += HIGHLIGHT_TEXT; deiconify_message_window(); } #endif if (strchr(msg, '\n')) { char* p = strtok(msg, "\n"); if (strlen(sMessageBuf) > 0) { if (p) strcat(sMessageBuf, p); message(sMessageBuf, simmMsgFormat, DEFAULT_MESSAGE_X_OFFSET); sMessageBuf[0] = '\0'; } else if (p) message(p, simmMsgFormat, DEFAULT_MESSAGE_X_OFFSET); if (p) for (p = strtok(NULL, "\n"); p; p = strtok(NULL, "\n")) message(p, simmMsgFormat, DEFAULT_MESSAGE_X_OFFSET); } else strcat(sMessageBuf, msg); return n; } SBoolean gencoord_in_path(ModelStruct* ms, int frame1, int frame2, GeneralizedCoord* gencoord) { int i, j, joint; int* path; path = GET_PATH(ms, frame1, frame2); for (i=0; path[i] != ms->numjoints + 1; i++) { joint = ABS(path[i]) - 1; for (j=0; j<6; j++) if (ms->joint[joint].dofs[j].type == function_dof) if (ms->joint[joint].dofs[j].gencoord == gencoord) return yes; } return no; } void print_4x4matrix(double matrix[][4]) { int i, j; for (i=0; i<4; i++) { for (j=0; j<4; j++) printf("%8.5lf ", matrix[i][j]); printf("\n"); } } #if ! 
ENGINE ToolStruct* register_tool(int struct_size, unsigned int event_mask, void (*event_handler)(SimmEvent), void (*command_handler)(char*), SBoolean (*query_handler)(QueryType, void*), char name[], int* ref_number) { int i; for (i=0; i<TOOLBUFFER; i++) if (tool[i].used == no) break; if (i == TOOLBUFFER) { fprintf(stderr, "ERROR: tools array overflow.\n"); return (NULL); } tool[i].used = yes; mstrcpy(&tool[i].name,name); tool[i].simm_event_mask = event_mask; tool[i].simm_event_handler = event_handler; tool[i].command_handler = command_handler; tool[i].query_handler = query_handler; tool[i].tool_struct = (void*)simm_calloc(1, struct_size); if (tool[i].tool_struct == NULL) error(exit_program,tool_message); *ref_number = root.numtools++; return (&tool[i]); } #endif void strcat3(char dest[], const char str1[], const char str2[], const char str3[], int destSize) { if (dest && str1) { (void)strcpy(dest, str1); if (str2) (void)strcat(dest, str2); if (str3) (void)strcat(dest, str3); } } #if ! MEMORY_LEAK /* MSTRCPY: this routine is like strcpy(), but it first mallocs space for * the copy of the string, and frees any space the destination pointer used * to point to. */ ReturnCode mstrcpy(char* dest_str[], const char original_str[]) { char* p; int len; #if 0 if (*dest_str == original_str) return code_fine; //FREE_IFNOTNULL(*dest_str); #endif if (original_str == NULL) { *dest_str = NULL; return code_fine; } len = STRLEN(original_str); p = (char*)simm_malloc(len * sizeof(char)); if (p == NULL) { *dest_str = NULL; return code_bad; } (void)strcpy(p, original_str); *dest_str = p; return code_fine; } #endif /* MSTRCAT: this routine is like strcat(), but it first mallocs space for * the copy of the string. */ ReturnCode mstrcat(char* old_str[], const char append_str[]) { int new_size; ReturnCode rc; new_size = strlen(*old_str) + strlen(append_str) + 1; if ((*old_str = (char*)simm_realloc(*old_str, new_size*sizeof(char), &rc)) == NULL) return code_bad; (void)strcat(*old_str, append_str); return code_fine; } #ifndef NO_GUI #if ! 
ENGINE void draw_title_area(WindowParams* win_params, ModelStruct* ms, PlotStruct* ps, int title_mask) { simm_color(TOOL_TITLE_AREA_BACKGROUND); glRecti(win_params->vp.x1, win_params->vp.y2-TITLE_AREA_HEIGHT, win_params->vp.x2+1, win_params->vp.y2+1); simm_color(TOOL_TITLE_AREA_BORDER); glBegin(GL_LINE_STRIP); glVertex2i(win_params->vp.x1-1, win_params->vp.y2-TITLE_AREA_HEIGHT); glVertex2i(win_params->vp.x2+1, win_params->vp.y2-TITLE_AREA_HEIGHT); glEnd(); simm_color(TOOL_TITLE_AREA_TEXT); glueSetFont(root.gfont.largefont); if (title_mask & SHOW_MODEL) { glRasterPos2i(win_params->vp.x1+15, win_params->vp.y2-23); glueDrawString("Model: "); if (ms == NULL) glueDrawString("none"); else glueDrawString(ms->name); } if (title_mask & SHOW_PLOT) { glRasterPos2i((win_params->vp.x1+win_params->vp.x2)/2-35, win_params->vp.y2-23); glueDrawString("Plot: "); if (ps == NULL) glueDrawString("none"); else glueDrawString(ps->title); } if (title_mask & SHOW_MODEL) { root.model_selector.origin.x = win_params->vp.x1+15; root.model_selector.origin.y = win_params->vp.y2-TITLE_AREA_HEIGHT+5; draw_menu(&root.model_selector); } if (title_mask & SHOW_PLOT) { root.plot_selector.origin.x = (win_params->vp.x1+win_params->vp.x2)/2 - 35; root.plot_selector.origin.y = win_params->vp.y2-TITLE_AREA_HEIGHT+5; draw_menu(&root.plot_selector); } if (title_mask & SHOW_HELP) { root.help_selector.origin.x = win_params->vp.x2-85; root.help_selector.origin.y = win_params->vp.y2-TITLE_AREA_HEIGHT+5; draw_menu(&root.help_selector); } } typedef struct { TitleAreaCBParams params; ModelStruct* model; PlotStruct* plot; int entry; title_area_cb titleAreaCB; } TitleAreaMenuCBData; static void model_menu_cb(int item, void* userData) { TitleAreaMenuCBData* data = (TitleAreaMenuCBData*) userData; int i, numstructs = 0; if (data->model && data->entry > 0) glueCheckMenuItem(root.modelmenu,data->entry,GLUE_UNCHECK); //highlight_menu_item(&root.model_selector,0,off,yes); if (item <= 0 || root.nummodels == 0) return; for (i=0; i<MODELBUFFER; i++) { if (gModel[i] == NULL) continue; if (++numstructs == item) break; } if (i == MODELBUFFER) return; data->params.struct_ptr = (void*)(gModel[i]); if (data->titleAreaCB) data->titleAreaCB(MODEL_SELECTED, &data->params); free(data); } static void plot_menu_cb(int item, void* userData) { TitleAreaMenuCBData* data = (TitleAreaMenuCBData*) userData; int i, numstructs = 0; if (data->plot != NULL && data->entry > 0) glueCheckMenuItem(root.plotmenu,data->entry+1,GLUE_UNCHECK); //highlight_menu_item(&root.plot_selector,0,off,yes); if (item <= 0) return; if (item == 1) { data->params.struct_ptr = (void*) NULL; } else { item -= 1; if (item <= 0 || root.numplots == 0) return; for (i=0; i<PLOTBUFFER; i++) { if (gPlot[i] == NULL) continue; if (++numstructs == item) break; } if (i == PLOTBUFFER) return; data->params.struct_ptr = (void*)(gPlot[i]); } if (data->titleAreaCB) data->titleAreaCB(PLOT_SELECTED, &data->params); free(data); } static TitleAreaMenuCBData* alloc_title_area_data(WindowParams* win_params, void* struct_ptr, int entry, ModelStruct* ms, PlotStruct* ps, title_area_cb titleAreaCB) { TitleAreaMenuCBData* data = NULL; if (titleAreaCB) { data = (TitleAreaMenuCBData*) simm_malloc(sizeof(TitleAreaMenuCBData)); data->params.win_params = win_params; data->params.struct_ptr = struct_ptr; data->entry = entry; data->model = ms; data->plot = ps; data->titleAreaCB= titleAreaCB; } return data; } int check_title_area(int title_mask, int mx, int my, WindowParams* win_params, void** struct_ptr, ModelStruct* ms, 
PlotStruct* ps, title_area_cb titleAreaCB) { int entry = 0; root.model_selector.origin.x = win_params->vp.x1+15; root.model_selector.origin.y = win_params->vp.y2-TITLE_AREA_HEIGHT+5; root.plot_selector.origin.x = (win_params->vp.x1+win_params->vp.x2)/2 - 35; root.plot_selector.origin.y = win_params->vp.y2-TITLE_AREA_HEIGHT+5; root.help_selector.origin.x = win_params->vp.x2-85; root.help_selector.origin.y = win_params->vp.y2-TITLE_AREA_HEIGHT+5; if ((title_mask & SHOW_MODEL) && (check_menu(&root.model_selector,mx,my) == 0)) { highlight_menu_item(&root.model_selector,0,on,yes); if (ms != NULL) { if ((entry = find_model_ordinal(ms->modelnum)) > 0) { glueCheckMenuItem(root.modelmenu,entry,GLUE_CHECK); } } gluePopMenu(root.modelmenu, model_menu_cb, alloc_title_area_data(win_params, *struct_ptr, entry, ms, ps, titleAreaCB)); return MODEL_SELECTED; } if ((title_mask & SHOW_PLOT) && (check_menu(&root.plot_selector,mx,my) == 0)) { highlight_menu_item(&root.plot_selector,0,on,yes); if (ps != NULL) { if ((entry = find_plot_ordinal(ps->plotnum)) > 0) { glueCheckMenuItem(root.plotmenu,entry+1,GLUE_CHECK); } } gluePopMenu(root.plotmenu, plot_menu_cb, alloc_title_area_data(win_params, *struct_ptr, entry, ms, ps, titleAreaCB)); return PLOT_SELECTED; } if ((title_mask & SHOW_HELP) && (check_menu(&root.help_selector,mx,my) == 0)) { if (titleAreaCB) { TitleAreaMenuCBData* data = alloc_title_area_data(win_params, *struct_ptr, entry, ms, ps, titleAreaCB); titleAreaCB(HELP_SELECTED, &data->params); free(data); } return HELP_SELECTED; } return NULL_SELECTED; } #endif /* ! NO_GUI */ #endif /* ENGINE */ void make_time_string(char** time_string) { time_t t = time(NULL); strftime(buffer, CHARBUFFER, "%m/%d/%Y %I:%M:%S %p", localtime(&t)); mstrcpy(time_string, buffer); } #if ! SIMMTOOPENSIM && ! OPENSMAC /* CONVERT_STRING: this routine scans a string and converts all special * characters into underscores. A special character is any character * other than a letter or number. If prependUnderscore is 'yes,' this * function will prepend an underscore character if the string starts with * a number. It is assumed that the string already has space for this extra * character. The resulting string is one token, and can therefore be used * as a variable name in SIMM-written C code. */ void convert_string(char str[], SBoolean prependUnderscore) { int i, len; len = strlen(str); for (i = 0; i < len; i++) { if (str[i] >= 97 && str[i] <= 122) /* lowercase letters */ continue; if (str[i] >= 65 && str[i] <= 90) /* uppercase letters */ continue; if (str[i] >= 48 && str[i] <= 57) /* numbers */ continue; str[i] = '_'; } /* If the first character is a number, prepend an underscore. */ if (str[0] >= 48 && str[0] <= 57) { for (i = len + 1; i > 0; i--) str[i] = str[i-1]; str[0] = '_'; } } #endif /* convertSpacesInString: this routine scans a string and converts all spaces * into underscores. */ void convertSpacesInString(char str[]) { unsigned int i; for (i = 0; i < strlen(str); i++) { if (str[i] == 32) str[i] = '_'; } } #if ! 
ENGINE void simm_color(int index) { glColor3fv(root.color.cmap[index].rgb); } void set_hourglass_cursor(double percent) { int index, num_cursors = 11, new_cursor = 20; GLshort junk = 0xfff; if (percent > 99.99) { index = -1; } else { index = percent*num_cursors/100.0; if (index < 0) index = 0; if (index >= num_cursors) index = num_cursors - 1; } if (index == last_index) return; if (index == -1) glutSetCursor(GLUT_CURSOR_INHERIT); else glutSetCursor(GLUT_CURSOR_WAIT); last_index = index; } #endif /* ENGINE */ /* ------------------------------------------------------------------------- simm_fopen - CodeWarrior's fopen() appears to have a bug in which the "w" and "w+" modes do not discard the previous contents of the file being opened. Therefore you should always call this routine instead of fopen() to workaround the bug. ---------------------------------------------------------------------------- */ FILE* simm_fopen (const char* name, const char* mode) { #ifdef WIN32 if (mode && mode[0] == 'w') remove(name); #endif errno = 0; return fopen(name, mode); } /* ------------------------------------------------------------------------- simm_open - CodeWarrior's open() appears to have a bug in which the O_CREAT mode does not discard the previous contents of the file being opened. Therefore you should always call this routine instead of open() to workaround the bug. ---------------------------------------------------------------------------- */ int simm_open (const char *name, int oflag, ...) { #ifdef WIN32 if ((oflag & O_CREAT) && ((oflag & O_WRONLY) || (oflag & O_TRUNC))) remove(name); #endif errno = 0; return open(name, oflag); } /* --------------------------------------------------------------------------- simm_lookup_file - this routine looks for the specified file in a list of directory paths starting with the first path and moving forward through the list until the file is found or the list is exhausted. If the file is found, it is opened using the specified 'mode', and the path+name of the file is returned in the buffer pointed to by 'pathList'. NOTE: this routine uses strtok() to parse the specified 'pathList'. strtok() will modify the pathList by inserting NUL characters. ------------------------------------------------------------------------------ */ FILE* simm_lookup_file (char* pathList, const char* fileName, const char* mode) { #define PATH_SEPERATORS ",;" char* p; if (pathList == NULL || fileName == NULL || mode == NULL) return NULL; for (p = strtok(pathList, PATH_SEPERATORS); p; p = strtok(NULL, PATH_SEPERATORS)) { FILE* f; char buf[CHARBUFFER] = ""; strcpy(buf, p); if (buf[strlen(buf) - 1] != DIR_SEP_CHAR) strcat(buf, DIR_SEP_STRING); strcat(buf, fileName); f = simm_fopen(buf, mode); if (f) { strcpy(pathList, buf); /* return path+file in 'pathList' */ return f; } } return NULL; } #if ! MEMORY_LEAK void* simm_malloc(unsigned mem_size) { void* ptr; // Temporary hack so that you don't have to check mem_size before calling // simm_malloc()-- Make sure you don't try to malloc 0 bytes because malloc // will [appropriately] return NULL. The long-term solution is to check all // places where simm_malloc() is called and be smart enough not to call // the routine if mem_size is 0. if (mem_size <= 0) mem_size = sizeof(int); ptr = malloc(mem_size); if (ptr == NULL) { // error() may need to malloc, so don't call it. //sprintf(errorbuffer,"Ran out of memory. 
Unable to malloc %d bytes.", //(int)mem_size); //error(none,errorbuffer); } return ptr; } void* simm_calloc(unsigned num_elements, unsigned elem_size) { void* ptr; // Temporary hack so that you don't have to check mem_size before calling // simm_calloc()-- Make sure you don't try to calloc 0 bytes because calloc // will [appropriately] return NULL. The long-term solution is to check all // places where simm_calloc() is called and be smart enough not to call // the routine if mem_size is 0. if (num_elements*elem_size <= 0) { num_elements = 1; elem_size = sizeof(int); } ptr = calloc(num_elements, elem_size); if (ptr == NULL) { sprintf(errorbuffer, "Ran out of memory. Unable to calloc %d bytes.", (int)(num_elements*elem_size)); error(none, errorbuffer); } return ptr; } void* simm_realloc(void* ptr, unsigned mem_size, ReturnCode* rc) { void* new_ptr; // Temporary hack so that you don't have to check mem_size before calling // simm_realloc()-- Make sure you don't try to realloc 0 bytes because realloc // will [appropriately] return NULL. The long-term solution is to check all // places where simm_realloc() is called and be smart enough not to call // the routine if mem_size is 0. if (mem_size <= 0) mem_size = sizeof(int); new_ptr = realloc(ptr, mem_size); if (new_ptr == NULL) { sprintf(errorbuffer, "Ran out of memory. Unable to realloc %d bytes.", (int)mem_size); *rc = code_bad; return (ptr); } *rc = code_fine; return new_ptr; } #endif char* get_drawmode_name(DrawingMode drawmode) { if (drawmode == wireframe) return "wireframe"; if (drawmode == solid_fill) return "solid_fill"; if (drawmode == flat_shading) return "flat_shading"; if (drawmode == gouraud_shading) return "gouraud_shading"; if (drawmode == outlined_polygons) return "outlined_polygons"; if (drawmode == no_surface) return "none"; if (drawmode == bounding_box) return "bounding_box"; /* this should really be an error, but return gouraud_shading * as if nothing is wrong. */ return "gouraud_shading"; } char* get_simmkey_name(int keynum) { return keys[keynum]; } #if ! ENGINE /* SET_VIEWPORT: the MODELVIEW and PROJECTION matrices should * always be reset when you change the viewport. That's why * this utility routine exists. */ void set_viewport(int x1, int y1, int xsize, int ysize) { glViewport(x1, y1, xsize, ysize); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); } /* SET_ORTHO2: this utility function sets a 2D ortho, * with or without the infamous 0.5 offset. * * NOTE: I replaced the "-0.5 +0.5" approach with one I read about in * a document titled "OpenGL Correctness Tips" (from Microsoft's MSDN * online docs). This new approach involves a glTranslate by 0.375 * in the x and y direction. 
-- KMS 11/19/98 */ #define NEW_ORTHO_APPROACH 1 void set_ortho2o(Ortho box) { #if NEW_ORTHO_APPROACH glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D((int) box.x1, (int) box.x2, (int) box.y1, (int) box.y2); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glTranslatef(0.375, 0.375, 0.0); #else glOrtho(box.x1-0.5, box.x2+0.5, box.y1-0.5, box.y2+0.5, -1.0, 1.0); #endif } void set_ortho2i(GLint ortho[]) { glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(ortho[0], ortho[2], ortho[1], ortho[3]); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glTranslatef(0.375, 0.375, 0.0); } void set_ortho2(double x1, double x2, double y1, double y2) { #if NEW_ORTHO_APPROACH glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D((int) x1, (int) x2, (int) y1, (int) y2); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glTranslatef(0.375, 0.375, 0.0); #else glOrtho(x1-0.5, x2+0.5, y1-0.5, y2+0.5, -1.0, 1.0); #endif } /* ------------------------------------------------------------------------- get_associated_model - return the ModelStruct currently associated with the specified window. ---------------------------------------------------------------------------- */ ModelStruct* get_associated_model (int i) { ModelStruct* ms = NULL; if (i < 0) return NULL; if (root.window[i].type == SCENE) { // Assume that there is always at least one model in the scene // and return the first one. ms = root.window[i].win_struct->scene->model[0]; } else if (root.window[i].type == TOOL) { ToolStruct* tool = root.window[i].win_struct->tool; if (tool && tool->query_handler) tool->query_handler(GET_TOOL_MODEL, &ms); } else // plot window or other { /* If a plot is the topmost window there is only one model, * go ahead and return it as the 'selected' model. */ if (root.nummodels == 1) { int j; for (j=0; j<MODELBUFFER; j++) { if (gModel[j] != NULL) { ms = gModel[j]; break; } } } } return ms; } /* ------------------------------------------------------------------------- get_associated_plot - return the PlotStruct currently associated with the specified window. ---------------------------------------------------------------------------- */ PlotStruct* get_associated_plot (int i) { PlotStruct* plot = NULL; if (i < 0) return NULL; if (root.window[i].type == PLOT) { plot = root.window[i].win_struct->plot; } else if (root.window[i].type == TOOL) { ToolStruct* tool = root.window[i].win_struct->tool; if (tool && tool->query_handler) tool->query_handler(GET_TOOL_PLOT, &plot); } else if (root.window[i].type == PLOTKEY) { plot = root.window[i].win_struct->plotkey->plot; } return plot; } #endif /* ENGINE */ /* ------------------------------------------------------------------------- lookup_polyhedron - this routine tries to read the specified polyhedron by first checking the current directory, and then checking the standard SIMM directory for bones. TODO5.0: If one of the paths to search starts with a drive letter that does not exist, Vista will display an annoying error dialog (other versions of Windows quietly move on). Find some way to check for a valid drive letter before performing the action that causes the error. ---------------------------------------------------------------------------- */ FileReturnCode lookup_polyhedron(PolyhedronStruct* ph, char filename[], ModelStruct* ms) { int i; char* jointpath = NULL; char fullpath[CHARBUFFER], tmppath[CHARBUFFER]; FileReturnCode rc; /* (0) strip the joint file name from ms->jointfilename to get just * the path to the joint file. 
*/ if (ms && ms->jointfilename) { get_pure_path_from_path(ms->jointfilename, &jointpath); } /* (1) First check the bone folder specified in the joint file. * If this is an absolute path, use it as is. If it is a relative * path, it is assumed to be relative to the joint file's folder. */ if (ms && ms->bonepathname) { if (ms->jointfilename) build_full_path(jointpath, ms->bonepathname, tmppath); else build_full_path(NULL, ms->bonepathname, tmppath); build_full_path(tmppath, filename, fullpath); rc = read_polyhedron(ph, fullpath, yes); if (rc == file_good || rc == file_bad) { FREE_IFNOTNULL(jointpath); return rc; } } /* (2) Next check the folder "bones" under the joint file's * folder (PC only). */ if (ms->jointfilename) strcat3(tmppath, jointpath, DIR_SEP_STRING, "bones", CHARBUFFER); else strcpy(tmppath, "bones"); strcat3(fullpath, tmppath, DIR_SEP_STRING, filename, CHARBUFFER); rc = read_polyhedron(ph, fullpath, yes); if (rc == file_good || rc == file_bad) { FREE_IFNOTNULL(jointpath); return rc; } /* (3) Next check the joint file's folder itself (PC only). */ if (ms->jointfilename) strcpy(tmppath, jointpath); else strcpy(tmppath, "."); FREE_IFNOTNULL(jointpath); strcat3(fullpath, tmppath, DIR_SEP_STRING, filename, CHARBUFFER); rc = read_polyhedron(ph, fullpath, yes); if (rc == file_good || rc == file_bad) return rc; #if ! ENGINE /* (4) check the global bones folder. */ build_full_path(get_bones_folder(), filename, fullpath); rc = read_polyhedron(ph, fullpath, yes); if (rc == file_good || rc == file_bad) return rc; /* (5) check the mocap bones folder. */ sprintf(fullpath, "%s%s%s%s", get_mocap_folder(), "bones", DIR_SEP_STRING, filename); rc = read_polyhedron(ph, fullpath, yes); if (rc == file_good || rc == file_bad) return rc; #endif /* You only make it to here if the file was not found in * any of the folders. */ return file_missing; } /* ------------------------------------------------------------------------- is_absolute_path - return yes if the specified path is an absolute path, otherwise return no. ---------------------------------------------------------------------------- */ public SBoolean is_absolute_path (const char* path) { if (path == NULL) return no; while (path && *path && isspace(*path)) path++; #ifdef WIN32 if (*path == '/' || *path == DIR_SEP_CHAR || strlen(path) >= 3 && path[1] == ':' && (path[2] == DIR_SEP_CHAR || path[2] == '/')) { return yes; } else return no; #else return (SBoolean) (*path == DIR_SEP_CHAR); #endif } /* ------------------------------------------------------------------------- append_if_necessary - if the specified character 'c' is not the currently the last character in the specified string 'str', then append 'c' to 'str'. ---------------------------------------------------------------------------- */ void append_if_necessary (char* str, char c) { int n = strlen(str); if (n == 0) return; if (str[n-1] != c) { str[n] = c; str[n+1] = '\0'; } } /* ------------------------------------------------------------------------- build_full_path - this routine combines a "preference path" with a "file path" that may contain absolute or relative path specifications into a full path. ---------------------------------------------------------------------------- */ void build_full_path(const char* prefPath, const char* filePath, char* fullPath) { if (fullPath == NULL) return; // Ignore any leading spaces in 'prefPath' and 'filePath'. 
while (prefPath && *prefPath && isspace(*prefPath)) prefPath++; while (filePath && *filePath && isspace(*filePath)) filePath++; // Copy the preference path if necessary. if (prefPath && ! is_absolute_path(filePath)) { strcpy(fullPath, prefPath); strip_trailing_white_space(fullPath); append_if_necessary(fullPath, DIR_SEP_CHAR); } else *fullPath = '\0'; // Now append the file path. if (filePath) { strcat(fullPath, filePath); strip_trailing_white_space(fullPath); } } /* ------------------------------------------------------------------------- get_filename_from_path - ---------------------------------------------------------------------------- */ const char* get_filename_from_path (const char* pathname) { char *p = NULL, *p1 = NULL, *p2 = NULL; p1 = strrchr(pathname, DIR_SEP_CHAR); p2 = strrchr(pathname, '/'); p = _MAX(p1,p2); return p ? (p+1) : pathname; } /* ------------------------------------------------------------------------- get_pure_path_from_path - this function assumes that there is a filename on the end of the path! It has to because there is no way to distinguish between a file and a folder at the end of the path. ---------------------------------------------------------------------------- */ void get_pure_path_from_path (const char* fullPath, char** purePath) { int len; char *p = NULL, *p1 = NULL, *p2 = NULL; p1 = strrchr(fullPath, DIR_SEP_CHAR); p2 = strrchr(fullPath, '/'); p = _MAX(p1,p2); if (p) { len = p - fullPath; *purePath = (char*)simm_malloc((len+1) * sizeof(char)); strncpy(*purePath, fullPath, len); (*purePath)[len] = STRING_TERMINATOR; } else // fullPath is just a filename, so purePath is empty. { mstrcpy(purePath, "."); } } /* ------------------------------------------------------------------------- upperstr - ---------------------------------------------------------------------------- */ public void upperstr (char* s) { for ( ; *s; s++) *s = toupper(*s); } /* ------------------------------------------------------------------------- lowerstr - ---------------------------------------------------------------------------- */ public void lowerstr (char* s) { for ( ; *s; s++) *s = tolower(*s); } /* --------------------------------------------------------------------------- read_double - I've found that some implementations of scanf("%lf") do not read "NaN" correctly. This routine reads any floating-point number including "NaN". -- KMS 4/21/00 NOTE: this routine was originally static to analog.c. In moving it here I had to replace the call to _read_token() with fscanf("%s"). I don't think this will have any side-effects. ------------------------------------------------------------------------------ */ public ReturnCode read_double (FILE* f, double* value) { SBoolean eof = (SBoolean) (fscanf(f, "%s", buffer) != 1); if (eof) return code_bad; lowerstr(buffer); if (STRINGS_ARE_EQUAL(buffer, "nan")) { *value = 0.0; /* ACK! can't seem to find a way to assign NAN in VC++!! */ } else if (isdigit(buffer[0]) || buffer[0] == '.' || buffer[0] == '-' || buffer[0] == '+') *value = atof(buffer); else return code_bad; return code_fine; } /* --------------------------------------------------------------------------- read_double_tab - I've found that some implementations of scanf("%lf") do not read "NaN" correctly. This routine reads any floating-point number including "NaN". -- KMS 4/21/00 This function is like read_double(), but is designed for reading tab-delimited numbers (e.g., from XLS files). 
In these files, two tabs in a row means that a number field is empty-- detect this, read just the first tab from the file, and return 0.0 for the number. ------------------------------------------------------------------------------ */ public ReturnCode read_double_tab(FILE* f, double* value) { SBoolean eof; char c; long position = ftell(f); /* Two tabs in a row is the only allowable way to specify an empty number field, * so read two characters from the file and see if they are both tabs. */ c = fgetc(f); if (c == '\t') { position = ftell(f); c = fgetc(f); if (c == '\t') { /* Put the second tab back and return 0.0 for the empty field. */ fseek(f, position, SEEK_SET); *value = 0.0; return code_fine; } } /* Go back to the saved position to read the number. */ fseek(f, position, SEEK_SET); eof = (SBoolean) (fscanf(f, "%s", buffer) != 1); if (eof) return code_bad; lowerstr(buffer); if (STRINGS_ARE_EQUAL(buffer, "nan")) { *value = 0.0; /* ACK! can't seem to find a way to assign NAN in VC++!! */ } else if (isdigit(buffer[0]) || buffer[0] == '.' || buffer[0] == '-' || buffer[0] == '+') *value = atof(buffer); else return code_bad; return code_fine; } int strings_equal_case_insensitive(const char str1[], const char str2[]) { char buf1[1024]; /* make the strings upper case and compare them */ strcpy(buffer, str1); _strupr(buffer); strcpy(buf1, str2); _strupr(buf1); return !strcmp(buffer, buf1); } int strings_equal_n_case_insensitive(const char str1[], const char str2[], int n) { char buf1[1024]; if ((int)strlen(str1) < n || (int)strlen(str2) < n) return 0; /* make the strings upper case and compare them */ strncpy(buffer, str1, n); buffer[n] = STRING_TERMINATOR; _strupr(buffer); strncpy(buf1, str2, n); buf1[n] = STRING_TERMINATOR; _strupr(buf1); return !strcmp(buffer, buf1); } void addNameToString(char name[], char string[], int maxStringSize) { int newLen = strlen(name) + 2; // size of name + ", " int curLen = strlen(string); /* Add the name to the string as long as there is room * for it plus ", " plus "..." */ if (curLen + newLen + 5 < maxStringSize) { /* if curLen > 1, assume there's already a name in the string */ if (curLen > 1) strcat(string, ", "); strcat(string, name); } else if (curLen + 5 < maxStringSize) { /* if there is room for "..." and the string doesn't already end with "...", add it. */ if (strcmp(&string[curLen-3], "...")) strcat(string, ", ..."); } } void simmPrintMultiLines(char string[], SBoolean hilite, int lineSize, int pixelIndent) { int simmMsgFormat = 0; int len, start = 0, end = 0; if (!string || lineSize <= 0 || pixelIndent < 0) return; #if ! ENGINE if (hilite) { simmMsgFormat += HIGHLIGHT_TEXT; deiconify_message_window(); } #endif len = strlen(string); while (end < len) { end = start + lineSize - 1; // -1 because you include the char at 'end' if (end >= len) { end = len; } else { for (; end > start; end--) { if (CHAR_IS_WHITE_SPACE(string[end])) break; } /* If end == start, there is no white space in the line, * so set end back to what it was and split a word. */ if (end == start) end = start + lineSize - 1; } strncpy(buffer, &string[start], end - start + 1); buffer[end - start + 1] = STRING_TERMINATOR; message(buffer, simmMsgFormat, pixelIndent); start = end + 1; } } #if ! OPENSIM_BUILD && ! 
CORTEX_PLUGIN const char* get_preference(const char name[]) { int i; for (i=0; i<root.num_preferences; i++) { if (STRINGS_ARE_EQUAL(root.preference[i].name, name)) return root.preference[i].value; } return NULL; } SBoolean is_preference_on(const char value[]) { if (value) { strcpy(buffer, value); _strupr(buffer); if (STRINGS_ARE_EQUAL(buffer, "Y") || STRINGS_ARE_EQUAL(buffer, "YES") || STRINGS_ARE_EQUAL(buffer, "TRUE") || STRINGS_ARE_EQUAL(buffer, "ON") || STRINGS_ARE_EQUAL(buffer, "1") || STRINGS_ARE_EQUAL(buffer, "")) // pre-5.0, some options were on just by being defined (with no value specified) { return yes; } } return no; } void remove_preference(const char name[]) { int i, j; for (i=0; i<root.num_preferences; i++) { if (STRINGS_ARE_EQUAL(root.preference[i].name, name)) { for (j=i; j<root.num_preferences-1; j++) { root.preference[j].name = root.preference[j+1].name; root.preference[j].value = root.preference[j+1].value; } root.num_preferences--; return; } } } static SBoolean read_preference(FILE* file, char* name, char* value) { int len; char *p = value; while (1) { if (fscanf(file, "%s", name) != 1) return no; if (name[0] != '#') // Ignore commented lines. break; else { char buf[256]; fgets(buf, sizeof(buf), file); } } read_line(file, value); _strip_outer_whitespace(value); // Remove surrounding double-quotes, if any. len = strlen(value); if (len > 1 && value[0] == '\"' && value[len-1] == '\"') { memmove(value, &value[1], len-2); value[len-2] = STRING_TERMINATOR; } return yes; } void save_preferences_file(SBoolean verbose) { int i; FILE* fp; char filename[CHARBUFFER], mess[CHARBUFFER]; strcpy(filename, get_preference("RESOURCES_FOLDER")); append_if_necessary(filename, DIR_SEP_CHAR); strcat(filename, "preferences.txt"); fp = simm_fopen(filename, "w"); if (fp == NULL) { if (verbose == yes) { (void)sprintf(mess, "Unable to save preferences file: %s", filename); error(none, mess); } } else { for (i=0; i<root.num_preferences; i++) { int len = strlen(root.preference[i].value); if (root.preference[i].value[len-1] == DIR_SEP_CHAR) // put in double quotes so acpp won't barf when reading back in fprintf(fp, "%s\t\"%s\"\n", root.preference[i].name, root.preference[i].value); else fprintf(fp, "%s\t%s\n", root.preference[i].name, root.preference[i].value); } fclose(fp); if (verbose == yes) { (void)sprintf(mess, "Saved preferences to file: %s", filename); error(none, mess); } } } #if ! 
ENGINE void load_preferences_file(SBoolean verbose) { FILE* fp; char filename[CHARBUFFER], buf[CHARBUFFER], mess[CHARBUFFER]; const char* tempFileName = glutGetTempFileName(".preferences"); strcpy(filename, get_preference("RESOURCES_FOLDER")); append_if_necessary(filename, DIR_SEP_CHAR); strcat(filename, "preferences.txt"); if ((fp = preprocess_file(filename, tempFileName)) == NULL) { (void)sprintf(errorbuffer, "Unable to open preferences file %s", filename); error(none, errorbuffer); return; } if (fp == NULL) { strcpy(filename, get_preference("RESOURCES_FOLDER")); append_if_necessary(filename, DIR_SEP_CHAR); strcat(filename, "preferences"); fp = simm_fopen(filename, "r"); } if (fp) { static char value[CHARBUFFER]; while (read_preference(fp, buf, value)) enter_preference(buf, value); fclose(fp); if (verbose == yes) { (void)sprintf(mess, "Read preferences file %s", filename); message(mess, 0, DEFAULT_MESSAGE_X_OFFSET); } } else { if (verbose == yes) { (void)sprintf(mess, "Unable to open preferences file: %s", filename); error(none, mess); } } } #endif /* The following utility functions and static strings are * for getting preferences that can either be defined in the * preferences file, or depend on other preferences (user choice). * For example, the bones folder can be specified in the * preferences file ("BONES_FOLDER"), but if it is not, then * it is created by adding "bones" to the resources folder * (preference = "RESOURCES_FOLDER"). When get_bones_folder() is * called, it gets/builds the appropriate value and stores it * in a static string called bones_buf. It is therefore possible * that some code may get and hold a pointer to this static * string while other code later calls get_bones_folder() and * causes the string to be overwritten with a different * value. But this is an improper use of preferences. * SIMM code should not store any preference values; it should * get a preference, use it immediately, and discard it. This * allows the user to change preferences at any time and not * have problems with stale values. 
*/ static char bones_buf[CHARBUFFER]; const char* get_bones_folder(void) { const char* p = get_preference("BONES_FOLDER"); if (p) { strcpy(bones_buf, p); append_if_necessary(bones_buf, DIR_SEP_CHAR); } else { strcpy(bones_buf, get_preference("RESOURCES_FOLDER")); append_if_necessary(bones_buf, DIR_SEP_CHAR); strcat(bones_buf, "bones\\"); } return bones_buf; } static char help_buf[CHARBUFFER]; const char* get_help_folder(void) { const char* p = get_preference("HELP_FOLDER"); if (p) { strcpy(help_buf, p); append_if_necessary(help_buf, DIR_SEP_CHAR); } else { strcpy(help_buf, get_preference("RESOURCES_FOLDER")); append_if_necessary(help_buf, DIR_SEP_CHAR); strcat(help_buf, "help\\"); } return help_buf; } static char mocap_buf[CHARBUFFER]; const char* get_mocap_folder(void) { const char* p = get_preference("MOCAP_FOLDER"); if (p) { strcpy(mocap_buf, p); append_if_necessary(mocap_buf, DIR_SEP_CHAR); } else { strcpy(mocap_buf, get_preference("RESOURCES_FOLDER")); append_if_necessary(mocap_buf, DIR_SEP_CHAR); strcat(mocap_buf, "mocap\\"); } return mocap_buf; } static char mocap_misc_buf[CHARBUFFER]; const char* get_mocap_misc_folder(void) { const char* p = get_preference("MOCAP_MISC_FOLDER"); if (p) { strcpy(mocap_misc_buf, p); append_if_necessary(mocap_misc_buf, DIR_SEP_CHAR); } else { strcpy(mocap_misc_buf, get_mocap_folder()); append_if_necessary(mocap_misc_buf, DIR_SEP_CHAR); strcat(mocap_misc_buf, "misc\\"); } return mocap_misc_buf; } static char color_buf[CHARBUFFER]; const char* get_color_folder(void) { const char* p = get_preference("COLOR_FOLDER"); if (p) strcpy(color_buf, p); else strcpy(color_buf, get_preference("RESOURCES_FOLDER")); append_if_necessary(color_buf, DIR_SEP_CHAR); return color_buf; } #endif #if ! ENGINE /* ------------------------------------------------------------------------- lock_model - acquire the realtime mutex, but only if the model is currently receiving realtime motion data from EVaRT or from its simulation dll. ---------------------------------------------------------------------------- */ void lock_model(ModelStruct* ms) { if (is_model_realtime(ms) == rtNotConnected) return; /* If model is NULL, acquire the locks for all models. */ if (ms == NULL) { int i; for (i = 0; i < MODELBUFFER; i++) { if (gModel[i] && gModel[i]->modelLock) glutAcquireMutex(gModel[i]->modelLock); } } else { glutAcquireMutex(ms->modelLock); } } /* ------------------------------------------------------------------------- unlock_model - release the realtime mutex, but only if the model is currently receiving realtime motion data from EVaRT or from its simulation dll. ---------------------------------------------------------------------------- */ void unlock_model(ModelStruct* ms) { if (is_model_realtime(ms) == rtNotConnected) return; /* If model is NULL, release the locks for all models. */ if (ms == NULL) { int i; for (i = 0; i < MODELBUFFER; i++) { if (gModel[i] && gModel[i]->modelLock) glutReleaseMutex(gModel[i]->modelLock); } } else { glutReleaseMutex(ms->modelLock); } } PlotStruct* get_plot_by_name(const char name[]) { int i; for (i=0; i<PLOTBUFFER; i++) if (gPlot[i] && STRINGS_ARE_EQUAL(gPlot[i]->title, name)) return gPlot[i]; return NULL; } #endif /* ! 
ENGINE */ /* ------------------------------------------------------------------------- is_model_realtime - returns the current state of the model's realtime connection: rtMocap: connected to EVaRT rtSimulation: connected to a simulation dll rtNotConnected: not connected to either If 'ms' is NULL this function scans all of the models and returns the first realtime state that is not rtNotConnected. ---------------------------------------------------------------------------- */ RTConnection is_model_realtime(ModelStruct* ms) { if (ms == NULL) { int i; for (i = 0; i < MODELBUFFER; i++) { if (gModel[i] && gModel[i]->realtimeState != rtNotConnected) return gModel[i]->realtimeState; } return rtNotConnected; } else { return ms->realtimeState; } } int getMusclePointSegment(dpMuscleStruct *muscle, int pointIndex) { if (muscle == NULL || muscle->path == NULL) return -1; if (pointIndex < 0 || pointIndex >= muscle->path->num_orig_points) return -1; if (muscle->path->mp_orig == NULL) return -1; return muscle->path->mp_orig[pointIndex].segment; } void setMusclePointSegment(dpMuscleStruct *muscle, int pointIndex, int newSeg) { if (muscle == NULL || muscle->path == NULL) return; if (pointIndex < 0 || pointIndex >= muscle->path->num_orig_points) return; if (muscle->path->mp_orig == NULL) return; muscle->path->mp_orig[pointIndex].segment = newSeg; } int makeDir(const char aDirName[]) { #ifdef __linux__ return mkdir(aDirName,S_IRWXU); #else return _mkdir(aDirName); #endif } int find_string_in_list(const char name[], const char* string_list[], int n) { int i; for (i=0; i<n; i++) { if (STRINGS_ARE_EQUAL(name, string_list[i])) return i; } return -1; } #if ! ENGINE static char simm_clipboard[CHARBUFFER]; const char* get_clipboard_text(void) { if (OpenClipboard(__mdiClientHwnd) && IsClipboardFormatAvailable(CF_TEXT)) { HANDLE clipboard_handle = GetClipboardData(CF_TEXT); const char* clip = (const char*)GlobalLock(clipboard_handle); strncpy(simm_clipboard, clip, CHARBUFFER-1); GlobalUnlock(clipboard_handle); simm_clipboard[CHARBUFFER-1] = STRING_TERMINATOR; CloseClipboard(); } return simm_clipboard; } void set_clipboard_text(const char text[]) { if (OpenClipboard(__mdiClientHwnd) && IsClipboardFormatAvailable(CF_TEXT)) { #if 0 strcpy(simm_clipboard, text); SetClipboardData(CF_TEXT, simm_clipboard); CloseClipboard(); #else // Allocate a global memory object for the text. LPTSTR lptstrCopy; int len = strlen(text); HGLOBAL hglbCopy = GlobalAlloc(GMEM_MOVEABLE, (len+1) * sizeof(TCHAR)); if (hglbCopy == NULL) { CloseClipboard(); return; } // Lock the handle and copy the text to the buffer. strcpy(simm_clipboard, text); lptstrCopy = GlobalLock(hglbCopy); memcpy(lptstrCopy, simm_clipboard, len * sizeof(TCHAR)); lptstrCopy[len] = (TCHAR) 0; GlobalUnlock(hglbCopy); // Place the handle on the clipboard. SetClipboardData(CF_TEXT, hglbCopy); #endif } } #endif int getGencoordIndex(ModelStruct* model, GeneralizedCoord* gencoord) { int i; for (i=0; i<model->numgencoords; i++) if (model->gencoord[i] == gencoord) return i; return -1; } int getJointIndex(ModelStruct* model, JointStruct* joint) { int i; for (i=0; i<model->numjoints; i++) if (&model->joint[i] == joint) return i; return -1; } int getLigamentIndex(ModelStruct* model, char lig_name[]) { int i; for (i=0; i<model->numligaments; i++) if (STRINGS_ARE_EQUAL(lig_name, model->ligament[i].name)) return i; return -1; }
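/* -------------------------------------------------------------------------
   Editor's note: the short example below is NOT part of the original SIMM
   sources.  It is a minimal, hypothetical sketch showing how the string and
   path helpers defined above (mstrcpy, mstrcat, build_full_path, simm_fopen,
   FREE_IFNOTNULL) are intended to be combined.  It assumes the usual SIMM
   headers declaring these routines and the CHARBUFFER constant are already
   included, and it is wrapped in #if 0 so it is never compiled.
---------------------------------------------------------------------------- */
#if 0   /* illustration only */
static void example_open_relative_file(const char* prefFolder)
{
   char fullPath[CHARBUFFER];
   char* name = NULL;
   FILE* fp;

   /* mstrcpy() allocates fresh storage for the copy, so initialize the
    * destination pointer to NULL (any previous value is simply overwritten). */
   if (mstrcpy(&name, "bones") != code_fine)
      return;

   /* mstrcat() reallocates the destination and appends the new text. */
   if (mstrcat(&name, "/femur.asc") != code_fine)
   {
      FREE_IFNOTNULL(name);
      return;
   }

   /* build_full_path() prepends 'prefFolder' only when 'name' is a
    * relative path; absolute paths are copied through unchanged. */
   build_full_path(prefFolder, name, fullPath);

   /* simm_fopen() wraps fopen() to work around the CodeWarrior "w" bug
    * described above; for reading it behaves like plain fopen(). */
   fp = simm_fopen(fullPath, "r");
   if (fp)
      fclose(fp);

   FREE_IFNOTNULL(name);
}
#endif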
27,502
777
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package org.chromium.chrome.browser.suggestions;

import org.chromium.chrome.browser.ntp.snippets.SnippetArticle;
import org.chromium.content_public.browser.LoadUrlParams;

/**
 * Interface exposing to the suggestion surface methods to navigate to other parts of the browser.
 */
public interface SuggestionsNavigationDelegate {
    /** @return Whether context menus should allow the option to open a link in incognito. */
    boolean isOpenInIncognitoEnabled();

    /** @return Whether context menus should allow the option to open a link in a new window. */
    boolean isOpenInNewWindowEnabled();

    /** Opens the bookmarks page in the current tab. */
    void navigateToBookmarks();

    /** Opens the Download Manager UI in the current tab. */
    void navigateToDownloadManager();

    /** Opens the recent tabs page in the current tab. */
    void navigateToRecentTabs();

    /** Opens the help page for the content suggestions in the current tab. */
    void navigateToHelpPage();

    /**
     * Opens a content suggestion and records related metrics.
     * @param windowOpenDisposition How to open (current tab, new tab, new window etc).
     * @param article The content suggestion to open.
     */
    void openSnippet(int windowOpenDisposition, SnippetArticle article);

    /** Opens an url with the desired disposition. */
    void openUrl(int windowOpenDisposition, LoadUrlParams loadUrlParams);
}
445
675
<reponame>Texas-C/echo
/*
 * Copyright (c) 2019-2021 Valve Corporation
 * Copyright (c) 2019-2021 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: <NAME> <<EMAIL>>
 * Author: <NAME> <<EMAIL>>
 * Author: <NAME> <<EMAIL>>
 */
#pragma once

#include <vulkan/vulkan.h>
#include <string>

struct DeviceFeatures;

namespace sync_utils {

static constexpr VkQueueFlags kAllQueueTypes =
    (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT);

VkPipelineStageFlags2KHR DisabledPipelineStages(const DeviceFeatures& features);

// Expand all pipeline stage bits. If queue_flags and disabled_feature_mask is provided, the expansion of ALL_COMMANDS_BIT
// and ALL_GRAPHICS_BIT will be limited to what is supported.
VkPipelineStageFlags2KHR ExpandPipelineStages(VkPipelineStageFlags2KHR stage_mask, VkQueueFlags queue_flags = kAllQueueTypes,
                                              const VkPipelineStageFlags2KHR disabled_feature_mask = 0);

VkAccessFlags2KHR ExpandAccessFlags(VkAccessFlags2KHR access_mask);

VkAccessFlags2KHR CompatibleAccessMask(VkPipelineStageFlags2KHR stage_mask);

VkPipelineStageFlags2KHR WithEarlierPipelineStages(VkPipelineStageFlags2KHR stage_mask);

VkPipelineStageFlags2KHR WithLaterPipelineStages(VkPipelineStageFlags2KHR stage_mask);

int GetGraphicsPipelineStageLogicalOrdinal(VkPipelineStageFlags2KHR flag);

VkPipelineStageFlags2KHR GetLogicallyEarliestGraphicsPipelineStage(VkPipelineStageFlags2KHR inflags);

VkPipelineStageFlags2KHR GetLogicallyLatestGraphicsPipelineStage(VkPipelineStageFlags2KHR inflags);

std::string StringPipelineStageFlags(VkPipelineStageFlags2KHR mask);

std::string StringAccessFlags(VkAccessFlags2KHR mask);

struct ExecScopes {
    VkPipelineStageFlags2KHR src;
    VkPipelineStageFlags2KHR dst;
};
ExecScopes GetGlobalStageMasks(const VkDependencyInfoKHR& dep_info);

}  // namespace sync_utils
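// Editor's note: the snippet below is not part of the original header.  It is a
// minimal, hypothetical usage sketch of the declarations above, showing how a
// caller might expand a meta-stage mask and derive a compatible access mask.
// The include path "sync_utils.h" and the function DescribeBarrier() are
// illustrative assumptions; only the sync_utils:: calls come from this header.
#if 0  // illustration only
#include "sync_utils.h"
#include <iostream>

void DescribeBarrier(VkPipelineStageFlags2KHR src_stages, VkQueueFlags queue_flags) {
    // Expand meta-stages (e.g. ALL_COMMANDS / ALL_GRAPHICS) into the individual
    // stage bits that the given queue actually supports.
    VkPipelineStageFlags2KHR expanded = sync_utils::ExpandPipelineStages(src_stages, queue_flags);

    // Derive an access mask that is compatible with those expanded stages.
    VkAccessFlags2KHR access = sync_utils::CompatibleAccessMask(expanded);

    std::cout << "stages: " << sync_utils::StringPipelineStageFlags(expanded) << "\n"
              << "access: " << sync_utils::StringAccessFlags(access) << std::endl;
}
#endif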
892
1,808
// Utils includes
#include <utils/ColorArgb.h>

const ColorArgb ColorArgb::BLACK  = { 255,   0,   0,   0 };
const ColorArgb ColorArgb::RED    = { 255, 255,   0,   0 };
const ColorArgb ColorArgb::GREEN  = { 255,   0, 255,   0 };
const ColorArgb ColorArgb::BLUE   = { 255,   0,   0, 255 };
const ColorArgb ColorArgb::YELLOW = { 255, 255, 255,   0 };
const ColorArgb ColorArgb::WHITE  = { 255, 255, 255, 255 };
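// Editor's note (not part of the original source): the initializers above imply
// that ColorArgb stores its channels in the order { alpha, red, green, blue }.
// A minimal, hypothetical usage sketch follows; the member types and the helper
// function name are assumptions, not taken from the original header.
#if 0  // illustration only
#include <utils/ColorArgb.h>

static ColorArgb makeOpaqueGray(unsigned char level)
{
    ColorArgb c = { 255, level, level, level };  // fully opaque, equal R, G and B
    return c;
}
#endif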
171
844
<gh_stars>100-1000 from __future__ import absolute_import, division, print_function import numpy as np import pandas as pd from datashape.predicates import isscalar from toolz import concat, partition_all, compose from collections import Iterator, Iterable import datashape from datashape import discover from .core import NetworkDispatcher, ooc_types from .chunks import chunks, Chunks from .numpy_dtype import dshape_to_numpy from .utils import records_to_tuples from functools import partial convert = NetworkDispatcher('convert') @convert.register(np.ndarray, pd.DataFrame, cost=0.2) def dataframe_to_numpy(df, dshape=None, **kwargs): dtype = dshape_to_numpy(dshape or discover(df)) x = df.to_records(index=False) if x.dtype != dtype: x = x.astype(dtype) return x @convert.register(pd.DataFrame, np.ndarray, cost=1.0) def numpy_to_dataframe(x, dshape, **kwargs): dtype = x.dtype names = dtype.names if names is None: if dtype.kind == 'm': # pandas does not do this conversion for us but doesn't work # with non 'ns' unit timedeltas x = x.astype('m8[ns]') else: fields = dtype.fields new_dtype = [] should_astype = False for name in names: original_field_value = fields[name][0] if original_field_value.kind == 'm': # pandas does not do this conversion for us but doesn't work # with non 'ns' unit timedeltas new_dtype.append((name, 'm8[ns]')) # perform the astype at the end of the loop should_astype = True else: new_dtype.append((name, original_field_value)) if should_astype: x = x.astype(new_dtype) df = pd.DataFrame(x, columns=getattr(dshape.measure, 'names', names)) return df @convert.register(pd.Series, np.ndarray, cost=1.0) def numpy_to_series(x, **kwargs): names = x.dtype.names if names is not None: if len(names) > 1: raise ValueError('passed in an ndarray with more than 1 column') name, = names return pd.Series(x[name], name=name) return pd.Series(x) @convert.register(pd.Series, pd.DataFrame, cost=0.1) def DataFrame_to_Series(x, **kwargs): assert len(x.columns) == 1 return x[x.columns[0]] @convert.register(pd.DataFrame, pd.Series, cost=0.1) def series_to_dataframe(x, **kwargs): return x.to_frame() @convert.register(np.recarray, np.ndarray, cost=0.0) def ndarray_to_recarray(x, **kwargs): return x.view(np.recarray) @convert.register(np.ndarray, np.recarray, cost=0.0) def recarray_to_ndarray(x, **kwargs): return x.view(np.ndarray) higher_precision_freqs = frozenset(('ns', 'ps', 'fs', 'as')) @convert.register(np.ndarray, pd.Series, cost=0.1) def series_to_array(s, dshape=None, **kwargs): # if we come from a node that can't be discovered we need to discover # on s dtype = dshape_to_numpy(datashape.dshape(dshape or discover(s))) sdtype = s.dtype values = s.values # don't lose precision of datetime64 more precise than microseconds if ((issubclass(sdtype.type, np.datetime64) and np.datetime_data(sdtype)[0] in higher_precision_freqs) or s.dtype == dtype): return values try: return values.astype(dtype) except ValueError: # object series and record dshape, e.g., a frame row return values @convert.register(list, np.ndarray, cost=10.0) def numpy_to_list(x, **kwargs): dt = None if x.dtype == 'M8[ns]': dt = 'M8[us]' # lose precision when going to Python datetime if x.dtype.fields and any(x.dtype[n] == 'M8[ns]' for n in x.dtype.names): dt = [(n, 'M8[us]' if x.dtype[n] == 'M8[ns]' else x.dtype[n]) for n in x.dtype.names] if dt: return x.astype(dt).tolist() else: return x.tolist() @convert.register(np.ndarray, chunks(np.ndarray), cost=1.0) def numpy_chunks_to_numpy(c, **kwargs): return np.concatenate(list(c)) 
@convert.register(chunks(np.ndarray), np.ndarray, cost=0.5) def numpy_to_chunks_numpy(x, chunksize=2**20, **kwargs): return chunks(np.ndarray)( lambda: (x[i:i+chunksize] for i in range(0, x.shape[0], chunksize))) @convert.register(pd.DataFrame, chunks(pd.DataFrame), cost=1.0) def chunks_dataframe_to_dataframe(c, **kwargs): c = list(c) if not c: # empty case return pd.DataFrame(columns=kwargs.get('dshape').measure.names) else: return pd.concat(c, axis=0, ignore_index=True) @convert.register(chunks(pd.DataFrame), pd.DataFrame, cost=0.5) def dataframe_to_chunks_dataframe(x, chunksize=2**20, **kwargs): return chunks(pd.DataFrame)( lambda: (x.iloc[i:i+chunksize] for i in range(0, x.shape[0], chunksize))) def ishashable(x): try: hash(x) return True except: return False @convert.register(set, (list, tuple), cost=5.0) def iterable_to_set(x, **kwargs): if x and isinstance(x[0], (tuple, list)) and not ishashable(x): x = map(tuple, x) return set(x) @convert.register(list, (tuple, set), cost=1.0) def iterable_to_list(x, **kwargs): return list(x) @convert.register(tuple, (list, set), cost=1.0) def iterable_to_tuple(x, **kwargs): return tuple(x) def element_of(seq): """ >>> element_of([1, 2, 3]) 1 >>> element_of([[1, 2], [3, 4]]) 1 """ while isinstance(seq, list) and seq: seq = seq[0] return seq @convert.register(np.ndarray, list, cost=10.0) def list_to_numpy(seq, dshape=None, **kwargs): if isinstance(element_of(seq), dict): seq = list(records_to_tuples(dshape, seq)) if (seq and isinstance(seq[0], Iterable) and not ishashable(seq[0]) and not isscalar(dshape)): seq = list(map(tuple, seq)) return np.array(seq, dtype=dshape_to_numpy(dshape)) @convert.register(Iterator, list, cost=0.001) def list_to_iterator(L, **kwargs): return iter(L) @convert.register(list, Iterator, cost=1.0) def iterator_to_list(seq, **kwargs): return list(seq) @convert.register(Iterator, (chunks(pd.DataFrame), chunks(np.ndarray)), cost=10.0) def numpy_chunks_to_iterator(c, **kwargs): return concat(convert(Iterator, chunk, **kwargs) for chunk in c) @convert.register(chunks(np.ndarray), Iterator, cost=10.0) def iterator_to_numpy_chunks(seq, chunksize=1024, **kwargs): seq2 = partition_all(chunksize, seq) try: first, rest = next(seq2), seq2 except StopIteration: # seq is empty def _(): yield convert(np.ndarray, [], **kwargs) else: x = convert(np.ndarray, first, **kwargs) def _(): yield x for i in rest: yield convert(np.ndarray, i, **kwargs) return chunks(np.ndarray)(_) @convert.register(chunks(pd.DataFrame), Iterator, cost=10.0) def iterator_to_DataFrame_chunks(seq, chunksize=1024, **kwargs): seq2 = partition_all(chunksize, seq) add_index = kwargs.get('add_index', False) if not add_index: # Simple, we can dispatch to dask... f = lambda d: convert(pd.DataFrame, d, **kwargs) data = [partial(f, d) for d in seq2] if not data: data = [convert(pd.DataFrame, [], **kwargs)] return chunks(pd.DataFrame)(data) # TODO: Decide whether we should support the `add_index` flag at all. # If so, we need to post-process the converted DataFrame objects sequencially, # so we can't parallelize the process. 
try: first, rest = next(seq2), seq2 except StopIteration: def _(): yield convert(pd.DataFrame, [], **kwargs) else: df = convert(pd.DataFrame, first, **kwargs) df1, n1 = _add_index(df, 0) def _(): n = n1 yield df1 for i in rest: df = convert(pd.DataFrame, i, **kwargs) df, n = _add_index(df, n) yield df return chunks(pd.DataFrame)(_) def _add_index(df, start, _idx_type=getattr(pd, 'RangeIndex', compose(pd.Index, np.arange))): stop = start + len(df) idx = _idx_type(start=start, stop=stop) df.index = idx return df, stop @convert.register(tuple, np.record) def numpy_record_to_tuple(rec, **kwargs): return rec.tolist() @convert.register(chunks(np.ndarray), chunks(pd.DataFrame), cost=0.5) def chunked_pandas_to_chunked_numpy(c, **kwargs): return chunks(np.ndarray)(lambda: (convert(np.ndarray, chunk, **kwargs) for chunk in c)) @convert.register(chunks(pd.DataFrame), chunks(np.ndarray), cost=0.5) def chunked_numpy_to_chunked_pandas(c, **kwargs): return chunks(pd.DataFrame)(lambda: (convert(pd.DataFrame, chunk, **kwargs) for chunk in c)) @convert.register(chunks(np.ndarray), chunks(list), cost=10.0) def chunked_list_to_chunked_numpy(c, **kwargs): return chunks(np.ndarray)(lambda: (convert(np.ndarray, chunk, **kwargs) for chunk in c)) @convert.register(chunks(list), chunks(np.ndarray), cost=10.0) def chunked_numpy_to_chunked_list(c, **kwargs): return chunks(list)(lambda: (convert(list, chunk, **kwargs) for chunk in c)) @convert.register(chunks(Iterator), chunks(list), cost=0.1) def chunked_list_to_chunked_iterator(c, **kwargs): return chunks(Iterator)(c.data) @convert.register(chunks(list), chunks(Iterator), cost=0.1) def chunked_Iterator_to_chunked_list(c, **kwargs): return chunks(Iterator)(lambda: (convert(Iterator, chunk, **kwargs) for chunk in c)) @convert.register(Iterator, chunks(Iterator), cost=0.1) def chunked_iterator_to_iterator(c, **kwargs): return concat(c) ooc_types |= set([Iterator, Chunks])
4,323
892
{
  "schema_version": "1.2.0",
  "id": "GHSA-3mg7-6jfw-73pw",
  "modified": "2022-05-01T18:22:53Z",
  "published": "2022-05-01T18:22:53Z",
  "aliases": [
    "CVE-2007-4372"
  ],
  "details": "Unspecified vulnerability in NetWin SurgeMail 38k on Windows Server 2003 has unknown impact and remote attack vectors. NOTE: this information is based upon a vague advisory by a vulnerability information sales organization that does not coordinate with vendors or release actionable advisories. A CVE has been assigned for tracking purposes, but duplicates with other CVEs are difficult to determine.",
  "severity": [],
  "affected": [],
  "references": [
    {
      "type": "ADVISORY",
      "url": "https://nvd.nist.gov/vuln/detail/CVE-2007-4372"
    },
    {
      "type": "WEB",
      "url": "http://osvdb.org/46400"
    },
    {
      "type": "WEB",
      "url": "http://wslabi.com/wabisabilabi/showBidInfo.do?code=ZD-00000078"
    }
  ],
  "database_specific": {
    "cwe_ids": [],
    "severity": "HIGH",
    "github_reviewed": false
  }
}
420
713
package org.infinispan.stream.impl.intops;

import java.util.stream.BaseStream;
import java.util.stream.Stream;

/**
 * Interface to signify that an {@link IntermediateOperation} is a flat map operation. This also provides proper
 * generics for converting a flat map as a map operation resulting in a Stream containing the proper stream
 * @author wburns
 * @since 9.0
 */
public interface FlatMappingOperation<InputType, InputStream extends BaseStream<InputType, InputStream>,
      OutputType, OutputStream extends BaseStream<OutputType, OutputStream>>
      extends MappingOperation<InputType, InputStream, OutputType, OutputStream> {
   /**
    * Instead of flat mapping this returns a stream of {@link OutputStream}.
    * @param inputStream the stream to convert
    * @return the stream of streams
    */
   Stream<OutputStream> map(InputStream inputStream);
}
238
322
<filename>src/nullappender.cxx
// Module:  Log4CPLUS
// File:    nullappender.cxx
// Created: 6/2003
// Author:  <NAME>
//
//
// Copyright 2003-2017 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <log4cplus/nullappender.h>
#include <log4cplus/thread/syncprims-pub-impl.h>

namespace log4cplus
{

///////////////////////////////////////////////////////////////////////////////
// NullAppender ctors and dtor
///////////////////////////////////////////////////////////////////////////////

NullAppender::NullAppender()
{
}

NullAppender::NullAppender(const helpers::Properties& properties)
    : Appender(properties)
{
}

NullAppender::~NullAppender()
{
    destructorImpl();
}

///////////////////////////////////////////////////////////////////////////////
// NullAppender public methods
///////////////////////////////////////////////////////////////////////////////

void NullAppender::close()
{
}

///////////////////////////////////////////////////////////////////////////////
// NullAppender protected methods
///////////////////////////////////////////////////////////////////////////////

// This method does not need to be locked since it is called by
// doAppend() which performs the locking
void NullAppender::append(const spi::InternalLoggingEvent&)
{
}

} // namespace log4cplus
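// Editor's note: the snippet below is not part of the original file.  It is a
// minimal, hypothetical sketch of how a NullAppender is typically attached to a
// logger so that events are accepted and then silently discarded (handy when
// benchmarking the logging path).  The API names are used from memory and should
// be checked against the log4cplus headers actually in use.
#if 0  // illustration only
#include <log4cplus/logger.h>
#include <log4cplus/nullappender.h>
#include <log4cplus/loggingmacros.h>

void attachNullAppender()
{
    log4cplus::SharedAppenderPtr appender(new log4cplus::NullAppender());
    appender->setName(LOG4CPLUS_TEXT("null"));

    log4cplus::Logger logger = log4cplus::Logger::getRoot();
    logger.addAppender(appender);

    // The event goes through the appender machinery, but append() above does nothing.
    LOG4CPLUS_INFO(logger, LOG4CPLUS_TEXT("this event is accepted and then dropped"));
}
#endif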
457