Dataset fields (name: type, observed value-length range):
corpus_id: string (length 7 to 12)
paper_id: string (length 9 to 16)
title: string (length 1 to 261)
abstract: string (length 70 to 4.02k)
source: string (1 distinct value)
bibtex: string (length 208 to 20.9k)
citation_key: string (length 6 to 100)
arxiv-665001
2410.02376
Distributed Learning with Discretely Observed Functional Data
<|reference_start|>Distributed Learning with Discretely Observed Functional Data: By selecting different filter functions, spectral algorithms can generate various regularization methods to solve statistical inverse problems within the learning-from-samples framework. This paper combines distributed spectral algorithms with Sobolev kernels to tackle the functional linear regression problem. The design and mathematical analysis of the algorithms require only that the functional covariates are observed at discrete sample points. Furthermore, the hypothesis function spaces of the algorithms are the Sobolev spaces generated by the Sobolev kernels, optimizing both approximation capability and flexibility. Through the establishment of regularity conditions for the target function and functional covariate, we derive matching upper and lower bounds for the convergence of the distributed spectral algorithms in the Sobolev norm. This demonstrates that the proposed regularity conditions are reasonable and that the convergence analysis under these conditions is tight, capturing the essential characteristics of functional linear regression. The analytical techniques and estimates developed in this paper also enhance existing results in the previous literature.<|reference_end|>
arxiv
@article{liu2024distributed, title={Distributed Learning with Discretely Observed Functional Data}, author={Jiading Liu and Lei Shi}, journal={arXiv preprint arXiv:2410.02376}, year={2024}, archivePrefix={arXiv}, eprint={2410.02376}, primaryClass={stat.ML cs.LG} }
liu2024distributed
arxiv-665002
2410.02378
Towards Comprehensive Detection of Chinese Harmful Memes
<|reference_start|>Towards Comprehensive Detection of Chinese Harmful Memes: This paper has been accepted in the NeurIPS 2024 D & B Track. Harmful memes have proliferated on the Chinese Internet, while research on detecting Chinese harmful memes significantly lags behind due to the absence of reliable datasets and effective detectors. To this end, we focus on the comprehensive detection of Chinese harmful memes. We construct ToxiCN MM, the first Chinese harmful meme dataset, which consists of 12,000 samples with fine-grained annotations for various meme types. Additionally, we propose a baseline detector, Multimodal Knowledge Enhancement (MKE), incorporating contextual information of meme content generated by the LLM to enhance the understanding of Chinese memes. During the evaluation phase, we conduct extensive quantitative experiments and qualitative analyses on multiple baselines, including LLMs and our MKE. The experimental results indicate that detecting Chinese harmful memes is challenging for existing models while demonstrating the effectiveness of MKE. The resources for this paper are available at https://github.com/DUT-lujunyu/ToxiCN_MM.<|reference_end|>
arxiv
@article{lu2024towards, title={Towards Comprehensive Detection of Chinese Harmful Memes}, author={Junyu Lu, Bo Xu, Xiaokun Zhang, Hongbo Wang, Haohao Zhu, Dongyu Zhang, Liang Yang, Hongfei Lin}, journal={arXiv preprint arXiv:2410.02378}, year={2024}, archivePrefix={arXiv}, eprint={2410.02378}, primaryClass={cs.CL cs.AI} }
lu2024towards
arxiv-665003
2410.02381
MetaMetrics: Calibrating Metrics For Generation Tasks Using Human Preferences
<|reference_start|>MetaMetrics: Calibrating Metrics For Generation Tasks Using Human Preferences: Understanding the quality of a performance evaluation metric is crucial for ensuring that model outputs align with human preferences. However, it remains unclear how well each metric captures the diverse aspects of these preferences, as metrics often excel in one particular area but not across all dimensions. To address this, it is essential to systematically calibrate metrics to specific aspects of human preference, catering to the unique characteristics of each aspect. We introduce MetaMetrics, a calibrated meta-metric designed to evaluate generation tasks across different modalities in a supervised manner. MetaMetrics optimizes the combination of existing metrics to enhance their alignment with human preferences. Our metric demonstrates flexibility and effectiveness in both language and vision downstream tasks, showing significant benefits across various multilingual and multi-domain scenarios. MetaMetrics aligns closely with human preferences and is highly extendable and easily integrable into any application. This makes MetaMetrics a powerful tool for improving the evaluation of generation tasks, ensuring that metrics are more representative of human judgment across diverse contexts.<|reference_end|>
arxiv
@article{winata2024metametrics:, title={MetaMetrics: Calibrating Metrics For Generation Tasks Using Human Preferences}, author={Genta Indra Winata, David Anugraha, Lucky Susanto, Garry Kuwanto, Derry Tanti Wijaya}, journal={arXiv preprint arXiv:2410.02381}, year={2024}, archivePrefix={arXiv}, eprint={2410.02381}, primaryClass={cs.CL cs.AI cs.CV cs.LG} }
winata2024metametrics:
arxiv-665004
2410.02384
Unveiling AI's Blind Spots: An Oracle for In-Domain, Out-of-Domain, and Adversarial Errors
<|reference_start|>Unveiling AI's Blind Spots: An Oracle for In-Domain, Out-of-Domain, and Adversarial Errors: AI models make mistakes when recognizing images, whether in-domain, out-of-domain, or adversarial. Predicting these errors is critical for improving system reliability, reducing costly mistakes, and enabling proactive corrections in real-world applications such as healthcare, finance, and autonomous systems. However, understanding what mistakes AI models make, why they occur, and how to predict them remains an open challenge. Here, we conduct comprehensive empirical evaluations using a "mentor" model, a deep neural network designed to predict another model's errors. Our findings show that the mentor model excels at learning from a mentee's mistakes on adversarial images with small perturbations and generalizes effectively to predict in-domain and out-of-domain errors of the mentee. Additionally, transformer-based mentor models excel at predicting errors across various mentee architectures. Subsequently, we draw insights from these observations and develop an "oracle" mentor model, dubbed SuperMentor, that achieves 78% accuracy in predicting errors across different error types. Our error prediction framework paves the way for future research on anticipating and correcting AI model behaviours, ultimately increasing trust in AI systems. All code, models, and data will be made publicly available.<|reference_end|>
arxiv
@article{han2024unveiling, title={Unveiling AI's Blind Spots: An Oracle for In-Domain, Out-of-Domain, and Adversarial Errors}, author={Shuangpeng Han, Mengmi Zhang}, journal={arXiv preprint arXiv:2410.02384}, year={2024}, archivePrefix={arXiv}, eprint={2410.02384}, primaryClass={cs.LG} }
han2024unveiling
arxiv-665005
2410.02385
Designing Mechanical Meta-Materials by Learning Equivariant Flows
<|reference_start|>Designing Mechanical Meta-Materials by Learning Equivariant Flows: Mechanical meta-materials are solids whose geometric structure results in exotic nonlinear behaviors that are not typically achievable via homogeneous materials. We show how to drastically expand the design space of a class of mechanical meta-materials known as cellular solids, by generalizing beyond translational symmetry. This is made possible by transforming a reference geometry according to a divergence free flow that is parameterized by a neural network and equivariant under the relevant symmetry group. We show how to construct flows equivariant to the space groups, despite the fact that these groups are not compact. Coupling this flow with a differentiable nonlinear mechanics simulator allows us to represent a much richer set of cellular solids than was previously possible. These materials can be optimized to exhibit desirable mechanical properties such as negative Poisson's ratios or to match target stress-strain curves. We validate these new designs in simulation and by fabricating real-world prototypes. We find that designs with higher-order symmetries can exhibit a wider range of behaviors.<|reference_end|>
arxiv
@article{mirramezani2024designing, title={Designing Mechanical Meta-Materials by Learning Equivariant Flows}, author={Mehran Mirramezani, Anne S. Meeussen, Katia Bertoldi, Peter Orbanz, Ryan P. Adams}, journal={arXiv preprint arXiv:2410.02385}, year={2024}, archivePrefix={arXiv}, eprint={2410.02385}, primaryClass={cs.CE} }
mirramezani2024designing
arxiv-665006
2410.02387
BiSSL: Bilevel Optimization for Self-Supervised Pre-Training and Fine-Tuning
<|reference_start|>BiSSL: Bilevel Optimization for Self-Supervised Pre-Training and Fine-Tuning: In this work, we present BiSSL, a first-of-its-kind training framework that introduces bilevel optimization to enhance the alignment between the pretext pre-training and downstream fine-tuning stages in self-supervised learning. BiSSL formulates the pretext and downstream task objectives as the lower- and upper-level objectives in a bilevel optimization problem and serves as an intermediate training stage within the self-supervised learning pipeline. By more explicitly modeling the interdependence of these training stages, BiSSL facilitates enhanced information sharing between them, ultimately leading to a backbone parameter initialization that is better suited for the downstream task. We propose a training algorithm that alternates between optimizing the two objectives defined in BiSSL. Using a ResNet-18 backbone pre-trained with SimCLR on the STL10 dataset, we demonstrate that our proposed framework consistently achieves improved or competitive classification accuracies across various downstream image classification datasets compared to the conventional self-supervised learning pipeline. Qualitative analyses of the backbone features further suggest that BiSSL enhances the alignment of downstream features in the backbone prior to fine-tuning.<|reference_end|>
arxiv
@article{zakarias2024bissl:, title={BiSSL: Bilevel Optimization for Self-Supervised Pre-Training and Fine-Tuning}, author={Gustav Wagner Zakarias, Lars Kai Hansen, Zheng-Hua Tan}, journal={arXiv preprint arXiv:2410.02387}, year={2024}, archivePrefix={arXiv}, eprint={2410.02387}, primaryClass={cs.LG cs.AI} }
zakarias2024bissl:
arxiv-665007
2410.02388
Boosting Perturbed Gradient Ascent for Last-Iterate Convergence in Games
<|reference_start|>Boosting Perturbed Gradient Ascent for Last-Iterate Convergence in Games: This paper presents a payoff perturbation technique, introducing a strong convexity to players' payoff functions in games. This technique is specifically designed for first-order methods to achieve last-iterate convergence in games where the gradient of the payoff functions is monotone in the strategy profile space, potentially containing additive noise. Although perturbation is known to facilitate the convergence of learning algorithms, the magnitude of perturbation requires careful adjustment to ensure last-iterate convergence. Previous studies have proposed a scheme in which the magnitude is determined by the distance from a periodically re-initialized anchoring or reference strategy. Building upon this, we propose Gradient Ascent with Boosting Payoff Perturbation, which incorporates a novel perturbation into the underlying payoff function, maintaining the periodically re-initializing anchoring strategy scheme. This innovation empowers us to provide faster last-iterate convergence rates against the existing payoff perturbed algorithms, even in the presence of additive noise.<|reference_end|>
arxiv
@article{abe2024boosting, title={Boosting Perturbed Gradient Ascent for Last-Iterate Convergence in Games}, author={Kenshi Abe, Mitsuki Sakamoto, Kaito Ariu, Atsushi Iwasaki}, journal={arXiv preprint arXiv:2410.02388}, year={2024}, archivePrefix={arXiv}, eprint={2410.02388}, primaryClass={cs.GT} }
abe2024boosting
arxiv-665008
2410.02389
Diffusion Meets Options: Hierarchical Generative Skill Composition for Temporally-Extended Tasks
<|reference_start|>Diffusion Meets Options: Hierarchical Generative Skill Composition for Temporally-Extended Tasks: Safe and successful deployment of robots requires not only the ability to generate complex plans but also the capacity to frequently replan and correct execution errors. This paper addresses the challenge of long-horizon trajectory planning under temporally extended objectives in a receding horizon manner. To this end, we propose DOPPLER, a data-driven hierarchical framework that generates and updates plans based on instructions specified by linear temporal logic (LTL). Our method decomposes temporal tasks into chains of options with hierarchical reinforcement learning from offline non-expert datasets. It leverages diffusion models to generate options with low-level actions. We devise a determinantal-guided posterior sampling technique during batch generation, which improves the speed and diversity of diffusion-generated options, leading to more efficient querying. Experiments on robot navigation and manipulation tasks demonstrate that DOPPLER can generate sequences of trajectories that progressively satisfy the specified formulae for obstacle avoidance and sequential visitation. Demonstration videos are available online at: https://philiptheother.github.io/doppler/.<|reference_end|>
arxiv
@article{feng2024diffusion, title={Diffusion Meets Options: Hierarchical Generative Skill Composition for Temporally-Extended Tasks}, author={Zeyu Feng, Hao Luan, Kevin Yuchen Ma, Harold Soh}, journal={arXiv preprint arXiv:2410.02389}, year={2024}, archivePrefix={arXiv}, eprint={2410.02389}, primaryClass={cs.RO cs.AI cs.LG} }
feng2024diffusion
arxiv-665009
2410.02391
Optimizing MIMO Efficiency in 5G through Precoding Matrix Techniques
<|reference_start|>Optimizing MIMO Efficiency in 5G through Precoding Matrix Techniques: Multiple-Input Multiple-Output (MIMO) systems play a crucial role in fifth-generation (5G) mobile communications, primarily achieved through the utilization of precoding matrix techniques. This paper presents precoding techniques employing codebooks in downlink MIMO-5G wireless communications, aiming to enhance network performance to meet the overarching 5G objectives of increased capacity and reduced latency. We conduct a comparative analysis of various precoding techniques outlined by the 5G standard through diverse simulations across different scenarios. These simulations enable us to assess the performance of the different precoding techniques, ultimately revealing the strengths and weaknesses inherent in Type I and Type II codebooks.<|reference_end|>
arxiv
@article{díaz-ruiz2024optimizing, title={Optimizing MIMO Efficiency in 5G through Precoding Matrix Techniques}, author={Francisco D\'iaz-Ruiz, Francisco J. Mart\'in-Vega, Gerardo G\'omez and Mari Carmen Aguayo-Torres}, journal={arXiv preprint arXiv:2410.02391}, year={2024}, archivePrefix={arXiv}, eprint={2410.02391}, primaryClass={cs.IT eess.SP math.IT} }
díaz-ruiz2024optimizing
arxiv-665010
2410.02392
MANTRA: The Manifold Triangulations Assemblage
<|reference_start|>MANTRA: The Manifold Triangulations Assemblage: The rising interest in leveraging higher-order interactions present in complex systems has led to a surge in more expressive models exploiting high-order structures in the data, especially in topological deep learning (TDL), which designs neural networks on high-order domains such as simplicial complexes. However, progress in this field is hindered by the scarcity of datasets for benchmarking these architectures. To address this gap, we introduce MANTRA, the first large-scale, diverse, and intrinsically high order dataset for benchmarking high-order models, comprising over 43,000 and 249,000 triangulations of surfaces and three-dimensional manifolds, respectively. With MANTRA, we assess several graph- and simplicial complex-based models on three topological classification tasks. We demonstrate that while simplicial complex-based neural networks generally outperform their graph-based counterparts in capturing simple topological invariants, they also struggle, suggesting a rethink of TDL. Thus, MANTRA serves as a benchmark for assessing and advancing topological methods, leading the way for more effective high-order models.<|reference_end|>
arxiv
@article{ballester2024mantra:, title={MANTRA: The Manifold Triangulations Assemblage}, author={Rub\'en Ballester and Ernst R\"oell and Daniel Bin Schmid and Mathieu Alain and Sergio Escalera and Carles Casacuberta and Bastian Rieck}, journal={arXiv preprint arXiv:2410.02392}, year={2024}, archivePrefix={arXiv}, eprint={2410.02392}, primaryClass={cs.LG math.AT} }
ballester2024mantra:
arxiv-665011
2410.02394
Online Multi-Label Classification under Noisy and Changing Label Distribution
<|reference_start|>Online Multi-Label Classification under Noisy and Changing Label Distribution: Multi-label data streams usually contain noisy labels in real-world applications, occurring in both relevant and irrelevant labels. However, existing online multi-label classification methods are mostly limited in terms of label quality and fail to deal with the case of noisy labels. On the other hand, the ground-truth label distribution may vary as time changes, which is hidden in the observed noisy label distribution and difficult to track, posing a major challenge for concept drift adaptation. Motivated by this, we propose an online multi-label classification algorithm under Noisy and Changing Label Distribution (NCLD). The convex objective is designed to simultaneously model the label scoring and the label ranking for high accuracy, whose robustness to NCLD benefits from three novel works: 1) The local feature graph is used to reconstruct the label scores jointly with the observed labels, and an unbiased ranking loss is derived and applied to learn reliable ranking information. 2) By detecting the difference between two adjacent chunks with the unbiased label cardinality, we identify the change in the ground-truth label distribution and reset the ranking or all information learned from the past to match the new distribution. 3) Efficient and accurate updating is achieved based on the updating rule derived from the closed-form optimal model solution. Finally, empirical experimental results validate the effectiveness of our method in classifying instances under NCLD.<|reference_end|>
arxiv
@article{zou2024online, title={Online Multi-Label Classification under Noisy and Changing Label Distribution}, author={Yizhang Zou, Xuegang Hu, Peipei Li, Jun Hu, You Wu}, journal={arXiv preprint arXiv:2410.02394}, year={2024}, archivePrefix={arXiv}, eprint={2410.02394}, primaryClass={cs.LG cs.AI} }
zou2024online
arxiv-665012
2410.02396
Parameter Competition Balancing for Model Merging
<|reference_start|>Parameter Competition Balancing for Model Merging: While fine-tuning pretrained models has become common practice, these models often underperform outside their specific domains. Recently developed model merging techniques enable the direct integration of multiple models, each fine-tuned for distinct tasks, into a single model. This strategy promotes multitasking capabilities without requiring retraining on the original datasets. However, existing methods fall short in addressing potential conflicts and complex correlations between tasks, especially in parameter-level adjustments, posing a challenge in effectively balancing parameter competition across various tasks. This paper introduces an innovative technique named PCB-Merging (Parameter Competition Balancing), a lightweight and training-free technique that adjusts the coefficients of each parameter for effective model merging. PCB-Merging employs intra-balancing to gauge parameter significance within individual tasks and inter-balancing to assess parameter similarities across different tasks. Parameters with low importance scores are dropped, and the remaining ones are rescaled to form the final merged model. We assessed our approach in diverse merging scenarios, including cross-task, cross-domain, and cross-training configurations, as well as out-of-domain generalization. The experimental results reveal that our approach achieves substantial performance enhancements across multiple modalities, domains, model sizes, number of tasks, fine-tuning forms, and large language models, outperforming existing model merging methods. The code is publicly available at: \url{https://github.com/duguodong7/pcb-merging}.<|reference_end|>
arxiv
@article{du2024parameter, title={Parameter Competition Balancing for Model Merging}, author={Guodong Du, Junlin Lee, Jing Li, Runhua Jiang, Yifei Guo, Shuyang Yu, Hanting Liu, Sim Kuan Goh, Ho-Kin Tang, Daojing He, Min Zhang}, journal={arXiv preprint arXiv:2410.02396}, year={2024}, archivePrefix={arXiv}, eprint={2410.02396}, primaryClass={cs.CV cs.AI cs.CL cs.LG} }
du2024parameter
arxiv-665013
2410.02400
An Online Feasible Point Method for Benign Generalized Nash Equilibrium Problems
<|reference_start|>An Online Feasible Point Method for Benign Generalized Nash Equilibrium Problems: We consider a repeatedly played generalized Nash equilibrium game. This induces a multi-agent online learning problem with joint constraints. An important challenge in this setting is that the feasible set for each agent depends on the simultaneous moves of the other agents and, therefore, varies over time. As a consequence, the agents face time-varying constraints, which are not adversarial but rather endogenous to the system. Prior work in this setting focused on convergence to a feasible solution in the limit via integrating the constraints in the objective as a penalty function. However, no existing work can guarantee that the constraints are satisfied for all iterations while simultaneously guaranteeing convergence to a generalized Nash equilibrium. This is a problem of fundamental theoretical interest and practical relevance. In this work, we introduce a new online feasible point method. Under the assumption that limited communication between the agents is allowed, this method guarantees feasibility. We identify the class of benign generalized Nash equilibrium problems, for which the convergence of our method to the equilibrium is guaranteed. We set this class of benign generalized Nash equilibrium games in context with existing definitions and illustrate our method with examples.<|reference_end|>
arxiv
@article{sachs2024an, title={An Online Feasible Point Method for Benign Generalized Nash Equilibrium Problems}, author={Sarah Sachs, Hedi Hadiji, Tim van Erven, Mathias Staudigl}, journal={arXiv preprint arXiv:2410.02400}, year={2024}, archivePrefix={arXiv}, eprint={2410.02400}, primaryClass={cs.LG} }
sachs2024an
arxiv-665014
2410.02401
SynCo: Synthetic Hard Negatives in Contrastive Learning for Better Unsupervised Visual Representations
<|reference_start|>SynCo: Synthetic Hard Negatives in Contrastive Learning for Better Unsupervised Visual Representations: Contrastive learning has become a dominant approach in self-supervised visual representation learning. Hard negatives - samples closely resembling the anchor - are key to enhancing learned representations' discriminative power. However, efficiently leveraging hard negatives remains challenging. We introduce SynCo (sYnthetic Negatives in Contrastive learning), a novel approach that improves model performance by generating synthetic hard negatives on the representation space. Building on the MoCo framework, SynCo introduces six strategies for creating diverse synthetic hard negatives on-the-fly with minimal computational overhead. SynCo achieves faster training and better representation learning, reaching 67.9% top-1 accuracy on ImageNet ILSVRC-201 linear evaluation after 200 pretraining epochs, surpassing MoCo's 67.5% using the same ResNet-50 encoder. It also transfers more effectively to detection tasks: on PASCAL VOC, it outperforms both the supervised baseline and MoCo with 82.6% AP; on COCO, it sets new benchmarks with 41.0% AP for bounding box detection and 35.7% AP for instance segmentation. Our synthetic hard negative generation approach significantly enhances visual representations learned through self-supervised contrastive learning. Code is available at https://github.com/giakoumoglou/synco.<|reference_end|>
arxiv
@article{giakoumoglou2024synco:, title={SynCo: Synthetic Hard Negatives in Contrastive Learning for Better Unsupervised Visual Representations}, author={Nikolaos Giakoumoglou, Tania Stathaki}, journal={arXiv preprint arXiv:2410.02401}, year={2024}, archivePrefix={arXiv}, eprint={2410.02401}, primaryClass={cs.CV cs.AI} }
giakoumoglou2024synco:
arxiv-665015
2410.02405
Cooperative Semantic Knowledge Base Update Policy for Multiple Semantic Communication Pairs
<|reference_start|>Cooperative Semantic Knowledge Base Update Policy for Multiple Semantic Communication Pairs: Semantic communication has emerged as a promising communication paradigm, and there has been extensive research focusing on its applications in increasingly prevalent multi-user scenarios. However, the knowledge discrepancy among multiple users may lead to considerable disparities in their performance. To address this challenge, this paper proposes a novel multi-pair cooperative semantic knowledge base (SKB) update policy. Specifically, for each pair endowed with SKB-enabled semantic communication, its well-understood knowledge in the local SKB is selected and uploaded to the server to establish a global SKB, via a score-based knowledge selection scheme. The knowledge selection scheme achieves a balance between the uplink transmission overhead and the completeness of the global SKB. Then, with the assistance of the global SKB, each pair's local SKB is refined and their performance is improved. Numerical results show that the proposed cooperative SKB update policy obtains significant performance gains with minimal transmission overhead, especially for the initially poor-performing pairs.<|reference_end|>
arxiv
@article{li2024cooperative, title={Cooperative Semantic Knowledge Base Update Policy for Multiple Semantic Communication Pairs}, author={Shuling Li, Yaping Sun, Jinbei Zhang, Kechao Cai, Hao Chen, Shuguang Cui and Xiaodong Xu}, journal={arXiv preprint arXiv:2410.02405}, year={2024}, archivePrefix={arXiv}, eprint={2410.02405}, primaryClass={cs.IT math.IT} }
li2024cooperative
arxiv-665016
2410.02406
ELLMA-T: an Embodied LLM-agent for Supporting English Language Learning in Social VR
<|reference_start|>ELLMA-T: an Embodied LLM-agent for Supporting English Language Learning in Social VR: Many people struggle with learning a new language, with traditional tools falling short in providing contextualized learning tailored to each learner's needs. The recent development of large language models (LLMs) and embodied conversational agents (ECAs) in social virtual reality (VR) provide new opportunities to practice language learning in a contextualized and naturalistic way that takes into account the learner's language level and needs. To explore this opportunity, we developed ELLMA-T, an ECA that leverages an LLM (GPT-4) and situated learning framework for supporting learning English language in social VR (VRChat). Drawing on qualitative interviews (N=12), we reveal the potential of ELLMA-T to generate realistic, believable and context-specific role plays for agent-learner interaction in VR, and LLM's capability to provide initial language assessment and continuous feedback to learners. We provide five design implications for the future development of LLM-based language agents in social VR.<|reference_end|>
arxiv
@article{pan2024ellma-t:, title={ELLMA-T: an Embodied LLM-agent for Supporting English Language Learning in Social VR}, author={Mengxu Pan, Alexandra Kitson, Hongyu Wan, Mirjana Prpa}, journal={arXiv preprint arXiv:2410.02406}, year={2024}, archivePrefix={arXiv}, eprint={2410.02406}, primaryClass={cs.HC} }
pan2024ellma-t:
arxiv-665017
2410.02409
Additive word complexity and Walnut
<|reference_start|>Additive word complexity and Walnut: In combinatorics on words, a classical topic of study is the number of specific patterns appearing in infinite sequences. For instance, many works have been dedicated to studying the so-called factor complexity of infinite sequences, which gives the number of different factors (contiguous subblocks of their symbols), as well as abelian complexity, which counts factors up to a permutation of letters. In this paper, we consider the relatively unexplored concept of additive complexity, which counts the number of factors up to additive equivalence. We say that two words are additively equivalent if they have the same length and the total weight of their letters is equal. Our contribution is to expand the general knowledge of additive complexity from a theoretical point of view and consider various famous examples. We show a particular case of an analog of the long-standing conjecture on the regularity of the abelian complexity of an automatic sequence. In particular, we use the formalism of logic, and the software Walnut, to decide related properties of automatic sequences. We compare the behaviors of additive and abelian complexities, and we also consider the notion of abelian and additive powers. Along the way, we present some open questions and conjectures for future work.<|reference_end|>
arxiv
@article{popoli2024additive, title={Additive word complexity and Walnut}, author={Pierre Popoli, Jeffrey Shallit, and Manon Stipulanti}, journal={arXiv preprint arXiv:2410.02409}, year={2024}, archivePrefix={arXiv}, eprint={2410.02409}, primaryClass={math.CO cs.DM} }
popoli2024additive
arxiv-665018
2410.02415
Cellular Network Densification: a System-level Analysis with IAB, NCR and RIS
<|reference_start|>Cellular Network Densification: a System-level Analysis with IAB, NCR and RIS: As the number of user equipments increases in fifth generation (5G) and beyond, it is desired to densify the cellular network with auxiliary nodes assisting the base stations. Examples of these nodes are integrated access and backhaul (IAB) nodes, network-controlled repeaters (NCRs) and reconfigurable intelligent surfaces (RISs). In this context, this work presents a system level overview of these three nodes. Moreover, this work evaluates through simulations the impact of network planning aiming at enhancing the performance of a network used to cover an outdoor sport event. We show that, in the considered scenario, in general, IAB nodes provide an improved signal to interference-plus-noise ratio and throughput, compared to NCRs and RISs. However, there are situations where NCR outperforms IAB due to higher level of interference caused by the latter. Finally, we show that the deployment of these nodes in unmanned aerial vehicles (UAVs) also achieves performance gains due to their aerial mobility. However, UAV constraints related to aerial deployment may prevent these nodes from reaching results as good as the ones achieved by their stationary deployment.<|reference_end|>
arxiv
@article{da silva2024cellular, title={Cellular Network Densification: a System-level Analysis with IAB, NCR and RIS}, author={Gabriel C. M. da Silva, Victor F. Monteiro, Diego A. Sousa, Darlan C. Moreira, Tarcisio F. Maciel, Fco. Rafael M. Lima, Behrooz Makki}, journal={arXiv preprint arXiv:2410.02415}, year={2024}, archivePrefix={arXiv}, eprint={2410.02415}, primaryClass={eess.SY cs.NI cs.SY} }
da silva2024cellular
arxiv-665019
2410.02416
Eliminating Oversaturation and Artifacts of High Guidance Scales in Diffusion Models
<|reference_start|>Eliminating Oversaturation and Artifacts of High Guidance Scales in Diffusion Models: Classifier-free guidance (CFG) is crucial for improving both generation quality and alignment between the input condition and final output in diffusion models. While a high guidance scale is generally required to enhance these aspects, it also causes oversaturation and unrealistic artifacts. In this paper, we revisit the CFG update rule and introduce modifications to address this issue. We first decompose the update term in CFG into parallel and orthogonal components with respect to the conditional model prediction and observe that the parallel component primarily causes oversaturation, while the orthogonal component enhances image quality. Accordingly, we propose down-weighting the parallel component to achieve high-quality generations without oversaturation. Additionally, we draw a connection between CFG and gradient ascent and introduce a new rescaling and momentum method for the CFG update rule based on this insight. Our approach, termed adaptive projected guidance (APG), retains the quality-boosting advantages of CFG while enabling the use of higher guidance scales without oversaturation. APG is easy to implement and introduces practically no additional computational overhead to the sampling process. Through extensive experiments, we demonstrate that APG is compatible with various conditional diffusion models and samplers, leading to improved FID, recall, and saturation scores while maintaining precision comparable to CFG, making our method a superior plug-and-play alternative to standard classifier-free guidance.<|reference_end|>
arxiv
@article{sadat2024eliminating, title={Eliminating Oversaturation and Artifacts of High Guidance Scales in Diffusion Models}, author={Seyedmorteza Sadat, Otmar Hilliges, Romann M. Weber}, journal={arXiv preprint arXiv:2410.02416}, year={2024}, archivePrefix={arXiv}, eprint={2410.02416}, primaryClass={cs.LG cs.CV} }
sadat2024eliminating
arxiv-665020
2410.02417
MenakBERT -- Hebrew Diacriticizer
<|reference_start|>MenakBERT -- Hebrew Diacriticizer: Diacritical marks in the Hebrew language give words their vocalized form. The task of adding diacritical marks to plain Hebrew text is still dominated by a system that relies heavily on human-curated resources. Recent models trained on diacritized Hebrew texts still present a gap in performance. We use a recently developed char-based PLM to narrowly bridge this gap. Presenting MenakBERT, a character level transformer pretrained on Hebrew text and fine-tuned to produce diacritical marks for Hebrew sentences. We continue to show how finetuning a model for diacritizing transfers to a task such as part of speech tagging.<|reference_end|>
arxiv
@article{cohen2024menakbert, title={MenakBERT -- Hebrew Diacriticizer}, author={Ido Cohen, Jacob Gidron, Idan Pinto}, journal={arXiv preprint arXiv:2410.02417}, year={2024}, archivePrefix={arXiv}, eprint={2410.02417}, primaryClass={cs.CL cs.LG} }
cohen2024menakbert
arxiv-665021
2410.02420
LoGDesc: Local geometric features aggregation for robust point cloud registration
<|reference_start|>LoGDesc: Local geometric features aggregation for robust point cloud registration: This paper introduces a new hybrid descriptor for 3D point matching and point cloud registration, combining local geometrical properties and learning-based feature propagation for each point's neighborhood structure description. The proposed architecture first extracts prior geometrical information by computing each point's planarity, anisotropy, and omnivariance using a Principal Components Analysis (PCA). This prior information is completed by a descriptor based on the normal vectors estimated thanks to constructing a neighborhood based on triangles. The final geometrical descriptor is propagated between the points using local graph convolutions and attention mechanisms. The new feature extractor is evaluated on ModelNet40, Bunny Stanford dataset, KITTI and MVP (Multi-View Partial)-RG for point cloud registration and shows interesting results, particularly on noisy and low overlapping point clouds.<|reference_end|>
arxiv
@article{slimani2024logdesc:, title={LoGDesc: Local geometric features aggregation for robust point cloud registration}, author={Karim Slimani, Brahim Tamadazte and Catherine Achard}, journal={arXiv preprint arXiv:2410.02420}, year={2024}, archivePrefix={arXiv}, eprint={2410.02420}, primaryClass={cs.CV} }
slimani2024logdesc:
arxiv-665022
2410.02423
PnP-Flow: Plug-and-Play Image Restoration with Flow Matching
<|reference_start|>PnP-Flow: Plug-and-Play Image Restoration with Flow Matching: In this paper, we introduce Plug-and-Play (PnP) Flow Matching, an algorithm for solving imaging inverse problems. PnP methods leverage the strength of pre-trained denoisers, often deep neural networks, by integrating them in optimization schemes. While they achieve state-of-the-art performance on various inverse problems in imaging, PnP approaches face inherent limitations on more generative tasks like inpainting. On the other hand, generative models such as Flow Matching pushed the boundary in image sampling yet lack a clear method for efficient use in image restoration. We propose to combine the PnP framework with Flow Matching (FM) by defining a time-dependent denoiser using a pre-trained FM model. Our algorithm alternates between gradient descent steps on the data-fidelity term, reprojections onto the learned FM path, and denoising. Notably, our method is computationally efficient and memory-friendly, as it avoids backpropagation through ODEs and trace computations. We evaluate its performance on denoising, super-resolution, deblurring, and inpainting tasks, demonstrating superior results compared to existing PnP algorithms and Flow Matching based state-of-the-art methods.<|reference_end|>
arxiv
@article{martin2024pnp-flow:, title={PnP-Flow: Plug-and-Play Image Restoration with Flow Matching}, author={S\'egol\`ene Martin, Anne Gagneux, Paul Hagemann, Gabriele Steidl}, journal={arXiv preprint arXiv:2410.02423}, year={2024}, archivePrefix={arXiv}, eprint={2410.02423}, primaryClass={cs.CV cs.LG} }
martin2024pnp-flow:
arxiv-665023
2410.02425
LLM-Pilot: Characterize and Optimize Performance of your LLM Inference Services
<|reference_start|>LLM-Pilot: Characterize and Optimize Performance of your LLM Inference Services: As Large Language Models (LLMs) are rapidly growing in popularity, LLM inference services must be able to serve requests from thousands of users while satisfying performance requirements. The performance of an LLM inference service is largely determined by the hardware onto which it is deployed, but understanding of which hardware will deliver on performance requirements remains challenging. In this work we present LLM-Pilot - a first-of-its-kind system for characterizing and predicting performance of LLM inference services. LLM-Pilot performs benchmarking of LLM inference services, under a realistic workload, across a variety of GPUs, and optimizes the service configuration for each considered GPU to maximize performance. Finally, using this characterization data, LLM-Pilot learns a predictive model, which can be used to recommend the most cost-effective hardware for a previously unseen LLM. Compared to existing methods, LLM-Pilot can deliver on performance requirements 33% more frequently, whilst reducing costs by 60% on average.<|reference_end|>
arxiv
@article{łazuka2024llm-pilot:, title={LLM-Pilot: Characterize and Optimize Performance of your LLM Inference Services}, author={Ma{\l}gorzata {\L}azuka, Andreea Anghel, Thomas Parnell}, journal={arXiv preprint arXiv:2410.02425}, year={2024}, archivePrefix={arXiv}, eprint={2410.02425}, primaryClass={cs.DC cs.CL cs.LG} }
łazuka2024llm-pilot:
arxiv-665024
2410.02426
Learning the Latent Rules of a Game from Data: A Chess Story
<|reference_start|>Learning the Latent Rules of a Game from Data: A Chess Story: We demonstrate that small pretrained foundational generative language models with millions of parameters can learn the latent rules of a process from data associated with the process. Inspired by Stefan Zweig's novella "Schachnovelle," also known as "The Royal Game" in English, we show that 28M and 125M parameter pretrained foundational small language models (SLMs) can be instruction fine-tuned with 1,000-to-1,000,000 examples to learn the rules of chess, propose legal moves, and accurately solve chess problems. We also explore the impact of successive language model fine-tuning epochs on improved outcomes and demonstrate reductions in model hallucinations by increasing the number of instruction fine-tuning examples.<|reference_end|>
arxiv
@article{fauber2024learning, title={Learning the Latent Rules of a Game from Data: A Chess Story}, author={Ben Fauber}, journal={arXiv preprint arXiv:2410.02426}, year={2024}, archivePrefix={arXiv}, eprint={2410.02426}, primaryClass={cs.CL cs.AI} }
fauber2024learning
arxiv-665025
2410.02428
Collective Critics for Creative Story Generation
<|reference_start|>Collective Critics for Creative Story Generation: Generating a long story of several thousand words with narrative coherence using Large Language Models (LLMs) has been a challenging task. Previous research has addressed this challenge by proposing different frameworks that create a story plan and generate a long story based on that plan. However, these frameworks have been mainly focusing on maintaining narrative coherence in stories, often overlooking creativity in story planning and the expressiveness of the stories generated from those plans, which are desirable properties to captivate readers' interest. In this paper, we propose Collective Critics for Creative Story Generation framework (CritiCS), which is composed of plan refining stage (CrPlan) and story generation stage (CrText), to integrate a collective revision mechanism that promotes those properties into long-form story generation process. Specifically, in each stage, a group of LLM critics and one leader collaborate to incrementally refine drafts of plan and story throughout multiple rounds. Extensive human evaluation shows that the CritiCS can significantly enhance story creativity and reader engagement, while also maintaining narrative coherence. Furthermore, the design of the framework allows active participation from human writers in any role within the critique process, enabling interactive human-machine collaboration in story writing.<|reference_end|>
arxiv
@article{bae2024collective, title={Collective Critics for Creative Story Generation}, author={Minwook Bae, Hyounghun Kim}, journal={arXiv preprint arXiv:2410.02428}, year={2024}, archivePrefix={arXiv}, eprint={2410.02428}, primaryClass={cs.CL cs.AI} }
bae2024collective
arxiv-665026
2410.02429
IoT-LLM: Enhancing Real-World IoT Task Reasoning with Large Language Models
<|reference_start|>IoT-LLM: Enhancing Real-World IoT Task Reasoning with Large Language Models: Large Language Models (LLMs) have demonstrated remarkable capabilities across textual and visual domains but often generate outputs that violate physical laws, revealing a gap in their understanding of the physical world. Inspired by human cognition, where perception is fundamental to reasoning, we explore augmenting LLMs with enhanced perception abilities using Internet of Things (IoT) sensor data and pertinent knowledge for IoT task reasoning in the physical world. In this work, we systematically study LLMs' capability to address real-world IoT tasks by augmenting their perception and knowledge base, and then propose a unified framework, IoT-LLM, to enhance such capability. In IoT-LLM, we customize three steps for LLMs: preprocessing IoT data into formats amenable to LLMs, activating their commonsense knowledge through chain-of-thought prompting and specialized role definitions, and expanding their understanding via IoT-oriented retrieval-augmented generation based on in-context learning. To evaluate the performance, we design a new benchmark with five real-world IoT tasks with different data types and reasoning difficulties and provide the benchmarking results on six open-source and closed-source LLMs. Experimental results demonstrate the limitations of existing LLMs with naive textual inputs that cannot perform these tasks effectively. We show that IoT-LLM significantly enhances the IoT task reasoning performance of LLMs such as GPT-4, achieving an average improvement of 65% across various tasks against previous methods. The results also showcase LLMs' ability to comprehend IoT data and the physical laws behind the data by providing a reasoning process. Limitations of our work are claimed to inspire future research in this new era.<|reference_end|>
arxiv
@article{an2024iot-llm:, title={IoT-LLM: Enhancing Real-World IoT Task Reasoning with Large Language Models}, author={Tuo An, Yunjiao Zhou, Han Zou, Jianfei Yang}, journal={arXiv preprint arXiv:2410.02429}, year={2024}, archivePrefix={arXiv}, eprint={2410.02429}, primaryClass={cs.AI cs.CL} }
an2024iot-llm:
arxiv-665027
2410.02430
Predictive Attractor Models
<|reference_start|>Predictive Attractor Models: Sequential memory, the ability to form and accurately recall a sequence of events or stimuli in the correct order, is a fundamental prerequisite for biological and artificial intelligence as it underpins numerous cognitive functions (e.g., language comprehension, planning, episodic memory formation, etc.) However, existing methods of sequential memory suffer from catastrophic forgetting, limited capacity, slow iterative learning procedures, low-order Markov memory, and, most importantly, the inability to represent and generate multiple valid future possibilities stemming from the same context. Inspired by biologically plausible neuroscience theories of cognition, we propose \textit{Predictive Attractor Models (PAM)}, a novel sequence memory architecture with desirable generative properties. PAM is a streaming model that learns a sequence in an online, continuous manner by observing each input \textit{only once}. Additionally, we find that PAM avoids catastrophic forgetting by uniquely representing past context through lateral inhibition in cortical minicolumns, which prevents new memories from overwriting previously learned knowledge. PAM generates future predictions by sampling from a union set of predicted possibilities; this generative ability is realized through an attractor model trained alongside the predictor. We show that PAM is trained with local computations through Hebbian plasticity rules in a biologically plausible framework. Other desirable traits (e.g., noise tolerance, CPU-based learning, capacity scaling) are discussed throughout the paper. Our findings suggest that PAM represents a significant step forward in the pursuit of biologically plausible and computationally efficient sequential memory models, with broad implications for cognitive science and artificial intelligence research.<|reference_end|>
arxiv
@article{mounir2024predictive, title={Predictive Attractor Models}, author={Ramy Mounir and Sudeep Sarkar}, journal={arXiv preprint arXiv:2410.02430}, year={2024}, archivePrefix={arXiv}, eprint={2410.02430}, primaryClass={cs.AI cs.CV cs.LG q-bio.NC} }
mounir2024predictive
arxiv-665028
2410.02433
Better Call SAUL: Fluent and Consistent Language Model Editing with Generation Regularization
<|reference_start|>Better Call SAUL: Fluent and Consistent Language Model Editing with Generation Regularization: To ensure large language models contain up-to-date knowledge, they need to be updated regularly. However, model editing is challenging as it might also affect knowledge that is unrelated to the new data. State-of-the-art methods identify parameters associated with specific knowledge and then modify them via direct weight updates. However, these locate-and-edit methods suffer from heavy computational overhead and lack theoretical validation. In contrast, directly fine-tuning the model on requested edits affects the model's behavior on unrelated knowledge, and significantly damages the model's generation fluency and consistency. To address these challenges, we propose SAUL, a streamlined model editing method that uses sentence concatenation with augmented random facts for generation regularization. Evaluations on three model editing benchmarks show that SAUL is a practical and reliable solution for model editing outperforming state-of-the-art methods while maintaining generation quality and reducing computational overhead.<|reference_end|>
arxiv
@article{wang2024better, title={Better Call SAUL: Fluent and Consistent Language Model Editing with Generation Regularization}, author={Mingyang Wang, Lukas Lange, Heike Adel, Jannik Str\"otgen, Hinrich Sch\"utze}, journal={arXiv preprint arXiv:2410.02433}, year={2024}, archivePrefix={arXiv}, eprint={2410.02433}, primaryClass={cs.CL cs.LG} }
wang2024better
arxiv-665029
2410.02434
Load Balancing-based Topology Adaptation for Integrated Access and Backhaul Networks
<|reference_start|>Load Balancing-based Topology Adaptation for Integrated Access and Backhaul Networks: Integrated access and backhaul (IAB) technology is a flexible solution for network densification. IAB nodes can also be deployed in moving nodes such as buses and trains, i.e., mobile IAB (mIAB). As mIAB nodes can move around the coverage area, the connection between mIAB nodes and their parent macro base stations (BSs), i.e., IAB donors, is sometimes required to change in order to keep an acceptable backhaul link, the so-called topology adaptation (TA). The change from one IAB donor to another may strongly impact the system load distribution, possibly causing unsatisfactory backhaul service due to the lack of radio resources. Based on this, TA should consider both backhaul link quality and traffic load. In this work, we propose a load balancing algorithm based on TA for IAB networks, and compare it with an approach in which TA is triggered based on reference signal received power (RSRP) only. The results show that our proposed algorithm improves the throughput of the passengers' worst connections in uplink (UL) and, more modestly, also in downlink (DL), without significantly impairing the pedestrian quality of service (QoS).<|reference_end|>
arxiv
@article{paiva2024load, title={Load Balancing-based Topology Adaptation for Integrated Access and Backhaul Networks}, author={Raul Victor de O. Paiva, Fco. Italo G. Carvalho, Fco. Rafael M. Lima, Victor F. Monteiro, Diego A. Sousa, Darlan C. Moreira, Tarcisio F. Maciel, Behrooz Makki}, journal={arXiv preprint arXiv:2410.02434}, year={2024}, archivePrefix={arXiv}, eprint={2410.02434}, primaryClass={cs.NI cs.SY eess.SY} }
paiva2024load
arxiv-665030
2410.02438
Learning K-U-Net with constant complexity: An Application to time series forecasting
<|reference_start|>Learning K-U-Net with constant complexity: An Application to time series forecasting: Training deep models for time series forecasting is a critical task with an inherent challenge of time complexity. While current methods generally ensure linear time complexity, our observations on temporal redundancy show that high-level features are learned 98.44\% slower than low-level features. To address this issue, we introduce a new exponentially weighted stochastic gradient descent algorithm designed to achieve constant time complexity in deep learning models. We prove that the theoretical complexity of this learning method is constant. Evaluation of this method on Kernel U-Net (K-U-Net) on synthetic datasets shows a significant reduction in complexity while improving the accuracy of the test set.<|reference_end|>
arxiv
@article{you2024learning, title={Learning K-U-Net with constant complexity: An Application to time series forecasting}, author={Jiang You, Arben Cela, Ren\'e Natowicz, Jacob Ouanounou, Patrick Siarry}, journal={arXiv preprint arXiv:2410.02438}, year={2024}, archivePrefix={arXiv}, eprint={2410.02438}, primaryClass={cs.LG} }
you2024learning
arxiv-665031
2410.02440
Optimizing Adaptive Attacks against Content Watermarks for Language Models
<|reference_start|>Optimizing Adaptive Attacks against Content Watermarks for Language Models: Large Language Models (LLMs) can be \emph{misused} to spread online spam and misinformation. Content watermarking deters misuse by hiding a message in model-generated outputs, enabling their detection using a secret watermarking key. Robustness is a core security property, stating that evading detection requires (significant) degradation of the content's quality. Many LLM watermarking methods have been proposed, but robustness is tested only against \emph{non-adaptive} attackers who lack knowledge of the watermarking method and can find only suboptimal attacks. We formulate the robustness of LLM watermarking as an objective function and propose preference-based optimization to tune \emph{adaptive} attacks against the specific watermarking method. Our evaluation shows that (i) adaptive attacks substantially outperform non-adaptive baselines. (ii) Even in a non-adaptive setting, adaptive attacks optimized against a few known watermarks remain highly effective when tested against other unseen watermarks, and (iii) optimization-based attacks are practical and require less than seven GPU hours. Our findings underscore the need to test robustness against adaptive attackers.<|reference_end|>
arxiv
@article{diaa2024optimizing, title={Optimizing Adaptive Attacks against Content Watermarks for Language Models}, author={Abdulrahman Diaa, Toluwani Aremu, Nils Lukas}, journal={arXiv preprint arXiv:2410.02440}, year={2024}, archivePrefix={arXiv}, eprint={2410.02440}, primaryClass={cs.CR cs.AI} }
diaa2024optimizing
arxiv-665032
2410.02441
Embedded Topic Models Enhanced by Wikification
<|reference_start|>Embedded Topic Models Enhanced by Wikification: Topic modeling analyzes a collection of documents to learn meaningful patterns of words. However, previous topic models consider only the spelling of words and do not take into consideration the homography of words. In this study, we incorporate the Wikipedia knowledge into a neural topic model to make it aware of named entities. We evaluate our method on two datasets, 1) news articles of \textit{New York Times} and 2) the AIDA-CoNLL dataset. Our experiments show that our method improves the performance of neural topic models in generalizability. Moreover, we analyze frequent terms in each topic and the temporal dependencies between topics to demonstrate that our entity-aware topic models can capture the time-series development of topics well.<|reference_end|>
arxiv
@article{shibuya2024embedded, title={Embedded Topic Models Enhanced by Wikification}, author={Takashi Shibuya, Takehito Utsuro}, journal={arXiv preprint arXiv:2410.02441}, year={2024}, archivePrefix={arXiv}, eprint={2410.02441}, primaryClass={cs.CL} }
shibuya2024embedded
arxiv-665033
2410.02442
Towards a Self-rescuing System for UAVs Under GNSS Attack
<|reference_start|>Towards a Self-rescuing System for UAVs Under GNSS Attack: There has been substantial growth in the UAV market along with an expansion in their applications. However, the successful execution of a UAV mission is very often dependent on the use of a GNSS. Unfortunately, the vulnerability of GNSS signals, due to their lack of encryption and authentication, poses a significant cybersecurity issue. This vulnerability makes various attacks, particularly the "GNSS spoofing attack" and the "GNSS jamming attack", easily executable. Generally speaking, during such an attack, the drone is manipulated into altering its path, usually resulting in an immediate forced landing or crash. As far as we know, we are the first to propose a lightweight solution that enables a drone to autonomously rescue itself, assuming it is under GNSS attack and the GNSS is no longer available, and return safely to its initial takeoff position, thereby preventing any potential crashes. During the flight, wind plays a critical role as it can instantaneously alter the drone's position. To solve this problem, we have devised a highly effective two-phase solution: (i) the Forward Phase, which monitors and records the forward journey, and (ii) the Backward Phase, which generates a backward route based on the Forward Phase and the presence of wind. The final solution ensures strong performance in consistently returning the drone to its original position, even in windy conditions, while maintaining a very fast computation time.<|reference_end|>
arxiv
@article{rigoni2024towards, title={Towards a Self-rescuing System for UAVs Under GNSS Attack}, author={Giulio Rigoni, Nicola Scremin, Mauro Conti}, journal={arXiv preprint arXiv:2410.02442}, year={2024}, archivePrefix={arXiv}, eprint={2410.02442}, primaryClass={cs.CR} }
rigoni2024towards
arxiv-665034
2410.02443
Clinnova Federated Learning Proof of Concept: Key Takeaways from a Cross-border Collaboration
<|reference_start|>Clinnova Federated Learning Proof of Concept: Key Takeaways from a Cross-border Collaboration: Clinnova, a collaborative initiative involving France, Germany, Switzerland, and Luxembourg, is dedicated to unlocking the power of precision medicine through data federation, standardization, and interoperability. This European Greater Region initiative seeks to create an interoperable European standard using artificial intelligence (AI) and data science to enhance healthcare outcomes and efficiency. Key components include multidisciplinary research centers, a federated biobanking strategy, a digital health innovation platform, and a federated AI strategy. It targets inflammatory bowel disease, rheumatoid diseases, and multiple sclerosis (MS), emphasizing data quality to develop AI algorithms for personalized treatment and translational research. The IHU Strasbourg (Institute of Minimal-invasive Surgery) has the lead in this initiative to develop the federated learning (FL) proof of concept (POC) that will serve as a foundation for advancing AI in healthcare. At its core, Clinnova-MS aims to enhance MS patient care by using FL to develop more accurate models that detect disease progression, guide interventions, and validate digital biomarkers across multiple sites. This technical report presents insights and key takeaways from the first cross-border federated POC on MS segmentation of MRI images within the Clinnova framework. While our work marks a significant milestone in advancing MS segmentation through cross-border collaboration, it also underscores the importance of addressing technical, logistical, and ethical considerations to realize the full potential of FL in healthcare settings.<|reference_end|>
arxiv
@article{alekseenko2024clinnova, title={Clinnova Federated Learning Proof of Concept: Key Takeaways from a Cross-border Collaboration}, author={Julia Alekseenko, Bram Stieltjes, Michael Bach, Melanie Boerries, Oliver Opitz, Alexandros Karargyris, Nicolas Padoy}, journal={arXiv preprint arXiv:2410.02443}, year={2024}, archivePrefix={arXiv}, eprint={2410.02443}, primaryClass={cs.CV cs.AI cs.LG} }
alekseenko2024clinnova
arxiv-665035
2410.02449
A fast algorithm for computing a planar support for non-piercing rectangles
<|reference_start|>A fast algorithm for computing a planar support for non-piercing rectangles: For a hypergraph $\mathcal{H}=(X,\mathcal{E})$ a \emph{support} is a graph $G$ on $X$ such that for each $E\in\mathcal{E}$, the induced subgraph of $G$ on the elements in $E$ is connected. If $G$ is planar, we call it a planar support. A set of axis parallel rectangles $\mathcal{R}$ forms a non-piercing family if for any $R_1, R_2 \in \mathcal{R}$, $R_1 \setminus R_2$ is connected. Given a set $P$ of $n$ points in $\mathbb{R}^2$ and a set $\mathcal{R}$ of $m$ \emph{non-piercing} axis-aligned rectangles, we give an algorithm for computing a planar support for the hypergraph $(P,\mathcal{R})$ in $O(n\log^2 n + (n+m)\log m)$ time, where each $R\in\mathcal{R}$ defines a hyperedge consisting of all points of $P$ contained in~$R$. We use this result to show that if for a family of axis-parallel rectangles, any point in the plane is contained in at most $k$ pairwise \emph{crossing} rectangles (a pair of intersecting rectangles such that neither contains a corner of the other is called a crossing pair of rectangles), then we can obtain a support as the union of $k$ planar graphs.<|reference_end|>
arxiv
@article{pal2024a, title={A fast algorithm for computing a planar support for non-piercing rectangles}, author={Ambar Pal, Rajiv Raman, Saurabh Ray, Karamjeet Singh}, journal={arXiv preprint arXiv:2410.02449}, year={2024}, archivePrefix={arXiv}, eprint={2410.02449}, primaryClass={cs.CG} }
pal2024a
arxiv-665036
2410.02450
Personalized Federated Learning for Generative AI-Assisted Semantic Communications
<|reference_start|>Personalized Federated Learning for Generative AI-Assisted Semantic Communications: Semantic Communication (SC) focuses on transmitting only the semantic information rather than the raw data. This approach offers an efficient solution to the issue of spectrum resource utilization caused by the various intelligent applications on Mobile Users (MUs). Generative Artificial Intelligence (GAI) models have recently exhibited remarkable content generation and signal processing capabilities, presenting new opportunities for enhancing SC. Therefore, we propose a GAI-assisted SC (GSC) model deployed between MUs and the Base Station (BS). Then, to train the GSC model using the local data of MUs while ensuring privacy and accommodating heterogeneous requirements of MUs, we introduce Personalized Semantic Federated Learning (PSFL). This approach incorporates a novel Personalized Local Distillation (PLD) and Adaptive Global Pruning (AGP). In PLD, each MU selects a personalized GSC model as a mentor tailored to its local resources and a unified Convolutional Neural Networks (CNN)-based SC (CSC) model as a student. This mentor model is then distilled into the student model for global aggregation. In AGP, we perform network pruning on the aggregated global model according to real-time communication environments, reducing communication energy. Finally, numerical results demonstrate the feasibility and efficiency of the proposed PSFL scheme.<|reference_end|>
arxiv
@article{peng2024personalized, title={Personalized Federated Learning for Generative AI-Assisted Semantic Communications}, author={Yubo Peng, Feibo Jiang, Li Dong, Kezhi Wang, Kun Yang}, journal={arXiv preprint arXiv:2410.02450}, year={2024}, archivePrefix={arXiv}, eprint={2410.02450}, primaryClass={cs.LG cs.DC cs.IT math.IT} }
peng2024personalized
arxiv-665037
2410.02451
Strong Preferences Affect the Robustness of Value Alignment
<|reference_start|>Strong Preferences Affect the Robustness of Value Alignment: Value alignment, which aims to ensure that large language models (LLMs) and other AI agents behave in accordance with human values, is critical for ensuring the safety and trustworthiness of these systems. A key component of value alignment is the modeling of human preferences as a representation of human values. In this paper, we investigate the robustness of value alignment by examining the sensitivity of preference models. Specifically, we ask: how do changes in the probabilities of some preferences affect the predictions of these models for other preferences? To answer this question, we theoretically analyze the robustness of widely used preference models by examining their sensitivities to minor changes in the preferences they model. Our findings reveal that, in the Bradley-Terry and Plackett-Luce models, the probability of a preference can change significantly as other preferences change, especially when these preferences are dominant (i.e., with probabilities near 0 or 1). We identify specific conditions where this sensitivity becomes significant for these models and discuss the practical implications for the robustness and safety of value alignment in AI systems.<|reference_end|>
arxiv
@article{xu2024strong, title={Strong Preferences Affect the Robustness of Value Alignment}, author={Ziwei Xu, Mohan Kankanhalli}, journal={arXiv preprint arXiv:2410.02451}, year={2024}, archivePrefix={arXiv}, eprint={2410.02451}, primaryClass={cs.AI} }
xu2024strong
arxiv-665038
2410.02453
Quantifying User Coherence: A Unified Framework for Cross-Domain Recommendation Analysis
<|reference_start|>Quantifying User Coherence: A Unified Framework for Cross-Domain Recommendation Analysis: The effectiveness of Recommender Systems (RS) is closely tied to the quality and distinctiveness of user profiles, yet despite many advancements in raw performance, the sensitivity of RS to user profile quality remains under-researched. This paper introduces novel information-theoretic measures for understanding recommender systems: a "surprise" measure quantifying users' deviations from popular choices, and a "conditional surprise" measure capturing user interaction coherence. We evaluate 7 recommendation algorithms across 9 datasets, revealing the relationships between our measures and standard performance metrics. Using a rigorous statistical framework, our analysis quantifies how much user profile density and information measures impact algorithm performance across domains. By segmenting users based on these measures, we achieve improved performance with reduced data and show that simpler algorithms can match complex ones for low-coherence users. Additionally, we employ our measures to analyze how well different recommendation algorithms maintain the coherence and diversity of user preferences in their predictions, providing insights into algorithm behavior. This work advances the theoretical understanding of user behavior and practical heuristics for personalized recommendation systems, promoting more efficient and adaptive architectures.<|reference_end|>
arxiv
@article{soumm2024quantifying, title={Quantifying User Coherence: A Unified Framework for Cross-Domain Recommendation Analysis}, author={Micha\"el Soumm, Alexandre Fournier-Montgieux, Adrian Popescu, Bertrand Delezoide}, journal={arXiv preprint arXiv:2410.02453}, year={2024}, archivePrefix={arXiv}, eprint={2410.02453}, primaryClass={cs.IR cs.LG} }
soumm2024quantifying
arxiv-665039
2410.02454
Aggregation of Constrained Crowd Opinions for Urban Planning
<|reference_start|>Aggregation of Constrained Crowd Opinions for Urban Planning: Collective decision making is a customary action in government crowdsourcing. Through an ensemble of opinions (popularly known as judgment analysis), governments can satisfy the majority of the people who provided opinions. This has various real-world applications like urban planning or participatory budgeting that require setting up {\em facilities} based on the opinions of citizens. Recently, there has been emerging interest in performing judgment analysis on opinions that are constrained. We consider a new dimension of this problem that accommodates background constraints in judgment analysis, which ensures the collection of more responsible opinions. The background constraints refer to restrictions (with respect to the existing infrastructure) that must be taken care of while forming the consensus of opinions. In this paper, we address such problems with efficient unsupervised learning approaches, suitably modified to cater to the constraints of urban planning. We demonstrate the effectiveness of this approach in various scenarios where opinions are collected for setting up ATM counters and sewage lines. Our main contributions encompass a novel approach to collecting data for smart city planning (in the presence of constraints) and the development of methods for opinion aggregation in various formats. As a whole, we present a new dimension of judgment analysis by adding background constraints to the problem.<|reference_end|>
arxiv
@article{das2024aggregation, title={Aggregation of Constrained Crowd Opinions for Urban Planning}, author={Akanksha Das, Jyoti Patel and Malay Bhattacharyya}, journal={arXiv preprint arXiv:2410.02454}, year={2024}, archivePrefix={arXiv}, eprint={2410.02454}, primaryClass={cs.HC} }
das2024aggregation
arxiv-665040
2410.02456
Recurrent Few-Shot model for Document Verification
<|reference_start|>Recurrent Few-Shot model for Document Verification: General-purpose ID, or travel, document image- and video-based verification systems have yet to achieve good enough performance to be considered a solved problem. There are several factors that negatively impact their performance, including low-resolution images and videos and a lack of sufficient data to train the models. This task is particularly challenging when dealing with unseen classes of ID, or travel, documents. In this paper, we address this task by proposing a recurrent model able to detect forged documents in a few-shot scenario. The recurrent architecture makes the model robust to document resolution variability. Moreover, the few-shot approach allows the model to perform well even for unseen classes of documents. Preliminary results on the SIDTD and Findit datasets show good performance of this model for this task.<|reference_end|>
arxiv
@article{talarmain2024recurrent, title={Recurrent Few-Shot model for Document Verification}, author={Maxime Talarmain, Carlos Boned, Sanket Biswas, Oriol Ramos}, journal={In: Barney Smith, E.H., Liwicki, M., Peng, L. (eds) Document Analysis and Recognition - ICDAR 2024. ICDAR 2024. Lecture Notes in Computer Science, vol 14804. Springer, Cham}, year={2024}, doi={10.1007/978-3-031-70533-5_4}, archivePrefix={arXiv}, eprint={2410.02456}, primaryClass={cs.CV cs.AI} }
talarmain2024recurrent
arxiv-665041
2410.02458
MedVisionLlama: Leveraging Pre-Trained Large Language Model Layers to Enhance Medical Image Segmentation
<|reference_start|>MedVisionLlama: Leveraging Pre-Trained Large Language Model Layers to Enhance Medical Image Segmentation: Large Language Models (LLMs), known for their versatility in textual data, are increasingly being explored for their potential to enhance medical image segmentation, a crucial task for accurate diagnostic imaging. This study explores enhancing Vision Transformers (ViTs) for medical image segmentation by integrating pre-trained LLM transformer blocks. Our approach, which incorporates a frozen LLM transformer block into the encoder of a ViT-based model, leads to substantial improvements in segmentation performance across various medical imaging modalities. We propose a Hybrid Attention Mechanism that combines global and local feature learning with a Multi-Scale Fusion Block for aggregating features across different scales. The enhanced model shows significant performance gains, including an average Dice score increase from 0.74 to 0.79 and improvements in accuracy, precision, and the Jaccard Index. These results demonstrate the effectiveness of LLM-based transformers in refining medical image segmentation, highlighting their potential to significantly boost model accuracy and robustness. The source code and our implementation are available at: https://bit.ly/3zf2CVs<|reference_end|>
arxiv
@article{kumar2024medvisionllama:, title={MedVisionLlama: Leveraging Pre-Trained Large Language Model Layers to Enhance Medical Image Segmentation}, author={Gurucharan Marthi Krishna Kumar, Aman Chadha, Janine Mendola, Amir Shmuel}, journal={arXiv preprint arXiv:2410.02458}, year={2024}, archivePrefix={arXiv}, eprint={2410.02458}, primaryClass={eess.IV cs.CL cs.CV} }
kumar2024medvisionllama:
arxiv-665042
2410.02462
Scalable Differential Privacy Mechanisms for Real-Time Machine Learning Applications
<|reference_start|>Scalable Differential Privacy Mechanisms for Real-Time Machine Learning Applications: Large language models (LLMs) are increasingly integrated into real-time machine learning applications, where safeguarding user privacy is paramount. Traditional differential privacy mechanisms often struggle to balance privacy and accuracy, particularly in fast-changing environments with continuously flowing data. To address these issues, we introduce Scalable Differential Privacy (SDP), a framework tailored for real-time machine learning that emphasizes both robust privacy guarantees and enhanced model performance. SDP employs a hierarchical architecture to facilitate efficient noise aggregation across various learning agents. By integrating adaptive noise scheduling and gradient compression methods, our approach minimizes performance degradation while ensuring significant privacy protection. Extensive experiments on diverse datasets reveal that SDP maintains high accuracy levels while applying differential privacy effectively, showcasing its suitability for deployment in sensitive domains. This advancement points towards the potential for widespread adoption of privacy-preserving techniques in machine learning workflows.<|reference_end|>
arxiv
@article{smith2024scalable, title={Scalable Differential Privacy Mechanisms for Real-Time Machine Learning Applications}, author={Jessica Smith, David Williams, Emily Brown}, journal={arXiv preprint arXiv:2410.02462}, year={2024}, archivePrefix={arXiv}, eprint={2410.02462}, primaryClass={cs.CR} }
smith2024scalable
arxiv-665043
2410.02464
A Myhill-Nerode style Characterization for Timed Automata With Integer Resets
<|reference_start|>A Myhill-Nerode style Characterization for Timed Automata With Integer Resets: The well-known Nerode equivalence for finite words plays a fundamental role in our understanding of the class of regular languages. The equivalence leads to the Myhill-Nerode theorem and a canonical automaton, which in turn, is the basis of several automata learning algorithms. A Nerode-like equivalence has been studied for various classes of timed languages. In this work, we focus on timed automata with integer resets. This class is known to have good automata-theoretic properties and is also useful for practical modeling. Our main contribution is a Nerode-style equivalence for this class that depends on a constant K. We show that the equivalence leads to a Myhill-Nerode theorem and a canonical one-clock integer-reset timed automaton with maximum constant K. Based on the canonical form, we develop an Angluin-style active learning algorithm whose query complexity is polynomial in the size of the canonical form.<|reference_end|>
arxiv
@article{doveri2024a, title={A Myhill-Nerode style Characterization for Timed Automata With Integer Resets}, author={Kyveli Doveri, Pierre Ganty, B. Srivathsan}, journal={arXiv preprint arXiv:2410.02464}, year={2024}, archivePrefix={arXiv}, eprint={2410.02464}, primaryClass={cs.FL} }
doveri2024a
arxiv-665044
2410.02465
Response Tuning: Aligning Large Language Models without Instruction
<|reference_start|>Response Tuning: Aligning Large Language Models without Instruction: Instruction tuning, i.e., supervised fine-tuning using instruction-response pairs, is a foundational step in transitioning pre-trained Large Language Models (LLMs) into helpful and safe chat assistants. Our hypothesis is that establishing an adequate output space can enable such a transition given the capabilities inherent in pre-trained LLMs. To verify this, we propose Response Tuning (RT), which eliminates the instruction-conditioning step in instruction tuning and solely focuses on response space supervision. Our experiments demonstrate that RT models, trained only using responses, can effectively respond to a wide range of instructions and exhibit helpfulness comparable to that of their instruction-tuned counterparts. Furthermore, we observe that controlling the training response distribution can significantly improve their user preference or elicit target behaviors such as refusing assistance for unsafe queries. Our findings illuminate the role of establishing an adequate output space in alignment, highlighting the potential of the extensive inherent capabilities of pre-trained LLMs.<|reference_end|>
arxiv
@article{an2024response, title={Response Tuning: Aligning Large Language Models without Instruction}, author={Seokhyun An, Hyounghun Kim}, journal={arXiv preprint arXiv:2410.02465}, year={2024}, archivePrefix={arXiv}, eprint={2410.02465}, primaryClass={cs.CL cs.AI} }
an2024response
arxiv-665045
2410.02467
Towards a Theoretical Understanding of Memorization in Diffusion Models
<|reference_start|>Towards a Theoretical Understanding of Memorization in Diffusion Models: As diffusion probabilistic models (DPMs) are being employed as mainstream models for Generative Artificial Intelligence (GenAI), the study of their memorization of training data has attracted growing attention. Existing works in this direction aim to establish an understanding of whether or to what extent DPMs learn via memorization. Such an understanding is crucial for identifying potential risks of data leakage and copyright infringement in diffusion models and, more importantly, for trustworthy application of GenAI. Existing works revealed that conditional DPMs are more prone to training data memorization than unconditional DPMs, and the motivated data extraction methods are mostly for conditional DPMs. However, these understandings are primarily empirical, and extracting training data from unconditional models has been found to be extremely challenging. In this work, we provide a theoretical understanding of memorization in both conditional and unconditional DPMs under the assumption of model convergence. Our theoretical analysis indicates that extracting data from unconditional models can also be effective by constructing a proper surrogate condition. Based on this result, we propose a novel data extraction method named \textbf{Surrogate condItional Data Extraction (SIDE)} that leverages a time-dependent classifier trained on the generated data as a surrogate condition to extract training data from unconditional DPMs. Empirical results demonstrate that our SIDE can extract training data in challenging scenarios where previous methods fail, and it is, on average, over 50\% more effective across different scales of the CelebA dataset.<|reference_end|>
arxiv
@article{chen2024towards, title={Towards a Theoretical Understanding of Memorization in Diffusion Models}, author={Yunhao Chen, Xingjun Ma, Difan Zou, Yu-Gang Jiang}, journal={arXiv preprint arXiv:2410.02467}, year={2024}, archivePrefix={arXiv}, eprint={2410.02467}, primaryClass={cs.LG cs.CR cs.CV} }
chen2024towards
arxiv-665046
2410.02469
Behavior Trees in Functional Safety Supervisors for Autonomous Vehicles
<|reference_start|>Behavior Trees in Functional Safety Supervisors for Autonomous Vehicles: The rapid advancements in autonomous vehicle software present both opportunities and challenges, especially in enhancing road safety. The primary objective of autonomous vehicles is to reduce accident rates through improved safety measures. However, the integration of new algorithms into the autonomous vehicle, such as Artificial Intelligence methods, raises concerns about the compliance with established safety regulations. This paper introduces a novel software architecture based on behavior trees, aligned with established standards and designed to supervise vehicle functional safety in real time. It specifically addresses the integration of algorithms into industrial road vehicles, adhering to the ISO 26262. The proposed supervision methodology involves the detection of hazards and compliance with functional and technical safety requirements when a hazard arises. This methodology, implemented in this study in a Renault M\'egane (currently at SAE level 3 of automation), not only guarantees compliance with safety standards, but also paves the way for safer and more reliable autonomous driving technologies.<|reference_end|>
arxiv
@article{conejo2024behavior, title={Behavior Trees in Functional Safety Supervisors for Autonomous Vehicles}, author={Carlos Conejo, Vicen\c{c} Puig, Bernardo Morcego, Francisco Navas, Vicente Milan\'es}, journal={arXiv preprint arXiv:2410.02469}, year={2024}, archivePrefix={arXiv}, eprint={2410.02469}, primaryClass={cs.RO cs.SY eess.SY} }
conejo2024behavior
arxiv-665047
2410.02472
Meta-Models: An Architecture for Decoding LLM Behaviors Through Interpreted Embeddings and Natural Language
<|reference_start|>Meta-Models: An Architecture for Decoding LLM Behaviors Through Interpreted Embeddings and Natural Language: As Large Language Models (LLMs) become increasingly integrated into our daily lives, the potential harms from deceptive behavior underscore the need for faithfully interpreting their decision-making. While traditional probing methods have shown some effectiveness, they remain best suited for narrowly scoped tasks, while more comprehensive explanations are still necessary. To this end, we investigate meta-models: an architecture using a "meta-model" that takes activations from an "input-model" and answers natural language questions about the input-model's behaviors. We evaluate the meta-models' ability to generalize by training them on selected task types and assessing their out-of-distribution performance in deceptive scenarios. Our findings show that meta-models generalize well to out-of-distribution tasks and point towards opportunities for future research in this area.<|reference_end|>
arxiv
@article{costarelli2024meta-models:, title={Meta-Models: An Architecture for Decoding LLM Behaviors Through Interpreted Embeddings and Natural Language}, author={Anthony Costarelli, Mat Allen, Severin Field}, journal={arXiv preprint arXiv:2410.02472}, year={2024}, archivePrefix={arXiv}, eprint={2410.02472}, primaryClass={cs.LG cs.AI} }
costarelli2024meta-models:
arxiv-665048
2410.02475
Efficient Residual Learning with Mixture-of-Experts for Universal Dexterous Grasping
<|reference_start|>Efficient Residual Learning with Mixture-of-Experts for Universal Dexterous Grasping: Universal dexterous grasping across diverse objects presents a fundamental yet formidable challenge in robot learning. Existing approaches using reinforcement learning (RL) to develop policies on extensive object datasets face critical limitations, including complex curriculum design for multi-task learning and limited generalization to unseen objects. To overcome these challenges, we introduce ResDex, a novel approach that integrates residual policy learning with a mixture-of-experts (MoE) framework. ResDex is distinguished by its use of geometry-unaware base policies that are efficiently acquired on individual objects and capable of generalizing across a wide range of unseen objects. Our MoE framework incorporates several base policies to facilitate diverse grasping styles suitable for various objects. By learning residual actions alongside weights that combine these base policies, ResDex enables efficient multi-task RL for universal dexterous grasping. ResDex achieves state-of-the-art performance on the DexGraspNet dataset comprising 3,200 objects with an 88.8% success rate. It exhibits no generalization gap with unseen objects and demonstrates superior training efficiency, mastering all tasks within only 12 hours on a single GPU.<|reference_end|>
arxiv
@article{huang2024efficient, title={Efficient Residual Learning with Mixture-of-Experts for Universal Dexterous Grasping}, author={Ziye Huang, Haoqi Yuan, Yuhui Fu, and Zongqing Lu}, journal={arXiv preprint arXiv:2410.02475}, year={2024}, archivePrefix={arXiv}, eprint={2410.02475}, primaryClass={cs.RO cs.LG} }
huang2024efficient
arxiv-665049
2410.02476
Online Convex Optimization with a Separation Oracle
<|reference_start|>Online Convex Optimization with a Separation Oracle: In this paper, we introduce a new projection-free algorithm for Online Convex Optimization (OCO) with a state-of-the-art regret guarantee among separation-based algorithms. Existing projection-free methods based on the classical Frank-Wolfe algorithm achieve a suboptimal regret bound of $O(T^{3/4})$, while more recent separation-based approaches guarantee a regret bound of $O(\kappa \sqrt{T})$, where $\kappa$ denotes the asphericity of the feasible set, defined as the ratio of the radii of the containing and contained balls. However, for ill-conditioned sets, $\kappa$ can be arbitrarily large, potentially leading to poor performance. Our algorithm achieves a regret bound of $\widetilde{O}(\sqrt{dT} + \kappa d)$, while requiring only $\widetilde{O}(1)$ calls to a separation oracle per round. Crucially, the main term in the bound, $\widetilde{O}(\sqrt{d T})$, is independent of $\kappa$, addressing the limitations of previous methods. Additionally, as a by-product of our analysis, we recover the $O(\kappa \sqrt{T})$ regret bound of existing OCO algorithms with a more straightforward analysis and improve the regret bound for projection-free online exp-concave optimization. Finally, for constrained stochastic convex optimization, we achieve a state-of-the-art convergence rate of $\widetilde{O}(\sigma/\sqrt{T} + \kappa d/T)$, where $\sigma$ represents the noise in the stochastic gradients, while requiring only $\widetilde{O}(1)$ calls to a separation oracle per iteration.<|reference_end|>
arxiv
@article{mhammedi2024online, title={Online Convex Optimization with a Separation Oracle}, author={Zakaria Mhammedi}, journal={arXiv preprint arXiv:2410.02476}, year={2024}, archivePrefix={arXiv}, eprint={2410.02476}, primaryClass={cs.LG math.OC} }
mhammedi2024online
arxiv-665050
2410.02477
Learning Diverse Bimanual Dexterous Manipulation Skills from Human Demonstrations
<|reference_start|>Learning Diverse Bimanual Dexterous Manipulation Skills from Human Demonstrations: Bimanual dexterous manipulation is a critical yet underexplored area in robotics. Its high-dimensional action space and inherent task complexity present significant challenges for policy learning, and the limited task diversity in existing benchmarks hinders general-purpose skill development. Existing approaches largely depend on reinforcement learning, often constrained by intricately designed reward functions tailored to a narrow set of tasks. In this work, we present a novel approach for efficiently learning diverse bimanual dexterous skills from abundant human demonstrations. Specifically, we introduce BiDexHD, a framework that unifies task construction from existing bimanual datasets and employs teacher-student policy learning to address all tasks. The teacher learns state-based policies using a general two-stage reward function across tasks with shared behaviors, while the student distills the learned multi-task policies into a vision-based policy. With BiDexHD, scalable learning of numerous bimanual dexterous skills from auto-constructed tasks becomes feasible, offering promising advances toward universal bimanual dexterous manipulation. Our empirical evaluation on the TACO dataset, spanning 141 tasks across six categories, demonstrates a task fulfillment rate of 74.59% on trained tasks and 51.07% on unseen tasks, showcasing the effectiveness and competitive zero-shot generalization capabilities of BiDexHD. For videos and more information, visit our project page https://sites.google.com/view/bidexhd.<|reference_end|>
arxiv
@article{zhou2024learning, title={Learning Diverse Bimanual Dexterous Manipulation Skills from Human Demonstrations}, author={Bohan Zhou, Haoqi Yuan, Yuhui Fu, and Zongqing Lu}, journal={arXiv preprint arXiv:2410.02477}, year={2024}, archivePrefix={arXiv}, eprint={2410.02477}, primaryClass={cs.RO cs.LG} }
zhou2024learning
arxiv-665051
2410.02478
Temporal Predictive Coding for Gradient Compression in Distributed Learning
<|reference_start|>Temporal Predictive Coding for Gradient Compression in Distributed Learning: This paper proposes a prediction-based gradient compression method for distributed learning with event-triggered communication. Our goal is to reduce the amount of information transmitted from the distributed agents to the parameter server by exploiting temporal correlation in the local gradients. We use a linear predictor that \textit{combines past gradients to form a prediction of the current gradient}, with coefficients that are optimized by solving a least-squares problem. In each iteration, every agent transmits the predictor coefficients to the server such that the predicted local gradient can be computed. The difference between the true local gradient and the predicted one, termed the \textit{prediction residual}, is only transmitted when its norm is above some threshold. When this additional communication step is omitted, the server uses the prediction as the estimated gradient. The proposed design shows notable performance gains compared to existing methods in the literature, achieving convergence with reduced communication costs.<|reference_end|>
arxiv
@article{edin2024temporal, title={Temporal Predictive Coding for Gradient Compression in Distributed Learning}, author={Adrian Edin, Zheng Chen, Michel Kieffer, and Mikael Johansson}, journal={arXiv preprint arXiv:2410.02478}, year={2024}, archivePrefix={arXiv}, eprint={2410.02478}, primaryClass={cs.IT cs.DC cs.LG eess.SP math.IT} }
edin2024temporal
arxiv-665052
2410.02479
Cross-Embodiment Dexterous Grasping with Reinforcement Learning
<|reference_start|>Cross-Embodiment Dexterous Grasping with Reinforcement Learning: Dexterous hands exhibit significant potential for complex real-world grasping tasks. While recent studies have primarily focused on learning policies for specific robotic hands, the development of a universal policy that controls diverse dexterous hands remains largely unexplored. In this work, we study the learning of cross-embodiment dexterous grasping policies using reinforcement learning (RL). Inspired by the capability of human hands to control various dexterous hands through teleoperation, we propose a universal action space based on the human hand's eigengrasps. The policy outputs eigengrasp actions that are then converted into specific joint actions for each robot hand through a retargeting mapping. We simplify the robot hand's proprioception to include only the positions of fingertips and the palm, offering a unified observation space across different robot hands. Our approach demonstrates an 80% success rate in grasping objects from the YCB dataset across four distinct embodiments using a single vision-based policy. Additionally, our policy exhibits zero-shot generalization to two previously unseen embodiments and significant improvement in efficient finetuning. For further details and videos, visit our project page https://sites.google.com/view/crossdex.<|reference_end|>
arxiv
@article{yuan2024cross-embodiment, title={Cross-Embodiment Dexterous Grasping with Reinforcement Learning}, author={Haoqi Yuan, Bohan Zhou, Yuhui Fu, and Zongqing Lu}, journal={arXiv preprint arXiv:2410.02479}, year={2024}, archivePrefix={arXiv}, eprint={2410.02479}, primaryClass={cs.RO cs.LG} }
yuan2024cross-embodiment
arxiv-665053
2410.02482
It is Giving Major Satisfaction: Why Fairness Matters for Developers
<|reference_start|>It is Giving Major Satisfaction: Why Fairness Matters for Developers: Software practitioners often face unfairness in their work, such as unequal recognition of contributions, gender bias, and unclear criteria for performance reviews. While the link between fairness and job satisfaction has been established in other fields, its relevance to software professionals remains underexplored. This study aims to examine how fairness perceptions relate to job satisfaction among software practitioners, focusing on both general trends and demographic-specific differences. We conducted an online survey of 108 software practitioners, followed by ordinal logistic regression to analyze the relationship between fairness perceptions and job satisfaction in software engineering contexts, with moderation analysis examining how this relationship varies across demographic groups. Our findings indicate that all four fairness dimensions, distributive, procedural, interpersonal, and informational, significantly affect both overall job satisfaction and satisfaction with job security. Among these, interpersonal fairness has the biggest impact, being more than twice as influential on overall job satisfaction. The relationship between fairness perceptions and job satisfaction is notably stronger for female, ethnically underrepresented, less experienced practitioners, and those with work limitations. Fairness in authorship emerged as an important factor for job satisfaction collectively, while fairness in policy implementation, high-demand situations, and working hours particularly impacted specific demographic groups. This study highlights the unique role of fairness in software engineering, offering strategies for organizations to promote fair practices and targeted approaches specific for certain demographic groups.<|reference_end|>
arxiv
@article{sesari2024it, title={It is Giving Major Satisfaction: Why Fairness Matters for Developers}, author={Emeralda Sesari, Federica Sarro, Ayushi Rastogi}, journal={arXiv preprint arXiv:2410.02482}, year={2024}, archivePrefix={arXiv}, eprint={2410.02482}, primaryClass={cs.SE} }
sesari2024it
arxiv-665054
2410.02483
Event-Customized Image Generation
<|reference_start|>Event-Customized Image Generation: Customized Image Generation, generating customized images with user-specified concepts, has attracted significant attention due to its creativity and novelty. With impressive progress achieved in subject customization, some pioneering works further explored the customization of action and interaction beyond entity (i.e., human, animal, and object) appearance. However, these approaches only focus on basic actions and interactions between two entities, and their effects are limited by insufficient ''exactly same'' reference images. To extend customized image generation to more complex scenes for general real-world applications, we propose a new task: event-customized image generation. Given a single reference image, we define the ''event'' as all specific actions, poses, relations, or interactions between different entities in the scene. This task aims at accurately capturing the complex event and generating customized images with various target entities. To solve this task, we propose a novel training-free event customization method: FreeEvent. Specifically, FreeEvent introduces two extra paths alongside the general diffusion denoising process: 1) Entity switching path: it applies cross-attention guidance and regulation for target entity generation. 2) Event transferring path: it injects the spatial feature and self-attention maps from the reference image to the target image for event generation. To further facilitate this new task, we collected two evaluation benchmarks: SWiG-Event and Real-Event. Extensive experiments and ablations have demonstrated the effectiveness of FreeEvent.<|reference_end|>
arxiv
@article{wang2024event-customized, title={Event-Customized Image Generation}, author={Zhen Wang, Yilei Jiang, Dong Zheng, Jun Xiao, Long Chen}, journal={arXiv preprint arXiv:2410.02483}, year={2024}, archivePrefix={arXiv}, eprint={2410.02483}, primaryClass={cs.CV} }
wang2024event-customized
arxiv-665055
2410.02486
Encryption-Friendly LLM Architecture
<|reference_start|>Encryption-Friendly LLM Architecture: Large language models (LLMs) offer personalized responses based on user interactions, but this use case raises serious privacy concerns. Homomorphic encryption (HE) is a cryptographic protocol supporting arithmetic computations in encrypted states and provides a potential solution for privacy-preserving machine learning (PPML). However, the computational intensity of transformers poses challenges for applying HE to LLMs. In this work, we propose a modified HE-friendly transformer architecture with an emphasis on inference following personalized (private) fine-tuning. Utilizing LoRA fine-tuning and Gaussian kernels, we achieve significant computational speedups -- 6.94x for fine-tuning and 2.3x for inference -- while maintaining performance comparable to plaintext models. Our findings provide a viable proof of concept for offering privacy-preserving LLM services in areas where data protection is crucial.<|reference_end|>
arxiv
@article{rho2024encryption-friendly, title={Encryption-Friendly LLM Architecture}, author={Donghwan Rho, Taeseong Kim, Minje Park, Jung Woo Kim, Hyunsik Chae, Jung Hee Cheon, Ernest K. Ryu}, journal={arXiv preprint arXiv:2410.02486}, year={2024}, archivePrefix={arXiv}, eprint={2410.02486}, primaryClass={cs.CR cs.LG} }
rho2024encryption-friendly
arxiv-665056
2410.02487
Optimal Digital Twinning of Random Systems with Twinning Rate Constraints
<|reference_start|>Optimal Digital Twinning of Random Systems with Twinning Rate Constraints: With the massive advancements in processing power, Digital Twins (DTs) have become powerful tools to monitor and analyze physical entities. However, due to the potentially very high number of Physical Systems (PSs) to be tracked and emulated, for instance, in a factory environment or an Internet of Things (IoT) network, continuous twinning might become infeasible. In this paper, a DT system is investigated with a set of random PSs, where the twinning rate is limited due to resource constraints. Three cost functions are considered to quantify and penalize the twinning delay. For these cost functions, the optimal twinning problem under twinning rate constraints is formulated. In a numerical example, the proposed cost functions are evaluated for two, one push-based and one pull-based, benchmark twinning policies. The proposed methodology is the first to investigate the optimal twinning problem with random PSs and twinning rate constraints, and serves as a guideline for real-world implementations on how frequently PSs should be twinned.<|reference_end|>
arxiv
@article{tunc2024optimal, title={Optimal Digital Twinning of Random Systems with Twinning Rate Constraints}, author={Caglar Tunc}, journal={arXiv preprint arXiv:2410.02487}, year={2024}, archivePrefix={arXiv}, eprint={2410.02487}, primaryClass={cs.NI} }
tunc2024optimal
arxiv-665057
2410.02490
Stochastic variance-reduced Gaussian variational inference on the Bures-Wasserstein manifold
<|reference_start|>Stochastic variance-reduced Gaussian variational inference on the Bures-Wasserstein manifold: Optimization in the Bures-Wasserstein space has been gaining popularity in the machine learning community since it draws connections between variational inference and Wasserstein gradient flows. The variational inference objective function of Kullback-Leibler divergence can be written as the sum of the negative entropy and the potential energy, making forward-backward Euler the method of choice. Notably, the backward step admits a closed-form solution in this case, facilitating the practicality of the scheme. However, the forward step is no longer exact since the Bures-Wasserstein gradient of the potential energy involves "intractable" expectations. Recent approaches propose using the Monte Carlo method -- in practice a single-sample estimator -- to approximate these terms, resulting in high variance and poor performance. We propose a novel variance-reduced estimator based on the principle of control variates. We theoretically show that this estimator has a smaller variance than the Monte-Carlo estimator in scenarios of interest. We also prove that variance reduction helps improve the optimization bounds of the current analysis. We demonstrate that the proposed estimator gains order-of-magnitude improvements over the previous Bures-Wasserstein methods.<|reference_end|>
arxiv
@article{luu2024stochastic, title={Stochastic variance-reduced Gaussian variational inference on the Bures-Wasserstein manifold}, author={Hoang Phuc Hau Luu, Hanlin Yu, Bernardo Williams, Marcelo Hartmann, Arto Klami}, journal={arXiv preprint arXiv:2410.02490}, year={2024}, archivePrefix={arXiv}, eprint={2410.02490}, primaryClass={cs.LG stat.ML} }
luu2024stochastic
arxiv-665058
2410.02492
DTVLT: A Multi-modal Diverse Text Benchmark for Visual Language Tracking Based on LLM
<|reference_start|>DTVLT: A Multi-modal Diverse Text Benchmark for Visual Language Tracking Based on LLM: Visual language tracking (VLT) has emerged as a cutting-edge research area, harnessing linguistic data to enhance algorithms with multi-modal inputs and broadening the scope of traditional single object tracking (SOT) to encompass video understanding applications. Despite this, most VLT benchmarks still depend on succinct, human-annotated text descriptions for each video. These descriptions often fall short in capturing the nuances of video content dynamics and lack stylistic variety in language, constrained by their uniform level of detail and a fixed annotation frequency. As a result, algorithms tend to default to a "memorize the answer" strategy, diverging from the core objective of achieving a deeper understanding of video content. Fortunately, the emergence of large language models (LLMs) has enabled the generation of diverse text. This work utilizes LLMs to generate varied semantic annotations (in terms of text lengths and granularities) for representative SOT benchmarks, thereby establishing a novel multi-modal benchmark. Specifically, we (1) propose a new visual language tracking benchmark with diverse texts, named DTVLT, based on five prominent VLT and SOT benchmarks, including three sub-tasks: short-term tracking, long-term tracking, and global instance tracking. (2) We offer four granularity texts in our benchmark, considering the extent and density of semantic information. We expect this multi-granular generation strategy to foster a favorable environment for VLT and video understanding research. (3) We conduct comprehensive experimental analyses on DTVLT, evaluating the impact of diverse text on tracking performance and hope the identified performance bottlenecks of existing algorithms can support further research in VLT and video understanding. The proposed benchmark, experimental results and toolkit will be released gradually on http://videocube.aitestunion.com/.<|reference_end|>
arxiv
@article{li2024dtvlt:, title={DTVLT: A Multi-modal Diverse Text Benchmark for Visual Language Tracking Based on LLM}, author={Xuchen Li, Shiyu Hu, Xiaokun Feng, Dailing Zhang, Meiqi Wu, Jing Zhang, Kaiqi Huang}, journal={arXiv preprint arXiv:2410.02492}, year={2024}, archivePrefix={arXiv}, eprint={2410.02492}, primaryClass={cs.CV cs.CL} }
li2024dtvlt:
arxiv-665059
2410.02496
Efficient learning of differential network in multi-source non-paranormal graphical models
<|reference_start|>Efficient learning of differential network in multi-source non-paranormal graphical models: This paper addresses the learning of sparse structural changes, or the differential network, between two classes of non-paranormal graphical models. We assume a multi-source and heterogeneous dataset is available for each class, where the covariance matrices are identical for all non-paranormal graphical models. The differential network, which is encoded by the difference precision matrix, can then be decoded by optimizing a lasso-penalized D-trace loss function. To this aim, an efficient approach is proposed that outputs the exact solution path, outperforming previous methods that only sample the solution path at pre-selected regularization parameters. Notably, our proposed method has low computational complexity, especially when the differential network is sparse. Our simulations on synthetic data demonstrate superior performance of our strategy in terms of speed and accuracy compared to an existing method. Moreover, our strategy of combining datasets from multiple sources is shown to be very effective in inferring the differential network in real-world problems. This is backed by our experimental results on drug resistance in tumor cancers. In the latter case, our strategy outputs important genes for drug resistance that are already confirmed by various independent studies.<|reference_end|>
arxiv
@article{nikahd2024efficient, title={Efficient learning of differential network in multi-source non-paranormal graphical models}, author={Mojtaba Nikahd and Seyed Abolfazl Motahari}, journal={arXiv preprint arXiv:2410.02496}, year={2024}, archivePrefix={arXiv}, eprint={2410.02496}, primaryClass={cs.LG} }
nikahd2024efficient
arxiv-665060
2410.02498
Dynamic Gradient Alignment for Online Data Mixing
<|reference_start|>Dynamic Gradient Alignment for Online Data Mixing: The composition of training data mixtures is critical for effectively training large language models (LLMs), as it directly impacts their performance on downstream tasks. Our goal is to identify an optimal data mixture to specialize an LLM for a specific task with access to only a few examples. Traditional approaches to this problem include ad-hoc reweighting methods, importance sampling, and gradient alignment techniques. This paper focuses on gradient alignment and introduces Dynamic Gradient Alignment (DGA), a scalable online gradient alignment algorithm. DGA dynamically estimates the pre-training data mixture on which the models' gradients align as well as possible with those of the model on the specific task. DGA is the first gradient alignment approach that incurs minimal overhead compared to standard pre-training and outputs a competitive model, eliminating the need for retraining the model. Experimentally, we demonstrate significant improvements over importance sampling in two key scenarios: (i) when the pre-training set is small and importance sampling overfits due to limited data; and (ii) when there is insufficient specialized data, trapping importance sampling on narrow pockets of data. Our findings underscore the effectiveness of gradient alignment methods in optimizing training data mixtures, particularly in data-constrained environments, and offer a practical solution for enhancing LLM performance on specific tasks with limited data availability.<|reference_end|>
arxiv
@article{fan2024dynamic, title={Dynamic Gradient Alignment for Online Data Mixing}, author={Simin Fan, David Grangier, Pierre Ablin}, journal={arXiv preprint arXiv:2410.02498}, year={2024}, archivePrefix={arXiv}, eprint={2410.02498}, primaryClass={cs.LG cs.CL} }
fan2024dynamic
arxiv-665061
2410.02499
Defining Knowledge: Bridging Epistemology and Large Language Models
<|reference_start|>Defining Knowledge: Bridging Epistemology and Large Language Models: Knowledge claims are abundant in the literature on large language models (LLMs); but can we say that GPT-4 truly "knows" the Earth is round? To address this question, we review standard definitions of knowledge in epistemology and we formalize interpretations applicable to LLMs. In doing so, we identify inconsistencies and gaps in how current NLP research conceptualizes knowledge with respect to epistemological frameworks. Additionally, we conduct a survey of 100 professional philosophers and computer scientists to compare their preferences in knowledge definitions and their views on whether LLMs can really be said to know. Finally, we suggest evaluation protocols for testing knowledge in accordance to the most relevant definitions.<|reference_end|>
arxiv
@article{fierro2024defining, title={Defining Knowledge: Bridging Epistemology and Large Language Models}, author={Constanza Fierro, Ruchira Dhar, Filippos Stamatiou, Nicolas Garneau, Anders S{\o}gaard}, journal={arXiv preprint arXiv:2410.02499}, year={2024}, archivePrefix={arXiv}, eprint={2410.02499}, primaryClass={cs.CL} }
fierro2024defining
arxiv-665062
2410.02503
Mixed-Session Conversation with Egocentric Memory
<|reference_start|>Mixed-Session Conversation with Egocentric Memory: Recently introduced dialogue systems have demonstrated high usability. However, they still fall short of reflecting real-world conversation scenarios. Current dialogue systems exhibit an inability to replicate the dynamic, continuous, long-term interactions involving multiple partners. This shortfall arises because there have been limited efforts to account for both aspects of real-world dialogues: deeply layered interactions over the long-term dialogue and widely expanded conversation networks involving multiple participants. As the effort to incorporate these aspects combined, we introduce Mixed-Session Conversation, a dialogue system designed to construct conversations with various partners in a multi-session dialogue setup. We propose a new dataset called MiSC to implement this system. The dialogue episodes of MiSC consist of 6 consecutive sessions, with four speakers (one main speaker and three partners) appearing in each episode. Also, we propose a new dialogue model with a novel memory management mechanism, called Egocentric Memory Enhanced Mixed-Session Conversation Agent (EMMA). EMMA collects and retains memories from the main speaker's perspective during conversations with partners, enabling seamless continuity in subsequent interactions. Extensive human evaluations validate that the dialogues in MiSC demonstrate a seamless conversational flow, even when conversation partners change in each session. EMMA trained with MiSC is also evaluated to maintain high memorability without contradiction throughout the entire conversation.<|reference_end|>
arxiv
@article{jang2024mixed-session, title={Mixed-Session Conversation with Egocentric Memory}, author={Jihyoung Jang, Taeyoung Kim, Hyounghun Kim}, journal={arXiv preprint arXiv:2410.02503}, year={2024}, archivePrefix={arXiv}, eprint={2410.02503}, primaryClass={cs.CL cs.AI} }
jang2024mixed-session
arxiv-665063
2410.02504
Dual Active Learning for Reinforcement Learning from Human Feedback
<|reference_start|>Dual Active Learning for Reinforcement Learning from Human Feedback: Aligning large language models (LLMs) with human preferences is critical to recent advances in generative artificial intelligence. Reinforcement learning from human feedback (RLHF) is widely applied to achieve this objective. A key step in RLHF is to learn the reward function from human feedback. However, human feedback is costly and time-consuming, making it essential to collect high-quality conversation data for human teachers to label. Additionally, different human teachers have different levels of expertise. It is thus critical to query the most appropriate teacher for their opinions. In this paper, we use offline reinforcement learning (RL) to formulate the alignment problem. Motivated by the idea of $D$-optimal design, we first propose a dual active reward learning algorithm for the simultaneous selection of conversations and teachers. Next, we apply pessimistic RL to solve the alignment problem, based on the learned reward estimator. Theoretically, we show that the reward estimator obtained through our proposed adaptive selection strategy achieves minimal generalized variance asymptotically, and prove that the sub-optimality of our pessimistic policy scales as $O(1/\sqrt{T})$ with a given sample budget $T$. Through simulations and experiments on LLMs, we demonstrate the effectiveness of our algorithm and its superiority over state-of-the-arts.<|reference_end|>
arxiv
@article{liu2024dual, title={Dual Active Learning for Reinforcement Learning from Human Feedback}, author={Pangpang Liu, Chengchun Shi, Will Wei Sun}, journal={arXiv preprint arXiv:2410.02504}, year={2024}, archivePrefix={arXiv}, eprint={2410.02504}, primaryClass={stat.ML cs.LG} }
liu2024dual
arxiv-665064
2410.02505
Dog-IQA: Standard-guided Zero-shot MLLM for Mix-grained Image Quality Assessment
<|reference_start|>Dog-IQA: Standard-guided Zero-shot MLLM for Mix-grained Image Quality Assessment: Image quality assessment (IQA) serves as the golden standard for all models' performance in nearly all computer vision fields. However, it still suffers from poor out-of-distribution generalization ability and expensive training costs. To address these problems, we propose Dog-IQA, a standard-guided zero-shot mix-grained IQA method, which is training-free and utilizes the exceptional prior knowledge of multimodal large language models (MLLMs). To obtain accurate IQA scores, namely scores consistent with humans, we design an MLLM-based inference pipeline that imitates human experts. In detail, Dog-IQA applies two techniques. First, Dog-IQA objectively scores with specific standards that utilize MLLM's behavior pattern and minimize the influence of subjective factors. Second, Dog-IQA comprehensively takes local semantic objects and the whole image as input and aggregates their scores, leveraging local and global information. Our proposed Dog-IQA achieves state-of-the-art (SOTA) performance compared with training-free methods, and competitive performance compared with training-based methods in cross-dataset scenarios. Our code will be available at https://github.com/Kai-Liu001/Dog-IQA.<|reference_end|>
arxiv
@article{liu2024dog-iqa:, title={Dog-IQA: Standard-guided Zero-shot MLLM for Mix-grained Image Quality Assessment}, author={Kai Liu, Ziqing Zhang, Wenbo Li, Renjing Pei, Fenglong Song, Xiaohong Liu, Linghe Kong, and Yulun Zhang}, journal={arXiv preprint arXiv:2410.02505}, year={2024}, archivePrefix={arXiv}, eprint={2410.02505}, primaryClass={cs.CV cs.AI} }
liu2024dog-iqa:
arxiv-665065
2410.02506
Cut the Crap: An Economical Communication Pipeline for LLM-based Multi-Agent Systems
<|reference_start|>Cut the Crap: An Economical Communication Pipeline for LLM-based Multi-Agent Systems: Recent advancements in large language model (LLM)-powered agents have shown that collective intelligence can significantly outperform individual capabilities, largely attributed to the meticulously designed inter-agent communication topologies. Though impressive in performance, existing multi-agent pipelines inherently introduce substantial token overhead, as well as increased economic costs, which pose challenges for their large-scale deployments. In response to this challenge, we propose an economical, simple, and robust multi-agent communication framework, termed $\texttt{AgentPrune}$, which can seamlessly integrate into mainstream multi-agent systems and prunes redundant or even malicious communication messages. Technically, $\texttt{AgentPrune}$ is the first to identify and formally define the \textit{communication redundancy} issue present in current LLM-based multi-agent pipelines, and efficiently performs one-shot pruning on the spatial-temporal message-passing graph, yielding a token-economic and high-performing communication topology. Extensive experiments across six benchmarks demonstrate that $\texttt{AgentPrune}$ \textbf{(I)} achieves results comparable to state-of-the-art topologies at merely $\$5.6$ cost compared to their $\$43.7$, \textbf{(II)} integrates seamlessly into existing multi-agent frameworks with $28.1\%\sim72.8\%\downarrow$ token reduction, and \textbf{(III)} successfully defends against two types of agent-based adversarial attacks with a $3.5\%\sim10.8\%\uparrow$ performance boost.<|reference_end|>
arxiv
@article{zhang2024cut, title={Cut the Crap: An Economical Communication Pipeline for LLM-based Multi-Agent Systems}, author={Guibin Zhang, Yanwei Yue, Zhixun Li, Sukwon Yun, Guancheng Wan, Kun Wang, Dawei Cheng, Jeffrey Xu Yu, Tianlong Chen}, journal={arXiv preprint arXiv:2410.02506}, year={2024}, archivePrefix={arXiv}, eprint={2410.02506}, primaryClass={cs.MA cs.LG} }
zhang2024cut
arxiv-665066
2410.02507
Can Large Language Models Grasp Legal Theories? Enhance Legal Reasoning with Insights from Multi-Agent Collaboration
<|reference_start|>Can Large Language Models Grasp Legal Theories? Enhance Legal Reasoning with Insights from Multi-Agent Collaboration: Large Language Models (LLMs) could struggle to fully understand legal theories and perform complex legal reasoning tasks. In this study, we introduce a challenging task (confusing charge prediction) to better evaluate LLMs' understanding of legal theories and reasoning capabilities. We also propose a novel framework: Multi-Agent framework for improving complex Legal Reasoning capability (MALR). MALR employs non-parametric learning, encouraging LLMs to automatically decompose complex legal tasks and mimic the human learning process to extract insights from legal rules, helping LLMs better understand legal theories and enhance their legal reasoning abilities. Extensive experiments on multiple real-world datasets demonstrate that the proposed framework effectively addresses complex reasoning issues in practical scenarios, paving the way for more reliable applications in the legal domain.<|reference_end|>
arxiv
@article{yuan2024can, title={Can Large Language Models Grasp Legal Theories? Enhance Legal Reasoning with Insights from Multi-Agent Collaboration}, author={Weikang Yuan, Junjie Cao, Zhuoren Jiang, Yangyang Kang, Jun Lin, Kaisong Song, Tianqianjin Lin, Pengwei Yan, Changlong Sun, Xiaozhong Liu}, journal={arXiv preprint arXiv:2410.02507}, year={2024}, archivePrefix={arXiv}, eprint={2410.02507}, primaryClass={cs.AI cs.CL} }
yuan2024can
arxiv-665067
2410.02510
SwarmCVT: Centroidal Voronoi Tessellation-Based Path Planning for Very-Large-Scale Robotics
<|reference_start|>SwarmCVT: Centroidal Voronoi Tessellation-Based Path Planning for Very-Large-Scale Robotics: Swarm robotics, or very large-scale robotics (VLSR), has many meaningful applications for complicated tasks. However, the complexity of motion control and energy costs stack up quickly as the number of robots increases. In addressing this problem, our previous studies have formulated various methods employing macroscopic and microscopic approaches. These methods enable microscopic robots to adhere to a reference Gaussian mixture model (GMM) distribution observed at the macroscopic scale. As a result, optimizing the macroscopic level will result in an optimal overall result. However, all these methods require systematic and global generation of Gaussian components (GCs) within obstacle-free areas to construct the GMM trajectories. This work utilizes centroidal Voronoi tessellation to generate GCs methodically. Consequently, it demonstrates performance improvement while also ensuring consistency and reliability.<|reference_end|>
arxiv
@article{gao2024swarmcvt:, title={SwarmCVT: Centroidal Voronoi Tessellation-Based Path Planning for Very-Large-Scale Robotics}, author={James Gao, Jacob Lee, Yuting Zhou, Yunze Hu, Chang Liu, Pingping Zhu}, journal={arXiv preprint arXiv:2410.02510}, year={2024}, archivePrefix={arXiv}, eprint={2410.02510}, primaryClass={cs.RO cs.MA cs.SY eess.SY} }
gao2024swarmcvt:
arxiv-665068
2410.02511
Choices are More Important than Efforts: LLM Enables Efficient Multi-Agent Exploration
<|reference_start|>Choices are More Important than Efforts: LLM Enables Efficient Multi-Agent Exploration: With expansive state-action spaces, efficient multi-agent exploration remains a longstanding challenge in reinforcement learning. Although pursuing novelty, diversity, or uncertainty attracts increasing attention, redundant efforts brought by exploration without proper guidance choices pose a practical issue for the community. This paper introduces a systematic approach, termed LEMAE, choosing to channel informative task-relevant guidance from a knowledgeable Large Language Model (LLM) for Efficient Multi-Agent Exploration. Specifically, we ground linguistic knowledge from the LLM into symbolic key states that are critical for task fulfillment, in a discriminative manner at low LLM inference costs. To unleash the power of key states, we design Subspace-based Hindsight Intrinsic Reward (SHIR) to guide agents toward key states by increasing reward density. Additionally, we build the Key State Memory Tree (KSMT) to track transitions between key states in a specific task for organized exploration. Benefiting from diminishing redundant explorations, LEMAE outperforms existing SOTA approaches on the challenging benchmarks (e.g., SMAC and MPE) by a large margin, achieving a 10x acceleration in certain scenarios.<|reference_end|>
arxiv
@article{qu2024choices, title={Choices are More Important than Efforts: LLM Enables Efficient Multi-Agent Exploration}, author={Yun Qu, Boyuan Wang, Yuhang Jiang, Jianzhun Shao, Yixiu Mao, Cheems Wang, Chang Liu, Xiangyang Ji}, journal={arXiv preprint arXiv:2410.02511}, year={2024}, archivePrefix={arXiv}, eprint={2410.02511}, primaryClass={cs.AI cs.MA} }
qu2024choices
arxiv-665069
2410.02512
SAFLEX: Self-Adaptive Augmentation via Feature Label Extrapolation
<|reference_start|>SAFLEX: Self-Adaptive Augmentation via Feature Label Extrapolation: Data augmentation, a cornerstone technique in deep learning, is crucial in enhancing model performance, especially with scarce labeled data. While traditional techniques are effective, their reliance on hand-crafted methods limits their applicability across diverse data types and tasks. Although modern learnable augmentation methods offer increased adaptability, they are computationally expensive and challenging to incorporate within prevalent augmentation workflows. In this work, we present a novel, efficient method for data augmentation, effectively bridging the gap between existing augmentation strategies and emerging datasets and learning tasks. We introduce SAFLEX (Self-Adaptive Augmentation via Feature Label EXtrapolation), which learns the sample weights and soft labels of augmented samples provided by any given upstream augmentation pipeline, using a specifically designed efficient bilevel optimization algorithm. Remarkably, SAFLEX effectively reduces the noise and label errors of the upstream augmentation pipeline with a marginal computational cost. As a versatile module, SAFLEX excels across diverse datasets, including natural and medical images and tabular data, showcasing its prowess in few-shot learning and out-of-distribution generalization. SAFLEX seamlessly integrates with common augmentation strategies like RandAug, CutMix, and those from large pre-trained generative models like stable diffusion and is also compatible with frameworks such as CLIP's fine-tuning. Our findings highlight the potential to adapt existing augmentation pipelines for new data types and tasks, signaling a move towards more adaptable and resilient training frameworks.<|reference_end|>
arxiv
@article{ding2024saflex:, title={SAFLEX: Self-Adaptive Augmentation via Feature Label Extrapolation}, author={Mucong Ding, Bang An, Yuancheng Xu, Anirudh Satheesh, Furong Huang}, journal={arXiv preprint arXiv:2410.02512}, year={2024}, archivePrefix={arXiv}, eprint={2410.02512}, primaryClass={cs.LG cs.AI} }
ding2024saflex:
arxiv-665070
2410.02513
Minimax Group Fairness in Strategic Classification
<|reference_start|>Minimax Group Fairness in Strategic Classification: In strategic classification, agents manipulate their features, at a cost, to receive a positive classification outcome from the learner's classifier. The goal of the learner in such settings is to learn a classifier that is robust to strategic manipulations. While the majority of works in this domain consider accuracy as the primary objective of the learner, in this work, we consider learning objectives that have group fairness guarantees in addition to accuracy guarantees. We work with the minimax group fairness notion that asks for minimizing the maximal group error rate across population groups. We formalize a fairness-aware Stackelberg game between a population of agents consisting of several groups, with each group having its own cost function, and a learner in the agnostic PAC setting in which the learner is working with a hypothesis class H. When the cost functions of the agents are separable, we show the existence of an efficient algorithm that finds an approximately optimal deterministic classifier for the learner when the number of groups is small. This algorithm remains efficient, both statistically and computationally, even when H is the set of all classifiers. We then consider cost functions that are not necessarily separable and show the existence of oracle-efficient algorithms that find approximately optimal randomized classifiers for the learner when H has finite strategic VC dimension. These algorithms work under the assumption that the learner is fully transparent: the learner draws a classifier from its distribution (randomized classifier) before the agents respond by manipulating their feature vectors. We highlight the effectiveness of such transparency in developing oracle-efficient algorithms. We conclude with verifying the efficacy of our algorithms on real data by conducting an experimental analysis.<|reference_end|>
arxiv
@article{diana2024minimax, title={Minimax Group Fairness in Strategic Classification}, author={Emily Diana, Saeed Sharifi-Malvajerdi, Ali Vakilian}, journal={arXiv preprint arXiv:2410.02513}, year={2024}, archivePrefix={arXiv}, eprint={2410.02513}, primaryClass={cs.LG} }
diana2024minimax
arxiv-665071
2410.02516
Learning Emergence of Interaction Patterns across Independent RL Agents in Multi-Agent Environments
<|reference_start|>Learning Emergence of Interaction Patterns across Independent RL Agents in Multi-Agent Environments: Many real-world problems, such as controlling swarms of drones and urban traffic, naturally lend themselves to modeling as multi-agent reinforcement learning (RL) problems. However, existing multi-agent RL methods often suffer from scalability challenges, primarily due to the introduction of communication among agents. Consequently, a key challenge lies in adapting the success of deep learning in single-agent RL to the multi-agent setting. In response to this challenge, we propose an approach that fundamentally reimagines multi-agent environments. Unlike conventional methods that model each agent individually with separate networks, our approach, the Bottom Up Network (BUN), adopts a unique perspective. BUN treats the collective of multi-agents as a unified entity while employing a specialized weight initialization strategy that promotes independent learning. Furthermore, we dynamically establish connections among agents using gradient information, enabling coordination when necessary while maintaining these connections as limited and sparse to effectively manage the computational budget. Our extensive empirical evaluations across a variety of cooperative multi-agent scenarios, including tasks such as cooperative navigation and traffic control, consistently demonstrate BUN's superiority over baseline methods with substantially reduced computational costs.<|reference_end|>
arxiv
@article{baddam2024learning, title={Learning Emergence of Interaction Patterns across Independent RL Agents in Multi-Agent Environments}, author={Vasanth Reddy Baddam, Suat Gumussoy, Almuatazbellah Boker and Hoda Eldardiry}, journal={arXiv preprint arXiv:2410.02516}, year={2024}, archivePrefix={arXiv}, eprint={2410.02516}, primaryClass={cs.MA cs.LG} }
baddam2024learning
arxiv-665072
2410.02519
Semantic-Guided RL for Interpretable Feature Engineering
<|reference_start|>Semantic-Guided RL for Interpretable Feature Engineering: The quality of Machine Learning (ML) models strongly depends on the input data, as such generating high-quality features is often required to improve the predictive accuracy. This process is referred to as Feature Engineering (FE). However, since manual feature engineering is time-consuming and requires case-by-case domain knowledge, Automated Feature Engineering (AutoFE) is crucial. A major challenge that remains is to generate interpretable features. To tackle this problem, we introduce SMART, a hybrid approach that uses semantic technologies to guide the generation of interpretable features through a two-step process: Exploitation and Exploration. The former uses Description Logics (DL) to reason on the semantics embedded in Knowledge Graphs (KG) to infer domain-specific features, while the latter exploits the knowledge graph to conduct a guided exploration of the search space through Deep Reinforcement Learning (DRL). Our experiments on public datasets demonstrate that SMART significantly improves prediction accuracy while ensuring a high level of interpretability.<|reference_end|>
arxiv
@article{bouadi2024semantic-guided, title={Semantic-Guided RL for Interpretable Feature Engineering}, author={Mohamed Bouadi, Arta Alavi, Salima Benbernou, Mourad Ouziri}, journal={arXiv preprint arXiv:2410.02519}, year={2024}, archivePrefix={arXiv}, eprint={2410.02519}, primaryClass={cs.LG} }
bouadi2024semantic-guided
arxiv-665073
2410.02521
Methods for Automatic Matrix Language Determination of Code-Switched Speech
<|reference_start|>Methods for Automatic Matrix Language Determination of Code-Switched Speech: Code-switching (CS) is the process of speakers interchanging between two or more languages, which is becoming increasingly common in the modern world. In order to better describe CS speech, the Matrix Language Frame (MLF) theory introduces the concept of a Matrix Language, which is the language that provides the grammatical structure for a CS utterance. In this work the MLF theory was used to develop systems for Matrix Language Identity (MLID) determination. The MLID of English/Mandarin and English/Spanish CS text and speech was compared to acoustic language identity (LID), which is a typical way to identify a language in monolingual utterances. MLID predictors from audio show higher correlation with the textual principles than LID in all cases while also outperforming LID in an MLID recognition task based on F1 macro (60\%) and correlation score (0.38). This novel approach has identified that non-English languages (Mandarin and Spanish) are preferred over the English language as the ML, contrary to the monolingual choice of LID.<|reference_end|>
arxiv
@article{iakovenko2024methods, title={Methods for Automatic Matrix Language Determination of Code-Switched Speech}, author={Olga Iakovenko and Thomas Hain}, journal={arXiv preprint arXiv:2410.02521}, year={2024}, archivePrefix={arXiv}, eprint={2410.02521}, primaryClass={cs.CL} }
iakovenko2024methods
arxiv-665074
2410.02523
Med-TTT: Vision Test-Time Training model for Medical Image Segmentation
<|reference_start|>Med-TTT: Vision Test-Time Training model for Medical Image Segmentation: Medical image segmentation plays a crucial role in clinical diagnosis and treatment planning. Although models based on convolutional neural networks (CNNs) and Transformers have achieved remarkable success in medical image segmentation tasks, they still face challenges such as high computational complexity and the loss of local features when capturing long-range dependencies. To address these limitations, we propose Med-TTT, a visual backbone network integrated with Test-Time Training (TTT) layers, which incorporates dynamic adjustment capabilities. Med-TTT introduces the Vision-TTT layer, which enables effective modeling of long-range dependencies with linear computational complexity and adaptive parameter adjustment during inference. Furthermore, we designed a multi-resolution fusion mechanism to combine image features at different scales, facilitating the identification of subtle lesion characteristics in complex backgrounds. At the same time, we adopt a frequency domain feature enhancement strategy based on high-pass filtering, which can better capture texture and fine-grained details in images. Experimental results demonstrate that Med-TTT significantly outperforms existing methods on multiple medical image datasets, exhibiting strong segmentation capabilities, particularly in complex image backgrounds. The model achieves leading performance in terms of accuracy, sensitivity, and Dice coefficient, providing an efficient and robust solution for the field of medical image segmentation. The code is available at https://github.com/Jiashu-Xu/Med-TTT.<|reference_end|>
arxiv
@article{xu2024med-ttt:, title={Med-TTT: Vision Test-Time Training model for Medical Image Segmentation}, author={Jiashu Xu}, journal={arXiv preprint arXiv:2410.02523}, year={2024}, archivePrefix={arXiv}, eprint={2410.02523}, primaryClass={eess.IV cs.CV} }
xu2024med-ttt:
arxiv-665075
2410.02525
Contextual Document Embeddings
<|reference_start|>Contextual Document Embeddings: Dense document embeddings are central to neural retrieval. The dominant paradigm is to train and construct embeddings by running encoders directly on individual documents. In this work, we argue that these embeddings, while effective, are implicitly out-of-context for targeted use cases of retrieval, and that a contextualized document embedding should take into account both the document and neighboring documents in context - analogous to contextualized word embeddings. We propose two complementary methods for contextualized document embeddings: first, an alternative contrastive learning objective that explicitly incorporates the document neighbors into the intra-batch contextual loss; second, a new contextual architecture that explicitly encodes neighbor document information into the encoded representation. Results show that both methods achieve better performance than biencoders in several settings, with differences especially pronounced out-of-domain. We achieve state-of-the-art results on the MTEB benchmark with no hard negative mining, score distillation, dataset-specific instructions, intra-GPU example-sharing, or extremely large batch sizes. Our method can be applied to improve performance on any contrastive learning dataset and any biencoder.<|reference_end|>
arxiv
@article{morris2024contextual, title={Contextual Document Embeddings}, author={John X. Morris, Alexander M. Rush}, journal={arXiv preprint arXiv:2410.02525}, year={2024}, archivePrefix={arXiv}, eprint={2410.02525}, primaryClass={cs.CL cs.AI} }
morris2024contextual
arxiv-665076
2410.02527
Learning from Offline Foundation Features with Tensor Augmentations
<|reference_start|>Learning from Offline Foundation Features with Tensor Augmentations: We introduce Learning from Offline Foundation Features with Tensor Augmentations (LOFF-TA), an efficient training scheme designed to harness the capabilities of foundation models in limited resource settings where their direct development is not feasible. LOFF-TA involves training a compact classifier on cached feature embeddings from a frozen foundation model, resulting in up to $37\times$ faster training and up to $26\times$ reduced GPU memory usage. Because the embeddings of augmented images would be too numerous to store, yet the augmentation process is essential for training, we propose to apply tensor augmentations to the cached embeddings of the original non-augmented images. LOFF-TA makes it possible to leverage the power of foundation models, regardless of their size, in settings with limited computational capacity. Moreover, LOFF-TA can be used to apply foundation models to high-resolution images without increasing compute. In certain scenarios, we find that training with LOFF-TA yields better results than directly fine-tuning the foundation model.<|reference_end|>
arxiv
@article{konuk2024learning, title={Learning from Offline Foundation Features with Tensor Augmentations}, author={Emir Konuk and Christos Matsoukas and Moein Sorkhei and Phitchapha Lertsiravaramet and Kevin Smith}, journal={arXiv preprint arXiv:2410.02527}, year={2024}, archivePrefix={arXiv}, eprint={2410.02527}, primaryClass={cs.CV} }
konuk2024learning
arxiv-665077
2410.02528
HiFiSeg: High-Frequency Information Enhanced Polyp Segmentation with Global-Local Vision Transformer
<|reference_start|>HiFiSeg: High-Frequency Information Enhanced Polyp Segmentation with Global-Local Vision Transformer: Numerous studies have demonstrated the strong performance of Vision Transformer (ViT)-based methods across various computer vision tasks. However, ViT models often struggle to effectively capture high-frequency components in images, which are crucial for detecting small targets and preserving edge details, especially in complex scenarios. This limitation is particularly challenging in colon polyp segmentation, where polyps exhibit significant variability in structure, texture, and shape. High-frequency information, such as boundary details, is essential for achieving precise semantic segmentation in this context. To address these challenges, we propose HiFiSeg, a novel network for colon polyp segmentation that enhances high-frequency information processing through a global-local vision transformer framework. HiFiSeg leverages the pyramid vision transformer (PVT) as its encoder and introduces two key modules: the global-local interaction module (GLIM) and the selective aggregation module (SAM). GLIM employs a parallel structure to fuse global and local information at multiple scales, effectively capturing fine-grained features. SAM selectively integrates boundary details from low-level features with semantic information from high-level features, significantly improving the model's ability to accurately detect and segment polyps. Extensive experiments on five widely recognized benchmark datasets demonstrate the effectiveness of HiFiSeg for polyp segmentation. Notably, the mDice scores on the challenging CVC-ColonDB and ETIS datasets reached 0.826 and 0.822, respectively, underscoring the superior performance of HiFiSeg in handling the specific complexities of this task.<|reference_end|>
arxiv
@article{ren2024hifiseg:, title={HiFiSeg: High-Frequency Information Enhanced Polyp Segmentation with Global-Local Vision Transformer}, author={Jingjing Ren, Xiaoyong Zhang, Lina Zhang}, journal={arXiv preprint arXiv:2410.02528}, year={2024}, archivePrefix={arXiv}, eprint={2410.02528}, primaryClass={cs.CV} }
ren2024hifiseg:
arxiv-665078
2410.02529
An Edge-Computing based Industrial Gateway for Industry 4.0 using ARM TrustZone Technology
<|reference_start|>An Edge-Computing based Industrial Gateway for Industry 40 using ARM TrustZone Technology: Secure and efficient communication to establish a seamless nexus between the five levels of a typical automation pyramid is paramount to Industry 4.0. Specifically, vertical and horizontal integration of these levels is an overarching requirement to accelerate productivity and improve operational activities. Vertical integration can improve visibility, flexibility, and productivity by connecting systems and applications. Horizontal integration can provide better collaboration and adaptability by connecting internal production facilities, multi-site operations, and third-party partners in a supply chain. In this paper, we propose an Edge-computing-based Industrial Gateway for interfacing information technology and operational technology that can enable Industry 4.0 vertical and horizontal integration. Subsequently, we design and develop a working prototype to demonstrate a remote production-line maintenance use case with a strong focus on security aspects and the edge paradigm to bring computational resources and data storage closer to data sources.<|reference_end|>
arxiv
@article{gupta2024an, title={An Edge-Computing based Industrial Gateway for Industry 4.0 using ARM TrustZone Technology}, author={Sandeep Gupta}, journal={arXiv preprint arXiv:2410.02529}, year={2024}, archivePrefix={arXiv}, eprint={2410.02529}, primaryClass={cs.CR cs.DC cs.ET} }
gupta2024an
arxiv-665079
2410.02530
A Foundation Model for the Solar Dynamics Observatory
<|reference_start|>A Foundation Model for the Solar Dynamics Observatory: SDO-FM is a foundation model using data from NASA's Solar Dynamics Observatory (SDO) spacecraft; integrating three separate instruments to encapsulate the Sun's complex physical interactions into a multi-modal embedding space. This model can be used to streamline scientific investigations involving SDO by making the enormous datasets more computationally accessible for heliophysics research and enable investigations that require instrument fusion. We discuss four key components: an ingestion pipeline to create machine learning ready datasets, the model architecture and training approach, resultant embeddings and fine-tunable models, and finally downstream fine-tuned applications. A key component of this effort has been to include subject matter specialists at each stage of development; reviewing the scientific value and providing guidance for model architecture, dataset, and training paradigm decisions. This paper marks release of our pretrained models and embedding datasets, available to the community on Hugging Face and sdofm.org.<|reference_end|>
arxiv
@article{walsh2024a, title={A Foundation Model for the Solar Dynamics Observatory}, author={James Walsh, Daniel G. Gass, Raul Ramos Pollan, Paul J. Wright, Richard Galvez, Noah Kasmanoff, Jason Naradowsky, Anne Spalding, James Parr, At{\i}l{\i}m G{\"u}ne\c{s} Baydin}, journal={arXiv preprint arXiv:2410.02530}, year={2024}, archivePrefix={arXiv}, eprint={2410.02530}, primaryClass={astro-ph.SR cs.CV} }
walsh2024a
arxiv-665080
2410.02533
A Schema-aware Logic Reformulation for Graph Reachability
<|reference_start|>A Schema-aware Logic Reformulation for Graph Reachability: Graph reachability is the task of understanding whether two distinct points in a graph are interconnected by arcs to which in general a semantic is attached. Reachability has plenty of applications, ranging from motion planning to routing. Improving reachability requires structural knowledge of relations so as to avoid the complexity of traditional depth-first and breadth-first strategies, implemented in logic languages. In some contexts, graphs are enriched with their schema definitions establishing domain and range for every arc. The introduction of a schema-aware formalization for guiding the search may result in a noticeable improvement by cutting out useless paths and prioritising those that, in principle, reach the target earlier. In this work, we propose a strategy to automatically exclude and sort certain graph paths by exploiting the higher-level conceptualization of instances. The aim is to obtain a new first-order logic reformulation of the graph reachability scenario, capable of improving the traditional algorithms in terms of time, space requirements, and number of backtracks. The experiments exhibit the expected advantages of the approach in reducing the number of backtracks during the search strategy, resulting in saving time and space as well.<|reference_end|>
arxiv
@article{di pierro2024a, title={A Schema-aware Logic Reformulation for Graph Reachability}, author={Davide Di Pierro and Stefano Ferilli}, journal={arXiv preprint arXiv:2410.02533}, year={2024}, archivePrefix={arXiv}, eprint={2410.02533}, primaryClass={cs.AI} }
di pierro2024a
arxiv-665081
2410.02534
Pseudo-Stereo Inputs: A Solution to the Occlusion Challenge in Self-Supervised Stereo Matching
<|reference_start|>Pseudo-Stereo Inputs: A Solution to the Occlusion Challenge in Self-Supervised Stereo Matching: Self-supervised stereo matching holds great promise for application and research due to its independence from expensive labeled data. However, direct self-supervised stereo matching paradigms based on photometric loss functions have consistently struggled with performance issues due to the occlusion challenge. The crux of the occlusion challenge lies in the fact that the positions of occluded pixels consistently align with the epipolar search direction defined by the input stereo images, leading to persistent information loss and erroneous feedback at fixed locations during self-supervised training. In this work, we propose a simple yet highly effective pseudo-stereo inputs strategy to address the core occlusion challenge. This strategy decouples the input and feedback images, compelling the network to probabilistically sample information from both sides of the occluding objects. As a result, the persistent lack of information in the aforementioned fixed occlusion areas is mitigated. Building upon this, we further address feedback conflicts and overfitting issues arising from the strategy. By integrating these components, our method achieves stable and significant performance improvements compared to existing methods. Quantitative experiments are conducted to evaluate the performance. Qualitative experiments further demonstrate accurate disparity inference even at occluded regions. These results demonstrate a significant advancement over previous methods in the field of direct self-supervised stereo matching based on photometric loss. The proposed pseudo-stereo inputs strategy, due to its simplicity and effectiveness, has the potential to serve as a new paradigm for direct self-supervised stereo matching. Code is available at https://github.com/qrzyang/Pseudo-Stereo.<|reference_end|>
arxiv
@article{yang2024pseudo-stereo, title={Pseudo-Stereo Inputs: A Solution to the Occlusion Challenge in Self-Supervised Stereo Matching}, author={Ruizhi Yang, Xingqiang Li, Jiajun Bai and Jinsong Du}, journal={arXiv preprint arXiv:2410.02534}, year={2024}, archivePrefix={arXiv}, eprint={2410.02534}, primaryClass={cs.CV} }
yang2024pseudo-stereo
arxiv-665082
2410.02536
Intelligence at the Edge of Chaos
<|reference_start|>Intelligence at the Edge of Chaos: We explore the emergence of intelligent behavior in artificial systems by investigating how the complexity of rule-based systems influences the capabilities of models trained to predict these rules. Our study focuses on elementary cellular automata (ECA), simple yet powerful one-dimensional systems that generate behaviors ranging from trivial to highly complex. By training distinct Large Language Models (LLMs) on different ECAs, we evaluated the relationship between the complexity of the rules' behavior and the intelligence exhibited by the LLMs, as reflected in their performance on downstream tasks. Our findings reveal that rules with higher complexity lead to models exhibiting greater intelligence, as demonstrated by their performance on reasoning and chess move prediction tasks. Both uniform and periodic systems, and often also highly chaotic systems, resulted in poorer downstream performance, highlighting a sweet spot of complexity conducive to intelligence. We conjecture that intelligence arises from the ability to predict complexity and that creating intelligence may require only exposure to complexity.<|reference_end|>
arxiv
@article{zhang2024intelligence, title={Intelligence at the Edge of Chaos}, author={Shiyang Zhang, Aakash Patel, Syed A Rizvi, Nianchen Liu, Sizhuang He, Amin Karbasi, Emanuele Zappala, David van Dijk}, journal={arXiv preprint arXiv:2410.02536}, year={2024}, archivePrefix={arXiv}, eprint={2410.02536}, primaryClass={cs.AI cs.NE} }
zhang2024intelligence
arxiv-665083
2410.02538
Algorithms For Automatic Accentuation And Transcription Of Russian Texts In Speech Recognition Systems
<|reference_start|>Algorithms For Automatic Accentuation And Transcription Of Russian Texts In Speech Recognition Systems: This paper presents an overview of rule-based system for automatic accentuation and phonemic transcription of Russian texts for speech connected tasks, such as Automatic Speech Recognition (ASR). Two parts of the developed system, accentuation and transcription, use different approaches to achieve correct phonemic representations of input phrases. Accentuation is based on "Grammatical dictionary of the Russian language" of A.A. Zaliznyak and wiktionary corpus. To distinguish homographs, the accentuation system also utilises morphological information of the sentences based on Recurrent Neural Networks (RNN). Transcription algorithms apply the rules presented in the monograph of B.M. Lobanov and L.I. Tsirulnik "Computer Synthesis and Voice Cloning". The rules described in the present paper are implemented in an open-source module, which can be of use to any scientific study connected to ASR or Speech To Text (STT) tasks. Automatically marked up text annotations of the Russian Voxforge database were used as training data for an acoustic model in CMU Sphinx. The resulting acoustic model was evaluated on cross-validation, mean Word Accuracy being 71.2%. The developed toolkit is written in the Python language and is accessible on GitHub for any researcher interested.<|reference_end|>
arxiv
@article{iakovenko2024algorithms, title={Algorithms For Automatic Accentuation And Transcription Of Russian Texts In Speech Recognition Systems}, author={Olga Iakovenko, Ivan Bondarenko, Mariya Borovikova and Daniil Vodolazsky}, journal={arXiv preprint arXiv:2410.02538}, year={2024}, doi={10.1007/978-3-319-99579-3_78}, archivePrefix={arXiv}, eprint={2410.02538}, primaryClass={cs.CL} }
iakovenko2024algorithms
arxiv-665084
2410.02539
Exploiting HDMI and USB Ports for GPU Side-Channel Insights
<|reference_start|>Exploiting HDMI and USB Ports for GPU Side-Channel Insights: Modern computers rely on USB and HDMI ports for connecting external peripherals and display devices. Despite their built-in security measures, these ports remain susceptible to passive power-based side-channel attacks. This paper presents a new class of attacks that exploit power consumption patterns at these ports to infer GPU activities. We develop a custom device that plugs into these ports and demonstrate that its high-resolution power measurements can drive successful inferences about GPU processes, such as neural network computations and video rendering. The ubiquitous presence of USB and HDMI ports allows for discreet placement of the device, and its non-interference with data channels ensures that no security alerts are triggered. Our findings underscore the need to reevaluate and strengthen the current generation of HDMI and USB port security defenses.<|reference_end|>
arxiv
@article{arefin2024exploiting, title={Exploiting HDMI and USB Ports for GPU Side-Channel Insights}, author={Sayed Erfan Arefin, Abdul Serwadda}, journal={arXiv preprint arXiv:2410.02539}, year={2024}, archivePrefix={arXiv}, eprint={2410.02539}, primaryClass={cs.CR} }
arefin2024exploiting
arxiv-665085
2410.02540
$hp$-error analysis of mixed-order hybrid high-order methods for elliptic problems on simplicial meshes
<|reference_start|>$hp$-error analysis of mixed-order hybrid high-order methods for elliptic problems on simplicial meshes: We present both $hp$-a priori and $hp$-a posteriori error analysis of a mixed-order hybrid high-order (HHO) method to approximate second-order elliptic problems on simplicial meshes. Our main result on the $hp$-a priori error analysis is a $\frac12$-order $p$-suboptimal error estimate. This result is, to our knowledge, the first of this kind for hybrid nonconforming methods and matches the state-of-the-art for other nonconforming methods as discontinuous Galerkin methods. Our second main result is a residual-based $hp$-a posteriori upper error bound, comprising residual, normal flux jump, tangential jump, and stabilization estimators (plus data oscillation terms). The first three terms are $p$-optimal and only the latter is $\frac12$-order $p$-suboptimal. This result is, to our knowledge, the first $hp$-a posteriori error estimate for HHO methods. A novel approach based on the partition-of-unity provided by hat basis functions and on local Helmholtz decompositions on vertex stars is devised to estimate the nonconformity error. Finally, we establish local lower error bounds. Remarkably, the normal flux jump estimator is only $\frac12$-order $p$-suboptimal, as it can be bounded by the stabilization owing to the local conservation property of HHO methods. Numerical examples illustrate the theory.<|reference_end|>
arxiv
@article{dong2024$hp$-error, title={$hp$-error analysis of mixed-order hybrid high-order methods for elliptic problems on simplicial meshes}, author={Zhaonan Dong, Alexandre Ern}, journal={arXiv preprint arXiv:2410.02540}, year={2024}, archivePrefix={arXiv}, eprint={2410.02540}, primaryClass={math.NA cs.NA} }
dong2024$hp$-error
arxiv-665086
2410.02541
Fair Decentralized Learning
<|reference_start|>Fair Decentralized Learning: Decentralized learning (DL) is an emerging approach that enables nodes to collaboratively train a machine learning model without sharing raw data. In many application domains, such as healthcare, this approach faces challenges due to the high level of heterogeneity in the training data's feature space. Such feature heterogeneity lowers model utility and negatively impacts fairness, particularly for nodes with under-represented training data. In this paper, we introduce \textsc{Facade}, a clustering-based DL algorithm specifically designed for fair model training when the training data exhibits several distinct features. The challenge of \textsc{Facade} is to assign nodes to clusters, one for each feature, based on the similarity in the features of their local data, without requiring individual nodes to know a priori which cluster they belong to. \textsc{Facade} (1) dynamically assigns nodes to their appropriate clusters over time, and (2) enables nodes to collaboratively train a specialized model for each cluster in a fully decentralized manner. We theoretically prove the convergence of \textsc{Facade}, implement our algorithm, and compare it against three state-of-the-art baselines. Our experimental results on three datasets demonstrate the superiority of our approach in terms of model accuracy and fairness compared to all three competitors. Compared to the best-performing baseline, \textsc{Facade} on the CIFAR-10 dataset also reduces communication costs by 32.3\% to reach a target accuracy when cluster sizes are imbalanced.<|reference_end|>
arxiv
@article{biswas2024fair, title={Fair Decentralized Learning}, author={Sayan Biswas, Anne-Marie Kermarrec, Rishi Sharma, Thibaud Trinca, Martijn de Vos}, journal={arXiv preprint arXiv:2410.02541}, year={2024}, archivePrefix={arXiv}, eprint={2410.02541}, primaryClass={cs.LG cs.DC} }
biswas2024fair
arxiv-665087
2410.02543
Diffusion Models are Evolutionary Algorithms
<|reference_start|>Diffusion Models are Evolutionary Algorithms: In a convergence of machine learning and biology, we reveal that diffusion models are evolutionary algorithms. By considering evolution as a denoising process and reversed evolution as diffusion, we mathematically demonstrate that diffusion models inherently perform evolutionary algorithms, naturally encompassing selection, mutation, and reproductive isolation. Building on this equivalence, we propose the Diffusion Evolution method: an evolutionary algorithm utilizing iterative denoising -- as originally introduced in the context of diffusion models -- to heuristically refine solutions in parameter spaces. Unlike traditional approaches, Diffusion Evolution efficiently identifies multiple optimal solutions and outperforms prominent mainstream evolutionary algorithms. Furthermore, leveraging advanced concepts from diffusion models, namely latent space diffusion and accelerated sampling, we introduce Latent Space Diffusion Evolution, which finds solutions for evolutionary tasks in high-dimensional complex parameter space while significantly reducing computational steps. This parallel between diffusion and evolution not only bridges two different fields but also opens new avenues for mutual enhancement, raising questions about open-ended evolution and potentially utilizing non-Gaussian or discrete diffusion models in the context of Diffusion Evolution.<|reference_end|>
arxiv
@article{zhang2024diffusion, title={Diffusion Models are Evolutionary Algorithms}, author={Yanbo Zhang, Benedikt Hartl, Hananel Hazan and Michael Levin}, journal={arXiv preprint arXiv:2410.02543}, year={2024}, archivePrefix={arXiv}, eprint={2410.02543}, primaryClass={cs.NE cs.LG} }
zhang2024diffusion
arxiv-665088
2410.02544
Federated k-Core Decomposition: A Secure Distributed Approach
<|reference_start|>Federated k-Core Decomposition: A Secure Distributed Approach: As one of the most well-studied cohesive subgraph models, the $k$-core is widely used to find graph nodes that are ``central'' or ``important'' in many applications, such as biological networks, social networks, ecological networks, and financial networks. For distributed networks, e.g., Decentralized Online Social Networks (DOSNs) such that each vertex is a client as a single computing unit, the distributed $k$-core decomposition algorithms are already proposed. However, current distributed approaches fail to adequately protect privacy and security. In today's data-driven world, data privacy and security have attracted more and more attention, e.g., DOSNs are proposed to protect privacy by storing user information locally without using a single centralized server. In this work, we are the first to propose the secure version of the distributed $k$-core decomposition.<|reference_end|>
arxiv
@article{guo2024federated, title={Federated k-Core Decomposition: A Secure Distributed Approach}, author={Bin Guo, Emil Sekerinski, Lingyang Chu}, journal={arXiv preprint arXiv:2410.02544}, year={2024}, archivePrefix={arXiv}, eprint={2410.02544}, primaryClass={cs.DC} }
guo2024federated
arxiv-665089
2410.02545
The bunkbed conjecture is false
<|reference_start|>The bunkbed conjecture is false: We give an explicit counterexample to the Bunkbed Conjecture introduced by Kasteleyn in 1985. The counterexample is given by a planar graph on $7222$ vertices, and is built on the recent work of Hollom (2024).<|reference_end|>
arxiv
@article{gladkov2024the, title={The bunkbed conjecture is false}, author={Nikita Gladkov, Igor Pak and Aleksandr Zimin}, journal={arXiv preprint arXiv:2410.02545}, year={2024}, archivePrefix={arXiv}, eprint={2410.02545}, primaryClass={math.CO cs.DM math.PR} }
gladkov2024the
arxiv-665090
2410.02547
Personalized Quantum Federated Learning for Privacy Image Classification
<|reference_start|>Personalized Quantum Federated Learning for Privacy Image Classification: Quantum federated learning has brought about the improvement of privacy image classification, while the lack of personality of the client model may contribute to the suboptimal performance of quantum federated learning. A personalized quantum federated learning algorithm for privacy image classification is proposed to enhance the personality of the client model in the case of an imbalanced distribution of images. First, a personalized quantum federated learning model is constructed, in which a personalized layer is set for the client model to maintain the personalized parameters. Second, a personalized quantum federated learning algorithm is introduced to secure the information exchanged between the client and server. Third, the personalized federated learning is applied to image classification on the FashionMNIST dataset, and the experimental results indicate that the personalized quantum federated learning algorithm can obtain global and local models with excellent performance, even in situations where local training samples are imbalanced. The server's accuracy is 100% with 8 clients and a distribution parameter of 100, outperforming the non-personalized model by 7%. The average client accuracy is 2.9% higher than that of the non-personalized model with 2 clients and a distribution parameter of 1. Compared to previous quantum federated learning algorithms, the proposed personalized quantum federated learning algorithm eliminates the need for additional local training while safeguarding both model and data privacy. It may facilitate broader adoption and application of quantum technologies, and pave the way for more secure, scalable, and efficient quantum distributed machine learning solutions.<|reference_end|>
arxiv
@article{shi2024personalized, title={Personalized Quantum Federated Learning for Privacy Image Classification}, author={Jinjing Shi, Tian Chen, Shichao Zhang, Xuelong Li}, journal={arXiv preprint arXiv:2410.02547}, year={2024}, archivePrefix={arXiv}, eprint={2410.02547}, primaryClass={quant-ph cs.AI} }
shi2024personalized
arxiv-665091
2410.02548
Local Flow Matching Generative Models
<|reference_start|>Local Flow Matching Generative Models: Flow Matching (FM) is a simulation-free method for learning a continuous and invertible flow to interpolate between two distributions, and in particular to generate data from noise in generative modeling. In this paper, we introduce Local Flow Matching (LFM), which learns a sequence of FM sub-models and each matches a diffusion process up to the time of the step size in the data-to-noise direction. In each step, the two distributions to be interpolated by the sub-model are closer to each other than data vs. noise, and this enables the use of smaller models with faster training. The stepwise structure of LFM is natural to be distilled and different distillation techniques can be adopted to speed up generation. Theoretically, we prove a generation guarantee of the proposed flow model in terms of the $\chi^2$-divergence between the generated and true data distributions. In experiments, we demonstrate the improved training efficiency and competitive generative performance of LFM compared to FM on the unconditional generation of tabular data and image datasets, and also on the conditional generation of robotic manipulation policies.<|reference_end|>
arxiv
@article{xu2024local, title={Local Flow Matching Generative Models}, author={Chen Xu, Xiuyuan Cheng, Yao Xie}, journal={arXiv preprint arXiv:2410.02548}, year={2024}, archivePrefix={arXiv}, eprint={2410.02548}, primaryClass={stat.ML cs.LG} }
xu2024local
arxiv-665092
2410.02550
NestedMorph: Enhancing Deformable Medical Image Registration with Nested Attention Mechanisms
<|reference_start|>NestedMorph: Enhancing Deformable Medical Image Registration with Nested Attention Mechanisms: Deformable image registration is crucial for aligning medical images in a non-linear fashion across different modalities, allowing for precise spatial correspondence between varying anatomical structures. This paper presents NestedMorph, a novel network utilizing a Nested Attention Fusion approach to improve intra-subject deformable registration between T1-weighted (T1w) MRI and diffusion MRI (dMRI) data. NestedMorph integrates high-resolution spatial details from an encoder with semantic information from a decoder using a multi-scale framework, enhancing both local and global feature extraction. Our model notably outperforms existing methods, including CNN-based approaches like VoxelMorph, MIDIR, and CycleMorph, as well as Transformer-based models such as TransMorph and ViT-V-Net, and traditional techniques like NiftyReg and SyN. Evaluations on the HCP dataset demonstrate that NestedMorph achieves superior performance across key metrics, including SSIM, HD95, and SDlogJ, with the highest SSIM of 0.89, and the lowest HD95 of 2.5 and SDlogJ of 0.22. These results highlight NestedMorph's ability to capture both local and global image features effectively, leading to superior registration performance. The promising outcomes of this study underscore NestedMorph's potential to significantly advance deformable medical image registration, providing a robust framework for future research and clinical applications. The source code and our implementation are available at: https://bit.ly/3zdVqcg<|reference_end|>
arxiv
@article{kumar2024nestedmorph:, title={NestedMorph: Enhancing Deformable Medical Image Registration with Nested Attention Mechanisms}, author={Gurucharan Marthi Krishna Kumar, Janine Mendola, Amir Shmuel}, journal={arXiv preprint arXiv:2410.02550}, year={2024}, archivePrefix={arXiv}, eprint={2410.02550}, primaryClass={eess.IV cs.CV} }
kumar2024nestedmorph:
arxiv-665093
2410.02551
ColaCare: Enhancing Electronic Health Record Modeling through Large Language Model-Driven Multi-Agent Collaboration
<|reference_start|>ColaCare: Enhancing Electronic Health Record Modeling through Large Language Model-Driven Multi-Agent Collaboration: We introduce ColaCare, a framework that enhances Electronic Health Record (EHR) modeling through multi-agent collaboration driven by Large Language Models (LLMs). Our approach seamlessly integrates domain-specific expert models with LLMs to bridge the gap between structured EHR data and text-based reasoning. Inspired by clinical consultations, ColaCare employs two types of agents: DoctorAgent and MetaAgent, which collaboratively analyze patient data. Expert models process and generate predictions from numerical EHR data, while LLM agents produce reasoning references and decision-making reports within the collaborative consultation framework. We additionally incorporate the Merck Manual of Diagnosis and Therapy (MSD) medical guideline within a retrieval-augmented generation (RAG) module for authoritative evidence support. Extensive experiments conducted on four distinct EHR datasets demonstrate ColaCare's superior performance in mortality prediction tasks, underscoring its potential to revolutionize clinical decision support systems and advance personalized precision medicine. The code, complete prompt templates, more case studies, etc. are publicly available at the anonymous link: https://colacare.netlify.app.<|reference_end|>
arxiv
@article{wang2024colacare:, title={ColaCare: Enhancing Electronic Health Record Modeling through Large Language Model-Driven Multi-Agent Collaboration}, author={Zixiang Wang, Yinghao Zhu, Huiya Zhao, Xiaochen Zheng, Tianlong Wang, Wen Tang, Yasha Wang, Chengwei Pan, Ewen M. Harrison, Junyi Gao, Liantao Ma}, journal={arXiv preprint arXiv:2410.02551}, year={2024}, archivePrefix={arXiv}, eprint={2410.02551}, primaryClass={cs.LG cs.AI cs.CL} }
wang2024colacare:
arxiv-665094
2410.02552
Automated Music Therapy for Anxiety and Depression Management in Older People (AMITY)
<|reference_start|>Automated Music Therapy for Anxiety and Depression Management in Older People (AMITY): The onset of old age brings physiological and mental changes, with anxiety and depression being common mental disorders that can trigger other health issues and reduce lifespan. However, due to a global shortage of mental health professionals, combined with a growing population and limited awareness, these disorders often go undiagnosed. Music therapy offers a reliable method to address psychological, emotional, and cognitive needs. This paper presents an approach that monitors anxiety and depression symptoms in real time using low-complexity body sensors, followed by automated personalised music therapy, reducing the dependence on therapists and improving mental health care accessibility.<|reference_end|>
arxiv
@article{faizan2024automated, title={Automated Music Therapy for Anxiety and Depression Management in Older People (AMITY)}, author={Malik Faizan, P.J. White, Indrakshi Dey}, journal={arXiv preprint arXiv:2410.02552}, year={2024}, archivePrefix={arXiv}, eprint={2410.02552}, primaryClass={eess.SY cs.SY} }
faizan2024automated
arxiv-665095
2410.02555
Toward Neuronal Implementations of Delayed Optimal Control
<|reference_start|>Toward Neuronal Implementations of Delayed Optimal Control: Animal sensorimotor behavior is frequently modeled using optimal controllers. However, it is unclear how the neuronal circuits within the animal's nervous system implement optimal controller-like behavior. In this work, we study the question of implementing a delayed linear quadratic regulator with linear dynamical "neurons" on a muscle model. We show that for any second-order controller, there are three minimal neural circuit configurations that implement the same controller. Furthermore, the firing rate characteristics of each circuit can vary drastically, even as the overall controller behavior is preserved. Along the way, we introduce concepts that bridge controller realizations to neural implementations that are compatible with known neuronal delay structures.<|reference_end|>
arxiv
@article{li2024toward, title={Toward Neuronal Implementations of Delayed Optimal Control}, author={Jing Shuang Li}, journal={arXiv preprint arXiv:2410.02555}, year={2024}, archivePrefix={arXiv}, eprint={2410.02555}, primaryClass={eess.SY cs.SY q-bio.NC} }
li2024toward
arxiv-665096
2410.02558
Improving Unsupervised Constituency Parsing via Maximizing Semantic Information
<|reference_start|>Improving Unsupervised Constituency Parsing via Maximizing Semantic Information: Unsupervised constituency parsers organize phrases within a sentence into a tree-shaped syntactic constituent structure that reflects the organization of sentence semantics. However, the traditional objective of maximizing sentence log-likelihood (LL) does not explicitly account for the close relationship between the constituent structure and the semantics, resulting in a weak correlation between LL values and parsing accuracy. In this paper, we introduce a novel objective for training unsupervised parsers: maximizing the information between constituent structures and sentence semantics (SemInfo). We introduce a bag-of-substrings model to represent the semantics and apply the probability-weighted information metric to estimate the SemInfo. Additionally, we develop a Tree Conditional Random Field (TreeCRF)-based model to apply the SemInfo maximization objective to Probabilistic Context-Free Grammar (PCFG) induction, the state-of-the-art method for unsupervised constituency parsing. Experiments demonstrate that SemInfo correlates more strongly with parsing accuracy than LL. Our algorithm significantly enhances parsing accuracy by an average of 7.85 points across five PCFG variants and in four languages, achieving new state-of-the-art results in three of the four languages.<|reference_end|>
arxiv
@article{chen2024improving, title={Improving Unsupervised Constituency Parsing via Maximizing Semantic Information}, author={Junjie Chen, Xiangheng He, Yusuke Miyao, Danushka Bollegala}, journal={arXiv preprint arXiv:2410.02558}, year={2024}, archivePrefix={arXiv}, eprint={2410.02558}, primaryClass={cs.CL} }
chen2024improving
arxiv-665097
2410.02559
Obtaining Lower Query Complexities through Lightweight Zeroth-Order Proximal Gradient Algorithms
<|reference_start|>Obtaining Lower Query Complexities through Lightweight Zeroth-Order Proximal Gradient Algorithms: Zeroth-order (ZO) optimization is one key technique for machine learning problems where gradient calculation is expensive or impossible. Several variance reduced ZO proximal algorithms have been proposed to speed up ZO optimization for non-smooth problems, and all of them opted for the coordinated ZO estimator against the random ZO estimator when approximating the true gradient, since the former is more accurate. While the random ZO estimator introduces bigger error and makes convergence analysis more challenging compared to coordinated ZO estimator, it requires only $\mathcal{O}(1)$ computation, which is significantly less than $\mathcal{O}(d)$ computation of the coordinated ZO estimator, with $d$ being dimension of the problem space. To take advantage of the computationally efficient nature of the random ZO estimator, we first propose a ZO objective decrease (ZOOD) property which can incorporate two different types of errors in the upper bound of convergence rate. Next, we propose two generic reduction frameworks for ZO optimization which can automatically derive the convergence results for convex and non-convex problems respectively, as long as the convergence rate for the inner solver satisfies the ZOOD property. With the application of two reduction frameworks on our proposed ZOR-ProxSVRG and ZOR-ProxSAGA, two variance reduced ZO proximal algorithms with fully random ZO estimators, we improve the state-of-the-art function query complexities from $\mathcal{O}\left(\min\{\frac{dn^{1/2}}{\epsilon^2}, \frac{d}{\epsilon^3}\}\right)$ to $\tilde{\mathcal{O}}\left(\frac{n+d}{\epsilon^2}\right)$ under $d > n^{\frac{1}{2}}$ for non-convex problems, and from $\mathcal{O}\left(\frac{d}{\epsilon^2}\right)$ to $\tilde{\mathcal{O}}\left(n\log\frac{1}{\epsilon}+\frac{d}{\epsilon}\right)$ for convex problems.<|reference_end|>
arxiv
@article{gu2024obtaining, title={Obtaining Lower Query Complexities through Lightweight Zeroth-Order Proximal Gradient Algorithms}, author={Bin Gu, Xiyuan Wei, Hualin Zhang, Yi Chang, Heng Huang}, journal={Neural Computation, 2024, 36(5): 897-935}, year={2024}, doi={10.1162/neco_a_01636}, archivePrefix={arXiv}, eprint={2410.02559}, primaryClass={math.OC cs.LG} }
gu2024obtaining
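To make the trade-off described in the abstract above concrete, here is a minimal sketch of the two kinds of zeroth-order gradient estimators it contrasts; the smoothing parameter `mu` and the two-point form are common choices and are assumptions here, not the paper's exact construction.

    import numpy as np

    def random_zo_grad(f, x, mu=1e-4, rng=None):
        # Random two-point ZO estimator: O(1) function evaluations per call.
        rng = np.random.default_rng() if rng is None else rng
        u = rng.standard_normal(x.shape)
        u /= np.linalg.norm(u)                    # random direction on the unit sphere
        d = x.size
        return d * (f(x + mu * u) - f(x - mu * u)) / (2.0 * mu) * u

    def coordinated_zo_grad(f, x, mu=1e-4):
        # Coordinated ZO estimator: O(d) function evaluations, lower variance.
        g = np.zeros(x.shape)
        for i in range(x.size):
            e = np.zeros(x.shape)
            e[i] = 1.0
            g[i] = (f(x + mu * e) - f(x - mu * e)) / (2.0 * mu)
        return g

Variance-reduced proximal methods such as those studied in the paper wrap estimators like these inside SVRG/SAGA-style updates.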
arxiv-665098
2410.02560
Convolutional Variational Autoencoders for Spectrogram Compression in Automatic Speech Recognition
<|reference_start|>Convolutional Variational Autoencoders for Spectrogram Compression in Automatic Speech Recognition: For many Automatic Speech Recognition (ASR) tasks, audio features such as spectrograms show better results than Mel-frequency Cepstral Coefficients (MFCC), but in practice they are hard to use due to the high dimensionality of the feature space. The following paper presents an alternative approach to generating a compressed spectrogram representation, based on Convolutional Variational Autoencoders (VAE). A Convolutional VAE model was trained on a subsample of the LibriSpeech dataset to reconstruct short fragments of audio spectrograms (25 ms) from a 13-dimensional embedding. The trained model for a 40-dimensional (300 ms) embedding was used to generate features for a corpus of spoken commands, the GoogleSpeechCommands dataset. Using the generated features, an ASR system was built and compared to a model using MFCC features.<|reference_end|>
arxiv
@article{iakovenko2024convolutional, title={Convolutional Variational Autoencoders for Spectrogram Compression in Automatic Speech Recognition}, author={Olga Iakovenko and Ivan Bondarenko}, journal={arXiv preprint arXiv:2410.02560}, year={2024}, doi={10.1007/978-3-030-63000-3_5}, archivePrefix={arXiv}, eprint={2410.02560}, primaryClass={cs.SD cs.CL eess.AS} }
iakovenko2024convolutional
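The following is a minimal PyTorch sketch of a convolutional VAE that compresses a spectrogram patch into a small embedding, in the spirit of the abstract above; the layer sizes, the input shape (batch, 1, n_mels, n_frames), and the 13-dimensional latent are assumptions, not the authors' exact architecture.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SpecVAE(nn.Module):
        def __init__(self, latent_dim=13):
            super().__init__()
            self.enc = nn.Sequential(
                nn.Conv2d(1, 16, 3, stride=2, padding=1), nn.ReLU(),
                nn.Conv2d(16, 32, 3, stride=2, padding=1), nn.ReLU(),
                nn.AdaptiveAvgPool2d((4, 4)), nn.Flatten())
            self.fc_mu = nn.Linear(32 * 4 * 4, latent_dim)
            self.fc_logvar = nn.Linear(32 * 4 * 4, latent_dim)
            self.fc_dec = nn.Linear(latent_dim, 32 * 4 * 4)
            self.dec = nn.Sequential(
                nn.Unflatten(1, (32, 4, 4)),
                nn.ConvTranspose2d(32, 16, 4, stride=2, padding=1), nn.ReLU(),
                nn.ConvTranspose2d(16, 1, 4, stride=2, padding=1))

        def forward(self, x):                     # x: (batch, 1, n_mels, n_frames)
            h = self.enc(x)
            mu, logvar = self.fc_mu(h), self.fc_logvar(h)
            z = mu + torch.exp(0.5 * logvar) * torch.randn_like(mu)  # reparameterization
            recon = self.dec(self.fc_dec(z))
            # Resize the reconstruction back to the input resolution.
            return F.interpolate(recon, size=x.shape[-2:]), mu, logvar

Training would minimize the usual reconstruction-plus-KL loss; the `mu` vector then serves as the compressed feature fed to an ASR model.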
arxiv-665099
2410.02561
The Benefit of Being Bayesian in Online Conformal Prediction
<|reference_start|>The Benefit of Being Bayesian in Online Conformal Prediction: Based on the framework of Conformal Prediction (CP), we study the online construction of valid confidence sets given a black-box machine learning model. By converting the target confidence levels into quantile levels, the problem can be reduced to predicting the quantiles (in hindsight) of a sequentially revealed data sequence. Two very different approaches have been studied previously. (i) Direct approach: Assuming the data sequence is iid or exchangeable, one could maintain the empirical distribution of the observed data as an algorithmic belief, and directly predict its quantiles. (ii) Indirect approach: As statistical assumptions often do not hold in practice, a recent trend is to consider the adversarial setting and apply first-order online optimization to moving quantile losses (Gibbs & Cand\`es, 2021). It requires knowing the target quantile level beforehand, and suffers from certain validity issues on the obtained confidence sets, due to the associated loss linearization. This paper presents a novel Bayesian CP framework that combines their strengths. Without any statistical assumption, it is able to both: (i) answer multiple arbitrary confidence level queries online, with provably low regret; and (ii) overcome the validity issues suffered by first-order optimization baselines, due to being "data-centric" rather than "iterate-centric". From a technical perspective, our key idea is to regularize the algorithmic belief of the above direct approach by a Bayesian prior, which "robustifies" it by simulating a non-linearized Follow the Regularized Leader (FTRL) algorithm on the output. For statisticians, this can be regarded as an online adversarial view of Bayesian inference. Importantly, the proposed belief update backbone is shared by prediction heads targeting different confidence levels, bringing practical benefits analogous to U-calibration (Kleinberg et al., 2023).<|reference_end|>
arxiv
@article{zhang2024the, title={The Benefit of Being Bayesian in Online Conformal Prediction}, author={Zhiyu Zhang, Zhou Lu, Heng Yang}, journal={arXiv preprint arXiv:2410.02561}, year={2024}, archivePrefix={arXiv}, eprint={2410.02561}, primaryClass={stat.ML cs.LG} }
zhang2024the
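For context on the "indirect approach" baseline mentioned in the abstract above, here is a minimal sketch of first-order online quantile-level tracking in the style of Gibbs & Candès (2021), combined with empirical quantiles of past scores; the step size `gamma` and the cold-start handling are assumptions, and this is not the paper's Bayesian method.

    import numpy as np

    def online_conformal_thresholds(scores, alpha=0.1, gamma=0.01):
        # Produce a threshold before each nonconformity score is revealed,
        # then update the target quantile level from the coverage feedback.
        past, thresholds, alpha_t = [], [], alpha
        for s in scores:
            if past:
                level = min(max(1.0 - alpha_t, 0.0), 1.0)
                q = float(np.quantile(past, level))
            else:
                q = float("inf")                  # no data yet: cover trivially
            thresholds.append(q)
            err = float(s > q)                    # 1 if the set failed to cover
            alpha_t += gamma * (alpha - err)      # first-order update on the level
            past.append(s)
        return thresholds

The paper's contribution, roughly, is to replace this iterate-centric update with a prior-regularized belief over the observed data that can answer arbitrary confidence-level queries at once.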
arxiv-665100
2410.02563
Machine Learning Approaches for Active Queue Management: A Survey, Taxonomy, and Future Directions
<|reference_start|>Machine Learning Approaches for Active Queue Management: A Survey, Taxonomy, and Future Directions: Active Queue Management (AQM), a network-layer congestion control technique endorsed by the Internet Engineering Task Force (IETF), encourages routers to discard packets before the occurrence of buffer overflow. Traditional AQM techniques often employ heuristic approaches that require meticulous parameter adjustments, limiting their real-world applicability. In contrast, Machine Learning (ML) approaches offer highly adaptive, data-driven solutions tailored to dynamic network conditions. Consequently, many researchers have adapted ML for AQM over the years, resulting in a wide variety of algorithms ranging from predicting congestion via supervised learning to discovering optimal packet-dropping policies with reinforcement learning. Despite these remarkable advancements, no previous work has compiled these methods in the form of a survey article. This paper presents the first thorough documentation and analysis of ML-based algorithms for AQM, in which the strengths and limitations of each proposed method are evaluated and compared. In addition, a novel taxonomy of ML approaches based on methodology is established. The review concludes by discussing unexplored research gaps and potential new directions for more robust ML-AQM methods.<|reference_end|>
arxiv
@article{toopchinezhad2024machine, title={Machine Learning Approaches for Active Queue Management: A Survey, Taxonomy, and Future Directions}, author={Mohammad Parsa Toopchinezhad and Mahmood Ahmadi}, journal={arXiv preprint arXiv:2410.02563}, year={2024}, archivePrefix={arXiv}, eprint={2410.02563}, primaryClass={cs.NI} }
toopchinezhad2024machine
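As a toy illustration of the idea described above (dropping packets before buffer overflow, with the drop decision driven by a learned model), here is a sketch; the logistic feature map, weights, and function names are purely hypothetical and do not correspond to any specific algorithm covered by the survey.

    import math
    import random

    def drop_probability(queue_len, arrival_rate, weights=(0.05, 0.3, -2.0)):
        # A learned (here: hand-set) logistic model mapping queue state to a drop probability.
        z = weights[0] * queue_len + weights[1] * arrival_rate + weights[2]
        return 1.0 / (1.0 + math.exp(-z))

    def enqueue(queue, packet, arrival_rate, capacity=100):
        if len(queue) >= capacity:
            return False                          # forced tail drop on overflow
        if random.random() < drop_probability(len(queue), arrival_rate):
            return False                          # proactive AQM drop
        queue.append(packet)
        return True

A supervised variant would fit the weights from traces labeled with congestion events, while an RL variant would learn the drop policy from delay and throughput rewards.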