Columns: corpus_id (string, length 7-12), paper_id (string, length 9-16), title (string, length 1-261), abstract (string, length 70-4.02k), source (string, 1 distinct value), bibtex (string, length 208-20.9k), citation_key (string, length 6-100)
arxiv-665701
2410.03569
Teaching Transformers Modular Arithmetic at Scale
<|reference_start|>Teaching Transformers Modular Arithmetic at Scale: Modular addition is, on its face, a simple operation: given $N$ elements in $\mathbb{Z}_q$, compute their sum modulo $q$. Yet, scalable machine learning solutions to this problem remain elusive: prior work trains ML models that sum $N \le 6$ elements mod $q \le 1000$. Promising applications of ML models for cryptanalysis, which often involve modular arithmetic with large $N$ and $q$, motivate reconsideration of this problem. This work proposes three changes to the modular addition model training pipeline: more diverse training data, an angular embedding, and a custom loss function. With these changes, we demonstrate success with our approach for $N = 256, q = 3329$, a case which is interesting for cryptographic applications, and a significant increase in $N$ and $q$ over prior work. These techniques also generalize to other modular arithmetic problems, motivating future work.<|reference_end|>
arxiv
@article{saxena2024teaching, title={Teaching Transformers Modular Arithmetic at Scale}, author={Eshika Saxena, Alberto Alfarano, Emily Wenger, Kristin Lauter}, journal={arXiv preprint arXiv:2410.03569}, year={2024}, archivePrefix={arXiv}, eprint={2410.03569}, primaryClass={cs.LG} }
saxena2024teaching
arxiv-665702
2410.03571
Generative AI in the Software Engineering Domain: Tensions of Occupational Identity and Patterns of Identity Protection
<|reference_start|>Generative AI in the Software Engineering Domain: Tensions of Occupational Identity and Patterns of Identity Protection: The adoption of generative Artificial Intelligence (GAI) in organizational settings calls into question workers' roles, and relatedly, the implications for their long-term skill development and domain expertise. In our qualitative study in the software engineering domain, we build on the theoretical lenses of occupational identity and self-determination theory to understand how and why software engineers make sense of GAI for their work. We find that engineers' sense-making is contingent on domain expertise, as juniors and seniors felt their needs for competence, autonomy, and relatedness to be differently impacted by GAI. We shed light on the importance of the individual's role in preserving tacit domain knowledge as engineers engaged in sense-making that protected their occupational identity. We illustrate how organizations play an active role in shaping workers' sense-making process and propose design guidelines on how organizations and system designers can facilitate the impact of technological change on workers' occupational identity.<|reference_end|>
arxiv
@article{schmitt2024generative, title={Generative AI in the Software Engineering Domain: Tensions of Occupational Identity and Patterns of Identity Protection}, author={Anuschka Schmitt, Krzysztof Z. Gajos, Osnat Mokryn}, journal={arXiv preprint arXiv:2410.03571}, year={2024}, archivePrefix={arXiv}, eprint={2410.03571}, primaryClass={cs.HC cs.SE} }
schmitt2024generative
arxiv-665703
2410.03572
Compressing multivariate functions with tree tensor networks
<|reference_start|>Compressing multivariate functions with tree tensor networks: Tensor networks are a compressed format for multi-dimensional data. One-dimensional tensor networks -- often referred to as tensor trains (TT) or matrix product states (MPS) -- are increasingly being used as a numerical ansatz for continuum functions by "quantizing" the inputs into discrete binary digits. Here we demonstrate the power of more general tree tensor networks for this purpose. We provide direct constructions of a number of elementary functions as generic tree tensor networks and interpolative constructions for more complicated functions via a generalization of the tensor cross interpolation algorithm. For a range of multi-dimensional functions we show how more structured tree tensor networks offer a significantly more efficient ansatz than the commonly used tensor train. We demonstrate an application of our methods to solving multi-dimensional, non-linear Fredholm equations, providing a rigorous bound on the rank of the solution which, in turn, guarantees exponentially scaling accuracy with the size of the tree tensor network for certain problems.<|reference_end|>
arxiv
@article{tindall2024compressing, title={Compressing multivariate functions with tree tensor networks}, author={Joseph Tindall, Miles Stoudenmire and Ryan Levy}, journal={arXiv preprint arXiv:2410.03572}, year={2024}, archivePrefix={arXiv}, eprint={2410.03572}, primaryClass={quant-ph cs.NA math.NA physics.comp-ph} }
tindall2024compressing
arxiv-665704
2410.03573
HyResPINNs: Adaptive Hybrid Residual Networks for Learning Optimal Combinations of Neural and RBF Components for Physics-Informed Modeling
<|reference_start|>HyResPINNs: Adaptive Hybrid Residual Networks for Learning Optimal Combinations of Neural and RBF Components for Physics-Informed Modeling: Physics-informed neural networks (PINNs) are an increasingly popular class of techniques for the numerical solution of partial differential equations (PDEs), where neural networks are trained using loss functions regularized by relevant PDE terms to enforce physical constraints. We present a new class of PINNs called HyResPINNs, which augment traditional PINNs with adaptive hybrid residual blocks that combine the outputs of a standard neural network and a radial basis function (RBF) network. A key feature of our method is the inclusion of adaptive combination parameters within each residual block, which dynamically learn to weigh the contributions of the neural network and RBF network outputs. Additionally, adaptive connections between residual blocks allow for flexible information flow throughout the network. We show that HyResPINNs are more robust to training point locations and neural network architectures than traditional PINNs. Moreover, HyResPINNs offer orders of magnitude greater accuracy than competing methods on certain problems, with only modest increases in training costs. We demonstrate the strengths of our approach on challenging PDEs, including the Allen-Cahn equation and the Darcy-Flow equation. Our results suggest that HyResPINNs effectively bridge the gap between traditional numerical methods and modern machine learning-based solvers.<|reference_end|>
arxiv
@article{cooley2024hyrespinns:, title={HyResPINNs: Adaptive Hybrid Residual Networks for Learning Optimal Combinations of Neural and RBF Components for Physics-Informed Modeling}, author={Madison Cooley, Robert M. Kirby, Shandian Zhe, Varun Shankar}, journal={arXiv preprint arXiv:2410.03573}, year={2024}, archivePrefix={arXiv}, eprint={2410.03573}, primaryClass={cs.LG} }
cooley2024hyrespinns:
arxiv-665705
2410.03575
Generalizing the Fr\'echet Derivative Algorithm for the Matrix Exponential
<|reference_start|>Generalizing the Fr\'echet Derivative Algorithm for the Matrix Exponential: The computation of off-diagonal blocks of matrix functions $f(T)$, where $T$ is block triangular, poses a challenging problem in scientific computing. We present a novel algorithm that exploits the structure of block triangular matrices, generalizing the algorithm of Al-Mohy and Higham for computing the Fr\'echet derivative of the matrix exponential. This work has significant applications in fields such as exponential integrators for solving systems of first-order differential equations, Hamiltonian linear systems in control theory, and option pricing in finance. Our approach introduces a linear operator that maps off-diagonal blocks of $T$ into their counterparts in $f(T)$. By studying the algebraic properties of the operator, we establish a comprehensive computational framework, paving the way to extend existing Fr\'echet derivative algorithms of matrix functions to more general settings. For the matrix exponential, in particular, the algorithm employs the scaling and squaring method with diagonal Pad\'e approximants to $\exp(x)$, with parameters chosen based on a rigorous backward error analysis, which notably does not depend on the norm of the off-diagonal blocks. The numerical experiment demonstrates that our algorithm surpasses existing algorithms in terms of accuracy and efficiency, making it highly valuable for a wide range of applications.<|reference_end|>
arxiv
@article{al-mohy2024generalizing, title={Generalizing the Fr\'echet Derivative Algorithm for the Matrix Exponential}, author={Awad H. Al-Mohy}, journal={arXiv preprint arXiv:2410.03575}, year={2024}, archivePrefix={arXiv}, eprint={2410.03575}, primaryClass={math.NA cs.NA} }
al-mohy2024generalizing
arxiv-665706
2410.03576
Table Question Answering for Low-resourced Indic Languages
<|reference_start|>Table Question Answering for Low-resourced Indic Languages: TableQA is the task of answering questions over tables of structured information, returning individual cells or tables as output. TableQA research has focused primarily on high-resource languages, leaving medium- and low-resource languages with little progress due to scarcity of annotated data and neural models. We address this gap by introducing a fully automatic large-scale tableQA data generation process for low-resource languages with limited budget. We incorporate our data generation method on two Indic languages, Bengali and Hindi, which have no tableQA datasets or models. TableQA models trained on our large-scale datasets outperform state-of-the-art LLMs. We further study the trained models on different aspects, including mathematical reasoning capabilities and zero-shot cross-lingual transfer. Our work is the first on low-resource tableQA focusing on scalable data generation and evaluation procedures. Our proposed data generation method can be applied to any low-resource language with a web presence. We release datasets, models, and code (https://github.com/kolk/Low-Resource-TableQA-Indic-languages).<|reference_end|>
arxiv
@article{pal2024table, title={Table Question Answering for Low-resourced Indic Languages}, author={Vaishali Pal, Evangelos Kanoulas, Andrew Yates, Maarten de Rijke}, journal={arXiv preprint arXiv:2410.03576}, year={2024}, archivePrefix={arXiv}, eprint={2410.03576}, primaryClass={cs.CL} }
pal2024table
arxiv-665707
2410.03577
Look Twice Before You Answer: Memory-Space Visual Retracing for Hallucination Mitigation in Multimodal Large Language Models
<|reference_start|>Look Twice Before You Answer: Memory-Space Visual Retracing for Hallucination Mitigation in Multimodal Large Language Models: Despite their impressive capabilities, Multimodal Large Language Models (MLLMs) are susceptible to hallucinations, especially assertively fabricating content not present in the visual inputs. To address the aforementioned challenge, we follow a common cognitive process - when one's initial memory of critical on-sight details fades, it is intuitive to look at them a second time to seek a factual and accurate answer. Therefore, we introduce Memory-space Visual Retracing (MemVR), a novel hallucination mitigation paradigm that requires neither external knowledge retrieval nor additional fine-tuning. In particular, we treat visual prompts as supplementary evidence to be reinjected into MLLMs via Feed Forward Network (FFN) as key-value memory, when the model is uncertain or even amnesic about question-relevant visual memories. Comprehensive experimental evaluations demonstrate that MemVR significantly mitigates hallucination issues across various MLLMs and excels in general benchmarks without incurring added time overhead, thus emphasizing its potential for widespread applicability.<|reference_end|>
arxiv
@article{zou2024look, title={Look Twice Before You Answer: Memory-Space Visual Retracing for Hallucination Mitigation in Multimodal Large Language Models}, author={Xin Zou, Yizhou Wang, Yibo Yan, Sirui Huang, Kening Zheng, Junkai Chen, Chang Tang, Xuming Hu}, journal={arXiv preprint arXiv:2410.03577}, year={2024}, archivePrefix={arXiv}, eprint={2410.03577}, primaryClass={cs.CV} }
zou2024look
arxiv-665708
2410.03578
A Practical Concatenated Coding Scheme for Noisy Shuffling Channels with Coset-based Indexing
<|reference_start|>A Practical Concatenated Coding Scheme for Noisy Shuffling Channels with Coset-based Indexing: Noisy shuffling channels capture the main characteristics of DNA storage systems where distinct segments of data are received out of order, after being corrupted by substitution errors. For realistic schemes with short-length segments, practical indexing and channel coding strategies are required to restore the order and combat the channel noise. In this paper, we develop a finite-length concatenated coding scheme that employs Reed-Solomon (RS) codes as outer codes and polar codes as inner codes, and utilizes an implicit indexing method based on cosets of the polar code. We propose a matched decoding method along with a metric for detecting the index that successfully restores the order and corrects channel errors at the receiver. Residual errors that are not corrected by the matched decoder are then corrected by the outer RS code. We derive analytical approximations for the frame error rate of the proposed scheme, and also evaluate its performance through simulations to demonstrate that the proposed implicit indexing method outperforms explicit indexing.<|reference_end|>
arxiv
@article{haghighat2024a, title={A Practical Concatenated Coding Scheme for Noisy Shuffling Channels with Coset-based Indexing}, author={Javad Haghighat, Tolga M. Duman}, journal={GLOBECOM 2023 - 2023 IEEE Global Communications Conference, Kuala Lumpur, Malaysia, 2023, pp. 1842-1847}, year={2024}, doi={10.1109/GLOBECOM54140.2023.10437131}, archivePrefix={arXiv}, eprint={2410.03578}, primaryClass={cs.IT math.IT} }
haghighat2024a
arxiv-665709
2410.03580
A Multi-model Approach for Video Data Retrieval in Autonomous Vehicle Development
<|reference_start|>A Multi-model Approach for Video Data Retrieval in Autonomous Vehicle Development: Autonomous driving software generates enormous amounts of data every second, which software development organizations save for future analysis and testing in the form of logs. However, given the vast size of this data, locating specific scenarios within a collection of vehicle logs can be challenging. Writing the correct SQL queries to find these scenarios requires engineers to have a strong background in SQL and the specific databases in question, further complicating the search process. This paper presents and evaluates a pipeline that allows searching for specific scenarios in log collections using natural language descriptions instead of SQL. The generated descriptions were evaluated by engineers working with vehicle logs at Zenseact on a scale from 1 to 5. Our approach achieved a mean score of 3.3, demonstrating the potential of using a multi-model architecture to improve the software development workflow. We also present an interface that visualizes both the query process and the results.<|reference_end|>
arxiv
@article{knapp2024a, title={A Multi-model Approach for Video Data Retrieval in Autonomous Vehicle Development}, author={Jesper Knapp, Klas Moberg, Yuchuan Jin, Simin Sun and Miroslaw Staron}, journal={arXiv preprint arXiv:2410.03580}, year={2024}, archivePrefix={arXiv}, eprint={2410.03580}, primaryClass={cs.SE cs.AI} }
knapp2024a
arxiv-665710
2410.03581
Nonstationary Sparse Spectral Permanental Process
<|reference_start|>Nonstationary Sparse Spectral Permanental Process: Existing permanental processes often impose constraints on kernel types or stationarity, limiting the model's expressiveness. To overcome these limitations, we propose a novel approach utilizing the sparse spectral representation of nonstationary kernels. This technique relaxes the constraints on kernel types and stationarity, allowing for more flexible modeling while reducing computational complexity to the linear level. Additionally, we introduce a deep kernel variant by hierarchically stacking multiple spectral feature mappings, further enhancing the model's expressiveness to capture complex patterns in data. Experimental results on both synthetic and real-world datasets demonstrate the effectiveness of our approach, particularly in scenarios with pronounced data nonstationarity. Finally, ablation studies are conducted to provide insights into the impact of various hyperparameters on model performance.<|reference_end|>
arxiv
@article{sun2024nonstationary, title={Nonstationary Sparse Spectral Permanental Process}, author={Zicheng Sun, Yixuan Zhang, Zenan Ling, Xuhui Fan, Feng Zhou}, journal={arXiv preprint arXiv:2410.03581}, year={2024}, archivePrefix={arXiv}, eprint={2410.03581}, primaryClass={stat.ML cs.LG} }
sun2024nonstationary
arxiv-665711
2410.03583
AraSync: Precision Time Synchronization in Rural Wireless Living Lab
<|reference_start|>AraSync: Precision Time Synchronization in Rural Wireless Living Lab: Time synchronization is a critical component in network operation and management, and it is also required by Ultra-Reliable, Low-Latency Communications (URLLC) in next-generation wireless systems such as those of 5G, 6G, and Open RAN. In this context, we design and implement AraSync as an end-to-end time synchronization system in the ARA wireless living lab to enable advanced wireless experiments and applications involving stringent time constraints. We make use of Precision Time Protocol (PTP) at different levels to achieve synchronization accuracy in the order of nanoseconds. Along with fiber networks, AraSync enables time synchronization across the AraHaul wireless x-haul network consisting of long-range, high-capacity mmWave and microwave links. In this paper, we present the detailed design and implementation of AraSync, including its hardware and software components and the PTP network topology. Further, we experimentally characterize the performance of AraSync from spatial and temporal dimensions. Our measurement and analysis of the clock offset and mean path delay show the impact of the wireless channel and weather conditions on the PTP synchronization accuracy.<|reference_end|>
arxiv
@article{nadim2024arasync:, title={AraSync: Precision Time Synchronization in Rural Wireless Living Lab}, author={Md Nadim, Taimoor Ul Islam, Salil Reddy, Tianyi Zhang, Zhibo Meng, Reshal Afzal, Sarath Babu, Arsalan Ahmad, Daji Qiao, Anish Arora, and Hongwei Zhang}, journal={arXiv preprint arXiv:2410.03583}, year={2024}, doi={10.1145/3636534.3697318}, archivePrefix={arXiv}, eprint={2410.03583}, primaryClass={cs.NI cs.PF} }
nadim2024arasync:
arxiv-665712
2410.03584
Discovering Biases in Information Retrieval Models Using Relevance Thesaurus as Global Explanation
<|reference_start|>Discovering Biases in Information Retrieval Models Using Relevance Thesaurus as Global Explanation: Most efforts in interpreting neural relevance models have focused on local explanations, which explain the relevance of a document to a query but are not useful in predicting the model's behavior on unseen query-document pairs. We propose a novel method to globally explain neural relevance models by constructing a "relevance thesaurus" containing semantically relevant query and document term pairs. This thesaurus is used to augment lexical matching models such as BM25 to approximate the neural model's predictions. Our method involves training a neural relevance model to score the relevance of partial query and document segments, which is then used to identify relevant terms across the vocabulary space. We evaluate the obtained thesaurus explanation based on ranking effectiveness and fidelity to the target neural ranking model. Notably, our thesaurus reveals the existence of brand name bias in ranking models, demonstrating one advantage of our explanation method.<|reference_end|>
arxiv
@article{kim2024discovering, title={Discovering Biases in Information Retrieval Models Using Relevance Thesaurus as Global Explanation}, author={Youngwoo Kim, Razieh Rahimi, James Allan}, journal={arXiv preprint arXiv:2410.03584}, year={2024}, archivePrefix={arXiv}, eprint={2410.03584}, primaryClass={cs.IR} }
kim2024discovering
arxiv-665713
2410.03585
MeDeT: Medical Device Digital Twins Creation with Few-shot Meta-learning
<|reference_start|>MeDeT: Medical Device Digital Twins Creation with Few-shot Meta-learning: Testing healthcare Internet of Things (IoT) applications at system and integration levels necessitates integrating numerous medical devices of various types. Challenges of incorporating medical devices are: (i) their continuous evolution, making it infeasible to include all device variants, and (ii) rigorous testing at scale requires multiple devices and their variants, which is time-intensive, costly, and impractical. Our collaborator, Oslo City's health department, faced these challenges in developing automated test infrastructure, which our research aims to address. In this context, we propose a meta-learning-based approach (MeDeT) to generate digital twins (DTs) of medical devices and adapt DTs to evolving devices. We evaluate MeDeT in Oslo City's context using five widely-used medical devices integrated with a real-world healthcare IoT application. Our evaluation assesses MeDeT's ability to generate and adapt DTs across various devices and versions using different few-shot methods, the fidelity of these DTs, the scalability of operating 1000 DTs concurrently, and the associated time costs. Results show that MeDeT can generate DTs with over 96% fidelity, adapt DTs to different devices and newer versions with reduced time cost (around one minute), and operate 1000 DTs in a scalable manner while maintaining the fidelity level, thus serving in place of physical devices for testing.<|reference_end|>
arxiv
@article{sartaj2024medet:, title={MeDeT: Medical Device Digital Twins Creation with Few-shot Meta-learning}, author={Hassan Sartaj, Shaukat Ali, and Julie Marie Gj{\o}by}, journal={arXiv preprint arXiv:2410.03585}, year={2024}, archivePrefix={arXiv}, eprint={2410.03585}, primaryClass={cs.SE} }
sartaj2024medet:
arxiv-665714
2410.03586
Never Mind The No-Ops: Faster and Less Volatile Simulation Modelling of Co-Evolutionary Species Interactions via Spatial Cyclic Games
<|reference_start|>Never Mind The No-Ops: Faster and Less Volatile Simulation Modelling of Co-Evolutionary Species Interactions via Spatial Cyclic Games: Issues in co-evolutionary population dynamics have long been studied via computationally intensive simulations of minimally simple agent-based models, known as Evolutionary Spatial Cyclic Games (ESCGs), involving multiple interacting biological species in which each agent has its own unique spatial location in a cell on a regular lattice, and can move from cell to cell over time. Many papers have been published exploring the dynamics of ESCGs where competitive inter-species predator/prey relationships are modelled via the cyclic game Rock-Paper-Scissors (RPS) for three species, or Rock-Paper-Scissors-Lizard-Spock (RPSLS) for five. At the core of these simulations is the "Elementary Step" (ES), in which one or two agents are chosen at random to either compete to the death, or to reproduce, or to move location. ESCG studies typically involve executing trillions of ESs and hence the computational efficiency of the core ES algorithm is a key concern. In this paper I demonstrate that the de facto standard "Original ES" (OES) algorithm is computationally inefficient both in time and in space due to the implicit execution of many "no-op" commands (i.e., commands that do nothing) and because at steady state large numbers of cells can be empty, and yet empty cells serve no purpose. I present a new "Revised ES" (RES) algorithm which eliminates these inefficiencies, and I show empirically that ESCGs with RES exhibit qualitatively the same characteristics as those with OES, and are also markedly more stable. The more stable dynamics of RES-based simulations means that they can be run with smaller lattices than when using OES, leading to reductions in total simulation times of 85% or more. Python source code developed for the experiments reported here is freely available on GitHub.<|reference_end|>
arxiv
@article{cliff2024never, title={Never Mind The No-Ops: Faster and Less Volatile Simulation Modelling of Co-Evolutionary Species Interactions via Spatial Cyclic Games}, author={Dave Cliff}, journal={arXiv preprint arXiv:2410.03586}, year={2024}, archivePrefix={arXiv}, eprint={2410.03586}, primaryClass={q-bio.PE cs.CE math.DS nlin.AO nlin.CG} }
cliff2024never
arxiv-665715
2410.03588
Training Over a Distribution of Hyperparameters for Enhanced Performance and Adaptability on Imbalanced Classification
<|reference_start|>Training Over a Distribution of Hyperparameters for Enhanced Performance and Adaptability on Imbalanced Classification: Although binary classification is a well-studied problem, training reliable classifiers under severe class imbalance remains a challenge. Recent techniques mitigate the ill effects of imbalance on training by modifying the loss functions or optimization methods. We observe that different hyperparameter values on these loss functions perform better at different recall values. We propose to exploit this fact by training one model over a distribution of hyperparameter values--instead of a single value--via Loss Conditional Training (LCT). Experiments show that training over a distribution of hyperparameters not only approximates the performance of several models but actually improves the overall performance of models on both CIFAR and real medical imaging applications, such as melanoma and diabetic retinopathy detection. Furthermore, training models with LCT is more efficient because some hyperparameter tuning can be conducted after training to meet individual needs without needing to retrain from scratch.<|reference_end|>
arxiv
@article{lieberman2024training, title={Training Over a Distribution of Hyperparameters for Enhanced Performance and Adaptability on Imbalanced Classification}, author={Kelsey Lieberman, Swarna Kamlam Ravindran, Shuai Yuan and Carlo Tomasi}, journal={arXiv preprint arXiv:2410.03588}, year={2024}, archivePrefix={arXiv}, eprint={2410.03588}, primaryClass={cs.LG} }
lieberman2024training
arxiv-665716
2410.03592
Variational Bayes Gaussian Splatting
<|reference_start|>Variational Bayes Gaussian Splatting: Recently, 3D Gaussian Splatting has emerged as a promising approach for modeling 3D scenes using mixtures of Gaussians. The predominant optimization method for these models relies on backpropagating gradients through a differentiable rendering pipeline, which struggles with catastrophic forgetting when dealing with continuous streams of data. To address this limitation, we propose Variational Bayes Gaussian Splatting (VBGS), a novel approach that frames training a Gaussian splat as variational inference over model parameters. By leveraging the conjugacy properties of multivariate Gaussians, we derive a closed-form variational update rule, allowing efficient updates from partial, sequential observations without the need for replay buffers. Our experiments show that VBGS not only matches state-of-the-art performance on static datasets, but also enables continual learning from sequentially streamed 2D and 3D data, drastically improving performance in this setting.<|reference_end|>
arxiv
@article{vandemaele2024variational, title={Variational Bayes Gaussian Splatting}, author={Toon Van de Maele, Ozan Catal, Alexander Tschantz, Christopher L. Buckley, Tim Verbelen}, journal={arXiv preprint arXiv:2410.03592}, year={2024}, archivePrefix={arXiv}, eprint={2410.03592}, primaryClass={cs.CV cs.AI} }
vandemaele2024variational
arxiv-665717
2410.03594
Explicit, Implicit, and Scattered: Revisiting Event Extraction to Capture Complex Arguments
<|reference_start|>Explicit, Implicit, and Scattered: Revisiting Event Extraction to Capture Complex Arguments: Prior works formulate the extraction of event-specific arguments as a span extraction problem, where event arguments are explicit -- i.e. assumed to be contiguous spans of text in a document. In this study, we revisit this definition of Event Extraction (EE) by introducing two key argument types that cannot be modeled by existing EE frameworks. First, implicit arguments are event arguments which are not explicitly mentioned in the text, but can be inferred through context. Second, scattered arguments are event arguments that are composed of information scattered throughout the text. These two argument types are crucial to elicit the full breadth of information required for proper event modeling. To support the extraction of explicit, implicit, and scattered arguments, we develop a novel dataset, DiscourseEE, which includes 7,464 argument annotations from online health discourse. Notably, 51.2% of the arguments are implicit, and 17.4% are scattered, making DiscourseEE a unique corpus for complex event extraction. Additionally, we formulate argument extraction as a text generation problem to facilitate the extraction of complex argument types. We provide a comprehensive evaluation of state-of-the-art models and highlight critical open challenges in generative event extraction. Our data and codebase are available at https://omar-sharif03.github.io/DiscourseEE.<|reference_end|>
arxiv
@article{sharif2024explicit, title={Explicit, Implicit, and Scattered: Revisiting Event Extraction to Capture Complex Arguments}, author={Omar Sharif, Joseph Gatto, Madhusudan Basak, Sarah M. Preum}, journal={arXiv preprint arXiv:2410.03594}, year={2024}, archivePrefix={arXiv}, eprint={2410.03594}, primaryClass={cs.CL} }
sharif2024explicit
arxiv-665718
2410.03595
Understanding Reasoning in Chain-of-Thought from the Hopfieldian View
<|reference_start|>Understanding Reasoning in Chain-of-Thought from the Hopfieldian View: Large Language Models have demonstrated remarkable abilities across various tasks, with Chain-of-Thought (CoT) prompting emerging as a key technique to enhance reasoning capabilities. However, existing research primarily focuses on improving performance, lacking a comprehensive framework to explain and understand the fundamental factors behind CoT's success. To bridge this gap, we introduce a novel perspective grounded in the Hopfieldian view of cognition in cognitive neuroscience. We establish a connection between CoT reasoning and key cognitive elements such as stimuli, actions, neural populations, and representation spaces. From our view, we can understand the reasoning process as the movement between these representation spaces. Building on this insight, we develop a method for localizing reasoning errors in the response of CoTs. Moreover, we propose the Representation-of-Thought (RoT) framework, which leverages the robustness of low-dimensional representation spaces to enhance the robustness of the reasoning process in CoTs. Experimental results demonstrate that RoT improves the robustness and interpretability of CoT reasoning while offering fine-grained control over the reasoning process.<|reference_end|>
arxiv
@article{hu2024understanding, title={Understanding Reasoning in Chain-of-Thought from the Hopfieldian View}, author={Lijie Hu, Liang Liu, Shu Yang, Xin Chen, Zhen Tan, Muhammad Asif Ali, Mengdi Li, and Di Wang}, journal={arXiv preprint arXiv:2410.03595}, year={2024}, archivePrefix={arXiv}, eprint={2410.03595}, primaryClass={cs.AI cs.CL cs.LG} }
hu2024understanding
arxiv-665719
2410.03596
SiMilarity-Enhanced Homophily for Multi-View Heterophilous Graph Clustering
<|reference_start|>SiMilarity-Enhanced Homophily for Multi-View Heterophilous Graph Clustering: With the increasing prevalence of graph-structured data, multi-view graph clustering has been widely used in various downstream applications. Existing approaches primarily rely on a unified message passing mechanism, which significantly enhances clustering performance. Nevertheless, this mechanism limits its applicability to heterophilous situations, as it is fundamentally predicated on the assumption of homophily, i.e., the connected nodes often belong to the same class. In reality, this assumption does not always hold; a moderately or even mildly homophilous graph is more common than a fully homophilous one due to inevitable heterophilous information in the graph. To address this issue, in this paper, we propose a novel SiMilarity-enhanced Homophily for Multi-view Heterophilous Graph Clustering (SMHGC) approach. By analyzing the relationship between similarity and graph homophily, we propose to enhance the homophily by introducing three similarity terms, i.e., neighbor pattern similarity, node feature similarity, and multi-view global similarity, in a label-free manner. Then, a consensus-based inter- and intra-view fusion paradigm is proposed to fuse the improved homophilous graph from different views and utilize them for clustering. The state-of-the-art experimental results on both multi-view heterophilous and homophilous datasets collectively demonstrate the strong capacity of similarity for unsupervised multi-view heterophilous graph learning. Additionally, the consistent performance across semi-synthetic datasets with varying levels of homophily serves as further evidence of SMHGC's resilience to heterophily.<|reference_end|>
arxiv
@article{chen2024similarity-enhanced, title={SiMilarity-Enhanced Homophily for Multi-View Heterophilous Graph Clustering}, author={Jianpeng Chen, Yawen Ling, Yazhou Ren, Zichen Wen, Tianyi Wu, Shufei Zhang, Lifang He}, journal={arXiv preprint arXiv:2410.03596}, year={2024}, archivePrefix={arXiv}, eprint={2410.03596}, primaryClass={cs.AI} }
chen2024similarity-enhanced
arxiv-665720
2410.03600
Efficiently Identifying Watermarked Segments in Mixed-Source Texts
<|reference_start|>Efficiently Identifying Watermarked Segments in Mixed-Source Texts: Text watermarks in large language models (LLMs) are increasingly used to detect synthetic text, mitigating misuse cases like fake news and academic dishonesty. While existing watermarking detection techniques primarily focus on classifying entire documents as watermarked or not, they often neglect the common scenario of identifying individual watermark segments within longer, mixed-source documents. Drawing inspiration from plagiarism detection systems, we propose two novel methods for partial watermark detection. First, we develop a geometry cover detection framework aimed at determining whether there is a watermark segment in long text. Second, we introduce an adaptive online learning algorithm to pinpoint the precise location of watermark segments within the text. Evaluated on three popular watermarking techniques (KGW-Watermark, Unigram-Watermark, and Gumbel-Watermark), our approach achieves high accuracy, significantly outperforming baseline methods. Moreover, our framework is adaptable to other watermarking techniques, offering new insights for precise watermark detection.<|reference_end|>
arxiv
@article{zhao2024efficiently, title={Efficiently Identifying Watermarked Segments in Mixed-Source Texts}, author={Xuandong Zhao, Chenwen Liao, Yu-Xiang Wang, Lei Li}, journal={arXiv preprint arXiv:2410.03600}, year={2024}, archivePrefix={arXiv}, eprint={2410.03600}, primaryClass={cs.CL} }
zhao2024efficiently
arxiv-665721
2410.03601
How Discrete and Continuous Diffusion Meet: Comprehensive Analysis of Discrete Diffusion Models via a Stochastic Integral Framework
<|reference_start|>How Discrete and Continuous Diffusion Meet: Comprehensive Analysis of Discrete Diffusion Models via a Stochastic Integral Framework: Discrete diffusion models have gained increasing attention for their ability to model complex distributions with tractable sampling and inference. However, the error analysis for discrete diffusion models remains less well-understood. In this work, we propose a comprehensive framework for the error analysis of discrete diffusion models based on L\'evy-type stochastic integrals. By generalizing the Poisson random measure to that with a time-independent and state-dependent intensity, we rigorously establish a stochastic integral formulation of discrete diffusion models and provide the corresponding change of measure theorems that are intriguingly analogous to It\^o integrals and Girsanov's theorem for their continuous counterparts. Our framework unifies and strengthens the current theoretical results on discrete diffusion models and obtains the first error bound for the $\tau$-leaping scheme in KL divergence. With error sources clearly identified, our analysis gives new insight into the mathematical properties of discrete diffusion models and offers guidance for the design of efficient and accurate algorithms for real-world discrete diffusion model applications.<|reference_end|>
arxiv
@article{ren2024how, title={How Discrete and Continuous Diffusion Meet: Comprehensive Analysis of Discrete Diffusion Models via a Stochastic Integral Framework}, author={Yinuo Ren, Haoxuan Chen, Grant M. Rotskoff, Lexing Ying}, journal={arXiv preprint arXiv:2410.03601}, year={2024}, archivePrefix={arXiv}, eprint={2410.03601}, primaryClass={cs.LG cs.NA math.NA stat.ML} }
ren2024how
arxiv-665722
2410.03602
Exploring gauge-fixing conditions with gradient-based optimization
<|reference_start|>Exploring gauge-fixing conditions with gradient-based optimization: Lattice gauge fixing is required to compute gauge-variant quantities, for example those used in RI-MOM renormalization schemes or as objects of comparison for model calculations. Recently, gauge-variant quantities have also been found to be more amenable to signal-to-noise optimization using contour deformations. These applications motivate systematic parameterization and exploration of gauge-fixing schemes. This work introduces a differentiable parameterization of gauge fixing which is broad enough to cover Landau gauge, Coulomb gauge, and maximal tree gauges. The adjoint state method allows gradient-based optimization to select gauge-fixing schemes that minimize an arbitrary target loss function.<|reference_end|>
arxiv
@article{detmold2024exploring, title={Exploring gauge-fixing conditions with gradient-based optimization}, author={William Detmold, Gurtej Kanwar, Yin Lin, Phiala E. Shanahan, Michael L. Wagman}, journal={arXiv preprint arXiv:2410.03602}, year={2024}, number={MIT-CTP/5786}, archivePrefix={arXiv}, eprint={2410.03602}, primaryClass={hep-lat cs.LG} }
detmold2024exploring
arxiv-665723
2410.03603
LeLaN: Learning A Language-Conditioned Navigation Policy from In-the-Wild Videos
<|reference_start|>LeLaN: Learning A Language-Conditioned Navigation Policy from In-the-Wild Videos: The world is filled with a wide variety of objects. For robots to be useful, they need the ability to find arbitrary objects described by people. In this paper, we present LeLaN (Learning Language-conditioned Navigation policy), a novel approach that consumes unlabeled, action-free egocentric data to learn scalable, language-conditioned object navigation. Our framework, LeLaN, leverages the semantic knowledge of large vision-language models, as well as robotic foundation models, to label in-the-wild data from a variety of indoor and outdoor environments. We label over 130 hours of data collected in real-world indoor and outdoor environments, including robot observations, YouTube video tours, and human walking data. Extensive experiments with over 1000 real-world trials show that our approach enables training a policy from unlabeled action-free videos that outperforms state-of-the-art robot navigation methods, while being capable of inference at 4 times their speed on edge compute. We open-source our models, datasets and provide supplementary videos on our project page (https://learning-language-navigation.github.io/).<|reference_end|>
arxiv
@article{hirose2024lelan:, title={LeLaN: Learning A Language-Conditioned Navigation Policy from In-the-Wild Videos}, author={Noriaki Hirose, Catherine Glossop, Ajay Sridhar, Dhruv Shah, Oier Mees, Sergey Levine}, journal={arXiv preprint arXiv:2410.03603}, year={2024}, archivePrefix={arXiv}, eprint={2410.03603}, primaryClass={cs.RO} }
hirose2024lelan:
arxiv-665724
2410.03605
Stabilizing the Consistent Quasidiffusion Method with Linear Prolongation
<|reference_start|>Stabilizing the Consistent Quasidiffusion Method with Linear Prolongation: The quasidiffusion (QD) method, also known as the Variable Eddington Factor (VEF) method in the astrophysical community, is an established iterative method for accelerating source iterations in SN calculations. A great advantage of the QD method is that the diffusion equation that accelerates the SN source iterations can be discretized in any valid discretization without concern for consistency with the transport discretization. QD has comparable effectiveness with diffusion synthetic acceleration (DSA), but the converged scalar flux of the diffusion equation will differ from the transport solution by the spatial truncation errors. Larsen et al. introduced a new consistent QD method (CQD), which includes a straightforwardly defined transport consistency factor closely related to the well-known coarse mesh finite difference (CMFD) and diffusion synthetic acceleration (DSA) methods. The CQD method preserves the discretized scalar flux solution of the SN equations, and it is stable for problems with optically thin spatial cells, but just like nonlinear diffusion acceleration (NDA), it degrades in performance and eventually becomes unstable when the spatial cells become greater than about one mean free path thick. In this paper, we performed a formal Fourier analysis of the CQD method to show that its theoretical spectral radius is essentially the same as that of the NDA method. To improve the stability of CQD, we introduce the lpCQD method, which adopts the idea of the linear prolongation CMFD (lpCMFD) method.<|reference_end|>
arxiv
@article{wang2024stabilizing, title={Stabilizing the Consistent Quasidiffusion Method with Linear Prolongation}, author={Dean Wang}, journal={arXiv preprint arXiv:2410.03605}, year={2024}, archivePrefix={arXiv}, eprint={2410.03605}, primaryClass={math.NA cs.NA} }
wang2024stabilizing
arxiv-665725
2410.03608
TICKing All the Boxes: Generated Checklists Improve LLM Evaluation and Generation
<|reference_start|>TICKing All the Boxes: Generated Checklists Improve LLM Evaluation and Generation: Given the widespread adoption and usage of Large Language Models (LLMs), it is crucial to have flexible and interpretable evaluations of their instruction-following ability. Preference judgments between model outputs have become the de facto evaluation standard, despite distilling complex, multi-faceted preferences into a single ranking. Furthermore, as human annotation is slow and costly, LLMs are increasingly used to make these judgments, at the expense of reliability and interpretability. In this work, we propose TICK (Targeted Instruct-evaluation with ChecKlists), a fully automated, interpretable evaluation protocol that structures evaluations with LLM-generated, instruction-specific checklists. We first show that, given an instruction, LLMs can reliably produce high-quality, tailored evaluation checklists that decompose the instruction into a series of YES/NO questions. Each question asks whether a candidate response meets a specific requirement of the instruction. We demonstrate that using TICK leads to a significant increase (46.4% $\to$ 52.2%) in the frequency of exact agreements between LLM judgements and human preferences, as compared to having an LLM directly score an output. We then show that STICK (Self-TICK) can be used to improve generation quality across multiple benchmarks via self-refinement and Best-of-N selection. STICK self-refinement on LiveBench reasoning tasks leads to an absolute gain of $+$7.8%, whilst Best-of-N selection with STICK attains $+$6.3% absolute improvement on the real-world instruction dataset, WildBench. In light of this, structured, multi-faceted self-improvement is shown to be a promising way to further advance LLM capabilities. Finally, by providing LLM-generated checklists to human evaluators tasked with directly scoring LLM responses to WildBench instructions, we notably increase inter-annotator agreement (0.194 $\to$ 0.256).<|reference_end|>
arxiv
@article{cook2024ticking, title={TICKing All the Boxes: Generated Checklists Improve LLM Evaluation and Generation}, author={Jonathan Cook, Tim Rockt\"aschel, Jakob Foerster, Dennis Aumiller, Alex Wang}, journal={arXiv preprint arXiv:2410.03608}, year={2024}, archivePrefix={arXiv}, eprint={2410.03608}, primaryClass={cs.AI cs.CL cs.HC cs.LG} }
cook2024ticking
arxiv-665726
2410.03609
Subexponential Algorithms for Clique Cover on Unit Disk and Unit Ball Graphs
<|reference_start|>Subexponential Algorithms for Clique Cover on Unit Disk and Unit Ball Graphs: In Clique Cover, given a graph $G$ and an integer $k$, the task is to partition the vertices of $G$ into $k$ cliques. Clique Cover on unit ball graphs has a natural interpretation as a clustering problem, where the objective function is the maximum diameter of a cluster. Many classical NP-hard problems are known to admit $2^{O(n^{(1 - 1/d)})}$-time algorithms on unit ball graphs in $\mathbb{R}^d$ [de Berg et al., SIAM J. Comp 2018]. A notable exception is the Maximum Clique problem, which admits a polynomial-time algorithm on unit disk graphs and a subexponential algorithm on unit ball graphs in $\mathbb{R}^3$, but no subexponential algorithm on unit ball graphs in dimensions 4 or larger, assuming the ETH [Bonamy et al., JACM 2021]. In this work, we show that Clique Cover also suffers from a "curse of dimensionality", albeit in a significantly different way compared to Maximum Clique. We present a $2^{O(\sqrt{n})}$-time algorithm for unit disk graphs and argue that it is tight under the ETH. On the other hand, we show that Clique Cover does not admit a $2^{o(n)}$-time algorithm on unit ball graphs in dimension $5$, unless the ETH fails.<|reference_end|>
arxiv
@article{koana2024subexponential, title={Subexponential Algorithms for Clique Cover on Unit Disk and Unit Ball Graphs}, author={Tomohiro Koana, Nidhi Purohit, Kirill Simonov}, journal={arXiv preprint arXiv:2410.03609}, year={2024}, archivePrefix={arXiv}, eprint={2410.03609}, primaryClass={cs.DS cs.CG} }
koana2024subexponential
arxiv-665727
2410.03610
Management of high-tech companies in conditions of import substitution
<|reference_start|>Management of high-tech companies in conditions of import substitution: The article analyzes the development of high-tech sectors of the Russian economy in the context of import substitution. Features of managing priority project portfolios are considered. Issues of creating a unified information space for aviation industry enterprises are studied in the context of introducing a modified OLAP technology for management decision support. The investment attractiveness of high-tech sectors of the Russian economy is estimated based on the coefficient of gross value added of project products. Investment-overheated industries are identified, and recommendations are given on market correction and on returning project assets to a balanced state.<|reference_end|>
arxiv
@article{pyatovsky2024management, title={Management of high-tech companies in conditions of import substitution}, author={S.E.Pyatovsky, N.S.Efimova, E.V.Surkova}, journal={arXiv preprint arXiv:2410.03610}, year={2024}, archivePrefix={arXiv}, eprint={2410.03610}, primaryClass={cs.CY} }
pyatovsky2024management
arxiv-665728
2410.03613
Large Language Model Performance Benchmarking on Mobile Platforms: A Thorough Evaluation
<|reference_start|>Large Language Model Performance Benchmarking on Mobile Platforms: A Thorough Evaluation: As large language models (LLMs) increasingly integrate into every aspect of our work and daily lives, there are growing concerns about user privacy, which push the trend toward local deployment of these models. There are a number of lightweight LLMs (e.g., Gemini Nano, LLAMA2 7B) that can run locally on smartphones, providing users with greater control over their personal data. As a rapidly emerging application, we are concerned about their performance on commercial-off-the-shelf mobile devices. To fully understand the current landscape of LLM deployment on mobile platforms, we conduct a comprehensive measurement study on mobile devices. We evaluate both metrics that affect user experience, including token throughput, latency, and battery consumption, as well as factors critical to developers, such as resource utilization, DVFS strategies, and inference engines. In addition, we provide a detailed analysis of how these hardware capabilities and system dynamics affect on-device LLM performance, which may help developers identify and address bottlenecks for mobile LLM applications. We also provide comprehensive comparisons across the mobile system-on-chips (SoCs) from major vendors, highlighting their performance differences in handling LLM workloads. We hope that this study can provide insights for both the development of on-device LLMs and the design for future mobile system architecture.<|reference_end|>
arxiv
@article{xiao2024large, title={Large Language Model Performance Benchmarking on Mobile Platforms: A Thorough Evaluation}, author={Jie Xiao, Qianyi Huang, Xu Chen and Chen Tian}, journal={arXiv preprint arXiv:2410.03613}, year={2024}, archivePrefix={arXiv}, eprint={2410.03613}, primaryClass={cs.LG} }
xiao2024large
arxiv-665729
2410.03617
What Matters for Model Merging at Scale?
<|reference_start|>What Matters for Model Merging at Scale?: Model merging aims to combine multiple expert models into a more capable single model, offering benefits such as reduced storage and serving costs, improved generalization, and support for decentralized model development. Despite its promise, previous studies have primarily focused on merging a few small models. This leaves many unanswered questions about the effect of scaling model size and how it interplays with other key factors -- like the base model quality and number of expert models -- to affect the merged model's performance. This work systematically evaluates the utility of model merging at scale, examining the impact of these different factors. We experiment with merging fully fine-tuned models using 4 popular merging methods -- Averaging, Task Arithmetic, Dare, and TIES -- across model sizes ranging from 1B-64B parameters and merging up to 8 different expert models. We evaluate the merged models on both held-in tasks, i.e., the expert's training tasks, and zero-shot generalization to unseen held-out tasks. Our experiments provide several new insights about model merging at scale and the interplay between different factors. First, we find that merging is more effective when experts are created from strong base models, i.e., models with good zero-shot performance. Second, larger models facilitate easier merging. Third, merging consistently improves generalization capabilities. Notably, when merging 8 large expert models, the merged models often generalize better compared to the multitask trained models. Fourth, we can better merge more expert models when working with larger models. Fifth, different merging methods behave very similarly at larger scales. Overall, our findings shed light on some interesting properties of model merging while also highlighting some limitations. We hope that this study will serve as a reference point on large-scale merging for upcoming research.<|reference_end|>
arxiv
@article{yadav2024what, title={What Matters for Model Merging at Scale?}, author={Prateek Yadav, Tu Vu, Jonathan Lai, Alexandra Chronopoulou, Manaal Faruqui, Mohit Bansal, Tsendsuren Munkhdalai}, journal={arXiv preprint arXiv:2410.03617}, year={2024}, archivePrefix={arXiv}, eprint={2410.03617}, primaryClass={cs.LG cs.AI cs.CL} }
yadav2024what
arxiv-665730
2410.03618
Open-World Reinforcement Learning over Long Short-Term Imagination
<|reference_start|>Open-World Reinforcement Learning over Long Short-Term Imagination: Training visual reinforcement learning agents in a high-dimensional open world presents significant challenges. While various model-based methods have improved sample efficiency by learning interactive world models, these agents tend to be "short-sighted", as they are typically trained on short snippets of imagined experiences. We argue that the primary obstacle in open-world decision-making is improving the efficiency of off-policy exploration across an extensive state space. In this paper, we present LS-Imagine, which extends the imagination horizon within a limited number of state transition steps, enabling the agent to explore behaviors that potentially lead to promising long-term feedback. The foundation of our approach is to build a long short-term world model. To achieve this, we simulate goal-conditioned jumpy state transitions and compute corresponding affordance maps by zooming in on specific areas within single images. This facilitates the integration of direct long-term values into behavior learning. Our method demonstrates significant improvements over state-of-the-art techniques in MineDojo.<|reference_end|>
arxiv
@article{li2024open-world, title={Open-World Reinforcement Learning over Long Short-Term Imagination}, author={Jiajian Li, Qi Wang, Yunbo Wang, Xin Jin, Yang Li, Wenjun Zeng, Xiaokang Yang}, journal={arXiv preprint arXiv:2410.03618}, year={2024}, archivePrefix={arXiv}, eprint={2410.03618}, primaryClass={cs.LG} }
li2024open-world
arxiv-665731
2410.03621
A Global Medical Data Security and Privacy Preserving Standards Identification Framework for Electronic Healthcare Consumers
<|reference_start|>A Global Medical Data Security and Privacy Preserving Standards Identification Framework for Electronic Healthcare Consumers: Electronic Health Records (EHR) are crucial for the success of digital healthcare, with a focus on putting consumers at the center of this transformation. However, the digitalization of healthcare records brings along security and privacy risks for personal data. The major concern is that different countries have varying standards for the security and privacy of medical data. This paper proposes a novel and comprehensive framework to standardize these rules globally, bringing them together on a common platform. To support this proposal, the study reviews existing literature to understand the research interest in this issue. It also examines six key laws and standards related to security and privacy, identifying twenty concepts. The proposed framework utilizes K-means clustering to categorize these concepts and identify five key factors. Finally, an Ordinal Priority Approach is applied to determine the preferred implementation of these factors in the context of EHRs. The proposed study provides a descriptive and then a prescriptive framework for the implementation of privacy and security in the context of electronic health records. Therefore, the findings of the proposed framework are useful for professionals and policymakers in improving the security and privacy associated with EHRs.<|reference_end|>
arxiv
@article{mishra2024a, title={A Global Medical Data Security and Privacy Preserving Standards Identification Framework for Electronic Healthcare Consumers}, author={Vinaytosh Mishra, Kishu Gupta, Deepika Saxena, Ashutosh Kumar Singh}, journal={A Global Medical Data Security and Privacy Preserving Standards Identification Framework for Electronic Healthcare Consumers, in IEEE Transactions on Consumer Electronics, vol. 70, no. 1, pp. 4379-4387, Feb. 2024}, year={2024}, doi={10.1109/TCE.2024.3373912}, archivePrefix={arXiv}, eprint={2410.03621}, primaryClass={cs.LG} }
mishra2024a
arxiv-665732
2410.03622
A mixed-dimensional model for the electrostatic problem on coupled domains
<|reference_start|>A mixed-dimensional model for the electrostatic problem on coupled domains: We derive a mixed-dimensional 3D-1D formulation of the electrostatic equation in two domains with different dielectric constants to compute, with an affordable computational cost, the electric field and potential in the relevant case of thin inclusions in a larger 3D domain. The numerical solution is obtained by Mixed Finite Elements for the 3D problem and Finite Elements on the 1D domain. We analyze some test cases with simple geometries to validate the proposed approach against analytical solutions, and perform comparisons with the fully resolved 3D problem. We treat the case where ramifications are present in the one-dimensional domain and show some results on the geometry of an electrical treeing, a ramified structure that propagates in insulators causing their failure.<|reference_end|>
arxiv
@article{crippa2024a, title={A mixed-dimensional model for the electrostatic problem on coupled domains}, author={Beatrice Crippa, Anna Scotti, Andrea Villa}, journal={arXiv preprint arXiv:2410.03622}, year={2024}, archivePrefix={arXiv}, eprint={2410.03622}, primaryClass={math.NA cs.NA} }
crippa2024a
arxiv-665733
2410.03624
HyperCMR: Enhanced Multi-Contrast CMR Reconstruction with Eagle Loss
<|reference_start|>HyperCMR: Enhanced Multi-Contrast CMR Reconstruction with Eagle Loss: Accelerating image acquisition for cardiac magnetic resonance imaging (CMRI) is a critical task. CMRxRecon2024 challenge aims to set the state of the art for multi-contrast CMR reconstruction. This paper presents HyperCMR, a novel framework designed to accelerate the reconstruction of multi-contrast cardiac magnetic resonance (CMR) images. HyperCMR enhances the existing PromptMR model by incorporating advanced loss functions, notably the innovative Eagle Loss, which is specifically designed to recover missing high-frequency information in undersampled k-space. Extensive experiments conducted on the CMRxRecon2024 challenge dataset demonstrate that HyperCMR consistently outperforms the baseline across multiple evaluation metrics, achieving superior SSIM and PSNR scores.<|reference_end|>
arxiv
@article{xu2024hypercmr:, title={HyperCMR: Enhanced Multi-Contrast CMR Reconstruction with Eagle Loss}, author={Ruru Xu, Caner \"Ozer, Ilkay Oksuz}, journal={arXiv preprint arXiv:2410.03624}, year={2024}, archivePrefix={arXiv}, eprint={2410.03624}, primaryClass={eess.IV cs.CV} }
xu2024hypercmr:
arxiv-665734
2410.03626
Robust Offline Imitation Learning from Diverse Auxiliary Data
<|reference_start|>Robust Offline Imitation Learning from Diverse Auxiliary Data: Offline imitation learning enables learning a policy solely from a set of expert demonstrations, without any environment interaction. To alleviate the issue of distribution shift arising due to the small amount of expert data, recent works incorporate large numbers of auxiliary demonstrations alongside the expert data. However, the performance of these approaches relies on assumptions about the quality and composition of the auxiliary data, and they are rarely successful when those assumptions do not hold. To address this limitation, we propose Robust Offline Imitation from Diverse Auxiliary Data (ROIDA). ROIDA first identifies high-quality transitions from the entire auxiliary dataset using a learned reward function. These high-reward samples are combined with the expert demonstrations for weighted behavioral cloning. For lower-quality samples, ROIDA applies temporal difference learning to steer the policy towards high-reward states, improving long-term returns. This two-pronged approach enables our framework to effectively leverage both high and low-quality data without any assumptions. Extensive experiments validate that ROIDA achieves robust and consistent performance across multiple auxiliary datasets with diverse ratios of expert and non-expert demonstrations. ROIDA effectively leverages unlabeled auxiliary data, outperforming prior methods reliant on specific data assumptions.<|reference_end|>
arxiv
@article{ghosh2024robust, title={Robust Offline Imitation Learning from Diverse Auxiliary Data}, author={Udita Ghosh, Dripta S. Raychaudhuri, Jiachen Li, Konstantinos Karydis, Amit K. Roy-Chowdhury}, journal={arXiv preprint arXiv:2410.03626}, year={2024}, archivePrefix={arXiv}, eprint={2410.03626}, primaryClass={cs.LG} }
ghosh2024robust
arxiv-665735
2410.03634
Conditional Enzyme Generation Using Protein Language Models with Adapters
<|reference_start|>Conditional Enzyme Generation Using Protein Language Models with Adapters: The conditional generation of proteins with desired functions and/or properties is a key goal for generative models. Existing methods based on prompting of language models can generate proteins conditioned on a target functionality, such as a desired enzyme family. However, these methods are limited to simple, tokenized conditioning and have not been shown to generalize to unseen functions. In this study, we propose ProCALM (Protein Conditionally Adapted Language Model), an approach for the conditional generation of proteins using adapters to protein language models. Our specific implementation of ProCALM involves finetuning ProGen2 to incorporate conditioning representations of enzyme function and taxonomy. ProCALM matches existing methods at conditionally generating sequences from target enzyme families. Impressively, it can also generate within the joint distribution of enzymatic function and taxonomy, and it can generalize to rare and unseen enzyme families and taxonomies. Overall, ProCALM is a flexible and computationally efficient approach, and we expect that it can be extended to a wide range of generative language models.<|reference_end|>
arxiv
@article{yang2024conditional, title={Conditional Enzyme Generation Using Protein Language Models with Adapters}, author={Jason Yang, Aadyot Bhatnagar, Jeffrey A. Ruffolo, Ali Madani}, journal={arXiv preprint arXiv:2410.03634}, year={2024}, archivePrefix={arXiv}, eprint={2410.03634}, primaryClass={q-bio.BM cs.LG} }
yang2024conditional
arxiv-665736
2410.03637
On the Cost of Consecutive Estimation Error: Significance-Aware Non-linear Aging
<|reference_start|>On the Cost of Consecutive Estimation Error: Significance-Aware Non-linear Aging: This paper considers the semantics-aware remote state estimation of an asymmetric Markov chain with prioritized states. Due to resource constraints, the sensor needs to trade off estimation quality against communication cost. The aim is to exploit the significance of information through the history of system realizations to determine the optimal timing of transmission, thereby reducing the amount of uninformative data transmitted in the network. To this end, we introduce a new metric, the significance-aware Age of Consecutive Error (AoCE), that captures two semantic attributes: the significance of estimation error and the cost of consecutive error. Different costs and non-linear age functions are assigned to different estimation errors to account for their relative importance to system performance. We identify the optimal transmission problem as a countably infinite state Markov decision process (MDP) with unbounded costs. We first give sufficient conditions on the age functions, source pattern, and channel reliability so that an optimal policy exists to have bounded average costs. We show that the optimal policy exhibits a switching structure. That is, the sensor triggers a transmission only when the system has been trapped in an error for a certain number of consecutive time slots. We also provide sufficient conditions under which the switching policy degenerates into a simple threshold policy, i.e., featuring identical thresholds for all estimation errors. Furthermore, we exploit the structural properties and develop a structured policy iteration (SPI) algorithm that considerably reduces computation overhead. Numerical results show that the optimal policy outperforms the classic rule-, distortion- and age-based policies. An important takeaway is that the more semantic attributes we utilize, the fewer transmissions are needed.<|reference_end|>
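To make the switching structure concrete, here is a minimal Python sketch: transmit only once the system has been trapped in a given error for a threshold number of consecutive slots. The error labels and thresholds are made-up placeholders, not outputs of the paper's SPI algorithm.

```python
# Toy switching policy: transmit only after the estimator has been stuck
# in error state `error_state` for at least tau[error_state] slots.
def should_transmit(error_state, consecutive_slots, tau):
    if error_state is None:        # estimate currently correct: stay silent
        return False
    return consecutive_slots >= tau[error_state]

# Hypothetical thresholds: severe errors trigger transmission sooner.
tau = {"minor": 5, "severe": 1}
print(should_transmit("severe", 1, tau))  # True
print(should_transmit("minor", 3, tau))   # False
```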
arxiv
@article{luo2024on, title={On the Cost of Consecutive Estimation Error: Significance-Aware Non-linear Aging}, author={Jiping Luo, Nikolaos Pappas}, journal={arXiv preprint arXiv:2410.03637}, year={2024}, archivePrefix={arXiv}, eprint={2410.03637}, primaryClass={cs.IT cs.SY eess.SY math.IT} }
luo2024on
arxiv-665737
2410.03640
Real-World Benchmarks Make Membership Inference Attacks Fail on Diffusion Models
<|reference_start|>Real-World Benchmarks Make Membership Inference Attacks Fail on Diffusion Models: Membership inference attacks (MIAs) on diffusion models have emerged as potential evidence of unauthorized data usage in training pre-trained diffusion models. These attacks aim to detect the presence of specific images in training datasets of diffusion models. Our study delves into the evaluation of state-of-the-art MIAs on diffusion models and reveals critical flaws and overly optimistic performance estimates in existing MIA evaluation. We introduce CopyMark, a more realistic MIA benchmark that distinguishes itself through the support for pre-trained diffusion models, unbiased datasets, and fair evaluation pipelines. Through extensive experiments, we demonstrate that the effectiveness of current MIA methods significantly degrades under these more practical conditions. Based on our results, we alert that MIA, in its current state, is not a reliable approach for identifying unauthorized data usage in pre-trained diffusion models. To the best of our knowledge, we are the first to discover the performance overestimation of MIAs on diffusion models and present a unified benchmark for more realistic evaluation. Our code is available on GitHub: \url{https://github.com/caradryanl/CopyMark}.<|reference_end|>
arxiv
@article{liang2024real-world, title={Real-World Benchmarks Make Membership Inference Attacks Fail on Diffusion Models}, author={Chumeng Liang and Jiaxuan You}, journal={arXiv preprint arXiv:2410.03640}, year={2024}, archivePrefix={arXiv}, eprint={2410.03640}, primaryClass={cs.LG} }
liang2024real-world
arxiv-665738
2410.03642
Aligning LLMs with Individual Preferences via Interaction
<|reference_start|>Aligning LLMs with Individual Preferences via Interaction: As large language models (LLMs) demonstrate increasingly advanced capabilities, aligning their behaviors with human values and preferences becomes crucial for their wide adoption. While previous research focuses on general alignment to principles such as helpfulness, harmlessness, and honesty, the need to account for individual and diverse preferences has been largely overlooked, potentially undermining customized human experiences. To address this gap, we train LLMs that can "interact to align", essentially cultivating the meta-skill of LLMs to implicitly infer the unspoken personalized preferences of the current user through multi-turn conversations, and then dynamically align their following behaviors and responses to these inferred preferences. Our approach involves establishing a diverse pool of 3,310 distinct user personas by initially creating seed examples, which are then expanded through iterative self-generation and filtering. Guided by distinct user personas, we leverage multi-LLM collaboration to develop a multi-turn preference dataset containing 3K+ multi-turn conversations in tree structures. Finally, we apply supervised fine-tuning and reinforcement learning to enhance LLMs using this dataset. For evaluation, we establish the ALOE (ALign With CustOmized PrEferences) benchmark, consisting of 100 carefully selected examples and well-designed metrics to measure the customized alignment performance during conversations. Experimental results demonstrate the effectiveness of our method in enabling dynamic, personalized alignment via interaction.<|reference_end|>
arxiv
@article{wu2024aligning, title={Aligning LLMs with Individual Preferences via Interaction}, author={Shujin Wu, May Fung, Cheng Qian, Jeonghwan Kim, Dilek Hakkani-Tur, Heng Ji}, journal={arXiv preprint arXiv:2410.03642}, year={2024}, archivePrefix={arXiv}, eprint={2410.03642}, primaryClass={cs.CL cs.AI cs.HC} }
wu2024aligning
arxiv-665739
2410.03643
Sine-transform-based fast solvers for Riesz fractional nonlinear Schr\"odinger equations with attractive nonlinearities
<|reference_start|>Sine-transform-based fast solvers for Riesz fractional nonlinear Schr\"odinger equations with attractive nonlinearities: This paper presents fast solvers for linear systems arising from the discretization of fractional nonlinear Schr\"odinger equations with Riesz derivatives and attractive nonlinearities. These systems are characterized by complex symmetry, indefiniteness, and a $d$-level Toeplitz-plus-diagonal structure. We propose a Toeplitz-based anti-symmetric and normal splitting iteration method for the equivalent real block linear systems, ensuring unconditional convergence. The derived optimal parameter is approximately equal to 1. By combining this iteration method with sine-transform-based preconditioning, we introduce a novel preconditioner that enhances the convergence rate of Krylov subspace methods. Both theoretical and numerical analyses demonstrate that the new preconditioner exhibits a parameter-free property (allowing the iteration parameter to be fixed at 1). The eigenvalues of the preconditioned system matrix are nearly clustered in a small neighborhood around 1, and the convergence rate of the corresponding preconditioned GMRES method is independent of the spatial mesh size and the fractional order of the Riesz derivatives.<|reference_end|>
arxiv
@article{chen2024sine-transform-based, title={Sine-transform-based fast solvers for Riesz fractional nonlinear Schr\"odinger equations with attractive nonlinearities}, author={Chao Chen, Xi Yang, Fei-Yan Zhang}, journal={arXiv preprint arXiv:2410.03643}, year={2024}, archivePrefix={arXiv}, eprint={2410.03643}, primaryClass={math.NA cs.NA} }
chen2024sine-transform-based
arxiv-665740
2410.03644
Unlearnable 3D Point Clouds: Class-wise Transformation Is All You Need
<|reference_start|>Unlearnable 3D Point Clouds: Class-wise Transformation Is All You Need: Traditional unlearnable strategies have been proposed to prevent unauthorized users from training on 2D image data. As more 3D point cloud data contains sensitive information, unauthorized usage of this new type of data has also become a serious concern. To address this, we propose the first integral unlearnable framework for 3D point clouds including two processes: (i) we propose an unlearnable data protection scheme, involving a class-wise setting established by a category-adaptive allocation strategy and multi-transformations assigned to samples; (ii) we propose a data restoration scheme that utilizes class-wise inverse matrix transformation, thus enabling authorized-only training for unlearnable data. This restoration process is a practical issue overlooked in most existing unlearnable literature, i.e., even authorized users struggle to gain knowledge from 3D unlearnable data. Both theoretical and empirical results (including 6 datasets, 16 models, and 2 tasks) demonstrate the effectiveness of our proposed unlearnable framework. Our code is available at \url{https://github.com/CGCL-codes/UnlearnablePC}<|reference_end|>
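As a toy illustration of the class-wise invertible idea (one random rotation per class, restored with the inverse matrix), consider the Python sketch below; the paper's category-adaptive allocation and multi-transformation scheme is considerably richer.

```python
import numpy as np

rng = np.random.default_rng(0)

def random_rotation() -> np.ndarray:
    # QR of a Gaussian matrix gives a random orthogonal matrix;
    # flip the sign if needed so the determinant is +1 (a proper rotation).
    q, _ = np.linalg.qr(rng.standard_normal((3, 3)))
    return q if np.linalg.det(q) > 0 else -q

# One invertible transform per class (toy stand-in for the paper's
# category-adaptive allocation of multiple transformations).
class_transforms = {c: random_rotation() for c in range(10)}

def protect(points: np.ndarray, label: int) -> np.ndarray:
    return points @ class_transforms[label].T

def restore(points: np.ndarray, label: int) -> np.ndarray:
    return points @ np.linalg.inv(class_transforms[label]).T

cloud = rng.standard_normal((1024, 3))
assert np.allclose(restore(protect(cloud, 3), 3), cloud)
```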
arxiv
@article{wang2024unlearnable, title={Unlearnable 3D Point Clouds: Class-wise Transformation Is All You Need}, author={Xianlong Wang, Minghui Li, Wei Liu, Hangtao Zhang, Shengshan Hu, Yechao Zhang, Ziqi Zhou, Hai Jin}, journal={arXiv preprint arXiv:2410.03644}, year={2024}, archivePrefix={arXiv}, eprint={2410.03644}, primaryClass={cs.CV} }
wang2024unlearnable
arxiv-665741
2410.03645
GenSim2: Scaling Robot Data Generation with Multi-modal and Reasoning LLMs
<|reference_start|>GenSim2: Scaling Robot Data Generation with Multi-modal and Reasoning LLMs: Robotic simulation today remains challenging to scale up due to the human efforts required to create diverse simulation tasks and scenes. Simulation-trained policies also face scalability issues as many sim-to-real methods focus on a single task. To address these challenges, this work proposes GenSim2, a scalable framework that leverages coding LLMs with multi-modal and reasoning capabilities for complex and realistic simulation task creation, including long-horizon tasks with articulated objects. To automatically generate demonstration data for these tasks at scale, we propose planning and RL solvers that generalize within object categories. The pipeline can generate data for up to 100 articulated tasks with 200 objects and reduce the required human effort. To utilize such data, we propose an effective multi-task language-conditioned policy architecture, dubbed proprioceptive point-cloud transformer (PPT), that learns from the generated demonstrations and exhibits strong sim-to-real zero-shot transfer. Combining the proposed pipeline and the policy architecture, we demonstrate a promising use of GenSim2: the generated data can be used for zero-shot transfer or co-trained with real-world collected data, which enhances policy performance by 20% compared with training exclusively on limited real data.<|reference_end|>
arxiv
@article{hua2024gensim2:, title={GenSim2: Scaling Robot Data Generation with Multi-modal and Reasoning LLMs}, author={Pu Hua, Minghuan Liu, Annabella Macaluso, Yunfeng Lin, Weinan Zhang, Huazhe Xu, Lirui Wang}, journal={arXiv preprint arXiv:2410.03645}, year={2024}, archivePrefix={arXiv}, eprint={2410.03645}, primaryClass={cs.RO cs.AI cs.CV cs.LG} }
hua2024gensim2:
arxiv-665742
2410.03651
Minimax-optimal trust-aware multi-armed bandits
<|reference_start|>Minimax-optimal trust-aware multi-armed bandits: Multi-armed bandit (MAB) algorithms have achieved significant success in sequential decision-making applications, under the premise that humans perfectly implement the recommended policy. However, existing methods often overlook the crucial factor of human trust in learning algorithms. When trust is lacking, humans may deviate from the recommended policy, leading to undesired learning performance. Motivated by this gap, we study the trust-aware MAB problem by integrating a dynamic trust model into the standard MAB framework. Specifically, it assumes that the recommended and actually implemented policies differ depending on human trust, which in turn evolves with the quality of the recommended policy. We establish the minimax regret in the presence of the trust issue and demonstrate the suboptimality of vanilla MAB algorithms such as the upper confidence bound (UCB) algorithm. To overcome this limitation, we introduce a novel two-stage trust-aware procedure that provably attains near-optimal statistical guarantees. A simulation study is conducted to illustrate the benefits of our proposed algorithm when dealing with the trust issue.<|reference_end|>
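A toy simulation of the setting is sketched below in Python, with a scalar trust level and a deviation model chosen purely for illustration (the paper's trust dynamics and two-stage procedure are more involved): the human follows the UCB recommendation with probability equal to the current trust.

```python
import numpy as np

rng = np.random.default_rng(0)
K, T = 5, 5000
true_means = rng.uniform(0, 1, K)
counts = np.zeros(K)
sums = np.zeros(K)
trust = 0.5                          # hypothetical scalar trust in [0, 1]

for t in range(1, T + 1):
    with np.errstate(divide="ignore", invalid="ignore"):
        ucb = sums / counts + np.sqrt(2 * np.log(t) / counts)
    ucb[counts == 0] = np.inf        # force each arm to be tried once
    recommended = int(np.argmax(ucb))
    # With probability (1 - trust) the human ignores the recommendation.
    arm = recommended if rng.random() < trust else int(rng.integers(K))
    reward = float(rng.random() < true_means[arm])
    counts[arm] += 1
    sums[arm] += reward
    # Toy trust dynamics: good outcomes build trust, bad ones erode it.
    trust = float(np.clip(trust + 0.01 * (reward - 0.5), 0.0, 1.0))

print("best arm:", np.argmax(true_means), "most pulled:", np.argmax(counts))
```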
arxiv
@article{cai2024minimax-optimal, title={Minimax-optimal trust-aware multi-armed bandits}, author={Changxiao Cai, Jiacheng Zhang}, journal={arXiv preprint arXiv:2410.03651}, year={2024}, archivePrefix={arXiv}, eprint={2410.03651}, primaryClass={stat.ML cs.LG math.ST stat.ME stat.TH} }
cai2024minimax-optimal
arxiv-665743
2410.03653
Dorami: Privilege Separating Security Monitor on RISC-V TEEs
<|reference_start|>Dorami: Privilege Separating Security Monitor on RISC-V TEEs: TEE implementations on RISC-V offer an enclave abstraction by introducing a trusted component called the security monitor (SM). The SM performs critical tasks such as isolating enclaves from each other as well as from the OS by using privileged ISA instructions that enforce the physical memory protection. However, the SM executes at the highest privilege layer on the platform (machine-mode) alongside firmware that is not only large in size but also includes third-party vendor code specific to the platform. In this paper, we present Dorami - a privilege separation approach that isolates the SM from the firmware thus reducing the attack surface on TEEs. Dorami re-purposes existing ISA features to enforce its isolation and achieves its goals without large overheads.<|reference_end|>
arxiv
@article{kuhne2024dorami:, title={Dorami: Privilege Separating Security Monitor on RISC-V TEEs}, author={Mark Kuhne, Stavros Volos, Shweta Shinde}, journal={arXiv preprint arXiv:2410.03653}, year={2024}, archivePrefix={arXiv}, eprint={2410.03653}, primaryClass={cs.CR} }
kuhne2024dorami:
arxiv-665744
2410.03654
Learning Humanoid Locomotion over Challenging Terrain
<|reference_start|>Learning Humanoid Locomotion over Challenging Terrain: Humanoid robots can, in principle, use their legs to go almost anywhere. Developing controllers capable of traversing diverse terrains, however, remains a considerable challenge. Classical controllers are hard to generalize broadly while the learning-based methods have primarily focused on gentle terrains. Here, we present a learning-based approach for blind humanoid locomotion capable of traversing challenging natural and man-made terrain. Our method uses a transformer model to predict the next action based on the history of proprioceptive observations and actions. The model is first pre-trained on a dataset of flat-ground trajectories with sequence modeling, and then fine-tuned on uneven terrain using reinforcement learning. We evaluate our model on a real humanoid robot across a variety of terrains, including rough, deformable, and sloped surfaces. The model demonstrates robust performance, in-context adaptation, and emergent terrain representations. In real-world case studies, our humanoid robot successfully traversed over 4 miles of hiking trails in Berkeley and climbed some of the steepest streets in San Francisco.<|reference_end|>
arxiv
@article{radosavovic2024learning, title={Learning Humanoid Locomotion over Challenging Terrain}, author={Ilija Radosavovic, Sarthak Kamat, Trevor Darrell, Jitendra Malik}, journal={arXiv preprint arXiv:2410.03654}, year={2024}, archivePrefix={arXiv}, eprint={2410.03654}, primaryClass={cs.RO cs.LG} }
radosavovic2024learning
arxiv-665745
2410.03655
Geometric Representation Condition Improves Equivariant Molecule Generation
<|reference_start|>Geometric Representation Condition Improves Equivariant Molecule Generation: Recent advancements in molecular generative models have demonstrated substantial potential in accelerating scientific discovery, particularly in drug design. However, these models often face challenges in generating high-quality molecules, especially in conditional scenarios where specific molecular properties must be satisfied. In this work, we introduce GeoRCG, a general framework to enhance the performance of molecular generative models by integrating geometric representation conditions. We decompose the molecule generation process into two stages: first, generating an informative geometric representation; second, generating a molecule conditioned on the representation. Compared to directly generating a molecule, the relatively easy-to-generate representation in the first stage guides the second-stage generation to reach a high-quality molecule in a more goal-oriented and much faster way. Leveraging EDM as the base generator, we observe significant quality improvements in unconditional molecule generation on the widely-used QM9 and GEOM-DRUG datasets. More notably, in the challenging conditional molecular generation task, our framework achieves an average 31\% performance improvement over state-of-the-art approaches, highlighting the superiority of conditioning on semantically rich geometric representations over conditioning on individual property values as in previous approaches. Furthermore, we show that, with such representation guidance, the number of diffusion steps can be reduced to as few as 100 while maintaining better generation quality than that achieved with 1,000 steps, thereby significantly accelerating the generation process.<|reference_end|>
arxiv
@article{li2024geometric, title={Geometric Representation Condition Improves Equivariant Molecule Generation}, author={Zian Li, Cai Zhou, Xiyuan Wang, Xingang Peng, Muhan Zhang}, journal={arXiv preprint arXiv:2410.03655}, year={2024}, archivePrefix={arXiv}, eprint={2410.03655}, primaryClass={cs.LG cs.AI} }
li2024geometric
arxiv-665746
2410.03658
RAFT: Realistic Attacks to Fool Text Detectors
<|reference_start|>RAFT: Realistic Attacks to Fool Text Detectors: Large language models (LLMs) have exhibited remarkable fluency across various tasks. However, their unethical applications, such as disseminating disinformation, have become a growing concern. Although recent works have proposed a number of LLM detection methods, their robustness and reliability remain unclear. In this paper, we present RAFT: a grammar error-free black-box attack against existing LLM detectors. In contrast to previous attacks for language models, our method exploits the transferability of LLM embeddings at the word level while preserving the original text quality. We leverage an auxiliary embedding to greedily select candidate words to perturb against the target detector. Experiments reveal that our attack effectively compromises all detectors in the study across various domains by up to 99%, and is transferable across source models. Manual human evaluation studies show our attacks are realistic and indistinguishable from original human-written text. We also show that examples generated by RAFT can be used to train adversarially robust detectors. Our work shows that current LLM detectors are not adversarially robust, underscoring the urgent need for more resilient detection mechanisms.<|reference_end|>
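The core loop is a greedy black-box search over word substitutions; a schematic Python sketch follows, in which `candidate_words` and `detector_score` are hypothetical stand-ins for the auxiliary-embedding proposer and the target detector, and the grammar- and quality-preserving constraints of the actual attack are omitted.

```python
# Schematic greedy word-substitution attack (not the paper's exact code).
# `candidate_words(w)` proposes replacements for a word; `detector_score`
# returns the target detector's "AI-generated" score for a token list
# (lower = less likely to be flagged).
def greedy_attack(words, candidate_words, detector_score, budget=0.1):
    words = list(words)
    max_edits = max(1, int(budget * len(words)))
    for _ in range(max_edits):
        best_score, best_i, best_cand = detector_score(words), None, None
        for i, w in enumerate(words):
            for cand in candidate_words(w):
                trial = words[:i] + [cand] + words[i + 1:]
                score = detector_score(trial)
                if score < best_score:
                    best_score, best_i, best_cand = score, i, cand
        if best_i is None:          # no single substitution helps anymore
            break
        words[best_i] = best_cand
    return words
```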
arxiv
@article{wang2024raft:, title={RAFT: Realistic Attacks to Fool Text Detectors}, author={James Wang, Ran Li, Junfeng Yang, and Chengzhi Mao}, journal={arXiv preprint arXiv:2410.03658}, year={2024}, archivePrefix={arXiv}, eprint={2410.03658}, primaryClass={cs.CL cs.LG} }
wang2024raft:
arxiv-665747
2410.03659
Unraveling Cross-Modality Knowledge Conflict in Large Vision-Language Models
<|reference_start|>Unraveling Cross-Modality Knowledge Conflict in Large Vision-Language Models: Large Vision-Language Models (LVLMs) have demonstrated impressive capabilities for capturing and reasoning over multimodal inputs. However, these models are prone to parametric knowledge conflicts, which arise from inconsistencies of represented knowledge between their vision and language components. In this paper, we formally define the problem of $\textbf{cross-modality parametric knowledge conflict}$ and present a systematic approach to detect, interpret, and mitigate them. We introduce a pipeline that identifies conflicts between visual and textual answers, showing a persistently high conflict rate across modalities in recent LVLMs regardless of the model size. We further investigate how these conflicts interfere with the inference process and propose a contrastive metric to discern the conflicting samples from the others. Building on these insights, we develop a novel dynamic contrastive decoding method that removes undesirable logits inferred from the less confident modality components based on answer confidence. For models that do not provide logits, we also introduce two prompt-based strategies to mitigate the conflicts. Our methods achieve promising improvements in accuracy on both the ViQuAE and InfoSeek datasets. Specifically, using LLaVA-34B, our proposed dynamic contrastive decoding improves average accuracy by 2.24%.<|reference_end|>
arxiv
@article{zhu2024unraveling, title={Unraveling Cross-Modality Knowledge Conflicts in Large Vision-Language Models}, author={Tinghui Zhu, Qin Liu, Fei Wang, Zhengzhong Tu, Muhao Chen}, journal={arXiv preprint arXiv:2410.03659}, year={2024}, archivePrefix={arXiv}, eprint={2410.03659}, primaryClass={cs.CV cs.CL} }
zhu2024unraveling
arxiv-665748
2410.03662
System 2 reasoning capabilities are nigh
<|reference_start|>System 2 reasoning capabilities are nigh: In recent years, machine learning models have made strides towards human-like reasoning capabilities from several directions. In this work, we review the current state of the literature and describe the remaining steps to achieve a neural model which can perform System 2 reasoning analogous to a human. We argue that even if current models are insufficient to be classed as performing reasoning, very little additional progress is needed to attain that goal.<|reference_end|>
arxiv
@article{lowe2024system, title={System 2 Reasoning Capabilities Are Nigh}, author={Scott C. Lowe}, journal={The First Workshop on System-2 Reasoning at Scale, NeurIPS 2024}, year={2024}, archivePrefix={arXiv}, eprint={2410.03662}, primaryClass={cs.AI cs.LG} }
lowe2024system
arxiv-665749
2410.03663
Enhance Reasoning by Learning from Mistakes: Peer-Review Knowledge Distillation from Multiple Large Language Models
<|reference_start|>Enhance Reasoning by Learning from Mistakes: Peer-Review Knowledge Distillation from Multiple Large Language Models: Large language models (LLMs) have exhibited complex reasoning abilities by generating question rationales and demonstrated exceptional performance in natural language processing (NLP) tasks. However, these reasoning capabilities generally emerge in models with tens of billions of parameters, creating significant computational challenges for real-world deployment. Recent research has concentrated on improving open-source smaller models through knowledge distillation (KD) from commercial LLMs. Nevertheless, most of these studies rely solely on the responses from a single LLM as the gold rationale for training. In this paper, we introduce a novel Mistake-Aware Peer-Review Distillation (MAPD) approach: 1) Instead of merely obtaining gold rationales from teachers, our method asks teachers to identify and explain the student's mistakes, providing customized instruction learning data. 2) We design a simulated peer-review process between teacher LLMs, which selects only the generated rationales above the acceptance threshold. This reduces the chance of teachers guessing correctly with a flawed rationale, improving instructional data quality. Comprehensive experiments and analysis on mathematical, commonsense, and logical reasoning tasks demonstrate the effectiveness of our method.<|reference_end|>
arxiv
@article{li2024learning, title={Learning from Committee: Reasoning Distillation from a Mixture of Teachers with Peer-Review}, author={Zhuochun Li, Yuelyu Ji, Rui Meng, Daqing He}, journal={arXiv preprint arXiv:2410.03663}, year={2024}, archivePrefix={arXiv}, eprint={2410.03663}, primaryClass={cs.CL cs.AI} }
li2024learning
arxiv-665750
2410.03665
Estimating Body and Hand Motion in an Ego-sensed World
<|reference_start|>Estimating Body and Hand Motion in an Ego-sensed World: We present EgoAllo, a system for human motion estimation from a head-mounted device. Using only egocentric SLAM poses and images, EgoAllo guides sampling from a conditional diffusion model to estimate 3D body pose, height, and hand parameters that capture the wearer's actions in the allocentric coordinate frame of the scene. To achieve this, our key insight is in representation: we propose spatial and temporal invariance criteria for improving model performance, from which we derive a head motion conditioning parameterization that improves estimation by up to 18%. We also show how the bodies estimated by our system can improve the hands: the resulting kinematic and temporal constraints result in over 40% lower hand estimation errors compared to noisy monocular estimates. Project page: https://egoallo.github.io/<|reference_end|>
arxiv
@article{yi2024estimating, title={Estimating Body and Hand Motion in an Ego-sensed World}, author={Brent Yi, Vickie Ye, Maya Zheng, Lea M\"uller, Georgios Pavlakos, Yi Ma, Jitendra Malik, Angjoo Kanazawa}, journal={arXiv preprint arXiv:2410.03665}, year={2024}, archivePrefix={arXiv}, eprint={2410.03665}, primaryClass={cs.CV cs.AI} }
yi2024estimating
arxiv-665751
2410.03674
Trends, Advancements and Challenges in Intelligent Optimization in Satellite Communication
<|reference_start|>Trends, Advancements and Challenges in Intelligent Optimization in Satellite Communication: Efficient satellite communications play an enormously important role in all of our daily lives. This includes the transmission of data for communication purposes, the operation of IoT applications or the provision of data for ground stations. More and more, AI-based methods are finding their way into these areas. This paper gives an overview of current research in the field of intelligent optimization of satellite communication. For this purpose, a text-mining based literature review was conducted and the identified papers were thematically clustered and analyzed. The identified clusters cover the main topics of routing, resource allocation, and load balancing. Through such a clustering of the literature in overarching topics, a structured analysis of the research papers was enabled, allowing the identification of the latest technologies and approaches as well as research needs for intelligent optimization of satellite communication.<|reference_end|>
arxiv
@article{krajsic2024trends, title={Trends, Advancements and Challenges in Intelligent Optimization in Satellite Communication}, author={Philippe Krajsic, Viola Suess, Zehong Cao, Ryszard Kowalczyk, Bogdan Franczyk}, journal={arXiv preprint arXiv:2410.03674}, year={2024}, archivePrefix={arXiv}, eprint={2410.03674}, primaryClass={cs.NI cs.IT cs.LG eess.SP math.IT} }
krajsic2024trends
arxiv-665752
2410.03675
Controllable Shape Modeling with Neural Generalized Cylinder
<|reference_start|>Controllable Shape Modeling with Neural Generalized Cylinder: Neural shape representation, such as the neural signed distance field (NSDF), is becoming more and more popular in shape modeling owing to its ability to deal with complex topology and arbitrary resolution. Because features are used implicitly for shape representation, manipulating the shapes is inherently inconvenient, since the features cannot be intuitively edited. In this work, we propose the neural generalized cylinder (NGC) for explicit manipulation of NSDF, which is an extension of the traditional generalized cylinder (GC). Specifically, we define a central curve first and assign neural features along the curve to represent the profiles. Then NSDF is defined on the relative coordinates of a specialized GC with oval-shaped profiles. By using the relative coordinates, NSDF can be explicitly controlled via manipulation of the GC. To demonstrate this, we apply NGC to many non-rigid deformation tasks like complex curved deformation, local scaling and twisting for shapes. The comparison on shape deformation with other methods demonstrates the effectiveness and efficiency of NGC. Furthermore, NGC can utilize the neural features for shape blending via simple neural feature interpolation.<|reference_end|>
arxiv
@article{zhu2024controllable, title={Controllable Shape Modeling with Neural Generalized Cylinder}, author={Xiangyu Zhu, Zhiqin Chen, Ruizhen Hu, Xiaoguang Han}, journal={arXiv preprint arXiv:2410.03675}, year={2024}, archivePrefix={arXiv}, eprint={2410.03675}, primaryClass={cs.CV cs.GR} }
zhu2024controllable
arxiv-665753
2410.03676
A quest through interconnected datasets: lessons from highly-cited ICASSP papers
<|reference_start|>A quest through interconnected datasets: lessons from highly-cited ICASSP papers: As audio machine learning outcomes are deployed in societally impactful applications, it is important to have a sense of the quality and origins of the data used. Noticing that being explicit about this is not trivially rewarded in academic publishing in applied machine learning domains, nor included in typical applied machine learning curricula, we present a study into dataset usage connected to the top-5 cited papers at the International Conference on Acoustics, Speech, and Signal Processing (ICASSP). In this, we conduct thorough depth-first analyses towards the origins of used datasets, often leading to searches that had to go beyond what was reported in official papers, and ending in unclear or entangled origins. Especially given the current pull towards larger, and possibly generative, AI models, awareness of the need for accountability on data provenance is increasing. With this, we call on the community to not only focus on engineering larger models, but to create more room and reward for making explicit the foundations on which such models should be built.<|reference_end|>
arxiv
@article{liem2024a, title={A quest through interconnected datasets: lessons from highly-cited ICASSP papers}, author={Cynthia C. S. Liem, Do\u{g}a Ta\c{s}\c{i}lar, Andrew M. Demetriou}, journal={arXiv preprint arXiv:2410.03676}, year={2024}, archivePrefix={arXiv}, eprint={2410.03676}, primaryClass={cs.SD cs.LG eess.AS} }
liem2024a
arxiv-665754
2410.03678
Post-Quantum Cryptography Anonymous Scheme -- PQCWC: Post-Quantum Cryptography Winternitz-Chen
<|reference_start|>Post-Quantum Cryptography Anonymous Scheme -- PQCWC: Post-Quantum Cryptography Winternitz-Chen: As quantum computing technology matures, it poses a threat to the security of mainstream asymmetric cryptographic methods. In response, the National Institute of Standards and Technology released the final version of post-quantum cryptographic (PQC) algorithm standards in August 2024. These post-quantum cryptographic algorithms are primarily based on lattice-based and hash-based cryptography. Therefore, this study proposes the Post-Quantum Cryptography Winternitz-Chen (PQCWC) anonymous scheme, aimed at exploring the design of anonymous schemes based on PQC for future applications in privacy protection. The anonymous scheme designed in this study is mainly built on the Winternitz signature scheme, which can prevent the original public key from being exposed in the certificate. Furthermore, the PQCWC anonymous scheme integrates the butterfly key expansion mechanism, proposing the first hash-based butterfly key expansion mechanism in the world, achieving anonymity for both the registration authority and the certificate authority, thereby fully protecting privacy. In the experimental environment, this study compares various hash algorithms, including Secure Hash Algorithm-1 (SHA-1), the SHA-2 series, the SHA-3 series, and the BLAKE series. The results demonstrate that the proposed anonymous scheme can achieve anonymity without increasing key length, signature length, key generation time, signature generation time, or signature verification time.<|reference_end|>
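Since the scheme builds on Winternitz signatures, a compact textbook-style Winternitz one-time signature in Python may help fix ideas. This simplified sketch omits the checksum chunks a real W-OTS requires for security and is unrelated to the paper's butterfly key expansion:

```python
import hashlib
import os

W = 16          # Winternitz parameter: each chunk holds a value in 0..15
N_CHUNKS = 64   # a 256-bit digest split into 64 four-bit chunks

def H(x: bytes) -> bytes:
    return hashlib.sha256(x).digest()

def chain(x: bytes, steps: int) -> bytes:
    for _ in range(steps):
        x = H(x)
    return x

def keygen():
    sk = [os.urandom(32) for _ in range(N_CHUNKS)]
    pk = [chain(s, W - 1) for s in sk]   # top of every hash chain
    return sk, pk

def chunks(msg: bytes):
    for byte in hashlib.sha256(msg).digest():
        yield byte >> 4
        yield byte & 0x0F

def sign(msg: bytes, sk):
    return [chain(s, c) for s, c in zip(sk, chunks(msg))]

def verify(msg: bytes, sig, pk) -> bool:
    # Completing each chain to W-1 total steps must land on the public key.
    return all(chain(s, W - 1 - c) == p
               for s, c, p in zip(sig, chunks(msg), pk))

sk, pk = keygen()
sig = sign(b"hello", sk)
assert verify(b"hello", sig, pk) and not verify(b"tampered", sig, pk)
```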
arxiv
@article{chen2024post-quantum, title={Post-Quantum Cryptography Anonymous Scheme -- PQCWC: Post-Quantum Cryptography Winternitz-Chen}, author={Abel C. H. Chen}, journal={arXiv preprint arXiv:2410.03678}, year={2024}, archivePrefix={arXiv}, eprint={2410.03678}, primaryClass={cs.CR cs.CY cs.NI stat.AP} }
chen2024post-quantum
arxiv-665755
2410.03684
MRSO: Balancing Exploration and Exploitation through Modified Rat Swarm Optimization for Global Optimization
<|reference_start|>MRSO: Balancing Exploration and Exploitation through Modified Rat Swarm Optimization for Global Optimization: The rapid advancement of intelligent technology has led to the development of optimization algorithms that leverage natural behaviors to address complex issues. Among these, the Rat Swarm Optimizer (RSO), inspired by rats' social and behavioral characteristics, has demonstrated potential in various domains, although its convergence precision and exploration capabilities are limited. To address these shortcomings, this study introduces the Modified Rat Swarm Optimizer (MRSO), designed to enhance the balance between exploration and exploitation. MRSO incorporates unique modifications to improve search efficiency and durability, making it suitable for challenging engineering problems such as welded beam, pressure vessel, and gear train design. Extensive testing with classical benchmark functions shows that MRSO significantly improves performance, avoiding local optima and achieving higher accuracy in six out of nine multimodal functions and in all seven fixed-dimension multimodal functions. In the CEC 2019 benchmarks, MRSO outperforms the standard RSO in six out of ten functions, demonstrating superior global search capabilities. When applied to engineering design problems, MRSO consistently delivers better average results than RSO, proving its effectiveness. Additionally, we compared our approach with eight recent and well-known algorithms using both classical and CEC-2019 benchmarks. MRSO outperforms each of these algorithms, achieving superior results in six out of 23 classical benchmark functions and in four out of ten CEC-2019 benchmark functions. These results further demonstrate MRSO's significant contributions as a reliable and efficient tool for optimization tasks in engineering applications.<|reference_end|>
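To make the exploration-exploitation trade-off concrete, the Python sketch below runs a generic swarm-style optimizer with a linearly decaying exploration weight on a classical benchmark function; the population update is an illustrative stand-in of ours, not the actual RSO/MRSO equations.

```python
import numpy as np

rng = np.random.default_rng(2)

def sphere(x: np.ndarray) -> float:    # classical unimodal benchmark
    return float(np.sum(x ** 2))

dim, pop, iters = 5, 20, 200
X = rng.uniform(-5, 5, (pop, dim))
best = min(X, key=sphere).copy()

for t in range(iters):
    a = 2.0 * (1 - t / iters)          # exploration weight decays to 0
    for i in range(pop):
        r = rng.random(dim)
        # Pull toward the best-so-far (exploitation) plus decaying noise
        # (exploration); clipping keeps candidates in the search box.
        X[i] = best - a * r * (best - X[i]) + a * (rng.random(dim) - 0.5)
        X[i] = np.clip(X[i], -5, 5)
        if sphere(X[i]) < sphere(best):
            best = X[i].copy()

print("best objective:", sphere(best))
```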
arxiv
@article{abdulla2024mrso:, title={MRSO: Balancing Exploration and Exploitation through Modified Rat Swarm Optimization for Global Optimization}, author={Hemin Sardar Abdulla, Azad A. Ameen, Sarwar Ibrahim Saeed, Ismail Asaad Mohammed and Tarik A. Rashid}, journal={arXiv preprint arXiv:2410.03684}, year={2024}, archivePrefix={arXiv}, eprint={2410.03684}, primaryClass={cs.NE} }
abdulla2024mrso:
arxiv-665756
2410.03686
LCM: Log Conformal Maps for Robust Representation Learning to Mitigate Perspective Distortion
<|reference_start|>LCM: Log Conformal Maps for Robust Representation Learning to Mitigate Perspective Distortion: Perspective distortion (PD) leads to substantial alterations in the shape, size, orientation, angles, and spatial relationships of visual elements in images. Accurately determining camera intrinsic and extrinsic parameters is challenging, making it hard to synthesize perspective distortion effectively. The current distortion correction methods involve removing distortion and learning vision tasks, thus making it a multi-step process, often compromising performance. Recent work leverages the M\"obius transform for mitigating perspective distortions (MPD) to synthesize perspective distortions without estimating camera parameters. However, the M\"obius transform requires tuning multiple interdependent and interrelated parameters and involves complex arithmetic operations, leading to substantial computational complexity. To address these challenges, we propose Log Conformal Maps (LCM), a method leveraging the logarithmic function to approximate perspective distortions with fewer parameters and reduced computational complexity. We provide a detailed foundation complemented with experiments to demonstrate that LCM with fewer parameters approximates the MPD. We show that LCM integrates well with supervised and self-supervised representation learning, outperforms standard models, and matches the state-of-the-art performance in mitigating perspective distortion over multiple benchmarks, namely Imagenet-PD, Imagenet-E, and Imagenet-X. Further, LCM demonstrates seamless integration with person re-identification and improves performance. Source code is made publicly available at https://github.com/meenakshi23/Log-Conformal-Maps.<|reference_end|>
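As a rough illustration of a logarithmic conformal coordinate map (the parameterization below is an assumption for illustration; the paper's formulation and its coupling to the image grid differ), pixel coordinates can be treated as complex numbers and passed through a complex logarithm:

```python
import numpy as np

def log_conformal_warp(h: int, w: int, strength: float = 1.0):
    """Map centered pixel coordinates through z -> log(1 + strength * z).

    The complex logarithm is conformal (angle-preserving) away from its
    branch cut, and a single `strength` knob controls the distortion,
    in contrast to the many interdependent Mobius parameters.
    """
    ys, xs = np.mgrid[0:h, 0:w].astype(float)
    z = (xs - w / 2) / w + 1j * (ys - h / 2) / h   # normalized, centered
    warped = np.log(1.0 + strength * z)            # complex log
    return warped.real, warped.imag                # new x- and y-coordinates

new_x, new_y = log_conformal_warp(224, 224, strength=0.8)
```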
arxiv
@article{chippa2024lcm:, title={LCM: Log Conformal Maps for Robust Representation Learning to Mitigate Perspective Distortion}, author={Meenakshi Subhash Chippa, Prakash Chandra Chhipa, Kanjar De, Marcus Liwicki, Rajkumar Saini}, journal={arXiv preprint arXiv:2410.03686}, year={2024}, archivePrefix={arXiv}, eprint={2410.03686}, primaryClass={cs.CV} }
chippa2024lcm:
arxiv-665757
2410.03688
LLM Agents as 6G Orchestrator: A Paradigm for Task-Oriented Physical-Layer Automation
<|reference_start|>LLM Agents as 6G Orchestrator: A Paradigm for Task-Oriented Physical-Layer Automation: The rapid advancement in generative pre-training models is propelling a paradigm shift in technological progression from basic applications such as chatbots towards more sophisticated agent-based systems. There is huge potential, and necessity, in combining the 6G system with the copilot of large language model (LLM) agents and digital twins (DT) to manage the highly complicated communication system with newly emerging features such as native AI service and sensing. With the 6G-oriented agent, the base station could understand the transmission requirements of various dynamic upper-layer tasks and automatically orchestrate the optimal system workflow. By continuously getting feedback from the 6G DT for reinforcement, the agents can ultimately raise the performance of the practical system accordingly. Differing from existing LLM agents designed for general applications, the 6G-oriented agent aims to make highly rigorous and precise plans with a vast amount of extra expert knowledge, which inevitably requires a specific system design from model training to implementation. This paper proposes a novel comprehensive approach for building task-oriented 6G LLM agents. We first propose a two-stage continual pre-training and fine-tuning scheme to build the field basic model and a diversity of specialized expert models for meeting the requirements of various application scenarios. Further, a novel inference framework based on semantic retrieval for leveraging the existing communication-related functions is proposed. Experiment results on exemplary tasks, such as physical-layer task decomposition, show the proposed paradigm's feasibility and effectiveness.<|reference_end|>
arxiv
@article{xiao2024llm, title={LLM Agents as 6G Orchestrator: A Paradigm for Task-Oriented Physical-Layer Automation}, author={Zhuoran Xiao, Chenhui Ye, Yunbo Hu, Honggang Yuan, Yihang Huang, Yijia Feng, Liyu Cai and Jiang Chang}, journal={arXiv preprint arXiv:2410.03688}, year={2024}, archivePrefix={arXiv}, eprint={2410.03688}, primaryClass={cs.NI cs.AI} }
xiao2024llm
arxiv-665758
2410.03690
Conversational Swarms of Humans and AI Agents enable Hybrid Collaborative Decision-making
<|reference_start|>Conversational Swarms of Humans and AI Agents enable Hybrid Collaborative Decision-making: Conversational Swarm Intelligence (CSI) is an AI-powered communication and collaboration technology that allows large, networked groups (of potentially unlimited size) to hold thoughtful conversational deliberations in real-time. Inspired by the efficient decision-making dynamics of fish schools, CSI divides a human population into a set of small subgroups connected by AI agents. This enables the full group to hold a unified conversation. In this study, groups of 25 participants were tasked with selecting a roster of players in a real Fantasy Baseball contest. A total of 10 trials were run using CSI. In half the trials, each subgroup was augmented with a fact-providing AI agent referred to herein as an Infobot. The Infobot was loaded with a wide range of MLB statistics. The human participants could query the Infobot the same way they would query other persons in their subgroup. Results show that when using CSI, the 25-person groups outperformed 72% of individually surveyed participants and showed significant intelligence amplification versus the mean score (p=0.016). The CSI-enabled groups also significantly outperformed the most popular picks across the collected surveys for each daily contest (p<0.001). The CSI sessions that used Infobots scored slightly higher than those that did not, but it was not statistically significant in this study. That said, 85% of participants agreed with the statement 'Our decisions were stronger because of information provided by the Infobot' and only 4% disagreed. In addition, deliberations that used Infobots showed significantly less variance (p=0.039) in conversational content across members. This suggests that Infobots promoted more balanced discussions in which fewer members dominated the dialog. This may be because the Infobot enabled participants to confidently express opinions with the support of factual data.<|reference_end|>
arxiv
@article{rosenberg2024conversational, title={Conversational Swarms of Humans and AI Agents enable Hybrid Collaborative Decision-making}, author={Louis Rosenberg, Hans Schumann, Christopher Dishop, Gregg Willcox, Anita Woolley, and Ganesh Mani}, journal={arXiv preprint arXiv:2410.03690}, year={2024}, archivePrefix={arXiv}, eprint={2410.03690}, primaryClass={cs.HC} }
rosenberg2024conversational
arxiv-665759
2410.03692
Floating-floating point: a highly accurate number representation with flexible Counting ranges
<|reference_start|>Floating-floating point: a highly accurate number representation with flexible Counting ranges: Efficient number representation is essential for federated learning, natural language processing, and network measurement solutions. Due to timing, area, and power constraints, such applications use narrow bit-width (e.g., 8-bit) number systems. The widely used floating-point systems exhibit a trade-off between the counting range and accuracy. This paper introduces Floating-Floating-Point (F2P) - a floating point number that varies the partition between mantissa and exponent. Such flexibility leads to a large counting range combined with improved accuracy over a selected sub-range. Our evaluation demonstrates that moving to F2P from the state-of-the-art improves network measurement accuracy and federated learning.<|reference_end|>
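To see the trade-off F2P navigates, consider the small Python sketch below of an 8-bit unsigned float whose mantissa/exponent partition is an explicit knob; the bit layout is assumed for illustration and is not the encoding defined in the paper.

```python
def decode(word: int, exp_bits: int, total_bits: int = 8) -> float:
    """Interpret `word` as an unsigned float with `exp_bits` exponent bits
    and the remaining bits as mantissa (no sign bit, no bias handling)."""
    mant_bits = total_bits - exp_bits
    exponent = word >> mant_bits
    mantissa = word & ((1 << mant_bits) - 1)
    return (1 + mantissa / (1 << mant_bits)) * (2.0 ** exponent)

# Shifting one bit from mantissa to exponent widens the counting range
# (larger maximum value) at the cost of precision within any sub-range.
print(decode(0b11111111, exp_bits=3))  # 254.0  with a 3-bit exponent
print(decode(0b11111111, exp_bits=4))  # 63488.0 with a 4-bit exponent
```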
arxiv
@article{cohen2024floating-floating, title={Floating-floating point: a highly accurate number representation with flexible Counting ranges}, author={Itamar Cohen, Gil Einziger}, journal={arXiv preprint arXiv:2410.03692}, year={2024}, archivePrefix={arXiv}, eprint={2410.03692}, primaryClass={cs.NI cs.LG} }
cohen2024floating-floating
arxiv-665760
2410.03693
Linear Independence of Generalized Neurons and Related Functions
<|reference_start|>Linear Independence of Generalized Neurons and Related Functions: The linear independence of neurons plays a significant role in theoretical analysis of neural networks. Specifically, given neurons $H_1, ..., H_n: \mathbb{R}^N \times \mathbb{R}^d \to \mathbb{R}$, we are interested in the following question: when are $\{H_1(\theta_1, \cdot), ..., H_n(\theta_n, \cdot)\}$ linearly independent as the parameters $\theta_1, ..., \theta_n$ of these functions vary over $\mathbb{R}^N$? Previous works give a complete characterization of two-layer neurons without bias, for generic smooth activation functions. In this paper, we study the problem for neurons with arbitrary layers and widths, giving a simple but complete characterization for generic analytic activation functions.<|reference_end|>
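For concreteness, the previously characterized special case can be written out as follows (a standard formulation of bias-free two-layer neurons, not text from the paper):

```latex
% Two-layer neurons without bias: with activation $\sigma$ and
% parameters $\theta_i \in \mathbb{R}^d$,
\[
  H_i(\theta_i, x) \;=\; \sigma(\theta_i \cdot x), \qquad x \in \mathbb{R}^d,
\]
% and linear independence asks when
\[
  \sum_{i=1}^{n} c_i \,\sigma(\theta_i \cdot x) \equiv 0
  \quad\Longrightarrow\quad c_1 = \cdots = c_n = 0 .
\]
```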
arxiv
@article{zhang2024linear, title={Linear Independence of Generalized Neurons and Related Functions}, author={Leyang Zhang}, journal={arXiv preprint arXiv:2410.03693}, year={2024}, archivePrefix={arXiv}, eprint={2410.03693}, primaryClass={cs.LG} }
zhang2024linear
arxiv-665761
2410.03694
Making Data: The Work Behind Artificial Intelligence
<|reference_start|>Making Data: The Work Behind Artificial Intelligence: AI generates both enthusiasm and disillusionment, with promises that often go unfulfilled. It is therefore not surprising that human labor, which is its fundamental component, is also subject to these same deceptions. The development of "smart technologies" depends, at different stages, on a multitude of precarious, underpaid and invisible workers, who, dispersed globally, carry out repetitive, fragmented activities, paid per task and completed in a few seconds. These are workers who label data to train algorithms, through tasks that require the intuitive, creative and cognitive abilities of human beings, such as categorizing images, classifying advertisements, transcribing audio and video, evaluating advertisements, moderating content on social media, labeling human anatomical points of interest, digitizing documents, etc. This form of work is often referred to as "microwork". Our contribution, which documents the conditions of microwork in Brazil and offers portraits of the workers, is a step in the wider effort to overcome the current state of invisibilization. It opens up avenues for future research, with the aim of better characterizing this new form of work, tracing its changes over time in relation to the dynamics of globalization and, ideally, identifying levers for action and transitions.<|reference_end|>
arxiv
@article{braz2024making, title={Making Data: The Work Behind Artificial Intelligence}, author={Matheus Viana Braz (UEM), Paola Tubaro (CNRS, ENSAE Paris, CREST), Antonio A. Casilli (I3 SES, NOS, LACI)}, journal={Ricardo Festi; J{\"o}rg Nowak. As novas infraestruturas produtivas: digitaliza{\c c}{\~a}o do trabalho, e-log{\'i}stica e ind{\'u}stria 4.0, Boitempo, pp.105-120, 2024, 6557173871}, year={2024}, archivePrefix={arXiv}, eprint={2410.03694}, primaryClass={cs.HC cs.CY} }
braz2024making
arxiv-665762
2410.03695
Improving the Accessibility of Dating Websites for Individuals with Visual Impairments
<|reference_start|>Improving the Accessibility of Dating Websites for Individuals with Visual Impairments: People now frequently meet and develop relationships through online dating. Yet, due to their limited accessibility, utilizing dating services can be difficult and irritating for people with visual impairments. The significance of the research issue can be attributed to the fact that dating websites are becoming more and more common and have a significant impact on how people establish romantic connections. It can be challenging for people with visual impairments to use dating services and develop lasting relationships because many of them are not created with their requirements in mind. We can encourage people with visual impairments to participate more completely in online dating and possibly enhance the success of their romantic relationships by making dating websites more accessible. There are existing implementations that can automatically recognize the facial expression, age, gender, presence of child(ren), and other common objects in a profile photo on a dating platform. The goal of this project is to incorporate additional features (presence of common pets, indoor vs. outdoor images) to further enhance the capability of the existing system and to develop and test viable solutions to accessibility issues that people with visual impairments face when using dating websites.<|reference_end|>
arxiv
@article{shrestha2024improving, title={Improving the Accessibility of Dating Websites for Individuals with Visual Impairments}, author={Gyanendra Shrestha, and Soumya Tejaswi Vadlamani}, journal={arXiv preprint arXiv:2410.03695}, year={2024}, archivePrefix={arXiv}, eprint={2410.03695}, primaryClass={cs.HC cs.LG} }
shrestha2024improving
arxiv-665763
2410.03696
Improving Emotion Recognition Accuracy with Personalized Clustering
<|reference_start|>Improving Emotion Recognition Accuracy with Personalized Clustering: Emotion recognition through artificial intelligence and smart sensing of physical and physiological signals (Affective Computing) is achieving very interesting results in terms of accuracy, inference times, and user-independent models. In this sense, there are applications related to the safety and well-being of people (sexual aggressions, gender-based violence, children and elderly abuse, mental health, etc.) that require even more improvements. Emotion detection should be done with fast, discreet, and low-cost systems working in real-time and real life (wearable devices, wireless communications, battery-powered). Furthermore, emotional reactions to violence are not equal in all people. Thus, large general models cannot be applied to a multiuser system for people protection, and customized and simple AI models would be welcomed by health and social workers and law enforcement agents. These customized models will be applicable to clusters of subjects sharing similarities in their emotional reactions to external stimuli. This customization requires several steps: creating clusters of subjects with similar behaviors, creating AI models for every cluster, continually updating these models with new data, and enrolling new subjects in clusters when required. A methodology for clustering the compiled data (physical and physiological signals, together with emotional labels) is presented in this work, as well as the method for including new subjects once the AI model is generated. Experimental results demonstrate an improvement of 4% in accuracy and 3% in f1-score w.r.t. the general model, along with a 14% reduction in variability.<|reference_end|>
arxiv
@article{gutierrez-martin2024improving, title={Improving Emotion Recognition Accuracy with Personalized Clustering}, author={Laura Gutierrez-Martin (1), Celia Lopez Ongil (1 and 2), Jose M. Lanza-Gutierrez (3), and Jose A. Miranda Calero (4) ((1) Department of Electronics, Universidad Carlos III de Madrid, Spain, (2) Gender Studies Institute, Universidad Carlos III de Madrid, Spain, (3) Department of Computer Science, Universidad de Alcala, Spain, (4) Embedded Systems Laboratory, Ecole Polytechnique Federale de Lausanne, Switzerland)}, journal={arXiv preprint arXiv:2410.03696}, year={2024}, archivePrefix={arXiv}, eprint={2410.03696}, primaryClass={cs.HC cs.AI cs.LG eess.SP} }
gutierrez-martin2024improving
arxiv-665764
2410.03697
Combining Open-box Simulation and Importance Sampling for Tuning Large-Scale Recommenders
<|reference_start|>Combining Open-box Simulation and Importance Sampling for Tuning Large-Scale Recommenders: The growing scale of recommender systems requires extensive tuning to respond to market dynamics and system changes. We address the challenge of tuning a large-scale ads recommendation platform with multiple continuous parameters influencing key performance indicators (KPIs). Traditional methods like open-box Monte Carlo simulators, while accurate, are computationally expensive due to the high cost of evaluating numerous parameter settings. To mitigate this, we propose a hybrid approach, Simulator-Guided Importance Sampling (SGIS), that combines open-box simulation with importance sampling (IS). SGIS leverages the strengths of both techniques: it performs a coarse enumeration over the parameter space to identify promising initial settings and then uses IS to iteratively refine these settings. This approach significantly reduces computational costs while maintaining high accuracy in KPI estimation. We demonstrate the effectiveness of SGIS through simulations as well as real-world experiments, showing that it achieves substantial improvements in KPIs with lower computational overhead compared to traditional methods.<|reference_end|>
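The coarse-enumeration-then-importance-sampling shape of SGIS can be sketched in a few lines of Python; the KPI function, grid, and softmax-style weighting below are illustrative stand-ins, not the production simulator or the paper's exact estimator.

```python
import numpy as np

rng = np.random.default_rng(1)

def kpi(theta: float) -> float:
    """Stand-in for one expensive open-box simulator evaluation."""
    return -(theta - 0.7) ** 2 + 0.05 * rng.standard_normal()

# Stage 1: coarse enumeration to find a promising initial setting.
grid = np.linspace(0.0, 1.0, 11)
best = float(grid[np.argmax([kpi(g) for g in grid])])

# Stage 2: iteratively refine around it with weighted (importance-style)
# sampling, concentrating simulator calls where the KPI looks good.
for _ in range(5):
    proposals = rng.normal(best, 0.05, size=64)
    scores = np.array([kpi(p) for p in proposals])
    weights = np.exp(scores - scores.max())
    best = float(np.sum(weights * proposals) / weights.sum())

print("tuned parameter:", round(best, 3))
```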
arxiv
@article{paneri2024combining, title={Combining Open-box Simulation and Importance Sampling for Tuning Large-Scale Recommenders}, author={Kaushal Paneri, Michael Munje, Kailash Singh Maurya, Adith Swaminathan, Yifan Shi}, journal={arXiv preprint arXiv:2410.03697}, year={2024}, archivePrefix={arXiv}, eprint={2410.03697}, primaryClass={cs.LG cs.AI cs.IR} }
paneri2024combining
arxiv-665765
2410.03702
A survey of Zarankiewicz problem in geometry
<|reference_start|>A survey of Zarankiewicz problem in geometry: One of the central topics in extremal graph theory is the study of the function $ex(n,H)$, which represents the maximum number of edges a graph with $n$ vertices can have while avoiding a fixed graph $H$ as a subgraph. Tur{\'a}n provided a complete characterization for the case when $H$ is a complete graph on $r$ vertices. Erd{\H o}s, Stone, and Simonovits extended Tur{\'a}n's result to arbitrary graphs $H$ with $\chi(H) > 2$ (chromatic number greater than 2). However, determining the asymptotics of $ex(n, H)$ for bipartite graphs $H$ remains a widely open problem. A classical example of this is Zarankiewicz's problem, which asks for the asymptotics of $ex(n, K_{t,t})$. In this paper, we survey Zarankiewicz's problem, with a focus on graphs that arise from geometry. Incidence geometry, in particular, can be viewed as a manifestation of Zarankiewicz's problem in geometrically defined graphs.<|reference_end|>
arxiv
@article{smorodinsky2024a, title={A survey of Zarankiewicz problem in geometry}, author={Shakhar Smorodinsky}, journal={arXiv preprint arXiv:2410.03702}, year={2024}, archivePrefix={arXiv}, eprint={2410.03702}, primaryClass={math.HO cs.CG cs.DM math.CO} }
smorodinsky2024a
arxiv-665766
2410.03703
Human Creativity in the Age of LLMs: Randomized Experiments on Divergent and Convergent Thinking
<|reference_start|>Human Creativity in the Age of LLMs: Randomized Experiments on Divergent and Convergent Thinking: Large language models are transforming the creative process by offering unprecedented capabilities to algorithmically generate ideas. While these tools can enhance human creativity when people co-create with them, it's unclear how this will impact unassisted human creativity. We conducted two large pre-registered parallel experiments involving 1,100 participants attempting tasks targeting the two core components of creativity, divergent and convergent thinking. We compare the effects of two forms of large language model (LLM) assistance -- a standard LLM providing direct answers and a coach-like LLM offering guidance -- with a control group receiving no AI assistance, and focus particularly on how all groups perform in a final, unassisted stage. Our findings reveal that while LLM assistance can provide short-term boosts in creativity during assisted tasks, it may inadvertently hinder independent creative performance when users work without assistance, raising concerns about the long-term impact on human creativity and cognition.<|reference_end|>
arxiv
@article{kumar2024human, title={Human Creativity in the Age of LLMs: Randomized Experiments on Divergent and Convergent Thinking}, author={Harsh Kumar, Jonathan Vincentius, Ewan Jordan, Ashton Anderson}, journal={arXiv preprint arXiv:2410.03703}, year={2024}, archivePrefix={arXiv}, eprint={2410.03703}, primaryClass={cs.HC} }
kumar2024human
arxiv-665767
2410.03704
PAGE: A Modern Measure of Emotion Perception for Teamwork and Management Research
<|reference_start|>PAGE: A Modern Measure of Emotion Perception for Teamwork and Management Research: This paper presents a new measure of emotional perceptiveness called PAGE: Perceiving AI Generated Emotions. The test includes a broad range of emotions, expressed by ethnically diverse faces, spanning a wide range of ages. We created stimuli with Generative AI, demonstrating the potential to build customizable assessments of emotional intelligence at relatively low cost. Study 1 describes the validation of the image set and test construction. Study 2 reports the psychometric properties of the test. Despite its brevity - 8 minutes on average - PAGE has strong convergent validity and moderately higher internal consistency than comparable measures. Study 3 explores predictive validity using a lab experiment in which we causally identify the contributions managers make to teams. PAGE scores strongly predict managers' causal contributions to group success, a finding which is robust to controlling for personality and demographic characteristics. We also discuss the potential of Generative AI to automate the development of non-cognitive skill assessments.<|reference_end|>
arxiv
@article{weidmann2024page:, title={PAGE: A Modern Measure of Emotion Perception for Teamwork and Management Research}, author={Ben Weidmann and Yixian Xu}, journal={arXiv preprint arXiv:2410.03704}, year={2024}, archivePrefix={arXiv}, eprint={2410.03704}, primaryClass={cs.HC} }
weidmann2024page:
arxiv-665768
2410.03705
Gradient Boosting Decision Trees on Medical Diagnosis over Tabular Data
<|reference_start|>Gradient Boosting Decision Trees on Medical Diagnosis over Tabular Data: Medical diagnosis is a crucial task in the medical field, in terms of providing accurate classification and the respective treatments. Near-precise decisions based on a correct diagnosis can affect a patient's life itself, and a misclassification may even result in a catastrophe. Several traditional machine learning (ML) methods, such as support vector machines (SVMs) and logistic regression, and state-of-the-art tabular deep learning (DL) methods, including TabNet and TabTransformer, have been proposed and used over tabular medical datasets. Additionally, due to their superior performance, lower computational costs, and easier optimization over different tasks, ensemble methods have been used in the field more recently. They offer a powerful alternative in terms of providing successful medical decision-making processes in several diagnosis tasks. In this study, we investigated the benefits of ensemble methods, especially the Gradient Boosting Decision Tree (GBDT) algorithms, in medical classification tasks over tabular data, focusing on XGBoost, CatBoost, and LightGBM. The experiments demonstrate that GBDT methods outperform traditional ML and deep neural network architectures and have the highest average rank over several benchmark tabular medical diagnosis datasets. Furthermore, they require much less computational power compared to DL models, making them an optimal methodology in terms of high performance and lower complexity.<|reference_end|>
arxiv
@article{yıldız2024gradient, title={Gradient Boosting Decision Trees on Medical Diagnosis over Tabular Data}, author={A. Yarkın Yıldız and Asli Kalayci}, journal={arXiv preprint arXiv:2410.03705}, year={2024}, archivePrefix={arXiv}, eprint={2410.03705}, primaryClass={cs.LG} }
yıldız2024gradient
arxiv-665769
2410.03706
Topological Foundations of Reinforcement Learning
<|reference_start|>Topological Foundations of Reinforcement Learning: The goal of this work is to serve as a foundation for deep studies of the topology of state, action, and policy spaces in reinforcement learning. By studying these spaces from a mathematical perspective, we expect to gain more insight into how to build better algorithms to solve decision problems. Therefore, we focus on presenting the connection between the Banach fixed point theorem and the convergence of reinforcement learning algorithms, and we illustrate how the insights gained from this can practically help in designing more efficient algorithms. To this end, we first introduce relevant concepts such as metric spaces, normed spaces, and Banach spaces, before expressing the entire reinforcement learning problem in terms of Markov decision processes. This allows us to properly introduce the Banach contraction principle in a language suitable for reinforcement learning, and to write the Bellman equations in terms of operators on Banach spaces to show why reinforcement learning algorithms converge. Finally, we show how the insights gained from the mathematical study of convergence are helpful in reasoning about the best ways to make reinforcement learning algorithms more efficient.<|reference_end|>
arxiv
@article{kadurha2024topological, title={Topological Foundations of Reinforcement Learning}, author={David Krame Kadurha}, journal={arXiv preprint arXiv:2410.03706}, year={2024}, archivePrefix={arXiv}, eprint={2410.03706}, primaryClass={cs.LG cs.AI math.FA} }
kadurha2024topological
arxiv-665770
2410.03707
Mamba Meets Financial Markets: A Graph-Mamba Approach for Stock Price Prediction
<|reference_start|>Mamba Meets Financial Markets: A Graph-Mamba Approach for Stock Price Prediction: Stock markets play an important role in the global economy, where accurate stock price predictions can lead to significant financial returns. While existing transformer-based models have outperformed long short-term memory networks and convolutional neural networks in financial time series prediction, their high computational complexity and memory requirements limit their practicality for real-time trading and long-sequence data processing. To address these challenges, we propose SAMBA, an innovative framework for stock return prediction that builds on the Mamba architecture and integrates graph neural networks. SAMBA achieves near-linear computational complexity by utilizing a bidirectional Mamba block to capture long-term dependencies in historical price data and employing adaptive graph convolution to model dependencies between daily stock features. Our experimental results demonstrate that SAMBA significantly outperforms state-of-the-art baseline models in prediction accuracy while maintaining low computational complexity. The code and datasets are available at github.com/Ali-Meh619/SAMBA.<|reference_end|>
arxiv
@article{mehrabian2024mamba, title={Mamba Meets Financial Markets: A Graph-Mamba Approach for Stock Price Prediction}, author={Ali Mehrabian, Ehsan Hoseinzade, Mahdi Mazloum, Xiaohong Chen}, journal={arXiv preprint arXiv:2410.03707}, year={2024}, archivePrefix={arXiv}, eprint={2410.03707}, primaryClass={q-fin.CP cs.LG} }
mehrabian2024mamba
arxiv-665771
2410.03710
Open AI-Romance with ChatGPT, Ready for Your Cyborg Lover?
<|reference_start|>Open AI-Romance with ChatGPT, Ready for Your Cyborg Lover?: Since late March 2024, a Chinese college student has shared her AI Romance with ChatGPT on Red, a popular Chinese social media platform, attracting millions of followers and sparking numerous imitations. This phenomenon has created an iconic figure among Chinese youth, particularly females. This study employs a case study and digital ethnography approach, seeking to understand how technology (social media, generative AI) shapes Chinese female students' engagement with AI Romance and how AI Romance reshapes the gender power relations of Chinese female college students. There are three main findings. First, Open AI Romance is performative, mutually shaping, and creates flexible gender power dynamics and potential new configurations. Second, the cyborg lover identity is fluid, shared, and partially private due to technology and social platforms. Third, the rise of ChatGPT's DAN mode on Red introduces a simulated "male" app into a "female" platform, pushing the limits of policy guidelines and social norms, making the platform even "wilder." This research provides a deeper understanding of the intersection between technology and social behavior, highlighting the role of AI and social media in evolving gender dynamics among Chinese youth. It sheds light on the performative nature of digital interactions and the potential for technology to redefine traditional gender power structures.<|reference_end|>
arxiv
@article{xie2024open, title={Open AI-Romance with ChatGPT, Ready for Your Cyborg Lover?}, author={Qin Xie}, journal={arXiv preprint arXiv:2410.03710}, year={2024}, archivePrefix={arXiv}, eprint={2410.03710}, primaryClass={cs.HC cs.CY} }
xie2024open
arxiv-665772
2410.03712
Visualization of missing data: a state-of-the-art survey
<|reference_start|>Visualization of missing data: a state-of-the-art survey: Missing data, a data value that is not recorded for a variable, occurs in almost all statistical analyses and may arise for many reasons, such as a lack of collection or a lack of documentation. Researchers need to adequately deal with this issue to provide a valid analysis. The visualization of missing values plays an important role in supporting the investigation and understanding of missing data patterns. While some techniques and tools for visualization of missing values are available, it is still a challenge to select the right visualization that will fulfil the user requirements for visualizing missing data. This paper provides an overview and state-of-the-art report (STAR) of the research literature focusing on missing values visualization. To the best of our knowledge, this is the first survey paper with a focus on missing data visualization. The goal of this paper is to encourage visualization researchers to increase their involvement with missing data visualization.<|reference_end|>
arxiv
@article{alsufyani2024visualization, title={Visualization of missing data: a state-of-the-art survey}, author={Sarah Alsufyani, Matthew Forshaw and Sara Johansson Fernstad}, journal={arXiv preprint arXiv:2410.03712}, year={2024}, archivePrefix={arXiv}, eprint={2410.03712}, primaryClass={cs.HC} }
alsufyani2024visualization
arxiv-665773
2410.03713
Queering AI: Undoing the Self in the Algorithmic Borderlands
<|reference_start|>Queering AI: Undoing the Self in the Algorithmic Borderlands: This paper challenges fixed orientations towards the self in human-AI entanglements. It offers queering as a strategy to subvert the individuation and fixing of identities within algorithmic systems and the loss of futurity that it brings about. By exploring queerness, the paper examines how one's sense of self and futurity are interpellated within the algorithmic borderlands of human-AI entanglements. The study discusses an embodied experiment called "Undoing Gracia," a Digital Twin simulation where the first author Grace and their AI twins (Lex and Tortugi) interact within the fictional world of Gracia. The experiment probes into Grace's multifaceted subjectivities by conceiving themselves as interdependent entities evolving through their interactions within Gracia. The paper outlines the process of creating and implementing the simulation and examines how the agents co-perform and become-with alongside Gracia's making. The findings illuminate queer gestures for navigating human-AI entanglements in HCI research and practice, highlighting the importance of fluid identities in shaping human-AI relations.<|reference_end|>
arxiv
@article{turtle2024queering, title={Queering AI: Undoing the Self in the Algorithmic Borderlands}, author={Grace Leonora Turtle (Delft University of Technology, Netherlands), Roy Bendor (Delft University of Technology, Netherlands), Elisa Giaccardi (Politecnico di Milano, Italy), Blazej Kotowski (Universitat Pompeu Fabra, Spain)}, journal={arXiv preprint arXiv:2410.03713}, year={2024}, archivePrefix={arXiv}, eprint={2410.03713}, primaryClass={cs.HC} }
turtle2024queering
arxiv-665774
2410.03714
Perceptual Analysis of Groups of Virtual Humans Animated using Interactive Platforms
<|reference_start|>Perceptual Analysis of Groups of Virtual Humans Animated using Interactive Platforms: Virtual humans (VH) have been used in Computer Graphics (CG) for many years, and perception studies have been applied to understand how people perceive them. Some studies have already examined how realism impacts the comfort of viewers. In some cases, the user's comfort is related to human identification. For example, people from a specific group may look positively at others from the same group. Gender is one of those characteristics that have in-group advantages. For example, in terms of VHs, studies have shown that female humans are more likely to recognize emotions in female VHs than in male VHs. However, there are many other variables that can impact user perception. To aid this discussion, we conducted a study on how people perceive comfort and realism in relation to interactive VHs of different genders expressing negative, neutral, or positive emotions in groups. We created a virtual environment for participants to interact with groups of VHs, which are interactive and should evolve in real-time, using a popular game engine. To animate the characters, we opted for cartoon figures that are animated by tracking the facial expressions of actors, using available game engine platforms to drive the animation. Our results indicate that the emotion of the VH group impacts both comfort and realism perception, even when using simple cartoon characters in an interactive environment. Furthermore, the findings suggest that individuals reported feeling better with a positive emotion compared to a negative emotion, and that negative emotion recognition is impacted by the gender of the VH group. Additionally, although we used simple characters, the results are consistent with the perception obtained when analysing realistic, state-of-the-art virtual humans, where positive emotions tend to be more correctly recognized than negative ones.<|reference_end|>
arxiv
@article{montanha2024perceptual, title={Perceptual Analysis of Groups of Virtual Humans Animated using Interactive Platforms}, author={Rubens Montanha, Giovana Raupp, Ana Carolina Schmitt, Gabriel Schneider, Victor Araujo, Soraia Raupp Musse}, journal={arXiv preprint arXiv:2410.03714}, year={2024}, archivePrefix={arXiv}, eprint={2410.03714}, primaryClass={cs.HC cs.GR} }
montanha2024perceptual
arxiv-665775
2410.03717
Revisiting the Superficial Alignment Hypothesis
<|reference_start|>Revisiting the Superficial Alignment Hypothesis: The Superficial Alignment Hypothesis posits that almost all of a language model's abilities and knowledge are learned during pre-training, while post-training is about giving a model the right style and format. We re-examine these claims by empirically studying the scaling behavior of post-training with increasing finetuning examples and evaluating them using objective task-specific standardized benchmarks. Through experiments with the Llama-3, Mistral, and Llama-2 model families of multiple sizes, we observe that, similar to the pre-training scaling laws, post-training task performance scales as a power law against the number of finetuning examples. This power law relationship holds across a broad array of capabilities, including mathematical reasoning, coding, instruction following, and multihop-reasoning. In addition, for tasks like math and multihop reasoning, we observe that a handful of examples merely align the model stylistically but do not saturate performance on the benchmarks. Model performance is instead correlated with its reasoning ability and it improves significantly with more examples, illustrating the need for holistic evaluation programs leveraging objective benchmarks in addition to measurement of alignment to human preferences. We also observe that language models are not necessarily limited to using knowledge learned during pre-training. With appropriate post-training, a model's ability to integrate new knowledge greatly improves on downstream tasks like multihop question-answering. Taken together, these results shed new light on the Superficial Alignment Hypothesis, suggesting that it is, at best, an over-simplification.<|reference_end|>
arxiv
@article{raghavendra2024revisiting, title={Revisiting the Superficial Alignment Hypothesis}, author={Mohit Raghavendra, Vaskar Nath, Sean Hendryx}, journal={arXiv preprint arXiv:2410.03717}, year={2024}, archivePrefix={arXiv}, eprint={2410.03717}, primaryClass={cs.CL cs.AI cs.LG} }
raghavendra2024revisiting
arxiv-665776
2410.03718
Performance Evaluation of Tokenizers in Large Language Models for the Assamese Language
<|reference_start|>Performance Evaluation of Tokenizers in Large Language Models for the Assamese Language: The training of a tokenizer plays an important role in the performance of deep learning models. This research aims to understand the performance of tokenizers in five state-of-the-art (SOTA) large language models (LLMs) in the Assamese language of India. The research is important for understanding multi-lingual support for a low-resourced language such as Assamese. Our research reveals that the tokenizer of SUTRA from Two AI performs the best with an average Normalized Sequence Length (NSL) value of 0.45, closely followed by the tokenizer of GPT-4o from OpenAI with an average NSL value of 0.54, followed by Gemma 2, Meta Llama 3.1, and Mistral Large Instruct 2407 with average NSL values of 0.82, 1.4, and 1.48, respectively.<|reference_end|>
arxiv
@article{tamang2024performance, title={Performance Evaluation of Tokenizers in Large Language Models for the Assamese Language}, author={Sagar Tamang and Dibya Jyoti Bora}, journal={arXiv preprint arXiv:2410.03718}, year={2024}, archivePrefix={arXiv}, eprint={2410.03718}, primaryClass={cs.CL} }
tamang2024performance
arxiv-665777
2410.03719
FluentEditor+: Text-based Speech Editing by Modeling Local Hierarchical Acoustic Smoothness and Global Prosody Consistency
<|reference_start|>FluentEditor+: Text-based Speech Editing by Modeling Local Hierarchical Acoustic Smoothness and Global Prosody Consistency: Text-based speech editing (TSE) allows users to modify speech by editing the corresponding text and performing operations such as cutting, copying, and pasting to generate updated audio without altering the original recording directly. While current TSE techniques focus on minimizing discrepancies between generated speech and reference targets within edited segments, they often neglect the importance of maintaining both local and global fluency in the context of the original discourse. Additionally, seamlessly integrating edited segments with unaltered portions of the audio remains challenging, typically requiring support from text-to-speech (TTS) systems. This paper introduces a novel approach, FluentEditor$\tiny +$, designed to overcome these limitations. FluentEditor$\tiny +$ employs advanced feature extraction techniques to capture both acoustic and prosodic characteristics, ensuring fluent transitions between edited and unedited regions. The model ensures segmental acoustic smoothness and global prosody consistency, allowing seamless splicing of speech while preserving the coherence and naturalness of the output. Extensive experiments on the VCTK and LibriTTS datasets show that FluentEditor$\tiny +$ surpasses existing TTS-based methods, including Editspeech, Campnet, $A^3T$, FluentSpeech, and FluentEditor, in both fluency and prosody. Ablation studies further highlight the contributions of each module to the overall effectiveness of the system.<|reference_end|>
arxiv
@article{liu2024fluenteditor+:, title={FluentEditor+: Text-based Speech Editing by Modeling Local Hierarchical Acoustic Smoothness and Global Prosody Consistency}, author={Rui Liu, Jiatian Xi, Ziyue Jiang and Haizhou Li}, journal={arXiv preprint arXiv:2410.03719}, year={2024}, archivePrefix={arXiv}, eprint={2410.03719}, primaryClass={cs.CL cs.SD eess.AS} }
liu2024fluenteditor+:
arxiv-665778
2410.03720
NeuralQP: A General Hypergraph-based Optimization Framework for Large-scale QCQPs
<|reference_start|>NeuralQP: A General Hypergraph-based Optimization Framework for Large-scale QCQPs: Machine Learning (ML) optimization frameworks have gained attention for their ability to accelerate the optimization of large-scale Quadratically Constrained Quadratic Programs (QCQPs) by learning shared problem structures. However, existing ML frameworks often rely heavily on strong problem assumptions and large-scale solvers. This paper introduces NeuralQP, a general hypergraph-based framework for large-scale QCQPs. NeuralQP features two main components: Hypergraph-based Neural Prediction, which generates embeddings and predicted solutions for QCQPs without problem assumptions, and Parallel Neighborhood Optimization, which employs a McCormick relaxation-based repair strategy to identify and correct illegal variables, iteratively improving the solution with a small-scale solver. We further prove that our framework UniEGNN with our hypergraph representation is equivalent to the Interior-Point Method (IPM) for quadratic programming. Experiments on two benchmark problems and large-scale real-world instances from QPLIB demonstrate that NeuralQP outperforms state-of-the-art solvers (e.g., Gurobi and SCIP) in both solution quality and time efficiency, further validating the efficiency of ML optimization frameworks for QCQPs.<|reference_end|>
arxiv
@article{xiong2024neuralqp:, title={NeuralQP: A General Hypergraph-based Optimization Framework for Large-scale QCQPs}, author={Zhixiao Xiong, Fangyu Zong, Huigen Ye, Hua Xu}, journal={arXiv preprint arXiv:2410.03720}, year={2024}, archivePrefix={arXiv}, eprint={2410.03720}, primaryClass={math.OC cs.LG} }
xiong2024neuralqp:
arxiv-665779
2410.03721
Thematic Analysis with Open-Source Generative AI and Machine Learning: A New Method for Inductive Qualitative Codebook Development
<|reference_start|>Thematic Analysis with Open-Source Generative AI and Machine Learning: A New Method for Inductive Qualitative Codebook Development: This paper aims to answer one central question: to what extent can open-source generative text models be used in a workflow to approximate thematic analysis in social science research? To answer this question, we present the Generative AI-enabled Theme Organization and Structuring (GATOS) workflow, which uses open-source machine learning techniques, natural language processing tools, and generative text models to facilitate thematic analysis. To establish validity of the method, we present three case studies applying the GATOS workflow, leveraging these models and techniques to inductively create codebooks similar to traditional procedures using thematic analysis. Specifically, we investigate the extent to which a workflow comprising open-source models and tools can inductively produce codebooks that approach the known space of themes and sub-themes. To address the challenge of gleaning insights from these texts, we combine open-source generative text models, retrieval-augmented generation, and prompt engineering to identify codes and themes in large volumes of text, i.e., generate a qualitative codebook. The process mimics an inductive coding process that researchers might use in traditional thematic analysis by reading text one unit of analysis at a time, considering existing codes already in the codebook, and then deciding whether or not to generate a new code based on whether the extant codebook provides adequate thematic coverage. We demonstrate this workflow using three synthetic datasets from hypothetical organizational research settings: a study of teammate feedback in teamwork settings, a study of organizational cultures of ethical behavior, and a study of employee perspectives about returning to their offices after the pandemic. We show that the GATOS workflow is able to identify themes in the text that were used to generate the original synthetic datasets.<|reference_end|>
arxiv
@article{katz2024thematic, title={Thematic Analysis with Open-Source Generative AI and Machine Learning: A New Method for Inductive Qualitative Codebook Development}, author={Andrew Katz and Gabriella Coloyan Fleming and Joyce Main}, journal={arXiv preprint arXiv:2410.03721}, year={2024}, archivePrefix={arXiv}, eprint={2410.03721}, primaryClass={cs.CL cs.AI cs.HC} }
katz2024thematic
arxiv-665780
2410.03723
Human Bias in the Face of AI: The Role of Human Judgement in AI Generated Text Evaluation
<|reference_start|>Human Bias in the Face of AI: The Role of Human Judgement in AI Generated Text Evaluation: As AI advances in text generation, human trust in AI generated content remains constrained by biases that go beyond concerns of accuracy. This study explores how bias shapes the perception of AI versus human generated content. Through three experiments involving text rephrasing, news article summarization, and persuasive writing, we investigated how human raters respond to labeled and unlabeled content. While the raters could not differentiate the two types of texts in the blind test, they overwhelmingly favored content labeled as "Human Generated," over those labeled "AI Generated," by a preference score of over 30%. We observed the same pattern even when the labels were deliberately swapped. This human bias against AI has broader societal and cognitive implications, as it undervalues AI performance. This study highlights the limitations of human judgment in interacting with AI and offers a foundation for improving human-AI collaboration, especially in creative fields.<|reference_end|>
arxiv
@article{zhu2024human, title={Human Bias in the Face of AI: The Role of Human Judgement in AI Generated Text Evaluation}, author={Tiffany Zhu, Iain Weissburg, Kexun Zhang, William Yang Wang}, journal={arXiv preprint arXiv:2410.03723}, year={2024}, archivePrefix={arXiv}, eprint={2410.03723}, primaryClass={cs.CL cs.AI cs.HC} }
zhu2024human
arxiv-665781
2410.03724
Large Language Models Overcome the Machine Penalty When Acting Fairly but Not When Acting Selfishly or Altruistically
<|reference_start|>Large Language Models Overcome the Machine Penalty When Acting Fairly but Not When Acting Selfishly or Altruistically: In social dilemmas where the collective and self-interests are at odds, people typically cooperate less with machines than with fellow humans, a phenomenon termed the machine penalty. Overcoming this penalty is critical for successful human-machine collectives, yet current solutions often involve ethically-questionable tactics, like concealing machines' non-human nature. In this study, with 1,152 participants, we explore the possibility of closing this gap by using Large Language Models (LLMs), in scenarios where communication is possible between interacting parties. We design three types of LLMs: (i) Cooperative, aiming to assist its human associate; (ii) Selfish, focusing solely on maximizing its self-interest; and (iii) Fair, balancing its own and collective interest, while slightly prioritizing self-interest. Our findings reveal that, when interacting with humans, fair LLMs are able to induce cooperation levels comparable to those observed in human-human interactions, even when their non-human nature is fully disclosed. In contrast, selfish and cooperative LLMs fail to achieve this goal. Post-experiment analysis shows that all three types of LLMs succeed in forming mutual cooperation agreements with humans, yet only fair LLMs, which occasionally break their promises, are capable of instilling a perception among humans that cooperating with them is the social norm, and eliciting positive views on their trustworthiness, mindfulness, intelligence, and communication quality. Our findings suggest that for effective human-machine cooperation, bot manufacturers should avoid designing machines with mere rational decision-making or a sole focus on assisting humans. Instead, they should design machines capable of judiciously balancing their own interest and the interest of humans.<|reference_end|>
arxiv
@article{wang2024large, title={Large Language Models Overcome the Machine Penalty When Acting Fairly but Not When Acting Selfishly or Altruistically}, author={Zhen Wang, Ruiqi Song, Chen Shen, Shiya Yin, Zhao Song, Balaraju Battu, Lei Shi, Danyang Jia, Talal Rahwan, Shuyue Hu}, journal={arXiv preprint arXiv:2410.03724}, year={2024}, archivePrefix={arXiv}, eprint={2410.03724}, primaryClass={cs.HC cs.AI cs.GT econ.GN q-fin.EC} }
wang2024large
arxiv-665782
2410.03725
Realtime, multimodal invasive ventilation risk monitoring using language models and BoXHED
<|reference_start|>Realtime, multimodal invasive ventilation risk monitoring using language models and BoXHED: Objective: realtime monitoring of invasive ventilation (iV) in intensive care units (ICUs) plays a crucial role in ensuring prompt interventions and better patient outcomes. However, conventional methods often overlook valuable insights embedded within clinical notes, relying solely on tabular data. In this study, we propose an innovative approach to enhance iV risk monitoring by incorporating clinical notes into the monitoring pipeline through using language models for text summarization. Results: We achieve superior performance in all metrics reported by the state-of-the-art in iV risk monitoring, namely: an AUROC of 0.86, an AUC-PR of 0.35, and an AUCt of up to 0.86. We also demonstrate that our methodology allows for more lead time in flagging iV for certain time buckets. Conclusion: Our study underscores the potential of integrating clinical notes and language models into realtime iV risk monitoring, paving the way for improved patient care and informed clinical decision-making in ICU settings.<|reference_end|>
arxiv
@article{pakbin2024realtime, title={Realtime, multimodal invasive ventilation risk monitoring using language models and BoXHED}, author={Arash Pakbin, Aaron Su, Donald K.K. Lee, Bobak J. Mortazavi}, journal={arXiv preprint arXiv:2410.03725}, year={2024}, archivePrefix={arXiv}, eprint={2410.03725}, primaryClass={cs.CL} }
pakbin2024realtime
arxiv-665783
2410.03726
Neurosymbolic AI approach to Attribution in Large Language Models
<|reference_start|>Neurosymbolic AI approach to Attribution in Large Language Models: Attribution in large language models (LLMs) remains a significant challenge, particularly in ensuring the factual accuracy and reliability of the generated outputs. Current methods for citation or attribution, such as those employed by tools like Perplexity.ai and Bing Search-integrated LLMs, attempt to ground responses by providing real-time search results and citations. However, so far, these approaches suffer from issues such as hallucinations, biases, surface-level relevance matching, and the complexity of managing vast, unfiltered knowledge sources. While tools like Perplexity.ai dynamically integrate web-based information and citations, they often rely on inconsistent sources, such as blog posts or other unreliable material, which limits their overall reliability. We argue that these challenges can be mitigated by integrating Neurosymbolic AI (NesyAI), which combines the strengths of neural networks with structured symbolic reasoning. NesyAI offers transparent, interpretable, and dynamic reasoning processes, addressing the limitations of current attribution methods by incorporating structured symbolic knowledge with flexible, neural-based learning. This paper explores how NesyAI frameworks can enhance existing attribution models, offering more reliable, interpretable, and adaptable systems for LLMs.<|reference_end|>
arxiv
@article{tilwani2024neurosymbolic, title={Neurosymbolic AI approach to Attribution in Large Language Models}, author={Deepa Tilwani, Revathy Venkataramanan, Amit P. Sheth}, journal={arXiv preprint arXiv:2410.03726}, year={2024}, archivePrefix={arXiv}, eprint={2410.03726}, primaryClass={cs.CL} }
tilwani2024neurosymbolic
arxiv-665784
2410.03727
FaithEval: Can Your Language Model Stay Faithful to Context, Even If "The Moon is Made of Marshmallows"
<|reference_start|>FaithEval: Can Your Language Model Stay Faithful to Context, Even If "The Moon is Made of Marshmallows": Ensuring faithfulness to context in large language models (LLMs) and retrieval-augmented generation (RAG) systems is crucial for reliable deployment in real-world applications, as incorrect or unsupported information can erode user trust. Despite advancements on standard benchmarks, faithfulness hallucination-where models generate responses misaligned with the provided context-remains a significant challenge. In this work, we introduce FaithEval, a novel and comprehensive benchmark tailored to evaluate the faithfulness of LLMs in contextual scenarios across three diverse tasks: unanswerable, inconsistent, and counterfactual contexts. These tasks simulate real-world challenges where retrieval mechanisms may surface incomplete, contradictory, or fabricated information. FaithEval comprises 4.9K high-quality problems in total, validated through a rigorous four-stage context construction and validation framework, employing both LLM-based auto-evaluation and human validation. Our extensive study across a wide range of open-source and proprietary models reveals that even state-of-the-art models often struggle to remain faithful to the given context, and that larger models do not necessarily exhibit improved faithfulness. Project is available at: \url{https://github.com/SalesforceAIResearch/FaithEval}.<|reference_end|>
arxiv
@article{ming2024faitheval:, title={FaithEval: Can Your Language Model Stay Faithful to Context, Even If "The Moon is Made of Marshmallows"}, author={Yifei Ming, Senthil Purushwalkam, Shrey Pandit, Zixuan Ke, Xuan-Phi Nguyen, Caiming Xiong, Shafiq Joty}, journal={arXiv preprint arXiv:2410.03727}, year={2024}, archivePrefix={arXiv}, eprint={2410.03727}, primaryClass={cs.CL cs.AI cs.LG} }
ming2024faitheval:
arxiv-665785
2410.03728
Exploring QUIC Dynamics: A Large-Scale Dataset for Encrypted Traffic Analysis
<|reference_start|>Exploring QUIC Dynamics: A Large-Scale Dataset for Encrypted Traffic Analysis: QUIC, a new and increasingly used transport protocol, addresses and resolves the limitations of TCP by offering improved security, performance, and features such as stream multiplexing and connection migration. These features, however, also present challenges for network operators who need to monitor and analyze web traffic. In this paper, we introduce VisQUIC, a labeled dataset comprising over 100,000 QUIC traces from more than 44,000 websites (URLs), collected over a four-month period. These traces provide the foundation for generating more than seven million images, with configurable parameters of window length, pixel resolution, normalization, and labels. These images enable an observer looking at the interactions between a client and a server to analyze and gain insights about QUIC encrypted connections. To illustrate the dataset's potential, we offer a use-case example of an observer estimating the number of HTTP/3 request/response pairs in a given QUIC connection, which can reveal server behavior, client--server interactions, and the load imposed by an observed connection. We formulate the problem as a discrete regression problem, train a machine learning (ML) model for it, and then evaluate it using the proposed dataset on an example use case.<|reference_end|>
arxiv
@article{gahtan2024exploring, title={Exploring QUIC Dynamics: A Large-Scale Dataset for Encrypted Traffic Analysis}, author={Barak Gahtan, Robert J. Shahla, Alex M. Bronstein, Reuven Cohen}, journal={arXiv preprint arXiv:2410.03728}, year={2024}, archivePrefix={arXiv}, eprint={2410.03728}, primaryClass={cs.NI cs.AI cs.CV cs.LG} }
gahtan2024exploring
arxiv-665786
2410.03729
Certifying Guidance & Control Networks: Uncertainty Propagation to an Event Manifold
<|reference_start|>Certifying Guidance & Control Networks: Uncertainty Propagation to an Event Manifold: We perform uncertainty propagation on an event manifold for Guidance & Control Networks (G&CNETs), aiming to enhance the certification tools for neural networks in this field. This work utilizes three previously solved optimal control problems with varying levels of dynamics nonlinearity and event manifold complexity. The G&CNETs are trained to represent the optimal control policies of a time-optimal interplanetary transfer, a mass-optimal landing on an asteroid and energy-optimal drone racing, respectively. For each of these problems, we describe analytically the terminal conditions on an event manifold with respect to initial state uncertainties. Crucially, this expansion does not depend on time but solely on the initial conditions of the system, thereby making it possible to study the robustness of the G&CNET at any specific stage of a mission defined by the event manifold. Once this analytical expression is found, we provide confidence bounds by applying the Cauchy-Hadamard theorem and perform uncertainty propagation using moment generating functions. While Monte Carlo-based (MC) methods can yield the results we present, this work is driven by the recognition that MC simulations alone may be insufficient for future certification of neural networks in guidance and control applications.<|reference_end|>
arxiv
@article{origer2024certifying, title={Certifying Guidance & Control Networks: Uncertainty Propagation to an Event Manifold}, author={Sebastien Origer, Dario Izzo, Giacomo Acciarini, Francesco Biscani, Rita Mastroianni, Max Bannach, Harry Holt}, journal={arXiv preprint arXiv:2410.03729}, year={2024}, archivePrefix={arXiv}, eprint={2410.03729}, primaryClass={eess.SY cs.LG cs.SY} }
origer2024certifying
arxiv-665787
2410.03730
Progress Report: Towards European LLMs
<|reference_start|>Progress Report: Towards European LLMs: We present preliminary results of the project OpenGPT-X. At present, the project has developed two multilingual LLMs designed to embrace Europe's linguistic diversity by supporting all 24 official languages of the European Union. Trained on a dataset comprising around 60% non-English data and utilizing a custom multilingual tokenizer, our models address the limitations of existing LLMs that predominantly focus on English or a few high-resource languages. We detail the models' development principles, data processing techniques, tokenizer optimization, and training methodologies. The models demonstrate competitive performance across multilingual benchmarks, as evidenced by their performance on European versions of ARC, HellaSwag, MMLU, and TruthfulQA.<|reference_end|>
arxiv
@article{ali2024teuken-7b-base, title={Teuken-7B-Base & Teuken-7B-Instruct: Towards European LLMs}, author={Mehdi Ali, Michael Fromm, Klaudia Thellmann, Jan Ebert, Alexander Arno Weber, Richard Rutmann, Charvi Jain, Max Lübbering, Daniel Steinigen, Johannes Leveling, Katrin Klug, Jasper Schulze Buschhoff, Lena Jurkschat, Hammam Abdelwahab, Benny Jörg Stein, Karl-Heinz Sylla, Pavel Denisov, Nicolò Brandizzi, Qasid Saleem, Anirban Bhowmick, Lennard Helmer, Chelsea John, Pedro Ortiz Suarez, Malte Ostendorff, Alex Jude, Lalith Manjunath, Samuel Weinbach, Carolin Penke, Oleg Filatov, Shima Asaadi, Fabio Barth, Rafet Sifa, Fabian Küch, Andreas Herten, René Jäkel, Georg Rehm, Stefan Kesselheim, Joachim Köhler, Nicolas Flores-Herr}, journal={arXiv preprint arXiv:2410.03730}, year={2024}, archivePrefix={arXiv}, eprint={2410.03730}, primaryClass={cs.CL cs.AI cs.LG} }
ali2024teuken-7b-base
arxiv-665788
2410.03731
Unsupervised Human Preference Learning
<|reference_start|>Unsupervised Human Preference Learning: Large language models demonstrate impressive reasoning abilities but struggle to provide personalized content due to their lack of individual user preference information. Existing methods, such as in-context learning and parameter-efficient fine-tuning, fall short in capturing the complexity of human preferences, especially given the small, personal datasets individuals possess. In this paper, we propose a novel approach utilizing small parameter models as preference agents to generate natural language rules that guide a larger, pre-trained model, enabling efficient personalization. Our method involves a small, local "steering wheel" model that directs the outputs of a much larger foundation model, producing content tailored to an individual's preferences while leveraging the extensive knowledge and capabilities of the large model. Importantly, this personalization is achieved without the need to fine-tune the large model. Experimental results on email and article datasets demonstrate that our technique significantly outperforms baseline personalization methods. By allowing foundation models to adapt to individual preferences in a data- and compute-efficient manner, our approach paves the way for highly personalized language model applications.<|reference_end|>
arxiv
@article{shashidhar2024unsupervised, title={Unsupervised Human Preference Learning}, author={Sumuk Shashidhar, Abhinav Chinta, Vaibhav Sahai, Dilek Hakkani-Tür}, journal={arXiv preprint arXiv:2410.03731}, year={2024}, archivePrefix={arXiv}, eprint={2410.03731}, primaryClass={cs.CL cs.AI} }
shashidhar2024unsupervised
arxiv-665789
2410.03732
Multi-Scale Convolutional LSTM with Transfer Learning for Anomaly Detection in Cellular Networks
<|reference_start|>Multi-Scale Convolutional LSTM with Transfer Learning for Anomaly Detection in Cellular Networks: The rapid growth in mobile broadband usage and the increasing number of subscribers have made it crucial to ensure reliable network performance. As mobile networks grow more complex, especially during peak hours, manual collection of Key Performance Indicators (KPIs) is time-consuming due to the vast data involved. Detecting network failures and identifying unusual behavior during busy periods is vital to assess network health. Researchers have applied Deep Learning (DL) and Machine Learning (ML) techniques to understand network behavior by predicting throughput, analyzing call records, and detecting outages. However, these methods often require significant computational power and large labeled datasets, and are typically specialized, making retraining for new scenarios costly and time-intensive. This study introduces a novel approach, Multi-Scale Convolutional LSTM with Transfer Learning (TL), to detect anomalies in cellular networks. The model is initially trained from scratch using a publicly available dataset to learn typical network behavior. Transfer Learning is then employed to fine-tune the model by applying learned weights to different datasets. We compare the performance of the model trained from scratch with that of the fine-tuned model using TL. To address class imbalance and gain deeper insights, Exploratory Data Analysis (EDA) and the Synthetic Minority Over-sampling Technique (SMOTE) are applied. Results demonstrate that the model trained from scratch achieves 99% accuracy after 100 epochs, while the fine-tuned model reaches 95% accuracy on a different dataset after just 20 epochs.<|reference_end|>
arxiv
@article{noonari2024multi-scale, title={Multi-Scale Convolutional LSTM with Transfer Learning for Anomaly Detection in Cellular Networks}, author={Nooruddin Noonari, Daniel Corujo, Rui L. Aguiar, Francisco J. Ferrao}, journal={arXiv preprint arXiv:2410.03732}, year={2024}, archivePrefix={arXiv}, eprint={2410.03732}, primaryClass={cs.NI cs.LG} }
noonari2024multi-scale
arxiv-665790
2410.03733
Evaluating the Effects of AI Directors for Quest Selection
<|reference_start|>Evaluating the Effects of AI Directors for Quest Selection: Modern commercial games are designed for mass appeal, not for individual players, but there is a unique opportunity in video games to better fit the individual through adapting game elements. In this paper, we focus on AI Directors, systems which can dynamically modify a game, that personalize the player experience to match the player's preference. In the past, some AI Director studies have provided inconclusive results, so their effect on player experience is not clear. We take three AI Directors and directly compare them in a human subject study to test their effectiveness on quest selection. Our results show that a non-random AI Director provides a better player experience than a random AI Director.<|reference_end|>
arxiv
@article{yu2024evaluating, title={Evaluating the Effects of AI Directors for Quest Selection}, author={Kristen K. Yu, Matthew Guzdial, and Nathan Sturtevant}, journal={arXiv preprint arXiv:2410.03733}, year={2024}, archivePrefix={arXiv}, eprint={2410.03733}, primaryClass={cs.HC cs.AI} }
yu2024evaluating
arxiv-665791
2410.03734
Accent conversion using discrete units with parallel data synthesized from controllable accented TTS
<|reference_start|>Accent conversion using discrete units with parallel data synthesized from controllable accented TTS: The goal of accent conversion (AC) is to convert speech accents while preserving content and speaker identity. Previous methods either required reference utterances during inference, did not preserve speaker identity well, or used one-to-one systems that could only be trained for each non-native accent. This paper presents a promising AC model that can convert many accents into native speech, overcoming these issues. Our approach utilizes discrete units, derived from clustering self-supervised representations of native speech, as an intermediary target for accent conversion. Leveraging multi-speaker text-to-speech synthesis, it transforms these discrete representations back into native speech while retaining the speaker identity. Additionally, we develop an efficient data augmentation method to train the system without demanding a lot of non-native resources. Our system is shown to improve non-native speaker fluency, sound like a native accent, and preserve the original speaker identity well.<|reference_end|>
arxiv
@article{nguyen2024accent, title={Accent conversion using discrete units with parallel data synthesized from controllable accented TTS}, author={Tuan Nam Nguyen and Ngoc Quan Pham and Alexander Waibel}, journal={arXiv preprint arXiv:2410.03734}, year={2024}, archivePrefix={arXiv}, eprint={2410.03734}, primaryClass={cs.SD cs.CL eess.AS} }
nguyen2024accent
arxiv-665792
2410.03735
Task-Adaptive Pretrained Language Models via Clustered-Importance Sampling
<|reference_start|>Task-Adaptive Pretrained Language Models via Clustered-Importance Sampling: Specialist language models (LMs) focus on a specific task or domain on which they often outperform generalist LMs of the same size. However, the specialist data needed to pretrain these models is only available in limited amounts for most tasks. In this work, we build specialist models from large generalist training sets instead. We adjust the training distribution of the generalist data with guidance from the limited domain-specific data. We explore several approaches, with clustered importance sampling standing out. This method clusters the generalist dataset and samples from these clusters based on their frequencies in the smaller specialist dataset. It is scalable, suitable for pretraining and continued pretraining, and it works well in multi-task settings. Our findings demonstrate improvements across different domains in terms of language modeling perplexity and accuracy on multiple-choice question tasks. We also present ablation studies that examine the impact of dataset sizes, clustering configurations, and model sizes.<|reference_end|>
arxiv
@article{grangier2024task-adaptive, title={Task-Adaptive Pretrained Language Models via Clustered-Importance Sampling}, author={David Grangier, Simin Fan, Skyler Seto, Pierre Ablin}, journal={arXiv preprint arXiv:2410.03735}, year={2024}, archivePrefix={arXiv}, eprint={2410.03735}, primaryClass={cs.CL cs.LG} }
grangier2024task-adaptive
arxiv-665793
2410.03736
CliMB: An AI-enabled Partner for Clinical Predictive Modeling
<|reference_start|>CliMB: An AI-enabled Partner for Clinical Predictive Modeling: Despite its significant promise and continuous technical advances, real-world applications of artificial intelligence (AI) remain limited. We attribute this to the "domain expert-AI-conundrum": while domain experts, such as clinician scientists, should be able to build predictive models such as risk scores, they face substantial barriers in accessing state-of-the-art (SOTA) tools. While automated machine learning (AutoML) has been proposed as a partner in clinical predictive modeling, many additional requirements need to be fulfilled to make machine learning accessible for clinician scientists. To address this gap, we introduce CliMB, a no-code AI-enabled partner designed to empower clinician scientists to create predictive models using natural language. CliMB guides clinician scientists through the entire medical data science pipeline, thus empowering them to create predictive models from real-world data in just one conversation. CliMB also creates structured reports and interpretable visuals. In evaluations involving clinician scientists and systematic comparisons against a baseline GPT-4, CliMB consistently demonstrated superior performance in key areas such as planning, error prevention, code execution, and model performance. Moreover, in blinded assessments involving 45 clinicians from diverse specialties and career stages, more than 80% preferred CliMB over GPT-4. Overall, by providing a no-code interface with clear guidance and access to SOTA methods in the fields of data-centric AI, AutoML, and interpretable ML, CliMB empowers clinician scientists to build robust predictive models.<|reference_end|>
arxiv
@article{saveliev2024climb:, title={CliMB: An AI-enabled Partner for Clinical Predictive Modeling}, author={Evgeny Saveliev, Tim Schubert, Thomas Pouplin, Vasilis Kosmoliaptsis, Mihaela van der Schaar}, journal={arXiv preprint arXiv:2410.03736}, year={2024}, archivePrefix={arXiv}, eprint={2410.03736}, primaryClass={cs.HC cs.AI cs.LG} }
saveliev2024climb:
arxiv-665794
2410.03737
Meta Reinforcement Learning Approach for Adaptive Resource Optimization in O-RAN
<|reference_start|>Meta Reinforcement Learning Approach for Adaptive Resource Optimization in O-RAN: As wireless networks grow to support more complex applications, the Open Radio Access Network (O-RAN) architecture, with its smart RAN Intelligent Controller (RIC) modules, becomes a crucial solution for real-time network data collection, analysis, and dynamic management of network resources including radio resource blocks and downlink power allocation. Utilizing artificial intelligence (AI) and machine learning (ML), O-RAN addresses the variable demands of modern networks with unprecedented efficiency and adaptability. Despite progress in using ML-based strategies for network optimization, challenges remain, particularly in the dynamic allocation of resources in unpredictable environments. This paper proposes a novel Meta Deep Reinforcement Learning (Meta-DRL) strategy, inspired by Model-Agnostic Meta-Learning (MAML), to advance resource block and downlink power allocation in O-RAN. Our approach leverages O-RAN's disaggregated architecture with virtual distributed units (DUs) and meta-DRL strategies, enabling adaptive and localized decision-making that significantly enhances network efficiency. By integrating meta-learning, our system quickly adapts to new network conditions, optimizing resource allocation in real-time. This results in a 19.8% improvement in network management performance over traditional methods, advancing the capabilities of next-generation wireless networks.<|reference_end|>
arxiv
@article{lotfi2024meta, title={Meta Reinforcement Learning Approach for Adaptive Resource Optimization in O-RAN}, author={Fatemeh Lotfi, Fatemeh Afghah}, journal={arXiv preprint arXiv:2410.03737}, year={2024}, archivePrefix={arXiv}, eprint={2410.03737}, primaryClass={cs.NI cs.AI cs.LG cs.RO cs.SY eess.SY stat.ML} }
lotfi2024meta
arxiv-665795
2410.03738
ERASMO: Leveraging Large Language Models for Enhanced Clustering Segmentation
<|reference_start|>ERASMO: Leveraging Large Language Models for Enhanced Clustering Segmentation: Cluster analysis plays a crucial role in various domains and applications, such as customer segmentation in marketing. These contexts often involve multimodal data, including both tabular and textual datasets, making it challenging to represent hidden patterns for obtaining meaningful clusters. This study introduces ERASMO, a framework designed to fine-tune a pretrained language model on textually encoded tabular data and generate embeddings from the fine-tuned model. ERASMO employs a textual converter to transform tabular data into a textual format, enabling the language model to process and understand the data more effectively. Additionally, ERASMO produces contextually rich and structurally representative embeddings through techniques such as random feature sequence shuffling and number verbalization. Extensive experimental evaluations were conducted using multiple datasets and baseline approaches. Our results demonstrate that ERASMO fully leverages the specific context of each tabular dataset, leading to more precise and nuanced embeddings for accurate clustering. This approach enhances clustering performance by capturing complex relationship patterns within diverse tabular data.<|reference_end|>
arxiv
@article{silva2024erasmo, title={ERASMO: Leveraging Large Language Models for Enhanced Clustering Segmentation}, author={Fillipe dos Santos Silva, Gabriel Kenzo Kakimoto, Julio Cesar dos Reis, Marcelo S. Reis}, journal={arXiv preprint arXiv:2410.03738}, year={2024}, archivePrefix={arXiv}, eprint={2410.03738}, primaryClass={cs.CL cs.AI} }
silva2024erasmo
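The two serialization tricks the abstract names, random feature-sequence shuffling and number verbalization, are easy to illustrate. Below is a minimal sketch in plain Python; the column names, the "name is value" template, and the digit-by-digit verbalizer are assumptions for illustration, not ERASMO's actual converter.

```python
# Sketch of textually encoding a tabular row with feature shuffling and
# number verbalization (template and verbalizer are illustrative guesses).
import random

DIGITS = "zero one two three four five six seven eight nine".split()

def verbalize_number(x):
    # spell out each digit; non-digit characters (e.g. '.') pass through
    return " ".join(DIGITS[int(c)] if c.isdigit() else c for c in str(x))

def row_to_text(row, shuffle=True, seed=None):
    items = list(row.items())
    if shuffle:                      # random feature sequence shuffling
        random.Random(seed).shuffle(items)
    parts = [f"{name} is {verbalize_number(v) if isinstance(v, (int, float)) else v}"
             for name, v in items]
    return ", ".join(parts) + "."

row = {"age": 42, "plan": "premium", "monthly_spend": 37.5}
print(row_to_text(row, seed=0))
# prints the row as one shuffled, verbalized sentence,
# e.g. "plan is premium, age is four two, monthly_spend is three seven . five."
```

Fine-tuning a pretrained LM on many such shuffled serializations, then embedding each row with the fine-tuned model, is the pipeline the abstract describes for downstream clustering.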
arxiv-665796
2410.03739
Grammar Induction from Visual, Speech and Text
<|reference_start|>Grammar Induction from Visual, Speech and Text: Grammar induction could benefit from rich heterogeneous signals such as text, vision, and acoustics, with features from distinct modalities playing complementary roles. With this intuition, this work introduces a novel \emph{unsupervised visual-audio-text grammar induction} task (named \textbf{VAT-GI}) to induce constituent grammar trees from parallel image, text, and speech inputs. Inspired by the fact that language grammar natively exists beyond text, we argue that text need not be the predominant modality in grammar induction. We therefore further introduce a \emph{textless} setting of VAT-GI, wherein the task relies solely on visual and auditory inputs. To approach the task, we propose a visual-audio-text inside-outside recursive autoencoder (\textbf{VaTiora}) framework, which leverages rich modality-specific and complementary features for effective grammar parsing. In addition, a more challenging benchmark dataset is constructed to assess the generalization ability of a VAT-GI system. Experiments on two benchmark datasets demonstrate that our proposed VaTiora system is more effective at incorporating the various multimodal signals and sets new state-of-the-art performance on VAT-GI.<|reference_end|>
arxiv
@article{zhao2024grammar, title={Grammar Induction from Visual, Speech and Text}, author={Yu Zhao, Hao Fei, Shengqiong Wu, Meishan Zhang, Min Zhang, Tat-seng Chua}, journal={arXiv preprint arXiv:2410.03739}, year={2024}, archivePrefix={arXiv}, eprint={2410.03739}, primaryClass={cs.CL cs.AI} }
zhao2024grammar
arxiv-665797
2410.03740
Language Enhanced Model for Eye (LEME): An Open-Source Ophthalmology-Specific Large Language Model
<|reference_start|>Language Enhanced Model for Eye (LEME): An Open-Source Ophthalmology-Specific Large Language Model: Large Language Models (LLMs) are poised to revolutionize healthcare, yet ophthalmology-specific LLMs remain scarce and underexplored. We introduced an open-source, specialized LLM for ophthalmology, termed Language Enhanced Model for Eye (LEME). LEME was initially pre-trained on the Llama2 70B framework and further fine-tuned with a corpus of ~127,000 non-copyrighted training instances curated from ophthalmology-specific case reports, abstracts, and open-source study materials. We benchmarked LEME against eight other LLMs, namely, GPT-3.5, GPT-4, three Llama2 models (7B, 13B, 70B), PMC-LLAMA 13B, Meditron 70B, and EYE-Llama (another ophthalmology-specific LLM). Evaluations included four internal validation tasks: abstract completion, fill-in-the-blank, multiple-choice questions (MCQ), and short-answer QA. External validation tasks encompassed long-form QA, MCQ, patient EHR summarization, and clinical QA. Evaluation metrics included Rouge-L scores, accuracy, and expert evaluation of correctness, completeness, and readability. In internal validations, LEME consistently outperformed its counterparts, achieving Rouge-L scores of 0.20 in abstract completion (all p<0.05), 0.82 in fill-in-the-blank (all p<0.0001), and 0.22 in short-answer QA (all p<0.0001, except versus GPT-4). In external validations, LEME excelled in long-form QA with a Rouge-L of 0.19 (all p<0.0001), ranked second in MCQ accuracy (0.68; all p<0.0001), and scored highest in EHR summarization and clinical QA (ranging from 4.24 to 4.83 out of 5 for correctness, completeness, and readability). LEME's emphasis on robust fine-tuning and its use of non-copyrighted data represent a breakthrough in open-source ophthalmology-specific LLMs, offering the potential to revolutionize the execution of clinical tasks while democratizing research collaboration.<|reference_end|>
arxiv
@article{gilson2024language, title={Language Enhanced Model for Eye (LEME): An Open-Source Ophthalmology-Specific Large Language Model}, author={Aidan Gilson, Xuguang Ai, Qianqian Xie, Sahana Srinivasan, Krithi Pushpanathan, Maxwell B. Singer, Jimin Huang, Hyunjae Kim, Erping Long, Peixing Wan, Luciano V. Del Priore, Lucila Ohno-Machado, Hua Xu, Dianbo Liu, Ron A. Adelman, Yih-Chung Tham, Qingyu Chen}, journal={arXiv preprint arXiv:2410.03740}, year={2024}, archivePrefix={arXiv}, eprint={2410.03740}, primaryClass={cs.CL} }
gilson2024language
arxiv-665798
2410.03741
Towards Democratization of Subspeciality Medical Expertise
<|reference_start|>Towards Democratization of Subspeciality Medical Expertise: The scarcity of subspecialist medical expertise, particularly in rare, complex, and life-threatening diseases, poses a significant challenge for healthcare delivery. This issue is particularly acute in cardiology, where timely, accurate management determines outcomes. We explored the potential of AMIE (Articulate Medical Intelligence Explorer), a large language model (LLM)-based experimental AI system optimized for diagnostic dialogue, to augment and support clinical decision-making in this challenging context. We curated a real-world dataset of 204 complex cases from a subspecialist cardiology practice, including results for electrocardiograms, echocardiograms, cardiac MRI, genetic tests, and cardiopulmonary stress tests. We developed a ten-domain evaluation rubric used by subspecialists to evaluate the quality of diagnosis and clinical management plans produced by general cardiologists or AMIE, the latter enhanced with web-search and self-critique capabilities. AMIE was rated superior to general cardiologists for 5 of the 10 domains (with preference ranging from 9% to 20%), and equivalent for the rest. Access to AMIE's response improved cardiologists' overall response quality in 63.7% of cases while lowering quality in just 3.4%. Cardiologists' responses with access to AMIE were superior to cardiologist responses without access to AMIE for all 10 domains. Qualitative examination suggests that AMIE and general cardiologists could complement each other, with AMIE being thorough and sensitive while general cardiologists are concise and specific. Overall, our results suggest that specialized medical LLMs have the potential to augment general cardiologists' capabilities by bridging gaps in subspecialty expertise, though further research and validation are essential for wide clinical utility.<|reference_end|>
arxiv
@article{o'sullivan2024towards, title={Towards Democratization of Subspeciality Medical Expertise}, author={Jack W. O'Sullivan, Anil Palepu, Khaled Saab, Wei-Hung Weng, Yong Cheng, Emily Chu, Yaanik Desai, Aly Elezaby, Daniel Seung Kim, Roy Lan, Wilson Tang, Natalie Tapaskar, Victoria Parikh, Sneha S. Jain, Kavita Kulkarni, Philip Mansfield, Dale Webster, Juraj Gottweis, Joelle Barral, Mike Schaekermann, Ryutaro Tanno, S. Sara Mahdavi, Vivek Natarajan, Alan Karthikesalingam, Euan Ashley, Tao Tu}, journal={arXiv preprint arXiv:2410.03741}, year={2024}, archivePrefix={arXiv}, eprint={2410.03741}, primaryClass={cs.HC cs.AI} }
o'sullivan2024towards
arxiv-665799
2410.03742
Beyond Scalar Reward Model: Learning Generative Judge from Preference Data
<|reference_start|>Beyond Scalar Reward Model: Learning Generative Judge from Preference Data: Learning from preference feedback is a common practice for aligning large language models (LLMs) with human values. Conventionally, preference data is learned and encoded into a scalar reward model that connects a value head to an LLM to produce a scalar score as preference or reward. However, scalar models lack interpretability and are known to be susceptible to biases in datasets. This paper investigates leveraging the generation capability of LLMs to address both limitations in one shot. Specifically, we prompt the pre-trained LLM to generate positive and negative judgments, both supported with rationales in natural language form. The self-generated contrastive judgment pairs are then used to train the generative judge with Direct Preference Optimization (DPO). Training the generative Judge with self-generated Contrastive judgments (Con-J) ensures natural interpretability through the rationales that accompany the judgments, as well as high robustness against bias without the need for an additional reward head. Experimental results show that the performance of Con-J is comparable to that of a scalar reward model trained on the same collection of preference data, and demonstrate its superior interpretability and robustness in encoding human preferences.<|reference_end|>
arxiv
@article{ye2024beyond, title={Beyond Scalar Reward Model: Learning Generative Judge from Preference Data}, author={Ziyi Ye, Xiangsheng Li, Qiuchi Li, Qingyao Ai, Yujia Zhou, Wei Shen, Dong Yan, Yiqun Liu}, journal={arXiv preprint arXiv:2410.03742}, year={2024}, archivePrefix={arXiv}, eprint={2410.03742}, primaryClass={cs.CL cs.AI cs.LG} }
ye2024beyond
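The Con-J recipe in the abstract, prompting the model for a positive and a negative judgment with rationales and then treating the pair as DPO training data, can be sketched compactly. The `generate` function below is a hypothetical stand-in for any LLM call and the prompts are illustrative; only the DPO loss formula is standard.

```python
# Sketch of building Con-J-style contrastive judgment pairs and the standard
# DPO objective. `generate` is a placeholder for an actual LLM API call.
import torch.nn.functional as F

def generate(prompt: str) -> str:
    raise NotImplementedError("stand-in for an LLM API call")

def build_preference_pair(question, answer_a, answer_b, human_prefers_a):
    ctx = (f"Question: {question}\nAnswer A: {answer_a}\n"
           f"Answer B: {answer_b}\n")
    pos = generate(ctx + "Explain step by step why Answer A is better, "
                         "then output the judgment: A.")
    neg = generate(ctx + "Explain step by step why Answer B is better, "
                         "then output the judgment: B.")
    # the judgment agreeing with the human label is 'chosen', the other 'rejected'
    return (pos, neg) if human_prefers_a else (neg, pos)

def dpo_loss(logp_chosen, logp_rejected, ref_chosen, ref_rejected, beta=0.1):
    # DPO on sequence log-probs under the trained judge and a frozen reference
    margin = beta * ((logp_chosen - ref_chosen) - (logp_rejected - ref_rejected))
    return -F.logsigmoid(margin).mean()
```

Because the judge emits the rationale alongside the verdict, interpretability falls out of the training format rather than requiring a separate reward head.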
arxiv-665800
2410.03743
Mitigating Training Imbalance in LLM Fine-Tuning via Selective Parameter Merging
<|reference_start|>Mitigating Training Imbalance in LLM Fine-Tuning via Selective Parameter Merging: Supervised fine-tuning (SFT) is crucial for adapting Large Language Models (LLMs) to specific tasks. In this work, we demonstrate that the order of training data can lead to significant training imbalances, potentially resulting in performance degradation. Consequently, we propose to mitigate this imbalance by merging SFT models fine-tuned with different data orders, thereby enhancing the overall effectiveness of SFT. Additionally, we introduce a novel technique, "parameter-selection merging," which outperforms traditional weighted-average methods on five datasets. Further, through analysis and ablation studies, we validate the effectiveness of our method and identify the sources of performance improvements.<|reference_end|>
arxiv
@article{ju2024mitigating, title={Mitigating Training Imbalance in LLM Fine-Tuning via Selective Parameter Merging}, author={Yiming Ju, Ziyi Ni, Xingrun Xing, Zhixiong Zeng, Hanyu Zhao, Siqi Fan, Zheng Zhang}, journal={arXiv preprint arXiv:2410.03743}, year={2024}, archivePrefix={arXiv}, eprint={2410.03743}, primaryClass={cs.CL cs.AI cs.LG} }
ju2024mitigating
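To make "parameter-selection merging" concrete, here is one plausible reading contrasted with the weighted-average baseline: each parameter element is taken from a single fine-tuned model rather than averaged across all of them. The element-wise random selection rule is an assumption for illustration; the abstract does not disclose the paper's actual selection criterion.

```python
# Weighted averaging vs. one hypothetical parameter-selection merge over
# models fine-tuned on the same data in different orders.
import torch

def weighted_average(state_dicts, weights):
    return {k: sum(w * sd[k].float() for w, sd in zip(weights, state_dicts))
            for k in state_dicts[0]}

def parameter_selection_merge(state_dicts, seed=0):
    g = torch.Generator().manual_seed(seed)
    merged = {}
    for k in state_dicts[0]:
        stacked = torch.stack([sd[k].float() for sd in state_dicts])  # (K, ...)
        # per-element index of the source model to copy from (assumed rule)
        choice = torch.randint(len(state_dicts), stacked.shape[1:], generator=g)
        merged[k] = torch.gather(stacked, 0, choice.unsqueeze(0)).squeeze(0)
    return merged

# usage sketch:
# merged = parameter_selection_merge([m.state_dict() for m in sft_models])
```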