--- dataset_info: features: - name: id dtype: string - name: input dtype: string - name: target dtype: string splits: - name: train num_bytes: 13402 num_examples: 35 - name: validation num_bytes: 291387 num_examples: 765 - name: test num_bytes: 1414977 num_examples: 3722 download_size: 730958 dataset_size: 1719766 configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: test path: data/test-* --- # ViGEText_17to23 dataset Evaluating the Symbol Binding Ability of Large Language Models for Multiple-Choice Questions in Vietnamese General Education: https://github.com/uitnlp/ViGEText_17to23 ``` @inproceedings{10.1145/3628797.3628837, author = {Nguyen, Duc-Vu and Nguyen, Quoc-Nam}, title = {Evaluating the Symbol Binding Ability of Large Language Models for Multiple-Choice Questions in Vietnamese General Education}, year = {2023}, isbn = {9798400708916}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, url = {https://doi.org/10.1145/3628797.3628837}, doi = {10.1145/3628797.3628837}, abstract = {In this paper, we evaluate the ability of large language models (LLMs) to perform multiple choice symbol binding (MCSB) for multiple choice question answering (MCQA) tasks in zero-shot, one-shot, and few-shot settings. We focus on Vietnamese, with fewer challenging MCQA datasets than in English. The two existing datasets, ViMMRC 1.0 and ViMMRC 2.0, focus on literature. Recent research in Vietnamese natural language processing (NLP) has focused on the Vietnamese National High School Graduation Examination (VNHSGE) from 2019 to 2023 to evaluate ChatGPT. However, these studies have mainly focused on how ChatGPT solves the VNHSGE step by step. We aim to create a novel and high-quality dataset by providing structured guidelines for typing LaTeX formulas for mathematics, physics, chemistry, and biology. 
This dataset can be used to evaluate the MCSB ability of LLMs and smaller language models (LMs) because it is typed in a strict LaTeX style. We determine the most probable character answer (A, B, C, or D) based on context, instead of finding the answer step by step as in previous Vietnamese works. This reduces computational costs and accelerates the evaluation of LLMs. Our evaluation of six well-known LLMs, namely BLOOMZ-7.1B-MT, LLaMA-2-7B, LLaMA-2-70B, GPT-3, GPT-3.5, and GPT-4.0, on the ViMMRC 1.0 and ViMMRC 2.0 benchmarks and our proposed dataset shows promising results on the MCSB ability of LLMs for Vietnamese. The dataset is available for research purposes only.}, booktitle = {Proceedings of the 12th International Symposium on Information and Communication Technology}, pages = {379–386}, numpages = {8}, keywords = {Analysis of Language Models, Multiple Choice Symbol Binding, Multiple Choice Question Answering, Language Modeling}, location = {Ho Chi Minh City, Vietnam}, series = {SOICT '23} } ```