Dataset: imdb
Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: expert-generated
Annotations Creators: expert-generated
Source Datasets: original
Commit 610bdae (parent: 9c6ede8), committed by albertvillanova

Convert dataset to Parquet.
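Converting to Parquet means the Hub can serve the splits directly from the files under plain_text/ instead of running the original loading script. As a rough sketch of what such a conversion amounts to (not necessarily the exact tooling used for this commit, and assuming the `datasets` library with the repository id "imdb"), each split is loaded and written back out as a Parquet shard:

```python
# Rough sketch of a Parquet conversion; shard names mirror the files added in this commit.
# Assumes the `datasets` library and the Hub repository id "imdb".
from datasets import load_dataset

for split in ("train", "test", "unsupervised"):
    ds = load_dataset("imdb", split=split)
    ds.to_parquet(f"plain_text/{split}-00000-of-00001.parquet")
```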

README.md CHANGED
@@ -1,5 +1,4 @@
 ---
-pretty_name: IMDB
 annotations_creators:
 - expert-generated
 language_creators:
@@ -19,6 +18,40 @@ task_categories:
 task_ids:
 - sentiment-classification
 paperswithcode_id: imdb-movie-reviews
+pretty_name: IMDB
+dataset_info:
+  config_name: plain_text
+  features:
+  - name: text
+    dtype: string
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': neg
+          '1': pos
+  splits:
+  - name: train
+    num_bytes: 33432823
+    num_examples: 25000
+  - name: test
+    num_bytes: 32650685
+    num_examples: 25000
+  - name: unsupervised
+    num_bytes: 67106794
+    num_examples: 50000
+  download_size: 83446840
+  dataset_size: 133190302
+configs:
+- config_name: plain_text
+  data_files:
+  - split: train
+    path: plain_text/train-*
+  - split: test
+    path: plain_text/test-*
+  - split: unsupervised
+    path: plain_text/unsupervised-*
+  default: true
 train-eval-index:
 - config: plain_text
   task: text-classification
@@ -68,29 +101,6 @@ train-eval-index:
       name: Recall weighted
       args:
         average: weighted
-dataset_info:
-  features:
-  - name: text
-    dtype: string
-  - name: label
-    dtype:
-      class_label:
-        names:
-          0: neg
-          1: pos
-  config_name: plain_text
-  splits:
-  - name: train
-    num_bytes: 33432835
-    num_examples: 25000
-  - name: test
-    num_bytes: 32650697
-    num_examples: 25000
-  - name: unsupervised
-    num_bytes: 67106814
-    num_examples: 50000
-  download_size: 84125825
-  dataset_size: 133190346
 ---
 
 # Dataset Card for "imdb"
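The new `dataset_info` and `configs` blocks are what Parquet-backed loading relies on: `data_files` maps each split to its shard pattern, and `class_label` pins the 0/neg, 1/pos mapping. A small sketch of how that metadata surfaces through the library (repository id again assumed to be "imdb"):

```python
# Sketch: the ClassLabel declared in the card's `dataset_info` is exposed on the loaded split.
from datasets import load_dataset

test = load_dataset("imdb", split="test")
label = test.features["label"]
print(label.names)           # ['neg', 'pos']
print(label.int2str(1))      # 'pos'
print(label.str2int("neg"))  # 0
```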
dataset_infos.json CHANGED
@@ -1 +1,60 @@
- {"plain_text": {"description": "Large Movie Review Dataset.\nThis is a dataset for binary sentiment classification containing substantially more data than previous benchmark datasets. We provide a set of 25,000 highly polar movie reviews for training, and 25,000 for testing. There is additional unlabeled data for use as well.", "citation": "@InProceedings{maas-EtAl:2011:ACL-HLT2011,\n author = {Maas, Andrew L. and Daly, Raymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y. and Potts, Christopher},\n title = {Learning Word Vectors for Sentiment Analysis},\n booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies},\n month = {June},\n year = {2011},\n address = {Portland, Oregon, USA},\n publisher = {Association for Computational Linguistics},\n pages = {142--150},\n url = {http://www.aclweb.org/anthology/P11-1015}\n}\n", "homepage": "http://ai.stanford.edu/~amaas/data/sentiment/", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["neg", "pos"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "text-classification", "text_column": "text", "label_column": "label", "labels": ["neg", "pos"]}], "builder_name": "imdb", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 33432835, "num_examples": 25000, "dataset_name": "imdb"}, "test": {"name": "test", "num_bytes": 32650697, "num_examples": 25000, "dataset_name": "imdb"}, "unsupervised": {"name": "unsupervised", "num_bytes": 67106814, "num_examples": 50000, "dataset_name": "imdb"}}, "download_checksums": {"http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz": {"num_bytes": 84125825, "checksum": "c40f74a18d3b61f90feba1e17730e0d38e8b97c05fde7008942e91923d1658fe"}}, "download_size": 84125825, "post_processing_size": null, "dataset_size": 133190346, "size_in_bytes": 217316171}}
+ {
+   "plain_text": {
+     "description": "Large Movie Review Dataset.\nThis is a dataset for binary sentiment classification containing substantially more data than previous benchmark datasets. We provide a set of 25,000 highly polar movie reviews for training, and 25,000 for testing. There is additional unlabeled data for use as well.",
+     "citation": "@InProceedings{maas-EtAl:2011:ACL-HLT2011,\n author = {Maas, Andrew L. and Daly, Raymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y. and Potts, Christopher},\n title = {Learning Word Vectors for Sentiment Analysis},\n booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies},\n month = {June},\n year = {2011},\n address = {Portland, Oregon, USA},\n publisher = {Association for Computational Linguistics},\n pages = {142--150},\n url = {http://www.aclweb.org/anthology/P11-1015}\n}\n",
+     "homepage": "http://ai.stanford.edu/~amaas/data/sentiment/",
+     "license": "",
+     "features": {
+       "text": {
+         "dtype": "string",
+         "_type": "Value"
+       },
+       "label": {
+         "names": [
+           "neg",
+           "pos"
+         ],
+         "_type": "ClassLabel"
+       }
+     },
+     "task_templates": [
+       {
+         "task": "text-classification",
+         "label_column": "label"
+       }
+     ],
+     "builder_name": "imdb",
+     "dataset_name": "imdb",
+     "config_name": "plain_text",
+     "version": {
+       "version_str": "1.0.0",
+       "description": "",
+       "major": 1,
+       "minor": 0,
+       "patch": 0
+     },
+     "splits": {
+       "train": {
+         "name": "train",
+         "num_bytes": 33432823,
+         "num_examples": 25000,
+         "dataset_name": null
+       },
+       "test": {
+         "name": "test",
+         "num_bytes": 32650685,
+         "num_examples": 25000,
+         "dataset_name": null
+       },
+       "unsupervised": {
+         "name": "unsupervised",
+         "num_bytes": 67106794,
+         "num_examples": 50000,
+         "dataset_name": null
+       }
+     },
+     "download_size": 83446840,
+     "dataset_size": 133190302,
+     "size_in_bytes": 216637142
+   }
+ }
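The refreshed numbers (per-split num_bytes, download_size, dataset_size) can be cross-checked against what the library itself reports; a minimal sketch, again assuming the repository id "imdb":

```python
# Sketch: compare the sizes recorded in dataset_infos.json / the README metadata
# with what the dataset builder reports.
from datasets import load_dataset_builder

info = load_dataset_builder("imdb").info
for name, split in info.splits.items():
    print(name, split.num_examples, split.num_bytes)
print("download_size:", info.download_size)
print("dataset_size:", info.dataset_size)
```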
plain_text/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b52e26e2f872d282ffac460bf9770b25ac6f102cda0e6ca7158df98c94e8b3da
+ size 20470363

plain_text/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db47d16b5c297cc0dd625e519c81319c24c9149e70e8496de5475f6fa928342c
+ size 20979968

plain_text/unsupervised-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:74d14fbfcbb39fb7d299c38ca9f0ae6d231bf97108da85d620027ba437b6d52e
+ size 41996509
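The Parquet shards themselves are stored with Git LFS, so the diff only shows pointer files: `oid` is the SHA-256 of the shard and `size` is its byte count. A downloaded shard can be sanity-checked against its pointer with plain Python (the local path below is hypothetical):

```python
# Sketch: verify a downloaded Parquet shard against its Git LFS pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# oid taken from the test shard's pointer above; adjust the path to the local copy.
expected = "b52e26e2f872d282ffac460bf9770b25ac6f102cda0e6ca7158df98c94e8b3da"
print(sha256_of("plain_text/test-00000-of-00001.parquet") == expected)
```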