| Workload | samsung SM-A536N | samsung SM-A536N | Ratio |
|---|---|---|---|
| Inference Score | 275 | 275 | 100.0% |
| Image Classification (F32) | 193 | 193 | 100.0% |
| Image Classification (F16) | 192 | 192 | 100.0% |
| Image Classification (I8) | 176 | 176 | 100.0% |
| Image Segmentation (F32) | 428 | 428 | 100.0% |
| Image Segmentation (F16) | 430 | 430 | 100.0% |
| Image Segmentation (I8) | 532 | 532 | 100.0% |
| Pose Estimation (F32) | 142 | 142 | 100.0% |
| Pose Estimation (F16) | 142 | 142 | 100.0% |
| Pose Estimation (I8) | 127 | 127 | 100.0% |
| Object Detection (F32) | 277 | 277 | 100.0% |
| Object Detection (F16) | 278 | 278 | 100.0% |
| Object Detection (I8) | 261 | 261 | 100.0% |
| Face Detection (F32) | 380 | 380 | 100.0% |
| Face Detection (F16) | 367 | 367 | 100.0% |
| Face Detection (I8) | 367 | 367 | 100.0% |
| Text Classification (F32) | 335 | 335 | 100.0% |
| Text Classification (F16) | 335 | 335 | 100.0% |
| Machine Translation (F32) | 345 | 345 | 100.0% |
| Machine Translation (F16) | 342 | 342 | 100.0% |