From 887ceff2e1dd4629f487e43465254f5bed3acae9 Mon Sep 17 00:00:00 2001
From: Fabian Grob
Date: Thu, 12 Oct 2023 16:25:07 +0200
Subject: [PATCH] fixes config notation, tested layerwise with extra bit width options

---
 .../imagenet_classification/ptq/ptq_evaluate.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/brevitas_examples/imagenet_classification/ptq/ptq_evaluate.py b/src/brevitas_examples/imagenet_classification/ptq/ptq_evaluate.py
index b15acdb87..dd49e8531 100644
--- a/src/brevitas_examples/imagenet_classification/ptq/ptq_evaluate.py
+++ b/src/brevitas_examples/imagenet_classification/ptq/ptq_evaluate.py
@@ -169,7 +169,7 @@ parser.add_argument(
     '--gpfq-p', default=0.25, type=float, help='P parameter for GPFQ (default: 0.25)')
 parser.add_argument(
-    '--quant_format',
+    '--quant-format',
     default='int',
     choices=['int', 'float'],
     help='Quantization format to use for weights and activations (default: int)')
@@ -231,10 +231,10 @@ def main():
         f"{args.model_name}_"
         f"{args.target_backend}_"
         f"{args.quant_format}_"
-        f"{args.weight_mantissa_bit_width if args.quant_format == 'float' else ''}_"
-        f"{args.weight_exponent_bit_width if args.quant_format == 'float' else ''}_"
-        f"{args.act_mantissa_bit_width if args.quant_format == 'float' else ''}_"
-        f"{args.act_exponent_bit_width if args.quant_format == 'float' else ''}_"
+        f"{str(args.weight_mantissa_bit_width) + '_' if args.quant_format == 'float' else ''}"
+        f"{str(args.weight_exponent_bit_width) + '_' if args.quant_format == 'float' else ''}"
+        f"{str(args.act_mantissa_bit_width) + '_' if args.quant_format == 'float' else ''}"
+        f"{str(args.act_exponent_bit_width) + '_' if args.quant_format == 'float' else ''}"
         f"{args.scale_factor_type}_"
         f"a{args.act_bit_width}"
         f"w{args.weight_bit_width}_"
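
For context, the second hunk only changes where the separating underscore in the generated config name is emitted: with '--quant-format int' the old f-strings still printed their trailing underscores, leaving empty '__' segments, while the new form appends the underscore only when a float bit width is actually written. Below is a minimal sketch of that difference, not part of the patch, using a hypothetical SimpleNamespace in place of the parsed argparse namespace and showing only the two weight bit-width segments.

# Sketch of the behavior changed by the second hunk (hypothetical values).
from types import SimpleNamespace


def old_fields(args):
    # Old notation: the trailing '_' sits outside the conditional, so an
    # empty placeholder still leaves a stray underscore for the 'int' format.
    return (
        f"{args.weight_mantissa_bit_width if args.quant_format == 'float' else ''}_"
        f"{args.weight_exponent_bit_width if args.quant_format == 'float' else ''}_")


def new_fields(args):
    # New notation: the underscore is appended only when a value is emitted.
    return (
        f"{str(args.weight_mantissa_bit_width) + '_' if args.quant_format == 'float' else ''}"
        f"{str(args.weight_exponent_bit_width) + '_' if args.quant_format == 'float' else ''}")


int_args = SimpleNamespace(
    quant_format='int', weight_mantissa_bit_width=4, weight_exponent_bit_width=3)
float_args = SimpleNamespace(
    quant_format='float', weight_mantissa_bit_width=4, weight_exponent_bit_width=3)

print(repr(old_fields(int_args)))    # '__'   -> spurious separators for the 'int' format
print(repr(new_fields(int_args)))    # ''     -> bit-width fields dropped cleanly
print(repr(old_fields(float_args)))  # '4_3_' -> the 'float' path is unchanged
print(repr(new_fields(float_args)))  # '4_3_'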