# Run demo app to verify functionality and download the sample sources:
/opt/intel/computer_vision_sdk/deployment_tools/demo/demo_squeezenet_download_convert_run.sh -d MYRIAD

# Download the AlexNet model:
cd /opt/intel/computer_vision_sdk/deployment_tools/model_downloader
./downloader.py --name alexnet

# Use the Model Optimizer to convert the model to Inference Engine format (*.xml + *.bin):
mkdir -p /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/alexnet/FP16
mkdir -p /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/alexnet/FP32
cd /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer
./mo.py --data_type=FP16 --input_model ../model_downloader/classification/alexnet/caffe/alexnet.caffemodel -o ./alexnet/FP16/
./mo.py --data_type=FP32 --input_model ../model_downloader/classification/alexnet/caffe/alexnet.caffemodel -o ./alexnet/FP32/

# Compile the benchmark app from source:
cd ~/inference_engine_samples/benchmark_app/
make

# Run benchmarks on MYRIAD (FP16) and CPU (FP32) for comparison:
cd ~/inference_engine_samples/intel64/Release/
./benchmark_app -d MYRIAD -i /opt/intel/computer_vision_sdk/deployment_tools/demo/car.png -m /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/alexnet/FP16/alexnet.xml
./benchmark_app -d CPU -i /opt/intel/computer_vision_sdk/deployment_tools/demo/car.png -m /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/alexnet/FP32/alexnet.xml
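
# Optional sanity check (a minimal sketch, assuming the stock classification_sample binary was
# built alongside benchmark_app in ~/inference_engine_samples/intel64/Release/ by the demo script
# above): run the converted IRs through the sample classifier and confirm both devices label
# car.png sensibly before comparing the throughput numbers.
cd ~/inference_engine_samples/intel64/Release/
./classification_sample -d MYRIAD -i /opt/intel/computer_vision_sdk/deployment_tools/demo/car.png -m /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/alexnet/FP16/alexnet.xml
./classification_sample -d CPU -i /opt/intel/computer_vision_sdk/deployment_tools/demo/car.png -m /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/alexnet/FP32/alexnet.xml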