Sample Code

The following sections list the code samples that are part of the pylon SDK.

The pylon samples can be found under <SDK ROOT>\Samples. A GNU makefile is provided for each sample.

Include Files Used by Samples

The following include files are used by the samples shown below.

CameraEventPrinter.h

// Contains a Camera Event Handler that prints a message for each event method call.
#ifndef INCLUDED_CAMERAEVENTPRINTER_H_4683453
#define INCLUDED_CAMERAEVENTPRINTER_H_4683453
#include <pylon/PylonIncludes.h>
#include <iostream>
namespace Pylon
{
class CInstantCamera;
class CCameraEventPrinter : public CCameraEventHandler
{
public:
virtual void OnCameraEvent( CInstantCamera& camera, intptr_t userProvidedId, GenApi::INode* pNode)
{
std::cout << "OnCameraEvent event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
std::cout << "User provided ID: " << userProvidedId << std::endl;
std::cout << "Event data node name: " << pNode->GetName() << std::endl;
GenApi::CValuePtr ptrValue( pNode );
if ( ptrValue.IsValid() )
{
std::cout << "Event node data: " << ptrValue->ToString() << std::endl;
}
std::cout << std::endl;
}
};
}
#endif /* INCLUDED_CAMERAEVENTPRINTER_H_4683453 */
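
A handler like this is meant to be registered with an Instant Camera object; a minimal usage sketch, assuming a camera object set up for camera events as in the Grab_CameraEvents sample below (the user-provided ID 100 is arbitrary):

// Sketch: print all data of the Exposure End event when it is received.
camera.GrabCameraEvents = true;
camera.RegisterCameraEventHandler( new CCameraEventPrinter, "ExposureEndEventData", 100, RegistrationMode_Append, Cleanup_Delete);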

ConfigurationEventPrinter.h

// Contains a Configuration Event Handler that prints a message for each event method call.
#ifndef INCLUDED_CONFIGURATIONEVENTPRINTER_H_663006
#define INCLUDED_CONFIGURATIONEVENTPRINTER_H_663006
#include <pylon/PylonIncludes.h>
#include <iostream>
namespace Pylon
{
class CInstantCamera;
class CConfigurationEventPrinter : public CConfigurationEventHandler
{
public:
void OnAttach( CInstantCamera& /*camera*/)
{
std::cout << "OnAttach event" << std::endl;
}
void OnAttached( CInstantCamera& camera)
{
std::cout << "OnAttached event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
}
void OnOpen( CInstantCamera& camera)
{
std::cout << "OnOpen event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
}
void OnOpened( CInstantCamera& camera)
{
std::cout << "OnOpened event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
}
void OnGrabStart( CInstantCamera& camera)
{
std::cout << "OnGrabStart event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
}
void OnGrabStarted( CInstantCamera& camera)
{
std::cout << "OnGrabStarted event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
}
void OnGrabStop( CInstantCamera& camera)
{
std::cout << "OnGrabStop event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
}
void OnGrabStopped( CInstantCamera& camera)
{
std::cout << "OnGrabStopped event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
}
void OnClose( CInstantCamera& camera)
{
std::cout << "OnClose event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
}
void OnClosed( CInstantCamera& camera)
{
std::cout << "OnClosed event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
}
void OnDestroy( CInstantCamera& camera)
{
std::cout << "OnDestroy event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
}
void OnDestroyed( CInstantCamera& /*camera*/)
{
std::cout << "OnDestroyed event" << std::endl;
}
void OnDetach( CInstantCamera& camera)
{
std::cout << "OnDetach event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
}
void OnDetached( CInstantCamera& camera)
{
std::cout << "OnDetached event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
}
void OnGrabError( CInstantCamera& camera, const String_t errorMessage)
{
std::cout << "OnGrabError event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
std::cout << "Error Message: " << errorMessage << std::endl;
}
void OnCameraDeviceRemoved( CInstantCamera& camera)
{
std::cout << "OnCameraDeviceRemoved event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
}
};
}
#endif /* INCLUDED_CONFIGURATIONEVENTPRINTER_H_663006 */
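
The printer is attached via RegisterConfiguration; a minimal sketch, assuming an existing CInstantCamera object named camera:

// Sketch: print a message for every configuration event issued by the camera object.
camera.RegisterConfiguration( new CConfigurationEventPrinter, RegistrationMode_Append, Cleanup_Delete);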

ImageEventPrinter.h

// Contains an Image Event Handler that prints a message for each event method call.
#ifndef INCLUDED_IMAGEEVENTPRINTER_H_7884943
#define INCLUDED_IMAGEEVENTPRINTER_H_7884943
#include <pylon/PylonIncludes.h>
#include <iostream>
namespace Pylon
{
class CInstantCamera;
class CImageEventPrinter : public CImageEventHandler
{
public:
virtual void OnImagesSkipped( CInstantCamera& camera, size_t countOfSkippedImages)
{
std::cout << "OnImagesSkipped event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
std::cout << countOfSkippedImages << " images have been skipped." << std::endl;
std::cout << std::endl;
}
virtual void OnImageGrabbed( CInstantCamera& camera, const CGrabResultPtr& ptrGrabResult)
{
std::cout << "OnImageGrabbed event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
// Image grabbed successfully?
if (ptrGrabResult->GrabSucceeded())
{
std::cout << "SizeX: " << ptrGrabResult->GetWidth() << std::endl;
std::cout << "SizeY: " << ptrGrabResult->GetHeight() << std::endl;
const uint8_t *pImageBuffer = (uint8_t *) ptrGrabResult->GetBuffer();
std::cout << "Gray value of first pixel: " << (uint32_t) pImageBuffer[0] << std::endl;
std::cout << std::endl;
}
else
{
std::cout << "Error: " << ptrGrabResult->GetErrorCode() << " " << ptrGrabResult->GetErrorDescription() << std::endl;
}
}
};
}
#endif /* INCLUDED_IMAGEEVENTPRINTER_H_7884943 */
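
An image event handler is attached via RegisterImageEventHandler; a minimal sketch, assuming an existing CInstantCamera object named camera:

// Sketch: print information about every grabbed image.
camera.RegisterImageEventHandler( new CImageEventPrinter, RegistrationMode_Append, Cleanup_Delete);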

PixelFormatAndAoiConfiguration.h

// Contains a configuration that sets pixel data format and Image AOI.
#ifndef INCLUDED_PIXELFORMATANDAOICONFIGURATION_H_00104928
#define INCLUDED_PIXELFORMATANDAOICONFIGURATION_H_00104928
#include <pylon/PylonIncludes.h>
namespace Pylon
{
class CInstantCamera;
}
class CPixelFormatAndAoiConfiguration : public Pylon::CConfigurationEventHandler
{
public:
void OnOpened( Pylon::CInstantCamera& camera)
{
try
{
// Allow all the names in the namespace GenApi to be used without qualification.
using namespace GenApi;
// Get the camera control object.
INodeMap &control = camera.GetNodeMap();
// Get the parameters for setting the image area of interest (Image AOI).
const CIntegerPtr width = control.GetNode("Width");
const CIntegerPtr height = control.GetNode("Height");
const CIntegerPtr offsetX = control.GetNode("OffsetX");
const CIntegerPtr offsetY = control.GetNode("OffsetY");
// Maximize the Image AOI.
if (IsWritable(offsetX))
{
offsetX->SetValue(offsetX->GetMin());
}
if (IsWritable(offsetY))
{
offsetY->SetValue(offsetY->GetMin());
}
width->SetValue(width->GetMax());
height->SetValue(height->GetMax());
// Set the pixel data format.
CEnumerationPtr(control.GetNode("PixelFormat"))->FromString("Mono8");
}
catch (const GenericException& e)
{
throw RUNTIME_EXCEPTION( "Could not apply configuration. const GenericException caught in OnOpened method msg=%hs", e.what());
}
}
};
#endif /* INCLUDED_PIXELFORMATANDAOICONFIGURATION_H_00104928 */
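
Because the configuration overrides OnOpened, it is applied automatically when the camera is opened; a minimal sketch, assuming an existing CInstantCamera object named camera:

// Sketch: the pixel format and AOI are set as soon as camera.Open() is called.
camera.RegisterConfiguration( new CPixelFormatAndAoiConfiguration, RegistrationMode_Append, Cleanup_Delete);
camera.Open();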

SampleImageCreator.h

// Contains functions for creating sample images.
#ifndef INCLUDED_SAMPLEIMAGECREATOR_H_2792867
#define INCLUDED_SAMPLEIMAGECREATOR_H_2792867
#include <pylon/PylonImage.h>
#include <pylon/Pixel.h>
#include <pylon/ImageFormatConverter.h>
namespace SampleImageCreator
{
Pylon::CPylonImage CreateJuliaFractal( Pylon::EPixelType pixelType, uint32_t width, uint32_t height)
{
// Allow all the names in the namespace Pylon to be used without qualification.
using namespace Pylon;
// Define Constants.
static const SRGB8Pixel palette[]=
{
{0, 28, 50}, {0, 42, 75}, {0, 56, 100}, {0, 70, 125}, {0, 84, 150},
{0, 50, 0}, {0, 100, 0}, {0, 150, 0}, {0, 200, 0}, {0, 250, 0},
{50, 0, 0}, {100, 0, 0}, {150, 0, 0}, {200, 0, 0}, {250, 0, 0}
};
uint32_t numColors = sizeof( palette) / sizeof( palette[0]);
const double cX = -0.735;
const double cY = 0.11;
const double cMaxX = 1.6;
const double cMinX = -1.6;
const double cMaxY = 1;
const double cMinY = -1;
const uint32_t cMaxIterations = 50;
// Create image.
CPylonImage juliaFractal( CPylonImage::Create( PixelType_RGB8packed, width, height));
// Get the pointer to the first pixel.
SRGB8Pixel* pCurrentPixel = (SRGB8Pixel*) juliaFractal.GetBuffer();
// Compute the fractal.
for ( uint32_t pixelY = 0; pixelY < height; ++pixelY )
{
for ( uint32_t pixelX = 0; pixelX < width; ++pixelX, ++pCurrentPixel )
{
long double x = ((cMaxX-cMinX) / width) * pixelX + cMinX;
long double y = cMaxY - pixelY * ((cMaxY-cMinY) / height);
long double xd = 0;
long double yd = 0;
uint32_t i = 0;
for(; i < cMaxIterations; ++i)
{
xd = x * x - y * y + cX;
yd = 2 * x * y + cY;
x = xd;
y = yd;
if ( (x * x + y * y) > 4 )
{
break;
}
}
if ( i >= cMaxIterations)
{
*pCurrentPixel = palette[0];
}
else
{
*pCurrentPixel = palette[ i % numColors ];
}
}
}
// Convert the image to the target format if needed.
if ( juliaFractal.GetPixelType() != pixelType)
{
CImageFormatConverter converter;
converter.OutputPixelFormat = pixelType;
converter.Convert( juliaFractal, CPylonImage( juliaFractal));
}
// Return the image.
return juliaFractal;
}
Pylon::CPylonImage CreateMandelbrotFractal( Pylon::EPixelType pixelType, uint32_t width, uint32_t height)
{
// Allow all the names in the namespace Pylon to be used without qualification.
using namespace Pylon;
// Define constants.
static const SRGB8Pixel palette[]=
{
{0, 28, 50}, {0, 42, 75}, {0, 56, 100}, {0, 70, 125}, {0, 84, 150},
{0, 50, 0}, {0, 100, 0}, {0, 150, 0}, {0, 200, 0}, {0, 250, 0},
{50, 0, 0}, {100, 0, 0}, {150, 0, 0}, {200, 0, 0}, {250, 0, 0}
};
uint32_t numColors = sizeof( palette) / sizeof( palette[0]);
const double cMaxX = 1.0;
const double cMinX = -2.0;
const double cMaxY = 1.2;
const double cMinY = -1.2;
const uint32_t cMaxIterations = 50;
// Create image.
CPylonImage mandelbrotFractal( CPylonImage::Create( PixelType_RGB8packed, width, height));
// Get the pointer to the first pixel.
SRGB8Pixel* pCurrentPixel = (SRGB8Pixel*) mandelbrotFractal.GetBuffer();
// Compute the fractal.
for ( uint32_t pixelY = 0; pixelY < height; ++pixelY )
{
for ( uint32_t pixelX = 0; pixelX < width; ++pixelX, ++pCurrentPixel )
{
long double xStart = ((cMaxX-cMinX) / width) * pixelX + cMinX;
long double yStart = cMaxY - pixelY * ((cMaxY-cMinY) / height);
long double x = xStart;
long double y = yStart;
long double xd = 0;
long double yd = 0;
uint32_t i = 0;
for(; i < cMaxIterations; ++i)
{
xd = x * x - y * y + xStart;
yd = 2 * x * y + yStart;
x = xd;
y = yd;
if ( (x * x + y * y) > 4 )
{
break;
}
}
if ( i >= cMaxIterations)
{
*pCurrentPixel = palette[0];
}
else
{
*pCurrentPixel = palette[ i % numColors ];
}
}
}
// Convert the image to the target format if needed.
if ( mandelbrotFractal.GetPixelType() != pixelType)
{
CImageFormatConverter converter;
converter.OutputPixelFormat = pixelType;
converter.Convert( mandelbrotFractal, CPylonImage( mandelbrotFractal));
}
// Return the image.
return mandelbrotFractal;
}
}
#endif /* INCLUDED_SAMPLEIMAGECREATOR_H_2792867 */
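
The creator functions return a CPylonImage that can be used anywhere a pylon image is expected; a minimal sketch that writes a fractal to disk, assuming CImagePersistence from <pylon/ImagePersistence.h> and TIFF support on the target platform:

// Sketch: create a 640 x 480 Mono8 test image and save it to a file.
Pylon::CPylonImage image = SampleImageCreator::CreateMandelbrotFractal( Pylon::PixelType_Mono8, 640, 480);
Pylon::CImagePersistence::Save( Pylon::ImageFileFormat_Tiff, "MandelbrotFractal.tiff", image);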

DeviceRemovalHandling

// DeviceRemovalHandling.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample program demonstrates how to be informed about the removal of a camera device.
It also shows how to reconnect to a removed device.
Attention:
If you build this sample and run it under a debugger using a GigE camera device, pylon will set the heartbeat
timeout to 5 minutes. This is done to allow debugging and single stepping of the code without
the camera thinking we're hung because we don't send any heartbeats.
Accordingly, it would take 5 minutes for the application to notice the disconnection of a GigE device.
To work around this, the CHeartbeatHelper class is used to control the HeartbeatTimeout.
Just before waiting for the removal, it will set the timeout to 1000 ms.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
// Include files used by samples.
#include "../include/ConfigurationEventPrinter.h"
// Namespace for using pylon objects.
using namespace Pylon;
// Namespace for using cout.
using namespace std;
// Simple helper class to set the HeartbeatTimeout safely.
class CHeartbeatHelper
{
public:
explicit CHeartbeatHelper(CInstantCamera& camera)
: m_pHeartbeatTimeout(NULL)
{
// m_pHeartbeatTimeout may be NULL
m_pHeartbeatTimeout = camera.GetTLNodeMap().GetNode("HeartbeatTimeout");
}
bool SetValue(int64_t NewValue)
{
// Do nothing if no heartbeat feature is available.
if (!m_pHeartbeatTimeout.IsValid())
return false;
// Apply the increment and cut off invalid values if necessary.
int64_t correctedValue = NewValue - (NewValue % m_pHeartbeatTimeout->GetInc());
m_pHeartbeatTimeout->SetValue(correctedValue);
return true;
}
bool SetMax()
{
// Do nothing if no heartbeat feature is available.
if (!m_pHeartbeatTimeout.IsValid())
return false;
int64_t maxVal = m_pHeartbeatTimeout->GetMax();
return SetValue(maxVal);
}
protected:
GenApi::CIntegerPtr m_pHeartbeatTimeout; // Pointer to the node, will be NULL if no node exists.
};
// When using Device Specific Instant Camera classes there are specific Configuration event handler classes available which can be used, for example
// Pylon::CBaslerGigEConfigurationEventHandler or Pylon::CBasler1394ConfigurationEventHandler
//Example of a configuration event handler that handles device removal events.
class CSampleConfigurationEventHandler : public Pylon::CConfigurationEventHandler
{
public:
// This method is called from a different thread when the camera device removal has been detected.
void OnCameraDeviceRemoved( CInstantCamera& /*camera*/)
{
// Print two new lines, just for improving printed output.
cout << endl << endl;
cout << "CSampleConfigurationEventHandler::OnCameraDeviceRemoved called." << std::endl;
}
};
// Time to wait in quarters of seconds.
static const uint32_t c_loopCounterInitialValue = 20 * 4;
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Declare a local counter used for waiting.
int loopCount = 0;
// Get the transport layer factory.
CTlFactory& tlFactory = CTlFactory::GetInstance();
// Create an instant camera object with the camera device found first.
CInstantCamera camera( tlFactory.CreateFirstDevice());
// Print the camera information.
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
cout << "Friendly Name: " << camera.GetDeviceInfo().GetFriendlyName() << endl;
cout << "Full Name : " << camera.GetDeviceInfo().GetFullName() << endl;
cout << "SerialNumber : " << camera.GetDeviceInfo().GetSerialNumber() << endl;
cout << endl;
// For demonstration purposes only, register another configuration event handler that handles device removal.
camera.RegisterConfiguration( new CSampleConfigurationEventHandler, RegistrationMode_Append, Cleanup_Delete);
// For demonstration purposes only, add a sample configuration event handler to print out information
// about camera use.
camera.RegisterConfiguration( new CConfigurationEventPrinter, RegistrationMode_Append, Cleanup_Delete);
// Open the camera. Camera device removal is only detected while the camera is open.
camera.Open();
// Now, try to detect that the camera has been removed:
// Ask the user to disconnect a device
loopCount = c_loopCounterInitialValue;
cout << endl << "Please disconnect the device (timeout " << loopCount / 4 << "s) " << endl;
// Before testing the callbacks, we manually set the heartbeat timeout to a short value when using GigE cameras.
// Since for debug versions the heartbeat timeout has been set to 5 minutes, it would take up to 5 minutes
// until detection of the device removal.
CHeartbeatHelper heartbeatHelper(camera);
heartbeatHelper.SetValue(1000); // 1000 ms timeout
try
{
// Get a camera parameter using generic parameter access.
GenApi::CIntegerPtr width(camera.GetNodeMap().GetNode("Width"));
// The following loop accesses the camera. It could also be a loop that is
// grabbing images. The device removal is handled in the exception handler.
while ( loopCount > 0)
{
// Print a "." every few seconds to tell the user we're waiting for the callback.
if (--loopCount % 4 == 0)
{
cout << ".";
cout.flush();
}
// Change the width value in the camera depending on the loop counter.
// Any access to the camera like setting parameters or grabbing images
// will fail throwing an exception if the camera has been disconnected.
width->SetValue( width->GetMax() - (width->GetInc() * (loopCount % 2)));
}
}
catch (const GenericException &e)
{
if ( camera.IsCameraDeviceRemoved())
{
// The camera device has been removed. This caused the exception.
cout << endl;
cout << "The camera has been removed from the PC." << endl;
cout << "The camera device removal triggered an exception:" << endl
<< e.GetDescription() << endl;
}
else
{
// An unexpected error has occurred.
// In this example it is handled by exiting the program.
throw;
}
}
if ( !camera.IsCameraDeviceRemoved())
cout << endl << "Timeout expired" << endl;
// Now try to find the detached camera after it has been attached again:
// Create a device info object for remembering the camera properties.
CDeviceInfo info;
// Remember the camera properties that allow detecting the same camera again.
info.SetDeviceClass( camera.GetDeviceInfo().GetDeviceClass());
info.SetSerialNumber( camera.GetDeviceInfo().GetSerialNumber());
// Destroy the Pylon Device representing the detached camera device.
// It cannot be used anymore.
camera.DestroyDevice();
// Ask the user to connect the same device.
loopCount = c_loopCounterInitialValue;
cout << endl << "Please connect the same device to the PC again (timeout " << loopCount / 4 << "s) " << endl;
// Create a filter containing the CDeviceInfo object info which describes the properties of the device we are looking for.
DeviceInfoList_t filter;
filter.push_back( info);
// The list receiving the devices found during enumeration.
DeviceInfoList_t devices;
for ( ; loopCount > 0; --loopCount)
{
// Print a . every few seconds to tell the user we're waiting for the camera to be attached
if ( loopCount % 4 == 0)
{
cout << ".";
cout.flush();
}
// Try to find the camera we are looking for.
if ( tlFactory.EnumerateDevices(devices, filter) > 0 )
{
// Print two new lines, just for improving printed output.
cout << endl << endl;
// The camera has been found. Create and attach it to the Instant Camera object.
camera.Attach( tlFactory.CreateDevice( devices[0]));
//Exit waiting
break;
}
}
// If the camera has been found.
if ( camera.IsPylonDeviceAttached())
{
// Print the camera information.
cout << endl;
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
cout << "Friendly Name: " << camera.GetDeviceInfo().GetFriendlyName() << endl;
cout << "Full Name : " << camera.GetDeviceInfo().GetFullName() << endl;
cout << "SerialNumber : " << camera.GetDeviceInfo().GetSerialNumber() << endl;
cout << endl;
// All configuration objects and other event handler objects are still registered.
// The configuration objects will parameterize the camera device and the instant
// camera will be ready for operation again.
// Open the camera.
camera.Open();
// Now the Instant Camera object can be used as before.
}
else // Timeout
{
cout << endl << "Timeout expired." << endl;
}
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}

Grab

// Grab.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample illustrates how to grab and process images using the CInstantCamera class.
The images are grabbed and processed asynchronously, i.e.,
while the application is processing a buffer, the acquisition of the next buffer is done
in parallel.
The CInstantCamera class uses a pool of buffers to retrieve image data
from the camera device. Once a buffer is filled and ready,
the buffer can be retrieved from the camera object for processing. The buffer
and additional image data are collected in a grab result. The grab result is
held by a smart pointer after retrieval. The buffer is automatically reused
when explicitly released or when the smart pointer object is destroyed.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
# include <pylon/PylonGUI.h>
#endif
// Namespace for using pylon objects.
using namespace Pylon;
// Namespace for using cout.
using namespace std;
// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 100;
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Create an instant camera object with the camera device found first.
CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice());
// Print the model name of the camera.
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
// The parameter MaxNumBuffer can be used to control the count of buffers
// allocated for grabbing. The default value of this parameter is 10.
camera.MaxNumBuffer = 5;
// Start the grabbing of c_countOfImagesToGrab images.
// The camera device is parameterized with a default configuration which
// sets up free-running continuous acquisition.
camera.StartGrabbing( c_countOfImagesToGrab);
// This smart pointer will receive the grab result data.
CGrabResultPtr ptrGrabResult;
// Camera.StopGrabbing() is called automatically by the RetrieveResult() method
// when c_countOfImagesToGrab images have been retrieved.
while ( camera.IsGrabbing())
{
// Wait for an image and then retrieve it. A timeout of 5000 ms is used.
camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException);
// Image grabbed successfully?
if (ptrGrabResult->GrabSucceeded())
{
// Access the image data.
cout << "SizeX: " << ptrGrabResult->GetWidth() << endl;
cout << "SizeY: " << ptrGrabResult->GetHeight() << endl;
const uint8_t *pImageBuffer = (uint8_t *) ptrGrabResult->GetBuffer();
cout << "Gray value of first pixel: " << (uint32_t) pImageBuffer[0] << endl << endl;
#ifdef PYLON_WIN_BUILD
// Display the grabbed image.
Pylon::DisplayImage(1, ptrGrabResult);
#endif
}
else
{
cout << "Error: " << ptrGrabResult->GetErrorCode() << " " << ptrGrabResult->GetErrorDescription() << endl;
}
}
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}

Grab_CameraEvents

// Grab_CameraEvents.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
Basler GigE Vision and IEEE 1394 cameras can send event messages. For example, when a sensor
exposure has finished, the camera can send an Exposure End event to the PC. The event
can be received by the PC before the image data for the finished exposure has been completely
transferred. This sample illustrates how to be notified when camera event message data
is received.
The event messages are automatically retrieved and processed by the InstantCamera classes.
The information carried by event messages is exposed as parameter nodes in the camera node map
and can be accessed like "normal" camera parameters. These nodes are updated
when a camera event is received. You can register camera event handler objects that are
triggered when event data has been received.
These mechanisms are demonstrated for the Exposure End and the Event Overrun events.
The Exposure End event carries the following information:
* ExposureEndEventFrameID: Indicates the number of the image frame that has been exposed.
* ExposureEndEventTimestamp: Indicates the moment when the event has been generated.
* ExposureEndEventStreamChannelIndex: Indicates the number of the image data stream used to
transfer the exposed frame.
The Event Overrun event is sent by the camera as a warning that events are being dropped. The
notification contains no specific information about how many or which events have been dropped.
Events may be dropped if events are generated at a very high frequency and if there isn't enough
bandwidth available to send the events.
It is shown in this sample how to register event handlers indicating the arrival of events
sent by the camera. For demonstration purposes, several different handlers are registered
for the same event.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
// Include files used by samples.
#include "../include/ConfigurationEventPrinter.h"
#include "../include/CameraEventPrinter.h"
// Namespace for using pylon objects.
using namespace Pylon;
#if defined( USE_1394 )
// Settings to use Basler IEEE 1394 cameras.
#include <pylon/1394/Basler1394InstantCamera.h>
typedef Pylon::CBasler1394InstantCamera Camera_t;
typedef CBasler1394CameraEventHandler CameraEventHandler_t; // Or use Camera_t::CameraEventHandler_t
using namespace Basler_IIDC1394CameraParams;
#elif defined ( USE_GIGE )
// Settings to use Basler GigE cameras.
#include <pylon/gige/BaslerGigEInstantCamera.h>
typedef Pylon::CBaslerGigEInstantCamera Camera_t;
typedef CBaslerGigECameraEventHandler CameraEventHandler_t; // Or use Camera_t::CameraEventHandler_t
using namespace Basler_GigECameraParams;
#else
#error camera type is not specified. For example, define USE_GIGE for using GigE cameras
#endif
// Namespace for using cout.
using namespace std;
//Enumeration used for distinguishing different events.
enum MyEvents
{
eMyExposureEndEvent = 100,
eMyEventOverrunEvent = 200
};
// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 5;
// Example handler for camera events.
class CSampleCameraEventHandler : public CameraEventHandler_t
{
public:
// Only very short processing tasks should be performed by this method. Otherwise, the event notification will block the
// processing of images.
virtual void OnCameraEvent( Camera_t& camera, intptr_t userProvidedId, GenApi::INode* /* pNode */)
{
std::cout << std::endl;
switch ( userProvidedId )
{
case eMyExposureEndEvent: // Exposure End event
cout << "Exposure End event. FrameID: " << camera.ExposureEndEventFrameID.GetValue() << " Timestamp: " << camera.ExposureEndEventTimestamp.GetValue() << std::endl << std::endl;
break;
case eMyEventOverrunEvent: // Event Overrun event
cout << "Event Overrun event. FrameID: " << camera.EventOverrunEventFrameID.GetValue() << " Timestamp: " << camera.EventOverrunEventTimestamp.GetValue() << std::endl << std::endl;
break;
}
}
};
//Example of an image event handler.
class CSampleImageEventHandler : public CImageEventHandler
{
public:
virtual void OnImageGrabbed( CInstantCamera& camera, const CGrabResultPtr& ptrGrabResult)
{
cout << "CSampleImageEventHandler::OnImageGrabbed called." << std::endl;
cout << std::endl;
cout << std::endl;
}
};
int main(int argc, char* argv[])
{
// The exit code of the sample application
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
// Create an example event handler. In the present case, we use one single camera handler for handling multiple camera events.
// The handler prints a message for each received event.
CSampleCameraEventHandler* pHandler1 = new CSampleCameraEventHandler;
// Create another more generic event handler printing out information about the node for which an event callback
// is fired.
CCameraEventPrinter* pHandler2 = new CCameraEventPrinter;
try
{
// Only look for cameras supported by Camera_t
CDeviceInfo info;
info.SetDeviceClass( Camera_t::DeviceClass());
// Create an instant camera object with the first found camera device matching the specified device class.
Camera_t camera( CTlFactory::GetInstance().CreateFirstDevice( info));
// Register the standard configuration event handler for enabling software triggering.
// The software trigger configuration handler replaces the default configuration
// as all currently registered configuration handlers are removed by setting the registration mode to RegistrationMode_ReplaceAll.
camera.RegisterConfiguration( new CSoftwareTriggerConfiguration, RegistrationMode_ReplaceAll, Cleanup_Delete);
// For demonstration purposes only, add sample configuration event handlers to print out information
// about camera use and image grabbing.
camera.RegisterConfiguration( new CConfigurationEventPrinter, RegistrationMode_Append, Cleanup_Delete); // Camera use.
// For demonstration purposes only, register another image event handler.
camera.RegisterImageEventHandler( new CSampleImageEventHandler, RegistrationMode_Append, Cleanup_Delete);
// Camera event processing must be activated first, the default is off.
camera.GrabCameraEvents = true;
// Register an event handler for the Exposure End event. For each event type, there is a "data" node
// representing the event. The actual data that is carried by the event is held by child nodes of the
// data node. In the case of the Exposure End event, the child nodes are ExposureEndEventFrameID, ExposureEndEventTimestamp,
// and ExposureEndEventStreamChannelIndex. The CSampleCameraEventHandler demonstrates how to access the child nodes within
// a callback that is fired for the parent data node.
camera.RegisterCameraEventHandler( pHandler1, "ExposureEndEventData", eMyExposureEndEvent, RegistrationMode_ReplaceAll, Cleanup_None);
// Register the same handler for a second event. The user-provided ID can be used
// to distinguish between the events.
camera.RegisterCameraEventHandler( pHandler1, "EventOverrunEventData", eMyEventOverrunEvent, RegistrationMode_Append, Cleanup_None);
// The handler is registered for both the ExposureEndEventFrameID and the ExposureEndEventTimestamp
// nodes. These nodes represent the data carried by the Exposure End event.
// For each Exposure End event received, the handler will be called twice, once for the frame ID, and
// once for the time stamp.
camera.RegisterCameraEventHandler( pHandler2, "ExposureEndEventFrameID", eMyExposureEndEvent, RegistrationMode_Append, Cleanup_None);
camera.RegisterCameraEventHandler( pHandler2, "ExposureEndEventTimestamp", eMyExposureEndEvent, RegistrationMode_Append, Cleanup_None);
// Open the camera for setting parameters.
camera.Open();
// Check if the device supports events.
if ( !GenApi::IsAvailable( camera.EventSelector))
{
throw RUNTIME_EXCEPTION( "The device doesn't support events.");
}
// Enable sending of Exposure End events.
// Select the event to receive.
camera.EventSelector.SetValue(EventSelector_ExposureEnd);
// Enable it.
camera.EventNotification.SetValue(EventNotification_GenICamEvent);
// Enable sending of Event Overrun events.
camera.EventSelector.SetValue(EventSelector_EventOverrun);
camera.EventNotification.SetValue(EventNotification_GenICamEvent);
// Start the grabbing of c_countOfImagesToGrab images.
camera.StartGrabbing( c_countOfImagesToGrab);
// This smart pointer will receive the grab result data.
CGrabResultPtr ptrGrabResult;
// Camera.StopGrabbing() is called automatically by the RetrieveResult() method
// when c_countOfImagesToGrab images have been retrieved.
while ( camera.IsGrabbing())
{
// Execute the software trigger. Wait up to 1000 ms for the camera to be ready for trigger.
if ( camera.WaitForFrameTriggerReady( 1000, TimeoutHandling_ThrowException))
{
camera.ExecuteSoftwareTrigger();
}
// Retrieve grab results and notify the camera event and image event handlers.
camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException);
// Nothing to do here with the grab result, the grab results are handled by the registered event handler.
}
// Disable sending Exposure End events.
camera.EventSelector.SetValue(EventSelector_ExposureEnd);
camera.EventNotification.SetValue(EventNotification_Off);
// Disable sending Event Overrun events.
camera.EventSelector.SetValue(EventSelector_EventOverrun);
camera.EventNotification.SetValue(EventNotification_Off);
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Delete the event handlers.
delete pHandler1;
delete pHandler2;
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}

Grab_CameraEvents_Usb

// Grab_CameraEvents_Usb.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
Basler USB3 Vision cameras can send event messages. For example, when a sensor
exposure has finished, the camera can send an Exposure End event to the PC. The event
can be received by the PC before the image data for the finished exposure has been completely
transferred. This sample illustrates how to be notified when camera event message data
is received.
The event messages are automatically retrieved and processed by the InstantCamera classes.
The information carried by event messages is exposed as parameter nodes in the camera node map
and can be accessed like "normal" camera parameters. These nodes are updated
when a camera event is received. You can register camera event handler objects that are
triggered when event data has been received.
These mechanisms are demonstrated for the Exposure End event.
The Exposure End event carries the following information:
* EventExposureEndFrameID: Indicates the number of the image frame that has been exposed.
* EventExposureEndTimestamp: Indicates the moment when the event has been generated.
It is shown in this sample how to register event handlers indicating the arrival of events
sent by the camera. For demonstration purposes, several different handlers are registered
for the same event.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
// Include files used by samples.
#include "../include/ConfigurationEventPrinter.h"
#include "../include/CameraEventPrinter.h"
// Namespace for using pylon objects.
using namespace Pylon;
#if defined( USE_USB )
// Settings for using Basler USB cameras.
#include <pylon/usb/BaslerUsbInstantCamera.h>
typedef Pylon::CBaslerUsbInstantCamera Camera_t;
typedef CBaslerUsbCameraEventHandler CameraEventHandler_t; // Or use Camera_t::CameraEventHandler_t
using namespace Basler_UsbCameraParams;
#else
#error Camera type is not specified. For example, define USE_USB for using USB cameras.
#endif
// Namespace for using cout.
using namespace std;
//Enumeration used for distinguishing different events.
enum MyEvents
{
eMyExposureEndEvent = 100
// More events can be added here.
};
// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 5;
// Example handler for camera events.
class CSampleCameraEventHandler : public CameraEventHandler_t
{
public:
// Only very short processing tasks should be performed by this method. Otherwise, the event notification will block the
// processing of images.
virtual void OnCameraEvent( Camera_t& camera, intptr_t userProvidedId, GenApi::INode* /* pNode */)
{
std::cout << std::endl;
switch ( userProvidedId )
{
case eMyExposureEndEvent: // Exposure End event
cout << "Exposure End event. FrameID: " << camera.EventExposureEndFrameID.GetValue() << " Timestamp: " << camera.EventExposureEndTimestamp.GetValue() << std::endl << std::endl;
break;
// More events can be added here.
}
}
};
//Example of an image event handler.
class CSampleImageEventHandler : public CImageEventHandler
{
public:
virtual void OnImageGrabbed( CInstantCamera& camera, const CGrabResultPtr& ptrGrabResult)
{
cout << "CSampleImageEventHandler::OnImageGrabbed called." << std::endl;
cout << std::endl;
cout << std::endl;
}
};
int main(int argc, char* argv[])
{
// The exit code of the sample application
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
// Create an example event handler. In the present case, we use one single camera handler for handling multiple camera events.
// The handler prints a message for each received event.
CSampleCameraEventHandler* pHandler1 = new CSampleCameraEventHandler;
// Create another more generic event handler printing out information about the node for which an event callback
// is fired.
CCameraEventPrinter* pHandler2 = new CCameraEventPrinter;
try
{
// Only look for cameras supported by Camera_t
CDeviceInfo info;
info.SetDeviceClass( Camera_t::DeviceClass());
// Create an instant camera object with the first found camera device matching the specified device class.
Camera_t camera( CTlFactory::GetInstance().CreateFirstDevice( info));
// Register the standard configuration event handler for enabling software triggering.
// The software trigger configuration handler replaces the default configuration
// as all currently registered configuration handlers are removed by setting the registration mode to RegistrationMode_ReplaceAll.
camera.RegisterConfiguration( new CSoftwareTriggerConfiguration, RegistrationMode_ReplaceAll, Cleanup_Delete);
// For demonstration purposes only, add sample configuration event handlers to print out information
// about camera use and image grabbing.
camera.RegisterConfiguration( new CConfigurationEventPrinter, RegistrationMode_Append, Cleanup_Delete); // Camera use.
// For demonstration purposes only, register another image event handler.
camera.RegisterImageEventHandler( new CSampleImageEventHandler, RegistrationMode_Append, Cleanup_Delete);
// Camera event processing must be activated first, the default is off.
camera.GrabCameraEvents = true;
// Register an event handler for the Exposure End event. For each event type, there is a "data" node
// representing the event. The actual data that is carried by the event is held by child nodes of the
// data node. In the case of the Exposure End event, the child nodes are EventExposureEndFrameID and EventExposureEndTimestamp.
// The CSampleCameraEventHandler demonstrates how to access the child nodes within
// a callback that is fired for the parent data node.
// The user-provided ID eMyExposureEndEvent can be used to distinguish between multiple events (not shown).
camera.RegisterCameraEventHandler( pHandler1, "EventExposureEndData", eMyExposureEndEvent, RegistrationMode_ReplaceAll, Cleanup_None);
// The handler is registered for both the EventExposureEndFrameID and the EventExposureEndTimestamp
// nodes. These nodes represent the data carried by the Exposure End event.
// For each Exposure End event received, the handler will be called twice, once for the frame ID, and
// once for the time stamp.
camera.RegisterCameraEventHandler( pHandler2, "EventExposureEndFrameID", eMyExposureEndEvent, RegistrationMode_Append, Cleanup_None);
camera.RegisterCameraEventHandler( pHandler2, "EventExposureEndTimestamp", eMyExposureEndEvent, RegistrationMode_Append, Cleanup_None);
// Open the camera for setting parameters.
camera.Open();
// Check if the device supports events.
if ( !GenApi::IsAvailable( camera.EventSelector))
{
throw RUNTIME_EXCEPTION( "The device doesn't support events.");
}
// Enable sending of Exposure End events.
// Select the event to receive.
camera.EventSelector.SetValue(EventSelector_ExposureEnd);
// Enable it.
camera.EventNotification.SetValue(EventNotification_On);
// Start the grabbing of c_countOfImagesToGrab images.
camera.StartGrabbing( c_countOfImagesToGrab);
// This smart pointer will receive the grab result data.
CGrabResultPtr ptrGrabResult;
// Camera.StopGrabbing() is called automatically by the RetrieveResult() method
// when c_countOfImagesToGrab images have been retrieved.
while ( camera.IsGrabbing())
{
// Execute the software trigger. Wait up to 1000 ms for the camera to be ready for trigger.
if ( camera.WaitForFrameTriggerReady( 1000, TimeoutHandling_ThrowException))
{
camera.ExecuteSoftwareTrigger();
}
// Retrieve grab results and notify the camera event and image event handlers.
camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException);
// Nothing to do here with the grab result, the grab results are handled by the registered event handler.
}
// Disable sending Exposure End events.
camera.EventSelector.SetValue(EventSelector_ExposureEnd);
camera.EventNotification.SetValue(EventNotification_Off);
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Delete the event handlers.
delete pHandler1;
delete pHandler2;
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}

Grab_ChunkImage

// Grab_ChunkImage.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
Basler cameras provide chunk features: The cameras can generate certain information about each image,
e.g. frame counters, time stamps, and CRC checksums, that is appended to the image data as data "chunks".
This sample illustrates how to enable chunk features, how to grab images, and how to process the appended
data. When the camera is in chunk mode, it transfers data blocks that are partitioned into chunks. The first
chunk is always the image data. When chunk features are enabled, the image data chunk is followed by chunks
containing the information generated by the chunk features.
*/
// Include files to use the PYLON API
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
# include <pylon/PylonGUI.h>
#endif
// Namespace for using pylon objects.
using namespace Pylon;
#if defined( USE_1394 )
// Settings for using Basler IEEE 1394 cameras.
#include <pylon/1394/Basler1394InstantCamera.h>
typedef Pylon::CBasler1394InstantCamera Camera_t;
typedef Pylon::CBasler1394ImageEventHandler ImageEventHandler_t; // Or use Camera_t::ImageEventHandler_t
typedef Pylon::CBasler1394GrabResultPtr GrabResultPtr_t; // Or use Camera_t::GrabResultPtr_t
using namespace Basler_IIDC1394CameraParams;
#elif defined ( USE_GIGE )
// Settings for using Basler GigE cameras.
#include <pylon/gige/BaslerGigEInstantCamera.h>
typedef Pylon::CBaslerGigEInstantCamera Camera_t;
typedef Pylon::CBaslerGigEImageEventHandler ImageEventHandler_t; // Or use Camera_t::ImageEventHandler_t
typedef Pylon::CBaslerGigEGrabResultPtr GrabResultPtr_t; // Or use Camera_t::GrabResultPtr_t
using namespace Basler_GigECameraParams;
#elif defined( USE_USB )
// Settings for using Basler USB cameras.
#include <pylon/usb/BaslerUsbInstantCamera.h>
typedef Pylon::CBaslerUsbInstantCamera Camera_t;
typedef Pylon::CBaslerUsbImageEventHandler ImageEventHandler_t; // Or use Camera_t::ImageEventHandler_t
typedef Pylon::CBaslerUsbGrabResultPtr GrabResultPtr_t; // Or use Camera_t::GrabResultPtr_t
using namespace Basler_UsbCameraParams;
#else
#error Camera type is not specified. For example, define USE_GIGE for using GigE cameras.
#endif
// Namespace for using cout.
using namespace std;
// Example of a device specific handler for image events.
class CSampleImageEventHandler : public ImageEventHandler_t
{
public:
virtual void OnImageGrabbed( Camera_t& camera, const GrabResultPtr_t& ptrGrabResult)
{
// The chunk data is attached to the grab result and can be accessed anywhere.
// Generic parameter access:
// This shows the access via the chunk data node map. This method is available for all grab result types.
GenApi::CIntegerPtr chunkTimestamp( ptrGrabResult->GetChunkDataNodeMap().GetNode( "ChunkTimestamp"));
// Access the chunk data attached to the result.
// Before accessing the chunk data, you should check to see
// if the chunk is readable. When it is readable, the buffer
// contains the requested chunk data.
if ( IsReadable( chunkTimestamp))
cout << "OnImageGrabbed: TimeStamp (Result) accessed via node map: " << chunkTimestamp->GetValue() << endl;
// Native parameter access:
// When using the device specific grab results the chunk data can be accessed
// via the members of the grab result data.
if ( IsReadable(ptrGrabResult->ChunkTimestamp))
cout << "OnImageGrabbed: TimeStamp (Result) accessed via result member: " << ptrGrabResult->ChunkTimestamp.GetValue() << endl;
}
};
// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 5;
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Only look for cameras supported by Camera_t
CDeviceInfo info;
info.SetDeviceClass( Camera_t::DeviceClass());
// Create an instant camera object with the first found camera device that matches the specified device class.
Camera_t camera( CTlFactory::GetInstance().CreateFirstDevice( info));
// Print the model name of the camera.
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
// Register an image event handler that accesses the chunk data.
camera.RegisterImageEventHandler( new CSampleImageEventHandler, RegistrationMode_Append, Cleanup_Delete);
// Open the camera.
camera.Open();
// A GenICam node map is required for accessing chunk data. That's why a small node map is required for each grab result.
// Creating a lot of node maps can be time consuming.
// The node maps are usually created dynamically when StartGrabbing() is called.
// To avoid a delay caused by node map creation in StartGrabbing() you have the option to create
// a static pool of node maps once before grabbing.
//camera.StaticChunkNodeMapPoolSize = camera.MaxNumBuffer.GetValue();
// Enable chunks in general.
if (GenApi::IsWritable(camera.ChunkModeActive))
{
camera.ChunkModeActive.SetValue(true);
}
else
{
throw RUNTIME_EXCEPTION( "The camera doesn't support chunk features");
}
// Enable time stamp chunks.
camera.ChunkSelector.SetValue(ChunkSelector_Timestamp);
camera.ChunkEnable.SetValue(true);
#ifndef USE_USB // USB camera devices provide generic counters. An explicit FrameCounter value is not provided by USB camera devices.
// Enable frame counter chunks.
camera.ChunkSelector.SetValue(ChunkSelector_Framecounter);
camera.ChunkEnable.SetValue(true);
#endif
// Enable CRC checksum chunks.
camera.ChunkSelector.SetValue(ChunkSelector_PayloadCRC16);
camera.ChunkEnable.SetValue(true);
// Start the grabbing of c_countOfImagesToGrab images.
// The camera device is parameterized with a default configuration which
// sets up free-running continuous acquisition.
camera.StartGrabbing( c_countOfImagesToGrab);
// This smart pointer will receive the grab result data.
GrabResultPtr_t ptrGrabResult;
// Camera.StopGrabbing() is called automatically by the RetrieveResult() method
// when c_countOfImagesToGrab images have been retrieved.
while( camera.IsGrabbing())
{
// Wait for an image and then retrieve it. A timeout of 5000 ms is used.
// RetrieveResult calls the image event handler's OnImageGrabbed method.
camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException);
#ifdef PYLON_WIN_BUILD
// Display the image
Pylon::DisplayImage(1, ptrGrabResult);
#endif
cout << "GrabSucceeded: " << ptrGrabResult->GrabSucceeded() << endl;
// The result data is automatically filled with received chunk data.
// (Note: This is not the case when using the low-level API)
cout << "SizeX: " << ptrGrabResult->GetWidth() << endl;
cout << "SizeY: " << ptrGrabResult->GetHeight() << endl;
const uint8_t *pImageBuffer = (uint8_t *) ptrGrabResult->GetBuffer();
cout << "Gray value of first pixel: " << (uint32_t) pImageBuffer[0] << endl;
// Check to see if a buffer containing chunk data has been received.
if (PayloadType_ChunkData != ptrGrabResult->GetPayloadType())
{
throw RUNTIME_EXCEPTION( "Unexpected payload type received.");
}
// Since we have activated the CRC Checksum feature, we can check
// the integrity of the buffer first.
// Note: Enabling the CRC Checksum feature is not a prerequisite for using
// chunks. Chunks can also be handled when the CRC Checksum feature is deactivated.
if (ptrGrabResult->HasCRC() && ptrGrabResult->CheckCRC() == false)
{
throw RUNTIME_EXCEPTION( "Image was damaged!");
}
// Access the chunk data attached to the result.
// Before accessing the chunk data, you should check to see
// if the chunk is readable. When it is readable, the buffer
// contains the requested chunk data.
if (IsReadable(ptrGrabResult->ChunkTimestamp))
cout << "TimeStamp (Result): " << ptrGrabResult->ChunkTimestamp.GetValue() << endl;
#ifndef USE_USB // USB camera devices provide generic counters. An explicit FrameCounter value is not provided by USB camera devices.
if (IsReadable(ptrGrabResult->ChunkFramecounter))
cout << "FrameCounter (Result): " << ptrGrabResult->ChunkFramecounter.GetValue() << endl;
#endif
cout << endl;
}
// Disable chunk mode.
camera.ChunkModeActive.SetValue(false);
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}

Grab_MultiCast

// Grab_MultiCast.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample demonstrates how to open a camera in multicast mode
and how to receive a multicast stream.
Two instances of this application must be started simultaneously on different computers.
The first application started on PC A acts as the controlling application and has full access to the GigE camera.
The second instance started on PC B opens the camera in monitor mode.
This instance is not able to control the camera but can receive multicast streams.
To get the sample running, start this application first on PC A in control mode.
After PC A has begun to receive frames, start the second instance of this
application on PC B in monitor mode.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
# include <pylon/PylonGUI.h>
#endif
// Include files used by samples.
#include "../include/ConfigurationEventPrinter.h"
#include "../include/ImageEventPrinter.h"
// Include file for _kbhit
#if defined(PYLON_WIN_BUILD)
# include <conio.h>
#elif defined(PYLON_UNIX_BUILD)
# include <stdio.h>
# include <termios.h>
# include <unistd.h>
# include <fcntl.h>
#endif
// Settings to use Basler GigE cameras.
#include <pylon/gige/BaslerGigEInstantCamera.h>
using namespace Basler_GigECameraParams;
using namespace Basler_GigEStreamParams;
// Namespace for using pylon objects.
using namespace Pylon;
#ifndef USE_GIGE
#error This example is usable for GigE cameras only.
#endif
// Namespace for using cout.
using namespace std;
// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 100;
bool KeyPressed(void)
{
#if defined(PYLON_WIN_BUILD)
return _kbhit() != 0;
#elif defined(PYLON_UNIX_BUILD)
struct termios savedTermios;
int savedFL;
struct termios termios;
int ch;
tcgetattr(STDIN_FILENO, &savedTermios);
savedFL = fcntl(STDIN_FILENO, F_GETFL, 0);
termios = savedTermios;
termios.c_lflag &= ~(ICANON | ECHO);
tcsetattr(STDIN_FILENO, TCSANOW, &termios);
fcntl(STDIN_FILENO, F_SETFL, savedFL | O_NONBLOCK);
ch = getchar();
fcntl(STDIN_FILENO, F_SETFL, savedFL);
tcsetattr(STDIN_FILENO, TCSANOW, &savedTermios);
if (ch != EOF)
{
ungetc(ch, stdin);
}
return ch != EOF;
#endif
}
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Query the user for the mode to use.
bool monitorMode = false;
{
char key;
// Ask the user to launch the multicast controlling application or the multicast monitoring application.
cout << "Start multicast sample in (c)ontrol or in (m)onitor mode? (c/m) ";
do
cin.get(key);
while ( (key != 'c') && (key != 'm') && (key != 'C') && (key != 'M'));
monitorMode = (key == 'm') || (key == 'M');
}
// Flush input buffer (stdin).
while(KeyPressed())
{ getchar(); }
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Only look for GigE cameras.
CDeviceInfo info;
info.SetDeviceClass( CBaslerGigEInstantCamera::DeviceClass());
// Create an instant camera object for the GigE camera found first.
CBaslerGigEInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice( info));
// The default configuration must be removed when monitor mode is selected
// because the monitoring application is not allowed to modify any parameter settings.
if ( monitorMode)
{
camera.RegisterConfiguration( (CConfigurationEventHandler*) NULL, RegistrationMode_ReplaceAll, Cleanup_None);
}
// For demonstration purposes only, add sample configuration event handlers to print out information
// about camera use and image grabbing.
camera.RegisterConfiguration( new CConfigurationEventPrinter, RegistrationMode_Append, Cleanup_Delete); // Camera use.
camera.RegisterImageEventHandler( new CImageEventPrinter, RegistrationMode_Append, Cleanup_Delete); // Image grabbing.
// Print the model name of the camera.
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
// Monitor mode selected.
if ( monitorMode)
{
// Set MonitorModeActive to true to act as monitor
camera.MonitorModeActive = true;
// Open the camera.
camera.Open();
// Select transmission type. If the camera is already controlled by another application
// and configured for multicast, the active camera configuration can be used
// (IP Address and Port will be set automatically).
camera.GetStreamGrabberParams().TransmissionType = TransmissionType_UseCameraConfig;
// Alternatively, the stream grabber could be explicitly set to "multicast"...
// In this case, the IP Address and the IP port must also be set.
//
//camera.GetStreamGrabberParams().TransmissionType = TransmissionType_Multicast;
//camera.GetStreamGrabberParams().DestinationAddr = "239.0.0.1";
//camera.GetStreamGrabberParams().DestinationPort = 49152;
if (camera.GetStreamGrabberParams().DestinationAddr.GetValue() != "0.0.0.0" &&
camera.GetStreamGrabberParams().DestinationPort.GetValue() != 0)
{
camera.StartGrabbing( c_countOfImagesToGrab);
} else
{
cerr << endl << "Failed to open stream grabber (monitor mode): The acquisition is not yet started by the controlling application." << endl;
cerr << endl << "Start the controlling application before starting the monitor application" << endl;
}
}
// Controlling mode selected.
else
{
// Open the camera.
camera.Open();
// Set transmission type to "multicast"...
// In this case, the IP Address and the IP port must also be set.
camera.GetStreamGrabberParams().TransmissionType = TransmissionType_Multicast;
// camera.GetStreamGrabberParams().DestinationAddr = "239.0.0.1"; // These are default values.
// camera.GetStreamGrabberParams().DestinationPort = 49152;
// Maximize the image area of interest (Image AOI).
if (IsWritable(camera.OffsetX))
{
camera.OffsetX.SetValue(camera.OffsetX.GetMin());
}
if (IsWritable(camera.OffsetY))
{
camera.OffsetY.SetValue(camera.OffsetY.GetMin());
}
camera.Width.SetValue(camera.Width.GetMax());
camera.Height.SetValue(camera.Height.GetMax());
// Set the pixel data format.
camera.PixelFormat.SetValue(PixelFormat_Mono8);
camera.StartGrabbing();
}
// This smart pointer will receive the grab result data.
CGrabResultPtr ptrGrabResult;
// Camera.StopGrabbing() is called automatically by the RetrieveResult() method
// when c_countOfImagesToGrab images have been retrieved in monitor mode
// or when a key is pressed and the camera object is destroyed.
while( !KeyPressed() && camera.IsGrabbing())
{
// Wait for an image and then retrieve it. A timeout of 5000 ms is used.
camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException);
#ifdef PYLON_WIN_BUILD
// Display the image
Pylon::DisplayImage(1, ptrGrabResult);
#endif
// The grab result could now be processed here.
}
}
catch (const GenericException &e)
{
// Error handling
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following three lines to disable wait on exit.
cin.ignore(cin.rdbuf()->in_avail()); //Remove left over characters from input buffer.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}

Grab_MultipleCameras

// Grab_MultipleCameras.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample illustrates how to grab and process images from multiple cameras
using the CInstantCameraArray class. The CInstantCameraArray class represents
an array of instant camera objects. It provides almost the same interface
as the instant camera for grabbing.
The main purpose of the CInstantCameraArray is to simplify waiting for images and
camera events of multiple cameras in one thread. This is done by providing a single
RetrieveResult method for all cameras in the array.
Alternatively, the grabbing can be started using the internal grab loop threads
of all cameras in the CInstantCameraArray. The grabbed images can then be processed by one or more
image event handlers. Please note that this is not shown in this example.
*/
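// Note: The grab loop thread alternative mentioned above is not part of this sample.
// A minimal sketch of it, assuming an image event handler such as CImageEventPrinter
// has been registered for each camera in the array, could look like this:
//
// for (size_t i = 0; i < cameras.GetSize(); ++i)
// {
//     cameras[i].RegisterImageEventHandler( new CImageEventPrinter, RegistrationMode_Append, Cleanup_Delete);
// }
// // The internal grab loop threads deliver the grab results to the registered image event handlers.
// cameras.StartGrabbing( GrabStrategy_OneByOne, GrabLoop_ProvidedByInstantCamera);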
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
# include <pylon/PylonGUI.h>
#endif
// Namespace for using pylon objects.
using namespace Pylon;
// Namespace for using cout.
using namespace std;
// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 10;
// Limits the amount of cameras used for grabbing.
// It is important to manage the available bandwidth when grabbing with multiple cameras.
// This applies, for instance, if two GigE cameras are connected to the same network adapter via a switch.
// To manage the bandwidth, the GevSCPD interpacket delay parameter and the GevSCFTD transmission delay
// parameter can be set for each GigE camera device.
// The "Controlling Packet Transmission Timing with the Interpacket and Frame Transmission Delays on Basler GigE Vision Cameras"
// Application Notes (AW000649xx000)
// provide more information about this topic.
// The bandwidth used by a FireWire camera device can be limited by adjusting the packet size.
static const size_t c_maxCamerasToUse = 2;
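// A hedged sketch of such bandwidth tuning for a single GigE camera (assuming a typed
// CBaslerGigEInstantCamera object; the values below are placeholders, not recommendations):
//
// camera.GevSCPSPacketSize.SetValue( 1500); // Stream channel packet size in bytes.
// camera.GevSCPD.SetValue( 1000);           // Interpacket delay in ticks.
// camera.GevSCFTD.SetValue( 0);             // Frame transmission delay in ticks.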
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Get the transport layer factory.
CTlFactory& tlFactory = CTlFactory::GetInstance();
// Get all attached devices and exit application if no device is found.
DeviceInfoList_t devices;
if ( tlFactory.EnumerateDevices(devices) == 0 )
{
throw RUNTIME_EXCEPTION( "No camera present.");
}
// Create an array of instant cameras for the found devices and avoid exceeding a maximum number of devices.
CInstantCameraArray cameras( min( devices.size(), c_maxCamerasToUse));
// Create and attach all Pylon Devices.
for ( size_t i = 0; i < cameras.GetSize(); ++i)
{
cameras[ i ].Attach( tlFactory.CreateDevice( devices[ i ]));
// Print the model name of the camera.
cout << "Using device " << cameras[ i ].GetDeviceInfo().GetModelName() << endl;
}
// Starts grabbing for all cameras starting with index 0. The grabbing
// is started for one camera after the other. That's why the images of all
// cameras are not taken at the same time.
// However, a hardware trigger setup can be used to cause all cameras to grab images synchronously.
// According to their default configuration, the cameras are
// set up for free-running continuous acquisition.
cameras.StartGrabbing();
// This smart pointer will receive the grab result data.
CGrabResultPtr ptrGrabResult;
// Grab c_countOfImagesToGrab from the cameras.
for( uint32_t i = 0; i < c_countOfImagesToGrab && cameras.IsGrabbing(); ++i)
{
cameras.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException);
// When the cameras in the array are created the camera context value
// is set to the index of the camera in the array.
// The camera context is a user settable value.
// This value is attached to each grab result and can be used
// to determine the camera that produced the grab result.
intptr_t cameraContextValue = ptrGrabResult->GetCameraContext();
#ifdef PYLON_WIN_BUILD
// Show the image acquired by each camera in the window related to each camera.
Pylon::DisplayImage(cameraContextValue, ptrGrabResult);
#endif
// Print the index and the model name of the camera.
cout << "Camera " << cameraContextValue << ": " << cameras[ cameraContextValue ].GetDeviceInfo().GetModelName() << endl;
// Now, the image data can be processed.
cout << "GrabSucceeded: " << ptrGrabResult->GrabSucceeded() << endl;
cout << "SizeX: " << ptrGrabResult->GetWidth() << endl;
cout << "SizeY: " << ptrGrabResult->GetHeight() << endl;
const uint8_t *pImageBuffer = (uint8_t *) ptrGrabResult->GetBuffer();
cout << "Gray value of first pixel: " << (uint32_t) pImageBuffer[0] << endl << endl;
}
}
catch (const GenericException &e)
{
// Error handling
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}

Grab_Strategies

// Grab_Strategies.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample shows the use of the Instant Camera grab strategies.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
// Include files used by samples.
#include "../include/ConfigurationEventPrinter.h"
#include "../include/ImageEventPrinter.h"
// Namespace for using pylon objects.
using namespace Pylon;
// Namespace for using cout.
using namespace std;
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// This smart pointer will receive the grab result data.
CGrabResultPtr ptrGrabResult;
// Create an instant camera object for the camera device found first.
CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice());
// Register the standard configuration event handler for enabling software triggering.
// The software trigger configuration handler replaces the default configuration
// as all currently registered configuration handlers are removed by setting the registration mode to RegistrationMode_ReplaceAll.
camera.RegisterConfiguration( new CSoftwareTriggerConfiguration, RegistrationMode_ReplaceAll, Cleanup_Delete);
// For demonstration purposes only, add sample configuration event handlers to print out information
// about camera use and image grabbing.
camera.RegisterConfiguration( new CConfigurationEventPrinter, RegistrationMode_Append, Cleanup_Delete);
camera.RegisterImageEventHandler( new CImageEventPrinter, RegistrationMode_Append, Cleanup_Delete);
// Print the model name of the camera.
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
// The MaxNumBuffer parameter can be used to control the count of buffers
// allocated for grabbing. The default value of this parameter is 10.
camera.MaxNumBuffer = 15;
// Open the camera.
camera.Open();
// Can the camera device be queried whether it is ready to accept the next frame trigger?
if (camera.CanWaitForFrameTriggerReady())
{
cout << "Grab using the GrabStrategy_OneByOne default strategy:" << endl << endl;
// The GrabStrategy_OneByOne strategy is used. The images are processed
// in the order of their arrival.
camera.StartGrabbing( GrabStrategy_OneByOne);
// In the background, the grab engine thread retrieves the
// image data and queues the buffers into the internal output queue.
// Issue software triggers. For each call, wait up to 1000 ms until the camera is ready for triggering the next image.
for ( int i = 0; i < 3; ++i)
{
if ( camera.WaitForFrameTriggerReady( 1000, TimeoutHandling_ThrowException))
{
camera.ExecuteSoftwareTrigger();
}
}
// For demonstration purposes, wait for the last image to appear in the output queue.
// Check that grab results are waiting.
if ( camera.GetGrabResultWaitObject().Wait( 0))
{
cout << endl << "Grab results wait in the output queue." << endl << endl;
}
// All triggered images are still waiting in the output queue
// and are now retrieved.
// The grabbing continues in the background, e.g. when using hardware trigger mode,
// as long as the grab engine does not run out of buffers.
int nBuffersInQueue = 0;
while( camera.RetrieveResult( 0, ptrGrabResult, TimeoutHandling_Return))
{
nBuffersInQueue++;
}
cout << "Retrieved " << nBuffersInQueue << " grab results from output queue." << endl << endl;
//Stop the grabbing.
camera.StopGrabbing();
cout << endl << "Grab using strategy GrabStrategy_LatestImageOnly:" << endl << endl;
// The GrabStrategy_LatestImageOnly strategy is used. The images are processed
// in the order of their arrival but only the last received image
// is kept in the output queue.
// This strategy can be useful when the acquired images are only displayed on the screen.
// If the processor has been busy for a while and images could not be displayed automatically
// the latest image is displayed when processing time is available again.
camera.StartGrabbing( GrabStrategy_LatestImageOnly);
// Execute the software trigger, wait actively until the camera accepts the next frame trigger or until the timeout occurs.
for ( int i = 0; i < 3; ++i)
{
if ( camera.WaitForFrameTriggerReady( 1000, TimeoutHandling_ThrowException))
{
camera.ExecuteSoftwareTrigger();
}
}
// Wait for all images.
// Check whether the grab result is waiting.
if ( camera.GetGrabResultWaitObject().Wait( 0))
{
cout << endl << "A grab result waits in the output queue." << endl << endl;
}
// Only the last received image is waiting in the internal output queue
// and is now retrieved.
// The grabbing continues in the background, e.g. when using the hardware trigger mode.
nBuffersInQueue = 0;
while( camera.RetrieveResult( 0, ptrGrabResult, TimeoutHandling_Return))
{
cout << "Skipped " << ptrGrabResult->GetNumberOfSkippedImages() << " images." << endl;
nBuffersInQueue++;
}
cout << "Retrieved " << nBuffersInQueue << " grab result from output queue." << endl << endl;
//Stop the grabbing.
camera.StopGrabbing();
cout << endl << "Grab using strategy GrabStrategy_LatestImages:" << endl << endl;
// The GrabStrategy_LatestImages strategy is used. The images are processed
// in the order of their arrival, but only a number of the images received last
// are kept in the output queue.
// The size of the output queue can be adjusted.
// When using this strategy the OutputQueueSize parameter can be changed during grabbing.
camera.OutputQueueSize = 2;
camera.StartGrabbing( GrabStrategy_LatestImages);
// Execute the software trigger, wait actively until the camera accepts the next frame trigger or until the timeout occurs.
for ( int i = 0; i < 3; ++i)
{
if ( camera.WaitForFrameTriggerReady( 1000, TimeoutHandling_ThrowException))
{
camera.ExecuteSoftwareTrigger();
}
}
// Wait for all images.
// Check whether the grab results are waiting.
if ( camera.GetGrabResultWaitObject().Wait( 0))
{
cout << endl << "Grab results wait in the output queue." << endl << endl;
}
// Only the images received last are waiting in the internal output queue
// and are now retrieved.
// The grabbing continues in the background, e.g. when using the hardware trigger mode.
nBuffersInQueue = 0;
while( camera.RetrieveResult( 0, ptrGrabResult, TimeoutHandling_Return))
{
if ( ptrGrabResult->GetNumberOfSkippedImages())
{
cout << "Skipped " << ptrGrabResult->GetNumberOfSkippedImages() << " image." << endl;
}
nBuffersInQueue++;
}
cout << "Retrieved " << nBuffersInQueue << " grab results from output queue." << endl << endl;
// When setting the output queue size to 1 this strategy is equivalent to the GrabStrategy_LatestImageOnly grab strategy.
camera.OutputQueueSize = 1;
// When setting the output queue size to CInstantCamera::MaxNumBuffer this strategy is equivalent to GrabStrategy_OneByOne.
camera.OutputQueueSize = camera.MaxNumBuffer;
//Stop the grabbing.
camera.StopGrabbing();
// The Upcoming Image grab strategy cannot be used together with USB camera devices.
// For more information, see the advanced topics section of the pylon Programmer's Guide.
if ( !camera.IsUsb())
{
cout << endl << "Grab using the GrabStrategy_UpcomingImage strategy:" << endl << endl;
// Reconfigure the camera to use continuous acquisition.
CAcquireContinuousConfiguration().OnOpened( camera);
// The GrabStrategy_UpcomingImage strategy is used. A buffer for grabbing
// is queued each time when RetrieveResult()
// is called. The image data is grabbed into the buffer and returned.
// This ensures that the image grabbed is the next image
// received from the camera.
// All images are still transported to the PC.
camera.StartGrabbing( GrabStrategy_UpcomingImage);
// Queues a buffer for grabbing and waits for the grab to finish.
camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException);
// Sleep.
WaitObject::Sleep( 1000);
// Check that no grab result is waiting, because no buffers are queued for grabbing.
if ( !camera.GetGrabResultWaitObject().Wait( 0))
{
cout << "No grab result waits in the output queue." << endl << endl;
}
//Stop the grabbing.
camera.StopGrabbing();
}
}
else
{
// See the documentation of CInstantCamera::CanWaitForFrameTriggerReady() for more information.
cout << endl;
cout << "This sample can only be used with cameras that can be queried whether they are ready to accept the next frame trigger.";
cout << endl;
cout << endl;
}
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}

Grab_UsingActionCommand

// Grab_UsingActionCommand.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample shows how to issue a GigE Vision ACTION_CMD to multiple cameras.
In contrast to software triggering, where each camera must be triggered individually,
an action command can trigger multiple cameras at the same time.
To make the configuration of multiple cameras easier this sample uses the CInstantCameraArray class.
It also uses a CActionTriggerConfiguration to set up the basic action command features.
*/
#include <time.h> // for time
#include <stdlib.h> // for rand & srand
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
# include <pylon/PylonGUI.h>
#endif
#include <pylon/gige/BaslerGigEInstantCameraArray.h>
#include <pylon/gige/ActionTriggerConfiguration.h>
// Settings to use Basler GigE cameras.
using namespace Basler_GigECameraParams;
// Namespace for using pylon objects.
using namespace Pylon;
// Namespace for using cout.
using namespace std;
#ifndef USE_GIGE
#error This example is usable for GigE cameras only.
#endif
// Limits the amount of cameras used for grabbing.
// It is important to manage the available bandwidth when grabbing with multiple
// cameras. This applies, for instance, if two GigE cameras are connected to the
// same network adapter via a switch. To manage the bandwidth, the GevSCPD
// interpacket delay parameter and the GevSCFTD transmission delay parameter can
// be set for each GigE camera device. The "Controlling Packet Transmission Timing
// with the Interpacket and Frame Transmission Delays on Basler GigE Vision Cameras"
// Application Note (AW000649xx000) provides more information about this topic.
static const uint32_t c_maxCamerasToUse = 2;
int main(int argc, char* argv[])
{
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Get the GigE transport layer.
// We'll need it later to issue the action commands.
CTlFactory& tlFactory = CTlFactory::GetInstance();
IGigETransportLayer* pTL = dynamic_cast<IGigETransportLayer*>(tlFactory.CreateTl( BaslerGigEDeviceClass));
if (pTL == NULL)
{
throw RUNTIME_EXCEPTION("No GigE transport layer available.");
}
// In this sample we use the transport layer directly to enumerate cameras.
// By calling EnumerateDevices on the TL we get only GigE cameras.
// You could also accomplish this by using a filter and letting
// the Transport Layer Factory enumerate.
DeviceInfoList_t allDeviceInfos;
if (pTL->EnumerateDevices(allDeviceInfos) == 0)
{
throw RUNTIME_EXCEPTION("No GigE cameras present.");
}
// Only use cameras in the same subnet as the first one.
DeviceInfoList_t usableDeviceInfos;
usableDeviceInfos.push_back(allDeviceInfos[0]);
const String_t subnet(static_cast<const CBaslerGigEDeviceInfo&>(allDeviceInfos[0]).GetSubnetAddress());
// Start with index 1 as we have already added the first one above.
// We will also limit the number of cameras to c_maxCamerasToUse.
for (size_t i = 1; i < allDeviceInfos.size() && usableDeviceInfos.size() < c_maxCamerasToUse; ++i)
{
const CBaslerGigEDeviceInfo& gigeinfo = static_cast<const CBaslerGigEDeviceInfo&>(allDeviceInfos[i]);
if (subnet == gigeinfo.GetSubnetAddress())
{
// Add this deviceInfo to the ones we will be using.
usableDeviceInfos.push_back(gigeinfo);
}
else
{
cerr << "Camera will not be used because it is in a different subnet "
<< subnet << "!" << endl;
}
}
// In this sample we'll use a CBaslerGigEInstantCameraArray to access multiple cameras.
CBaslerGigEInstantCameraArray cameras(usableDeviceInfos.size());
// Seed the random number generator and generate a random device key value.
srand((unsigned)time(NULL));
const uint32_t DeviceKey = rand();
// For this sample we configure all cameras to be in the same group.
const uint32_t GroupKey = 0x112233;
// For the following sample we use the CActionTriggerConfiguration to configure the camera.
// It will set the DeviceKey, GroupKey and GroupMask features. It will also
// configure the camera FrameTrigger and set the TriggerSource to the action command.
// You can look at the implementation of CActionTriggerConfiguration in <pylon/gige/ActionTriggerConfiguration.h>
// to see which features are set.
// Create all GigE cameras and attach them to the InstantCameras in the array.
for (size_t i = 0; i < cameras.GetSize(); ++i)
{
cameras[i].Attach(tlFactory.CreateDevice(usableDeviceInfos[i]));
// We'll use the CActionTriggerConfiguration, which will set up the cameras to wait for an action command.
cameras[i].RegisterConfiguration(new CActionTriggerConfiguration(DeviceKey, GroupKey, AllGroupMask), RegistrationMode_Append, Cleanup_Delete);
// Set the context. This will help us later to correlate the grab result to a camera in the array.
cameras[i].SetCameraContext(i);
const CBaslerGigEDeviceInfo& di = cameras[i].GetDeviceInfo();
// Print the model name of the camera.
cout << "Using camera " << i << ": " << di.GetModelName() << " (" << di.GetIpAddress() << ")" << endl;
}
// Open all cameras.
// This will apply the CActionTriggerConfiguration specified above.
cameras.Open();
// Use an Action Command to Trigger Multiple Cameras at the Same Time.
cout << endl << "Issuing an action command." << endl;
// Starts grabbing for all cameras.
// The cameras won't transmit any image data, because they are configured to wait for an action command.
cameras.StartGrabbing();
// Now we issue the action command to all devices in the subnet.
// The devices with a matching DeviceKey, GroupKey and valid GroupMask will grab an image.
pTL->IssueActionCommand(DeviceKey, GroupKey, AllGroupMask, subnet);
// This smart pointer will receive the grab result data.
CBaslerGigEGrabResultPtr ptrGrabResult;
// Retrieve images from all cameras.
const int DefaultTimeout_ms = 5000;
for (size_t i = 0; i < usableDeviceInfos.size() && cameras.IsGrabbing(); ++i)
{
// CInstantCameraArray::RetrieveResult will return grab results in the order they arrive.
cameras.RetrieveResult(DefaultTimeout_ms, ptrGrabResult, TimeoutHandling_ThrowException);
// When the cameras in the array are created the camera context value
// is set to the index of the camera in the array.
// The camera context is a user-settable value.
// This value is attached to each grab result and can be used
// to determine the camera that produced the grab result.
intptr_t cameraIndex = ptrGrabResult->GetCameraContext();
// Image grabbed successfully?
if (ptrGrabResult->GrabSucceeded())
{
#ifdef PYLON_WIN_BUILD
// Show the image acquired by each camera in the window related to the camera.
// DisplayImage supports up to 32 image windows.
if (cameraIndex <= 31)
Pylon::DisplayImage(cameraIndex, ptrGrabResult);
#endif
// Print the index and the model name of the camera.
cout << "Camera " << cameraIndex << ": " << cameras[cameraIndex].GetDeviceInfo().GetModelName() <<
" (" << cameras[cameraIndex].GetDeviceInfo().GetIpAddress() << ")" << endl;
// You could process the image here by accessing the image buffer.
cout << "GrabSucceeded: " << ptrGrabResult->GrabSucceeded() << endl;
const uint8_t *pImageBuffer = (uint8_t *) ptrGrabResult->GetBuffer();
cout << "Gray value of first pixel: " << (uint32_t) pImageBuffer[0] << endl << endl;
}
else
{
// If a buffer has been incompletely grabbed, the network bandwidth is possibly insufficient for transferring
// multiple images simultaneously. See note above c_maxCamerasToUse.
cout << "Error: " << ptrGrabResult->GetErrorCode() << " " << ptrGrabResult->GetErrorDescription() << endl;
}
}
// In case you want to trigger again you should wait for the camera
// to become trigger-ready before issuing the next action command.
// To avoid overtriggering you should call cameras[0].WaitForFrameTriggerReady
// (see Grab_UsingGrabLoopThread sample for details).
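// A minimal sketch of such a retrigger sequence, assuming that checking the first
// camera for trigger readiness is sufficient for this setup:
//
// if (cameras[0].WaitForFrameTriggerReady( 1000, TimeoutHandling_ThrowException))
// {
//     pTL->IssueActionCommand(DeviceKey, GroupKey, AllGroupMask, subnet);
// }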
cameras.StopGrabbing();
// Close all cameras.
cameras.Close();
}
catch (const GenericException &e)
{
// Error handling
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while (cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}

Grab_UsingBufferFactory

// Grab_UsingBufferFactory.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample demonstrates the use of a user-provided buffer factory.
The use of a buffer factory is optional and intended for advanced use cases only.
A buffer factory is only needed if you plan to grab into externally supplied buffers.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
#include <pylon/PylonGUI.h>
#endif
// Namespace for using pylon objects.
using namespace Pylon;
// Namespace for using cout.
using namespace std;
// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 5;
// A user-provided buffer factory.
class MyBufferFactory : public IBufferFactory
{
public:
MyBufferFactory()
: m_lastBufferContext(1000)
{
}
virtual ~MyBufferFactory()
{
}
// Will be called when the Instant Camera object needs to allocate a buffer.
// Return the buffer and context data in the output parameters.
// In case of an error new() will throw an exception
// which will be forwarded to the caller to indicate an error.
// Warning: This method can be called by different threads.
virtual void AllocateBuffer( size_t bufferSize, void** pCreatedBuffer, intptr_t& bufferContext)
{
try
{
// Allocate buffer for pixel data.
// If you already have a buffer allocated by your image processing library you can use this instead.
// In this case you must modify the delete code (see below) accordingly.
*pCreatedBuffer = new uint8_t[ bufferSize ];
// The context information is never changed by the Instant Camera and can be used
// by the buffer factory to manage the buffers.
// The context information can be retrieved from a grab result by calling
// ptrGrabResult->GetBufferContext();
bufferContext = ++m_lastBufferContext;
cout << "Created buffer " << bufferContext << ", " << *pCreatedBuffer << endl;
}
catch (const std::exception&)
{
// In case of an error we must free the memory we may have already allocated.
if (*pCreatedBuffer != NULL)
{
uint8_t* p = reinterpret_cast<uint8_t*>(*pCreatedBuffer);
delete[] p;
*pCreatedBuffer = NULL;
}
// Rethrow exception.
// AllocateBuffer can also just return with *pCreatedBuffer = NULL to indicate
// that no buffer is available at the moment.
throw;
}
}
// Frees a previously allocated buffer.
// Warning: This method can be called by different threads.
virtual void FreeBuffer( void* pCreatedBuffer, intptr_t bufferContext)
{
uint8_t* p = reinterpret_cast<uint8_t*>(pCreatedBuffer);
delete [] p;
cout << "Freed buffer " << bufferContext << ", " << pCreatedBuffer << endl;
}
// Destroys the buffer factory.
// This will be used when you pass the ownership of the buffer factory instance to pylon
// by defining Cleanup_Delete. pylon will call this function to destroy the instance
// of the buffer factory. If you don't pass the ownership to pylon (Cleanup_None)
// this method will be ignored.
virtual void DestroyBufferFactory()
{
delete this;
}
protected:
unsigned long m_lastBufferContext;
};
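// A hedged usage sketch for the alternative Cleanup_Delete case: the factory is allocated
// on the heap and ownership is passed to pylon, which calls DestroyBufferFactory() when
// the factory is no longer needed.
//
// camera.SetBufferFactory( new MyBufferFactory, Cleanup_Delete);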
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// The buffer factory must be created first because objects on the
// stack are destroyed in reverse order of creation.
// The buffer factory must exist longer than the Instant Camera object
// in this sample.
MyBufferFactory myFactory;
// Create an instant camera object with the camera device found first.
CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice());
// Print the model name of the camera.
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
// Use our own implementation of a buffer factory.
// Since we control the lifetime of the factory object we pass Cleanup_None.
camera.SetBufferFactory(&myFactory, Cleanup_None);
// The parameter MaxNumBuffer can be used to control the count of buffers
// allocated for grabbing. The default value of this parameter is 10.
camera.MaxNumBuffer = 5;
// Start the grabbing of c_countOfImagesToGrab images.
// The camera device is parameterized with a default configuration which
// sets up free-running continuous acquisition.
camera.StartGrabbing( c_countOfImagesToGrab);
// This smart pointer will receive the grab result data.
CGrabResultPtr ptrGrabResult;
// Camera.StopGrabbing() is called automatically by the RetrieveResult() method
// when c_countOfImagesToGrab images have been retrieved.
while ( camera.IsGrabbing())
{
// Wait for an image and then retrieve it. A timeout of 5000 ms is used.
camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException);
// Image grabbed successfully?
if (ptrGrabResult->GrabSucceeded())
{
// Access the image data.
cout << "Context: " << ptrGrabResult->GetBufferContext() << endl;
cout << "SizeX: " << ptrGrabResult->GetWidth() << endl;
cout << "SizeY: " << ptrGrabResult->GetHeight() << endl;
const uint8_t *pImageBuffer = (uint8_t *) ptrGrabResult->GetBuffer();
cout << "Gray value of first pixel: " << (uint32_t) pImageBuffer[0] << endl << endl;
#ifdef PYLON_WIN_BUILD
// Display the grabbed image.
Pylon::DisplayImage(1, ptrGrabResult);
#endif
}
else
{
cout << "Error: " << ptrGrabResult->GetErrorCode() << " " << ptrGrabResult->GetErrorDescription();
}
}
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}

Grab_UsingExposureEndEvent

// Grab_UsingExposureEndEvent.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample shows how to use the Exposure End event to speed up the image acquisition.
For example, when a sensor exposure is finished, the camera can send an Exposure End event to the PC.
The PC can receive the event before the image data of the finished exposure has been completely transferred.
This can be used in order to avoid an unnecessary delay, for example when an imaged
object is moved further before the related image data transfer is complete.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
// Include files used by samples.
#include "../include/ConfigurationEventPrinter.h"
#include <iomanip>
#ifdef PYLON_UNIX_BUILD
# include <sys/time.h>
#endif
// Namespace for using pylon objects.
using namespace Pylon;
#if defined( USE_1394 )
// Settings to be used for Basler IEEE 1394 cameras.
#include <pylon/1394/Basler1394InstantCamera.h>
typedef Pylon::CBasler1394InstantCamera Camera_t;
typedef CBasler1394CameraEventHandler CameraEventHandler_t; // Or use Camera_t::CameraEventHandler_t
typedef CBasler1394ImageEventHandler ImageEventHandler_t; // Or use Camera_t::ImageEventHandler_t
typedef Pylon::CBasler1394GrabResultPtr GrabResultPtr_t; // Or use Camera_t::GrabResultPtr_t
using namespace Basler_IIDC1394CameraParams;
#elif defined ( USE_GIGE )
// Settings to be used for Basler GigE cameras.
#include <pylon/gige/BaslerGigEInstantCamera.h>
typedef Pylon::CBaslerGigEInstantCamera Camera_t;
using namespace Basler_GigECameraParams;
#else
#error camera type is not specified. For example, define USE_GIGE for using GigE cameras
#endif
// Namespace for using cout.
using namespace std;
// Enumeration used for distinguishing different events.
enum MyEvents
{
eMyExposureEndEvent, // Triggered by a camera event.
eMyFrameStartOvertrigger, // Triggered by a camera event.
eMyEventOverrunEvent, // Triggered by a camera event.
eMyImageReceivedEvent, // Triggered by the receipt of an image.
eMyMoveEvent, // Triggered when the imaged item or the sensor head can be moved.
eMyNoEvent // Used as default setting.
};
// Names of possible events for a printed output.
const char* MyEventNames[] =
{
"ExposureEndEvent ",
"FrameStartOvertrigger",
"EventOverrunEvent ",
"ImageReceived ",
"Move ",
"NoEvent "
};
// Used for logging received events without outputting the information on the screen
// because outputting will change the timing.
// This class is used for demonstration purposes only.
struct LogItem
{
LogItem()
: eventType( eMyNoEvent)
, frameNumber(0)
{
}
LogItem( MyEvents event, uint32_t frameNr)
: eventType(event)
, frameNumber(frameNr)
{
//Warning, measured values can be wrong on older PC hardware.
#if defined(PYLON_WIN_BUILD)
QueryPerformanceCounter(&time);
#elif defined(PYLON_UNIX_BUILD)
struct timeval tv;
gettimeofday(&tv, NULL);
time = static_cast<unsigned long long>(tv.tv_sec) * 1000L + static_cast<unsigned long long>(tv.tv_usec) / 1000LL;
#endif
}
#if defined(PYLON_WIN_BUILD)
LARGE_INTEGER time; // Recorded time stamps.
#elif defined(PYLON_UNIX_BUILD)
unsigned long long time; // Recorded time stamps.
#endif
MyEvents eventType; // Type of the received event.
uint16_t frameNumber; // Frame number of the received event. Frame numbers are not supported by all transport layers.
};
// Helper function for printing a log.
// This function is used for demonstration purposes only.
void PrintLog( const std::vector<LogItem>& aLog)
{
#if defined(PYLON_WIN_BUILD)
// Get the PC timer frequency.
LARGE_INTEGER timerFrequency;
QueryPerformanceFrequency(&timerFrequency);
#endif
cout << std::endl << "Warning, the printed time values can be wrong on older PC hardware." << std::endl << std::endl;
// Print the event information header.
cout << "Time [ms] " << "Event " << "FrameNumber" << std::endl;
cout << "------------ " << "--------------------- " << "-----------" << std::endl;
// Print the logged information.
size_t logSize = aLog.size();
for ( size_t i = 0; i < logSize; ++i)
{
// Calculate the elapsed time between the events.
double time_ms = 0;
if ( i)
{
#if defined(PYLON_WIN_BUILD)
__int64 oldTicks = ((__int64)aLog[i-1].time.HighPart << 32) + (__int64)aLog[i-1].time.LowPart;
__int64 newTicks = ((__int64)aLog[i].time.HighPart << 32) + (__int64)aLog[i].time.LowPart;
long double timeDifference = (long double) (newTicks - oldTicks);
long double ticksPerSecond = (long double) (((__int64)timerFrequency.HighPart << 32) + (__int64)timerFrequency.LowPart);
time_ms = (timeDifference / ticksPerSecond) * 1000;
#elif defined(PYLON_UNIX_BUILD)
time_ms = aLog[i].time - aLog[i-1].time;
#endif
}
// Print the event information.
cout << setw(12) << fixed << setprecision(4) << time_ms <<" "<< MyEventNames[ aLog[i].eventType ] <<" "<< aLog[i].frameNumber << std::endl;
}
}
// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 50;
#if defined ( USE_GIGE )
// Example handler for GigE camera events.
// Additional handling is required for GigE camera events because the event network packets can be lost, doubled or delayed on the network.
class CEventHandler : public CBaslerGigECameraEventHandler, public CBaslerGigEImageEventHandler
{
public:
CEventHandler()
: m_nextExpectedFrameNumberImage(1)
, m_nextExpectedFrameNumberExposureEnd(1)
, m_nextFrameNumberForMove(1)
{
// Reserve space to log camera events and image events.
m_log.reserve( c_countOfImagesToGrab * 2);
}
// This method is called when a camera event has been received.
virtual void OnCameraEvent( CBaslerGigEInstantCamera& camera, intptr_t userProvidedId, GenApi::INode* /* pNode */)
{
if ( userProvidedId == eMyExposureEndEvent)
{
// An Exposure End event has been received.
uint16_t frameNumber = (uint16_t)camera.ExposureEndEventFrameID.GetValue();
m_log.push_back( LogItem( eMyExposureEndEvent, frameNumber));
// If Exposure End event is not doubled.
if ( GetIncrementedFrameNumber( frameNumber) != m_nextExpectedFrameNumberExposureEnd)
{
// Check whether the imaged item or the sensor head can be moved.
if ( frameNumber == m_nextFrameNumberForMove)
{
MoveImagedItemOrSensorHead();
}
// Check for missing Exposure End events.
if ( frameNumber != m_nextExpectedFrameNumberExposureEnd)
{
throw RUNTIME_EXCEPTION( "An Exposure End event has been lost. Expected frame number is %d but got frame number %d.", m_nextExpectedFrameNumberExposureEnd, frameNumber);
}
IncrementFrameNumber( m_nextExpectedFrameNumberExposureEnd);
}
}
else if ( userProvidedId == eMyFrameStartOvertrigger)
{
// The camera has been overtriggered.
m_log.push_back( LogItem( eMyFrameStartOvertrigger, 0));
// Handle this error...
}
else if ( userProvidedId == eMyEventOverrunEvent)
{
// The camera was unable to send all its events to the PC.
// Events have been dropped by the camera.
m_log.push_back( LogItem( eMyEventOverrunEvent, 0));
// Handle this error...
}
else
{
PYLON_ASSERT2(false, "The sample has been modified and a new event has been registered. Add handler code above.");
}
}
// This method is called when an image has been grabbed.
virtual void OnImageGrabbed( CBaslerGigEInstantCamera& camera, const CBaslerGigEGrabResultPtr& ptrGrabResult)
{
// An image has been received. Block ID is equal to frame number for GigE camera devices.
uint16_t frameNumber = (uint16_t)ptrGrabResult->GetBlockID();
m_log.push_back( LogItem( eMyImageReceivedEvent, frameNumber));
// Check whether the imaged item or the sensor head can be moved.
// This will be the case if the Exposure End has been lost or if the Exposure End is received later than the image.
if ( frameNumber == m_nextFrameNumberForMove)
{
MoveImagedItemOrSensorHead();
}
// Check for missing images.
if ( frameNumber != m_nextExpectedFrameNumberImage)
{
throw RUNTIME_EXCEPTION( "An image has been lost. Expected frame number is %d but got frame number %d.", m_nextExpectedFrameNumberExposureEnd, frameNumber);
}
IncrementFrameNumber( m_nextExpectedFrameNumberImage);
}
void MoveImagedItemOrSensorHead()
{
// The imaged item or the sensor head can be moved now...
// The camera may not be ready for a trigger at this point yet because the sensor is still being read out.
// See the documentation of the CInstantCamera::WaitForFrameTriggerReady() method for more information.
m_log.push_back( LogItem( eMyMoveEvent, m_nextFrameNumberForMove));
IncrementFrameNumber( m_nextFrameNumberForMove);
}
void PrintLog()
{
::PrintLog( m_log);
}
private:
void IncrementFrameNumber( uint16_t& frameNumber)
{
frameNumber = GetIncrementedFrameNumber( frameNumber);
}
uint16_t GetIncrementedFrameNumber( uint16_t frameNumber)
{
++frameNumber;
if ( frameNumber == 0)
{
// Zero is not a valid frame number.
++frameNumber;
}
return frameNumber;
}
uint16_t m_nextExpectedFrameNumberImage;
uint16_t m_nextExpectedFrameNumberExposureEnd;
uint16_t m_nextFrameNumberForMove;
std::vector<LogItem> m_log;
};
#else //No GigE camera
// Example handler for camera events.
class CEventHandler : public CameraEventHandler_t , public ImageEventHandler_t
{
public:
CEventHandler()
{
// Reserve space to log camera events and image events.
m_log.reserve( c_countOfImagesToGrab * 2);
}
// This method is called when a camera event has been received.
virtual void OnCameraEvent( Camera_t& camera, intptr_t userProvidedId, GenApi::INode* /* pNode */)
{
if ( userProvidedId == eMyExposureEndEvent)
{
// An Exposure End event has been received.
m_log.push_back( LogItem( eMyExposureEndEvent, (uint16_t)camera.ExposureEndEventFrameID.GetValue()));
// Move the imaged item or the sensor head.
MoveImagedItemOrSensorHead();
}
else if ( userProvidedId == eMyFrameStartOvertrigger)
{
// The camera has been overtriggered.
m_log.push_back( LogItem( eMyFrameStartOvertrigger, 0));
// Handle this error...
}
else if ( userProvidedId == eMyEventOverrunEvent)
{
// The camera was unable to send all its events to the PC.
// Events have been dropped by the camera.
m_log.push_back( LogItem( eMyEventOverrunEvent, 0));
// Handle this error...
}
else
{
PYLON_ASSERT2(false, "The sample has been modified and a new event has been registered. Add handler code above.");
}
}
// This method is called when an image has been grabbed.
virtual void OnImageGrabbed( Camera_t& camera, const GrabResultPtr_t& ptrGrabResult)
{
// An image has been received.
m_log.push_back( LogItem( eMyImageReceivedEvent, (uint16_t)ptrGrabResult->GetBlockID()));
}
void MoveImagedItemOrSensorHead()
{
// The imaged item or the sensor head can be moved now...
// The camera may not be ready for trigger at this point yet because the sensor is still being read out.
// See the documentation of the CInstantCamera::WaitForFrameTriggerReady() method for more information.
m_log.push_back( LogItem( eMyMoveEvent, 0));
}
void PrintLog()
{
::PrintLog( m_log);
}
private:
std::vector<LogItem> m_log;
};
#endif
int main(int argc, char* argv[])
{
// Exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Create the event handler.
CEventHandler eventHandler;
// Only look for cameras supported by Camera_t.
CDeviceInfo info;
info.SetDeviceClass( Camera_t::DeviceClass());
// Create an instant camera object with the first found camera device matching the specified device class.
Camera_t camera( CTlFactory::GetInstance().CreateFirstDevice( info));
// For demonstration purposes only, add sample configuration event handlers to print out information
// about camera use and image grabbing.
camera.RegisterConfiguration( new CConfigurationEventPrinter, RegistrationMode_Append, Cleanup_Delete); // Camera use.
// Register the event handler.
camera.RegisterImageEventHandler( &eventHandler, RegistrationMode_Append, Cleanup_None);
camera.RegisterCameraEventHandler( &eventHandler, "ExposureEndEventData", eMyExposureEndEvent, RegistrationMode_ReplaceAll, Cleanup_None);
camera.RegisterCameraEventHandler( &eventHandler, "FrameStartOvertriggerEventData", eMyFrameStartOvertrigger, RegistrationMode_Append, Cleanup_None);
camera.RegisterCameraEventHandler( &eventHandler, "EventOverrunEventData", eMyEventOverrunEvent, RegistrationMode_Append, Cleanup_None);
// Camera event processing must be activated first, the default is off.
camera.GrabCameraEvents = true;
// Open the camera for setting parameters.
camera.Open();
// The network packet signaling an event of a GigE camera device can get lost on the network.
// The following commented parameters can be used to control the handling of lost events.
//camera.GetEventGrabberParams().Timeout;
//camera.GetEventGrabberParams().RetryCount;
// Check if the device supports events.
if ( !IsAvailable( camera.EventSelector))
{
throw RUNTIME_EXCEPTION( "The device doesn't support events.");
}
// Enable the sending of Exposure End events.
// Select the event to be received.
camera.EventSelector.SetValue(EventSelector_ExposureEnd);
// Enable it.
camera.EventNotification.SetValue(EventNotification_GenICamEvent);
// Enable the sending of Event Overrun events.
camera.EventSelector.SetValue(EventSelector_EventOverrun);
camera.EventNotification.SetValue(EventNotification_GenICamEvent);
// Enable the sending of Frame Start Overtrigger events.
if ( IsAvailable( camera.EventSelector.GetEntry(EventSelector_FrameStartOvertrigger)))
{
camera.EventSelector.SetValue(EventSelector_FrameStartOvertrigger);
camera.EventNotification.SetValue(EventNotification_GenICamEvent);
}
// Start the grabbing of c_countOfImagesToGrab images.
// The camera device is parameterized with a default configuration which
// sets up free-running continuous acquisition.
camera.StartGrabbing( c_countOfImagesToGrab);
// This smart pointer will receive the grab result data.
CGrabResultPtr ptrGrabResult;
// Camera.StopGrabbing() is called automatically by the RetrieveResult() method
// when c_countOfImagesToGrab images have been retrieved.
while ( camera.IsGrabbing())
{
// Retrieve grab results and notify the camera event and image event handlers.
camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException);
// Nothing to do here with the grab result, the grab results are handled by the registered event handlers.
}
// Disable the sending of Exposure End events.
camera.EventSelector = EventSelector_ExposureEnd;
camera.EventNotification.SetValue(EventNotification_Off);
// Disable the sending of Event Overrun events.
camera.EventSelector.SetValue(EventSelector_EventOverrun);
camera.EventNotification.SetValue(EventNotification_Off);
// Disable the sending of Frame Start Overtrigger events.
if ( IsAvailable( camera.EventSelector.GetEntry(EventSelector_FrameStartOvertrigger)))
{
camera.EventSelector.SetValue(EventSelector_FrameStartOvertrigger);
camera.EventNotification.SetValue(EventNotification_Off);
}
// Print the recorded log showing the timing of events and images.
eventHandler.PrintLog();
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}

Grab_UsingExposureEndEvent_Usb

// Grab_UsingExposureEndEvent_Usb.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample shows how to use the Exposure End event to speed up the image acquisition.
For example, when a sensor exposure is finished, the camera can send an Exposure End event to the PC.
The PC can receive the event before the image data of the finished exposure has been completely transferred.
This can be used in order to avoid an unnecessary delay, for example when an imaged
object is moved further before the related image data transfer is complete.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
// Include files used by samples.
#include "../include/ConfigurationEventPrinter.h"
#include <iomanip>
#ifdef PYLON_UNIX_BUILD
# include <sys/time.h>
#endif
// Namespace for using pylon objects.
using namespace Pylon;
// Settings for using Basler USB cameras.
#include <pylon/usb/BaslerUsbInstantCamera.h>
typedef Pylon::CBaslerUsbInstantCamera Camera_t;
typedef Pylon::CBaslerUsbCameraEventHandler CameraEventHandler_t; // Or use Camera_t::CameraEventHandler_t
typedef Pylon::CBaslerUsbImageEventHandler ImageEventHandler_t; // Or use Camera_t::ImageEventHandler_t
typedef Pylon::CBaslerUsbGrabResultPtr GrabResultPtr_t; // Or use Camera_t::GrabResultPtr_t
using namespace Basler_UsbCameraParams;
// Namespace for using cout.
using namespace std;
// Enumeration used for distinguishing different events.
enum MyEvents
{
eMyExposureEndEvent, // Triggered by a camera event.
eMyFrameStartOvertrigger, // Triggered by a camera event.
eMyImageReceivedEvent, // Triggered by the receipt of an image.
eMyMoveEvent, // Triggered when the imaged item or the sensor head can be moved.
eMyNoEvent // Used as default setting.
};
// Names of possible events for a printed output.
const char* MyEventNames[] =
{
"ExposureEndEvent ",
"FrameStartOvertrigger",
"ImageReceived ",
"Move ",
"NoEvent "
};
// Used for logging received events without outputting the information on the screen
// because outputting will change the timing.
// This class is used for demonstration purposes only.
struct LogItem
{
LogItem()
: eventType( eMyNoEvent)
, frameNumber(0)
{
}
LogItem( MyEvents event, uint16_t frameNr)
: eventType(event)
, frameNumber(frameNr)
{
//Warning, measured values can be wrong on older PC hardware.
#if defined(PYLON_WIN_BUILD)
QueryPerformanceCounter(&time);
#elif defined(PYLON_UNIX_BUILD)
struct timeval tv;
gettimeofday(&tv, NULL);
time = static_cast<unsigned long long>(tv.tv_sec) * 1000L + static_cast<unsigned long long>(tv.tv_usec) / 1000LL;
#endif
}
#if defined(PYLON_WIN_BUILD)
LARGE_INTEGER time; // Recorded time stamps.
#elif defined(PYLON_UNIX_BUILD)
unsigned long long time; // Recorded time stamps.
#endif
MyEvents eventType; // Type of the received event.
uint16_t frameNumber; // Frame number of the received event.
};
// Helper function for printing a log.
// This function is used for demonstration purposes only.
void PrintLog( const std::vector<LogItem>& aLog)
{
#if defined(PYLON_WIN_BUILD)
// Get the PC timer frequency.
LARGE_INTEGER timerFrequency;
QueryPerformanceFrequency(&timerFrequency);
#endif
cout << std::endl << "Warning, the printed time values can be wrong on older PC hardware." << std::endl << std::endl;
// Print the event information header.
cout << "Time [ms] " << "Event " << "FrameNumber" << std::endl;
cout << "------------ " << "--------------------- " << "-----------" << std::endl;
// Print the logged information.
size_t logSize = aLog.size();
for ( size_t i = 0; i < logSize; ++i)
{
// Calculate the elapsed time between the events.
double time_ms = 0;
if ( i)
{
#if defined(PYLON_WIN_BUILD)
__int64 oldTicks = ((__int64)aLog[i-1].time.HighPart << 32) + (__int64)aLog[i-1].time.LowPart;
__int64 newTicks = ((__int64)aLog[i].time.HighPart << 32) + (__int64)aLog[i].time.LowPart;
long double timeDifference = (long double) (newTicks - oldTicks);
long double ticksPerSecond = (long double) (((__int64)timerFrequency.HighPart << 32) + (__int64)timerFrequency.LowPart);
time_ms = (timeDifference / ticksPerSecond) * 1000;
#elif defined(PYLON_UNIX_BUILD)
time_ms = aLog[i].time - aLog[i-1].time;
#endif
}
// Print the event information.
cout << setw(12) << fixed << setprecision(4) << time_ms <<" "<< MyEventNames[ aLog[i].eventType ] <<" "<< aLog[i].frameNumber << std::endl;
}
}
// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 50;
// Example handler for USB camera events.
class CEventHandler : public CameraEventHandler_t, public ImageEventHandler_t
{
public:
CEventHandler()
: m_nextExpectedFrameNumberImage(0)
, m_nextExpectedFrameNumberExposureEnd(0)
, m_nextFrameNumberForMove(0)
, m_frameIDsInitialized(false)
{
// Reserve space to log camera, image and move events.
m_log.reserve( c_countOfImagesToGrab * 3);
}
// This method is called when a camera event has been received.
virtual void OnCameraEvent( Camera_t& camera, intptr_t userProvidedId, GenApi::INode* /* pNode */)
{
if ( userProvidedId == eMyExposureEndEvent)
{
// An Exposure End event has been received.
uint16_t frameNumber = (uint16_t)camera.EventExposureEndFrameID.GetValue();
m_log.push_back( LogItem( eMyExposureEndEvent, frameNumber));
// If Exposure End event is not doubled.
if ( GetIncrementedFrameNumber( frameNumber) != m_nextExpectedFrameNumberExposureEnd)
{
// Check whether the imaged item or the sensor head can be moved.
if ( frameNumber == m_nextFrameNumberForMove)
{
MoveImagedItemOrSensorHead();
}
// Check for missing Exposure End events.
if ( frameNumber != m_nextExpectedFrameNumberExposureEnd)
{
throw RUNTIME_EXCEPTION( "An Exposure End event has been lost. Expected frame number is %d but got frame number %d.", m_nextExpectedFrameNumberExposureEnd, frameNumber);
}
IncrementFrameNumber( m_nextExpectedFrameNumberExposureEnd);
}
}
else if ( userProvidedId == eMyFrameStartOvertrigger)
{
// The camera has been overtriggered.
m_log.push_back( LogItem( eMyFrameStartOvertrigger, 0));
// Handle this error...
}
else
{
PYLON_ASSERT2(false, "The sample has been modified and a new event has been registered. Add handler code above.");
}
}
// This method is called when an image has been grabbed.
virtual void OnImageGrabbed( Camera_t& camera, const GrabResultPtr_t& ptrGrabResult)
{
// An image has been received.
uint16_t frameNumber = (uint16_t)ptrGrabResult->GetBlockID();
m_log.push_back( LogItem( eMyImageReceivedEvent, frameNumber));
// Check whether the imaged item or the sensor head can be moved.
// This will be the case if the Exposure End has been lost or if the Exposure End is received later than the image.
if ( frameNumber == m_nextFrameNumberForMove)
{
MoveImagedItemOrSensorHead();
}
// Check for missing images.
if ( frameNumber != m_nextExpectedFrameNumberImage)
{
throw RUNTIME_EXCEPTION( "An image has been lost. Expected frame number is %d but got frame number %d.", m_nextExpectedFrameNumberImage, frameNumber);
}
IncrementFrameNumber( m_nextExpectedFrameNumberImage);
}
void MoveImagedItemOrSensorHead()
{
// The imaged item or the sensor head can be moved now...
// The camera may not be ready for a trigger at this point yet because the sensor is still being read out.
// See the documentation of the CInstantCamera::WaitForFrameTriggerReady() method for more information.
m_log.push_back( LogItem( eMyMoveEvent, m_nextFrameNumberForMove));
IncrementFrameNumber( m_nextFrameNumberForMove);
}
void PrintLog()
{
::PrintLog( m_log);
}
private:
void IncrementFrameNumber( uint16_t& frameNumber)
{
frameNumber = GetIncrementedFrameNumber( frameNumber);
}
uint16_t GetIncrementedFrameNumber( uint16_t frameNumber)
{
++frameNumber;
return frameNumber;
}
uint16_t m_nextExpectedFrameNumberImage;
uint16_t m_nextExpectedFrameNumberExposureEnd;
uint16_t m_nextFrameNumberForMove;
bool m_frameIDsInitialized;
std::vector<LogItem> m_log;
};
int main(int argc, char* argv[])
{
// Exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Create the event handler.
CEventHandler eventHandler;
// Only look for cameras supported by Camera_t.
CDeviceInfo info;
info.SetDeviceClass( Camera_t::DeviceClass());
// Create an instant camera object with the first found camera device matching the specified device class.
Camera_t camera( CTlFactory::GetInstance().CreateFirstDevice( info));
// For demonstration purposes only, add sample configuration event handlers to print out information
// about camera use and image grabbing.
camera.RegisterConfiguration( new CConfigurationEventPrinter, RegistrationMode_Append, Cleanup_Delete); // Camera use.
// Register the event handler.
camera.RegisterImageEventHandler( &eventHandler, RegistrationMode_Append, Cleanup_None);
camera.RegisterCameraEventHandler( &eventHandler, "EventExposureEndData", eMyExposureEndEvent, RegistrationMode_ReplaceAll, Cleanup_None);
camera.RegisterCameraEventHandler( &eventHandler, "EventFrameStartOvertriggerData", eMyFrameStartOvertrigger, RegistrationMode_Append, Cleanup_None);
// Camera event processing must be activated first, the default is off.
camera.GrabCameraEvents = true;
// Open the camera for setting parameters.
camera.Open();
// Check if the device supports events.
if ( !IsAvailable( camera.EventSelector))
{
throw RUNTIME_EXCEPTION( "The device doesn't support events.");
}
// Enable the sending of Exposure End events.
// Select the event to be received.
camera.EventSelector.SetValue(EventSelector_ExposureEnd);
// Enable it.
camera.EventNotification.SetValue(EventNotification_On);
// Enable the sending of Frame Start Overtrigger events.
if ( IsAvailable( camera.EventSelector.GetEntry(EventSelector_FrameStartOvertrigger)))
{
camera.EventSelector.SetValue(EventSelector_FrameStartOvertrigger);
camera.EventNotification.SetValue(EventNotification_On);
}
// Start the grabbing of c_countOfImagesToGrab images.
// The camera device is parameterized with a default configuration which
// sets up free-running continuous acquisition.
camera.StartGrabbing( c_countOfImagesToGrab);
// This smart pointer will receive the grab result data.
CGrabResultPtr ptrGrabResult;
// Camera.StopGrabbing() is called automatically by the RetrieveResult() method
// when c_countOfImagesToGrab images have been retrieved.
while ( camera.IsGrabbing())
{
// Retrieve grab results and notify the camera event and image event handlers.
camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException);
// Nothing to do here with the grab result, the grab results are handled by the registered event handlers.
}
// Disable the sending of Exposure End events.
camera.EventSelector = EventSelector_ExposureEnd;
camera.EventNotification.SetValue(EventNotification_Off);
// Disable the sending of Frame Start Overtrigger events.
if ( IsAvailable( camera.EventSelector.GetEntry(EventSelector_FrameStartOvertrigger)))
{
camera.EventSelector.SetValue(EventSelector_FrameStartOvertrigger);
camera.EventNotification.SetValue(EventNotification_Off);
}
// Print the recorded log showing the timing of events and images.
eventHandler.PrintLog();
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}

Grab_UsingGrabLoopThread

// Grab_UsingGrabLoopThread.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample illustrates how to grab and process images using the grab loop thread
provided by the Instant Camera class.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
# include <pylon/PylonGUI.h>
#endif
// Include files used by samples.
#include "../include/ConfigurationEventPrinter.h"
#include "../include/ImageEventPrinter.h"
// Namespace for using pylon objects.
using namespace Pylon;
// Namespace for using cout.
using namespace std;
//Example of an image event handler.
class CSampleImageEventHandler : public CImageEventHandler
{
public:
virtual void OnImageGrabbed( CInstantCamera& camera, const CGrabResultPtr& ptrGrabResult)
{
#ifdef PYLON_WIN_BUILD
// Display the image
Pylon::DisplayImage(1, ptrGrabResult);
#endif
cout << "CSampleImageEventHandler::OnImageGrabbed called." << std::endl;
cout << std::endl;
cout << std::endl;
}
};
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Create an instant camera object for the camera device found first.
CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice());
// Register the standard configuration event handler for enabling software triggering.
// The software trigger configuration handler replaces the default configuration
// as all currently registered configuration handlers are removed by setting the registration mode to RegistrationMode_ReplaceAll.
camera.RegisterConfiguration( new CSoftwareTriggerConfiguration, RegistrationMode_ReplaceAll, Cleanup_Delete);
// For demonstration purposes only, add a sample configuration event handler to print out information
// about camera use.
camera.RegisterConfiguration( new CConfigurationEventPrinter, RegistrationMode_Append, Cleanup_Delete);
// The image event printer serves as sample image processing.
// When using the grab loop thread provided by the Instant Camera object, an image event handler processing the grab
// results must be created and registered.
camera.RegisterImageEventHandler( new CImageEventPrinter, RegistrationMode_Append, Cleanup_Delete);
// For demonstration purposes only, register another image event handler.
camera.RegisterImageEventHandler( new CSampleImageEventHandler, RegistrationMode_Append, Cleanup_Delete);
// Open the camera device.
camera.Open();
// Can the camera device be queried whether it is ready to accept the next frame trigger?
if (camera.CanWaitForFrameTriggerReady())
{
// Start the grabbing using the grab loop thread, by setting the grabLoopType parameter
// to GrabLoop_ProvidedByInstantCamera. The grab results are delivered to the image event handlers.
// The GrabStrategy_OneByOne default grab strategy is used.
camera.StartGrabbing( GrabStrategy_OneByOne, GrabLoop_ProvidedByInstantCamera);
cerr << endl << "Enter \"t\" to trigger the camera or \"e\" to exit and press enter? (t/e)" << endl << endl;
// Wait for user input to trigger the camera or exit the program.
// The grabbing is stopped, the device is closed and destroyed automatically when the camera object goes out of scope.
char key;
do
{
cin.get(key);
if ( (key == 't' || key == 'T'))
{
// Execute the software trigger. Wait up to 500 ms for the camera to be ready for trigger.
if ( camera.WaitForFrameTriggerReady( 500, TimeoutHandling_ThrowException))
{
camera.ExecuteSoftwareTrigger();
}
}
}
while ( (key != 'e') && (key != 'E'));
}
else
{
// See the documentation of CInstantCamera::CanWaitForFrameTriggerReady() for more information.
cout << endl;
cout << "This sample can only be used with cameras that can be queried whether they are ready to accept the next frame trigger.";
cout << endl;
cout << endl;
}
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
// Remove left over characters from input buffer.
cin.ignore(cin.rdbuf()->in_avail());
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}
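
Stripped to its essentials, the grab loop thread pattern shown above only needs a registered image event handler and a StartGrabbing() call with GrabLoop_ProvidedByInstantCamera. A minimal free-running sketch (no software triggering; it assumes a device is attached and relies on the default continuous acquisition configuration):

#include <pylon/PylonIncludes.h>
#include <chrono>
#include <iostream>
#include <thread>
// Counts the images delivered by the grab loop thread.
class CCountingImageEventHandler : public Pylon::CImageEventHandler
{
public:
    virtual void OnImageGrabbed( Pylon::CInstantCamera& /*camera*/, const Pylon::CGrabResultPtr& ptrGrabResult)
    {
        if (ptrGrabResult->GrabSucceeded())
        {
            std::cout << "Image " << ++m_count << " grabbed." << std::endl;
        }
    }
private:
    int m_count = 0;
};
int main()
{
    Pylon::PylonInitialize();
    {
        Pylon::CInstantCamera camera( Pylon::CTlFactory::GetInstance().CreateFirstDevice());
        camera.RegisterImageEventHandler( new CCountingImageEventHandler, Pylon::RegistrationMode_Append, Pylon::Cleanup_Delete);
        // Grab results are dispatched to the image event handler on the grab loop thread.
        camera.StartGrabbing( Pylon::GrabStrategy_OneByOne, Pylon::GrabLoop_ProvidedByInstantCamera);
        // The main thread is free to do other work while images arrive.
        std::this_thread::sleep_for( std::chrono::seconds( 3));
        camera.StopGrabbing();
    }
    Pylon::PylonTerminate();
    return 0;
}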

Grab_UsingSequencer

// Grab_UsingSequencer.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample shows how to grab images using the sequencer feature of a camera.
Three sequence sets are used for image acquisition. Each sequence set
uses a different image height.
*/
// Include files to use the PYLON API
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
# include <pylon/PylonGUI.h>
#endif
using namespace Pylon;
#if defined ( USE_GIGE )
// Settings for using Basler GigE Vision cameras.
#include <pylon/gige/BaslerGigEInstantCamera.h>
typedef Pylon::CBaslerGigEInstantCamera Camera_t;
using namespace Basler_GigECameraParams;
using namespace Basler_GigEStreamParams;
#else
#error Camera type is not specified. For example, define USE_GIGE for using GigE cameras.
#endif
// Namespace for using cout
using namespace std;
// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 10;
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Only look for cameras supported by Camera_t.
CDeviceInfo info;
info.SetDeviceClass( Camera_t::DeviceClass());
// Create an instant camera object with the first found camera device that matches the specified device class.
Camera_t camera( CTlFactory::GetInstance().CreateFirstDevice( info));
// Print the model name of the camera.
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
// Register the standard configuration event handler for enabling software triggering.
// The software trigger configuration handler replaces the default configuration
// as all currently registered configuration handlers are removed by setting the registration mode to RegistrationMode_ReplaceAll.
camera.RegisterConfiguration( new CSoftwareTriggerConfiguration, RegistrationMode_ReplaceAll, Cleanup_Delete);
// Open the camera.
camera.Open();
if (IsWritable(camera.SequenceEnable))
{
// Disable the sequencer before changing parameters.
// The parameters under control of the sequencer are locked
// when the sequencer is enabled. For a list of parameters
// controlled by the sequencer, see the camera User's Manual.
camera.SequenceEnable.SetValue(false);
// Turn configuration mode on
if (IsWritable(camera.SequenceConfigurationMode))
{
camera.SequenceConfigurationMode.SetValue(SequenceConfigurationMode_On );
}
// Maximize the image area of interest (Image AOI).
if (IsWritable(camera.OffsetX))
{
camera.OffsetX.SetValue(camera.OffsetX.GetMin());
}
if (IsWritable(camera.OffsetY))
{
camera.OffsetY.SetValue(camera.OffsetY.GetMin());
}
camera.Width.SetValue(camera.Width.GetMax());
camera.Height.SetValue(camera.Height.GetMax());
// Set the pixel data format.
camera.PixelFormat.SetValue(PixelFormat_Mono8);
// Set up sequence sets.
// Configure how the sequence will advance.
// 'Auto' refers to the auto sequence advance mode.
// The advance from one sequence set to the next will occur automatically with each image acquired.
// After the end of the sequence set cycle has been reached, a new cycle starts.
camera.SequenceAdvanceMode = SequenceAdvanceMode_Auto;
// Our sequence sets relate to three steps (0..2).
// In each step we will increase the height of the Image AOI by one increment.
camera.SequenceSetTotalNumber = 3;
int64_t increments = (camera.Height.GetMax() - camera.Height.GetMin()) / camera.Height.GetInc();
// Set the parameters for step 0; quarter height image.
camera.SequenceSetIndex = 0;
camera.Height.SetValue( camera.Height.GetInc() * (increments / 4));
camera.SequenceSetStore.Execute();
// Set the parameters for step 1; half height image.
camera.SequenceSetIndex = 1;
camera.Height.SetValue( camera.Height.GetInc() * (increments / 2));
camera.SequenceSetStore.Execute();
// Set the parameters for step 2; full height image.
camera.SequenceSetIndex = 2;
camera.Height.SetValue( camera.Height.GetInc() * increments);
camera.SequenceSetStore.Execute();
// Finish configuration
if (IsWritable(camera.SequenceConfigurationMode))
{
camera.SequenceConfigurationMode.SetValue(SequenceConfigurationMode_Off);
}
// Enable the sequencer feature.
// From here on you cannot change the sequencer settings anymore.
camera.SequenceEnable.SetValue(true);
// Start the grabbing of c_countOfImagesToGrab images.
camera.StartGrabbing( c_countOfImagesToGrab);
// This smart pointer will receive the grab result data.
CGrabResultPtr grabResult;
// Camera.StopGrabbing() is called automatically by the RetrieveResult() method
// when c_countOfImagesToGrab images have been retrieved.
while ( camera.IsGrabbing())
{
// Execute the software trigger. Wait up to 100 ms for the camera to be ready for trigger.
if ( camera.WaitForFrameTriggerReady( 100, TimeoutHandling_ThrowException))
{
camera.ExecuteSoftwareTrigger();
// Wait for an image and then retrieve it. A timeout of 5000 ms is used.
camera.RetrieveResult( 5000, grabResult, TimeoutHandling_ThrowException);
// Image grabbed successfully?
if (grabResult->GrabSucceeded())
{
#ifdef PYLON_WIN_BUILD
// Display the grabbed image.
Pylon::DisplayImage(1, grabResult);
#endif
// Access the image data.
cout << "SizeX: " << grabResult->GetWidth() << endl;
cout << "SizeY: " << grabResult->GetHeight() << endl;
const uint8_t *pImageBuffer = (uint8_t *) grabResult->GetBuffer();
cout << "Gray value of first pixel: " << (uint32_t) pImageBuffer[0] << endl << endl;
}
else
{
cout << "Error: " << grabResult->GetErrorCode() << " " << grabResult->GetErrorDescription() << endl;
}
}
// Wait for user input.
cerr << endl << "Press enter to continue." << endl << endl;
while( camera.IsGrabbing() && cin.get() != '\n');
}
// Disable the sequencer.
camera.SequenceEnable.SetValue(false);
}
else
{
cout << "The sequencer feature is not available for this camera."<< endl;
}
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}
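
When debugging a sequencer setup like the one above, it can help to read a stored set back into the active parameters. A short sketch (fragment), assuming the same GigE camera object and that the SequenceSetLoad command is available; run it while the sequencer is disabled and configuration mode is on:

// Sketch: verify what was stored in sequence set 1.
camera.SequenceSetIndex = 1;
camera.SequenceSetLoad.Execute(); // Copies set 1 back into the active parameters.
cout << "Height stored in set 1: " << camera.Height.GetValue() << endl;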

Grab_UsingSequencer_Usb

// Grab_UsingSequencer_Usb.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample shows how to grab images using the sequencer feature of a camera.
Three sequence sets are used for image acquisition. Each sequence set
uses a different image height.
*/
// Include files to use the PYLON API
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
# include <pylon/PylonGUI.h>
#endif
using namespace Pylon;
#if defined ( USE_USB )
// Settings for using Basler USB cameras.
#include <pylon/usb/BaslerUsbInstantCamera.h>
typedef Pylon::CBaslerUsbInstantCamera Camera_t;
using namespace Basler_UsbCameraParams;
#else
#error Camera type is not specified. Define USE_USB for using USB cameras.
#endif
// Namespace for using cout
using namespace std;
// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 10;
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Only look for cameras supported by Camera_t.
CDeviceInfo info;
info.SetDeviceClass(Camera_t::DeviceClass());
// Create an instant camera object with the first found camera device that matches the specified device class.
Camera_t camera(CTlFactory::GetInstance().CreateFirstDevice(info));
// Print the model name of the camera.
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
// Register the standard configuration event handler for enabling software triggering.
// The software trigger configuration handler replaces the default configuration
// as all currently registered configuration handlers are removed by setting the registration mode to RegistrationMode_ReplaceAll.
camera.RegisterConfiguration( new CSoftwareTriggerConfiguration, RegistrationMode_ReplaceAll, Cleanup_Delete);
// Open the camera.
camera.Open();
if (GenApi::IsAvailable(camera.SequencerMode))
{
// Disable the sequencer before changing parameters.
// The parameters under control of the sequencer are locked
// when the sequencer is enabled. For a list of parameters
// controlled by the sequencer, see the camera User's Manual.
camera.SequencerMode.SetValue(SequencerMode_Off);
// Maximize the image area of interest (Image AOI).
if (IsWritable(camera.OffsetX))
{
camera.OffsetX.SetValue(camera.OffsetX.GetMin());
}
if (IsWritable(camera.OffsetY))
{
camera.OffsetY.SetValue(camera.OffsetY.GetMin());
}
camera.Width.SetValue(camera.Width.GetMax());
camera.Height.SetValue(camera.Height.GetMax());
// Set the pixel data format.
// This parameter may be locked when the sequencer is enabled.
camera.PixelFormat.SetValue(PixelFormat_Mono8);
// Set up sequence sets and turn sequencer configuration mode on.
camera.SequencerConfigurationMode.SetValue(SequencerConfigurationMode_On);
// Configure how the sequence will advance.
// The sequence sets relate to three steps (0..2).
// In each step, the height of the Image AOI is doubled.
const int64_t increments = (camera.Height.GetMax() - camera.Height.GetMin()) / camera.Height.GetInc();
const int64_t initialSet = camera.SequencerSetSelector.GetMin();
const int64_t incSet = camera.SequencerSetSelector.GetInc();
int64_t curSet = initialSet;
// Set the parameters for step 0; quarter height image.
camera.SequencerSetSelector.SetValue(initialSet);
{ // valid for all sets
// reset on software signal 1;
camera.SequencerPathSelector.SetValue(0);
camera.SequencerSetNext.SetValue(initialSet);
camera.SequencerTriggerSource.SetValue(SequencerTriggerSource_SoftwareSignal1);
// advance on Frame Start
camera.SequencerPathSelector.SetValue(1);
camera.SequencerTriggerSource.SetValue(SequencerTriggerSource_FrameStart);
}
camera.SequencerSetNext.SetValue(curSet + incSet);
// quarter height
camera.Height.SetValue(camera.Height.GetInc() * (increments / 4));
camera.SequencerSetSave.Execute();
// Set the parameters for step 1; half height image.
curSet += incSet;
camera.SequencerSetSelector.SetValue(curSet);
// advance on Frame Start to next set
camera.SequencerSetNext.SetValue(curSet + incSet);
// half height
camera.Height.SetValue(camera.Height.GetInc() * (increments / 2));
camera.SequencerSetSave.Execute();
// Set the parameters for step 2; full height image.
curSet += incSet;
camera.SequencerSetSelector.SetValue(curSet);
// advance on Frame End to initial set,
camera.SequencerSetNext.SetValue(initialSet); // terminates sequence definition
// full height
camera.Height.SetValue(camera.Height.GetInc() * increments);
camera.SequencerSetSave.Execute();
// Enable the sequencer feature.
// From here on you cannot change the sequencer settings anymore.
camera.SequencerConfigurationMode.SetValue(SequencerConfigurationMode_Off);
camera.SequencerMode.SetValue(SequencerMode_On);
// Start the grabbing of c_countOfImagesToGrab images.
camera.StartGrabbing(c_countOfImagesToGrab);
// This smart pointer will receive the grab result data.
CGrabResultPtr grabResult;
// Camera.StopGrabbing() is called automatically by the RetrieveResult() method
// when c_countOfImagesToGrab images have been retrieved.
while (camera.IsGrabbing())
{
// Execute the software trigger. Wait up to 100 ms for the camera to be ready for trigger.
if (camera.WaitForFrameTriggerReady(100, TimeoutHandling_ThrowException))
{
camera.ExecuteSoftwareTrigger();
// Wait for an image and then retrieve it. A timeout of 5000 ms is used.
camera.RetrieveResult(5000, grabResult, TimeoutHandling_ThrowException);
// Image grabbed successfully?
if (grabResult->GrabSucceeded())
{
#ifdef PYLON_WIN_BUILD
// Display the grabbed image.
Pylon::DisplayImage(1, grabResult);
#endif
// Access the image data.
cout << "SizeX: " << grabResult->GetWidth() << endl;
cout << "SizeY: " << grabResult->GetHeight() << endl;
const uint8_t *pImageBuffer = (uint8_t *)grabResult->GetBuffer();
cout << "Gray value of first pixel: " << (uint32_t)pImageBuffer[0] << endl << endl;
}
else
{
cout << "Error: " << grabResult->GetErrorCode() << " " << grabResult->GetErrorDescription() << endl;
}
}
// Wait for user input.
cerr << endl << "Press enter to continue." << endl << endl;
while (camera.IsGrabbing() && cin.get() != '\n');
}
// Disable the sequencer.
camera.SequencerMode.SetValue(SequencerMode_Off);
}
else
{
cout << "The sequencer feature is not available for this camera." << endl;
}
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while (cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}
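
Path 0 above is configured to reset the sequence on software signal 1. A sketch (fragment) of issuing that signal at runtime, assuming the camera exposes the SFNC software signal nodes (SoftwareSignalSelector and SoftwareSignalPulse):

// Sketch: fire software signal 1 to reset the sequencer to its initial set.
camera.SoftwareSignalSelector.SetValue(SoftwareSignalSelector_SoftwareSignal1);
camera.SoftwareSignalPulse.Execute();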

ParametrizeCamera_AutoFunctions

// ParametrizeCamera_AutoFunctions.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample illustrates how to use the Auto Functions feature of Basler cameras.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
# include <pylon/PylonGUI.h>
#endif
// Namespace for using pylon objects.
using namespace Pylon;
// Namespace for using cout.
using namespace std;
#if defined( USE_1394 )
// Settings to use Basler IEEE 1394 cameras.
#include <pylon/1394/Basler1394InstantCamera.h>
typedef Pylon::CBasler1394InstantCamera Camera_t;
using namespace Basler_IIDC1394CameraParams;
#elif defined ( USE_GIGE )
// Settings to use Basler GigE cameras.
#include <pylon/gige/BaslerGigEInstantCamera.h>
typedef Pylon::CBaslerGigEInstantCamera Camera_t;
using namespace Basler_GigECameraParams;
#else
#error Camera type is not specified. For example, define USE_GIGE for using GigE cameras.
#endif
// The camera specific grab result smart pointer.
typedef Camera_t::GrabResultPtr_t GrabResultPtr_t;
bool IsColorCamera(Camera_t& camera);
void AutoGainOnce(Camera_t& camera);
void AutoGainContinuous(Camera_t& camera);
void AutoExposureOnce(Camera_t& camera);
void AutoExposureContinuous(Camera_t& camera);
void AutoWhiteBalance(Camera_t& camera);
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Only look for cameras supported by Camera_t.
CDeviceInfo info;
info.SetDeviceClass( Camera_t::DeviceClass());
// Create an instant camera object with the first found camera device that matches the specified device class.
Camera_t camera( CTlFactory::GetInstance().CreateFirstDevice( info));
// Print the name of the used camera.
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
// Register the standard event handler for configuring single frame acquisition.
// This overrides the default configuration as all event handlers are removed by setting the registration mode to RegistrationMode_ReplaceAll.
// Please note that the camera device auto functions do not require grabbing by single frame acquisition.
// All available acquisition modes can be used.
camera.RegisterConfiguration( new CAcquireSingleFrameConfiguration, RegistrationMode_ReplaceAll, Cleanup_Delete);
// Open the camera.
camera.Open();
// Turn test image off.
camera.TestImageSelector = TestImageSelector_Off;
// Only area scan cameras support auto functions.
if (camera.DeviceScanType.GetValue() == DeviceScanType_Areascan)
{
// All area scan cameras support luminance control.
// Carry out luminance control by using the "once" gain auto function.
// For demonstration purposes only, set the gain to an initial value.
camera.GainRaw.SetValue( camera.GainRaw.GetMax());
AutoGainOnce(camera);
cerr << endl << "Press Enter to continue." << endl;
while( cin.get() != '\n');
// Carry out luminance control by using the "continuous" gain auto function.
// For demonstration purposes only, set the gain to an initial value.
camera.GainRaw.SetValue( camera.GainRaw.GetMax());
AutoGainContinuous(camera);
cerr << endl << "Press Enter to continue." << endl;
while( cin.get() != '\n');
// For demonstration purposes only, set the exposure time to an initial value.
camera.ExposureTimeRaw.SetValue( camera.ExposureTimeRaw.GetMin());
// Carry out luminance control by using the "once" exposure auto function.
AutoExposureOnce(camera);
cerr << endl << "Press Enter to continue." << endl;
while( cin.get() != '\n');
// For demonstration purposes only, set the exposure time to an initial value.
camera.ExposureTimeRaw.SetValue( camera.ExposureTimeRaw.GetMin());
// Carry out luminance control by using the "continuous" exposure auto function.
AutoExposureContinuous(camera);
// Only color cameras support the balance white auto function.
if (IsColorCamera(camera))
{
cerr << endl << "Press Enter to continue." << endl;
while( cin.get() != '\n');
// For demonstration purposes only, set the initial balance ratio values:
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Red);
camera.BalanceRatioAbs.SetValue(3.14);
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Green);
camera.BalanceRatioAbs.SetValue(0.5);
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Blue);
camera.BalanceRatioAbs.SetValue(0.125);
// Carry out white balance using the balance white auto function.
AutoWhiteBalance(camera);
}
}
else
{
cerr << "Only area scan cameras support auto functions." << endl;
}
// Close camera.
camera.Close();
}
catch (const TimeoutException &e)
{
// Auto functions did not finish in time.
// Maybe the cap on the lens is still on or there is not enough light.
cerr << "A timeout has occurred." << endl
<< e.GetDescription() << endl;
cerr << "Please make sure you remove the cap from the camera lens before running this sample." << endl;
exitCode = 0;
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}
void AutoGainOnce(Camera_t& camera)
{
// Check whether the gain auto function is available.
if ( !IsWritable( camera.GainAuto))
{
cout << "The camera does not support Gain Auto." << endl << endl;
return;
}
// Maximize the grabbed image area of interest (Image AOI).
if (IsWritable(camera.OffsetX))
{
camera.OffsetX.SetValue(camera.OffsetX.GetMin());
}
if (IsWritable(camera.OffsetY))
{
camera.OffsetY.SetValue(camera.OffsetY.GetMin());
}
camera.Width.SetValue(camera.Width.GetMax());
camera.Height.SetValue(camera.Height.GetMax());
// Set the Auto Function AOI for luminance statistics.
// Currently, AutoFunctionAOISelector_AOI1 is predefined to gather
// luminance statistics.
camera.AutoFunctionAOISelector.SetValue(AutoFunctionAOISelector_AOI1);
camera.AutoFunctionAOIOffsetX.SetValue(0);
camera.AutoFunctionAOIOffsetY.SetValue(0);
camera.AutoFunctionAOIWidth.SetValue(camera.Width.GetMax());
camera.AutoFunctionAOIHeight.SetValue(camera.Height.GetMax());
// Set the target value for luminance control. The value is always expressed
// as an 8 bit value regardless of the current pixel data output format,
// i.e., 0 -> black, 255 -> white.
camera.AutoTargetValue.SetValue(80);
// We are going to try GainAuto = Once.
cout << "Trying 'GainAuto = Once'." << endl;
cout << "Initial Gain = " << camera.GainRaw.GetValue() << endl;
// Set the gain ranges for luminance control.
camera.AutoGainRawLowerLimit.SetValue(camera.GainRaw.GetMin());
camera.AutoGainRawUpperLimit.SetValue(camera.GainRaw.GetMax());
camera.GainAuto.SetValue(GainAuto_Once);
// When the "once" mode of operation is selected,
// the parameter values are automatically adjusted until the related image property
// reaches the target value. After the automatic parameter value adjustment is complete, the auto
// function will automatically be set to "off" and the new parameter value will be applied to the
// subsequently grabbed images.
int n = 0;
while (camera.GainAuto.GetValue() != GainAuto_Off)
{
GrabResultPtr_t ptrGrabResult;
camera.GrabOne( 5000, ptrGrabResult);
#ifdef PYLON_WIN_BUILD
Pylon::DisplayImage(1, ptrGrabResult);
#endif
++n;
//For demonstration purposes only. Wait until the image is shown.
//Make sure the loop is exited.
if (n > 100)
{
throw TIMEOUT_EXCEPTION( "The adjustment of auto gain did not finish.");
}
}
cout << "GainAuto went back to 'Off' after " << n << " frames." << endl;
cout << "Final Gain = " << camera.GainRaw.GetValue() << endl << endl;
}
void AutoGainContinuous(Camera_t& camera)
{
// Check whether the Gain Auto feature is available.
if ( !IsWritable( camera.GainAuto))
{
cout << "The camera does not support Gain Auto." << endl << endl;
return;
}
// Maximize the grabbed image area of interest (Image AOI).
if (IsWritable(camera.OffsetX))
{
camera.OffsetX.SetValue(camera.OffsetX.GetMin());
}
if (IsWritable(camera.OffsetY))
{
camera.OffsetY.SetValue(camera.OffsetY.GetMin());
}
camera.Width.SetValue(camera.Width.GetMax());
camera.Height.SetValue(camera.Height.GetMax());
// Set the Auto Function AOI for luminance statistics.
// Currently, AutoFunctionAOISelector_AOI1 is predefined to gather
// luminance statistics.
camera.AutoFunctionAOISelector.SetValue(AutoFunctionAOISelector_AOI1);
camera.AutoFunctionAOIOffsetX.SetValue(0);
camera.AutoFunctionAOIOffsetY.SetValue(0);
camera.AutoFunctionAOIWidth.SetValue(camera.Width.GetMax());
camera.AutoFunctionAOIHeight.SetValue(camera.Height.GetMax());
// Set the target value for luminance control. The value is always expressed
// as an 8 bit value regardless of the current pixel data output format,
// i.e., 0 -> black, 255 -> white.
camera.AutoTargetValue.SetValue(80);
// We are trying GainAuto = Continuous.
cout << "Trying 'GainAuto = Continuous'." << endl;
cout << "Initial Gain = " << camera.GainRaw.GetValue() << endl;
camera.GainAuto.SetValue(GainAuto_Continuous);
// When "continuous" mode is selected, the parameter value is adjusted repeatedly while images are acquired.
// Depending on the current frame rate, the automatic adjustments will usually be carried out for
// every or every other image unless the camera's microcontroller is kept busy by other tasks.
// The repeated automatic adjustment will proceed until the "once" mode of operation is used or
// until the auto function is set to "off", in which case the parameter value resulting from the latest
// automatic adjustment will operate unless the value is manually adjusted.
for (int n = 0; n < 20; n++) // For demonstration purposes, we will grab "only" 20 images.
{
GrabResultPtr_t ptrGrabResult;
camera.GrabOne( 5000, ptrGrabResult);
#ifdef PYLON_WIN_BUILD
Pylon::DisplayImage(1, ptrGrabResult);
#endif
//For demonstration purposes only. Wait until the image is shown.
}
camera.GainAuto.SetValue(GainAuto_Off); // Switch off GainAuto.
cout << "Final Gain = " << camera.GainRaw.GetValue() << endl << endl;
}
void AutoExposureOnce(Camera_t& camera)
{
// Check whether auto exposure is available
if ( !IsWritable( camera.ExposureAuto))
{
cout << "The camera does not support Exposure Auto." << endl << endl;
return;
}
// Maximize the grabbed area of interest (Image AOI).
if (IsWritable(camera.OffsetX))
{
camera.OffsetX.SetValue(camera.OffsetX.GetMin());
}
if (IsWritable(camera.OffsetY))
{
camera.OffsetY.SetValue(camera.OffsetY.GetMin());
}
camera.Width.SetValue(camera.Width.GetMax());
camera.Height.SetValue(camera.Height.GetMax());
// Set the Auto Function AOI for luminance statistics.
// Currently, AutoFunctionAOISelector_AOI1 is predefined to gather
// luminance statistics.
camera.AutoFunctionAOISelector.SetValue(AutoFunctionAOISelector_AOI1);
camera.AutoFunctionAOIOffsetX.SetValue(0);
camera.AutoFunctionAOIOffsetY.SetValue(0);
camera.AutoFunctionAOIWidth.SetValue(camera.Width.GetMax());
camera.AutoFunctionAOIHeight.SetValue(camera.Height.GetMax());
// Set the target value for luminance control. The value is always expressed
// as an 8 bit value regardless of the current pixel data output format,
// i.e., 0 -> black, 255 -> white.
camera.AutoTargetValue.SetValue(80);
// Try ExposureAuto = Once.
cout << "Trying 'ExposureAuto = Once'." << endl;
cout << "Initial exposure time = ";
cout << camera.ExposureTimeAbs.GetValue() << " us" << endl;
// Set the exposure time ranges for luminance control.
camera.AutoExposureTimeAbsLowerLimit.SetValue(camera.AutoExposureTimeAbsLowerLimit.GetMin());
camera.AutoExposureTimeAbsUpperLimit.SetValue(camera.AutoExposureTimeAbsLowerLimit.GetMax());
camera.ExposureAuto.SetValue(ExposureAuto_Once);
// When the "once" mode of operation is selected,
// the parameter values are automatically adjusted until the related image property
// reaches the target value. After the automatic parameter value adjustment is complete, the auto
// function will automatically be set to "off", and the new parameter value will be applied to the
// subsequently grabbed images.
int n = 0;
while (camera.ExposureAuto.GetValue() != ExposureAuto_Off)
{
GrabResultPtr_t ptrGrabResult;
camera.GrabOne( 5000, ptrGrabResult);
#ifdef PYLON_WIN_BUILD
Pylon::DisplayImage(1, ptrGrabResult);
#endif
++n;
//For demonstration purposes only. Wait until the image is shown.
//Make sure the loop is exited.
if (n > 100)
{
throw TIMEOUT_EXCEPTION( "The adjustment of auto exposure did not finish.");
}
}
cout << "ExposureAuto went back to 'Off' after " << n << " frames." << endl;
cout << "Final exposure time = ";
cout << camera.ExposureTimeAbs.GetValue() << " us" << endl << endl;
}
void AutoExposureContinuous(Camera_t& camera)
{
// Check whether the Exposure Auto feature is available.
if ( !IsWritable( camera.ExposureAuto))
{
cout << "The camera does not support Exposure Auto." << endl << endl;
return;
}
// Maximize the grabbed area of interest (Image AOI).
if (IsWritable(camera.OffsetX))
{
camera.OffsetX.SetValue(camera.OffsetX.GetMin());
}
if (IsWritable(camera.OffsetY))
{
camera.OffsetY.SetValue(camera.OffsetY.GetMin());
}
camera.Width.SetValue(camera.Width.GetMax());
camera.Height.SetValue(camera.Height.GetMax());
// Set the Auto Function AOI for luminance statistics.
// Currently, AutoFunctionAOISelector_AOI1 is predefined to gather
// luminance statistics.
camera.AutoFunctionAOISelector.SetValue(AutoFunctionAOISelector_AOI1);
camera.AutoFunctionAOIOffsetX.SetValue(0);
camera.AutoFunctionAOIOffsetY.SetValue(0);
camera.AutoFunctionAOIWidth.SetValue(camera.Width.GetMax());
camera.AutoFunctionAOIHeight.SetValue(camera.Height.GetMax());
// Set the target value for luminance control. The value is always expressed
// as an 8 bit value regardless of the current pixel data output format,
// i.e., 0 -> black, 255 -> white.
camera.AutoTargetValue.SetValue(80);
cout << "ExposureAuto 'GainAuto = Continuous'." << endl;
cout << "Initial exposure time = ";
cout << camera.ExposureTimeAbs.GetValue() << " us" << endl;
camera.ExposureAuto.SetValue(ExposureAuto_Continuous);
// When "continuous" mode is selected, the parameter value is adjusted repeatedly while images are acquired.
// Depending on the current frame rate, the automatic adjustments will usually be carried out for
// every or every other image, unless the camera's microcontroller is kept busy by other tasks.
// The repeated automatic adjustment will proceed until the "once" mode of operation is used or
// until the auto function is set to "off", in which case the parameter value resulting from the latest
// automatic adjustment will operate unless the value is manually adjusted.
for (int n = 0; n < 20; n++) // For demonstration purposes, we will use only 20 images.
{
GrabResultPtr_t ptrGrabResult;
camera.GrabOne( 5000, ptrGrabResult);
#ifdef PYLON_WIN_BUILD
Pylon::DisplayImage(1, ptrGrabResult);
#endif
//For demonstration purposes only. Wait until the image is shown.
}
camera.ExposureAuto.SetValue(ExposureAuto_Off); // Switch off Exposure Auto.
cout << "Final exposure time = ";
cout << camera.ExposureTimeAbs.GetValue() << " us" << endl << endl;
}
void AutoWhiteBalance(Camera_t& camera)
{
// Check whether the Balance White Auto feature is available.
if ( !IsWritable( camera.BalanceWhiteAuto))
{
cout << "The camera does not support Balance White Auto." << endl << endl;
return;
}
// Maximize the grabbed area of interest (Image AOI).
if (IsWritable(camera.OffsetX))
{
camera.OffsetX.SetValue(camera.OffsetX.GetMin());
}
if (IsWritable(camera.OffsetY))
{
camera.OffsetY.SetValue(camera.OffsetY.GetMin());
}
camera.Width.SetValue(camera.Width.GetMax());
camera.Height.SetValue(camera.Height.GetMax());
// Set the Auto Function AOI for white balance statistics.
// Currently, AutoFunctionAOISelector_AOI2 is predefined to gather
// white balance statistics.
camera.AutoFunctionAOISelector.SetValue(AutoFunctionAOISelector_AOI2);
camera.AutoFunctionAOIOffsetX.SetValue(0);
camera.AutoFunctionAOIOffsetY.SetValue(0);
camera.AutoFunctionAOIWidth.SetValue(camera.Width.GetMax());
camera.AutoFunctionAOIHeight.SetValue(camera.Height.GetMax());
cout << "Trying 'BalanceWhiteAuto = Once'." << endl;
cout << "Initial balance ratio: ";
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Red);
cout << "R = " << camera.BalanceRatioAbs.GetValue() << " ";
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Green);
cout << "G = " << camera.BalanceRatioAbs.GetValue() << " ";
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Blue);
cout << "B = " << camera.BalanceRatioAbs.GetValue() << endl;
camera.BalanceWhiteAuto.SetValue(BalanceWhiteAuto_Once);
// When the "once" mode of operation is selected,
// the parameter values are automatically adjusted until the related image property
// reaches the target value. After the automatic parameter value adjustment is complete, the auto
// function will automatically be set to "off" and the new parameter value will be applied to the
// subsequently grabbed images.
int n = 0;
while (camera.BalanceWhiteAuto.GetValue() != BalanceWhiteAuto_Off)
{
GrabResultPtr_t ptrGrabResult;
camera.GrabOne( 5000, ptrGrabResult);
#ifdef PYLON_WIN_BUILD
Pylon::DisplayImage(1, ptrGrabResult);
#endif
++n;
//For demonstration purposes only. Wait until the image is shown.
//Make sure the loop is exited.
if (n > 100)
{
throw TIMEOUT_EXCEPTION( "The adjustment of auto white balance did not finish.");
}
}
cout << "BalanceWhiteAuto went back to 'Off' after ";
cout << n << " frames." << endl;
cout << "Final balance ratio: ";
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Red);
cout << "R = " << camera.BalanceRatioAbs.GetValue() << " ";
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Green);
cout << "G = " << camera.BalanceRatioAbs.GetValue() << " ";
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Blue);
cout << "B = " << camera.BalanceRatioAbs.GetValue() << endl;
}
bool IsColorCamera(Camera_t& camera)
{
GenApi::NodeList_t Entries;
camera.PixelFormat.GetEntries(Entries);
bool Result = false;
for (size_t i = 0; i < Entries.size(); i++)
{
GenApi::INode *pNode = Entries[i];
if (IsAvailable(pNode->GetAccessMode()))
{
GenApi::IEnumEntry *pEnum = dynamic_cast<GenApi::IEnumEntry *>(pNode);
const GenICam::gcstring sym(pEnum->GetSymbolic());
if (sym.find(GenICam::gcstring("Bayer")) != GenICam::gcstring::_npos())
{
Result = true;
break;
}
}
}
return Result;
}
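
The helper functions above always gather luminance statistics over the full sensor. To weight the automatic adjustment toward the image center instead, the same Auto Function AOI parameters can describe a smaller, centered region. A sketch (fragment) using the GigE parameter names from this sample; the camera may round offsets and sizes to valid increments:

// Sketch: use a centered region of half the sensor size for luminance statistics.
int64_t aoiWidth = camera.Width.GetMax() / 2;
int64_t aoiHeight = camera.Height.GetMax() / 2;
camera.AutoFunctionAOISelector.SetValue(AutoFunctionAOISelector_AOI1);
camera.AutoFunctionAOIWidth.SetValue(aoiWidth);
camera.AutoFunctionAOIHeight.SetValue(aoiHeight);
camera.AutoFunctionAOIOffsetX.SetValue((camera.Width.GetMax() - aoiWidth) / 2);
camera.AutoFunctionAOIOffsetY.SetValue((camera.Height.GetMax() - aoiHeight) / 2);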

ParametrizeCamera_AutoFunctions_Usb

// ParametrizeCamera_AutoFunctions_Usb.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample illustrates how to use the Auto Functions feature of Basler USB cameras.
Features, like 'Gain', are named according to the Standard Feature Naming Convention (SFNC).
The SFNC defines a common set of features, their behavior, and the related parameter names.
This ensures the interoperability of cameras from different camera vendors. Cameras compliant
with the USB 3 Vision standard are based on the SFNC version 2.0.
Basler GigE and Firewire cameras are based on previous SFNC versions.
Accordingly, the behavior of these cameras and some parameter names will be different.
That's why this sample is different from the sample for Firewire and GigE cameras in
ParametrizeCamera_AutoFunctions.cpp.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
# include <pylon/PylonGUI.h>
#endif
// Namespace for using pylon objects.
using namespace Pylon;
// Namespace for using cout.
using namespace std;
#if defined( USE_BCON )
// Settings to use Basler BCON cameras.
#include <pylon/bcon/BaslerBconInstantCamera.h>
typedef Pylon::CBaslerBconInstantCamera Camera_t;
using namespace Basler_BconCameraParams;
#else
// Settings for using Basler USB cameras.
#include <pylon/usb/BaslerUsbInstantCamera.h>
typedef Pylon::CBaslerUsbInstantCamera Camera_t;
using namespace Basler_UsbCameraParams;
#endif
// The camera specific grab result smart pointer.
typedef Camera_t::GrabResultPtr_t GrabResultPtr_t;
bool IsColorCamera(Camera_t& camera);
void AutoGainOnce(Camera_t& camera);
void AutoGainContinuous(Camera_t& camera);
void AutoExposureOnce(Camera_t& camera);
void AutoExposureContinuous(Camera_t& camera);
void AutoWhiteBalance(Camera_t& camera);
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Only look for cameras supported by Camera_t.
CDeviceInfo info;
info.SetDeviceClass( Camera_t::DeviceClass());
// Create an instant camera object with the first found camera device that matches the specified device class.
Camera_t camera( CTlFactory::GetInstance().CreateFirstDevice( info));
// Print the name of the used camera.
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
// Check to see if the camera supports the AutoFunction ROI parameters.
// Former firmware versions supporting the AutoFunctionAOI parameters are no longer supported by this sample.
if (!IsAvailable(camera.AutoFunctionROISelector) && IsAvailable(camera.GetNodeMap().GetNode("AutoFunctionAOISelector")))
{
cout << "This camera only supports the deprecated AutoFunctionAOIxxxx camera parameters." << endl;
cout << "If you want to configure the regions used by the auto functions on this camera, use" << endl;
cout << "the AutoFunctionAOIxxxx parameters instead of the AutoFunctionROIxxxx parameters." << endl << endl;
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return 0;
}
// Register the standard event handler for configuring single frame acquisition.
// This overrides the default configuration as all event handlers are removed by setting the registration mode to RegistrationMode_ReplaceAll.
// Please note that the camera device auto functions do not require grabbing by single frame acquisition.
// All available acquisition modes can be used.
camera.RegisterConfiguration( new CAcquireSingleFrameConfiguration, RegistrationMode_ReplaceAll, Cleanup_Delete);
// Open the camera.
camera.Open();
// Turn test image off.
#if defined( USE_BCON )
camera.TestPattern = TestPattern_Off;
#else
// Handle test image selector for different USB camera models.
if (IsAvailable(camera.TestImageSelector))
{
camera.TestImageSelector = TestImageSelector_Off;
}
if (IsAvailable(camera.TestPattern))
{
camera.TestPattern = TestPattern_Off;
}
#endif
// Only area scan cameras support auto functions.
if (camera.DeviceScanType.GetValue() == DeviceScanType_Areascan)
{
// All area scan cameras support luminance control.
// Carry out luminance control by using the "once" gain auto function.
// For demonstration purposes only, set the gain to an initial value.
camera.Gain.SetValue( camera.Gain.GetMax());
AutoGainOnce(camera);
cerr << endl << "Press Enter to continue." << endl;
while( cin.get() != '\n');
// Carry out luminance control by using the "continuous" gain auto function.
// For demonstration purposes only, set the gain to an initial value.
camera.Gain.SetValue( camera.Gain.GetMax());
AutoGainContinuous(camera);
cerr << endl << "Press Enter to continue." << endl;
while( cin.get() != '\n');
// For demonstration purposes only, set the exposure time to an initial value.
camera.ExposureTime.SetValue( camera.ExposureTime.GetMin());
// Carry out luminance control by using the "once" exposure auto function.
AutoExposureOnce(camera);
cerr << endl << "Press Enter to continue." << endl;
while( cin.get() != '\n');
// For demonstration purposes only, set the exposure time to an initial value.
camera.ExposureTime.SetValue( camera.ExposureTime.GetMin());
// Carry out luminance control by using the "continuous" exposure auto function.
AutoExposureContinuous(camera);
// Only color cameras support the balance white auto function.
if (IsColorCamera(camera))
{
cerr << endl << "Press Enter to continue." << endl;
while( cin.get() != '\n');
// For demonstration purposes only, set the initial balance ratio values:
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Red);
camera.BalanceRatio.SetValue(3.14);
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Green);
camera.BalanceRatio.SetValue(0.5);
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Blue);
camera.BalanceRatio.SetValue(0.125);
// Carry out white balance using the balance white auto function.
AutoWhiteBalance(camera);
}
}
else
{
cerr << "Only area scan cameras support auto functions." << endl;
}
// Close camera.
camera.Close();
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}
void AutoGainOnce(Camera_t& camera)
{
// Check whether the gain auto function is available.
if ( !IsWritable( camera.GainAuto))
{
cout << "The camera does not support Gain Auto." << endl << endl;
return;
}
// Maximize the grabbed image area of interest (Image AOI).
if (IsWritable(camera.OffsetX))
{
camera.OffsetX.SetValue(camera.OffsetX.GetMin());
}
if (IsWritable(camera.OffsetY))
{
camera.OffsetY.SetValue(camera.OffsetY.GetMin());
}
camera.Width.SetValue(camera.Width.GetMax());
camera.Height.SetValue(camera.Height.GetMax());
if(IsAvailable(camera.AutoFunctionROISelector))
{
// Set the Auto Function ROI for luminance statistics.
// We want to use ROI1 for gathering the statistics
if (IsWritable(camera.AutoFunctionROIUseBrightness))
{
camera.AutoFunctionROISelector.SetValue(AutoFunctionROISelector_ROI1);
camera.AutoFunctionROIUseBrightness.SetValue(true); // ROI 1 is used for brightness control
camera.AutoFunctionROISelector.SetValue(AutoFunctionROISelector_ROI2);
camera.AutoFunctionROIUseBrightness.SetValue(false); // ROI 2 is not used for brightness control
}
// Set the ROI (in this example the complete sensor is used)
camera.AutoFunctionROISelector.SetValue(AutoFunctionROISelector_ROI1); // configure ROI 1
camera.AutoFunctionROIOffsetX.SetValue(0);
camera.AutoFunctionROIOffsetY.SetValue(0);
camera.AutoFunctionROIWidth.SetValue(camera.Width.GetMax());
camera.AutoFunctionROIHeight.SetValue(camera.Height.GetMax());
}
// Set the target value for luminance control.
// A value of 0.3 means that the target brightness is 30 % of the maximum brightness of the raw pixel value read out from the sensor.
// A value of 0.4 means 40 % and so forth.
camera.AutoTargetBrightness.SetValue(0.3);
// We are going to try GainAuto = Once.
cout << "Trying 'GainAuto = Once'." << endl;
cout << "Initial Gain = " << camera.Gain.GetValue() << endl;
// Set the gain ranges for luminance control.
camera.AutoGainLowerLimit.SetValue(camera.Gain.GetMin());
camera.AutoGainUpperLimit.SetValue(camera.Gain.GetMax());
camera.GainAuto.SetValue(GainAuto_Once);
// When the "once" mode of operation is selected,
// the parameter values are automatically adjusted until the related image property
// reaches the target value. After the automatic parameter value adjustment is complete, the auto
// function will automatically be set to "off" and the new parameter value will be applied to the
// subsequently grabbed images.
int n = 0;
while (camera.GainAuto.GetValue() != GainAuto_Off)
{
GrabResultPtr_t ptrGrabResult;
camera.GrabOne( 5000, ptrGrabResult);
#ifdef PYLON_WIN_BUILD
Pylon::DisplayImage(1, ptrGrabResult);
#endif
++n;
//For demonstration purposes only. Wait until the image is shown.
//Make sure the loop is exited.
if (n > 100)
{
throw RUNTIME_EXCEPTION( "The adjustment of auto gain did not finish.");
}
}
cout << "GainAuto went back to 'Off' after " << n << " frames." << endl;
cout << "Final Gain = " << camera.Gain.GetValue() << endl << endl;
}
void AutoGainContinuous(Camera_t& camera)
{
// Check whether the Gain Auto feature is available.
if ( !IsWritable( camera.GainAuto))
{
cout << "The camera does not support Gain Auto." << endl << endl;
return;
}
// Maximize the grabbed image area of interest (Image AOI).
if (IsWritable(camera.OffsetX))
{
camera.OffsetX.SetValue(camera.OffsetX.GetMin());
}
if (IsWritable(camera.OffsetY))
{
camera.OffsetY.SetValue(camera.OffsetY.GetMin());
}
camera.Width.SetValue(camera.Width.GetMax());
camera.Height.SetValue(camera.Height.GetMax());
if(IsAvailable(camera.AutoFunctionROISelector))
{
// Set the Auto Function ROI for luminance statistics.
// We want to use ROI1 for gathering the statistics.
if (IsWritable(camera.AutoFunctionROIUseBrightness))
{
camera.AutoFunctionROISelector.SetValue(AutoFunctionROISelector_ROI1);
camera.AutoFunctionROIUseBrightness.SetValue(true); // ROI 1 is used for brightness control
camera.AutoFunctionROISelector.SetValue(AutoFunctionROISelector_ROI2);
camera.AutoFunctionROIUseBrightness.SetValue(false); // ROI 2 is not used for brightness control
}
// Set the ROI (in this example the complete sensor is used)
camera.AutoFunctionROISelector.SetValue(AutoFunctionROISelector_ROI1); // configure ROI 1
camera.AutoFunctionROIOffsetX.SetValue(0);
camera.AutoFunctionROIOffsetY.SetValue(0);
camera.AutoFunctionROIWidth.SetValue(camera.Width.GetMax());
camera.AutoFunctionROIHeight.SetValue(camera.Height.GetMax());
}
// Set the target value for luminance control.
// A value of 0.3 means that the target brightness is 30 % of the maximum brightness of the raw pixel value read out from the sensor.
// A value of 0.4 means 40 % and so forth.
camera.AutoTargetBrightness.SetValue(0.3);
// We are trying GainAuto = Continuous.
cout << "Trying 'GainAuto = Continuous'." << endl;
cout << "Initial Gain = " << camera.Gain.GetValue() << endl;
camera.GainAuto.SetValue(GainAuto_Continuous);
// When "continuous" mode is selected, the parameter value is adjusted repeatedly while images are acquired.
// Depending on the current frame rate, the automatic adjustments will usually be carried out for
// every or every other image unless the camera's microcontroller is kept busy by other tasks.
// The repeated automatic adjustment will proceed until the "once" mode of operation is used or
// until the auto function is set to "off", in which case the parameter value resulting from the latest
// automatic adjustment will operate unless the value is manually adjusted.
for (int n = 0; n < 20; n++) // For demonstration purposes, we will grab "only" 20 images.
{
GrabResultPtr_t ptrGrabResult;
camera.GrabOne( 5000, ptrGrabResult);
#ifdef PYLON_WIN_BUILD
Pylon::DisplayImage(1, ptrGrabResult);
#endif
//For demonstration purposes only. Wait until the image is shown.
}
camera.GainAuto.SetValue(GainAuto_Off); // Switch off GainAuto.
cout << "Final Gain = " << camera.Gain.GetValue() << endl << endl;
}
void AutoExposureOnce(Camera_t& camera)
{
// Check whether auto exposure is available
if ( !IsWritable( camera.ExposureAuto))
{
cout << "The camera does not support Exposure Auto." << endl << endl;
return;
}
// Maximize the grabbed area of interest (Image AOI).
if (IsWritable(camera.OffsetX))
{
camera.OffsetX.SetValue(camera.OffsetX.GetMin());
}
if (IsWritable(camera.OffsetY))
{
camera.OffsetY.SetValue(camera.OffsetY.GetMin());
}
camera.Width.SetValue(camera.Width.GetMax());
camera.Height.SetValue(camera.Height.GetMax());
if(IsAvailable(camera.AutoFunctionROISelector))
{
// Set the Auto Function ROI for luminance statistics.
// We want to use ROI1 for gathering the statistics.
if (IsWritable(camera.AutoFunctionROIUseBrightness))
{
camera.AutoFunctionROISelector.SetValue(AutoFunctionROISelector_ROI1);
camera.AutoFunctionROIUseBrightness.SetValue(true); // ROI 1 is used for brightness control
camera.AutoFunctionROISelector.SetValue(AutoFunctionROISelector_ROI2);
camera.AutoFunctionROIUseBrightness.SetValue(false); // ROI 2 is not used for brightness control
}
// Set the ROI (in this example the complete sensor is used)
camera.AutoFunctionROISelector.SetValue(AutoFunctionROISelector_ROI1); // configure ROI 1
camera.AutoFunctionROIOffsetX.SetValue(0);
camera.AutoFunctionROIOffsetY.SetValue(0);
camera.AutoFunctionROIWidth.SetValue(camera.Width.GetMax());
camera.AutoFunctionROIHeight.SetValue(camera.Height.GetMax());
}
// Set the target value for luminance control.
// A value of 0.3 means that the target brightness is 30 % of the maximum brightness of the raw pixel value read out from the sensor.
// A value of 0.4 means 40 % and so forth.
camera.AutoTargetBrightness.SetValue(0.3);
// Try ExposureAuto = Once.
cout << "Trying 'ExposureAuto = Once'." << endl;
cout << "Initial exposure time = ";
cout << camera.ExposureTime.GetValue() << " us" << endl;
// Set the exposure time ranges for luminance control.
camera.AutoExposureTimeLowerLimit.SetValue(camera.AutoExposureTimeLowerLimit.GetMin());
camera.AutoExposureTimeUpperLimit.SetValue(camera.AutoExposureTimeLowerLimit.GetMax());
camera.ExposureAuto.SetValue(ExposureAuto_Once);
// When the "once" mode of operation is selected,
// the parameter values are automatically adjusted until the related image property
// reaches the target value. After the automatic parameter value adjustment is complete, the auto
// function will automatically be set to "off", and the new parameter value will be applied to the
// subsequently grabbed images.
int n = 0;
while (camera.ExposureAuto.GetValue() != ExposureAuto_Off)
{
GrabResultPtr_t ptrGrabResult;
camera.GrabOne( 5000, ptrGrabResult);
#ifdef PYLON_WIN_BUILD
Pylon::DisplayImage(1, ptrGrabResult);
#endif
++n;
//For demonstration purposes only. Wait until the image is shown.
//Make sure the loop is exited.
if (n > 100)
{
throw RUNTIME_EXCEPTION( "The adjustment of auto exposure did not finish.");
}
}
cout << "ExposureAuto went back to 'Off' after " << n << " frames." << endl;
cout << "Final exposure time = ";
cout << camera.ExposureTime.GetValue() << " us" << endl << endl;
}
void AutoExposureContinuous(Camera_t& camera)
{
// Check whether the Exposure Auto feature is available.
if ( !IsWritable( camera.ExposureAuto))
{
cout << "The camera does not support Exposure Auto." << endl << endl;
return;
}
// Maximize the grabbed area of interest (Image AOI).
if (IsWritable(camera.OffsetX))
{
camera.OffsetX.SetValue(camera.OffsetX.GetMin());
}
if (IsWritable(camera.OffsetY))
{
camera.OffsetY.SetValue(camera.OffsetY.GetMin());
}
camera.Width.SetValue(camera.Width.GetMax());
camera.Height.SetValue(camera.Height.GetMax());
if(IsAvailable(camera.AutoFunctionROISelector))
{
// Set the Auto Function ROI for luminance statistics.
// We want to use ROI1 for gathering the statistics.
if (IsWritable(camera.AutoFunctionROIUseBrightness))
{
camera.AutoFunctionROISelector.SetValue(AutoFunctionROISelector_ROI1);
camera.AutoFunctionROIUseBrightness.SetValue(true); // ROI 1 is used for brightness control
camera.AutoFunctionROISelector.SetValue(AutoFunctionROISelector_ROI2);
camera.AutoFunctionROIUseBrightness.SetValue(false); // ROI 2 is not used for brightness control
}
// Set the ROI (in this example the complete sensor is used)
camera.AutoFunctionROISelector.SetValue(AutoFunctionROISelector_ROI1); // configure ROI 1
camera.AutoFunctionROIOffsetX.SetValue(0);
camera.AutoFunctionROIOffsetY.SetValue(0);
camera.AutoFunctionROIWidth.SetValue(camera.Width.GetMax());
camera.AutoFunctionROIHeight.SetValue(camera.Height.GetMax());
}
// Set the target value for luminance control.
// A value of 0.3 means that the target brightness is 30 % of the maximum brightness of the raw pixel value read out from the sensor.
// A value of 0.4 means 40 % and so forth.
camera.AutoTargetBrightness.SetValue(0.3);
cout << "ExposureAuto 'GainAuto = Continuous'." << endl;
cout << "Initial exposure time = ";
cout << camera.ExposureTime.GetValue() << " us" << endl;
camera.ExposureAuto.SetValue(ExposureAuto_Continuous);
// When "continuous" mode is selected, the parameter value is adjusted repeatedly while images are acquired.
// Depending on the current frame rate, the automatic adjustments will usually be carried out for
// every or every other image, unless the camera's microcontroller is kept busy by other tasks.
// The repeated automatic adjustment will proceed until the "once" mode of operation is used or
// until the auto function is set to "off", in which case the parameter value resulting from the latest
// automatic adjustment will operate unless the value is manually adjusted.
for (int n = 0; n < 20; n++) // For demonstration purposes, we will use only 20 images.
{
GrabResultPtr_t ptrGrabResult;
camera.GrabOne( 5000, ptrGrabResult);
#ifdef PYLON_WIN_BUILD
Pylon::DisplayImage(1, ptrGrabResult);
#endif
//For demonstration purposes only. Wait until the image is shown.
}
camera.ExposureAuto.SetValue(ExposureAuto_Off); // Switch off Exposure Auto.
cout << "Final exposure time = ";
cout << camera.ExposureTime.GetValue() << " us" << endl << endl;
}
void AutoWhiteBalance(Camera_t& camera)
{
// Check whether the Balance White Auto feature is available.
if ( !IsWritable( camera.BalanceWhiteAuto))
{
cout << "The camera does not support Balance White Auto." << endl << endl;
return;
}
// Maximize the grabbed area of interest (Image AOI).
if (IsWritable(camera.OffsetX))
{
camera.OffsetX.SetValue(camera.OffsetX.GetMin());
}
if (IsWritable(camera.OffsetY))
{
camera.OffsetY.SetValue(camera.OffsetY.GetMin());
}
camera.Width.SetValue(camera.Width.GetMax());
camera.Height.SetValue(camera.Height.GetMax());
if(IsAvailable(camera.AutoFunctionROISelector))
{
// Set the Auto Function ROI for white balance.
// We want to use ROI2
camera.AutoFunctionROISelector.SetValue(AutoFunctionROISelector_ROI1);
camera.AutoFunctionROIUseWhiteBalance.SetValue(false); // ROI 1 is not used for white balance
camera.AutoFunctionROISelector.SetValue(AutoFunctionROISelector_ROI2);
camera.AutoFunctionROIUseWhiteBalance.SetValue(true); // ROI 2 is used for white balance
// Set the Auto Function AOI for white balance statistics.
// Currently, AutoFunctionROISelector_ROI2 is predefined to gather
// white balance statistics.
camera.AutoFunctionROISelector.SetValue(AutoFunctionROISelector_ROI2);
camera.AutoFunctionROIOffsetX.SetValue(0);
camera.AutoFunctionROIOffsetY.SetValue(0);
camera.AutoFunctionROIWidth.SetValue(camera.Width.GetMax());
camera.AutoFunctionROIHeight.SetValue(camera.Height.GetMax());
}
cout << "Trying 'BalanceWhiteAuto = Once'." << endl;
cout << "Initial balance ratio: ";
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Red);
cout << "R = " << camera.BalanceRatio.GetValue() << " ";
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Green);
cout << "G = " << camera.BalanceRatio.GetValue() << " ";
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Blue);
cout << "B = " << camera.BalanceRatio.GetValue() << endl;
camera.BalanceWhiteAuto.SetValue(BalanceWhiteAuto_Once);
// When the "once" mode of operation is selected,
// the parameter values are automatically adjusted until the related image property
// reaches the target value. After the automatic parameter value adjustment is complete, the auto
// function will automatically be set to "off" and the new parameter value will be applied to the
// subsequently grabbed images.
int n = 0;
while (camera.BalanceWhiteAuto.GetValue() != BalanceWhiteAuto_Off)
{
GrabResultPtr_t ptrGrabResult;
camera.GrabOne( 5000, ptrGrabResult);
#ifdef PYLON_WIN_BUILD
Pylon::DisplayImage(1, ptrGrabResult);
#endif
++n;
//For demonstration purposes only. Wait until the image is shown.
//Make sure the loop is exited.
if (n > 100)
{
throw RUNTIME_EXCEPTION( "The adjustment of auto white balance did not finish.");
}
}
cout << "BalanceWhiteAuto went back to 'Off' after ";
cout << n << " frames." << endl;
cout << "Final balance ratio: ";
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Red);
cout << "R = " << camera.BalanceRatio.GetValue() << " ";
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Green);
cout << "G = " << camera.BalanceRatio.GetValue() << " ";
camera.BalanceRatioSelector.SetValue(BalanceRatioSelector_Blue);
cout << "B = " << camera.BalanceRatio.GetValue() << endl;
}
bool IsColorCamera(Camera_t& camera)
{
GenApi::NodeList_t Entries;
camera.PixelFormat.GetEntries(Entries);
bool Result = false;
for (size_t i = 0; i < Entries.size(); i++)
{
GenApi::INode *pNode = Entries[i];
if (IsAvailable(pNode->GetAccessMode()))
{
GenApi::IEnumEntry *pEnum = dynamic_cast<GenApi::IEnumEntry *>(pNode);
const GenICam::gcstring sym(pEnum->GetSymbolic());
if (sym.find(GenICam::gcstring("Bayer")) != GenICam::gcstring::_npos())
{
Result = true;
break;
}
}
}
return Result;
}
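
The two auto function samples differ mainly in SFNC naming: the GigE sample sets AutoTargetValue to 80 on an 8-bit scale, while the USB sample sets AutoTargetBrightness to 0.3 on a normalized scale; these express nearly the same target, since 80 / 255 is roughly 0.31. A sketch (fragment) that sets about the same target on either convention by probing the node map; the node names are assumed per SFNC:

// Sketch: set an equivalent brightness target on SFNC 1.x or SFNC 2.0 cameras.
GenApi::INodeMap& nodemap = camera.GetNodeMap();
GenApi::CFloatPtr targetBrightness( nodemap.GetNode( "AutoTargetBrightness")); // SFNC 2.0
GenApi::CIntegerPtr targetValue( nodemap.GetNode( "AutoTargetValue")); // SFNC 1.x
if (targetBrightness.IsValid() && GenApi::IsWritable( targetBrightness->GetAccessMode()))
{
    targetBrightness->SetValue( 0.3); // Normalized: 0.0 = black, 1.0 = white.
}
else if (targetValue.IsValid() && GenApi::IsWritable( targetValue->GetAccessMode()))
{
    targetValue->SetValue( 80); // 8-bit scale: 80 / 255 is roughly 0.31.
}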

ParametrizeCamera_Configurations

// ParametrizeCamera_Configurations.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
The Instant Camera class allows you to install event handlers for configuration purposes
and for handling grab results. This is very useful for handling standard
camera setups and image processing tasks.
This sample shows how to use configuration event handlers by applying the standard
configurations and registering sample configuration event handlers.
Configuration event handlers are derived from the CConfigurationEventHandler base class.
CConfigurationEventHandler provides virtual methods that can be overridden. If a
configuration event handler is registered, these methods are called when the state of the
instant camera object changes, e.g. when the camera object is opened or closed.
The standard configuration event handlers override the OnOpened method. The overridden method
parametrizes the camera.
Device specific camera classes, e.g. for GigE cameras, provide specialized
event handler base classes, e.g. CBaslerGigEConfigurationEventHandler.
*/
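// A hypothetical custom configuration handler (sketch only, not part of the original
// sample): like the standard configurations described above, it overrides OnOpened()
// to parametrize the camera through the GenApi node map.
//
// class CMyConfiguration : public Pylon::CConfigurationEventHandler
// {
// public:
//     void OnOpened( Pylon::CInstantCamera& camera)
//     {
//         GenApi::CIntegerPtr width( camera.GetNodeMap().GetNode( "Width"));
//         if (width.IsValid() && GenApi::IsWritable( width->GetAccessMode()))
//         {
//             width->SetValue( width->GetMax());
//         }
//     }
// };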
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
// Include files used by samples.
#include "../include/ImageEventPrinter.h"
#include "../include/ConfigurationEventPrinter.h"
#include "../include/PixelFormatAndAoiConfiguration.h"
// Namespace for using pylon objects.
using namespace Pylon;
// Namespace for using cout.
using namespace std;
// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 3;
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Create an instant camera object with the first camera device found.
CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice());
// Print the model name of the camera.
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
// For demonstration purposes only, register an image event handler
// that prints out information about the grabbed images.
camera.RegisterImageEventHandler( new CImageEventPrinter, RegistrationMode_Append, Cleanup_Delete);
// This smart pointer will receive the grab result data.
CGrabResultPtr ptrGrabResult;
cout << "Grab using continuous acquisition:" << endl << endl;
// Register the standard configuration event handler for setting up the camera for continuous acquisition.
// By setting the registration mode to RegistrationMode_ReplaceAll, the new configuration handler replaces the
// default configuration handler that has been automatically registered when creating the
// instant camera object.
// The handler is automatically deleted when deregistered or when the registry is cleared if Cleanup_Delete is specified.
camera.RegisterConfiguration( new CAcquireContinuousConfiguration, RegistrationMode_ReplaceAll, Cleanup_Delete);
// The camera's Open() method calls the configuration handler's OnOpened() method that
// applies the required parameter modifications.
camera.Open();
// The registered configuration event handler has done its parametrization now.
// Additional parameters could be set here.
// Grab some images for demonstration.
camera.StartGrabbing( c_countOfImagesToGrab);
while( camera.IsGrabbing())
{
camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException);
}
// Close the camera.
camera.Close();
cout << "Grab using software trigger mode:" << endl << endl;
// Register the standard configuration event handler for setting up the camera for software
// triggering.
// The current configuration is replaced by the software trigger configuration by setting the
// registration mode to RegistrationMode_ReplaceAll.
camera.RegisterConfiguration( new CSoftwareTriggerConfiguration, RegistrationMode_ReplaceAll, Cleanup_Delete);
// StartGrabbing() calls the camera's Open() automatically if the camera is not open yet.
// The Open method calls the configuration handler's OnOpened() method that
// sets the required parameters for enabling software triggering.
// Grab some images for demonstration.
camera.StartGrabbing( c_countOfImagesToGrab);
while( camera.IsGrabbing())
{
// Execute the software trigger. The call waits up to 100 ms for the camera
// to be ready to be triggered.
camera.WaitForFrameTriggerReady( 100, TimeoutHandling_ThrowException);
camera.ExecuteSoftwareTrigger();
camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException);
}
// StopGrabbing() is called from RetrieveResult if the number of images
// to grab has been reached. Since the camera was opened by StartGrabbing()
// it is closed by StopGrabbing().
// The CSoftwareTriggerConfiguration, like all standard configurations, is provided as a header file.
// The source code can be copied and modified to meet application specific needs, e.g.
// the CSoftwareTriggerConfiguration class could easily be changed into a hardware trigger configuration.
cout << "Grab using single frame acquisition:" << endl << endl;
// Register the standard configuration event handler for configuring single frame acquisition.
// The previous configuration is removed by setting the registration mode to RegistrationMode_ReplaceAll.
camera.RegisterConfiguration( new CAcquireSingleFrameConfiguration, RegistrationMode_ReplaceAll, Cleanup_Delete);
// GrabOne calls StartGrabbing and StopGrabbing internally.
// As seen above Open() is called by StartGrabbing and
// the OnOpened() method of the CAcquireSingleFrameConfiguration handler is called.
camera.GrabOne( 5000, ptrGrabResult);
// To continuously grab single images it is much more efficient to open the camera before grabbing.
// Note: The software trigger mode (see above) should be used for grabbing single images if you want to maximize frame rate.
// Now, the camera parameters are applied in the OnOpened method of the configuration object.
camera.Open();
// Additional parameters could be set here.
// Grab some images for demonstration.
camera.GrabOne( 5000, ptrGrabResult);
camera.GrabOne( 5000, ptrGrabResult);
camera.GrabOne( 5000, ptrGrabResult);
// Close the camera.
camera.Close();
cout << "Grab using multiple configuration objects:" << endl << endl;
// Register the standard event handler for configuring single frame acquisition.
// The current configuration is replaced by setting the registration mode to RegistrationMode_ReplaceAll.
camera.RegisterConfiguration( new CAcquireSingleFrameConfiguration, RegistrationMode_ReplaceAll, Cleanup_Delete);
// Register an additional configuration handler to set the image format and adjust the AOI.
// By setting the registration mode to RegistrationMode_Append, the configuration handler is added instead of replacing
// the already registered configuration handler.
camera.RegisterConfiguration( new CPixelFormatAndAoiConfiguration, RegistrationMode_Append, Cleanup_Delete);
// Create an event printer on the heap.
CConfigurationEventPrinter* pEventPrinterObject = new CConfigurationEventPrinter;
// Register the handler object and define Cleanup_None so that it is not deleted by the camera object.
// It must be ensured that the configuration handler "lives" at least until the handler is deregistered!
camera.RegisterConfiguration( pEventPrinterObject, RegistrationMode_Append, Cleanup_None);
// Grab an image for demonstration. Configuration events are printed.
cout << endl << "Grab, configuration events are printed:" << endl << endl;
camera.GrabOne( 5000, ptrGrabResult);
// Deregister the event handler.
camera.DeregisterConfiguration( pEventPrinterObject);
// The event handler can now be deleted.
delete pEventPrinterObject;
pEventPrinterObject = NULL;
// Grab an image for demonstration. Configuration events are not printed.
cout << endl << "Grab, configuration events are not printed:" << endl << endl;
camera.GrabOne( 5000, ptrGrabResult);
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}
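
The standard configurations are plain event handler classes, so writing your own works the same way. The following minimal sketch is not part of the SDK samples; it assumes the camera provides the ReverseX and ReverseY features, which not every model does. It overrides OnOpened() to mirror the image, analogous to how the standard configurations parametrize the camera:

#include <pylon/PylonIncludes.h>
// A hypothetical configuration event handler that mirrors the image (sketch).
class CMirrorImageConfiguration : public Pylon::CConfigurationEventHandler
{
public:
    // Called by the instant camera after the device has been opened.
    virtual void OnOpened( Pylon::CInstantCamera& camera)
    {
        GenApi::INodeMap& nodemap = camera.GetNodeMap();
        // ReverseX/ReverseY are assumed feature names; check writability first.
        GenApi::CBooleanPtr reverseX( nodemap.GetNode( "ReverseX"));
        GenApi::CBooleanPtr reverseY( nodemap.GetNode( "ReverseY"));
        if (GenApi::IsWritable( reverseX))
        {
            reverseX->SetValue( true);
        }
        if (GenApi::IsWritable( reverseY))
        {
            reverseY->SetValue( true);
        }
    }
};
// Usage, e.g.: camera.RegisterConfiguration( new CMirrorImageConfiguration, RegistrationMode_Append, Cleanup_Delete);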

ParametrizeCamera_GenericParameterAccess

// ParametrizeCamera_GenericParameterAccess.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
For camera configuration and for accessing other parameters, the pylon API
uses the technologies defined by the GenICam standard hosted by the
European Machine Vision Association (EMVA). The GenICam specification
(http://www.GenICam.org) defines a format for camera description files.
These files describe the configuration interface of GenICam compliant cameras.
The description files are written in XML (eXtensible Markup Language) and
describe camera registers, their interdependencies, and all other
information needed to access high-level features such as Gain,
Exposure Time, or Image Format by means of low-level register read and
write operations.
The elements of a camera description file are represented as software
objects called Nodes. For example, a node can represent a single camera
register, a camera parameter such as Gain, a set of available parameter
values, etc. Each node implements the GenApi::INode interface.
The nodes are linked together by different relationships as explained in the
GenICam standard document available at www.GenICam.org. The complete set of
nodes is stored in a data structure called Node Map.
At runtime, a Node Map is instantiated from an XML description.
This sample shows the 'generic' approach for configuring a camera
using the GenApi nodemaps represented by the GenApi::INodeMap interface.
The names and types of the parameter nodes can be found in the Basler pylon Programmer's Guide
and API Reference, in the camera User's Manual, in the camera's document about
Register Structure and Access Methods (if applicable), and by using the pylon Viewer tool.
See also the ParametrizeCamera_NativeParameterAccess sample for the 'native'
approach for configuring a camera.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
// Namespace for using pylon objects.
using namespace Pylon;
// Namespace for using GenApi objects.
using namespace GenApi;
// Namespace for using cout.
using namespace std;
// Adjust value to make it comply with range and increment passed.
//
// The parameter's minimum and maximum are always considered as valid values.
// If the increment is larger than one, the returned value will be: min + (n * inc).
// If the value doesn't meet these criteria, it will be rounded down to ensure compliance.
int64_t Adjust(int64_t val, int64_t minimum, int64_t maximum, int64_t inc)
{
// Check the input parameters.
if (inc <= 0)
{
// Zero or negative increments are invalid.
throw LOGICAL_ERROR_EXCEPTION("Unexpected increment %d", inc);
}
if (minimum > maximum)
{
// The minimum must not be bigger than the maximum.
throw LOGICAL_ERROR_EXCEPTION("minimum bigger than maximum.");
}
// Check the lower bound.
if (val < minimum)
{
return minimum;
}
// Check the upper bound.
if (val > maximum)
{
return maximum;
}
// Check the increment.
if (inc == 1)
{
// Special case: all values are valid.
return val;
}
else
{
// The value must be min + (n * inc).
// Due to the integer division, the value will be rounded down.
return minimum + ( ((val - minimum) / inc) * inc );
}
}
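// For example (not part of the original sample): with minimum = 8, maximum = 1024,
// and inc = 4, Adjust(202, 8, 1024, 4) returns 8 + ((202 - 8) / 4) * 4 = 200,
// i.e. the requested value rounded down to the nearest valid step.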
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Create an instant camera object with the camera found first.
CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice());
INodeMap& nodemap = camera.GetNodeMap();
// Open the camera for accessing the parameters.
camera.Open();
// Get camera device information.
cout << "Camera Device Information" << endl
<< "=========================" << endl;
cout << "Vendor : "
<< CStringPtr( nodemap.GetNode( "DeviceVendorName") )->GetValue() << endl;
cout << "Model : "
<< CStringPtr( nodemap.GetNode( "DeviceModelName") )->GetValue() << endl;
cout << "Firmware version : "
<< CStringPtr( nodemap.GetNode( "DeviceFirmwareVersion") )->GetValue() << endl << endl;
// Camera settings.
cout << "Camera Device Settings" << endl
<< "======================" << endl;
// Set the AOI:
// Get the integer nodes describing the AOI.
CIntegerPtr offsetX( nodemap.GetNode( "OffsetX"));
CIntegerPtr offsetY( nodemap.GetNode( "OffsetY"));
CIntegerPtr width( nodemap.GetNode( "Width"));
CIntegerPtr height( nodemap.GetNode( "Height"));
// On some cameras the offsets are read-only,
// so we check whether we can write a value. Otherwise, we would get an exception.
// GenApi has some convenience predicates to check this easily.
if ( IsWritable( offsetX))
{
offsetX->SetValue( offsetX->GetMin());
}
if ( IsWritable( offsetY))
{
offsetY->SetValue( offsetY->GetMin());
}
// Some properties have restrictions. Use GetInc/GetMin/GetMax to make sure you set a valid value.
int64_t newWidth = 202;
newWidth = Adjust(newWidth, width->GetMin(), width->GetMax(), width->GetInc());
int64_t newHeight = 101;
newHeight = Adjust(newHeight, height->GetMin(), height->GetMax(), height->GetInc());
width->SetValue(newWidth);
height->SetValue(newHeight);
cout << "OffsetX : " << offsetX->GetValue() << endl;
cout << "OffsetY : " << offsetY->GetValue() << endl;
cout << "Width : " << width->GetValue() << endl;
cout << "Height : " << height->GetValue() << endl;
// Access the PixelFormat enumeration type node.
CEnumerationPtr pixelFormat( nodemap.GetNode( "PixelFormat"));
// Remember the current pixel format.
String_t oldPixelFormat = pixelFormat->ToString();
cout << "Old PixelFormat : " << oldPixelFormat << endl;
// Set the pixel format to Mono8 if available.
if ( IsAvailable( pixelFormat->GetEntryByName( "Mono8")))
{
pixelFormat->FromString( "Mono8");
cout << "New PixelFormat : " << pixelFormat->ToString() << endl;
}
// Set the new gain to 50% -> Min + ((Max-Min) / 2).
//
// Note: Some newer camera models may have auto functions enabled.
// To be able to set the gain value to a specific value
// the Gain Auto function must be disabled first.
// Access the enumeration type node GainAuto.
CEnumerationPtr gainAuto( nodemap.GetNode( "GainAuto"));
if ( IsWritable( gainAuto))
{
gainAuto->FromString("Off");
}
// Check to see which Standard Feature Naming Convention (SFNC) is used by the camera device.
if ( camera.GetSfncVersion() >= Sfnc_2_0_0)
{
// Access the Gain float type node. This node is available for USB camera devices.
// USB camera devices are compliant to SFNC version 2.0.
CFloatPtr gain( nodemap.GetNode( "Gain"));
double newGain = gain->GetMin() + ((gain->GetMax() - gain->GetMin()) / 2);
gain->SetValue(newGain);
cout << "Gain (50%) : " << gain->GetValue() << " (Min: " << gain->GetMin() << "; Max: " << gain->GetMax() << ")" << endl;
}
else
{
// Access the GainRaw integer type node. This node is available for IIDC 1394 and GigE camera devices.
CIntegerPtr gainRaw( nodemap.GetNode( "GainRaw"));
int64_t newGainRaw = gainRaw->GetMin() + ((gainRaw->GetMax() - gainRaw->GetMin()) / 2);
// Make sure the calculated value is valid.
newGainRaw = Adjust(newGainRaw, gainRaw->GetMin(), gainRaw->GetMax(), gainRaw->GetInc());
gainRaw->SetValue(newGainRaw);
cout << "Gain (50%) : " << gainRaw->GetValue() << " (Min: " << gainRaw->GetMin() << "; Max: " << gainRaw->GetMax() << "; Inc: " << gainRaw->GetInc() << ")" << endl;
}
// Restore the old pixel format.
pixelFormat->FromString(oldPixelFormat);
// Close the camera.
camera.Close();
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}

ParametrizeCamera_LoadAndSave

// ParametrizeCamera_LoadAndSave.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample application demonstrates how to save or load the features of a camera
to or from a file.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
// Namespace for using pylon objects.
using namespace Pylon;
// Namespace for using cout.
using namespace std;
// The name of the pylon feature stream file.
const char Filename[] = "NodeMap.pfs";
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Create an instant camera object with the camera device found first.
CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice());
// Print the model name of the camera.
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
// Open the camera.
camera.Open();
cout << "Saving camera's node map to file..."<< endl;
// Save the content of the camera's node map into the file.
CFeaturePersistence::Save( Filename, &camera.GetNodeMap() );
// --------------------------------------------------------------------
// Just for demonstration, read the content of the file back to the camera's node map with enabled validation.
cout << "Reading file back to camera's node map..."<< endl;
CFeaturePersistence::Load( Filename, &camera.GetNodeMap(), true );
// Close the camera.
camera.Close();
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}
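
Besides the file-based Save() and Load() shown above, CFeaturePersistence can also serialize the node map to a string, which is convenient when settings are kept in a database or transferred over a network. A minimal sketch, assuming an opened camera object as in the sample and the SaveToString/LoadFromString overloads available in current pylon versions:

// Serialize the camera's node map to a string (sketch).
Pylon::String_t features;
Pylon::CFeaturePersistence::SaveToString( features, &camera.GetNodeMap());
// ... store or transmit 'features', then restore it later with validation enabled.
Pylon::CFeaturePersistence::LoadFromString( features, &camera.GetNodeMap(), true);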

ParametrizeCamera_LookupTable

// ParametrizeCamera_LookupTable.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample program demonstrates the use of the Luminance Lookup Table feature.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
// Namespace for using pylon objects.
using namespace Pylon;
#if defined( USE_1394 )
// Settings to use Basler IEEE 1394 cameras.
#include <pylon/1394/Basler1394InstantCamera.h>
typedef Pylon::CBasler1394InstantCamera Camera_t;
using namespace Basler_IIDC1394CameraParams;
#elif defined ( USE_GIGE )
// Settings to use Basler GigE cameras.
#include <pylon/gige/BaslerGigEInstantCamera.h>
typedef Pylon::CBaslerGigEInstantCamera Camera_t;
using namespace Basler_GigECameraParams;
#elif defined ( USE_USB )
// Settings to use Basler USB cameras.
#include <pylon/usb/BaslerUsbInstantCamera.h>
typedef Pylon::CBaslerUsbInstantCamera Camera_t;
using namespace Basler_UsbCameraParams;
#else
#error Camera type is not specified. For example, define USE_GIGE for using GigE cameras.
#endif
// Namespace for using cout.
using namespace std;
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Only look for cameras supported by Camera_t
CDeviceInfo info;
info.SetDeviceClass( Camera_t::DeviceClass());
// Create an instant camera object with the first found camera device matching the specified device class.
Camera_t camera( CTlFactory::GetInstance().CreateFirstDevice( info));
// Print the model name of the camera.
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
cout << "Opening camera...";
// Open the camera.
camera.Open();
cout << "done" << endl;
cout << "Writing LUT....";
// Select the lookup table using the LUTSelector.
camera.LUTSelector.SetValue( LUTSelector_Luminance );
// Some cameras have 10 bit and others have 12 bit lookup tables, so determine
// the type of the lookup table for the current device.
const int nValues = (int) camera.LUTIndex.GetMax() + 1;
int inc;
if ( nValues == 4096 ) // 12 bit LUT.
inc = 8;
else if ( nValues == 1024 ) // 10 bit LUT.
inc = 2;
else
{
throw RUNTIME_EXCEPTION( "Type of LUT is not supported by this sample.");
}
// Use the LUTIndex and LUTValue parameters to access the lookup table values.
// The following lookup table causes an inversion of the sensor values.
for ( int i = 0; i < nValues; i += inc )
{
camera.LUTIndex.SetValue( i );
camera.LUTValue.SetValue( nValues - 1 - i );
}
cout << "done" << endl;
// Enable the lookup table.
camera.LUTEnable.SetValue( true );
// Grab and process images here.
// ...
// Disable the lookup table.
camera.LUTEnable.SetValue( false );
// Close the camera.
camera.Close();
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}
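
The inversion written by this sample is just one possible transfer function. As a sketch (not part of the SDK sample; 'camera', 'nValues', and 'inc' as defined in the sample above), the same LUTIndex/LUTValue loop could instead be filled with a gamma curve:

#include <math.h>
// Fill the luminance lookup table with a gamma curve instead of an inversion (sketch).
const double gamma = 0.5;
for (int i = 0; i < nValues; i += inc)
{
    const double normalized = double(i) / double(nValues - 1);
    const int64_t value = int64_t( pow( normalized, gamma) * (nValues - 1) + 0.5);
    camera.LUTIndex.SetValue( i );
    camera.LUTValue.SetValue( value );
}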

ParametrizeCamera_NativeParameterAccess

// ParametrizeCamera_NativeParameterAccess.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
For camera configuration and for accessing other parameters, the pylon API
uses the technologies defined by the GenICam standard hosted by the
European Machine Vision Association (EMVA). The GenICam specification
(http://www.GenICam.org) defines a format for camera description files.
These files describe the configuration interface of GenICam compliant cameras.
The description files are written in XML (eXtensible Markup Language) and
describe camera registers, their interdependencies, and all other
information needed to access high-level features such as Gain,
Exposure Time, or Image Format by means of low-level register read and
write operations.
The elements of a camera description file are represented as software
objects called Nodes. For example, a node can represent a single camera
register, a camera parameter such as Gain, a set of available parameter
values, etc. Each node implements the GenApi::INode interface.
Using the code generators provided by GenICam's GenApi module,
a programming interface is created from a camera description file.
That interface provides a member for each parameter that is available for the camera device.
The programming interface is exported by the Device Specific Instant Camera classes.
This is the easiest way to access parameters.
This sample shows the 'native' approach for configuring a camera
using device specific instant camera classes.
See also the ParametrizeCamera_GenericParameterAccess sample for the 'generic'
approach for configuring a camera.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
// Namespace for using pylon objects.
using namespace Pylon;
#if defined( USE_1394 )
// Settings for using Basler IEEE 1394 cameras.
#include <pylon/1394/Basler1394InstantCamera.h>
typedef Pylon::CBasler1394InstantCamera Camera_t;
using namespace Basler_IIDC1394CameraParams;
#elif defined ( USE_GIGE )
// Settings for using Basler GigE cameras.
#include <pylon/gige/BaslerGigEInstantCamera.h>
typedef Pylon::CBaslerGigEInstantCamera Camera_t;
using namespace Basler_GigECameraParams;
#elif defined ( USE_CAMERALINK )
// Settings for using Basler Camera Link cameras.
#include <pylon/cameralink/BaslerCameraLinkInstantCamera.h>
typedef Pylon::CBaslerCameraLinkInstantCamera Camera_t;
using namespace Basler_CLCameraParams;
#elif defined ( USE_USB )
// Settings for using Basler USB cameras.
#include <pylon/usb/BaslerUsbInstantCamera.h>
typedef Pylon::CBaslerUsbInstantCamera Camera_t;
using namespace Basler_UsbCameraParams;
#elif defined ( USE_BCON )
// Settings for using Basler BCON cameras.
#include <pylon/bcon/BaslerBconInstantCamera.h>
typedef Pylon::CBaslerBconInstantCamera Camera_t;
using namespace Basler_BconCameraParams;
#else
#error Camera type is not specified. For example, define USE_GIGE for using GigE cameras.
#endif
// Namespace for using cout.
using namespace std;
// Adjust value so it complies with range and increment passed.
//
// The parameter's minimum and maximum are always considered as valid values.
// If the increment is larger than one, the returned value will be: min + (n * inc).
// If the value doesn't meet these criteria, it will be rounded down so that it does.
int64_t Adjust(int64_t val, int64_t minimum, int64_t maximum, int64_t inc)
{
// Check the input parameters.
if (inc <= 0)
{
// Zero or negative increments are invalid.
throw LOGICAL_ERROR_EXCEPTION("Unexpected increment %d", inc);
}
if (minimum > maximum)
{
// The minimum must not be bigger than the maximum.
throw LOGICAL_ERROR_EXCEPTION("minimum bigger than maximum.");
}
// Check the lower bound.
if (val < minimum)
{
return minimum;
}
// Check the upper bound.
if (val > maximum)
{
return maximum;
}
// Check the increment.
if (inc == 1)
{
// Special case: all values are valid.
return val;
}
else
{
// The value must be min + (n * inc).
// Due to the integer division, the value will be rounded down.
return minimum + ( ((val - minimum) / inc) * inc );
}
}
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Only look for cameras supported by Camera_t.
CDeviceInfo info;
info.SetDeviceClass( Camera_t::DeviceClass());
// Create an instant camera object with the first found camera device matching the specified device class.
Camera_t camera( CTlFactory::GetInstance().CreateFirstDevice( info));
// Open the camera for accessing the parameters.
camera.Open();
// Get camera device information.
cout << "Camera Device Information" << endl
<< "=========================" << endl;
cout << "Vendor : "
<< camera.DeviceVendorName.GetValue() << endl;
cout << "Model : "
<< camera.DeviceModelName.GetValue() << endl;
cout << "Firmware version : "
<< camera.DeviceFirmwareVersion.GetValue() << endl << endl;
// Camera settings.
cout << "Camera Device Settings" << endl
<< "======================" << endl;
// Set the AOI:
// On some cameras the Offsets are read-only,
// so we check whether we can write a value. Otherwise, we would get an exception.
// GenApi has some convenience predicates to check this easily.
if (IsWritable(camera.OffsetX))
{
camera.OffsetX.SetValue(camera.OffsetX.GetMin());
}
if (IsWritable(camera.OffsetY))
{
camera.OffsetY.SetValue(camera.OffsetY.GetMin());
}
// Some properties have restrictions. Use GetInc/GetMin/GetMax to make sure you set a valid value.
int64_t newWidth = 202;
newWidth = Adjust(newWidth, camera.Width.GetMin(), camera.Width.GetMax(), camera.Width.GetInc());
int64_t newHeight = 101;
newHeight = Adjust(newHeight, camera.Height.GetMin(), camera.Height.GetMax(), camera.Height.GetInc());
camera.Width.SetValue(newWidth);
camera.Height.SetValue(newHeight);
cout << "OffsetX : " << camera.OffsetX.GetValue() << endl;
cout << "OffsetY : " << camera.OffsetY.GetValue() << endl;
cout << "Width : " << camera.Width.GetValue() << endl;
cout << "Height : " << camera.Height.GetValue() << endl;
// Remember the current pixel format.
PixelFormatEnums oldPixelFormat = camera.PixelFormat.GetValue();
cout << "Old PixelFormat : " << camera.PixelFormat.ToString() << " (" << oldPixelFormat << ")" << endl;
// Set pixel format to Mono8 if available.
if ( GenApi::IsAvailable( camera.PixelFormat.GetEntry(PixelFormat_Mono8)))
{
camera.PixelFormat.SetValue(PixelFormat_Mono8);
cout << "New PixelFormat : " << camera.PixelFormat.ToString() << " (" << camera.PixelFormat.GetValue() << ")" << endl;
}
// Set the new gain to 50% -> Min + ((Max-Min) / 2)
//
// Note: Some newer camera models may have auto functions enabled.
// To be able to set the gain value to a specific value
// the Gain Auto function must be disabled first.
if (IsWritable(camera.GainAuto))
{
camera.GainAuto.FromString("Off");
}
#if defined( USE_USB ) || defined( USE_BCON )
double newGain = camera.Gain.GetMin() + ((camera.Gain.GetMax() - camera.Gain.GetMin()) / 2);
camera.Gain.SetValue(newGain);
cout << "Gain (50%) : " << camera.Gain.GetValue() << " (Min: " << camera.Gain.GetMin() << "; Max: " << camera.Gain.GetMax() << ")" << endl;
#else
int64_t newGainRaw = camera.GainRaw.GetMin() + ((camera.GainRaw.GetMax() - camera.GainRaw.GetMin()) / 2);
// Make sure the calculated value is valid
newGainRaw = Adjust(newGainRaw, camera.GainRaw.GetMin(), camera.GainRaw.GetMax(), camera.GainRaw.GetInc());
camera.GainRaw.SetValue(newGainRaw);
cout << "Gain (50%) : " << camera.GainRaw.GetValue() << " (Min: " << camera.GainRaw.GetMin() << "; Max: " << camera.GainRaw.GetMax() << "; Inc: " << camera.GainRaw.GetInc() << ")" << endl;
#endif
// Restore the old pixel format.
camera.PixelFormat.SetValue(oldPixelFormat);
// Close the camera.
camera.Close();
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}
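
For comparison, the native and the generic approach both end at the same node in the camera's node map. A sketch reading the Width parameter both ways, assuming an opened device specific camera object as in the sample above:

// Native access via the member generated from the camera description file:
int64_t widthNative = camera.Width.GetValue();
// Generic access via the node map, using the SFNC feature name:
GenApi::CIntegerPtr widthNode( camera.GetNodeMap().GetNode( "Width"));
int64_t widthGeneric = widthNode->GetValue();
// Both reads return the same value because they access the same node.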

ParametrizeCamera_Shading

// ParametrizeCamera_Shading.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample demonstrates how to calculate and upload a gain shading
set to a Basler runner line scan camera.
This sample only applies to Basler runner cameras.
*/
// For use with Visual Studio >= 2005, disable deprecation warnings caused by the fopen function.
#define _CRT_SECURE_NO_WARNINGS
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
// Namespace for using pylon objects.
using namespace Pylon;
// For DBL_MAX.
#include <float.h>
#include <errno.h>
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4244)
#endif
// For file upload.
#include <GenApi/Filestream.h>
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#if defined ( USE_GIGE )
// Settings to use for Basler GigE cameras.
#include <pylon/gige/BaslerGigEInstantCamera.h>
typedef Pylon::CBaslerGigEInstantCamera Camera_t;
using namespace Basler_GigECameraParams;
using namespace Basler_GigEStreamParams;
#elif defined( USE_1394 )
#error Currently no Basler 1394 camera supports shading
#else
#error Camera type is not specified. For example, define USE_GIGE for using GigE cameras
#endif
// Namespace for using cout.
using namespace std;
// Prototypes for functions used in 'main'.
void CreateShadingData(Camera_t& camera,
const char *pLocalFilename);
void UploadFile(Camera_t& camera,
const char *pCameraFilename,
const char *pLocalFilename);
void CheckShadingData(Camera_t& camera);
// Name of the file where we will store the shading data on the local disk.
static const char LocalFilename[] = "ShadingData.bin";
#define USE_SHADING_SET_1 // Define which shading set we are going to use.
#if defined (USE_SHADING_SET_1)
// Name of the file in the camera where the shading data will be stored.
static const char CameraFilename[] = "UserGainShading1";
// Name of the shading set that corresponds to 'CameraFilename'.
static const ShadingSetSelectorEnums ShadingSet = ShadingSetSelector_UserShadingSet1;
#elif defined (USE_SHADING_SET_2)
// Name of the file in the camera where shading data will be stored.
static const char CameraFilename[] = "UserGainShading2";
// Name of the shading set that corresponds to 'CameraFilename'.
static const ShadingSetSelectorEnums ShadingSet = ShadingSetSelector_UserShadingSet2;
#else
#error No shading set defined!
#endif
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Only look for cameras supported by Camera_t
CDeviceInfo info;
info.SetDeviceClass( Camera_t::DeviceClass());
// Create an instant camera object with the first found camera device matching the specified device class.
Camera_t camera( CTlFactory::GetInstance().CreateFirstDevice( info));
// Print the model name of the camera.
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
// Register the standard configuration event handler for configuring single frame acquisition.
// This replaces the default configuration as all event handlers are removed by setting the registration mode to RegistrationMode_ReplaceAll.
camera.RegisterConfiguration( new CAcquireSingleFrameConfiguration, RegistrationMode_ReplaceAll, Cleanup_Delete);
// Open the camera.
camera.Open();
// Only line scan cameras support gain shading.
if (camera.DeviceScanType.GetValue() == DeviceScanType_Linescan)
{
// Here, we assume that the conditions for exposure (illumination,
// exposure time, etc.) have been set up to deliver images of
// uniform intensity (gray value), but that the acquired images are not uniform.
// We calculate the gain shading data so that the observed non-uniformity
// will be compensated when the data are applied.
// These data are saved to a local file.
CreateShadingData( camera, LocalFilename);
// Transfer calculated gain shading data from the local file to the camera.
UploadFile( camera, CameraFilename, LocalFilename);
// Test to what extent the non-uniformity has been compensated.
CheckShadingData( camera);
}
else
{
cerr << "Only line scan cameras support gain shading." << endl;
}
// Close the camera.
camera.Close();
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}
//
// In the following code, the format of the arrays containing intensities
// or coefficients is as follows:
//
// If the pixel format is PixelFormat_Mono8:
// ArraySize == Width
// [Value_x0, Value_x1, Value_x2, ... , Value_x(Width - 1)]
//
// If the pixel format is PixelFormat_RGB8Packed:
// ArraySize == 3 * Width
// [ValueRed_x0, ValueRed_x1, ... , ValueRed_x(Width - 1),
// ValueGreen_x0, ValueGreen_x1, ... , ValueGreen_x(Width - 1),
// ValueBlue_x0, ValueBlue_x1, ... , ValueBlue_x(Width - 1)]
//
//
// Grab a frame and store the intensity for the pixels in each column
// in 'Intensities'.
//
void AverageLines(Camera_t& camera,
uint32_t Width, // Width of frame (number of pixels in each line).
uint32_t Height, // Height of frame (number of lines in each frame).
uint32_t NumCoeffs, // Number of coefficients.
double *Intensities) // Destination array.
{
for (uint32_t x = 0; x < NumCoeffs; x++)
{
Intensities[x] = 0.0;
}
cout << "Grab frame for averaging." << endl;
CGrabResultPtr ptrGrabResult;
camera.GrabOne( 5000, ptrGrabResult);
uint8_t *Buffer = static_cast<uint8_t*>( ptrGrabResult->GetBuffer());
if (NumCoeffs == 3 * Width)
{
//
// RGB mode.
//
for (uint32_t x = 0; x < Width; x++)
{
for (uint32_t y = 0; y < Height; y++)
{
// Add intensities.
uint32_t idx = 3 * (y * Width + x);
Intensities[x] += Buffer[idx];
Intensities[x + Width] += Buffer[idx + 1];
Intensities[x + 2 * Width] += Buffer[idx + 2];
}
}
}
else
{
//
// Mono mode.
//
for (uint32_t x = 0; x < Width; x++)
{
for (uint32_t y = 0; y < Height; y++)
{
// Add intensities.
Intensities[x] += Buffer[y * Width + x];
}
}
}
double scale = 1.0 / double(Height);
for (uint32_t x = 0; x < NumCoeffs; x++)
{
// Calculate average intensities.
Intensities[x] *= scale;
}
}
//
// Take the average intensities from 'pDblCoeff'. Identify the minimum and maximum
// average intensity. For each intensity, calculate a multiplier so that
// the product of the multiplier and the intensity equals the maximum intensity (the
// multiplier for the maximum intensity is 1). Store the multipliers in 'pDblCoeff'.
//
void CalculateCoeffs( uint32_t Width, // Width of image (number of pixels in each line).
uint32_t Height, // Height of image (number of lines in each frame).
uint32_t NumCoeffs, // Number of shading coefficients.
double * pDblCoeff ) // In: averaged intensities.
// Out: multiplier values.
{
if (NumCoeffs == 3 * Width)
{
//
// RGB mode.
//
double MinR = DBL_MAX;
double MinG = DBL_MAX;
double MinB = DBL_MAX;
double MaxR = -DBL_MAX;
double MaxG = -DBL_MAX;
double MaxB = -DBL_MAX;
for (uint32_t x = 0; x < Width; x++)
{
// Determine min and max intensity.
if (pDblCoeff[x] < MinR)
{
MinR = pDblCoeff[x];
}
if (pDblCoeff[x] > MaxR)
{
MaxR = pDblCoeff[x];
}
if (pDblCoeff[x + Width] < MinG)
{
MinG = pDblCoeff[x + Width];
}
if (pDblCoeff[x + Width] > MaxG)
{
MaxG = pDblCoeff[x + Width];
}
if (pDblCoeff[x + 2 * Width] < MinB)
{
MinB = pDblCoeff[x + 2 * Width];
}
if (pDblCoeff[x + 2 * Width] > MaxB)
{
MaxB = pDblCoeff[x + 2 * Width];
}
}
cout << "MaxR = " << (MaxR / MinR) << " * MinR" << endl;
cout << "MaxG = " << (MaxG / MinG) << " * MinG" << endl;
cout << "MaxB = " << (MaxB / MinB) << " * MinB" << endl;
// Scale to maximum intensity.
for (uint32_t x = 0; x < Width; x++)
{
pDblCoeff[x] = MaxR / pDblCoeff[x];
pDblCoeff[x + Width] = MaxG / pDblCoeff[x + Width];
pDblCoeff[x + 2 * Width] = MaxB / pDblCoeff[x + 2 * Width];
}
}
else
{
//
// Mono mode.
//
double Min = DBL_MAX;
double Max = -DBL_MAX;
for (uint32_t x = 0; x < Width; x++)
{
// Determine min and max intensity.
if (pDblCoeff[x] < Min)
{
Min = pDblCoeff[x];
}
if (pDblCoeff[x] > Max)
{
Max = pDblCoeff[x];
}
}
cout << "Max = " << (Max / Min) << " * Min" << endl;
// Scale to maximum intensity.
for (uint32_t x = 0; x < Width; x++)
{
pDblCoeff[x] = Max / pDblCoeff[x];
}
}
}
bool SupportsRGB(Camera_t& camera);
// 'CreateShadingData' assumes that the conditions for exposure (illumination,
// exposure time, etc.) have been set up to deliver images of
// uniform intensity (gray value), but that the acquired images are not uniform.
// We calculate the gain shading data so that the observed non-uniformity
// will be compensated when the data are applied.
// These data are saved to a local file.
void CreateShadingData(Camera_t& camera, const char *pLocalFilename)
{
//
// Prepare camera for grab.
//
uint32_t Width = (uint32_t)camera.Width.GetValue();
uint32_t Height = (uint32_t)camera.Height.GetValue();
int32_t BytesPerPixel = 1;
if (SupportsRGB(camera))
{
camera.PixelFormat.SetValue(PixelFormat_RGB8Packed);
BytesPerPixel = 3;
}
else
{
camera.PixelFormat.SetValue(PixelFormat_Mono8);
}
// Disable gain shading for calculation.
camera.ShadingSelector.SetValue(ShadingSelector_GainShading);
camera.ShadingEnable.SetValue(false);
//
// Grab and average images into 'pDblCoeff'.
//
uint32_t NumCoeffs = BytesPerPixel * Width;
double *pDblCoeff = new double[NumCoeffs];
AverageLines(camera, Width, Height, NumCoeffs, pDblCoeff);
//
// Calculate gain shading data.
//
// Convert averaged intensities to multipliers.
CalculateCoeffs(Width, Height, NumCoeffs, pDblCoeff);
// Convert multipliers to camera format.
uint32_t *pCoeffs = new uint32_t[NumCoeffs];
for (uint32_t x = 0; x < NumCoeffs; x++)
{
// The multipliers are expressed as 32 bit fixed point
// numbers with 16 bits before and 16 bits after
// the decimal point.
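// For example (illustration only): a multiplier of 1.5 is stored as
// 1.5 * 65536 = 98304 = 0x00018000.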
uint32_t coeff = uint32_t(pDblCoeff[x] * (1 << 16));
// Currently, the maximum multiplier is limited to 3.99998
// (max register value == 0x0003FFFF).
if (coeff > 0x0003FFFF)
{
static bool PrintMessage = true;
if (PrintMessage)
{
PrintMessage = false;
cout << "Gain shading had to be clipped." << endl;
}
coeff = 0x0003FFFF;
}
pCoeffs[x] = coeff;
}
delete[] pDblCoeff;
//
// Write data to file.
//
FILE *fp = fopen(pLocalFilename, "wb");
if (fp == NULL )
{
RUNTIME_EXCEPTION("Can not open file '%s'\n", pLocalFilename);
}
// Header for gain shading file.
struct ShadingHeader_t
{
unsigned char version;
unsigned char type;
unsigned char sensorType;
unsigned char lineType;
unsigned short width;
unsigned short reserved;
};
// Constants used in header.
static const unsigned char ShadingVersion_1 = 0x5a;
static const unsigned char ShadingType_Gain = 0xc3;
static const unsigned char ShadingSensorType_Line = 0x02;
static const unsigned char ShadingLineType_Single = 0x01;
static const unsigned char ShadingLineType_Tri = 0x03;
// Construct header.
ShadingHeader_t h;
h.version = ShadingVersion_1;
h.type = ShadingType_Gain;
h.sensorType = ShadingSensorType_Line;
h.lineType = BytesPerPixel == 3 ? ShadingLineType_Tri : ShadingLineType_Single;
h.width = uint16_t(Width);
h.reserved = 0;
// Write shading data to local file.
fwrite(&h, sizeof(h), 1, fp);
fwrite(pCoeffs, sizeof(uint32_t), NumCoeffs, fp);
fclose(fp);
delete[] pCoeffs;
}
// Copy data from a local file to a file in the camera.
void UploadFile(Camera_t& camera,
const char *pCameraFilename,
const char *pLocalFilename)
{
// Open local file.
FILE *fp = fopen(pLocalFilename, "rb");
if ( fp == NULL )
{
RUNTIME_EXCEPTION("Can not open file '%s'\n", pLocalFilename);
}
// Determine file size.
fseek(fp, 0, SEEK_END);
size_t Size = ftell(fp);
rewind(fp);
if (Size == 0)
{
fclose(fp);
return;
}
// Read data from local file into pBuf.
char *pBuf = new char[Size];
size_t read = fread(pBuf, 1, Size, fp);
fclose(fp);
if (read != Size)
{
RUNTIME_EXCEPTION("Failed to read from file '%s'\n", pLocalFilename);
}
// Transfer data to camera.
GenApi::ODevFileStream stream( &camera.GetNodeMap(), pCameraFilename);
stream.write(pBuf, streamsize(Size));
stream.close();
delete[] pBuf;
}
// Check the success of 'CreateShadingData' and 'UploadFile' by
// - activating and enabling the uploaded shading data file
// - grabbing one image
// - calculating the multipliers again, expecting them to be close to 1.0
void CheckShadingData(Camera_t& camera)
{
uint32_t Width = (uint32_t)camera.Width.GetValue();
uint32_t Height = (uint32_t)camera.Height.GetValue();
int32_t BytesPerPixel = 1;
if (SupportsRGB(camera))
{
BytesPerPixel = 3;
}
//
// Activate and enable the gain shading set that was just uploaded.
//
camera.ShadingSelector.SetValue(ShadingSelector_GainShading);
camera.ShadingSetSelector.SetValue(ShadingSet);
camera.ShadingSetActivate.Execute();
camera.ShadingEnable.SetValue(true);
//
// Grab image and calculate multipliers just to print the new Max/Min ratio.
//
uint32_t NumCoeffs = BytesPerPixel * Width;
double *pDblCoeff = new double[NumCoeffs];
AverageLines( camera,
Width,
Height,
NumCoeffs,
pDblCoeff);
cout << endl << "After applying shading correction:" << endl;
CalculateCoeffs(Width, Height, NumCoeffs, pDblCoeff);
delete[] pDblCoeff;
}
// Check whether camera supports RGB pixel formats.
bool SupportsRGB(Camera_t& camera)
{
GenApi::NodeList_t Entries;
camera.PixelFormat.GetEntries(Entries);
bool Result = false;
for (size_t i = 0; i < Entries.size(); i++)
{
GenApi::INode *pNode = Entries[i];
if (IsAvailable(pNode->GetAccessMode()))
{
GenApi::IEnumEntry *pEnum = dynamic_cast<GenApi::IEnumEntry *>(pNode);
const GenICam::gcstring sym(pEnum->GetSymbolic());
if (sym.find(GenICam::gcstring("RGB")) != string::npos)
{
Result = true;
break;
}
}
}
return Result;
}
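
For completeness, reading a file back from the camera works analogously with GenApi::IDevFileStream, the read counterpart of the ODevFileStream used in UploadFile. A sketch, assuming the same Camera_t and the stream classes from GenApi/Filestream.h:

#include <sstream>
// Copy data from a file in the camera into a local buffer (sketch).
void DownloadFile(Camera_t& camera, const char *pCameraFilename)
{
    // Open the camera file for reading.
    GenApi::IDevFileStream stream( &camera.GetNodeMap(), pCameraFilename);
    // Copy the stream content into a local string buffer.
    std::stringstream buffer;
    buffer << stream.rdbuf();
    stream.close();
    // buffer.str() now holds the file content.
}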

ParametrizeCamera_UserSets

// ParametrizeCamera_UserSets.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
Demonstrates how to use user configuration sets (user sets) and how to configure the camera
to start up with the user defined settings of user set 1.
You can also configure your camera using the pylon Viewer and
store your custom settings in a user set of your choice.
ATTENTION:
Executing this sample will overwrite all current settings in user set 1.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
// Namespace for using pylon objects.
using namespace Pylon;
#if defined( USE_1394 )
// Setting for using Basler IEEE 1394 cameras.
#include <pylon/1394/Basler1394InstantCamera.h>
typedef Pylon::CBasler1394InstantCamera Camera_t;
using namespace Basler_IIDC1394CameraParams;
#elif defined ( USE_GIGE )
// Settings for using Basler GigE cameras.
#include <pylon/gige/BaslerGigEInstantCamera.h>
typedef Pylon::CBaslerGigEInstantCamera Camera_t;
using namespace Basler_GigECameraParams;
#elif defined ( USE_CAMERALINK )
// Setting for using Basler Camera Link cameras.
#include <pylon/cameralink/BaslerCameraLinkInstantCamera.h>
typedef Pylon::CBaslerCameraLinkInstantCamera Camera_t;
using namespace Basler_CLCameraParams;
#elif defined ( USE_USB )
// Settings for using Basler USB cameras.
#include <pylon/usb/BaslerUsbInstantCamera.h>
typedef Pylon::CBaslerUsbInstantCamera Camera_t;
using namespace Basler_UsbCameraParams;
#elif defined ( USE_BCON )
// Settings for using Basler BCON cameras.
#include <pylon/bcon/BaslerBconInstantCamera.h>
typedef Pylon::CBaslerBconInstantCamera Camera_t;
using namespace Basler_BconCameraParams;
#else
#error Camera type is not specified. For example, define USE_GIGE for using GigE cameras.
#endif
// Namespace for using cout.
using namespace std;
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Only look for cameras supported by Camera_t
CDeviceInfo info;
info.SetDeviceClass( Camera_t::DeviceClass());
// Create an instant camera object with the first found camera device matching the specified device class.
Camera_t camera( CTlFactory::GetInstance().CreateFirstDevice( info));
// Print the model name of the camera.
cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
// Open the camera.
camera.Open();
// Remember the current default user set selector so we can restore it later when cleaning up.
#if defined( USE_USB ) || defined( USE_BCON )
UserSetDefaultEnums oldDefaultUserSet = camera.UserSetDefault.GetValue();
#else
UserSetDefaultSelectorEnums oldDefaultUserSet = camera.UserSetDefaultSelector.GetValue();
#endif
// Load default settings.
cout << "Loading default settings" << endl;
camera.UserSetSelector.SetValue(UserSetSelector_Default);
camera.UserSetLoad.Execute();
// Set gain and exposure time values.
// The camera won't let you set specific values when related auto functions are active.
// So we need to disable the related auto functions before setting the values.
cout << "Turning off Gain Auto and Exposure Auto." << endl;
#if defined( USE_USB ) || defined( USE_BCON )
camera.GainAuto.SetValue(GainAuto_Off);
camera.Gain.SetValue(camera.Gain.GetMin());
camera.ExposureAuto.SetValue(ExposureAuto_Off);
camera.ExposureTime.SetValue(camera.ExposureTime.GetMin());
#else
camera.GainAuto.SetValue(GainAuto_Off);
camera.GainRaw.SetValue(camera.GainRaw.GetMin());
camera.ExposureAuto.SetValue(ExposureAuto_Off);
camera.ExposureTimeRaw.SetValue(camera.ExposureTimeRaw.GetMin());
#endif
// Save to user set 1.
//
// ATTENTION:
// This will overwrite all settings previously saved in user set 1.
cout << "Saving currently active settings to user set 1." << endl;
camera.UserSetSelector.SetValue(UserSetSelector_UserSet1);
camera.UserSetSave.Execute();
// Show default settings.
cout << endl << "Loading default settings." << endl;
camera.UserSetSelector.SetValue(UserSetSelector_Default);
camera.UserSetLoad.Execute();
cout << "Default settings" << endl;
cout << "================" << endl;
#if defined( USE_USB ) || defined( USE_BCON )
cout << "Gain : " << camera.Gain.GetValue() << endl;
cout << "Exposure time : " << camera.ExposureTime.GetValue() << endl;
#else
cout << "Gain : " << camera.GainRaw.GetValue() << endl;
cout << "Exposure time : " << camera.ExposureTimeRaw.GetValue() << endl;
#endif
// Show user set 1 settings.
cout << endl << "Loading user set 1 settings." << endl;
camera.UserSetSelector.SetValue(UserSetSelector_UserSet1);
camera.UserSetLoad.Execute();
cout << "User set 1 settings" << endl;
cout << "===================" << endl;
#if defined( USE_USB ) || defined( USE_BCON )
cout << "Gain : " << camera.Gain.GetValue() << endl;
cout << "Exposure time : " << camera.ExposureTime.GetValue() << endl;
#else
cout << "Gain : " << camera.GainRaw.GetValue() << endl;
cout << "Exposure time : " << camera.ExposureTimeRaw.GetValue() << endl;
#endif
#if defined( USE_USB ) || defined( USE_BCON )
// Set user set 1 as default user set:
// When the camera wakes up it will be configured
// with the settings from user set 1.
camera.UserSetDefault.SetValue(UserSetDefault_UserSet1);
// Restore the default user set selector.
camera.UserSetDefault.SetValue(oldDefaultUserSet);
#else
// Set user set 1 as default user set:
// When the camera wakes up it will be configured
// with the settings from user set 1.
camera.UserSetDefaultSelector.SetValue(UserSetDefaultSelector_UserSet1);
// Restore the default user set selector.
camera.UserSetDefaultSelector.SetValue(oldDefaultUserSet);
#endif
// Close the camera.
camera.Close();
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}
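
Because the name of the startup set feature differs between camera generations (UserSetDefault on SFNC 2.0 devices, UserSetDefaultSelector on older ones), the #ifdefs above can be avoided with generic node map access. A sketch, assuming an opened camera and a user set with the symbolic name "UserSet1":

// Select the startup user set without device specific #ifdefs (sketch).
GenApi::CEnumerationPtr userSetDefault( camera.GetNodeMap().GetNode( "UserSetDefault"));
if (!GenApi::IsWritable( userSetDefault))
{
    // Older SFNC versions use a different feature name.
    userSetDefault = camera.GetNodeMap().GetNode( "UserSetDefaultSelector");
}
if (GenApi::IsWritable( userSetDefault))
{
    userSetDefault->FromString( "UserSet1");
}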

Utility_Image

// Utility_Image.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample illustrates how to use the pylon image classes CPylonImage and CPylonBitmapImage.
CPylonImage supports handling image buffers of the various existing pixel types.
CPylonBitmapImage can be used to easily create Windows bitmaps for displaying images.
Additionally, there are two image class related interfaces in pylon, IImage and IReusableImage.
IImage can be used to access the image properties and the image buffer.
The IReusableImage interface extends the IImage interface to be able to reuse the
resources of the image to represent a different image.
Both CPylonImage and CPylonBitmapImage implement the IReusableImage interface.
The pylon grab result class CGrabResultPtr provides a cast operator to the IImage
interface. This eases the use of the grab result together with the image classes.
*/
#include <iomanip>
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
# include <pylon/PylonGUI.h>
#endif
#include "../include/SampleImageCreator.h"
// Namespace for using pylon objects.
using namespace Pylon;
using namespace GenApi;
// Namespace for using cout.
using namespace std;
// This is a helper function for printing image properties.
void PrintImageProperties( IImage& image)
{
cout
<< "Buffer: " << image.GetBuffer()
<< " Image Size: " << image.GetImageSize()
<< " Width: " << image.GetWidth()
<< " Height: " << image.GetHeight()
<< " Unique: " << image.IsUnique()
<< endl;
}
// This is a helper function for showing an image on the screen if Windows is used,
// and for printing the first bytes of the image.
void ShowImage( IImage& image, const char* message = NULL)
{
#ifdef PYLON_WIN_BUILD
// Display the image.
Pylon::DisplayImage(1, image);
#endif
if ( message)
{
cout << endl << message << " ";
}
// store state of cout
std::ios state(NULL);
state.copyfmt(cout);
const uint8_t* pBytes = reinterpret_cast<const uint8_t*>(image.GetBuffer());
cout << endl << "First six bytes of the image: " << endl;
for (unsigned int i = 0; i < 6; ++i)
{
cout << "0x" << hex << setfill('0') << setw(2) << unsigned(pBytes[i]) << " ";
}
cout << endl;
// restore state of cout
cout.copyfmt(state);
cerr << "Press Enter to continue." << endl;
while( cin.get() != '\n');
}
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Define some constants.
const uint32_t cWidth = 640;
const uint32_t cHeight = 480;
const uint32_t cPadding = 10;
const uint8_t cSampleGrayValue = 160;
// The CPylonImage basics.
{
// Create a pylon image with the given properties.
CPylonImage imageMono8( CPylonImage::Create( PixelType_Mono8, cWidth, cHeight));
cout << "The properties of the newly created image." << endl;
PrintImageProperties( imageMono8);
// The image class allocates a buffer large enough to hold the image.
// We can use it, for example, to fill the image with a test pattern.
uint32_t width = imageMono8.GetWidth();
uint32_t height = imageMono8.GetHeight();
uint8_t* buffer = static_cast<uint8_t*>( imageMono8.GetBuffer());
uint8_t* p = buffer;
for (uint32_t y = 0; y < height; ++y)
{
for (uint32_t x = 0; x < width; ++x, ++p)
{
*p = (uint8_t)((x + y) % 256);
}
}
// Show the image on the screen in a separate window.
ShowImage( imageMono8, "Created image.");
// If the pylon image object is copied or assigned then no image data copy is made.
// All objects reference the same buffer now. The image properties have been copied though.
// The IsUnique() method can be used to check whether a buffer is
// referenced by multiple pylon image objects.
CPylonImage sameImageMono8A( imageMono8);
CPylonImage sameImageMono8B = imageMono8;
cout << endl << "After assignment multiple images reference the same data." << endl;
PrintImageProperties( imageMono8);
PrintImageProperties( sameImageMono8A);
PrintImageProperties( sameImageMono8B);
// The CopyImage method can be used to create a full copy of an image.
CPylonImage copiedImage;
copiedImage.CopyImage( imageMono8);
cout << endl << "The properties of a full copy of the test image." << endl;
PrintImageProperties( copiedImage);
// The Release() method can be used to release any data.
// The object sameImageMono8B is now empty.
// No buffer is allocated.
sameImageMono8B.Release();
cout << endl << "Assigned to image object after releasing the image data." << endl;
PrintImageProperties( sameImageMono8B);
// A newly created image object is empty.
CPylonImage reusedImage;
cout << endl << "A newly created image object." << endl;
PrintImageProperties( reusedImage);
// The Reset() method can be used to reset the image properties
// and allocate a new buffer if required.
reusedImage.Reset( PixelType_Mono8, cWidth, cHeight);
cout << "After resetting the image properties. A new Buffer is allocated." << endl;
PrintImageProperties( reusedImage);
// Reset() never decreases the allocated buffer size if the
// new image fits into the current buffer.
// The new image is smaller and therefore the buffer is reused.
reusedImage.Reset( PixelType_Mono8, cWidth / 2, cHeight);
cout << "After resetting the image properties to a smaller image. The buffer is reused." << endl;
PrintImageProperties( reusedImage);
// A new buffer is allocated because the old buffer is
// too small for the new image.
reusedImage.Reset( PixelType_Mono8, cWidth * 2, cHeight);
cout << "After resetting the image properties to a larger image." << endl << "A new Buffer is allocated." << endl;
PrintImageProperties( reusedImage);
// The imageMono8 and sameImageMono8A objects still reference the
// same image. Because of this the buffer referenced by sameImageMono8A
// cannot be reused. A new buffer is allocated.
sameImageMono8A.Reset( PixelType_Mono8, cWidth, cHeight);
cout << endl << "After resetting the image properties while the image data is referenced by another image. A new Buffer is allocated." << endl;
PrintImageProperties( sameImageMono8A);
// For advanced use cases additional line padding and the image orientation can be defined, too.
sameImageMono8A.Reset( PixelType_Mono8, cWidth, cHeight, cPadding, ImageOrientation_TopDown);
cout << endl << "After resetting the image properties with additional padding." << endl;
PrintImageProperties( sameImageMono8A);
// The image objects are destroyed here and the buffers are deleted.
// An allocated image buffer is deleted if it is not referenced
// anymore by a pylon image object.
}
// The CPylonImage and user buffers.
{
// Create pylon images.
CPylonImage imageA;
CPylonImage imageB;
// Create a buffer for demonstration purposes. This could be a buffer of a 3rd party
// image library.
// This example uses a C++ library vector class for buffer allocation for automatic
// deletion of the buffer.
vector<uint8_t> buffer( (cWidth + cPadding) * cHeight, cSampleGrayValue);
size_t bufferSize = buffer.size() * sizeof( buffer[ 0 ]);
// Initializes the image object with the user buffer. Now the image object could be used to
// interface with other pylon objects more easily, e.g. the image format converter.
// The user buffer must not be deleted while it is attached to the pylon image object.
imageA.AttachUserBuffer( &buffer[ 0 ], bufferSize, PixelType_Mono8, cWidth, cHeight, cPadding);
cout << endl << "The properties of an image with an attached user buffer." << endl;
PrintImageProperties( imageA);
// The image can be assigned new properties as long as the image fits into the user buffer.
imageA.Reset( PixelType_Mono8, cWidth / 2, cHeight);
cout << "After resetting the image properties to a smaller image. The buffer is reused." << endl;
PrintImageProperties( imageA);
// This causes an exception because the attached user buffer is too small for the image.
try
{
cout << "Try calling the Reset method when the user buffer is too small for the new image." << endl;
imageA.Reset( PixelType_Mono8, cWidth * 2, cHeight);
}
catch (const GenericException &e)
{
cerr << "Expected exception: " << e.GetDescription() << endl;
}
// The CopyImage method can be used to create a full copy of the provided image.
imageB.CopyImage( &buffer[ 0 ], bufferSize, PixelType_Mono8, cWidth, cHeight, cPadding);
cout << endl << "The properties of an image after a full copy of a user buffer." << endl;
PrintImageProperties( imageB);
// The image objects are destroyed. The user must take care of the deletion of the user buffer.
}
// The CPylonImage and grab results.
{
// This smart pointer will receive the grab result data.
CGrabResultPtr ptrGrabResult;
// Try to get a grab result.
cout << endl << "Waiting for an image to be grabbed." << endl;
try
{
CInstantCamera Camera( CTlFactory::GetInstance().CreateFirstDevice());
Camera.GrabOne( 1000, ptrGrabResult);
}
catch (const GenericException &e)
{
cerr << "Could not grab an image: " << endl
<< e.GetDescription() << endl;
}
if ( ptrGrabResult && ptrGrabResult->GrabSucceeded())
{
// Create a pylon image.
CPylonImage image;
// A pylon grab result class CGrabResultPtr provides a cast operator to IImage.
// That's why it can be used like an image, e.g. to print its properties or
// to show it on the screen.
cout << endl << "The properties of the grabbed image." << endl;
PrintImageProperties( ptrGrabResult);
ShowImage( ptrGrabResult, "Grabbed image.");
// Initializes the image object with the buffer from the grab result.
// This prevents the reuse of the buffer for grabbing as long as it is
// not released.
// Please note that this is not relevant for this example because the
// camera object has been destroyed already.
image.AttachGrabResultBuffer( ptrGrabResult);
cout << endl << "The properties of an image with an attached grab result." << endl;
PrintImageProperties( image);
// Get the grab result image properties for later use.
EPixelType pixelType = ptrGrabResult->GetPixelType();
uint32_t width = ptrGrabResult->GetWidth();
uint32_t height = ptrGrabResult->GetHeight();
// Now the grab result can be released. The grab result buffer is now
// only held by the pylon image.
ptrGrabResult.Release();
cout << "After the grab result has been released." << endl;
PrintImageProperties( image);
// If a grab result buffer is referenced, a new buffer is always allocated on reset.
image.Reset( pixelType, width / 2, height);
cout << endl << "After resetting the image properties while a grab result is referenced. A new buffer is allocated." << endl;
PrintImageProperties( image);
}
}
// Loading and saving.
// Please note that this is only a brief overview. Please look at the
// Utility_ImageLoadAndSave sample for more information.
{
// Create pylon images.
CPylonImage imageSaved;
CPylonImage imageLoaded;
// Create a sample image.
imageSaved = SampleImageCreator::CreateJuliaFractal( PixelType_RGB8packed, cWidth, cHeight);
#ifdef PYLON_WIN_BUILD
// Save the image. The image is automatically converted to
// a format that can be saved if needed.
imageSaved.Save( ImageFileFormat_Bmp, "JuliaFractal.bmp");
#endif
#ifdef PYLON_WIN_BUILD
// Load the image.
imageLoaded.Load( "JuliaFractal.bmp");
cout << endl << "The properties of the loaded sample image." << endl;
PrintImageProperties( imageLoaded);
ShowImage( imageLoaded, "The loaded sample image is shown.");
#endif
}
// The GetAOI method.
// This method can be used to create partial images derived from an image, e.g. thumbnail images for displaying
// defects.
{
// Create pylon images.
CPylonImage sampleImage;
CPylonImage aoi;
CPylonImage aoiFromAoi;
// Create a sample image.
sampleImage = SampleImageCreator::CreateJuliaFractal( PixelType_RGB8packed, cWidth, cHeight);
cout << endl << "The properties of the sample image." << endl;
PrintImageProperties( sampleImage);
// Compute the coordinates of the area of interest.
uint32_t topLeftX = cWidth / 4;
uint32_t topLeftY = cHeight / 2;
uint32_t width = cWidth / 4;
uint32_t height = cHeight / 4;
// Create a new pylon image containing the AOI.
// No image data is copied. The same image buffer is referenced.
// The buffer start is now the first pixel of the AOI and the
// padding property of the pylon image object is used to skip over the
// part of a line outside of the AOI.
aoi = sampleImage.GetAoi( topLeftX, topLeftY, width, height);
cout << "After creating an AOI." << endl;
PrintImageProperties( aoi);
ShowImage( aoi, "AOI of the sample image.");
// CopyImage( const IImage& image, size_t newPaddingX) can be used to create a
// full copy and to remove the additional padding.
CPylonImage copiedAoi;
copiedAoi.CopyImage( aoi, 0);
cout << "The properties of a full copy of the AOI image." << endl;
PrintImageProperties( copiedAoi);
// GetAOI can be applied again for the AOI image.
topLeftX = width / 4;
topLeftY = height / 4;
width = width / 2;
height = height / 2;
aoiFromAoi = aoi.GetAoi( topLeftX, topLeftY, width, height);
// An AOI image is still valid if the source image object has been destroyed
// or the image data has been released.
aoi.Release();
sampleImage.Release();
// Show the image.
cout << "After creating an AOI of an AOI." << endl;
PrintImageProperties( aoiFromAoi);
ShowImage( aoiFromAoi, "AOI of the AOI of the sample image.");
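// For illustration (not part of the original sample): IsUnique() can be used to
// verify that the AOI image now holds the only reference to the image buffer.
cout << "The AOI image holds the only buffer reference: " << ( aoiFromAoi.IsUnique() ? "yes" : "no") << endl;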
// The AOI image still references the buffer of the source image.
// It is the only object that references this buffer.
// That's why the full buffer can be reused if needed.
aoiFromAoi.Reset( PixelType_Mono8, cWidth, cHeight);
cout << "After reusing the buffer of the sample image." << endl;
PrintImageProperties( aoiFromAoi);
}
// The GetPlane method.
// This method can be used to work with the planes of
// a planar image.
{
// Create an image object.
CPylonImage imageRGB8planar;
// Create a sample image.
imageRGB8planar = SampleImageCreator::CreateMandelbrotFractal( PixelType_RGB8planar, cWidth, cHeight);
ShowImage( imageRGB8planar, "Sample image.");
// Create images to access the planes of the planar image.
// No image data is copied. The same image buffer is referenced.
// The buffer start is the start of the plane and the pixel type is
// set to the corresponding pixel type of a plane.
CPylonImage redPlane = imageRGB8planar.GetPlane(0);
CPylonImage greenPlane = imageRGB8planar.GetPlane(1);
CPylonImage bluePlane = imageRGB8planar.GetPlane(2);
// Show the planes.
ShowImage( redPlane, "Red plane of the sample image.");
ShowImage( greenPlane, "Green plane of the sample image.");
ShowImage( bluePlane, "Blue plane of the sample image.");
// Now a plane can be modified. Here the red plane is set to zero.
memset( redPlane.GetBuffer(), 0, redPlane.GetImageSize());
// Show the image.
ShowImage( imageRGB8planar, "Sample image with red set to zero.");
}
// The CPylonBitmapImage class.
// This class can be used to easily create Windows bitmaps, e.g. for displaying.
{
#ifdef PYLON_WIN_BUILD
// Create a bitmap image
CPylonBitmapImage bitmapImage;
// Create a sample image.
CPylonImage sampleImage;
sampleImage = SampleImageCreator::CreateJuliaFractal( PixelType_RGB8packed, cWidth, cHeight);
// The bitmap image class automatically converts input images to the
// corresponding bitmap format.
bitmapImage.CopyImage( sampleImage);
cout << endl << "The properties of the bitmap image." << endl;
PrintImageProperties( bitmapImage);
ShowImage( bitmapImage, "The sample image is shown.");
// If the pylon bitmap image object is copied or assigned then no image data copy is made.
// All objects reference the same Windows bitmap now.
// The IsUnique() method can be used to check whether the Windows bitmap is
// referenced by multiple pylon image objects.
CPylonBitmapImage sameBitmapImageA( bitmapImage);
CPylonBitmapImage sameBitmapImageB = bitmapImage;
cout << endl << "After assignment multiple images reference the same data." << endl;
PrintImageProperties( bitmapImage);
PrintImageProperties( sameBitmapImageA);
PrintImageProperties( sameBitmapImageB);
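// For illustration (not part of the original sample): IsUnique() returns false
// here because three image objects still reference the same Windows bitmap.
cout << "bitmapImage is the only reference: " << ( bitmapImage.IsUnique() ? "yes" : "no") << endl;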
// The Release() method can be used to release any data.
// The object sameBitmapImageB is now empty.
// No bitmap is allocated.
sameBitmapImageB.Release();
cout << endl << "Assigned to image object after releasing the image data." << endl;
PrintImageProperties( sameBitmapImageB);
// The image format converter can be used to have more control over the conversion.
// In this example a monochrome version of a sample image is created.
// See the Utility_ImageFormatConverter sample for more details.
// (The converter declaration was missing from this listing; a Mono8 output
// format is assumed to restore it.)
CImageFormatConverter converter;
converter.OutputPixelFormat = PixelType_Mono8;
converter.Convert( bitmapImage, sampleImage);
// Show the image.
cout << endl << "The properties of the converted bitmap image." << endl;
PrintImageProperties( bitmapImage);
ShowImage( bitmapImage, "The to monochrome converted sample image is shown.");
// Reset can be used to reuse the underlying Windows bitmap if
// the new image properties are equal to the old ones.
// No additional program logic is needed for reusing a bitmap
// until new image properties are required.
bitmapImage.Reset( PixelType_Mono8, cWidth, cHeight);
cout << endl << "The properties of the reused bitmap image with equal properties." << endl;
PrintImageProperties( bitmapImage);
// Now the new image properties are different. A new Windows
// bitmap is created.
bitmapImage.Reset( PixelType_Mono8, cWidth / 2, cHeight);
cout << endl << "The properties of the newly allocated bitmap image with different properties." << endl;
PrintImageProperties( bitmapImage);
// The bitmap image class provides a cast operator to HBITMAP.
// The cast operator can be used for instance to provide the handle to Windows API functions.
HBITMAP bitmap = bitmapImage;
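// For illustration (a sketch, not part of the original sample): the handle can be
// passed to Windows GDI functions, e.g. to ::GetObject() for querying the bitmap header.
BITMAP bitmapHeader = { 0 };
::GetObject( bitmap, sizeof( bitmapHeader), &bitmapHeader);
cout << "Bitmap width reported by GDI: " << bitmapHeader.bmWidth << endl;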
// The bitmap can also be detached to use it without the pylon image object.
bitmap = bitmapImage.Detach();
// The pylon bitmap image is now empty.
cout << endl << "The image object after detaching the image data." << endl;
PrintImageProperties( bitmapImage);
// After detaching the bitmap must be deleted by the user.
::DeleteObject( bitmap);
#endif
}
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment out the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}

Utility_ImageFormatConverter

// Utility_ImageFormatConverter.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample illustrates how to use the image format
converter class CImageFormatConverter.
The image format converter accepts all image formats
produced by Basler camera devices and it is able to
convert these to a number of output formats.
The conversion can be controlled by several parameters.
See the converter class documentation for more details.
*/
#include <iomanip>
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
# include <pylon/PylonGUI.h>
#endif
#include "../include/SampleImageCreator.h"
// Namespace for using pylon objects.
using namespace Pylon;
using namespace GenApi;
// Namespace for using cout.
using namespace std;
// This is a helper function for showing an image on the screen if Windows is used,
// and for printing the first bytes of the image.
void ShowImage( IImage& image, const char* message = NULL)
{
#ifdef PYLON_WIN_BUILD
// Display the image.
Pylon::DisplayImage(1, image);
#endif
if ( message)
{
cout << endl << message << " ";
}
// store state of cout
std::ios state(NULL);
state.copyfmt(cout);
const uint8_t* pBytes = reinterpret_cast<const uint8_t*>(image.GetBuffer());
cout << endl << "First six bytes of the image: " << endl;
for (unsigned int i = 0; i < 6; ++i)
{
cout << "0x" << hex << setfill('0') << setw(2) << unsigned(pBytes[i]) << " ";
}
cout << endl;
// restore state of cout
cout.copyfmt(state);
cerr << "Press Enter to continue." << endl;
while( cin.get() != '\n');
}
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Define some constants.
const uint32_t cWidth = 640;
const uint32_t cHeight = 480;
// The image format converter basics.
{
// First the image format converter class must be created.
CImageFormatConverter converter;
// Second the converter must be parameterized. (The setup code was missing
// from this listing; the parameter values below are assumed.)
converter.OutputPixelFormat = PixelType_Mono16;
converter.OutputBitAlignment = OutputBitAlignment_MsbAligned;
// Then it can be used to convert input images to the target image format.
// Create a sample image.
CPylonImage imageRGB8packed = SampleImageCreator::CreateMandelbrotFractal( PixelType_RGB8packed, cWidth, cHeight);
ShowImage( imageRGB8packed, "Source image.");
// Create a target image
CPylonImage targetImage;
// Convert the image. Note that there are more overloaded Convert methods available, e.g.
// for converting the image from or to a user buffer.
converter.Convert( targetImage, imageRGB8packed);
ShowImage( targetImage, "Converted image.");
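// For illustration (a sketch, not part of the original sample): one of the
// overloads converts into a user buffer. GetBufferSizeForConversion() computes
// the required destination buffer size for the current converter settings.
size_t conversionBufferSize = converter.GetBufferSizeForConversion( imageRGB8packed);
uint8_t* pConversionBuffer = new uint8_t[ conversionBufferSize ];
converter.Convert( pConversionBuffer, conversionBufferSize, imageRGB8packed);
delete[] pConversionBuffer;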
}
// Checking if conversion is needed.
{
// Create a target image.
CPylonImage targetImage;
// Create the converter and set parameters. (The converter declaration was
// missing from this listing; a Mono8 output format is assumed.)
CImageFormatConverter converter;
converter.OutputPixelFormat = PixelType_Mono8;
// Try to get a grab result for demonstration purposes.
cout << endl << "Waiting for an image to be grabbed." << endl;
try
{
// This smart pointer will receive the grab result data.
CGrabResultPtr ptrGrabResult;
CInstantCamera Camera( CTlFactory::GetInstance().CreateFirstDevice());
if ( Camera.GrabOne( 1000, ptrGrabResult))
{
// Now we can check if conversion is required.
if ( converter.ImageHasDestinationFormat( ptrGrabResult))
{
// No conversion is needed. It can be skipped to save
// processing time.
ShowImage( ptrGrabResult, "Grabbed image.");
}
else
{
// Conversion is needed.
ShowImage( ptrGrabResult, "Grabbed image.");
converter.Convert( targetImage, ptrGrabResult);
ShowImage( targetImage, "Converted image.");
}
}
}
catch (const GenericException &e)
{
cerr << "Could not grab an image: " << endl
<< e.GetDescription() << endl;
}
}
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment out the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}

Utility_ImageLoadAndSave

// Utility_ImageLoadAndSave.cpp
/*
Note: Before getting started, Basler recommends reading the Programmer's Guide topic
in the pylon C++ API documentation that gets installed with pylon.
If you are upgrading to a higher major version of pylon, Basler also
strongly recommends reading the Migration topic in the pylon C++ API documentation.
This sample illustrates how to load and save images.
The CImagePersistence class provides static functions for
loading and saving images. It uses pylon's image-related
interfaces IImage and IReusableImage.
IImage can be used to access image properties and the image buffer.
Therefore, it is used when saving images. In addition, images can also
be saved by passing an image buffer and the corresponding properties.
The IReusableImage interface extends the IImage interface to be able to reuse
the resources of the image to represent a different image. The IReusableImage
interface is used when loading images.
The CPylonImage and CPylonBitmapImage image classes implement the
IReusableImage interface. These classes can therefore be used as targets
for loading images.
The grab result smart pointer classes provide a cast operator to the IImage
interface. This makes it possible to pass a grab result directly to the
function that saves images to disk.
*/
// Include files to use the PYLON API.
#include <pylon/PylonIncludes.h>
#include "../include/SampleImageCreator.h"
// Namespace for using pylon objects.
using namespace Pylon;
using namespace GenApi;
// Namespace for using cout.
using namespace std;
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Define some constants.
const uint32_t cWidth = 640;
const uint32_t cHeight = 480;
// Saving images using the CImagePersistence class.
{
// Create a sample image.
CPylonImage imageRGB16packed = SampleImageCreator::CreateMandelbrotFractal( PixelType_RGB16packed, cWidth, cHeight);
// If required, the image is automatically converted to a new image and then saved.
// An image with a bit depth higher than 8 bit is stored with a bit depth of 16 bit
// if supported by the image file format. In this case the pixel data is MSB aligned.
// If more control over the conversion is required then the CImageFormatConverter class
// can be used to convert the input image before saving it (not shown).
CImagePersistence::Save( ImageFileFormat_Tiff, "MandelbrotFractal.tiff", imageRGB16packed);
cout << "The image " << (CImagePersistence::CanSaveWithoutConversion( ImageFileFormat_Tiff, imageRGB16packed) ? "can" : "can not")
<< " be saved without conversion as tiff." << endl;
#ifdef PYLON_WIN_BUILD
// The CPylonImage and the CPylonBitmapImage classes provide a member function
// for saving images for convenience. This function calls CImagePersistence::Save().
imageRGB16packed.Save( ImageFileFormat_Bmp, "MandelbrotFractal.bmp");
// CanSaveWithoutConversion() can be used to check whether a conversion is performed when saving the image.
cout << "The image " << (CImagePersistence::CanSaveWithoutConversion( ImageFileFormat_Bmp, imageRGB16packed) ? "can" : "can not")
<< " be saved without conversion as bmp." << endl;
#endif
// Additionally it is possible to save image data that is not held by an image class.
// For demonstration purposes only, the buffer and the image properties from the sample image are used here.
EPixelType pixelType = imageRGB16packed.GetPixelType();
uint32_t width = imageRGB16packed.GetWidth();
uint32_t height = imageRGB16packed.GetHeight();
size_t paddingX = imageRGB16packed.GetPaddingX();
EImageOrientation orientation = imageRGB16packed.GetOrientation();
size_t bufferSize = imageRGB16packed.GetImageSize();
void* buffer = imageRGB16packed.GetBuffer();
"MandelbrotFractal.png",
buffer,
bufferSize,
pixelType,
width,
height,
paddingX,
orientation);
}
// Loading images.
{
// Create pylon images.
CPylonImage imageRGB16packedFromTiff;
CPylonImage imageBGR8packedFromBmp;
// Load the tiff image using the CImagePersistence class.
CImagePersistence::Load( "MandelbrotFractal.tiff", imageRGB16packedFromTiff);
cout << "The pixel type of the image is " << (imageRGB16packedFromTiff.GetPixelType() == PixelType_RGB16packed ? "" : "not ")
<< "RGB16packed." << endl;
#ifdef PYLON_WIN_BUILD
// The CPylonImage and the CPylonBitmapImage classes provide a member function
// for loading images for convenience. This function calls CImagePersistence::Load().
imageBGR8packedFromBmp.Load( "MandelbrotFractal.bmp");
// The image loaded from the bmp file has the BGR8packed format instead of the original
// RGB16packed format because it had to be converted for saving in the bmp format.
cout << "The pixel type of the image is " << (imageBGR8packedFromBmp.GetPixelType() == PixelType_BGR8packed ? "" : "not ")
<< "BGR8packed." << endl;
#endif
}
// JPEG handling is only supported on Windows.
#ifdef PYLON_WIN_BUILD
// Selecting the image quality when saving in JPEG format.
{
// Create a sample image.
CPylonImage imageRGB8packed = SampleImageCreator::CreateMandelbrotFractal( PixelType_RGB8packed, cWidth, cHeight);
// The JPEG image quality can be adjusted in the range from 0 to 100.
CImagePersistenceOptions additionalOptions;
// Set the lowest quality value.
additionalOptions.SetQuality(0);
// Save the image.
CImagePersistence::Save( ImageFileFormat_Jpeg, "MandelbrotFractal_0.jpg", imageRGB8packed, &additionalOptions);
// Set the highest quality value.
additionalOptions.SetQuality(100);
// Save the image.
CImagePersistence::Save( ImageFileFormat_Jpeg, "MandelbrotFractal_100.jpg", imageRGB8packed, &additionalOptions);
}
#endif
// Saving grabbed images.
{
// Try to get a grab result.
cout << endl << "Waiting for an image to be grabbed." << endl;
try
{
// This smart pointer will receive the grab result data.
CGrabResultPtr ptrGrabResult;
CInstantCamera Camera( CTlFactory::GetInstance().CreateFirstDevice());
if ( Camera.GrabOne( 1000, ptrGrabResult))
{
// The pylon grab result smart pointer classes provide a cast operator to the IImage
// interface. This makes it possible to pass a grab result directly to the
// function that saves an image to disk.
CImagePersistence::Save( ImageFileFormat_Png, "GrabbedImage.png", ptrGrabResult);
}
}
catch (const GenericException &e)
{
cerr << "Could not grab an image: " << endl
<< e.GetDescription() << endl;
}
}
}
catch (const GenericException &e)
{
// Error handling.
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment out the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while( cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}
