// python wrapper for vtkVideoSource
//
#define VTK_WRAPPING_CXX
#define VTK_STREAMS_FWD_ONLY
#include "vtkPythonArgs.h"
#include "vtkPythonOverload.h"
#include "vtkConfigure.h"
#include <cstddef>
#include <sstream>
#include "vtkVariant.h"
#include "vtkIndent.h"
#include "vtkVideoSource.h"

extern "C" { VTK_ABI_EXPORT void PyVTKAddFile_vtkVideoSource(PyObject *); }
extern "C" { VTK_ABI_EXPORT PyObject *PyvtkVideoSource_ClassNew(); }

#ifndef DECLARED_PyvtkImageAlgorithm_ClassNew
extern "C" { PyObject *PyvtkImageAlgorithm_ClassNew(); }
#define DECLARED_PyvtkImageAlgorithm_ClassNew
#endif

static const char *PyvtkVideoSource_Doc =
  "vtkVideoSource - Superclass of video input devices for VTK\n\n"
  "Superclass: vtkImageAlgorithm\n\n"
  "vtkVideoSource is a superclass for video input interfaces for VTK.\n"
  "The goal is to provide an interface which is very similar to the\n"
  "interface of a VCR, where the 'tape' is an internal frame buffer\n"
  "capable of holding a preset number of video frames. Specialized\n"
  "versions of this class record input from various video input sources.\n"
  "This base class records input from a noise source.\n"
  "@warning\n"
  "You must call the ReleaseSystemResources() method before the\n"
  "application exits. Otherwise the application might hang while trying\n"
  "to exit.\n"
  "@sa\n"
  "vtkWin32VideoSource vtkMILVideoSource\n\n";

static PyObject *
PyvtkVideoSource_IsTypeOf(PyObject *, PyObject *args)
{
  vtkPythonArgs ap(args, "IsTypeOf");

  char *temp0 = nullptr;
  PyObject *result = nullptr;

  if (ap.CheckArgCount(1) &&
      ap.GetValue(temp0))
  {
    int tempr = vtkVideoSource::IsTypeOf(temp0);

    if (!ap.ErrorOccurred())
    {
      result = ap.BuildValue(tempr);
    }
  }

  return result;
}

static PyObject *
PyvtkVideoSource_IsA(PyObject *self, PyObject *args)
{
  vtkPythonArgs ap(self, args, "IsA");

  vtkObjectBase *vp = ap.GetSelfPointer(self, args);
  vtkVideoSource *op = static_cast<vtkVideoSource *>(vp);

  char *temp0 = nullptr;
  PyObject *result = nullptr;

  if (op && ap.CheckArgCount(1) &&
      ap.GetValue(temp0))
  {
    int tempr = (ap.IsBound() ?
      op->IsA(temp0) :
      op->vtkVideoSource::IsA(temp0));

    if (!ap.ErrorOccurred())
    {
      result = ap.BuildValue(tempr);
    }
  }

  return result;
}

static PyObject *
PyvtkVideoSource_SafeDownCast(PyObject *, PyObject *args)
{
  vtkPythonArgs ap(args, "SafeDownCast");

  vtkObjectBase *temp0 = nullptr;
  PyObject *result = nullptr;

  if (ap.CheckArgCount(1) &&
      ap.GetVTKObject(temp0, "vtkObjectBase"))
  {
    vtkVideoSource *tempr = vtkVideoSource::SafeDownCast(temp0);

    if (!ap.ErrorOccurred())
    {
      result = ap.BuildVTKObject(tempr);
    }
  }

  return result;
}

static PyObject *
PyvtkVideoSource_NewInstance(PyObject *self, PyObject *args)
{
  vtkPythonArgs ap(self, args, "NewInstance");

  vtkObjectBase *vp = ap.GetSelfPointer(self, args);
  vtkVideoSource *op = static_cast<vtkVideoSource *>(vp);

  PyObject *result = nullptr;

  if (op && ap.CheckArgCount(0))
  {
    vtkVideoSource *tempr = (ap.IsBound() ?
      op->NewInstance() :
      op->vtkVideoSource::NewInstance());

    if (!ap.ErrorOccurred())
    {
      result = ap.BuildVTKObject(tempr);

      if (result && PyVTKObject_Check(result))
      {
        PyVTKObject_GetObject(result)->UnRegister(0);
        PyVTKObject_SetFlag(result, VTK_PYTHON_IGNORE_UNREGISTER, 1);
      }
    }
  }

  return result;
}

static PyObject *
PyvtkVideoSource_Record(PyObject *self, PyObject *args)
{
  vtkPythonArgs ap(self, args, "Record");

  vtkObjectBase *vp = ap.GetSelfPointer(self, args);
  vtkVideoSource *op = static_cast<vtkVideoSource *>(vp);

  PyObject *result = nullptr;

  if (op && ap.CheckArgCount(0))
  {
    if (ap.IsBound())
    {
      op->Record();
    }
    else
    {
      op->vtkVideoSource::Record();
    }

    if (!ap.ErrorOccurred())
    {
      result = ap.BuildNone();
    }
  }

  return result;
}

static PyObject *
PyvtkVideoSource_Play(PyObject *self, PyObject *args)
{
  vtkPythonArgs ap(self, args, "Play");

  vtkObjectBase *vp = ap.GetSelfPointer(self, args);
  vtkVideoSource *op = static_cast<vtkVideoSource *>(vp);

  PyObject *result = nullptr;

  if (op && ap.CheckArgCount(0))
  {
    if (ap.IsBound())
    {
      op->Play();
    }
    else
    {
      op->vtkVideoSource::Play();
    }

    if (!ap.ErrorOccurred())
    {
      result = ap.BuildNone();
    }
  }

  return result;
}

static PyObject *
PyvtkVideoSource_Stop(PyObject *self, PyObject *args)
{
  vtkPythonArgs ap(self, args, "Stop");

  vtkObjectBase *vp = ap.GetSelfPointer(self, args);
  vtkVideoSource *op = static_cast<vtkVideoSource *>(vp);

  PyObject *result = nullptr;

  if (op && ap.CheckArgCount(0))
  {
    if (ap.IsBound())
    {
      op->Stop();
    }
    else
    {
      op->vtkVideoSource::Stop();
    }

    if (!ap.ErrorOccurred())
    {
      result = ap.BuildNone();
    }
  }

  return result;
}

static PyObject *
PyvtkVideoSource_Rewind(PyObject *self, PyObject *args)
{
  vtkPythonArgs ap(self, args, "Rewind");

  vtkObjectBase *vp = ap.GetSelfPointer(self, args);
  vtkVideoSource *op = static_cast<vtkVideoSource *>(vp);

  PyObject *result = nullptr;

  if (op && ap.CheckArgCount(0))
  {
    if (ap.IsBound())
    {
      op->Rewind();
    }
    else
    {
      op->vtkVideoSource::Rewind();
    }

    if (!ap.ErrorOccurred())
    {
      result = ap.BuildNone();
    }
  }

  return result;
}

static PyObject *
PyvtkVideoSource_FastForward(PyObject *self, PyObject *args)
{
  vtkPythonArgs ap(self, args, "FastForward");

  vtkObjectBase *vp = ap.GetSelfPointer(self, args);
  vtkVideoSource *op = static_cast<vtkVideoSource *>(vp);

  PyObject *result = nullptr;

  if (op && ap.CheckArgCount(0))
  {
    if (ap.IsBound())
    {
      op->FastForward();
    }
    else
    {
      op->vtkVideoSource::FastForward();
    }

    if (!ap.ErrorOccurred())
    {
      result = ap.BuildNone();
    }
  }

  return result;
}

static PyObject *
PyvtkVideoSource_Seek(PyObject *self, PyObject *args)
{
  vtkPythonArgs ap(self, args, "Seek");

  vtkObjectBase *vp = ap.GetSelfPointer(self, args);
  vtkVideoSource *op = static_cast<vtkVideoSource *>(vp);

  int temp0;
  PyObject *result = nullptr;

  if (op && ap.CheckArgCount(1) &&
      ap.GetValue(temp0))
  {
    if (ap.IsBound())
    {
      op->Seek(temp0);
    }
    else
    {
      op->vtkVideoSource::Seek(temp0);
    }

    if (!ap.ErrorOccurred())
    {
      result = ap.BuildNone();
    }
  }

  return result;
}

static PyObject *
PyvtkVideoSource_Grab(PyObject *self, PyObject *args)
{
  vtkPythonArgs ap(self, args, "Grab");

  vtkObjectBase *vp = ap.GetSelfPointer(self, args);
  vtkVideoSource *op = static_cast<vtkVideoSource *>(vp);

  PyObject *result = nullptr;

  if (op && ap.CheckArgCount(0))
  {
    if (ap.IsBound())
    {
      op->Grab();
    }
    else
    {
      op->vtkVideoSource::Grab();
    }

    if (!ap.ErrorOccurred())
    {
      result = ap.BuildNone();
    }
  }

  return result;
}

static PyObject *
PyvtkVideoSource_GetRecording(PyObject *self, PyObject *args)
{
  vtkPythonArgs ap(self, args, "GetRecording");

  vtkObjectBase *vp = ap.GetSelfPointer(self, args);
  vtkVideoSource *op = static_cast<vtkVideoSource *>(vp);

  PyObject *result = nullptr;

  if (op && ap.CheckArgCount(0))
  {
    int tempr = (ap.IsBound() ?
op->GetRecording() : op->vtkVideoSource::GetRecording()); if (!ap.ErrorOccurred()) { result = ap.BuildValue(tempr); } } return result; } static PyObject * PyvtkVideoSource_GetPlaying(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetPlaying"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { int tempr = (ap.IsBound() ? op->GetPlaying() : op->vtkVideoSource::GetPlaying()); if (!ap.ErrorOccurred()) { result = ap.BuildValue(tempr); } } return result; } static PyObject * PyvtkVideoSource_SetFrameSize_s1(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetFrameSize"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); int temp0; int temp1; int temp2; PyObject *result = nullptr; if (op && ap.CheckArgCount(3) && ap.GetValue(temp0) && ap.GetValue(temp1) && ap.GetValue(temp2)) { if (ap.IsBound()) { op->SetFrameSize(temp0, temp1, temp2); } else { op->vtkVideoSource::SetFrameSize(temp0, temp1, temp2); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_SetFrameSize_s2(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetFrameSize"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); const int size0 = 3; int temp0[3]; int save0[3]; PyObject *result = nullptr; if (op && ap.CheckArgCount(1) && ap.GetArray(temp0, size0)) { ap.SaveArray(temp0, save0, size0); if (ap.IsBound()) { op->SetFrameSize(temp0); } else { op->vtkVideoSource::SetFrameSize(temp0); } if (ap.ArrayHasChanged(temp0, save0, size0) && !ap.ErrorOccurred()) { ap.SetArray(0, temp0, size0); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_SetFrameSize(PyObject *self, PyObject *args) { int nargs = vtkPythonArgs::GetArgCount(self, args); switch(nargs) { case 3: return PyvtkVideoSource_SetFrameSize_s1(self, args); case 1: return PyvtkVideoSource_SetFrameSize_s2(self, args); } vtkPythonArgs::ArgCountError(nargs, "SetFrameSize"); return nullptr; } static PyObject * PyvtkVideoSource_GetFrameSize(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetFrameSize"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); int sizer = 3; PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { int *tempr = (ap.IsBound() ? op->GetFrameSize() : op->vtkVideoSource::GetFrameSize()); if (!ap.ErrorOccurred()) { result = ap.BuildTuple(tempr, sizer); } } return result; } static PyObject * PyvtkVideoSource_SetFrameRate(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetFrameRate"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); float temp0; PyObject *result = nullptr; if (op && ap.CheckArgCount(1) && ap.GetValue(temp0)) { if (ap.IsBound()) { op->SetFrameRate(temp0); } else { op->vtkVideoSource::SetFrameRate(temp0); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_GetFrameRate(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetFrameRate"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { float tempr = (ap.IsBound() ? 
op->GetFrameRate() : op->vtkVideoSource::GetFrameRate()); if (!ap.ErrorOccurred()) { result = ap.BuildValue(tempr); } } return result; } static PyObject * PyvtkVideoSource_SetOutputFormat(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetOutputFormat"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); int temp0; PyObject *result = nullptr; if (op && ap.CheckArgCount(1) && ap.GetValue(temp0)) { if (ap.IsBound()) { op->SetOutputFormat(temp0); } else { op->vtkVideoSource::SetOutputFormat(temp0); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_SetOutputFormatToLuminance(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetOutputFormatToLuminance"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { if (ap.IsBound()) { op->SetOutputFormatToLuminance(); } else { op->vtkVideoSource::SetOutputFormatToLuminance(); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_SetOutputFormatToRGB(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetOutputFormatToRGB"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { if (ap.IsBound()) { op->SetOutputFormatToRGB(); } else { op->vtkVideoSource::SetOutputFormatToRGB(); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_SetOutputFormatToRGBA(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetOutputFormatToRGBA"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { if (ap.IsBound()) { op->SetOutputFormatToRGBA(); } else { op->vtkVideoSource::SetOutputFormatToRGBA(); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_GetOutputFormat(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetOutputFormat"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { int tempr = (ap.IsBound() ? op->GetOutputFormat() : op->vtkVideoSource::GetOutputFormat()); if (!ap.ErrorOccurred()) { result = ap.BuildValue(tempr); } } return result; } static PyObject * PyvtkVideoSource_SetFrameBufferSize(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetFrameBufferSize"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); int temp0; PyObject *result = nullptr; if (op && ap.CheckArgCount(1) && ap.GetValue(temp0)) { if (ap.IsBound()) { op->SetFrameBufferSize(temp0); } else { op->vtkVideoSource::SetFrameBufferSize(temp0); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_GetFrameBufferSize(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetFrameBufferSize"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { int tempr = (ap.IsBound() ? 
op->GetFrameBufferSize() : op->vtkVideoSource::GetFrameBufferSize()); if (!ap.ErrorOccurred()) { result = ap.BuildValue(tempr); } } return result; } static PyObject * PyvtkVideoSource_SetNumberOfOutputFrames(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetNumberOfOutputFrames"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); int temp0; PyObject *result = nullptr; if (op && ap.CheckArgCount(1) && ap.GetValue(temp0)) { if (ap.IsBound()) { op->SetNumberOfOutputFrames(temp0); } else { op->vtkVideoSource::SetNumberOfOutputFrames(temp0); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_GetNumberOfOutputFrames(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetNumberOfOutputFrames"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { int tempr = (ap.IsBound() ? op->GetNumberOfOutputFrames() : op->vtkVideoSource::GetNumberOfOutputFrames()); if (!ap.ErrorOccurred()) { result = ap.BuildValue(tempr); } } return result; } static PyObject * PyvtkVideoSource_AutoAdvanceOn(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "AutoAdvanceOn"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { if (ap.IsBound()) { op->AutoAdvanceOn(); } else { op->vtkVideoSource::AutoAdvanceOn(); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_AutoAdvanceOff(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "AutoAdvanceOff"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { if (ap.IsBound()) { op->AutoAdvanceOff(); } else { op->vtkVideoSource::AutoAdvanceOff(); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_SetAutoAdvance(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetAutoAdvance"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); int temp0; PyObject *result = nullptr; if (op && ap.CheckArgCount(1) && ap.GetValue(temp0)) { if (ap.IsBound()) { op->SetAutoAdvance(temp0); } else { op->vtkVideoSource::SetAutoAdvance(temp0); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_GetAutoAdvance(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetAutoAdvance"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { int tempr = (ap.IsBound() ? 
op->GetAutoAdvance() : op->vtkVideoSource::GetAutoAdvance()); if (!ap.ErrorOccurred()) { result = ap.BuildValue(tempr); } } return result; } static PyObject * PyvtkVideoSource_SetClipRegion_s1(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetClipRegion"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); const int size0 = 6; int temp0[6]; int save0[6]; PyObject *result = nullptr; if (op && ap.CheckArgCount(1) && ap.GetArray(temp0, size0)) { ap.SaveArray(temp0, save0, size0); if (ap.IsBound()) { op->SetClipRegion(temp0); } else { op->vtkVideoSource::SetClipRegion(temp0); } if (ap.ArrayHasChanged(temp0, save0, size0) && !ap.ErrorOccurred()) { ap.SetArray(0, temp0, size0); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_SetClipRegion_s2(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetClipRegion"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); int temp0; int temp1; int temp2; int temp3; int temp4; int temp5; PyObject *result = nullptr; if (op && ap.CheckArgCount(6) && ap.GetValue(temp0) && ap.GetValue(temp1) && ap.GetValue(temp2) && ap.GetValue(temp3) && ap.GetValue(temp4) && ap.GetValue(temp5)) { if (ap.IsBound()) { op->SetClipRegion(temp0, temp1, temp2, temp3, temp4, temp5); } else { op->vtkVideoSource::SetClipRegion(temp0, temp1, temp2, temp3, temp4, temp5); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_SetClipRegion(PyObject *self, PyObject *args) { int nargs = vtkPythonArgs::GetArgCount(self, args); switch(nargs) { case 1: return PyvtkVideoSource_SetClipRegion_s1(self, args); case 6: return PyvtkVideoSource_SetClipRegion_s2(self, args); } vtkPythonArgs::ArgCountError(nargs, "SetClipRegion"); return nullptr; } static PyObject * PyvtkVideoSource_GetClipRegion(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetClipRegion"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); int sizer = 6; PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { int *tempr = (ap.IsBound() ? 
op->GetClipRegion() : op->vtkVideoSource::GetClipRegion()); if (!ap.ErrorOccurred()) { result = ap.BuildTuple(tempr, sizer); } } return result; } static PyObject * PyvtkVideoSource_SetOutputWholeExtent_s1(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetOutputWholeExtent"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); int temp0; int temp1; int temp2; int temp3; int temp4; int temp5; PyObject *result = nullptr; if (op && ap.CheckArgCount(6) && ap.GetValue(temp0) && ap.GetValue(temp1) && ap.GetValue(temp2) && ap.GetValue(temp3) && ap.GetValue(temp4) && ap.GetValue(temp5)) { if (ap.IsBound()) { op->SetOutputWholeExtent(temp0, temp1, temp2, temp3, temp4, temp5); } else { op->vtkVideoSource::SetOutputWholeExtent(temp0, temp1, temp2, temp3, temp4, temp5); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_SetOutputWholeExtent_s2(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetOutputWholeExtent"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); const int size0 = 6; int temp0[6]; PyObject *result = nullptr; if (op && ap.CheckArgCount(1) && ap.GetArray(temp0, size0)) { if (ap.IsBound()) { op->SetOutputWholeExtent(temp0); } else { op->vtkVideoSource::SetOutputWholeExtent(temp0); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_SetOutputWholeExtent(PyObject *self, PyObject *args) { int nargs = vtkPythonArgs::GetArgCount(self, args); switch(nargs) { case 6: return PyvtkVideoSource_SetOutputWholeExtent_s1(self, args); case 1: return PyvtkVideoSource_SetOutputWholeExtent_s2(self, args); } vtkPythonArgs::ArgCountError(nargs, "SetOutputWholeExtent"); return nullptr; } static PyObject * PyvtkVideoSource_GetOutputWholeExtent(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetOutputWholeExtent"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); int sizer = 6; PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { int *tempr = (ap.IsBound() ? 
op->GetOutputWholeExtent() : op->vtkVideoSource::GetOutputWholeExtent()); if (!ap.ErrorOccurred()) { result = ap.BuildTuple(tempr, sizer); } } return result; } static PyObject * PyvtkVideoSource_SetDataSpacing_s1(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetDataSpacing"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); double temp0; double temp1; double temp2; PyObject *result = nullptr; if (op && ap.CheckArgCount(3) && ap.GetValue(temp0) && ap.GetValue(temp1) && ap.GetValue(temp2)) { if (ap.IsBound()) { op->SetDataSpacing(temp0, temp1, temp2); } else { op->vtkVideoSource::SetDataSpacing(temp0, temp1, temp2); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_SetDataSpacing_s2(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetDataSpacing"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); const int size0 = 3; double temp0[3]; PyObject *result = nullptr; if (op && ap.CheckArgCount(1) && ap.GetArray(temp0, size0)) { if (ap.IsBound()) { op->SetDataSpacing(temp0); } else { op->vtkVideoSource::SetDataSpacing(temp0); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_SetDataSpacing(PyObject *self, PyObject *args) { int nargs = vtkPythonArgs::GetArgCount(self, args); switch(nargs) { case 3: return PyvtkVideoSource_SetDataSpacing_s1(self, args); case 1: return PyvtkVideoSource_SetDataSpacing_s2(self, args); } vtkPythonArgs::ArgCountError(nargs, "SetDataSpacing"); return nullptr; } static PyObject * PyvtkVideoSource_GetDataSpacing(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetDataSpacing"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); int sizer = 3; PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { double *tempr = (ap.IsBound() ? 
op->GetDataSpacing() : op->vtkVideoSource::GetDataSpacing()); if (!ap.ErrorOccurred()) { result = ap.BuildTuple(tempr, sizer); } } return result; } static PyObject * PyvtkVideoSource_SetDataOrigin_s1(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetDataOrigin"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); double temp0; double temp1; double temp2; PyObject *result = nullptr; if (op && ap.CheckArgCount(3) && ap.GetValue(temp0) && ap.GetValue(temp1) && ap.GetValue(temp2)) { if (ap.IsBound()) { op->SetDataOrigin(temp0, temp1, temp2); } else { op->vtkVideoSource::SetDataOrigin(temp0, temp1, temp2); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_SetDataOrigin_s2(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetDataOrigin"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); const int size0 = 3; double temp0[3]; PyObject *result = nullptr; if (op && ap.CheckArgCount(1) && ap.GetArray(temp0, size0)) { if (ap.IsBound()) { op->SetDataOrigin(temp0); } else { op->vtkVideoSource::SetDataOrigin(temp0); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_SetDataOrigin(PyObject *self, PyObject *args) { int nargs = vtkPythonArgs::GetArgCount(self, args); switch(nargs) { case 3: return PyvtkVideoSource_SetDataOrigin_s1(self, args); case 1: return PyvtkVideoSource_SetDataOrigin_s2(self, args); } vtkPythonArgs::ArgCountError(nargs, "SetDataOrigin"); return nullptr; } static PyObject * PyvtkVideoSource_GetDataOrigin(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetDataOrigin"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); int sizer = 3; PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { double *tempr = (ap.IsBound() ? op->GetDataOrigin() : op->vtkVideoSource::GetDataOrigin()); if (!ap.ErrorOccurred()) { result = ap.BuildTuple(tempr, sizer); } } return result; } static PyObject * PyvtkVideoSource_SetOpacity(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetOpacity"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); float temp0; PyObject *result = nullptr; if (op && ap.CheckArgCount(1) && ap.GetValue(temp0)) { if (ap.IsBound()) { op->SetOpacity(temp0); } else { op->vtkVideoSource::SetOpacity(temp0); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_GetOpacity(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetOpacity"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { float tempr = (ap.IsBound() ? op->GetOpacity() : op->vtkVideoSource::GetOpacity()); if (!ap.ErrorOccurred()) { result = ap.BuildValue(tempr); } } return result; } static PyObject * PyvtkVideoSource_GetFrameCount(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetFrameCount"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { int tempr = (ap.IsBound() ? 
op->GetFrameCount() : op->vtkVideoSource::GetFrameCount()); if (!ap.ErrorOccurred()) { result = ap.BuildValue(tempr); } } return result; } static PyObject * PyvtkVideoSource_SetFrameCount(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetFrameCount"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); int temp0; PyObject *result = nullptr; if (op && ap.CheckArgCount(1) && ap.GetValue(temp0)) { if (ap.IsBound()) { op->SetFrameCount(temp0); } else { op->vtkVideoSource::SetFrameCount(temp0); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_GetFrameIndex(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetFrameIndex"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { int tempr = (ap.IsBound() ? op->GetFrameIndex() : op->vtkVideoSource::GetFrameIndex()); if (!ap.ErrorOccurred()) { result = ap.BuildValue(tempr); } } return result; } static PyObject * PyvtkVideoSource_GetFrameTimeStamp_s1(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetFrameTimeStamp"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); int temp0; PyObject *result = nullptr; if (op && ap.CheckArgCount(1) && ap.GetValue(temp0)) { double tempr = (ap.IsBound() ? op->GetFrameTimeStamp(temp0) : op->vtkVideoSource::GetFrameTimeStamp(temp0)); if (!ap.ErrorOccurred()) { result = ap.BuildValue(tempr); } } return result; } static PyObject * PyvtkVideoSource_GetFrameTimeStamp_s2(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetFrameTimeStamp"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { double tempr = (ap.IsBound() ? op->GetFrameTimeStamp() : op->vtkVideoSource::GetFrameTimeStamp()); if (!ap.ErrorOccurred()) { result = ap.BuildValue(tempr); } } return result; } static PyObject * PyvtkVideoSource_GetFrameTimeStamp(PyObject *self, PyObject *args) { int nargs = vtkPythonArgs::GetArgCount(self, args); switch(nargs) { case 1: return PyvtkVideoSource_GetFrameTimeStamp_s1(self, args); case 0: return PyvtkVideoSource_GetFrameTimeStamp_s2(self, args); } vtkPythonArgs::ArgCountError(nargs, "GetFrameTimeStamp"); return nullptr; } static PyObject * PyvtkVideoSource_Initialize(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "Initialize"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { if (ap.IsBound()) { op->Initialize(); } else { op->vtkVideoSource::Initialize(); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_GetInitialized(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetInitialized"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { int tempr = (ap.IsBound() ? 
op->GetInitialized() : op->vtkVideoSource::GetInitialized()); if (!ap.ErrorOccurred()) { result = ap.BuildValue(tempr); } } return result; } static PyObject * PyvtkVideoSource_ReleaseSystemResources(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "ReleaseSystemResources"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { if (ap.IsBound()) { op->ReleaseSystemResources(); } else { op->vtkVideoSource::ReleaseSystemResources(); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_InternalGrab(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "InternalGrab"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { if (ap.IsBound()) { op->InternalGrab(); } else { op->vtkVideoSource::InternalGrab(); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_SetStartTimeStamp(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "SetStartTimeStamp"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); double temp0; PyObject *result = nullptr; if (op && ap.CheckArgCount(1) && ap.GetValue(temp0)) { if (ap.IsBound()) { op->SetStartTimeStamp(temp0); } else { op->vtkVideoSource::SetStartTimeStamp(temp0); } if (!ap.ErrorOccurred()) { result = ap.BuildNone(); } } return result; } static PyObject * PyvtkVideoSource_GetStartTimeStamp(PyObject *self, PyObject *args) { vtkPythonArgs ap(self, args, "GetStartTimeStamp"); vtkObjectBase *vp = ap.GetSelfPointer(self, args); vtkVideoSource *op = static_cast(vp); PyObject *result = nullptr; if (op && ap.CheckArgCount(0)) { double tempr = (ap.IsBound() ? op->GetStartTimeStamp() : op->vtkVideoSource::GetStartTimeStamp()); if (!ap.ErrorOccurred()) { result = ap.BuildValue(tempr); } } return result; } static PyMethodDef PyvtkVideoSource_Methods[] = { {"IsTypeOf", PyvtkVideoSource_IsTypeOf, METH_VARARGS, "V.IsTypeOf(string) -> int\nC++: static vtkTypeBool IsTypeOf(const char *type)\n\nReturn 1 if this class type is the same type of (or a subclass\nof) the named class. Returns 0 otherwise. This method works in\ncombination with vtkTypeMacro found in vtkSetGet.h.\n"}, {"IsA", PyvtkVideoSource_IsA, METH_VARARGS, "V.IsA(string) -> int\nC++: vtkTypeBool IsA(const char *type) override;\n\nReturn 1 if this class is the same type of (or a subclass of) the\nnamed class. Returns 0 otherwise. This method works in\ncombination with vtkTypeMacro found in vtkSetGet.h.\n"}, {"SafeDownCast", PyvtkVideoSource_SafeDownCast, METH_VARARGS, "V.SafeDownCast(vtkObjectBase) -> vtkVideoSource\nC++: static vtkVideoSource *SafeDownCast(vtkObjectBase *o)\n\n"}, {"NewInstance", PyvtkVideoSource_NewInstance, METH_VARARGS, "V.NewInstance() -> vtkVideoSource\nC++: vtkVideoSource *NewInstance()\n\n"}, {"Record", PyvtkVideoSource_Record, METH_VARARGS, "V.Record()\nC++: virtual void Record()\n\nRecord incoming video at the specified FrameRate. 
The recording\ncontinues indefinitely until Stop() is called.\n"}, {"Play", PyvtkVideoSource_Play, METH_VARARGS, "V.Play()\nC++: virtual void Play()\n\nPlay through the 'tape' sequentially at the specified frame rate.\nIf you have just finished Recoding, you should call Rewind()\nfirst.\n"}, {"Stop", PyvtkVideoSource_Stop, METH_VARARGS, "V.Stop()\nC++: virtual void Stop()\n\nStop recording or playing.\n"}, {"Rewind", PyvtkVideoSource_Rewind, METH_VARARGS, "V.Rewind()\nC++: virtual void Rewind()\n\nRewind to the frame with the earliest timestamp. Record\noperations will start on the following frame, therefore if you\nwant to re-record over this frame you must call Seek(-1) before\ncalling Grab() or Record().\n"}, {"FastForward", PyvtkVideoSource_FastForward, METH_VARARGS, "V.FastForward()\nC++: virtual void FastForward()\n\nFastForward to the last frame that was recorded (i.e. to the\nframe that has the most recent timestamp).\n"}, {"Seek", PyvtkVideoSource_Seek, METH_VARARGS, "V.Seek(int)\nC++: virtual void Seek(int n)\n\nSeek forwards or backwards by the specified number of frames\n(positive is forward, negative is backward).\n"}, {"Grab", PyvtkVideoSource_Grab, METH_VARARGS, "V.Grab()\nC++: virtual void Grab()\n\nGrab a single video frame.\n"}, {"GetRecording", PyvtkVideoSource_GetRecording, METH_VARARGS, "V.GetRecording() -> int\nC++: virtual int GetRecording()\n\nAre we in record mode? (record mode and play mode are mutually\nexclusive).\n"}, {"GetPlaying", PyvtkVideoSource_GetPlaying, METH_VARARGS, "V.GetPlaying() -> int\nC++: virtual int GetPlaying()\n\nAre we in play mode? (record mode and play mode are mutually\nexclusive).\n"}, {"SetFrameSize", PyvtkVideoSource_SetFrameSize, METH_VARARGS, "V.SetFrameSize(int, int, int)\nC++: virtual void SetFrameSize(int x, int y, int z)\nV.SetFrameSize([int, int, int])\nC++: virtual void SetFrameSize(int dim[3])\n\nSet the full-frame size. This must be an allowed size for the\ndevice, the device may either refuse a request for an illegal\nframe size or automatically choose a new frame size. The default\nis usually 320x240x1, but can be device specific. The 'depth'\nshould always be 1 (unless you have a device that can handle 3D\nacquisition).\n"}, {"GetFrameSize", PyvtkVideoSource_GetFrameSize, METH_VARARGS, "V.GetFrameSize() -> (int, int, int)\nC++: int *GetFrameSize()\n\n"}, {"SetFrameRate", PyvtkVideoSource_SetFrameRate, METH_VARARGS, "V.SetFrameRate(float)\nC++: virtual void SetFrameRate(float rate)\n\nRequest a particular frame rate (default 30 frames per second).\n"}, {"GetFrameRate", PyvtkVideoSource_GetFrameRate, METH_VARARGS, "V.GetFrameRate() -> float\nC++: virtual float GetFrameRate()\n\nRequest a particular frame rate (default 30 frames per second).\n"}, {"SetOutputFormat", PyvtkVideoSource_SetOutputFormat, METH_VARARGS, "V.SetOutputFormat(int)\nC++: virtual void SetOutputFormat(int format)\n\nSet the output format. This must be appropriate for device,\nusually only VTK_LUMINANCE, VTK_RGB, and VTK_RGBA are supported.\n"}, {"SetOutputFormatToLuminance", PyvtkVideoSource_SetOutputFormatToLuminance, METH_VARARGS, "V.SetOutputFormatToLuminance()\nC++: void SetOutputFormatToLuminance()\n\nSet the output format. This must be appropriate for device,\nusually only VTK_LUMINANCE, VTK_RGB, and VTK_RGBA are supported.\n"}, {"SetOutputFormatToRGB", PyvtkVideoSource_SetOutputFormatToRGB, METH_VARARGS, "V.SetOutputFormatToRGB()\nC++: void SetOutputFormatToRGB()\n\nSet the output format. 
This must be appropriate for device,\nusually only VTK_LUMINANCE, VTK_RGB, and VTK_RGBA are supported.\n"}, {"SetOutputFormatToRGBA", PyvtkVideoSource_SetOutputFormatToRGBA, METH_VARARGS, "V.SetOutputFormatToRGBA()\nC++: void SetOutputFormatToRGBA()\n\nSet the output format. This must be appropriate for device,\nusually only VTK_LUMINANCE, VTK_RGB, and VTK_RGBA are supported.\n"}, {"GetOutputFormat", PyvtkVideoSource_GetOutputFormat, METH_VARARGS, "V.GetOutputFormat() -> int\nC++: virtual int GetOutputFormat()\n\nSet the output format. This must be appropriate for device,\nusually only VTK_LUMINANCE, VTK_RGB, and VTK_RGBA are supported.\n"}, {"SetFrameBufferSize", PyvtkVideoSource_SetFrameBufferSize, METH_VARARGS, "V.SetFrameBufferSize(int)\nC++: virtual void SetFrameBufferSize(int FrameBufferSize)\n\nSet size of the frame buffer, i.e. the number of frames that the\n'tape' can store.\n"}, {"GetFrameBufferSize", PyvtkVideoSource_GetFrameBufferSize, METH_VARARGS, "V.GetFrameBufferSize() -> int\nC++: virtual int GetFrameBufferSize()\n\nSet size of the frame buffer, i.e. the number of frames that the\n'tape' can store.\n"}, {"SetNumberOfOutputFrames", PyvtkVideoSource_SetNumberOfOutputFrames, METH_VARARGS, "V.SetNumberOfOutputFrames(int)\nC++: virtual void SetNumberOfOutputFrames(int _arg)\n\nSet the number of frames to copy to the output on each execute.\nThe frames will be concatenated along the Z dimension, with the\nmost recent frame first. Default: 1\n"}, {"GetNumberOfOutputFrames", PyvtkVideoSource_GetNumberOfOutputFrames, METH_VARARGS, "V.GetNumberOfOutputFrames() -> int\nC++: virtual int GetNumberOfOutputFrames()\n\nSet the number of frames to copy to the output on each execute.\nThe frames will be concatenated along the Z dimension, with the\nmost recent frame first. Default: 1\n"}, {"AutoAdvanceOn", PyvtkVideoSource_AutoAdvanceOn, METH_VARARGS, "V.AutoAdvanceOn()\nC++: virtual void AutoAdvanceOn()\n\nSet whether to automatically advance the buffer before each grab.\nDefault: on\n"}, {"AutoAdvanceOff", PyvtkVideoSource_AutoAdvanceOff, METH_VARARGS, "V.AutoAdvanceOff()\nC++: virtual void AutoAdvanceOff()\n\nSet whether to automatically advance the buffer before each grab.\nDefault: on\n"}, {"SetAutoAdvance", PyvtkVideoSource_SetAutoAdvance, METH_VARARGS, "V.SetAutoAdvance(int)\nC++: virtual void SetAutoAdvance(int _arg)\n\nSet whether to automatically advance the buffer before each grab.\nDefault: on\n"}, {"GetAutoAdvance", PyvtkVideoSource_GetAutoAdvance, METH_VARARGS, "V.GetAutoAdvance() -> int\nC++: virtual int GetAutoAdvance()\n\nSet whether to automatically advance the buffer before each grab.\nDefault: on\n"}, {"SetClipRegion", PyvtkVideoSource_SetClipRegion, METH_VARARGS, "V.SetClipRegion([int, int, int, int, int, int])\nC++: virtual void SetClipRegion(int r[6])\nV.SetClipRegion(int, int, int, int, int, int)\nC++: virtual void SetClipRegion(int x0, int x1, int y0, int y1,\n int z0, int z1)\n\nSet the clip rectangle for the frames. The video will be clipped\nbefore it is copied into the framebuffer. 
Changing the\nClipRegion will destroy the current contents of the framebuffer.\nThe default ClipRegion is\n(0,VTK_INT_MAX,0,VTK_INT_MAX,0,VTK_INT_MAX).\n"}, {"GetClipRegion", PyvtkVideoSource_GetClipRegion, METH_VARARGS, "V.GetClipRegion() -> (int, int, int, int, int, int)\nC++: int *GetClipRegion()\n\n"}, {"SetOutputWholeExtent", PyvtkVideoSource_SetOutputWholeExtent, METH_VARARGS, "V.SetOutputWholeExtent(int, int, int, int, int, int)\nC++: void SetOutputWholeExtent(int, int, int, int, int, int)\nV.SetOutputWholeExtent((int, int, int, int, int, int))\nC++: void SetOutputWholeExtent(int a[6])\n\n"}, {"GetOutputWholeExtent", PyvtkVideoSource_GetOutputWholeExtent, METH_VARARGS, "V.GetOutputWholeExtent() -> (int, int, int, int, int, int)\nC++: int *GetOutputWholeExtent()\n\n"}, {"SetDataSpacing", PyvtkVideoSource_SetDataSpacing, METH_VARARGS, "V.SetDataSpacing(float, float, float)\nC++: void SetDataSpacing(double, double, double)\nV.SetDataSpacing((float, float, float))\nC++: void SetDataSpacing(double a[3])\n\n"}, {"GetDataSpacing", PyvtkVideoSource_GetDataSpacing, METH_VARARGS, "V.GetDataSpacing() -> (float, float, float)\nC++: double *GetDataSpacing()\n\n"}, {"SetDataOrigin", PyvtkVideoSource_SetDataOrigin, METH_VARARGS, "V.SetDataOrigin(float, float, float)\nC++: void SetDataOrigin(double, double, double)\nV.SetDataOrigin((float, float, float))\nC++: void SetDataOrigin(double a[3])\n\n"}, {"GetDataOrigin", PyvtkVideoSource_GetDataOrigin, METH_VARARGS, "V.GetDataOrigin() -> (float, float, float)\nC++: double *GetDataOrigin()\n\n"}, {"SetOpacity", PyvtkVideoSource_SetOpacity, METH_VARARGS, "V.SetOpacity(float)\nC++: virtual void SetOpacity(float _arg)\n\nFor RGBA output only (4 scalar components), set the opacity. \nThis will not modify the existing contents of the framebuffer,\nonly subsequently grabbed frames.\n"}, {"GetOpacity", PyvtkVideoSource_GetOpacity, METH_VARARGS, "V.GetOpacity() -> float\nC++: virtual float GetOpacity()\n\nFor RGBA output only (4 scalar components), set the opacity. \nThis will not modify the existing contents of the framebuffer,\nonly subsequently grabbed frames.\n"}, {"GetFrameCount", PyvtkVideoSource_GetFrameCount, METH_VARARGS, "V.GetFrameCount() -> int\nC++: virtual int GetFrameCount()\n\nThis value is incremented each time a frame is grabbed. reset it\nto zero (or any other value) at any time.\n"}, {"SetFrameCount", PyvtkVideoSource_SetFrameCount, METH_VARARGS, "V.SetFrameCount(int)\nC++: virtual void SetFrameCount(int _arg)\n\nThis value is incremented each time a frame is grabbed. reset it\nto zero (or any other value) at any time.\n"}, {"GetFrameIndex", PyvtkVideoSource_GetFrameIndex, METH_VARARGS, "V.GetFrameIndex() -> int\nC++: virtual int GetFrameIndex()\n\nGet the frame index relative to the 'beginning of the tape'. \nThis value wraps back to zero if it increases past the\nFrameBufferSize.\n"}, {"GetFrameTimeStamp", PyvtkVideoSource_GetFrameTimeStamp, METH_VARARGS, "V.GetFrameTimeStamp(int) -> float\nC++: virtual double GetFrameTimeStamp(int frame)\nV.GetFrameTimeStamp() -> float\nC++: double GetFrameTimeStamp()\n\nGet a time stamp in seconds (resolution of milliseconds) for a\nvideo frame. Time began on Jan 1, 1970. You can specify a\nnumber (negative or positive) to specify the position of the\nvideo frame relative to the current frame.\n"}, {"Initialize", PyvtkVideoSource_Initialize, METH_VARARGS, "V.Initialize()\nC++: virtual void Initialize()\n\nInitialize the hardware. 
This is called automatically on the\nfirst Update or Grab.\n"},
  {"GetInitialized", PyvtkVideoSource_GetInitialized, METH_VARARGS,
   "V.GetInitialized() -> int\nC++: virtual int GetInitialized()\n\nInitialize the hardware. This is called automatically on the\nfirst Update or Grab.\n"},
  {"ReleaseSystemResources", PyvtkVideoSource_ReleaseSystemResources, METH_VARARGS,
   "V.ReleaseSystemResources()\nC++: virtual void ReleaseSystemResources()\n\nRelease the video driver. This method must be called before\napplication exit, or else the application might hang during exit.\n"},
  {"InternalGrab", PyvtkVideoSource_InternalGrab, METH_VARARGS,
   "V.InternalGrab()\nC++: virtual void InternalGrab()\n\nThe internal function which actually does the grab. You will\ndefinitely want to override this if you develop a vtkVideoSource\nsubclass.\n"},
  {"SetStartTimeStamp", PyvtkVideoSource_SetStartTimeStamp, METH_VARARGS,
   "V.SetStartTimeStamp(float)\nC++: void SetStartTimeStamp(double t)\n\nAn internal variable which marks the beginning of a Record\nsession. These methods are for internal use only.\n"},
  {"GetStartTimeStamp", PyvtkVideoSource_GetStartTimeStamp, METH_VARARGS,
   "V.GetStartTimeStamp() -> float\nC++: double GetStartTimeStamp()\n\nAn internal variable which marks the beginning of a Record\nsession. These methods are for internal use only.\n"},
  {nullptr, nullptr, 0, nullptr}
};

static PyTypeObject PyvtkVideoSource_Type = {
  PyVarObject_HEAD_INIT(&PyType_Type, 0)
  "vtkIOVideoPython.vtkVideoSource", // tp_name
  sizeof(PyVTKObject), // tp_basicsize
  0, // tp_itemsize
  PyVTKObject_Delete, // tp_dealloc
  0, // tp_print
  nullptr, // tp_getattr
  nullptr, // tp_setattr
  nullptr, // tp_compare
  PyVTKObject_Repr, // tp_repr
  nullptr, // tp_as_number
  nullptr, // tp_as_sequence
  nullptr, // tp_as_mapping
  nullptr, // tp_hash
  nullptr, // tp_call
  PyVTKObject_String, // tp_str
  PyObject_GenericGetAttr, // tp_getattro
  PyObject_GenericSetAttr, // tp_setattro
  &PyVTKObject_AsBuffer, // tp_as_buffer
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_BASETYPE, // tp_flags
  PyvtkVideoSource_Doc, // tp_doc
  PyVTKObject_Traverse, // tp_traverse
  nullptr, // tp_clear
  nullptr, // tp_richcompare
  offsetof(PyVTKObject, vtk_weakreflist), // tp_weaklistoffset
  nullptr, // tp_iter
  nullptr, // tp_iternext
  nullptr, // tp_methods
  nullptr, // tp_members
  PyVTKObject_GetSet, // tp_getset
  nullptr, // tp_base
  nullptr, // tp_dict
  nullptr, // tp_descr_get
  nullptr, // tp_descr_set
  offsetof(PyVTKObject, vtk_dict), // tp_dictoffset
  nullptr, // tp_init
  nullptr, // tp_alloc
  PyVTKObject_New, // tp_new
  PyObject_GC_Del, // tp_free
  nullptr, // tp_is_gc
  nullptr, // tp_bases
  nullptr, // tp_mro
  nullptr, // tp_cache
  nullptr, // tp_subclasses
  nullptr, // tp_weaklist
  VTK_WRAP_PYTHON_SUPPRESS_UNINITIALIZED
};

static vtkObjectBase *PyvtkVideoSource_StaticNew()
{
  return vtkVideoSource::New();
}

PyObject *PyvtkVideoSource_ClassNew()
{
  PyVTKClass_Add(
    &PyvtkVideoSource_Type, PyvtkVideoSource_Methods,
    "vtkVideoSource", &PyvtkVideoSource_StaticNew);

  PyTypeObject *pytype = &PyvtkVideoSource_Type;

  if ((pytype->tp_flags & Py_TPFLAGS_READY) != 0)
  {
    return (PyObject *)pytype;
  }

#if !defined(VTK_PY3K) && PY_VERSION_HEX >= 0x02060000
  pytype->tp_flags |= Py_TPFLAGS_HAVE_NEWBUFFER;
#endif

  pytype->tp_base = (PyTypeObject *)PyvtkImageAlgorithm_ClassNew();

  PyType_Ready(pytype);
  return (PyObject *)pytype;
}

void PyVTKAddFile_vtkVideoSource(
  PyObject *dict)
{
  PyObject *o;

  o = PyvtkVideoSource_ClassNew();

  if (o && PyDict_SetItemString(dict, "vtkVideoSource", o) != 0)
  {
    Py_DECREF(o);
  }
}
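/* Illustrative usage sketch (added for reference; not part of the generated
   wrapper).  It shows how the VCR-style interface documented in the method
   table above is typically driven from C++.  The frame size, frame rate, and
   output format below are arbitrary example values, not defaults mandated by
   this file.

     #include "vtkSmartPointer.h"
     #include "vtkVideoSource.h"

     int main()
     {
       vtkSmartPointer<vtkVideoSource> source =
         vtkSmartPointer<vtkVideoSource>::New();

       source->SetFrameSize(320, 240, 1);  // width, height, depth (depth is normally 1)
       source->SetFrameRate(30.0f);        // frames per second
       source->SetOutputFormatToRGB();

       source->Record();                   // capture into the internal frame buffer
       // ... allow frames to accumulate ...
       source->Stop();

       source->Rewind();                   // return to the earliest buffered frame
       source->Play();                     // play the buffered frames back
       source->Stop();

       source->ReleaseSystemResources();   // required before the application exits
       return 0;
     }

   The same calls are exposed to Python by this module (tp_name
   "vtkIOVideoPython.vtkVideoSource"), e.g. source = vtkVideoSource();
   source.Record(). */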