MFC CFindReplaceDialog destruction

How can I properly destroy a CFindReplaceDialog object through its pointer?
For example, I have this class:
class CjumpView : public CRichEditView
{
CFindReplaceDialog *m_pFRDlg;
// ...
};
CjumpView::CjumpView()
: m_pFRDlg(NULL)
{
}
CjumpView::~CjumpView()
{
if(m_pFRDlg != NULL)
{
m_pFRDlg->DestroyWindow();
delete(m_pFRDlg);
}
}
void CjumpView::OnEditFind()
{
if(m_pFRDlg == NULL)
{
const bool fShowFind = true;
m_pFRDlg = new CFindReplaceDialog();
m_pFRDlg->Create(fShowFind, m_sFind, NULL, NULL, this);
}
}
LRESULT CjumpView::OnFind(WPARAM, LPARAM lParam)
{
LPFINDREPLACE lpFindReplace = reinterpret_cast<LPFINDREPLACE>(lParam);
if(lpFindReplace->Flags & FR_DIALOGTERM)
{
m_pFRDlg->DestroyWindow();
delete(m_pFRDlg);
m_pFRDlg = NULL;
return NULL;
}
lpFindReplace->Flags = 0;
return NULL;
}
The Visual Studio help article for CFindReplaceDialog says that objects of this class should be created on the heap with the new operator, and that the Create member function should then be called.
My OnFind function reacts to the closing of this Find dialog. It calls the DestroyWindow() function, then tries to delete the dialog object.
But when I call DestroyWindow() in OnFind() or in the destructor, I get an exception:
Access violation at address...
How do I destroy this dialog and delete the pointer?

If anyone else is interested in this problem, the solution is simple.
You don't actually have to delete the CFindReplaceDialog* pointer after it has been used.
When the dialog is closed, it receives the WM_NCDESTROY message. Since the class is derived from CWnd, the CWnd::OnNcDestroy() handler is invoked. Its last line calls PostNcDestroy(), which does nothing in CWnd but is overridden in CFindReplaceDialog, where it deletes the 'this' pointer like so:
void CFindReplaceDialog::PostNcDestroy()
{
ASSERT(m_hWnd == NULL);
delete this;
}
So, you don't have to invoke delete(m_pFRDlg) anywhere.
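Putting it together, the handler only needs to forget the pointer; here is a minimal sketch (my adaptation of the code above following this explanation, not the original poster's final version):
LRESULT CjumpView::OnFind(WPARAM, LPARAM lParam)
{
    LPFINDREPLACE lpFindReplace = reinterpret_cast<LPFINDREPLACE>(lParam);
    if (lpFindReplace->Flags & FR_DIALOGTERM)
    {
        // The dialog is already being destroyed; CFindReplaceDialog::PostNcDestroy()
        // will call 'delete this' for us. Just forget the pointer.
        m_pFRDlg = NULL;
        return 0;
    }
    // ... handle FR_FINDNEXT and the other flags here ...
    return 0;
}
The destructor likewise needs neither DestroyWindow() nor delete: if the dialog is still open when the owning view's window is destroyed, the owned dialog window is torn down with it and MFC deletes the C++ object itself.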

Related

wglCreateContext fails with error code 6 (Invalid Handle)

I'm trying to create a wgl context according to the tutorial at https://www.khronos.org/opengl/wiki/Creating_an_OpenGL_Context_(WGL). For whatever reason, wglCreateContext returns null, and GetLastError returns 6, or Invalid Handle. I have used the tutorial before, and it worked just fine.
I'm trying to create a dummy context to a hidden window so that I can create another context for the user window. What's going on?
The Windows API is referred to through the winAPI identifier, and wgl is a global struct containing function pointers to OpenGL.
struct WglContext
{
winAPI.HDC hdc;
winAPI.HGLRC handle;
}
__gshared Win32GL wgl;
///WGL-specific global data.
struct Win32GL
{
import oswald : OsWindow, WindowConfig, WindowError;
OsWindow helperWindow;
winAPI.HINSTANCE instance;
PFN_wglCreateContext createContext;
PFN_wglDeleteContext deleteContext;
PFN_wglGetProcAddress getProcAddress;
PFN_wglMakeCurrent makeCurrent;
PFN_wglCreateContextAttribsARB createContextAttribsARB;
PFN_wglGetExtensionStringARB getExtensionStringARB;
PFN_wglGetExtensionStringEXT getExtensionStringEXT;
bool extensionsAreLoaded;
static void initialize()
{
if (wgl.instance !is null)
return; //The library has already been initialized
WindowConfig cfg;
cfg.title = "viewport_gl_helper";
cfg.hidden = true;
auto windowError = OsWindow.createNew(cfg, &wgl.helperWindow);
if (windowError != WindowError.NoError)
{
import std.conv : to;
assert(false, "Failed to create helper window: " ~ windowError.to!string);
}
wgl.instance = winAPI.LoadLibrary("opengl32.dll");
if (wgl.instance is null)
assert(false, "Viweport failed to load opengl32.dll");
wgl.bind(cast(void**)&wgl.createContext, "wglCreateContext\0");
wgl.bind(cast(void**)&wgl.deleteContext, "wglDeleteContext\0");
wgl.bind(cast(void**)&wgl.getProcAddress, "wglGetProcAddress\0");
wgl.bind(cast(void**)&wgl.makeCurrent, "wglMakeCurrent\0");
}
static void terminate()
{
if (!wgl.instance)
return;
winAPI.FreeLibrary(wgl.instance);
wgl.helperWindow.destroy();
}
void bind(void** func, in string name)
{
*func = winAPI.GetProcAddress(this.instance, name.ptr);
}
}
WglContext wglCreateTmpContext()
{
assert(wgl.instance);
winAPI.PIXELFORMATDESCRIPTOR pfd;
pfd.nSize = winAPI.PIXELFORMATDESCRIPTOR.sizeof;
pfd.nVersion = 1;
pfd.dwFlags = winAPI.PFD_DRAW_TO_WINDOW | winAPI.PFD_SUPPORT_OPENGL | winAPI.PFD_DOUBLEBUFFER;
pfd.iPixelType = winAPI.PFD_TYPE_RGBA;
pfd.cColorBits = 24;
auto hdc = winAPI.GetDC(wgl.helperWindow.platformData.handle);
const pixelFormat = winAPI.ChoosePixelFormat(hdc, &pfd);
if (winAPI.SetPixelFormat(hdc, pixelFormat, &pfd) == winAPI.FALSE)
assert(false, "Failed to set pixel format for temp context");
writeln(hdc);
writeln(pixelFormat);
winAPI.HGLRC context = wgl.createContext(hdc);
if (context is null)
{
import std.conv: to;
assert(false, "Failed to create temp context: Error Code " ~ winAPI.GetLastError().to!string);
}
if (wgl.makeCurrent(hdc, context) == winAPI.FALSE)
{
wgl.deleteContext(context);
assert(false, "Failed to make temp context current");
}
return WglContext(hdc, context);
}
void main()
{
wgl.initialize(); //Fetches function pointers, and loads opengl32.dll
auto context = wglCreateTmpContext();
wglDeleteContext(context); //Delegates to wgl.deleteContext
wgl.terminate(); //Unloads opengl32.dll and nulls function pointers
}
I know nothing about that winAPI wrapper you use. Anyhow, I'm sure of the following. You must:
Create a window and get its HWND handle (the Windows definition of HWND, see below). Typically the window uses the WS_CLIPCHILDREN | WS_CLIPSIBLINGS style, but try also WS_OVERLAPPEDWINDOW as in the link you posted.
Verify you get a valid HWND, and a valid HDC from it.
HWND and HDC are defined as void*. I would not rely on auto.
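As a reference point, here is a minimal plain-Win32 sketch of those two steps (the function name createHelperWindow and the class name "gl_helper_class" are made up; error handling is kept to the bare minimum):
#include <windows.h>

HWND createHelperWindow(HINSTANCE hInstance)
{
    WNDCLASSA wc = {0};
    wc.style = CS_OWNDC;                  // a private DC is convenient for OpenGL
    wc.lpfnWndProc = DefWindowProcA;
    wc.hInstance = hInstance;
    wc.lpszClassName = "gl_helper_class"; // made-up class name
    if (!RegisterClassA(&wc))
        return NULL;
    // Hidden 1x1 window (no WS_VISIBLE); WS_CLIPCHILDREN | WS_CLIPSIBLINGS as suggested above.
    return CreateWindowA(wc.lpszClassName, "gl_helper",
                         WS_CLIPCHILDREN | WS_CLIPSIBLINGS,
                         0, 0, 1, 1, NULL, NULL, hInstance, NULL);
}

// Usage: verify both handles before creating the context.
// HWND hwnd = createHelperWindow(GetModuleHandleA(NULL));
// HDC  hdc  = hwnd ? GetDC(hwnd) : NULL;
// if (hwnd == NULL || hdc == NULL) { /* bail out and check GetLastError() */ }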

Can I call a function from the base class which returns bool from a derived class

I have the following base class:
class node_layer_manager_t : public layer_manager_t
{
protected:
//Devices
trx_t trx;
private:
std::vector<string> trx_dump_labels;
public:
node_layer_manager_t( xml::node_t& params );
~node_layer_manager_t();
virtual bool set_profile(void) override;
};
I created the following derived class:
class node_layer_manager_with_rad_t : public node_layer_manager_t
{
protected:
//Devices
radio_t radio;
public:
node_layer_manager_with_rad_t(xml::node_t& params );
~node_layer_manager_with_rad_t();
virtual bool set_profile(void) override;
virtual void radio_monitoring_job_function(void);
intervalues_t<double> radio_tmp;
ushort duration_seconds_for_radio_monitoring;
};
I want set_profile to execute the base class's set_profile and, in addition, perform some other action.
Can I just write it this way?
bool node_layer_manager_with_rad_t::set_profile(void)
{
bool success;
node_layer_manager_t::set_profile();
try
{
string_t profile_tag = "logs/trx_dump/node:"+get_id();
dev_tx = profile->get_decendant(profile_tag.c_str());
cout<<"sarit id= "<< get_id()<<endl;
success = true;
}
catch(...)
{
cout<<"sarit profile error: "<<endl;
success = false;
}
return success; //**
}
**Or should I return the following:
return (success && node_layer_manager_t::set_profile());
If you have to call the parent set_profile regardless of what you do in the derived class, you should adopt a design that takes care of this constraint.
Typically, you should mark the base class set_profile as final and manage the call to a dedicated derived-class method inside the base class:
class node_layer_manager_t : public layer_manager_t
{
protected:
....
// set_profile actions of the derived class.
// A default implementation without side effects is provided,
// in case the derived class doesn't need to override it.
virtual bool set_profile_child() { return true; }
private:
....
public:
.....
// Manage here call of derived
virtual bool set_profile() override final
{
// actions before derived specific actions
....
// Call specific derived class actions
bool success = set_profile_child();
// actions after derived specific actions
if (success)
{
//do based class action
}
return success;
}
};
and in child:
class node_layer_manager_with_rad_t : public node_layer_manager_t
{
protected:
....
public:
virtual bool set_profile_child() override;
};
// Manages only its own actions, regardless of the needs of the base class
bool node_layer_manager_with_rad_t::set_profile_child(void)
{
bool success = true;
try
{
// Do what you're in charge of, and only what you're in charge of!
}
catch(...)
{
cout<<"sarit profile error: "<<endl;
success = false;
}
return success;
}
With this kind of design, each class does only what it has to manage, and only that. The derived class doesn't have to deal with the needs of the base class.
If you want to offer your derived class the ability to decide whether its code executes before or after the generic behavior, you can replace set_profile_child() with, or add to it, two methods: bool pre_set_profile() and bool post_set_profile()
First off, you haven't declared success anywhere (so this is not actually an MCVE; the code should not compile as is).
Still, I get it - and the answer is: it depends on what you actually want to do...
Do you want to call the super class first or after the sub class code? Your example implies the former, your alternative the latter. Do you want to abort if the super class function fails or still execute your code?
Your initial example calls the super class function, ignores the result and does its own stuff afterwards.
This calls the super class function first and continues only on success:
bool success = node_layer_manager_t::set_profile();
if(success)
{
try { /*...*/ } // <- no need to set success to true, it is already
catch(...) { /*...*/ success = false; }
}
This executes both, but combines the result:
bool success = node_layer_manager_t::set_profile();
try { /*...*/ } // <- do not modify success, must remain false if super class failed!
catch(...) { /*...*/ success = false; }
Your alternative hints at executing the sub-class code first and calling the super-class function only if nothing went wrong.
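Sketched in the same style as the snippets above, that variant would look something like this:
bool success = true;
try { /*...*/ }
catch(...) { /*...*/ success = false; }
// && short-circuits: the super class function runs only if the sub class code succeeded
return success && node_layer_manager_t::set_profile();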
Any of these approaches might be appropriate, none of them might be. You have to get a clear image of what your requirements are - and then implement the code such that your needs are satisfied...

Safari plugin crashes on NPN_GetValue

My plugin code crashes when I call the NPN_GetValue. Basically I created a scriptable object which has a 'getDevice' method that can return a device array to JavaScript. Below is the code snippet.
static bool mainNPObjectInvoke(NPObject *obj, NPIdentifier identifier, const NPVariant *args, uint32_t argCount, NPVariant *result)
{
printf("create main object");
MainNPObject *mainObject = (MainNPObject *)obj;
if (identifier == methodIdentifiers[METHOD_ID_GET_DEVICES])
{
NPObject *windowObj = NULL;
browser->getvalue(mainObject->npp, NPNVWindowNPObject, &windowObj);
// it crashed here
....
}
}
I created the MainNPObject instance with below method.
NPObject *createMainNPObject(NPP npp)
{
MainNPObject *object = (MainNPObject *)browser->createobject(npp, &mainNPClass);
object->npp = npp;
theMainObject = object;
return (NPObject *)object;
}
The createMainNPObject is called in the plugin function I provided to browser.
NPError NPP_GetValue(NPP instance, NPPVariable variable, void *value)
{
PluginObject *obj = instance->pdata;
switch (variable) {
case NPPVpluginCoreAnimationLayer:
if (!obj->rootLayer)
setupLayerHierarchy(obj);
*(CALayer **)value = obj->rootLayer;
return NPERR_NO_ERROR;
case NPPVpluginScriptableNPObject:
if (!obj->mainObject)
{
obj->mainObject = createMainNPObject(instance);
}
....
}
And the allocate function is as below.
static NPObject *mainNPObjectAllocate(NPP npp, NPClass *class)
{
initializeIdentifiers();
MainNPObject *mainObject = malloc(sizeof(MainNPObject));
mainObject->deviceManager = [[DeviceManager alloc] init];
return (NPObject *)mainObject;
}
Definition of MainNPObject:
typedef struct
{
NPObject *npobject;
NPP npp;
DeviceManager *deviceManager;
} MainNPObject;
By debugging the code, I found that the system raised an EXC_BAD_ACCESS when calling browser->getvalue, and it looks like the npp pointer is invalid.
0x00007fff83f82dab <+0019> je 0x7fff83f82db9 <_ZN6WebKit14NetscapePlugin7fromNPPEP4_NPP+33>
0x00007fff83f82dad <+0021> incl 0x8(%rax)
Can someone help me out?
Thanks!
Hmm; not seeing anything obvious. Try adding another member (an int?) to your structure and set it during allocate or immediately afterwards, then later on check to see if it's still the value you set before you call getvalue. See if your struct is somehow getting corrupted. That happened to me once when I was casting the NPObject in a funny, non-obvious way.
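Something like this sketch of that check (the magic field and the MAGIC_ALIVE value are made up for illustration):
#define MAGIC_ALIVE 0x600DBEEF   /* arbitrary made-up sentinel value */

typedef struct
{
    NPObject *npobject;
    NPP npp;
    DeviceManager *deviceManager;
    int magic;                   /* extra member used only for this test */
} MainNPObject;

/* in mainNPObjectAllocate, right after malloc: */
mainObject->magic = MAGIC_ALIVE;

/* in mainNPObjectInvoke, just before the browser->getvalue call: */
if (mainObject->magic != MAGIC_ALIVE)
    printf("MainNPObject was corrupted before getvalue\n");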

Proper way of raising events from C++/CLI?

I was wondering what's the proper way of raising events from C++/CLI. In C# one should first make a copy of the handler, check if it's not null, and then call it. Is there a similar practice for C++/CLI?
This isn't the whole story! You don't usually have to worry about null event handlers in C++/CLI. The code for these checks is generated for you. Consider the following trivial C++/CLI class.
public ref class MyClass
{
public:
event System::EventHandler ^ MyEvent;
};
If you compile this class and disassemble it using Reflector, you get the following C# code.
public class MyClass
{
// Fields
private EventHandler <backing_store>MyEvent;
// Events
public event EventHandler MyEvent
{
[MethodImpl(MethodImplOptions.Synchronized)] add
{
this.<backing_store>MyEvent = (EventHandler) Delegate.Combine(this.<backing_store>MyEvent, value);
}
[MethodImpl(MethodImplOptions.Synchronized)] remove
{
this.<backing_store>MyEvent = (EventHandler) Delegate.Remove(this.<backing_store>MyEvent, value);
}
raise
{
EventHandler <tmp> = null;
<tmp> = this.<backing_store>MyEvent;
if (<tmp> != null)
{
<tmp>(value0, value1);
}
}
}
}
The usual checks are being done in the raise method. Unless you really want custom behavior, you should feel comfortable declaring your event as in the above class, and raising it without fear of a null handler.
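For instance, raising the trivial event from inside the class is just a call; a minimal sketch (Fire is a made-up method name):
public ref class MyClass
{
public:
    event System::EventHandler ^ MyEvent;

    void Fire()
    {
        // Invokes the compiler-generated raise, which already checks for null.
        MyEvent(this, System::EventArgs::Empty);
    }
};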
C++/CLI allows you to override raise in custom event handlers so you don't have to test for null or copy when raising the event. Of course, inside your custom raise you still have to do this.
Example, adapted from the MSDN for correctness:
public delegate void f(int);
public ref struct E {
f ^ _E;
public:
void handler(int i) {
System::Console::WriteLine(i);
}
E() {
_E = nullptr;
}
event f^ Event {
void add(f ^ d) {
_E += d;
}
void remove(f ^ d) {
_E -= d;
}
void raise(int i) {
f^ tmp = _E;
if (tmp) {
tmp->Invoke(i);
}
}
}
static void Go() {
E^ pE = gcnew E;
pE->Event += gcnew f(pE, &E::handler);
pE->Event(17);
}
};
int main() {
E::Go();
}
If your issue is that raise isn't private, then explicitly implement it like the docs say:
http://msdn.microsoft.com/en-us/library/5f3csfsa.aspx
In summary:
If you just use the event keyword, you create a "trivial" event. The compiler generates add/remove/raise and the delegate member for you. The generated raise function (as the docs say) checks for nullptr. Trivial events are documented here:
http://msdn.microsoft.com/en-us/library/4b612y2s.aspx
If you want "more control", for example to make raise private, then you have to explicitly implement the members as shown in the link. You must explicitly declare a data member for the delegate type. Then you use the event keyword to declare the event-related members, as in the Microsoft example:
// event keyword introduces the scope wherein I'm defining the required methods
// "f" is my delegate type
// "Event" is the unrealistic name of the event itself
event f^ Event
{
// add is public (because the event block is public)
// "_E" is the private delegate data member of type "f"
void add(f ^ d) { _E += d; }
// making remove private
private:
void remove(f ^ d) { _E -= d; }
// making raise protected
protected:
void raise(int i)
{
// check for nullptr
if (_E)
{
_E->Invoke(i);
}
}
}// end event block
Wordy, but there it is.

How to tell if .NET code is being run by Visual Studio designer

I am getting some errors thrown in my code when I open a Windows Forms form in Visual Studio's designer. I would like to branch in my code and perform a different initialization if the form is being opened by designer than if it is being run for real.
How can I determine at run-time if the code is being executed as part of designer opening the form?
if (System.ComponentModel.LicenseManager.UsageMode == System.ComponentModel.LicenseUsageMode.Designtime)
{
// Design time logic
}
To find out if you're in "design mode":
Windows Forms components (and controls) have a DesignMode property.
Windows Presentation Foundation controls should use the IsInDesignMode attached property.
The Control.DesignMode property is probably what you're looking for. It tells you if the control's parent is open in the designer.
In most cases it works great, but there are instances where it doesn't work as expected. First, it doesn't work in the control's constructor. Second, DesignMode is false for "grandchild" controls. For example, DesignMode on controls hosted in a UserControl will return false when the UserControl is hosted in a parent.
There is a pretty easy workaround. It goes something like this:
public bool HostedDesignMode
{
get
{
Control parent = Parent;
while (parent!=null)
{
if(parent.DesignMode) return true;
parent = parent.Parent;
}
return DesignMode;
}
}
I haven't tested that code, but it should work.
The most reliable approach is:
public bool isInDesignMode
{
get
{
System.Diagnostics.Process process = System.Diagnostics.Process.GetCurrentProcess();
bool res = process.ProcessName == "devenv";
process.Dispose();
return res;
}
}
The most reliable way to do this is to ignore the DesignMode property and use your own flag that gets set on application startup.
Class:
public static class Foo
{
public static bool IsApplicationRunning { get; set; }
}
Program.cs:
[STAThread]
static void Main()
{
Foo.IsApplicationRunning = true;
// ... code goes here ...
}
Then just check the flag wherever you need it.
if(Foo.IsApplicationRunning)
{
// Do runtime stuff
}
else
{
// Do design time stuff
}
I had the same problem in Visual Studio Express 2013. I tried many of the solutions suggested here but the one that worked for me was an answer to a different thread, which I will repeat here in case the link is ever broken:
protected static bool IsInDesigner
{
get { return (Assembly.GetEntryAssembly() == null); }
}
The devenv approach stopped working in VS2012, as the designer now has its own process. Here is the solution I am currently using (the 'devenv' part is left in for legacy reasons, though without VS2010 I am not able to test it).
private static readonly string[] _designerProcessNames = new[] { "xdesproc", "devenv" };
private static bool? _runningFromVisualStudioDesigner = null;
public static bool RunningFromVisualStudioDesigner
{
get
{
if (!_runningFromVisualStudioDesigner.HasValue)
{
using (System.Diagnostics.Process currentProcess = System.Diagnostics.Process.GetCurrentProcess())
{
_runningFromVisualStudioDesigner = _designerProcessNames.Contains(currentProcess.ProcessName.ToLower().Trim());
}
}
return _runningFromVisualStudioDesigner.Value;
}
}
/// <summary>
/// Are we in design mode?
/// </summary>
/// <returns>True if in design mode</returns>
private bool IsDesignMode() {
// Ugly hack, but it works in every version
return 0 == String.CompareOrdinal(
"devenv.exe", 0,
Application.ExecutablePath, Application.ExecutablePath.Length - 10, 10);
}
System.Diagnostics.Debugger.IsAttached
It's hack-ish, but if you're using VB.NET, then when you're running from within Visual Studio, My.Application.Deployment.CurrentDeployment will be Nothing, because you haven't deployed it yet. I'm not sure how to check the equivalent value in C#.
using (System.Diagnostics.Process process = System.Diagnostics.Process.GetCurrentProcess())
{
bool inDesigner = process.ProcessName.ToLower().Trim() == "devenv";
return inDesigner;
}
I tried the above code (with a using statement added) and it would fail on some occasions for me: testing in the constructor of a UserControl placed directly in a form, with the designer loading at startup. But it would work in other places.
What worked for me, in all locations is:
private bool isDesignMode()
{
bool bProcCheck = false;
using (System.Diagnostics.Process process = System.Diagnostics.Process.GetCurrentProcess())
{
bProcCheck = process.ProcessName.ToLower().Trim() == "devenv";
}
bool bModeCheck = (System.ComponentModel.LicenseManager.UsageMode == System.ComponentModel.LicenseUsageMode.Designtime);
return bProcCheck || DesignMode || bModeCheck;
}
Maybe a bit of overkill, but it works, so it's good enough for me.
What succeeds in the example noted above is the bModeCheck, so the DesignMode check is probably surplus.
You check the DesignMode property of your control:
if (!DesignMode)
{
//Do production runtime stuff
}
Note that this won't work in your constructor because the components haven't been initialized yet.
When you run a project, its process name has ".vshost" appended.
So, I use this:
public bool IsInDesignMode
{
get
{
Process p = Process.GetCurrentProcess();
bool result = false;
if (p.ProcessName.ToLower().Trim().IndexOf("vshost") != -1)
result = true;
p.Dispose();
return result;
}
}
It works for me.
I'm not sure if running in debug mode counts as real, but an easy way is to include an if statement in your code that checks for System.Diagnostics.Debugger.IsAttached.
If you created a property that you don't need at all at design time, you can use the DesignerSerializationVisibility attribute and set it to Hidden. For example:
protected virtual DataGridView GetGrid()
{
throw new NotImplementedException("frmBase.GetGrid()");
}
[DesignerSerializationVisibility(DesignerSerializationVisibility.Hidden)]
public int ColumnCount { get { return GetGrid().Columns.Count; } set { /*Some code*/ } }
It stopped my Visual Studio crashing every time I made a change to the form with NotImplementedException() and tried to save. Instead, Visual Studio knows that I don't want to serialize this property, so it can skip it. It only displays some weird string in the properties box of the form, but it seems to be safe to ignore.
Please note that this change does not take effect until you rebuild.
We use the following code in UserControls and it does the work. Using only DesignMode will not work in your app that uses your custom user controls as pointed out by other members.
public bool IsDesignerHosted
{
get { return IsControlDesignerHosted(this); }
}
public bool IsControlDesignerHosted(System.Windows.Forms.Control ctrl)
{
if (ctrl != null)
{
if (ctrl.Site != null)
{
if (ctrl.Site.DesignMode == true)
return true;
else
{
if (IsControlDesignerHosted(ctrl.Parent))
return true;
else
return false;
}
}
else
{
if (IsControlDesignerHosted(ctrl.Parent))
return true;
else
return false;
}
}
else
return false;
}
Basically the logic above boils down to:
public bool IsControlDesignerHosted(System.Windows.Forms.Control ctrl)
{
if (ctrl == null) return false;
if (ctrl.Site != null && ctrl.Site.DesignMode) return true;
return IsControlDesignerHosted(ctrl.Parent);
}
If you are in a form or control you can use the DesignMode property:
if (DesignMode)
{
// DesignMode-only stuff
}
I found the DesignMode property to be buggy, at least in previous versions of Visual Studio. Hence, I made my own using the following logic:
Process.GetCurrentProcess().ProcessName.ToLower().Trim() == "devenv";
Kind of a hack, I know, but it works well.
System.ComponentModel.Component.DesignMode == true
To solve the problem, you can also code as below:
private bool IsUnderDevelopment
{
get
{
System.Diagnostics.Process process = System.Diagnostics.Process.GetCurrentProcess();
if (process.ProcessName.EndsWith(".vshost")) return true;
else return false;
}
}
Here's another one:
//Caters only to thing done while only in design mode
if (App.Current.MainWindow == null){ // in design mode }
//Avoids design mode problems
if (App.Current.MainWindow != null) { //applicaiton is running }
After testing most of the answers here, unfortunately nothing worked for me (VS2015).
So I added a little twist to JohnV's answer, which didn't work out of the box, since DesignMode is a protected property in the Control class.
First I made an extension method which returns the DesignMode property's value via reflection:
public static Boolean GetDesignMode(this Control control)
{
BindingFlags bindFlags = BindingFlags.Instance | BindingFlags.NonPublic | BindingFlags.Static;
PropertyInfo prop = control.GetType().GetProperty("DesignMode", bindFlags);
return (Boolean)prop.GetValue(control, null);
}
and then I made a function like JohnV:
public bool HostedDesignMode
{
get
{
Control parent = Parent;
while (parent != null)
{
if (parent.GetDesignMode()) return true;
parent = parent.Parent;
}
return DesignMode;
}
}
This is the only method that worked for me, avoiding all the ProcessName mess, and while reflection should not be used lightly, in this case it made all the difference! ;)
EDIT:
You can also make the second function an extension method like this:
public static Boolean IsInDesignMode(this Control control)
{
Control parent = control.Parent;
while (parent != null)
{
if (parent.GetDesignMode())
{
return true;
}
parent = parent.Parent;
}
return control.GetDesignMode();
}
For WPF (hopefully this is useful for those WPF people stumbling upon this question):
if (System.ComponentModel.DesignerProperties.GetIsInDesignMode(new DependencyObject()))
{
}
GetIsInDesignMode requires a DependencyObject. If you don't have one, just create one.
/// <summary>
/// Whether or not we are being run from the Visual Studio IDE
/// </summary>
public bool InIDE
{
get
{
return Process.GetCurrentProcess().ProcessName.ToLower().Trim().EndsWith("vshost");
}
}
Here's a flexible way that is adaptable to where you compile from as well as whether or not you care which mode you're in.
string testString = "\\bin\\";
//string testString = "\\bin\\Debug\\";
//string testString = "\\bin\\Release\\";
if (AppDomain.CurrentDomain.BaseDirectory.Contains(testString))
{
//Your code here
}
