Diffstat (limited to 'tools/debugserver/source/MacOSX')
-rw-r--r--  tools/debugserver/source/MacOSX/CFBundle.cpp | 97
-rw-r--r--  tools/debugserver/source/MacOSX/CFBundle.h | 43
-rw-r--r--  tools/debugserver/source/MacOSX/CFData.cpp | 85
-rw-r--r--  tools/debugserver/source/MacOSX/CFData.h | 39
-rw-r--r--  tools/debugserver/source/MacOSX/CFString.cpp | 201
-rw-r--r--  tools/debugserver/source/MacOSX/CFString.h | 43
-rw-r--r--  tools/debugserver/source/MacOSX/CFUtils.h | 81
-rw-r--r--  tools/debugserver/source/MacOSX/CMakeLists.txt | 89
-rw-r--r--  tools/debugserver/source/MacOSX/Genealogy.cpp | 300
-rw-r--r--  tools/debugserver/source/MacOSX/Genealogy.h | 116
-rw-r--r--  tools/debugserver/source/MacOSX/GenealogySPI.h | 96
-rw-r--r--  tools/debugserver/source/MacOSX/HasAVX.h | 27
-rw-r--r--  tools/debugserver/source/MacOSX/HasAVX.s | 50
-rw-r--r--  tools/debugserver/source/MacOSX/MachException.cpp | 582
-rw-r--r--  tools/debugserver/source/MacOSX/MachException.h | 133
-rw-r--r--  tools/debugserver/source/MacOSX/MachProcess.h | 364
-rw-r--r--  tools/debugserver/source/MacOSX/MachProcess.mm | 3685
-rw-r--r--  tools/debugserver/source/MacOSX/MachTask.h | 134
-rw-r--r--  tools/debugserver/source/MacOSX/MachTask.mm | 1144
-rw-r--r--  tools/debugserver/source/MacOSX/MachThread.cpp | 922
-rw-r--r--  tools/debugserver/source/MacOSX/MachThread.h | 159
-rw-r--r--  tools/debugserver/source/MacOSX/MachThreadList.cpp | 668
-rw-r--r--  tools/debugserver/source/MacOSX/MachThreadList.h | 87
-rw-r--r--  tools/debugserver/source/MacOSX/MachVMMemory.cpp | 598
-rw-r--r--  tools/debugserver/source/MacOSX/MachVMMemory.h | 51
-rw-r--r--  tools/debugserver/source/MacOSX/MachVMRegion.cpp | 202
-rw-r--r--  tools/debugserver/source/MacOSX/MachVMRegion.h | 77
-rw-r--r--  tools/debugserver/source/MacOSX/Makefile | 54
-rw-r--r--  tools/debugserver/source/MacOSX/ThreadInfo.h | 26
-rw-r--r--  tools/debugserver/source/MacOSX/arm/DNBArchImpl.cpp | 2162
-rw-r--r--  tools/debugserver/source/MacOSX/arm/DNBArchImpl.h | 282
-rw-r--r--  tools/debugserver/source/MacOSX/arm64/DNBArchImplARM64.cpp | 2095
-rw-r--r--  tools/debugserver/source/MacOSX/arm64/DNBArchImplARM64.h | 272
-rw-r--r--  tools/debugserver/source/MacOSX/dbgnub-mig.defs | 5
-rw-r--r--  tools/debugserver/source/MacOSX/i386/CMakeLists.txt | 4
-rw-r--r--  tools/debugserver/source/MacOSX/i386/DNBArchImplI386.cpp | 1901
-rw-r--r--  tools/debugserver/source/MacOSX/i386/DNBArchImplI386.h | 255
-rw-r--r--  tools/debugserver/source/MacOSX/i386/MachRegisterStatesI386.h | 180
-rw-r--r--  tools/debugserver/source/MacOSX/i386/Makefile | 19
-rw-r--r--  tools/debugserver/source/MacOSX/ppc/DNBArchImpl.cpp | 569
-rw-r--r--  tools/debugserver/source/MacOSX/ppc/DNBArchImpl.h | 179
-rw-r--r--  tools/debugserver/source/MacOSX/stack_logging.h | 122
-rw-r--r--  tools/debugserver/source/MacOSX/x86_64/CMakeLists.txt | 8
-rw-r--r--  tools/debugserver/source/MacOSX/x86_64/DNBArchImplX86_64.cpp | 2289
-rw-r--r--  tools/debugserver/source/MacOSX/x86_64/DNBArchImplX86_64.h | 260
-rw-r--r--  tools/debugserver/source/MacOSX/x86_64/MachRegisterStatesX86_64.h | 210
-rw-r--r--  tools/debugserver/source/MacOSX/x86_64/Makefile | 19
47 files changed, 20984 insertions, 0 deletions
diff --git a/tools/debugserver/source/MacOSX/CFBundle.cpp b/tools/debugserver/source/MacOSX/CFBundle.cpp
new file mode 100644
index 000000000000..fdcb7cc2fcb2
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/CFBundle.cpp
@@ -0,0 +1,97 @@
+//===-- CFBundle.cpp --------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 1/16/08.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CFBundle.h"
+#include "CFString.h"
+
+//----------------------------------------------------------------------
+// CFBundle constructor
+//----------------------------------------------------------------------
+CFBundle::CFBundle(const char *path) :
+ CFReleaser<CFBundleRef>(),
+ m_bundle_url()
+{
+ if (path && path[0])
+ SetPath(path);
+}
+
+//----------------------------------------------------------------------
+// CFBundle copy constructor
+//----------------------------------------------------------------------
+CFBundle::CFBundle(const CFBundle& rhs) :
+ CFReleaser<CFBundleRef>(rhs),
+ m_bundle_url(rhs.m_bundle_url)
+{
+
+}
+
+//----------------------------------------------------------------------
+// CFBundle assignment operator
+//----------------------------------------------------------------------
+CFBundle&
+CFBundle::operator=(const CFBundle& rhs)
+{
+    CFReleaser<CFBundleRef>::operator=(rhs);
+    m_bundle_url = rhs.m_bundle_url;
+    return *this;
+}
+
+//----------------------------------------------------------------------
+// Destructor
+//----------------------------------------------------------------------
+CFBundle::~CFBundle()
+{
+}
+
+//----------------------------------------------------------------------
+// Set the path for a bundle by supplying a path to the bundle on disk
+//----------------------------------------------------------------------
+bool
+CFBundle::SetPath (const char *path)
+{
+ CFAllocatorRef alloc = kCFAllocatorDefault;
+ // Release our old bundle and URL
+ reset(); // This class is a CFReleaser<CFBundleRef>
+ m_bundle_url.reset();
+ // Make a CFStringRef from the supplied path
+ CFString cf_path;
+ cf_path.SetFileSystemRepresentation(path);
+ if (cf_path.get())
+ {
+ // Make our Bundle URL
+ m_bundle_url.reset (::CFURLCreateWithFileSystemPath (alloc, cf_path.get(), kCFURLPOSIXPathStyle, true));
+ if (m_bundle_url.get())
+ {
+ reset (::CFBundleCreate (alloc, m_bundle_url.get()));
+ }
+ }
+ return get() != NULL;
+}
+
+CFStringRef
+CFBundle::GetIdentifier () const
+{
+ CFBundleRef bundle = get();
+ if (bundle != NULL)
+ return ::CFBundleGetIdentifier (bundle);
+ return NULL;
+}
+
+
+CFURLRef
+CFBundle::CopyExecutableURL () const
+{
+ CFBundleRef bundle = get();
+ if (bundle != NULL)
+ return CFBundleCopyExecutableURL(bundle);
+ return NULL;
+}
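
A minimal usage sketch (not part of the commit; the helper name is invented) showing how CFBundle, CFString and CFReleaser are meant to compose: resolve a bundle from a filesystem path, then read its identifier and executable URL.

    #include "CFBundle.h"
    #include "CFString.h"
    #include <cstdio>
    #include <string>

    static void DumpBundleInfo (const char *bundle_path)
    {
        CFBundle bundle (bundle_path);          // constructor calls SetPath()
        if (bundle.get() == NULL)
            return;

        std::string identifier;
        if (CFString::UTF8 (bundle.GetIdentifier(), identifier))
            ::printf ("identifier: %s\n", identifier.c_str());

        // CopyExecutableURL() follows the CF "Copy" rule, so own the result.
        CFReleaser<CFURLRef> exe_url (bundle.CopyExecutableURL());
        if (exe_url.get())
        {
            CFString exe_path;
            exe_path.SetFileSystemRepresentationFromCFType (exe_url.get());
            std::string path;
            if (exe_path.GetFileSystemRepresentation (path))
                ::printf ("executable: %s\n", path.c_str());
        }
    }
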
diff --git a/tools/debugserver/source/MacOSX/CFBundle.h b/tools/debugserver/source/MacOSX/CFBundle.h
new file mode 100644
index 000000000000..e08290add731
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/CFBundle.h
@@ -0,0 +1,43 @@
+//===-- CFBundle.h ----------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 1/16/08.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CFBundle_h__
+#define __CFBundle_h__
+
+#include "CFUtils.h"
+
+class CFBundle : public CFReleaser<CFBundleRef>
+{
+public:
+ //------------------------------------------------------------------
+ // Constructors and Destructors
+ //------------------------------------------------------------------
+ CFBundle(const char *path = NULL);
+ CFBundle(const CFBundle& rhs);
+ CFBundle& operator=(const CFBundle& rhs);
+ virtual
+ ~CFBundle();
+ bool
+ SetPath (const char *path);
+
+ CFStringRef
+ GetIdentifier () const;
+
+ CFURLRef
+ CopyExecutableURL () const;
+
+protected:
+ CFReleaser<CFURLRef> m_bundle_url;
+};
+
+#endif // #ifndef __CFBundle_h__
diff --git a/tools/debugserver/source/MacOSX/CFData.cpp b/tools/debugserver/source/MacOSX/CFData.cpp
new file mode 100644
index 000000000000..94c93da544a4
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/CFData.cpp
@@ -0,0 +1,85 @@
+//===-- CFData.cpp ----------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 1/16/08.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CFData.h"
+
+//----------------------------------------------------------------------
+// CFData constructor
+//----------------------------------------------------------------------
+CFData::CFData(CFDataRef data) :
+ CFReleaser<CFDataRef>(data)
+{
+
+}
+
+//----------------------------------------------------------------------
+// CFData copy constructor
+//----------------------------------------------------------------------
+CFData::CFData(const CFData& rhs) :
+ CFReleaser<CFDataRef>(rhs)
+{
+
+}
+
+//----------------------------------------------------------------------
+// CFData assignment operator
+//----------------------------------------------------------------------
+CFData&
+CFData::operator=(const CFData& rhs)
+{
+    CFReleaser<CFDataRef>::operator=(rhs);
+    return *this;
+}
+
+//----------------------------------------------------------------------
+// Destructor
+//----------------------------------------------------------------------
+CFData::~CFData()
+{
+}
+
+
+CFIndex
+CFData::GetLength() const
+{
+ CFDataRef data = get();
+ if (data)
+ return CFDataGetLength (data);
+ return 0;
+}
+
+
+const uint8_t*
+CFData::GetBytePtr() const
+{
+ CFDataRef data = get();
+ if (data)
+ return CFDataGetBytePtr (data);
+ return NULL;
+}
+
+CFDataRef
+CFData::Serialize(CFPropertyListRef plist, CFPropertyListFormat format)
+{
+ CFAllocatorRef alloc = kCFAllocatorDefault;
+ reset();
+ CFReleaser<CFWriteStreamRef> stream (::CFWriteStreamCreateWithAllocatedBuffers (alloc, alloc));
+ ::CFWriteStreamOpen (stream.get());
+ CFIndex len = ::CFPropertyListWriteToStream (plist, stream.get(), format, NULL);
+ if (len > 0)
+ reset((CFDataRef)::CFWriteStreamCopyProperty (stream.get(), kCFStreamPropertyDataWritten));
+ ::CFWriteStreamClose (stream.get());
+ return get();
+}
+
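A short sketch (illustrative only; the dictionary contents are made up) of how CFData::Serialize() might be used: serialize a small property list into XML bytes and borrow the buffer through GetBytePtr()/GetLength().

    #include "CFData.h"
    #include <cstdio>

    static void DumpPlistBytes ()
    {
        CFReleaser<CFMutableDictionaryRef> dict (
            ::CFDictionaryCreateMutable (kCFAllocatorDefault, 0,
                                         &kCFTypeDictionaryKeyCallBacks,
                                         &kCFTypeDictionaryValueCallBacks));
        ::CFDictionarySetValue (dict.get(), CFSTR("key"), CFSTR("value"));

        CFData data;
        if (data.Serialize (dict.get(), kCFPropertyListXMLFormat_v1_0))
        {
            // The CFData object owns the serialized buffer; we only borrow it here.
            ::fwrite (data.GetBytePtr(), 1, data.GetLength(), stdout);
        }
    }
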
diff --git a/tools/debugserver/source/MacOSX/CFData.h b/tools/debugserver/source/MacOSX/CFData.h
new file mode 100644
index 000000000000..2c9d65d3af72
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/CFData.h
@@ -0,0 +1,39 @@
+//===-- CFData.h ------------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 1/16/08.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CFData_h__
+#define __CFData_h__
+
+#include "CFUtils.h"
+
+class CFData : public CFReleaser<CFDataRef>
+{
+public:
+ //------------------------------------------------------------------
+ // Constructors and Destructors
+ //------------------------------------------------------------------
+ CFData(CFDataRef data = NULL);
+ CFData(const CFData& rhs);
+ CFData& operator=(const CFData& rhs);
+ virtual ~CFData();
+
+ CFDataRef Serialize(CFPropertyListRef plist, CFPropertyListFormat format);
+ const uint8_t* GetBytePtr () const;
+ CFIndex GetLength () const;
+protected:
+ //------------------------------------------------------------------
+ // Classes that inherit from CFData can see and modify these
+ //------------------------------------------------------------------
+};
+
+#endif // #ifndef __CFData_h__
diff --git a/tools/debugserver/source/MacOSX/CFString.cpp b/tools/debugserver/source/MacOSX/CFString.cpp
new file mode 100644
index 000000000000..819024ca3bcb
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/CFString.cpp
@@ -0,0 +1,201 @@
+//===-- CFString.cpp --------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 1/16/08.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CFString.h"
+#include <string>
+#include <glob.h>
+
+//----------------------------------------------------------------------
+// CFString constructor
+//----------------------------------------------------------------------
+CFString::CFString(CFStringRef s) :
+ CFReleaser<CFStringRef> (s)
+{
+}
+
+//----------------------------------------------------------------------
+// CFString copy constructor
+//----------------------------------------------------------------------
+CFString::CFString(const CFString& rhs) :
+ CFReleaser<CFStringRef> (rhs)
+{
+
+}
+
+//----------------------------------------------------------------------
+// CFString assignment operator
+//----------------------------------------------------------------------
+CFString&
+CFString::operator=(const CFString& rhs)
+{
+    CFReleaser<CFStringRef>::operator=(rhs);
+    return *this;
+}
+
+CFString::CFString (const char *cstr, CFStringEncoding cstr_encoding) :
+ CFReleaser<CFStringRef> ()
+{
+ if (cstr && cstr[0])
+ {
+ reset(::CFStringCreateWithCString(kCFAllocatorDefault, cstr, cstr_encoding));
+ }
+}
+
+//----------------------------------------------------------------------
+// Destructor
+//----------------------------------------------------------------------
+CFString::~CFString()
+{
+}
+
+const char *
+CFString::GetFileSystemRepresentation(std::string& s)
+{
+ return CFString::FileSystemRepresentation(get(), s);
+}
+
+CFStringRef
+CFString::SetFileSystemRepresentation (const char *path)
+{
+ CFStringRef new_value = NULL;
+ if (path && path[0])
+ new_value = ::CFStringCreateWithFileSystemRepresentation (kCFAllocatorDefault, path);
+ reset(new_value);
+ return get();
+}
+
+
+CFStringRef
+CFString::SetFileSystemRepresentationFromCFType (CFTypeRef cf_type)
+{
+ CFStringRef new_value = NULL;
+ if (cf_type != NULL)
+ {
+ CFTypeID cf_type_id = ::CFGetTypeID(cf_type);
+
+ if (cf_type_id == ::CFStringGetTypeID())
+ {
+ // Retain since we are using the existing object
+ new_value = (CFStringRef)::CFRetain(cf_type);
+ }
+ else if (cf_type_id == ::CFURLGetTypeID())
+ {
+ new_value = ::CFURLCopyFileSystemPath((CFURLRef)cf_type, kCFURLPOSIXPathStyle);
+ }
+ }
+ reset(new_value);
+ return get();
+}
+
+CFStringRef
+CFString::SetFileSystemRepresentationAndExpandTilde (const char *path)
+{
+ std::string expanded_path;
+ if (CFString::GlobPath(path, expanded_path))
+ SetFileSystemRepresentation(expanded_path.c_str());
+ else
+ reset();
+ return get();
+}
+
+const char *
+CFString::UTF8(std::string& str)
+{
+ return CFString::UTF8(get(), str);
+}
+
+// Static function that puts a copy of the UTF8 contents of CF_STR into STR
+// and returns the C string pointer that is contained in STR when successful, else
+// NULL is returned. This allows the std::string parameter to own the extracted string,
+// and also allows that string to be returned as a C string pointer that can be used.
+
+const char *
+CFString::UTF8 (CFStringRef cf_str, std::string& str)
+{
+ if (cf_str)
+ {
+ const CFStringEncoding encoding = kCFStringEncodingUTF8;
+ CFIndex max_utf8_str_len = CFStringGetLength (cf_str);
+ max_utf8_str_len = CFStringGetMaximumSizeForEncoding (max_utf8_str_len, encoding);
+ if (max_utf8_str_len > 0)
+ {
+ str.resize(max_utf8_str_len);
+ if (!str.empty())
+ {
+ if (CFStringGetCString (cf_str, &str[0], str.size(), encoding))
+ {
+ str.resize(strlen(str.c_str()));
+ return str.c_str();
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+// Static function that puts a copy of the file system representation of CF_STR
+// into STR and returns the C string pointer that is contained in STR when
+// successful, else NULL is returned. This allows the std::string parameter
+// to own the extracted string, and also allows that string to be returned as
+// a C string pointer that can be used.
+
+const char *
+CFString::FileSystemRepresentation (CFStringRef cf_str, std::string& str)
+{
+ if (cf_str)
+ {
+ CFIndex max_length = ::CFStringGetMaximumSizeOfFileSystemRepresentation (cf_str);
+ if (max_length > 0)
+ {
+ str.resize(max_length);
+ if (!str.empty())
+ {
+ if (::CFStringGetFileSystemRepresentation (cf_str, &str[0], str.size()))
+ {
+ str.erase(::strlen(str.c_str()));
+ return str.c_str();
+ }
+ }
+ }
+ }
+ str.erase();
+ return NULL;
+}
+
+
+CFIndex
+CFString::GetLength() const
+{
+ CFStringRef str = get();
+ if (str)
+ return CFStringGetLength (str);
+ return 0;
+}
+
+
+const char*
+CFString::GlobPath(const char* path, std::string &expanded_path)
+{
+ glob_t globbuf;
+ if (::glob (path, GLOB_TILDE, NULL, &globbuf) == 0)
+ {
+ expanded_path = globbuf.gl_pathv[0];
+ ::globfree (&globbuf);
+ }
+ else
+ expanded_path.clear();
+
+ return expanded_path.c_str();
+}
+
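A small sketch (not from the commit; the helper and the example path are invented) of the common CFString round trip: expand a tilde path via SetFileSystemRepresentationAndExpandTilde(), which uses GlobPath(), then pull the result back out as UTF-8.

    #include "CFString.h"
    #include <cstdio>
    #include <string>

    static void ExpandAndPrint (const char *path)   // e.g. "~/Library"
    {
        CFString cf_path;
        if (cf_path.SetFileSystemRepresentationAndExpandTilde (path))
        {
            std::string utf8;
            if (cf_path.UTF8 (utf8))    // member wrapper around the static UTF8()
                ::printf ("%s -> %s (%ld characters)\n",
                          path, utf8.c_str(), (long)cf_path.GetLength());
        }
    }
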
diff --git a/tools/debugserver/source/MacOSX/CFString.h b/tools/debugserver/source/MacOSX/CFString.h
new file mode 100644
index 000000000000..73945a28a658
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/CFString.h
@@ -0,0 +1,43 @@
+//===-- CFString.h ----------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 1/16/08.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CFString_h__
+#define __CFString_h__
+
+#include "CFUtils.h"
+#include <iosfwd>
+
+class CFString : public CFReleaser<CFStringRef>
+{
+public:
+ //------------------------------------------------------------------
+ // Constructors and Destructors
+ //------------------------------------------------------------------
+ CFString (CFStringRef cf_str = NULL);
+ CFString (const char *s, CFStringEncoding encoding = kCFStringEncodingUTF8);
+ CFString (const CFString& rhs);
+ CFString& operator= (const CFString& rhs);
+ virtual ~CFString ();
+
+ const char * GetFileSystemRepresentation (std::string& str);
+ CFStringRef SetFileSystemRepresentation (const char *path);
+ CFStringRef SetFileSystemRepresentationFromCFType (CFTypeRef cf_type);
+ CFStringRef SetFileSystemRepresentationAndExpandTilde (const char *path);
+ const char * UTF8 (std::string& str);
+ CFIndex GetLength() const;
+ static const char *UTF8 (CFStringRef cf_str, std::string& str);
+ static const char *FileSystemRepresentation (CFStringRef cf_str, std::string& str);
+ static const char* GlobPath(const char* path, std::string &expanded_path);
+};
+
+#endif // #ifndef __CFString_h__
diff --git a/tools/debugserver/source/MacOSX/CFUtils.h b/tools/debugserver/source/MacOSX/CFUtils.h
new file mode 100644
index 000000000000..afa984fa11cd
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/CFUtils.h
@@ -0,0 +1,81 @@
+//===-- CFUtils.h -----------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 3/5/07.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CFUtils_h__
+#define __CFUtils_h__
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#ifdef __cplusplus
+
+//----------------------------------------------------------------------
+// Templatized CF helper class that can own any CF pointer and will
+// call CFRelease() on any valid pointer it owns unless that pointer is
+// explicitly released using the release() member function.
+//----------------------------------------------------------------------
+template <class T>
+class CFReleaser
+{
+public:
+ // Type names for the value
+ typedef T element_type;
+
+ // Constructors and destructors
+ CFReleaser(T ptr = NULL) : _ptr(ptr) { }
+ CFReleaser(const CFReleaser& copy) : _ptr(copy.get())
+ {
+ if (get())
+ ::CFRetain(get());
+ }
+ virtual ~CFReleaser() { reset(); }
+
+ // Assignments
+    CFReleaser& operator= (const CFReleaser<T>& copy)
+    {
+        if (copy.get() != get())
+        {
+            // Replace our owned pointer with the new one
+            reset(copy.get());
+            // Retain the current pointer that we own
+            if (get())
+                ::CFRetain(get());
+        }
+        return *this;
+    }
+ // Get the address of the contained type
+ T * ptr_address() { return &_ptr; }
+
+ // Access the pointer itself
+ const T get() const { return _ptr; }
+ T get() { return _ptr; }
+
+ // Set a new value for the pointer and CFRelease our old
+ // value if we had a valid one.
+ void reset(T ptr = NULL)
+ {
+ if (ptr != _ptr)
+ {
+ if (_ptr != NULL)
+ ::CFRelease(_ptr);
+ _ptr = ptr;
+ }
+ }
+
+ // Release ownership without calling CFRelease
+ T release() { T tmp = _ptr; _ptr = NULL; return tmp; }
+private:
+ element_type _ptr;
+};
+
+#endif // #ifdef __cplusplus
+#endif // #ifndef __CFUtils_h__
+
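A minimal sketch (not part of the commit; MakeURL and its parameter are hypothetical) of the ownership convention CFReleaser encodes: wrap every +1 reference obtained from a CF "Create"/"Copy" call so the destructor releases it, and call release() only when handing ownership to the caller.

    #include "CFUtils.h"

    static CFURLRef MakeURL (const char *posix_path)
    {
        CFReleaser<CFStringRef> cf_path (
            ::CFStringCreateWithFileSystemRepresentation (kCFAllocatorDefault, posix_path));
        if (cf_path.get() == NULL)
            return NULL;

        CFReleaser<CFURLRef> url (
            ::CFURLCreateWithFileSystemPath (kCFAllocatorDefault, cf_path.get(),
                                             kCFURLPOSIXPathStyle, false));
        // cf_path is released when it goes out of scope; the caller owns the URL.
        return url.release();
    }
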
diff --git a/tools/debugserver/source/MacOSX/CMakeLists.txt b/tools/debugserver/source/MacOSX/CMakeLists.txt
new file mode 100644
index 000000000000..d319cb7b0bab
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/CMakeLists.txt
@@ -0,0 +1,89 @@
+#add_subdirectory(arm64)
+#add_subdirectory(arm)
+add_subdirectory(i386)
+#add_subdirectory(ppc)
+add_subdirectory(x86_64)
+
+include_directories(..)
+
+set(generated_mach_interfaces
+ ${CMAKE_CURRENT_BINARY_DIR}/mach_exc.h
+ ${CMAKE_CURRENT_BINARY_DIR}/mach_excServer.c
+ ${CMAKE_CURRENT_BINARY_DIR}/mach_excUser.c
+ )
+add_custom_command(OUTPUT ${generated_mach_interfaces}
+ COMMAND mig ${CMAKE_CURRENT_SOURCE_DIR}/dbgnub-mig.defs
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/dbgnub-mig.defs
+ )
+
+set(DEBUGSERVER_VERS_GENERATED_FILE ${CMAKE_CURRENT_BINARY_DIR}/debugserver_vers.c)
+set_source_files_properties(${DEBUGSERVER_VERS_GENERATED_FILE} PROPERTIES GENERATED 1)
+
+add_custom_command(OUTPUT ${DEBUGSERVER_VERS_GENERATED_FILE}
+ COMMAND ${LLDB_SOURCE_DIR}/scripts/generate-vers.pl
+ ${LLDB_SOURCE_DIR}/lldb.xcodeproj/project.pbxproj debugserver
+ > ${DEBUGSERVER_VERS_GENERATED_FILE}
+ DEPENDS ${LLDB_SOURCE_DIR}/scripts/generate-vers.pl
+ ${LLDB_SOURCE_DIR}/lldb.xcodeproj/project.pbxproj
+ )
+
+set(DEBUGSERVER_USED_LIBS
+ lldbDebugserverCommon
+ lldbUtility
+ lldbDebugserverMacOSX_I386
+ lldbDebugserverMacOSX_X86_64
+ )
+
+add_lldb_executable(debugserver
+ HasAVX.s
+ CFBundle.cpp
+ CFData.cpp
+ CFString.cpp
+ Genealogy.cpp
+ MachException.cpp
+ MachProcess.mm
+ MachTask.mm
+ MachThread.cpp
+ MachThreadList.cpp
+ MachVMMemory.cpp
+ MachVMRegion.cpp
+ ${generated_mach_interfaces}
+ ${DEBUGSERVER_VERS_GENERATED_FILE}
+ )
+
+set_source_files_properties(
+ HasAVX.s
+ # Necessary since compilation will fail with stand-alone assembler
+ PROPERTIES LANGUAGE C COMPILE_FLAGS "-x assembler-with-cpp"
+ )
+
+target_link_libraries(debugserver ${DEBUGSERVER_USED_LIBS})
+
+# Sign the debugserver binary
+set (CODESIGN_IDENTITY lldb_codesign)
+execute_process(
+ COMMAND xcrun -f codesign_allocate
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+ OUTPUT_VARIABLE CODESIGN_ALLOCATE
+ )
+# Older cmake versions don't support "-E env".
+if (${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION} LESS 3.2)
+ add_custom_command(TARGET debugserver
+ POST_BUILD
+ # Note: --entitlements option removed, as it causes errors when debugging.
+ # was: COMMAND CODESIGN_ALLOCATE=${CODESIGN_ALLOCATE} codesign --entitlements ${CMAKE_CURRENT_SOURCE_DIR}/../debugserver-entitlements.plist --force --sign ${CODESIGN_IDENTITY} debugserver
+ COMMAND CODESIGN_ALLOCATE=${CODESIGN_ALLOCATE} codesign --force --sign ${CODESIGN_IDENTITY} debugserver
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/bin
+ )
+else()
+ add_custom_command(TARGET debugserver
+ POST_BUILD
+ # Note: --entitlements option removed (see comment above).
+ COMMAND ${CMAKE_COMMAND} -E env CODESIGN_ALLOCATE=${CODESIGN_ALLOCATE} codesign --force --sign ${CODESIGN_IDENTITY} debugserver
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/bin
+ )
+endif()
+
+install(TARGETS debugserver
+ RUNTIME DESTINATION bin
+ )
diff --git a/tools/debugserver/source/MacOSX/Genealogy.cpp b/tools/debugserver/source/MacOSX/Genealogy.cpp
new file mode 100644
index 000000000000..a5ee097aa2a6
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/Genealogy.cpp
@@ -0,0 +1,300 @@
+//===-- Genealogy.cpp --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include <Availability.h>
+#include <string>
+#include <dlfcn.h>
+#include <uuid/uuid.h>
+
+#include "DNBDefs.h"
+#include "Genealogy.h"
+#include "GenealogySPI.h"
+#include "MachThreadList.h"
+
+//---------------------------
+/// Constructor
+//---------------------------
+
+Genealogy::Genealogy () :
+ m_os_activity_diagnostic_for_pid (nullptr),
+ m_os_activity_iterate_processes (nullptr),
+ m_os_activity_iterate_breadcrumbs (nullptr),
+ m_os_activity_iterate_messages (nullptr),
+ m_os_activity_iterate_activities (nullptr),
+ m_os_trace_get_type (nullptr),
+ m_os_trace_copy_formatted_message (nullptr),
+ m_os_activity_for_thread (nullptr),
+ m_os_activity_for_task_thread (nullptr),
+ m_thread_activities(),
+ m_process_executable_infos(),
+ m_diagnosticd_call_timed_out(false)
+{
+ m_os_activity_diagnostic_for_pid = (bool (*)(pid_t, os_activity_t, uint32_t, os_diagnostic_block_t))dlsym (RTLD_DEFAULT, "os_activity_diagnostic_for_pid");
+ m_os_activity_iterate_processes = (void (*)(os_activity_process_list_t, bool (^)(os_activity_process_t)))dlsym (RTLD_DEFAULT, "os_activity_iterate_processes");
+ m_os_activity_iterate_breadcrumbs = (void (*)(os_activity_process_t, bool (^)(os_activity_breadcrumb_t))) dlsym (RTLD_DEFAULT, "os_activity_iterate_breadcrumbs");
+ m_os_activity_iterate_messages = (void (*)(os_trace_message_list_t, os_activity_process_t, bool (^)(os_trace_message_t)))dlsym (RTLD_DEFAULT, "os_activity_iterate_messages");
+ m_os_activity_iterate_activities = (void (*)(os_activity_list_t, os_activity_process_t, bool (^)(os_activity_entry_t)))dlsym (RTLD_DEFAULT, "os_activity_iterate_activities");
+ m_os_trace_get_type = (uint8_t (*)(os_trace_message_t)) dlsym (RTLD_DEFAULT, "os_trace_get_type");
+ m_os_trace_copy_formatted_message = (char *(*)(os_trace_message_t)) dlsym (RTLD_DEFAULT, "os_trace_copy_formatted_message");
+ m_os_activity_for_thread = (os_activity_t (*)(os_activity_process_t, uint64_t)) dlsym (RTLD_DEFAULT, "os_activity_for_thread");
+ m_os_activity_for_task_thread = (os_activity_t (*)(task_t, uint64_t)) dlsym (RTLD_DEFAULT, "os_activity_for_task_thread");
+ m_os_activity_messages_for_thread = (os_trace_message_list_t (*) (os_activity_process_t process, os_activity_t activity, uint64_t thread_id)) dlsym (RTLD_DEFAULT, "os_activity_messages_for_thread");
+}
+
+Genealogy::ThreadActivitySP
+Genealogy::GetGenealogyInfoForThread (pid_t pid, nub_thread_t tid, const MachThreadList &thread_list, task_t task, bool &timed_out)
+{
+ ThreadActivitySP activity;
+ //
+ // If we've timed out trying to get the activities, don't try again at this process stop
+ // (else we'd need to hit the timeout for every thread we're asked about).
+ // We'll try again at the next public stop.
+
+ if (m_thread_activities.size() == 0 && m_diagnosticd_call_timed_out == false)
+ {
+ GetActivities(pid, thread_list, task);
+ }
+ std::map<nub_thread_t, ThreadActivitySP>::const_iterator search;
+ search = m_thread_activities.find(tid);
+ if (search != m_thread_activities.end())
+ {
+ activity = search->second;
+ }
+ timed_out = m_diagnosticd_call_timed_out;
+ return activity;
+}
+
+void
+Genealogy::Clear()
+{
+ m_thread_activities.clear();
+ m_diagnosticd_call_timed_out = false;
+}
+
+void
+Genealogy::GetActivities(pid_t pid, const MachThreadList &thread_list, task_t task)
+{
+ if (m_os_activity_diagnostic_for_pid != nullptr
+ && m_os_activity_iterate_processes != nullptr
+ && m_os_activity_iterate_breadcrumbs != nullptr
+ && m_os_activity_iterate_messages != nullptr
+ && m_os_activity_iterate_activities != nullptr
+ && m_os_trace_get_type != nullptr
+ && m_os_trace_copy_formatted_message != nullptr
+ && (m_os_activity_for_thread != nullptr || m_os_activity_for_task_thread != nullptr)
+ )
+ {
+ __block dispatch_semaphore_t semaphore = dispatch_semaphore_create(0);
+ __block BreadcrumbList breadcrumbs;
+ __block ActivityList activities;
+ __block MessageList messages;
+ __block std::map<nub_thread_t, uint64_t> thread_activity_mapping;
+
+ os_activity_diagnostic_flag_t flags = OS_ACTIVITY_DIAGNOSTIC_ALL_ACTIVITIES | OS_ACTIVITY_DIAGNOSTIC_PROCESS_ONLY;
+ if (m_os_activity_diagnostic_for_pid (pid, 0, flags, ^(os_activity_process_list_t processes, int error)
+ {
+ if (error == 0)
+ {
+ m_os_activity_iterate_processes (processes, ^bool(os_activity_process_t process_info)
+ {
+ if (pid == process_info->pid)
+ {
+ // Collect all the Breadcrumbs
+ m_os_activity_iterate_breadcrumbs (process_info, ^bool(os_activity_breadcrumb_t breadcrumb)
+ {
+ Breadcrumb bc;
+ bc.breadcrumb_id = breadcrumb->breadcrumb_id;
+ bc.activity_id = breadcrumb->activity_id;
+ bc.timestamp = breadcrumb->timestamp;
+ if (breadcrumb->name)
+ bc.name = breadcrumb->name;
+ breadcrumbs.push_back (bc);
+ return true;
+ });
+
+ // Collect all the Activities
+ m_os_activity_iterate_activities (process_info->activities, process_info, ^bool(os_activity_entry_t activity)
+ {
+ Activity ac;
+ ac.activity_start = activity->activity_start;
+ ac.activity_id = activity->activity_id;
+ ac.parent_id = activity->parent_id;
+ if (activity->activity_name)
+ ac.activity_name = activity->activity_name;
+ if (activity->reason)
+ ac.reason = activity->reason;
+ activities.push_back (ac);
+ return true;
+ });
+
+
+ // Collect all the Messages -- messages not associated with any thread
+ m_os_activity_iterate_messages (process_info->messages, process_info, ^bool(os_trace_message_t trace_msg)
+ {
+ Message msg;
+ msg.timestamp = trace_msg->timestamp;
+ msg.trace_id = trace_msg->trace_id;
+ msg.thread = trace_msg->thread;
+ msg.type = m_os_trace_get_type (trace_msg);
+ msg.activity_id = 0;
+ if (trace_msg->image_uuid && trace_msg->image_path)
+ {
+ ProcessExecutableInfoSP process_info_sp (new ProcessExecutableInfo());
+ uuid_copy (process_info_sp->image_uuid, trace_msg->image_uuid);
+ process_info_sp->image_path = trace_msg->image_path;
+ msg.process_info_index = AddProcessExecutableInfo (process_info_sp);
+ }
+ const char *message_text = m_os_trace_copy_formatted_message (trace_msg);
+ if (message_text)
+ msg.message = message_text;
+ messages.push_back (msg);
+ return true;
+ });
+
+ // Discover which activities are said to be running on threads currently
+ const nub_size_t num_threads = thread_list.NumThreads();
+ for (nub_size_t i = 0; i < num_threads; ++i)
+ {
+ nub_thread_t thread_id = thread_list.ThreadIDAtIndex(i);
+ os_activity_t act = 0;
+ if (m_os_activity_for_task_thread != nullptr)
+ {
+ act = m_os_activity_for_task_thread (task, thread_id);
+ }
+ else if (m_os_activity_for_thread != nullptr)
+ {
+ act = m_os_activity_for_thread (process_info, thread_id);
+ }
+ if (act != 0)
+ thread_activity_mapping[thread_id] = act;
+ }
+
+ // Collect all Messages -- messages associated with a thread
+
+ // When there's no genealogy information, an early version of os_activity_messages_for_thread
+ // can crash in rare circumstances. Check to see if this process has any activities before
+ // making the call to get messages.
+ if (process_info->activities != nullptr && thread_activity_mapping.size() > 0)
+ {
+ std::map<nub_thread_t, uint64_t>::const_iterator iter;
+ for (iter = thread_activity_mapping.begin(); iter != thread_activity_mapping.end(); ++iter)
+ {
+ nub_thread_t thread_id = iter->first;
+ os_activity_t act = iter->second;
+ os_trace_message_list_t this_thread_messages = m_os_activity_messages_for_thread (process_info, act, thread_id);
+ m_os_activity_iterate_messages (this_thread_messages, process_info, ^bool(os_trace_message_t trace_msg)
+ {
+ Message msg;
+ msg.timestamp = trace_msg->timestamp;
+ msg.trace_id = trace_msg->trace_id;
+ msg.thread = trace_msg->thread;
+ msg.type = m_os_trace_get_type (trace_msg);
+ msg.activity_id = act;
+ if (trace_msg->image_uuid && trace_msg->image_path)
+ {
+ ProcessExecutableInfoSP process_info_sp (new ProcessExecutableInfo());
+ uuid_copy (process_info_sp->image_uuid, trace_msg->image_uuid);
+ process_info_sp->image_path = trace_msg->image_path;
+ msg.process_info_index = AddProcessExecutableInfo (process_info_sp);
+ }
+ const char *message_text = m_os_trace_copy_formatted_message (trace_msg);
+ if (message_text)
+ msg.message = message_text;
+ messages.push_back (msg);
+ return true;
+ });
+ }
+ }
+ }
+ return true;
+ });
+ }
+ dispatch_semaphore_signal(semaphore);
+ }) == true)
+ {
+ // Wait for the diagnosticd xpc calls to all finish up -- or half a second to elapse.
+ dispatch_time_t timeout = dispatch_time(DISPATCH_TIME_NOW, NSEC_PER_SEC / 2);
+ bool success = dispatch_semaphore_wait(semaphore, timeout) == 0;
+ if (!success)
+ {
+ m_diagnosticd_call_timed_out = true;
+ return;
+ }
+ }
+
+ // breadcrumbs, activities, and messages have all now been filled in.
+
+ std::map<nub_thread_t, uint64_t>::const_iterator iter;
+ for (iter = thread_activity_mapping.begin(); iter != thread_activity_mapping.end(); ++iter)
+ {
+ nub_thread_t thread_id = iter->first;
+ uint64_t activity_id = iter->second;
+ ActivityList::const_iterator activity_search;
+ for (activity_search = activities.begin(); activity_search != activities.end(); ++activity_search)
+ {
+ if (activity_search->activity_id == activity_id)
+ {
+ ThreadActivitySP thread_activity_sp (new ThreadActivity());
+ thread_activity_sp->current_activity = *activity_search;
+
+ BreadcrumbList::const_iterator breadcrumb_search;
+ for (breadcrumb_search = breadcrumbs.begin(); breadcrumb_search != breadcrumbs.end(); ++breadcrumb_search)
+ {
+ if (breadcrumb_search->activity_id == activity_id)
+ {
+ thread_activity_sp->breadcrumbs.push_back (*breadcrumb_search);
+ }
+ }
+ MessageList::const_iterator message_search;
+ for (message_search = messages.begin(); message_search != messages.end(); ++message_search)
+ {
+ if (message_search->thread == thread_id)
+ {
+ thread_activity_sp->messages.push_back (*message_search);
+ }
+ }
+
+ m_thread_activities[thread_id] = thread_activity_sp;
+ break;
+ }
+ }
+ }
+ }
+}
+
+uint32_t
+Genealogy::AddProcessExecutableInfo (ProcessExecutableInfoSP process_exe_info)
+{
+ const uint32_t info_size = static_cast<uint32_t>(m_process_executable_infos.size());
+ for (uint32_t idx = 0; idx < info_size; ++idx)
+ {
+ if (uuid_compare (m_process_executable_infos[idx]->image_uuid, process_exe_info->image_uuid) == 0)
+ {
+ return idx + 1;
+ }
+ }
+ m_process_executable_infos.push_back (process_exe_info);
+ return info_size + 1;
+}
+
+Genealogy::ProcessExecutableInfoSP
+Genealogy::GetProcessExecutableInfosAtIndex(size_t idx)
+{
+ ProcessExecutableInfoSP info_sp;
+ if (idx > 0)
+ {
+ idx--;
+ if (idx < m_process_executable_infos.size())
+ {
+ info_sp = m_process_executable_infos[idx];
+ }
+ }
+ return info_sp;
+}
+
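The heart of GetActivities() is the timeout pattern around the diagnosticd XPC call. A stripped-down sketch under the same assumptions (libdispatch, blocks, a half-second budget; the queue choice and helper name are invented):

    #include <dispatch/dispatch.h>

    static bool RunDiagnosticQueryWithTimeout ()
    {
        dispatch_semaphore_t done = dispatch_semaphore_create (0);

        // Stand-in for the os_activity_diagnostic_for_pid() completion block:
        // do the collection work asynchronously, then signal the semaphore.
        dispatch_async (dispatch_get_global_queue (DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
            /* ... gather breadcrumbs, activities and messages ... */
            dispatch_semaphore_signal (done);
        });

        // Wait at most half a second, exactly as GetActivities() does; a timeout
        // is recorded (m_diagnosticd_call_timed_out) instead of hanging the debugger.
        dispatch_time_t timeout = dispatch_time (DISPATCH_TIME_NOW, NSEC_PER_SEC / 2);
        return dispatch_semaphore_wait (done, timeout) == 0;
    }
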
diff --git a/tools/debugserver/source/MacOSX/Genealogy.h b/tools/debugserver/source/MacOSX/Genealogy.h
new file mode 100644
index 000000000000..d39145a06f29
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/Genealogy.h
@@ -0,0 +1,116 @@
+//===-- Genealogy.h ----------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __Genealogy_h__
+#define __Genealogy_h__
+
+#include <string>
+#include <vector>
+#include <map>
+#include <memory>
+#include <pthread.h>
+#include <mach/task.h>
+
+#include "GenealogySPI.h"
+#include "MachThreadList.h"
+
+class Genealogy
+{
+public:
+
+ Genealogy ();
+
+ ~Genealogy ()
+ {
+ }
+
+ void
+ Clear();
+
+ struct Breadcrumb
+ {
+ uint32_t breadcrumb_id;
+ uint64_t activity_id;
+ uint64_t timestamp;
+ std::string name;
+ };
+
+ struct Activity
+ {
+ uint64_t activity_start;
+ uint64_t activity_id;
+ uint64_t parent_id;
+ std::string activity_name;
+ std::string reason;
+ };
+
+ struct Message
+ {
+ uint64_t timestamp;
+ uint64_t activity_id;
+ uint64_t trace_id;
+ uint64_t thread;
+ uint8_t type; // OS_TRACE_TYPE_RELEASE, OS_TRACE_TYPE_DEBUG, OS_TRACE_TYPE_ERROR, OS_TRACE_TYPE_FAULT
+ uint32_t process_info_index; // index # of the image uuid/file path, 0 means unknown
+ std::string message;
+ };
+
+ typedef std::vector<Message> MessageList;
+ typedef std::vector<Breadcrumb> BreadcrumbList;
+ typedef std::vector<Activity> ActivityList;
+
+ struct ThreadActivity
+ {
+ Activity current_activity;
+ MessageList messages;
+ BreadcrumbList breadcrumbs; // should be 0 or 1 breadcrumbs; no more than 1 BC for any given activity
+ };
+
+ typedef std::shared_ptr<ThreadActivity> ThreadActivitySP;
+
+ ThreadActivitySP
+ GetGenealogyInfoForThread (pid_t pid, nub_thread_t tid, const MachThreadList &thread_list, task_t task, bool &timed_out);
+
+ struct ProcessExecutableInfo
+ {
+ std::string image_path;
+ uuid_t image_uuid;
+ };
+
+ typedef std::shared_ptr<ProcessExecutableInfo> ProcessExecutableInfoSP;
+
+ ProcessExecutableInfoSP
+ GetProcessExecutableInfosAtIndex(size_t idx);
+
+ uint32_t
+ AddProcessExecutableInfo(ProcessExecutableInfoSP process_exe_info);
+
+private:
+
+ void
+ GetActivities(pid_t pid, const MachThreadList &thread_list, task_t task);
+
+ // The SPI entry points we need from libtrace; look them up via dlsym at runtime
+ bool (*m_os_activity_diagnostic_for_pid) (pid_t pid, os_activity_t activity, uint32_t flags, os_diagnostic_block_t block);
+ void (*m_os_activity_iterate_processes) (os_activity_process_list_t processes, bool (^iterator)(os_activity_process_t process_info));
+ void (*m_os_activity_iterate_breadcrumbs) (os_activity_process_t process_info, bool (^iterator)(os_activity_breadcrumb_t breadcrumb));
+ void (*m_os_activity_iterate_messages) (os_trace_message_list_t messages, os_activity_process_t process_info, bool (^iterator)(os_trace_message_t tracemsg));
+ void (*m_os_activity_iterate_activities) (os_activity_list_t activities, os_activity_process_t process_info, bool (^iterator)(os_activity_entry_t activity));
+ uint8_t (*m_os_trace_get_type) (os_trace_message_t trace_msg);
+ char * (*m_os_trace_copy_formatted_message) (os_trace_message_t trace_msg);
+ os_activity_t (*m_os_activity_for_thread) (os_activity_process_t process, uint64_t thread_id);
+ os_activity_t (*m_os_activity_for_task_thread) (task_t target, uint64_t thread_id);
+ os_trace_message_list_t (*m_os_activity_messages_for_thread) (os_activity_process_t process, os_activity_t activity, uint64_t thread_id);
+
+
+ std::map<nub_thread_t, ThreadActivitySP> m_thread_activities;
+ std::vector<ProcessExecutableInfoSP> m_process_executable_infos;
+ bool m_diagnosticd_call_timed_out;
+};
+
+#endif // __Genealogy_h__
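
All of the libtrace SPI in this class is resolved at runtime rather than linked. A small sketch of that lookup pattern (not from the commit; the typedef and helper are hypothetical), mirroring what the Genealogy constructor does for each function pointer:

    #include "GenealogySPI.h"
    #include <dlfcn.h>
    #include <stdint.h>

    typedef uint8_t (*os_trace_get_type_fn) (os_trace_message_t trace_msg);

    static os_trace_get_type_fn LookupTraceGetType ()
    {
        // Returns NULL on systems where libtrace lacks the symbol; callers must
        // check before use, which is why GetActivities() tests every pointer first.
        return (os_trace_get_type_fn)::dlsym (RTLD_DEFAULT, "os_trace_get_type");
    }
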
diff --git a/tools/debugserver/source/MacOSX/GenealogySPI.h b/tools/debugserver/source/MacOSX/GenealogySPI.h
new file mode 100644
index 000000000000..f84e930e8725
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/GenealogySPI.h
@@ -0,0 +1,96 @@
+//===-- GenealogySPI.h ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+
+#ifndef __GenealogySPI_h__
+#define __GenealogySPI_h__
+
+#include <xpc/xpc.h>
+
+typedef void *os_activity_process_list_t;
+typedef void *os_activity_list_t;
+typedef void *os_trace_message_list_t;
+typedef struct os_activity_watch_s *os_activity_watch_t;
+typedef uint64_t os_activity_t;
+
+struct os_activity_breadcrumb_s {
+ uint32_t breadcrumb_id;
+ uint64_t activity_id;
+ uint64_t timestamp;
+ const char *name;
+};
+
+typedef struct os_activity_breadcrumb_s *os_activity_breadcrumb_t;
+
+typedef struct os_trace_message_s {
+ uint64_t trace_id;
+ uint64_t thread;
+ uint64_t timestamp;
+ uint32_t offset;
+ xpc_object_t __unsafe_unretained payload;
+ const uint8_t *image_uuid;
+ const char *image_path;
+ const char *format;
+ const void *buffer;
+ size_t bufferLen;
+} *os_trace_message_t;
+
+typedef struct os_activity_process_s {
+ os_activity_process_list_t child_procs;
+ os_trace_message_list_t messages;
+ os_activity_list_t activities;
+ void *breadcrumbs;
+ uint64_t proc_id;
+ const uint8_t *image_uuid;
+ const char *image_path;
+ pid_t pid;
+} *os_activity_process_t;
+
+typedef struct os_activity_entry_s {
+ uint64_t activity_start;
+ os_activity_t activity_id;
+ os_activity_t parent_id;
+ const char *activity_name;
+ const char *reason;
+ os_trace_message_list_t messages;
+} *os_activity_entry_t;
+
+enum
+{
+ OS_ACTIVITY_DIAGNOSTIC_DEFAULT = 0x00000000,
+ OS_ACTIVITY_DIAGNOSTIC_PROCESS_ONLY = 0x00000001,
+ OS_ACTIVITY_DIAGNOSTIC_SKIP_DECODE = 0x00000002,
+ OS_ACTIVITY_DIAGNOSTIC_FLATTENED = 0x00000004,
+ OS_ACTIVITY_DIAGNOSTIC_ALL_ACTIVITIES = 0x00000008,
+ OS_ACTIVITY_DIAGNOSTIC_MAX = 0x0000000f
+};
+typedef uint32_t os_activity_diagnostic_flag_t;
+
+enum
+{
+ OS_ACTIVITY_WATCH_DEFAULT = 0x00000000,
+ OS_ACTIVITY_WATCH_PROCESS_ONLY = 0x00000001,
+ OS_ACTIVITY_WATCH_SKIP_DECODE = 0x00000002,
+ OS_ACTIVITY_WATCH_PAYLOAD = 0x00000004,
+ OS_ACTIVITY_WATCH_ERRORS = 0x00000008,
+ OS_ACTIVITY_WATCH_FAULTS = 0x00000010,
+ OS_ACTIVITY_WATCH_MAX = 0x0000001f
+};
+typedef uint32_t os_activity_watch_flag_t;
+
+// Return values from os_trace_get_type()
+#define OS_TRACE_TYPE_RELEASE (1u << 0)
+#define OS_TRACE_TYPE_DEBUG (1u << 1)
+#define OS_TRACE_TYPE_ERROR ((1u << 6) | (1u << 0))
+#define OS_TRACE_TYPE_FAULT ((1u << 7) | (1u << 6) | (1u << 0))
+
+
+typedef void (^os_activity_watch_block_t)(os_activity_watch_t watch, os_activity_process_t process_info, bool canceled);
+typedef void (^os_diagnostic_block_t)(os_activity_process_list_t processes, int error);
+
+#endif
+
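A hypothetical helper (not in the commit) showing how the OS_TRACE_TYPE_* values above are meant to be decoded: FAULT and ERROR share bits with RELEASE, so the more specific masks have to be tested first.

    #include "GenealogySPI.h"
    #include <stdint.h>

    static const char *
    TraceTypeName (uint8_t type)
    {
        if ((type & OS_TRACE_TYPE_FAULT) == OS_TRACE_TYPE_FAULT)
            return "fault";
        if ((type & OS_TRACE_TYPE_ERROR) == OS_TRACE_TYPE_ERROR)
            return "error";
        if (type & OS_TRACE_TYPE_DEBUG)
            return "debug";
        if (type & OS_TRACE_TYPE_RELEASE)
            return "release";
        return "unknown";
    }
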
diff --git a/tools/debugserver/source/MacOSX/HasAVX.h b/tools/debugserver/source/MacOSX/HasAVX.h
new file mode 100644
index 000000000000..c7a50fa20b3e
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/HasAVX.h
@@ -0,0 +1,27 @@
+//===-- HasAVX.h ------------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HasAVX_h
+#define HasAVX_h
+
+#if defined (__i386__) || defined (__x86_64__)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int HasAVX ();
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+#endif
diff --git a/tools/debugserver/source/MacOSX/HasAVX.s b/tools/debugserver/source/MacOSX/HasAVX.s
new file mode 100644
index 000000000000..b66ccf14dbee
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/HasAVX.s
@@ -0,0 +1,50 @@
+//===-- HasAVX.s ---------------------------------------*- x86 Assembly -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#if defined (__i386__) || defined (__x86_64__)
+
+.globl _HasAVX
+
+_HasAVX:
+#if defined (__x86_64__)
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %rbx
+#else
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+#endif
+ mov $1, %eax
+ cpuid // clobbers ebx
+ and $0x018000000, %ecx
+ cmp $0x018000000, %ecx
+ jne not_supported
+ mov $0, %ecx
+.byte 0x0f, 0x01, 0xd0 // xgetbv, for those assemblers that don't know it
+ and $0x06, %eax
+ cmp $0x06, %eax
+ jne not_supported
+ mov $1, %eax
+ jmp done
+not_supported:
+ mov $0, %eax
+done:
+#if defined (__x86_64__)
+ popq %rbx
+ movq %rbp, %rsp
+ popq %rbp
+#else
+ popl %ebx
+ movl %ebp, %esp
+ popl %ebp
+#endif
+ ret // return
+
+#endif
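
HasAVX() has plain C linkage, so x86 host code can probe once and cache the answer. A minimal caller sketch (not from the commit; the wrapper name is invented):

    #include "HasAVX.h"

    static bool CPUSupportsAVX ()
    {
    #if defined (__i386__) || defined (__x86_64__)
        static const bool has_avx = HasAVX() != 0;   // cpuid/xgetbv probe, run once
        return has_avx;
    #else
        return false;
    #endif
    }
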
diff --git a/tools/debugserver/source/MacOSX/MachException.cpp b/tools/debugserver/source/MacOSX/MachException.cpp
new file mode 100644
index 000000000000..b7245796ae4c
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/MachException.cpp
@@ -0,0 +1,582 @@
+//===-- MachException.cpp ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/18/07.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MachException.h"
+#include "MachProcess.h"
+#include "DNB.h"
+#include "DNBError.h"
+#include <sys/types.h>
+#include "DNBLog.h"
+#include "PThreadMutex.h"
+#include "SysSignal.h"
+#include <errno.h>
+#include <sys/ptrace.h>
+
+// Routine mach_exception_raise
+extern "C"
+kern_return_t catch_mach_exception_raise
+(
+ mach_port_t exception_port,
+ mach_port_t thread,
+ mach_port_t task,
+ exception_type_t exception,
+ mach_exception_data_t code,
+ mach_msg_type_number_t codeCnt
+);
+
+extern "C"
+kern_return_t catch_mach_exception_raise_state
+(
+ mach_port_t exception_port,
+ exception_type_t exception,
+ const mach_exception_data_t code,
+ mach_msg_type_number_t codeCnt,
+ int *flavor,
+ const thread_state_t old_state,
+ mach_msg_type_number_t old_stateCnt,
+ thread_state_t new_state,
+ mach_msg_type_number_t *new_stateCnt
+);
+
+// Routine mach_exception_raise_state_identity
+extern "C"
+kern_return_t catch_mach_exception_raise_state_identity
+(
+ mach_port_t exception_port,
+ mach_port_t thread,
+ mach_port_t task,
+ exception_type_t exception,
+ mach_exception_data_t code,
+ mach_msg_type_number_t codeCnt,
+ int *flavor,
+ thread_state_t old_state,
+ mach_msg_type_number_t old_stateCnt,
+ thread_state_t new_state,
+ mach_msg_type_number_t *new_stateCnt
+);
+
+extern "C" boolean_t mach_exc_server(
+ mach_msg_header_t *InHeadP,
+ mach_msg_header_t *OutHeadP);
+
+// Any access to the g_message variable should be done by locking the
+// g_message_mutex first, using the g_message variable, then unlocking
+// the g_message_mutex. See MachException::Message::CatchExceptionRaise()
+// for sample code.
+
+static MachException::Data *g_message = NULL;
+//static pthread_mutex_t g_message_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+
+extern "C"
+kern_return_t
+catch_mach_exception_raise_state
+(
+ mach_port_t exc_port,
+ exception_type_t exc_type,
+ const mach_exception_data_t exc_data,
+ mach_msg_type_number_t exc_data_count,
+ int * flavor,
+ const thread_state_t old_state,
+ mach_msg_type_number_t old_stateCnt,
+ thread_state_t new_state,
+ mach_msg_type_number_t * new_stateCnt
+)
+{
+ if (DNBLogCheckLogBit(LOG_EXCEPTIONS))
+ {
+ DNBLogThreaded ("::%s ( exc_port = 0x%4.4x, exc_type = %d ( %s ), exc_data = 0x%llx, exc_data_count = %d)",
+ __FUNCTION__,
+ exc_port,
+ exc_type, MachException::Name(exc_type),
+ (uint64_t)exc_data,
+ exc_data_count);
+ }
+ return KERN_FAILURE;
+}
+
+extern "C"
+kern_return_t
+catch_mach_exception_raise_state_identity
+(
+ mach_port_t exc_port,
+ mach_port_t thread_port,
+ mach_port_t task_port,
+ exception_type_t exc_type,
+ mach_exception_data_t exc_data,
+ mach_msg_type_number_t exc_data_count,
+ int * flavor,
+ thread_state_t old_state,
+ mach_msg_type_number_t old_stateCnt,
+ thread_state_t new_state,
+ mach_msg_type_number_t *new_stateCnt
+)
+{
+ if (DNBLogCheckLogBit(LOG_EXCEPTIONS))
+ {
+ DNBLogThreaded("::%s ( exc_port = 0x%4.4x, thd_port = 0x%4.4x, tsk_port = 0x%4.4x, exc_type = %d ( %s ), exc_data[%d] = { 0x%llx, 0x%llx })",
+ __FUNCTION__,
+ exc_port,
+ thread_port,
+ task_port,
+ exc_type, MachException::Name(exc_type),
+ exc_data_count,
+ (uint64_t)(exc_data_count > 0 ? exc_data[0] : 0xBADDBADD),
+ (uint64_t)(exc_data_count > 1 ? exc_data[1] : 0xBADDBADD));
+ }
+ mach_port_deallocate (mach_task_self (), task_port);
+ mach_port_deallocate (mach_task_self (), thread_port);
+
+ return KERN_FAILURE;
+}
+
+extern "C"
+kern_return_t
+catch_mach_exception_raise
+(
+ mach_port_t exc_port,
+ mach_port_t thread_port,
+ mach_port_t task_port,
+ exception_type_t exc_type,
+ mach_exception_data_t exc_data,
+ mach_msg_type_number_t exc_data_count)
+{
+ if (DNBLogCheckLogBit(LOG_EXCEPTIONS))
+ {
+ DNBLogThreaded ("::%s ( exc_port = 0x%4.4x, thd_port = 0x%4.4x, tsk_port = 0x%4.4x, exc_type = %d ( %s ), exc_data[%d] = { 0x%llx, 0x%llx })",
+ __FUNCTION__,
+ exc_port,
+ thread_port,
+ task_port,
+ exc_type, MachException::Name(exc_type),
+ exc_data_count,
+ (uint64_t)(exc_data_count > 0 ? exc_data[0] : 0xBADDBADD),
+ (uint64_t)(exc_data_count > 1 ? exc_data[1] : 0xBADDBADD));
+ }
+
+ if (task_port == g_message->task_port)
+ {
+ g_message->task_port = task_port;
+ g_message->thread_port = thread_port;
+ g_message->exc_type = exc_type;
+ g_message->exc_data.resize(exc_data_count);
+ ::memcpy (&g_message->exc_data[0], exc_data, g_message->exc_data.size() * sizeof (mach_exception_data_type_t));
+ return KERN_SUCCESS;
+ }
+ return KERN_FAILURE;
+}
+
+
+void
+MachException::Message::Dump() const
+{
+ DNBLogThreadedIf(LOG_EXCEPTIONS,
+ " exc_msg { bits = 0x%8.8x size = 0x%8.8x remote-port = 0x%8.8x local-port = 0x%8.8x reserved = 0x%8.8x id = 0x%8.8x } ",
+ exc_msg.hdr.msgh_bits,
+ exc_msg.hdr.msgh_size,
+ exc_msg.hdr.msgh_remote_port,
+ exc_msg.hdr.msgh_local_port,
+ exc_msg.hdr.msgh_reserved,
+ exc_msg.hdr.msgh_id);
+
+ DNBLogThreadedIf(LOG_EXCEPTIONS,
+ "reply_msg { bits = 0x%8.8x size = 0x%8.8x remote-port = 0x%8.8x local-port = 0x%8.8x reserved = 0x%8.8x id = 0x%8.8x }",
+ reply_msg.hdr.msgh_bits,
+ reply_msg.hdr.msgh_size,
+ reply_msg.hdr.msgh_remote_port,
+ reply_msg.hdr.msgh_local_port,
+ reply_msg.hdr.msgh_reserved,
+ reply_msg.hdr.msgh_id);
+
+ state.Dump();
+}
+
+bool
+MachException::Data::GetStopInfo(struct DNBThreadStopInfo *stop_info) const
+{
+ // Zero out the structure.
+ memset(stop_info, 0, sizeof(struct DNBThreadStopInfo));
+
+ if (exc_type == 0)
+ {
+ stop_info->reason = eStopTypeInvalid;
+ return true;
+ }
+
+ // We always stop with a mach exception
+ stop_info->reason = eStopTypeException;
+ // Save the EXC_XXXX exception type
+ stop_info->details.exception.type = exc_type;
+
+ // Fill in a text description
+ const char * exc_name = MachException::Name(exc_type);
+ char *desc = stop_info->description;
+ const char *end_desc = desc + DNB_THREAD_STOP_INFO_MAX_DESC_LENGTH;
+ if (exc_name)
+ desc += snprintf(desc, DNB_THREAD_STOP_INFO_MAX_DESC_LENGTH, "%s", exc_name);
+ else
+ desc += snprintf(desc, DNB_THREAD_STOP_INFO_MAX_DESC_LENGTH, "%i", exc_type);
+
+ stop_info->details.exception.data_count = exc_data.size();
+
+ int soft_signal = SoftSignal();
+ if (soft_signal)
+ {
+ if (desc < end_desc)
+ {
+ const char *sig_str = SysSignal::Name(soft_signal);
+ snprintf(desc, end_desc - desc, " EXC_SOFT_SIGNAL( %i ( %s ))", soft_signal, sig_str ? sig_str : "unknown signal");
+ }
+ }
+ else
+ {
+ // No special disassembly for exception data, just dump the data
+ size_t idx;
+ if (desc < end_desc)
+ {
+ desc += snprintf(desc, end_desc - desc, " data[%llu] = {", (uint64_t)stop_info->details.exception.data_count);
+
+ for (idx = 0; desc < end_desc && idx < stop_info->details.exception.data_count; ++idx)
+ desc += snprintf(desc, end_desc - desc, "0x%llx%c", (uint64_t)exc_data[idx], ((idx + 1 == stop_info->details.exception.data_count) ? '}' : ','));
+ }
+ }
+
+ // Copy the exception data
+ size_t i;
+ for (i=0; i<stop_info->details.exception.data_count; i++)
+ stop_info->details.exception.data[i] = exc_data[i];
+
+ return true;
+}
+
+
+void
+MachException::Data::DumpStopReason() const
+{
+ int soft_signal = SoftSignal();
+ if (soft_signal)
+ {
+ const char *signal_str = SysSignal::Name(soft_signal);
+ if (signal_str)
+ DNBLog("signal(%s)", signal_str);
+ else
+ DNBLog("signal(%i)", soft_signal);
+ return;
+ }
+ DNBLog("%s", Name(exc_type));
+}
+
+kern_return_t
+MachException::Message::Receive(mach_port_t port, mach_msg_option_t options, mach_msg_timeout_t timeout, mach_port_t notify_port)
+{
+ DNBError err;
+ const bool log_exceptions = DNBLogCheckLogBit(LOG_EXCEPTIONS);
+ mach_msg_timeout_t mach_msg_timeout = options & MACH_RCV_TIMEOUT ? timeout : 0;
+ if (log_exceptions && ((options & MACH_RCV_TIMEOUT) == 0))
+ {
+ // Dump this log message if we have no timeout in case it never returns
+ DNBLogThreaded ("::mach_msg ( msg->{bits = %#x, size = %u remote_port = %#x, local_port = %#x, reserved = 0x%x, id = 0x%x}, option = %#x, send_size = 0, rcv_size = %llu, rcv_name = %#x, timeout = %u, notify = %#x)",
+ exc_msg.hdr.msgh_bits,
+ exc_msg.hdr.msgh_size,
+ exc_msg.hdr.msgh_remote_port,
+ exc_msg.hdr.msgh_local_port,
+ exc_msg.hdr.msgh_reserved,
+ exc_msg.hdr.msgh_id,
+ options,
+ (uint64_t)sizeof (exc_msg.data),
+ port,
+ mach_msg_timeout,
+ notify_port);
+ }
+
+ err = ::mach_msg (&exc_msg.hdr,
+ options, // options
+ 0, // Send size
+ sizeof (exc_msg.data), // Receive size
+ port, // exception port to watch for exception on
+ mach_msg_timeout, // timeout in msec (obeyed only if MACH_RCV_TIMEOUT is ORed into the options parameter)
+ notify_port);
+
+ // Dump any errors we get
+ if (log_exceptions)
+ {
+ err.LogThreaded("::mach_msg ( msg->{bits = %#x, size = %u remote_port = %#x, local_port = %#x, reserved = 0x%x, id = 0x%x}, option = %#x, send_size = %u, rcv_size = %u, rcv_name = %#x, timeout = %u, notify = %#x)",
+ exc_msg.hdr.msgh_bits,
+ exc_msg.hdr.msgh_size,
+ exc_msg.hdr.msgh_remote_port,
+ exc_msg.hdr.msgh_local_port,
+ exc_msg.hdr.msgh_reserved,
+ exc_msg.hdr.msgh_id,
+ options,
+ 0,
+ sizeof (exc_msg.data),
+ port,
+ mach_msg_timeout,
+ notify_port);
+ }
+ return err.Error();
+}
+
+bool
+MachException::Message::CatchExceptionRaise(task_t task)
+{
+ bool success = false;
+ // locker will keep a mutex locked until it goes out of scope
+// PThreadMutex::Locker locker(&g_message_mutex);
+ // DNBLogThreaded("calling mach_exc_server");
+ state.task_port = task;
+ g_message = &state;
+ // The exc_server function is the MIG generated server handling function
+ // to handle messages from the kernel relating to the occurrence of an
+ // exception in a thread. Such messages are delivered to the exception port
+ // set via thread_set_exception_ports or task_set_exception_ports. When an
+ // exception occurs in a thread, the thread sends an exception message to
+ // its exception port, blocking in the kernel waiting for the receipt of a
+ // reply. The exc_server function performs all necessary argument handling
+ // for this kernel message and calls catch_exception_raise,
+ // catch_exception_raise_state or catch_exception_raise_state_identity,
+ // which should handle the exception. If the called routine returns
+ // KERN_SUCCESS, a reply message will be sent, allowing the thread to
+ // continue from the point of the exception; otherwise, no reply message
+ // is sent and the called routine must have dealt with the exception
+ // thread directly.
+ if (mach_exc_server (&exc_msg.hdr, &reply_msg.hdr))
+ {
+ success = true;
+ }
+ else if (DNBLogCheckLogBit(LOG_EXCEPTIONS))
+ {
+ DNBLogThreaded("mach_exc_server returned zero...");
+ }
+ g_message = NULL;
+ return success;
+}
+
+
+
+kern_return_t
+MachException::Message::Reply(MachProcess *process, int signal)
+{
+ // Reply to the exception...
+ DNBError err;
+
+ // If we had a soft signal, we need to update the thread first so it can
+ // continue without signaling
+ int soft_signal = state.SoftSignal();
+ if (soft_signal)
+ {
+ int state_pid = -1;
+ if (process->Task().TaskPort() == state.task_port)
+ {
+ // This is our task, so we can update the signal to send to it
+ state_pid = process->ProcessID();
+ soft_signal = signal;
+ }
+ else
+ {
+ err = ::pid_for_task(state.task_port, &state_pid);
+ }
+
+ assert (state_pid != -1);
+ if (state_pid != -1)
+ {
+ errno = 0;
+ if (::ptrace (PT_THUPDATE, state_pid, (caddr_t)((uintptr_t)state.thread_port), soft_signal) != 0)
+ err.SetError(errno, DNBError::POSIX);
+ else
+ err.Clear();
+
+ if (DNBLogCheckLogBit(LOG_EXCEPTIONS) || err.Fail())
+ err.LogThreaded("::ptrace (request = PT_THUPDATE, pid = 0x%4.4x, tid = 0x%4.4x, signal = %i)", state_pid, state.thread_port, soft_signal);
+ }
+ }
+
+ DNBLogThreadedIf(LOG_EXCEPTIONS, "::mach_msg ( msg->{bits = %#x, size = %u, remote_port = %#x, local_port = %#x, reserved = 0x%x, id = 0x%x}, option = %#x, send_size = %u, rcv_size = %u, rcv_name = %#x, timeout = %u, notify = %#x)",
+ reply_msg.hdr.msgh_bits,
+ reply_msg.hdr.msgh_size,
+ reply_msg.hdr.msgh_remote_port,
+ reply_msg.hdr.msgh_local_port,
+ reply_msg.hdr.msgh_reserved,
+ reply_msg.hdr.msgh_id,
+ MACH_SEND_MSG | MACH_SEND_INTERRUPT,
+ reply_msg.hdr.msgh_size,
+ 0,
+ MACH_PORT_NULL,
+ MACH_MSG_TIMEOUT_NONE,
+ MACH_PORT_NULL);
+
+ err = ::mach_msg ( &reply_msg.hdr,
+ MACH_SEND_MSG | MACH_SEND_INTERRUPT,
+ reply_msg.hdr.msgh_size,
+ 0,
+ MACH_PORT_NULL,
+ MACH_MSG_TIMEOUT_NONE,
+ MACH_PORT_NULL);
+
+ if (err.Fail())
+ {
+ if (err.Error() == MACH_SEND_INTERRUPTED)
+ {
+ if (DNBLogCheckLogBit(LOG_EXCEPTIONS))
+ err.LogThreaded("::mach_msg() - send interrupted");
+ // TODO: keep retrying to reply???
+ }
+ else
+ {
+ if (state.task_port == process->Task().TaskPort())
+ {
+ DNBLogThreaded("error: mach_msg() returned an error when replying to a mach exception: error = %u", err.Error());
+ abort ();
+ }
+ else
+ {
+ if (DNBLogCheckLogBit(LOG_EXCEPTIONS))
+ err.LogThreaded("::mach_msg() - failed (child of task)");
+ }
+ }
+ }
+
+ return err.Error();
+}
+
+
+void
+MachException::Data::Dump() const
+{
+ const char *exc_type_name = MachException::Name(exc_type);
+ DNBLogThreadedIf(LOG_EXCEPTIONS, " state { task_port = 0x%4.4x, thread_port = 0x%4.4x, exc_type = %i (%s) ...", task_port, thread_port, exc_type, exc_type_name ? exc_type_name : "???");
+
+ const size_t exc_data_count = exc_data.size();
+ // Dump any special exception data contents
+ int soft_signal = SoftSignal();
+ if (soft_signal != 0)
+ {
+ const char *sig_str = SysSignal::Name(soft_signal);
+ DNBLogThreadedIf(LOG_EXCEPTIONS, " exc_data: EXC_SOFT_SIGNAL (%i (%s))", soft_signal, sig_str ? sig_str : "unknown signal");
+ }
+ else
+ {
+ // No special disassembly for this data, just dump the data
+ size_t idx;
+ for (idx = 0; idx < exc_data_count; ++idx)
+ {
+ DNBLogThreadedIf(LOG_EXCEPTIONS, " exc_data[%llu]: 0x%llx", (uint64_t)idx, (uint64_t)exc_data[idx]);
+ }
+ }
+}
+
+#define PREV_EXC_MASK_ALL (EXC_MASK_BAD_ACCESS | \
+ EXC_MASK_BAD_INSTRUCTION | \
+ EXC_MASK_ARITHMETIC | \
+ EXC_MASK_EMULATION | \
+ EXC_MASK_SOFTWARE | \
+ EXC_MASK_BREAKPOINT | \
+ EXC_MASK_SYSCALL | \
+ EXC_MASK_MACH_SYSCALL | \
+ EXC_MASK_RPC_ALERT | \
+ EXC_MASK_MACHINE)
+
+// Don't listen for EXC_RESOURCE; it should really get handled by the system handler.
+
+#ifndef EXC_RESOURCE
+#define EXC_RESOURCE 11
+#endif
+
+#ifndef EXC_MASK_RESOURCE
+#define EXC_MASK_RESOURCE (1 << EXC_RESOURCE)
+#endif
+
+#define LLDB_EXC_MASK (EXC_MASK_ALL & ~EXC_MASK_RESOURCE)
+
+kern_return_t
+MachException::PortInfo::Save (task_t task)
+{
+ DNBLogThreadedIf(LOG_EXCEPTIONS | LOG_VERBOSE, "MachException::PortInfo::Save ( task = 0x%4.4x )", task);
+ // Be careful to allow debugserver to be built on a newer OS than the one it
+ // is currently running on: start by asking for all exceptions and back off
+ // to just what the current system supports.
+ DNBError err;
+
+ mask = LLDB_EXC_MASK;
+
+ count = (sizeof (ports) / sizeof (ports[0]));
+ err = ::task_get_exception_ports (task, mask, masks, &count, ports, behaviors, flavors);
+ if (DNBLogCheckLogBit(LOG_EXCEPTIONS) || err.Fail())
+ err.LogThreaded("::task_get_exception_ports ( task = 0x%4.4x, mask = 0x%x, maskCnt => %u, ports, behaviors, flavors )", task, mask, count);
+
+ if (err.Error() == KERN_INVALID_ARGUMENT && mask != PREV_EXC_MASK_ALL)
+ {
+ mask = PREV_EXC_MASK_ALL;
+ count = (sizeof (ports) / sizeof (ports[0]));
+ err = ::task_get_exception_ports (task, mask, masks, &count, ports, behaviors, flavors);
+ if (DNBLogCheckLogBit(LOG_EXCEPTIONS) || err.Fail())
+ err.LogThreaded("::task_get_exception_ports ( task = 0x%4.4x, mask = 0x%x, maskCnt => %u, ports, behaviors, flavors )", task, mask, count);
+ }
+ if (err.Fail())
+ {
+ mask = 0;
+ count = 0;
+ }
+ return err.Error();
+}
+
+kern_return_t
+MachException::PortInfo::Restore (task_t task)
+{
+ DNBLogThreadedIf(LOG_EXCEPTIONS | LOG_VERBOSE, "MachException::PortInfo::Restore( task = 0x%4.4x )", task);
+ uint32_t i = 0;
+ DNBError err;
+ if (count > 0)
+ {
+ for (i = 0; i < count; i++)
+ {
+ err = ::task_set_exception_ports (task, masks[i], ports[i], behaviors[i], flavors[i]);
+ if (DNBLogCheckLogBit(LOG_EXCEPTIONS) || err.Fail())
+ {
+ err.LogThreaded("::task_set_exception_ports ( task = 0x%4.4x, exception_mask = 0x%8.8x, new_port = 0x%4.4x, behavior = 0x%8.8x, new_flavor = 0x%8.8x )", task, masks[i], ports[i], behaviors[i], flavors[i]);
+ // Bail if we encounter any errors
+ }
+
+ if (err.Fail())
+ break;
+ }
+ }
+ count = 0;
+ return err.Error();
+}
+
+const char *
+MachException::Name(exception_type_t exc_type)
+{
+ switch (exc_type)
+ {
+ case EXC_BAD_ACCESS: return "EXC_BAD_ACCESS";
+ case EXC_BAD_INSTRUCTION: return "EXC_BAD_INSTRUCTION";
+ case EXC_ARITHMETIC: return "EXC_ARITHMETIC";
+ case EXC_EMULATION: return "EXC_EMULATION";
+ case EXC_SOFTWARE: return "EXC_SOFTWARE";
+ case EXC_BREAKPOINT: return "EXC_BREAKPOINT";
+ case EXC_SYSCALL: return "EXC_SYSCALL";
+ case EXC_MACH_SYSCALL: return "EXC_MACH_SYSCALL";
+ case EXC_RPC_ALERT: return "EXC_RPC_ALERT";
+#ifdef EXC_CRASH
+ case EXC_CRASH: return "EXC_CRASH";
+#endif
+ default:
+ break;
+ }
+ return NULL;
+}
+
+
+
diff --git a/tools/debugserver/source/MacOSX/MachException.h b/tools/debugserver/source/MacOSX/MachException.h
new file mode 100644
index 000000000000..c831479f2b6d
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/MachException.h
@@ -0,0 +1,133 @@
+//===-- MachException.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/18/07.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef __MachException_h__
+#define __MachException_h__
+
+#include <mach/mach.h>
+#include <vector>
+
+class MachProcess;
+class PThreadMutex;
+
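+// A MachMessage is a raw mach_msg buffer: the header overlays the first bytes
+// and the 1024 byte payload leaves room for the variable-length exception and
+// reply messages that mach_msg() reads and writes.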
+typedef union MachMessageTag
+{
+ mach_msg_header_t hdr;
+ char data[1024];
+} MachMessage;
+
+
+class MachException
+{
+public:
+
+ struct PortInfo
+ {
+ exception_mask_t mask; // the exception mask for this task, which may be a subset of EXC_MASK_ALL...
+ exception_mask_t masks[EXC_TYPES_COUNT];
+ mach_port_t ports[EXC_TYPES_COUNT];
+ exception_behavior_t behaviors[EXC_TYPES_COUNT];
+ thread_state_flavor_t flavors[EXC_TYPES_COUNT];
+ mach_msg_type_number_t count;
+
+ kern_return_t Save(task_t task);
+ kern_return_t Restore(task_t task);
+ };
+
+ struct Data
+ {
+ task_t task_port;
+ thread_t thread_port;
+ exception_type_t exc_type;
+ std::vector<mach_exception_data_type_t> exc_data;
+ Data() :
+ task_port(TASK_NULL),
+ thread_port(THREAD_NULL),
+ exc_type(0),
+ exc_data()
+ {
+ }
+
+ void Clear()
+ {
+ task_port = TASK_NULL;
+ thread_port = THREAD_NULL;
+ exc_type = 0;
+ exc_data.clear();
+ }
+ bool IsValid() const
+ {
+ return task_port != TASK_NULL &&
+ thread_port != THREAD_NULL &&
+ exc_type != 0;
+ }
+ // Return the SoftSignal for this MachException data, or zero if there is none
+ int SoftSignal() const
+ {
+ if (exc_type == EXC_SOFTWARE && exc_data.size() == 2 && exc_data[0] == EXC_SOFT_SIGNAL)
+ return static_cast<int>(exc_data[1]);
+ return 0;
+ }
+ bool IsBreakpoint() const
+ {
+ return (exc_type == EXC_BREAKPOINT || ((exc_type == EXC_SOFTWARE) && exc_data[0] == 1));
+ }
+ void Dump() const;
+ void DumpStopReason() const;
+ bool GetStopInfo(struct DNBThreadStopInfo *stop_info) const;
+ };
+
+ struct Message
+ {
+ MachMessage exc_msg;
+ MachMessage reply_msg;
+ Data state;
+
+ Message() :
+ state()
+ {
+ memset(&exc_msg, 0, sizeof(exc_msg));
+ memset(&reply_msg, 0, sizeof(reply_msg));
+ }
+ bool CatchExceptionRaise(task_t task);
+ void Dump() const;
+ kern_return_t Reply (MachProcess *process, int signal);
+ kern_return_t Receive( mach_port_t receive_port,
+ mach_msg_option_t options,
+ mach_msg_timeout_t timeout,
+ mach_port_t notify_port = MACH_PORT_NULL);
+
+ typedef std::vector<Message> collection;
+ typedef collection::iterator iterator;
+ typedef collection::const_iterator const_iterator;
+ };
+
+ enum
+ {
+ e_actionForward, // Forward signal to inferior process
+ e_actionStop, // Stop when this signal is received
+ };
+ struct Action
+ {
+ task_t task_port; // Set to TASK_NULL for any TASK
+ thread_t thread_port; // Set to THREAD_NULL for any thread
+ exception_type_t exc_mask; // Mach exception mask to watch for
+ std::vector<mach_exception_data_type_t> exc_data_mask; // Mask to apply to exception data, or empty to ignore exc_data value for exception
+ std::vector<mach_exception_data_type_t> exc_data_value; // Value to compare to exception data after masking, or empty to ignore exc_data value for exception
+ uint8_t flags; // Action flags describing what to do with the exception
+ };
+ static const char *Name(exception_type_t exc_type);
+};
+
+#endif
diff --git a/tools/debugserver/source/MacOSX/MachProcess.h b/tools/debugserver/source/MacOSX/MachProcess.h
new file mode 100644
index 000000000000..3be0b2dbabb3
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/MachProcess.h
@@ -0,0 +1,364 @@
+//===-- MachProcess.h -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/15/07.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __MachProcess_h__
+#define __MachProcess_h__
+
+#include <mach/mach.h>
+#include <sys/signal.h>
+#include <pthread.h>
+#include <vector>
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "DNBDefs.h"
+#include "DNBBreakpoint.h"
+#include "DNBError.h"
+#include "DNBThreadResumeActions.h"
+#include "MachException.h"
+#include "MachVMMemory.h"
+#include "MachTask.h"
+#include "MachThreadList.h"
+#include "PThreadCondition.h"
+#include "PThreadEvent.h"
+#include "PThreadMutex.h"
+#include "Genealogy.h"
+#include "ThreadInfo.h"
+#include "JSONGenerator.h"
+
+class DNBThreadResumeActions;
+
+class MachProcess
+{
+public:
+ //----------------------------------------------------------------------
+ // Constructors and Destructors
+ //----------------------------------------------------------------------
+ MachProcess ();
+ ~MachProcess ();
+
+ //----------------------------------------------------------------------
+ // Child process control
+ //----------------------------------------------------------------------
+ pid_t AttachForDebug (pid_t pid, char *err_str, size_t err_len);
+ pid_t LaunchForDebug (const char *path,
+ char const *argv[],
+ char const *envp[],
+ const char *working_directory,
+ const char *stdin_path,
+ const char *stdout_path,
+ const char *stderr_path,
+ bool no_stdio,
+ nub_launch_flavor_t launch_flavor,
+ int disable_aslr,
+ const char *event_data,
+ DNBError &err);
+
+ static uint32_t GetCPUTypeForLocalProcess (pid_t pid);
+ static pid_t ForkChildForPTraceDebugging (const char *path, char const *argv[], char const *envp[], MachProcess* process, DNBError &err);
+ static pid_t PosixSpawnChildForPTraceDebugging (const char *path,
+ cpu_type_t cpu_type,
+ char const *argv[],
+ char const *envp[],
+ const char *working_directory,
+ const char *stdin_path,
+ const char *stdout_path,
+ const char *stderr_path,
+ bool no_stdio,
+ MachProcess* process,
+ int disable_aslr,
+ DNBError& err);
+ nub_addr_t GetDYLDAllImageInfosAddress ();
+ static const void * PrepareForAttach (const char *path, nub_launch_flavor_t launch_flavor, bool waitfor, DNBError &err_str);
+ static void CleanupAfterAttach (const void *attach_token, nub_launch_flavor_t launch_flavor, bool success, DNBError &err_str);
+ static nub_process_t CheckForProcess (const void *attach_token, nub_launch_flavor_t launch_flavor);
+#if defined(WITH_BKS) || defined(WITH_FBS)
+ pid_t BoardServiceLaunchForDebug (const char *app_bundle_path, char const *argv[], char const *envp[], bool no_stdio, bool disable_aslr, const char *event_data, DNBError &launch_err);
+ pid_t BoardServiceForkChildForPTraceDebugging (const char *path, char const *argv[], char const *envp[], bool no_stdio, bool disable_aslr, const char *event_data, DNBError &launch_err);
+ bool BoardServiceSendEvent (const char *event, DNBError &error);
+#endif
+ static bool GetOSVersionNumbers (uint64_t *major, uint64_t *minor, uint64_t *patch);
+#ifdef WITH_BKS
+ static void BKSCleanupAfterAttach (const void *attach_token, DNBError &err_str);
+#endif // WITH_BKS
+#ifdef WITH_FBS
+ static void FBSCleanupAfterAttach (const void *attach_token, DNBError &err_str);
+#endif // WITH_FBS
+#ifdef WITH_SPRINGBOARD
+ pid_t SBLaunchForDebug (const char *app_bundle_path, char const *argv[], char const *envp[], bool no_stdio, bool disable_aslr, DNBError &launch_err);
+ static pid_t SBForkChildForPTraceDebugging (const char *path, char const *argv[], char const *envp[], bool no_stdio, MachProcess* process, DNBError &launch_err);
+#endif // WITH_SPRINGBOARD
+ nub_addr_t LookupSymbol (const char *name, const char *shlib);
+ void SetNameToAddressCallback (DNBCallbackNameToAddress callback, void *baton)
+ {
+ m_name_to_addr_callback = callback;
+ m_name_to_addr_baton = baton;
+ }
+ void SetSharedLibraryInfoCallback (DNBCallbackCopyExecutableImageInfos callback, void *baton)
+ {
+ m_image_infos_callback = callback;
+ m_image_infos_baton = baton;
+ }
+
+ bool Resume (const DNBThreadResumeActions& thread_actions);
+ bool Signal (int signal, const struct timespec *timeout_abstime = NULL);
+ bool Interrupt();
+ bool SendEvent (const char *event, DNBError &send_err);
+ bool Kill (const struct timespec *timeout_abstime = NULL);
+ bool Detach ();
+ nub_size_t ReadMemory (nub_addr_t addr, nub_size_t size, void *buf);
+ nub_size_t WriteMemory (nub_addr_t addr, nub_size_t size, const void *buf);
+
+ //----------------------------------------------------------------------
+ // Path and arg accessors
+ //----------------------------------------------------------------------
+ const char * Path () const { return m_path.c_str(); }
+ size_t ArgumentCount () const { return m_args.size(); }
+ const char * ArgumentAtIndex (size_t arg_idx) const
+ {
+ if (arg_idx < m_args.size())
+ return m_args[arg_idx].c_str();
+ return NULL;
+ }
+
+ //----------------------------------------------------------------------
+ // Breakpoint functions
+ //----------------------------------------------------------------------
+ DNBBreakpoint * CreateBreakpoint (nub_addr_t addr, nub_size_t length, bool hardware);
+ bool DisableBreakpoint (nub_addr_t addr, bool remove);
+ void DisableAllBreakpoints (bool remove);
+ bool EnableBreakpoint (nub_addr_t addr);
+ DNBBreakpointList& Breakpoints() { return m_breakpoints; }
+ const DNBBreakpointList& Breakpoints() const { return m_breakpoints; }
+
+ //----------------------------------------------------------------------
+ // Watchpoint functions
+ //----------------------------------------------------------------------
+ DNBBreakpoint * CreateWatchpoint (nub_addr_t addr, nub_size_t length, uint32_t watch_type, bool hardware);
+ bool DisableWatchpoint (nub_addr_t addr, bool remove);
+ void DisableAllWatchpoints (bool remove);
+ bool EnableWatchpoint (nub_addr_t addr);
+ uint32_t GetNumSupportedHardwareWatchpoints () const;
+ DNBBreakpointList& Watchpoints() { return m_watchpoints; }
+ const DNBBreakpointList& Watchpoints() const { return m_watchpoints; }
+
+ //----------------------------------------------------------------------
+ // Exception thread functions
+ //----------------------------------------------------------------------
+ bool StartSTDIOThread ();
+ static void * STDIOThread (void *arg);
+ void ExceptionMessageReceived (const MachException::Message& exceptionMessage);
+ task_t ExceptionMessageBundleComplete ();
+ void SharedLibrariesUpdated ();
+ nub_size_t CopyImageInfos (struct DNBExecutableImageInfo **image_infos, bool only_changed);
+
+ //----------------------------------------------------------------------
+ // Profile functions
+ //----------------------------------------------------------------------
+ void SetEnableAsyncProfiling (bool enable, uint64_t internal_usec, DNBProfileDataScanType scan_type);
+ bool IsProfilingEnabled () { return m_profile_enabled; }
+ useconds_t ProfileInterval () { return m_profile_interval_usec; }
+ bool StartProfileThread ();
+ static void * ProfileThread (void *arg);
+ void SignalAsyncProfileData (const char *info);
+ size_t GetAsyncProfileData (char *buf, size_t buf_size);
+
+ //----------------------------------------------------------------------
+ // Accessors
+ //----------------------------------------------------------------------
+ pid_t ProcessID () const { return m_pid; }
+ bool ProcessIDIsValid () const { return m_pid > 0; }
+ pid_t SetProcessID (pid_t pid);
+ MachTask& Task() { return m_task; }
+ const MachTask& Task() const { return m_task; }
+
+ PThreadEvent& Events() { return m_events; }
+ const DNBRegisterSetInfo *
+ GetRegisterSetInfo (nub_thread_t tid, nub_size_t *num_reg_sets) const;
+ bool GetRegisterValue (nub_thread_t tid, uint32_t set, uint32_t reg, DNBRegisterValue *reg_value) const;
+ bool SetRegisterValue (nub_thread_t tid, uint32_t set, uint32_t reg, const DNBRegisterValue *value) const;
+ nub_bool_t SyncThreadState (nub_thread_t tid);
+ const char * ThreadGetName (nub_thread_t tid);
+ nub_state_t ThreadGetState (nub_thread_t tid);
+ ThreadInfo::QoS GetRequestedQoS (nub_thread_t tid, nub_addr_t tsd, uint64_t dti_qos_class_index);
+ nub_addr_t GetPThreadT (nub_thread_t tid);
+ nub_addr_t GetDispatchQueueT (nub_thread_t tid);
+ nub_addr_t GetTSDAddressForThread (nub_thread_t tid, uint64_t plo_pthread_tsd_base_address_offset, uint64_t plo_pthread_tsd_base_offset, uint64_t plo_pthread_tsd_entry_size);
+ JSONGenerator::ObjectSP GetLoadedDynamicLibrariesInfos (nub_process_t pid, nub_addr_t image_list_address, nub_addr_t image_count);
+
+ nub_size_t GetNumThreads () const;
+ nub_thread_t GetThreadAtIndex (nub_size_t thread_idx) const;
+ nub_thread_t GetCurrentThread ();
+ nub_thread_t GetCurrentThreadMachPort ();
+ nub_thread_t SetCurrentThread (nub_thread_t tid);
+ MachThreadList & GetThreadList() { return m_thread_list; }
+ bool GetThreadStoppedReason(nub_thread_t tid, struct DNBThreadStopInfo *stop_info);
+ void DumpThreadStoppedReason(nub_thread_t tid) const;
+ const char * GetThreadInfo (nub_thread_t tid) const;
+
+ nub_thread_t GetThreadIDForMachPortNumber (thread_t mach_port_number) const;
+
+ uint32_t GetCPUType ();
+ nub_state_t GetState ();
+ void SetState (nub_state_t state);
+ bool IsRunning (nub_state_t state)
+ {
+ return state == eStateRunning || IsStepping(state);
+ }
+ bool IsStepping (nub_state_t state)
+ {
+ return state == eStateStepping;
+ }
+ bool CanResume (nub_state_t state)
+ {
+ return state == eStateStopped;
+ }
+
+ bool GetExitStatus(int* status)
+ {
+ if (GetState() == eStateExited)
+ {
+ if (status)
+ *status = m_exit_status;
+ return true;
+ }
+ return false;
+ }
+ void SetExitStatus(int status)
+ {
+ m_exit_status = status;
+ SetState(eStateExited);
+ }
+ const char * GetExitInfo ()
+ {
+ return m_exit_info.c_str();
+ }
+
+ void SetExitInfo (const char *info);
+
+ uint32_t StopCount() const { return m_stop_count; }
+ void SetChildFileDescriptors (int stdin_fileno, int stdout_fileno, int stderr_fileno)
+ {
+ m_child_stdin = stdin_fileno;
+ m_child_stdout = stdout_fileno;
+ m_child_stderr = stderr_fileno;
+ }
+
+ int GetStdinFileDescriptor () const { return m_child_stdin; }
+ int GetStdoutFileDescriptor () const { return m_child_stdout; }
+ int GetStderrFileDescriptor () const { return m_child_stderr; }
+ void AppendSTDOUT (char* s, size_t len);
+ size_t GetAvailableSTDOUT (char *buf, size_t buf_size);
+ size_t GetAvailableSTDERR (char *buf, size_t buf_size);
+ void CloseChildFileDescriptors ()
+ {
+ if (m_child_stdin >= 0)
+ {
+ ::close (m_child_stdin);
+ m_child_stdin = -1;
+ }
+ if (m_child_stdout >= 0)
+ {
+ ::close (m_child_stdout);
+ m_child_stdout = -1;
+ }
+ if (m_child_stderr >= 0)
+ {
+ ::close (m_child_stderr);
+ m_child_stderr = -1;
+ }
+ }
+
+ bool ProcessUsingSpringBoard() const { return (m_flags & eMachProcessFlagsUsingSBS) != 0; }
+ bool ProcessUsingBackBoard() const { return (m_flags & eMachProcessFlagsUsingBKS) != 0; }
+
+ Genealogy::ThreadActivitySP GetGenealogyInfoForThread (nub_thread_t tid, bool &timed_out);
+
+ Genealogy::ProcessExecutableInfoSP GetGenealogyImageInfo (size_t idx);
+
+ DNBProfileDataScanType GetProfileScanType () { return m_profile_scan_type; }
+
+private:
+ enum
+ {
+ eMachProcessFlagsNone = 0,
+ eMachProcessFlagsAttached = (1 << 0),
+ eMachProcessFlagsUsingSBS = (1 << 1),
+ eMachProcessFlagsUsingBKS = (1 << 2),
+ eMachProcessFlagsUsingFBS = (1 << 3)
+ };
+ void Clear (bool detaching = false);
+ void ReplyToAllExceptions ();
+ void PrivateResume ();
+
+ uint32_t Flags () const { return m_flags; }
+ nub_state_t DoSIGSTOP (bool clear_bps_and_wps, bool allow_running, uint32_t *thread_idx_ptr);
+
+ pid_t m_pid; // Process ID of child process
+ cpu_type_t m_cpu_type; // The CPU type of this process
+ int m_child_stdin;
+ int m_child_stdout;
+ int m_child_stderr;
+ std::string m_path; // A path to the executable if we have one
+ std::vector<std::string> m_args; // The arguments with which the process was lauched
+ int m_exit_status; // The exit status for the process
+ std::string m_exit_info; // Any extra info that we may have about the exit
+ MachTask m_task; // The mach task for this process
+ uint32_t m_flags; // Process specific flags (see eMachProcessFlags enums)
+ uint32_t m_stop_count; // A count of how many times we have stopped
+ pthread_t m_stdio_thread; // Thread ID for the thread that watches for child process stdio
+ PThreadMutex m_stdio_mutex; // Multithreaded protection for stdio
+ std::string m_stdout_data;
+
+ bool m_profile_enabled; // A flag to indicate if profiling is enabled
+ useconds_t m_profile_interval_usec; // If enabled, the profiling interval in microseconds
+ DNBProfileDataScanType m_profile_scan_type; // Indicates what needs to be profiled
+ pthread_t m_profile_thread; // Thread ID for the thread that profiles the inferior
+ PThreadMutex m_profile_data_mutex; // Multithreaded protection for profile info data
+ std::vector<std::string> m_profile_data; // Profile data, must be protected by m_profile_data_mutex
+
+ DNBThreadResumeActions m_thread_actions; // The thread actions for the current MachProcess::Resume() call
+ MachException::Message::collection
+ m_exception_messages; // A collection of exception messages caught when listening to the exception port
+ PThreadMutex m_exception_messages_mutex; // Multithreaded protection for m_exception_messages
+
+ MachThreadList m_thread_list; // A list of threads that is maintained/updated after each stop
+ Genealogy m_activities; // A list of activities that is updated after every stop lazily
+ nub_state_t m_state; // The state of our process
+ PThreadMutex m_state_mutex; // Multithreaded protection for m_state
+ PThreadEvent m_events; // Process related events in the child processes lifetime can be waited upon
+ PThreadEvent m_private_events; // Used to coordinate running and stopping the process without affecting m_events
+ DNBBreakpointList m_breakpoints; // Breakpoint list for this process
+ DNBBreakpointList m_watchpoints; // Watchpoint list for this process
+ DNBCallbackNameToAddress m_name_to_addr_callback;
+ void * m_name_to_addr_baton;
+ DNBCallbackCopyExecutableImageInfos
+ m_image_infos_callback;
+ void * m_image_infos_baton;
+ std::string m_bundle_id; // If we are a SB or BKS process, this will be our bundle ID.
+ int m_sent_interrupt_signo; // When we call MachProcess::Interrupt(), we want to send a single signal
+ // to the inferior and only send the signal if we aren't already stopped.
+ // If we end up sending a signal to stop the process we store it until we
+ // receive an exception with this signal. This helps us to verify we got
+ // the signal that interrupted the process. We might stop due to another
+ // reason after an interrupt signal is sent, so this helps us ensure that
+ // we don't report a spurious stop on the next resume.
+ int m_auto_resume_signo; // If we resume the process and still haven't received our interrupt signal
+ // acknowledgement, we expect to receive it shortly after the next resume. We store the
+ // interrupt signal in this variable so when we get the interrupt signal
+ // as the sole reason for the process being stopped, we can auto resume
+ // the process.
+ bool m_did_exec;
+};
+
+
+#endif // __MachProcess_h__
diff --git a/tools/debugserver/source/MacOSX/MachProcess.mm b/tools/debugserver/source/MacOSX/MachProcess.mm
new file mode 100644
index 000000000000..b9e06307a4aa
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/MachProcess.mm
@@ -0,0 +1,3685 @@
+//===-- MachProcess.mm ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/15/07.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DNB.h"
+#include <inttypes.h>
+#include <mach/mach.h>
+#include <signal.h>
+#include <spawn.h>
+#include <sys/fcntl.h>
+#include <sys/types.h>
+#include <sys/ptrace.h>
+#include <sys/stat.h>
+#include <sys/sysctl.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <mach-o/loader.h>
+#include <uuid/uuid.h>
+#include "MacOSX/CFUtils.h"
+#include "SysSignal.h"
+
+#include <algorithm>
+#include <map>
+
+#import <Foundation/Foundation.h>
+
+#include "DNBDataRef.h"
+#include "DNBLog.h"
+#include "DNBThreadResumeActions.h"
+#include "DNBTimer.h"
+#include "MachProcess.h"
+#include "PseudoTerminal.h"
+
+#include "CFBundle.h"
+#include "CFData.h"
+#include "CFString.h"
+
+#ifdef WITH_SPRINGBOARD
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <SpringBoardServices/SpringBoardServer.h>
+#include <SpringBoardServices/SBSWatchdogAssertion.h>
+
+static bool
+IsSBProcess (nub_process_t pid)
+{
+ CFReleaser<CFArrayRef> appIdsForPID (::SBSCopyDisplayIdentifiersForProcessID(pid));
+ return appIdsForPID.get() != NULL;
+}
+
+#endif // WITH_SPRINGBOARD
+
+#if defined (WITH_SPRINGBOARD) || defined (WITH_BKS) || defined (WITH_FBS)
+// This returns a CFRetained pointer to the Bundle ID for app_bundle_path,
+// or NULL if there was some problem getting the bundle id.
+static CFStringRef CopyBundleIDForPath (const char *app_bundle_path, DNBError &err_str);
+#endif
+
+#if defined(WITH_BKS) || defined(WITH_FBS)
+#import <Foundation/Foundation.h>
+static const int OPEN_APPLICATION_TIMEOUT_ERROR = 111;
+typedef void (*SetErrorFunction) (NSInteger, DNBError &);
+typedef bool (*CallOpenApplicationFunction) (NSString *bundleIDNSStr, NSDictionary *options, DNBError &error, pid_t *return_pid);
+// This function runs the BKSSystemService (or FBSSystemService) method openApplication:options:clientPort:withResult,
+// messaging the app passed in bundleIDNSStr.
+// The function should be run inside of an NSAutoReleasePool.
+//
+// It will use the "options" dictionary passed in, and fill the error passed in if there is an error.
+// If return_pid is not NULL, we'll fetch the pid that was made for the bundleID.
+// If bundleIDNSStr is NULL, then the system application will be messaged.
+
+template <typename OpenFlavor, typename ErrorFlavor, ErrorFlavor no_error_enum_value, SetErrorFunction error_function>
+static bool
+CallBoardSystemServiceOpenApplication (NSString *bundleIDNSStr, NSDictionary *options, DNBError &error, pid_t *return_pid)
+{
+ // Now make our systemService:
+ OpenFlavor *system_service = [[OpenFlavor alloc] init];
+
+ if (bundleIDNSStr == nil)
+ {
+ bundleIDNSStr = [system_service systemApplicationBundleIdentifier];
+ if (bundleIDNSStr == nil)
+ {
+ // Okay, no system app...
+ error.SetErrorString("No system application to message.");
+ return false;
+ }
+ }
+
+ mach_port_t client_port = [system_service createClientPort];
+ __block dispatch_semaphore_t semaphore = dispatch_semaphore_create(0);
+ __block ErrorFlavor open_app_error = no_error_enum_value;
+ bool wants_pid = (return_pid != NULL);
+ __block pid_t pid_in_block;
+
+ const char *cstr = [bundleIDNSStr UTF8String];
+ if (!cstr)
+ cstr = "<Unknown Bundle ID>";
+
+ DNBLog ("About to launch process for bundle ID: %s", cstr);
+ [system_service openApplication: bundleIDNSStr
+ options: options
+ clientPort: client_port
+ withResult: ^(NSError *bks_error)
+ {
+ // The system service will cleanup the client port we created for us.
+ if (bks_error)
+ open_app_error = (ErrorFlavor)[bks_error code];
+
+ if (open_app_error == no_error_enum_value)
+ {
+ if (wants_pid)
+ {
+ pid_in_block = [system_service pidForApplication: bundleIDNSStr];
+ DNBLog("In completion handler, got pid for bundle id, pid: %d.", pid_in_block);
+ DNBLogThreadedIf(LOG_PROCESS, "In completion handler, got pid for bundle id, pid: %d.", pid_in_block);
+ }
+ else
+ DNBLogThreadedIf (LOG_PROCESS, "In completion handler: success.");
+ }
+ else
+ {
+ const char *error_str = [(NSString *)[bks_error localizedDescription] UTF8String];
+ DNBLogThreadedIf(LOG_PROCESS, "In completion handler for send event, got error \"%s\"(%ld).",
+ error_str ? error_str : "<unknown error>",
+ open_app_error);
+ // REMOVE ME
+ DNBLogError ("In completion handler for send event, got error \"%s\"(%ld).",
+ error_str ? error_str : "<unknown error>",
+ open_app_error);
+ }
+
+ [system_service release];
+ dispatch_semaphore_signal(semaphore);
+ }
+
+ ];
+
+ const uint32_t timeout_secs = 9;
+
+ dispatch_time_t timeout = dispatch_time(DISPATCH_TIME_NOW, timeout_secs * NSEC_PER_SEC);
+
+ long success = dispatch_semaphore_wait(semaphore, timeout) == 0;
+
+ dispatch_release(semaphore);
+
+ if (!success)
+ {
+ DNBLogError("timed out trying to send openApplication to %s.", cstr);
+ error.SetError (OPEN_APPLICATION_TIMEOUT_ERROR, DNBError::Generic);
+ error.SetErrorString ("timed out trying to launch app");
+ }
+ else if (open_app_error != no_error_enum_value)
+ {
+ error_function (open_app_error, error);
+ DNBLogError("unable to launch the application with CFBundleIdentifier '%s' bks_error = %u", cstr, open_app_error);
+ success = false;
+ }
+ else if (wants_pid)
+ {
+ *return_pid = pid_in_block;
+ DNBLogThreadedIf (LOG_PROCESS, "Out of completion handler, pid from block %d and passing out: %d", pid_in_block, *return_pid);
+ }
+
+
+ return success;
+}
+#endif
+
+#ifdef WITH_BKS
+#import <Foundation/Foundation.h>
+extern "C"
+{
+#import <BackBoardServices/BackBoardServices.h>
+#import <BackBoardServices/BKSSystemService_LaunchServices.h>
+#import <BackBoardServices/BKSOpenApplicationConstants_Private.h>
+}
+
+static bool
+IsBKSProcess (nub_process_t pid)
+{
+ BKSApplicationStateMonitor *state_monitor = [[BKSApplicationStateMonitor alloc] init];
+ BKSApplicationState app_state = [state_monitor mostElevatedApplicationStateForPID: pid];
+ return app_state != BKSApplicationStateUnknown;
+}
+
+static void
+SetBKSError (NSInteger error_code, DNBError &error)
+{
+ error.SetError (error_code, DNBError::BackBoard);
+ NSString *err_nsstr = ::BKSOpenApplicationErrorCodeToString((BKSOpenApplicationErrorCode) error_code);
+ const char *err_str = NULL;
+ if (err_nsstr == NULL)
+ err_str = "unknown BKS error";
+ else
+ {
+ err_str = [err_nsstr UTF8String];
+ if (err_str == NULL)
+ err_str = "unknown BKS error";
+ }
+ error.SetErrorString(err_str);
+}
+
+static bool
+BKSAddEventDataToOptions (NSMutableDictionary *options, const char *event_data, DNBError &option_error)
+{
+ if (strcmp (event_data, "BackgroundContentFetching") == 0)
+ {
+ DNBLog("Setting ActivateForEvent key in options dictionary.");
+ NSDictionary *event_details = [NSDictionary dictionary];
+ NSDictionary *event_dictionary = [NSDictionary dictionaryWithObject:event_details forKey:BKSActivateForEventOptionTypeBackgroundContentFetching];
+ [options setObject: event_dictionary forKey: BKSOpenApplicationOptionKeyActivateForEvent];
+ return true;
+ }
+ else
+ {
+ DNBLogError ("Unrecognized event type: %s. Ignoring.", event_data);
+ option_error.SetErrorString("Unrecognized event data.");
+ return false;
+ }
+
+}
+
+static NSMutableDictionary *
+BKSCreateOptionsDictionary(const char *app_bundle_path, NSMutableArray *launch_argv, NSMutableDictionary *launch_envp, NSString *stdio_path, bool disable_aslr, const char *event_data)
+{
+ NSMutableDictionary *debug_options = [NSMutableDictionary dictionary];
+ if (launch_argv != nil)
+ [debug_options setObject: launch_argv forKey: BKSDebugOptionKeyArguments];
+ if (launch_envp != nil)
+ [debug_options setObject: launch_envp forKey: BKSDebugOptionKeyEnvironment];
+
+ [debug_options setObject: stdio_path forKey: BKSDebugOptionKeyStandardOutPath];
+ [debug_options setObject: stdio_path forKey: BKSDebugOptionKeyStandardErrorPath];
+ [debug_options setObject: [NSNumber numberWithBool: YES] forKey: BKSDebugOptionKeyWaitForDebugger];
+ if (disable_aslr)
+ [debug_options setObject: [NSNumber numberWithBool: YES] forKey: BKSDebugOptionKeyDisableASLR];
+
+ // That will go in the overall dictionary:
+
+ NSMutableDictionary *options = [NSMutableDictionary dictionary];
+ [options setObject: debug_options forKey: BKSOpenApplicationOptionKeyDebuggingOptions];
+ // And there are some other options at the top level in this dictionary:
+ [options setObject: [NSNumber numberWithBool: YES] forKey: BKSOpenApplicationOptionKeyUnlockDevice];
+
+ DNBError error;
+ BKSAddEventDataToOptions (options, event_data, error);
+
+ return options;
+}
+
+static CallOpenApplicationFunction BKSCallOpenApplicationFunction = CallBoardSystemServiceOpenApplication<BKSSystemService, BKSOpenApplicationErrorCode, BKSOpenApplicationErrorCodeNone, SetBKSError>;
+#endif // WITH_BKS
+
+#ifdef WITH_FBS
+#import <Foundation/Foundation.h>
+extern "C"
+{
+#import <FrontBoardServices/FrontBoardServices.h>
+#import <FrontBoardServices/FBSSystemService_LaunchServices.h>
+#import <FrontBoardServices/FBSOpenApplicationConstants_Private.h>
+#import <MobileCoreServices/MobileCoreServices.h>
+#import <MobileCoreServices/LSResourceProxy.h>
+}
+
+#ifdef WITH_BKS
+static bool
+IsFBSProcess (nub_process_t pid)
+{
+ BKSApplicationStateMonitor *state_monitor = [[BKSApplicationStateMonitor alloc] init];
+ BKSApplicationState app_state = [state_monitor mostElevatedApplicationStateForPID: pid];
+ return app_state != BKSApplicationStateUnknown;
+}
+#else
+static bool
+IsFBSProcess (nub_process_t pid)
+{
+ // FIXME: What is the FBS equivalent of BKSApplicationStateMonitor
+ return true;
+}
+#endif
+
+static void
+SetFBSError (NSInteger error_code, DNBError &error)
+{
+ error.SetError ((DNBError::ValueType) error_code, DNBError::FrontBoard);
+ NSString *err_nsstr = ::FBSOpenApplicationErrorCodeToString((FBSOpenApplicationErrorCode) error_code);
+ const char *err_str = NULL;
+ if (err_nsstr == NULL)
+ err_str = "unknown FBS error";
+ else
+ {
+ err_str = [err_nsstr UTF8String];
+ if (err_str == NULL)
+ err_str = "unknown FBS error";
+ }
+ error.SetErrorString(err_str);
+}
+
+static bool
+FBSAddEventDataToOptions (NSMutableDictionary *options, const char *event_data, DNBError &option_error)
+{
+ if (strcmp (event_data, "BackgroundContentFetching") == 0)
+ {
+ DNBLog("Setting ActivateForEvent key in options dictionary.");
+ NSDictionary *event_details = [NSDictionary dictionary];
+ NSDictionary *event_dictionary = [NSDictionary dictionaryWithObject:event_details forKey:FBSActivateForEventOptionTypeBackgroundContentFetching];
+ [options setObject: event_dictionary forKey: FBSOpenApplicationOptionKeyActivateForEvent];
+ return true;
+ }
+ else
+ {
+ DNBLogError ("Unrecognized event type: %s. Ignoring.", event_data);
+ option_error.SetErrorString("Unrecognized event data.");
+ return false;
+ }
+
+}
+
+static NSMutableDictionary *
+FBSCreateOptionsDictionary(const char *app_bundle_path, NSMutableArray *launch_argv, NSDictionary *launch_envp, NSString *stdio_path, bool disable_aslr, const char *event_data)
+{
+ NSMutableDictionary *debug_options = [NSMutableDictionary dictionary];
+
+ if (launch_argv != nil)
+ [debug_options setObject: launch_argv forKey: FBSDebugOptionKeyArguments];
+ if (launch_envp != nil)
+ [debug_options setObject: launch_envp forKey: FBSDebugOptionKeyEnvironment];
+
+ [debug_options setObject: stdio_path forKey: FBSDebugOptionKeyStandardOutPath];
+ [debug_options setObject: stdio_path forKey: FBSDebugOptionKeyStandardErrorPath];
+ [debug_options setObject: [NSNumber numberWithBool: YES] forKey: FBSDebugOptionKeyWaitForDebugger];
+ if (disable_aslr)
+ [debug_options setObject: [NSNumber numberWithBool: YES] forKey: FBSDebugOptionKeyDisableASLR];
+
+ // That will go in the overall dictionary:
+
+ NSMutableDictionary *options = [NSMutableDictionary dictionary];
+ [options setObject: debug_options forKey: FBSOpenApplicationOptionKeyDebuggingOptions];
+ // And there are some other options at the top level in this dictionary:
+ [options setObject: [NSNumber numberWithBool: YES] forKey: FBSOpenApplicationOptionKeyUnlockDevice];
+
+ // We have to get the "sequence ID & UUID" for this app bundle path and send them to FBS:
+
+ NSURL *app_bundle_url = [NSURL fileURLWithPath: [NSString stringWithUTF8String: app_bundle_path] isDirectory: YES];
+ LSApplicationProxy *app_proxy = [LSApplicationProxy applicationProxyForBundleURL: app_bundle_url];
+ if (app_proxy)
+ {
+ DNBLog("Sending AppProxy info: sequence no: %lu, GUID: %s.", app_proxy.sequenceNumber, [app_proxy.cacheGUID.UUIDString UTF8String]);
+ [options setObject: [NSNumber numberWithUnsignedInteger: app_proxy.sequenceNumber] forKey: FBSOpenApplicationOptionKeyLSSequenceNumber];
+ [options setObject: app_proxy.cacheGUID.UUIDString forKey: FBSOpenApplicationOptionKeyLSCacheGUID];
+ }
+
+ DNBError error;
+ FBSAddEventDataToOptions (options, event_data, error);
+
+ return options;
+}
+static CallOpenApplicationFunction FBSCallOpenApplicationFunction = CallBoardSystemServiceOpenApplication<FBSSystemService, FBSOpenApplicationErrorCode, FBSOpenApplicationErrorCodeNone, SetFBSError>;
+#endif // WITH_FBS
+
+#if 0
+#define DEBUG_LOG(fmt, ...) printf(fmt, ## __VA_ARGS__)
+#else
+#define DEBUG_LOG(fmt, ...)
+#endif
+
+#ifndef MACH_PROCESS_USE_POSIX_SPAWN
+#define MACH_PROCESS_USE_POSIX_SPAWN 1
+#endif
+
+#ifndef _POSIX_SPAWN_DISABLE_ASLR
+#define _POSIX_SPAWN_DISABLE_ASLR 0x0100
+#endif
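+
+// _POSIX_SPAWN_DISABLE_ASLR is a private posix_spawnattr flag; the fallback
+// definition above keeps this file building against SDK headers that do not
+// expose it.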
+
+MachProcess::MachProcess() :
+ m_pid (0),
+ m_cpu_type (0),
+ m_child_stdin (-1),
+ m_child_stdout (-1),
+ m_child_stderr (-1),
+ m_path (),
+ m_args (),
+ m_task (this),
+ m_flags (eMachProcessFlagsNone),
+ m_stdio_thread (0),
+ m_stdio_mutex (PTHREAD_MUTEX_RECURSIVE),
+ m_stdout_data (),
+ m_profile_enabled (false),
+ m_profile_interval_usec (0),
+ m_profile_thread (0),
+ m_profile_data_mutex(PTHREAD_MUTEX_RECURSIVE),
+ m_profile_data (),
+ m_thread_actions (),
+ m_exception_messages (),
+ m_exception_messages_mutex (PTHREAD_MUTEX_RECURSIVE),
+ m_thread_list (),
+ m_activities (),
+ m_state (eStateUnloaded),
+ m_state_mutex (PTHREAD_MUTEX_RECURSIVE),
+ m_events (0, kAllEventsMask),
+ m_private_events (0, kAllEventsMask),
+ m_breakpoints (),
+ m_watchpoints (),
+ m_name_to_addr_callback(NULL),
+ m_name_to_addr_baton(NULL),
+ m_image_infos_callback(NULL),
+ m_image_infos_baton(NULL),
+ m_sent_interrupt_signo (0),
+ m_auto_resume_signo (0),
+ m_did_exec (false)
+{
+ DNBLogThreadedIf(LOG_PROCESS | LOG_VERBOSE, "%s", __PRETTY_FUNCTION__);
+}
+
+MachProcess::~MachProcess()
+{
+ DNBLogThreadedIf(LOG_PROCESS | LOG_VERBOSE, "%s", __PRETTY_FUNCTION__);
+ Clear();
+}
+
+pid_t
+MachProcess::SetProcessID(pid_t pid)
+{
+ // Free any previous process specific data or resources
+ Clear();
+ // Set the current PID appropriately
+ if (pid == 0)
+ m_pid = ::getpid ();
+ else
+ m_pid = pid;
+ return m_pid; // Return the actual PID in case a zero pid was passed in
+}
+
+nub_state_t
+MachProcess::GetState()
+{
+ // If any other threads access this we will need a mutex for it
+ PTHREAD_MUTEX_LOCKER(locker, m_state_mutex);
+ return m_state;
+}
+
+const char *
+MachProcess::ThreadGetName(nub_thread_t tid)
+{
+ return m_thread_list.GetName(tid);
+}
+
+nub_state_t
+MachProcess::ThreadGetState(nub_thread_t tid)
+{
+ return m_thread_list.GetState(tid);
+}
+
+
+nub_size_t
+MachProcess::GetNumThreads () const
+{
+ return m_thread_list.NumThreads();
+}
+
+nub_thread_t
+MachProcess::GetThreadAtIndex (nub_size_t thread_idx) const
+{
+ return m_thread_list.ThreadIDAtIndex(thread_idx);
+}
+
+nub_thread_t
+MachProcess::GetThreadIDForMachPortNumber (thread_t mach_port_number) const
+{
+ return m_thread_list.GetThreadIDByMachPortNumber (mach_port_number);
+}
+
+nub_bool_t
+MachProcess::SyncThreadState (nub_thread_t tid)
+{
+ MachThreadSP thread_sp(m_thread_list.GetThreadByID(tid));
+ if (!thread_sp)
+ return false;
+ kern_return_t kret = ::thread_abort_safely(thread_sp->MachPortNumber());
+ DNBLogThreadedIf (LOG_THREAD, "thread = 0x%8.8" PRIx32 " calling thread_abort_safely (tid) => %u (GetGPRState() for stop_count = %u)", thread_sp->MachPortNumber(), kret, thread_sp->Process()->StopCount());
+
+ if (kret == KERN_SUCCESS)
+ return true;
+ else
+ return false;
+
+}
+
+ThreadInfo::QoS
+MachProcess::GetRequestedQoS (nub_thread_t tid, nub_addr_t tsd, uint64_t dti_qos_class_index)
+{
+ return m_thread_list.GetRequestedQoS (tid, tsd, dti_qos_class_index);
+}
+
+nub_addr_t
+MachProcess::GetPThreadT (nub_thread_t tid)
+{
+ return m_thread_list.GetPThreadT (tid);
+}
+
+nub_addr_t
+MachProcess::GetDispatchQueueT (nub_thread_t tid)
+{
+ return m_thread_list.GetDispatchQueueT (tid);
+}
+
+nub_addr_t
+MachProcess::GetTSDAddressForThread (nub_thread_t tid, uint64_t plo_pthread_tsd_base_address_offset, uint64_t plo_pthread_tsd_base_offset, uint64_t plo_pthread_tsd_entry_size)
+{
+ return m_thread_list.GetTSDAddressForThread (tid, plo_pthread_tsd_base_address_offset, plo_pthread_tsd_base_offset, plo_pthread_tsd_entry_size);
+}
+
+
+
+JSONGenerator::ObjectSP
+MachProcess::GetLoadedDynamicLibrariesInfos (nub_process_t pid, nub_addr_t image_list_address, nub_addr_t image_count)
+{
+ JSONGenerator::DictionarySP reply_sp;
+
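+ // Ask the kernel for this process's kinfo_proc entry; the P_LP64 flag tells
+ // us whether the inferior uses 4 byte or 8 byte pointers.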
+ int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};
+ struct kinfo_proc processInfo;
+ size_t bufsize = sizeof(processInfo);
+ if (sysctl(mib, (unsigned)(sizeof(mib)/sizeof(int)), &processInfo, &bufsize, NULL, 0) == 0 && bufsize > 0)
+ {
+ uint32_t pointer_size = 4;
+ if (processInfo.kp_proc.p_flag & P_LP64)
+ pointer_size = 8;
+
+ struct segment
+ {
+ std::string name;
+ uint64_t vmaddr;
+ uint64_t vmsize;
+ uint64_t fileoff;
+ uint64_t filesize;
+ uint64_t maxprot;
+ uint64_t initprot;
+ uint64_t nsects;
+ uint64_t flags;
+ };
+
+ struct image_info
+ {
+ uint64_t load_address;
+ std::string pathname;
+ uint64_t mod_date;
+ struct mach_header_64 mach_header;
+ std::vector<struct segment> segments;
+ uuid_t uuid;
+ };
+ std::vector<image_info> image_infos;
+ size_t image_infos_size = image_count * 3 * pointer_size;
+
+ uint8_t *image_info_buf = (uint8_t *) malloc (image_infos_size);
+ if (image_info_buf == NULL)
+ {
+ return reply_sp;
+ }
+ if (ReadMemory (image_list_address, image_infos_size, image_info_buf) != image_infos_size)
+ {
+ return reply_sp;
+ }
+
+
+ //// First the image_infos array with (load addr, pathname, mod date) tuples
+
+
+ for (size_t i = 0; i < image_count; i++)
+ {
+ struct image_info info;
+ nub_addr_t pathname_address;
+ if (pointer_size == 4)
+ {
+ uint32_t load_address_32;
+ uint32_t pathname_address_32;
+ uint32_t mod_date_32;
+ ::memcpy (&load_address_32, image_info_buf + (i * 3 * pointer_size), 4);
+ ::memcpy (&pathname_address_32, image_info_buf + (i * 3 * pointer_size) + pointer_size, 4);
+ ::memcpy (&mod_date_32, image_info_buf + (i * 3 * pointer_size) + pointer_size + pointer_size, 4);
+ info.load_address = load_address_32;
+ info.mod_date = mod_date_32;
+ pathname_address = pathname_address_32;
+ }
+ else
+ {
+ uint64_t load_address_64;
+ uint64_t pathname_address_64;
+ uint64_t mod_date_64;
+ ::memcpy (&load_address_64, image_info_buf + (i * 3 * pointer_size), 8);
+ ::memcpy (&pathname_address_64, image_info_buf + (i * 3 * pointer_size) + pointer_size, 8);
+ ::memcpy (&mod_date_64, image_info_buf + (i * 3 * pointer_size) + pointer_size + pointer_size, 8);
+ info.load_address = load_address_64;
+ info.mod_date = mod_date_64;
+ pathname_address = pathname_address_64;
+ }
+ char strbuf[17];
+ info.pathname = "";
+ uint64_t pathname_ptr = pathname_address;
+ bool still_reading = true;
+ while (still_reading && ReadMemory (pathname_ptr, sizeof (strbuf) - 1, strbuf) == sizeof (strbuf) - 1)
+ {
+ strbuf[sizeof(strbuf) - 1] = '\0';
+ info.pathname += strbuf;
+ pathname_ptr += sizeof (strbuf) - 1;
+ // Stop if we found a NUL byte indicating the end of the string
+ for (size_t i = 0; i < sizeof(strbuf) - 1; i++)
+ {
+ if (strbuf[i] == '\0')
+ {
+ still_reading = false;
+ break;
+ }
+ }
+ }
+ uuid_clear (info.uuid);
+ image_infos.push_back (info);
+ }
+ if (image_infos.size() == 0)
+ {
+ return reply_sp;
+ }
+
+
+ //// Second, read the mach header / load commands for all the dylibs
+
+
+ for (size_t i = 0; i < image_count; i++)
+ {
+ uint64_t load_cmds_p;
+ if (pointer_size == 4)
+ {
+ struct mach_header header;
+ if (ReadMemory (image_infos[i].load_address, sizeof (struct mach_header), &header) != sizeof (struct mach_header))
+ {
+ return reply_sp;
+ }
+ load_cmds_p = image_infos[i].load_address + sizeof (struct mach_header);
+ image_infos[i].mach_header.magic = header.magic;
+ image_infos[i].mach_header.cputype = header.cputype;
+ image_infos[i].mach_header.cpusubtype = header.cpusubtype;
+ image_infos[i].mach_header.filetype = header.filetype;
+ image_infos[i].mach_header.ncmds = header.ncmds;
+ image_infos[i].mach_header.sizeofcmds = header.sizeofcmds;
+ image_infos[i].mach_header.flags = header.flags;
+ }
+ else
+ {
+ struct mach_header_64 header;
+ if (ReadMemory (image_infos[i].load_address, sizeof (struct mach_header_64), &header) != sizeof (struct mach_header_64))
+ {
+ return reply_sp;
+ }
+ load_cmds_p = image_infos[i].load_address + sizeof (struct mach_header_64);
+ image_infos[i].mach_header.magic = header.magic;
+ image_infos[i].mach_header.cputype = header.cputype;
+ image_infos[i].mach_header.cpusubtype = header.cpusubtype;
+ image_infos[i].mach_header.filetype = header.filetype;
+ image_infos[i].mach_header.ncmds = header.ncmds;
+ image_infos[i].mach_header.sizeofcmds = header.sizeofcmds;
+ image_infos[i].mach_header.flags = header.flags;
+ }
+ for (uint32_t j = 0; j < image_infos[i].mach_header.ncmds; j++)
+ {
+ struct load_command lc;
+ if (ReadMemory (load_cmds_p, sizeof (struct load_command), &lc) != sizeof (struct load_command))
+ {
+ return reply_sp;
+ }
+ if (lc.cmd == LC_SEGMENT)
+ {
+ struct segment_command seg;
+ if (ReadMemory (load_cmds_p, sizeof (struct segment_command), &seg) != sizeof (struct segment_command))
+ {
+ return reply_sp;
+ }
+ struct segment this_seg;
+ char name[17];
+ ::memset (name, 0, sizeof (name));
+ memcpy (name, seg.segname, sizeof (seg.segname));
+ this_seg.name = name;
+ this_seg.vmaddr = seg.vmaddr;
+ this_seg.vmsize = seg.vmsize;
+ this_seg.fileoff = seg.fileoff;
+ this_seg.filesize = seg.filesize;
+ this_seg.maxprot = seg.maxprot;
+ this_seg.initprot = seg.initprot;
+ this_seg.nsects = seg.nsects;
+ this_seg.flags = seg.flags;
+ image_infos[i].segments.push_back(this_seg);
+ }
+ if (lc.cmd == LC_SEGMENT_64)
+ {
+ struct segment_command_64 seg;
+ if (ReadMemory (load_cmds_p, sizeof (struct segment_command_64), &seg) != sizeof (struct segment_command_64))
+ {
+ return reply_sp;
+ }
+ struct segment this_seg;
+ char name[17];
+ ::memset (name, 0, sizeof (name));
+ memcpy (name, seg.segname, sizeof (seg.segname));
+ this_seg.name = name;
+ this_seg.vmaddr = seg.vmaddr;
+ this_seg.vmsize = seg.vmsize;
+ this_seg.fileoff = seg.fileoff;
+ this_seg.filesize = seg.filesize;
+ this_seg.maxprot = seg.maxprot;
+ this_seg.initprot = seg.initprot;
+ this_seg.nsects = seg.nsects;
+ this_seg.flags = seg.flags;
+ image_infos[i].segments.push_back(this_seg);
+ }
+ if (lc.cmd == LC_UUID)
+ {
+ struct uuid_command uuidcmd;
+ if (ReadMemory (load_cmds_p, sizeof (struct uuid_command), &uuidcmd) == sizeof (struct uuid_command))
+ uuid_copy (image_infos[i].uuid, uuidcmd.uuid);
+ }
+ load_cmds_p += lc.cmdsize;
+ }
+ }
+
+
+ //// Third, format all of the above in the JSONGenerator object.
+
+
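+ // The reply roughly has this shape (illustrative; some fields elided):
+ //   { "images" : [ { "load_address", "mod_date", "pathname", "uuid",
+ //                    "mach_header" : { ... }, "segments" : [ { ... }, ... ] },
+ //                  ... ] }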
+ JSONGenerator::ArraySP image_infos_array_sp (new JSONGenerator::Array());
+ for (size_t i = 0; i < image_count; i++)
+ {
+ JSONGenerator::DictionarySP image_info_dict_sp (new JSONGenerator::Dictionary());
+ image_info_dict_sp->AddIntegerItem ("load_address", image_infos[i].load_address);
+ image_info_dict_sp->AddIntegerItem ("mod_date", image_infos[i].mod_date);
+ image_info_dict_sp->AddStringItem ("pathname", image_infos[i].pathname);
+
+ uuid_string_t uuidstr;
+ uuid_unparse_upper (image_infos[i].uuid, uuidstr);
+ image_info_dict_sp->AddStringItem ("uuid", uuidstr);
+
+ JSONGenerator::DictionarySP mach_header_dict_sp (new JSONGenerator::Dictionary());
+ mach_header_dict_sp->AddIntegerItem ("magic", image_infos[i].mach_header.magic);
+ mach_header_dict_sp->AddIntegerItem ("cputype", image_infos[i].mach_header.cputype);
+ mach_header_dict_sp->AddIntegerItem ("cpusubtype", image_infos[i].mach_header.cpusubtype);
+ mach_header_dict_sp->AddIntegerItem ("filetype", image_infos[i].mach_header.filetype);
+
+// DynamicLoaderMacOSX doesn't currently need these fields, so don't send them.
+// mach_header_dict_sp->AddIntegerItem ("ncmds", image_infos[i].mach_header.ncmds);
+// mach_header_dict_sp->AddIntegerItem ("sizeofcmds", image_infos[i].mach_header.sizeofcmds);
+// mach_header_dict_sp->AddIntegerItem ("flags", image_infos[i].mach_header.flags);
+ image_info_dict_sp->AddItem ("mach_header", mach_header_dict_sp);
+
+ JSONGenerator::ArraySP segments_sp (new JSONGenerator::Array());
+ for (size_t j = 0; j < image_infos[i].segments.size(); j++)
+ {
+ JSONGenerator::DictionarySP segment_sp (new JSONGenerator::Dictionary());
+ segment_sp->AddStringItem ("name", image_infos[i].segments[j].name);
+ segment_sp->AddIntegerItem ("vmaddr", image_infos[i].segments[j].vmaddr);
+ segment_sp->AddIntegerItem ("vmsize", image_infos[i].segments[j].vmsize);
+ segment_sp->AddIntegerItem ("fileoff", image_infos[i].segments[j].fileoff);
+ segment_sp->AddIntegerItem ("filesize", image_infos[i].segments[j].filesize);
+ segment_sp->AddIntegerItem ("maxprot", image_infos[i].segments[j].maxprot);
+
+// DynamicLoaderMacOSX doesn't currently need these fields, so don't send them.
+// segment_sp->AddIntegerItem ("initprot", image_infos[i].segments[j].initprot);
+// segment_sp->AddIntegerItem ("nsects", image_infos[i].segments[j].nsects);
+// segment_sp->AddIntegerItem ("flags", image_infos[i].segments[j].flags);
+ segments_sp->AddItem (segment_sp);
+ }
+ image_info_dict_sp->AddItem ("segments", segments_sp);
+
+ image_infos_array_sp->AddItem (image_info_dict_sp);
+ }
+ reply_sp.reset (new JSONGenerator::Dictionary());
+ reply_sp->AddItem ("images", image_infos_array_sp);
+ }
+ return reply_sp;
+}
+
+nub_thread_t
+MachProcess::GetCurrentThread ()
+{
+ return m_thread_list.CurrentThreadID();
+}
+
+nub_thread_t
+MachProcess::GetCurrentThreadMachPort ()
+{
+ return m_thread_list.GetMachPortNumberByThreadID(m_thread_list.CurrentThreadID());
+}
+
+nub_thread_t
+MachProcess::SetCurrentThread(nub_thread_t tid)
+{
+ return m_thread_list.SetCurrentThread(tid);
+}
+
+bool
+MachProcess::GetThreadStoppedReason(nub_thread_t tid, struct DNBThreadStopInfo *stop_info)
+{
+ if (m_thread_list.GetThreadStoppedReason(tid, stop_info))
+ {
+ if (m_did_exec)
+ stop_info->reason = eStopTypeExec;
+ return true;
+ }
+ return false;
+}
+
+void
+MachProcess::DumpThreadStoppedReason(nub_thread_t tid) const
+{
+ return m_thread_list.DumpThreadStoppedReason(tid);
+}
+
+const char *
+MachProcess::GetThreadInfo(nub_thread_t tid) const
+{
+ return m_thread_list.GetThreadInfo(tid);
+}
+
+uint32_t
+MachProcess::GetCPUType ()
+{
+ if (m_cpu_type == 0 && m_pid != 0)
+ m_cpu_type = MachProcess::GetCPUTypeForLocalProcess (m_pid);
+ return m_cpu_type;
+}
+
+const DNBRegisterSetInfo *
+MachProcess::GetRegisterSetInfo (nub_thread_t tid, nub_size_t *num_reg_sets) const
+{
+ MachThreadSP thread_sp (m_thread_list.GetThreadByID (tid));
+ if (thread_sp)
+ {
+ DNBArchProtocol *arch = thread_sp->GetArchProtocol();
+ if (arch)
+ return arch->GetRegisterSetInfo (num_reg_sets);
+ }
+ *num_reg_sets = 0;
+ return NULL;
+}
+
+bool
+MachProcess::GetRegisterValue ( nub_thread_t tid, uint32_t set, uint32_t reg, DNBRegisterValue *value ) const
+{
+ return m_thread_list.GetRegisterValue(tid, set, reg, value);
+}
+
+bool
+MachProcess::SetRegisterValue ( nub_thread_t tid, uint32_t set, uint32_t reg, const DNBRegisterValue *value ) const
+{
+ return m_thread_list.SetRegisterValue(tid, set, reg, value);
+}
+
+void
+MachProcess::SetState(nub_state_t new_state)
+{
+ // If any other threads access this we will need a mutex for it
+ uint32_t event_mask = 0;
+
+ // Scope for mutex locker
+ {
+ PTHREAD_MUTEX_LOCKER(locker, m_state_mutex);
+ const nub_state_t old_state = m_state;
+
+ if (old_state == eStateExited)
+ {
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::SetState(%s) ignoring new state since current state is exited", DNBStateAsString(new_state));
+ }
+ else if (old_state == new_state)
+ {
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::SetState(%s) ignoring redundant state change...", DNBStateAsString(new_state));
+ }
+ else
+ {
+ if (NUB_STATE_IS_STOPPED(new_state))
+ event_mask = eEventProcessStoppedStateChanged;
+ else
+ event_mask = eEventProcessRunningStateChanged;
+
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::SetState(%s) upating state (previous state was %s), event_mask = 0x%8.8x", DNBStateAsString(new_state), DNBStateAsString(old_state), event_mask);
+
+ m_state = new_state;
+ if (new_state == eStateStopped)
+ m_stop_count++;
+ }
+ }
+
+ if (event_mask != 0)
+ {
+ m_events.SetEvents (event_mask);
+ m_private_events.SetEvents (event_mask);
+ if (event_mask == eEventProcessStoppedStateChanged)
+ m_private_events.ResetEvents (eEventProcessRunningStateChanged);
+ else
+ m_private_events.ResetEvents (eEventProcessStoppedStateChanged);
+
+ // Wait for the event bit to reset if a reset ACK is requested
+ m_events.WaitForResetAck(event_mask);
+ }
+
+}
+
+void
+MachProcess::Clear(bool detaching)
+{
+ // Clear any cached thread list while the pid and task are still valid
+
+ m_task.Clear();
+ // Now clear out all member variables
+ m_pid = INVALID_NUB_PROCESS;
+ if (!detaching)
+ CloseChildFileDescriptors();
+
+ m_path.clear();
+ m_args.clear();
+ SetState(eStateUnloaded);
+ m_flags = eMachProcessFlagsNone;
+ m_stop_count = 0;
+ m_thread_list.Clear();
+ {
+ PTHREAD_MUTEX_LOCKER(locker, m_exception_messages_mutex);
+ m_exception_messages.clear();
+ }
+ m_activities.Clear();
+ if (m_profile_thread)
+ {
+ pthread_join(m_profile_thread, NULL);
+ m_profile_thread = NULL;
+ }
+}
+
+
+bool
+MachProcess::StartSTDIOThread()
+{
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::%s ( )", __FUNCTION__);
+ // Create the thread that watches for the child STDIO
+ return ::pthread_create (&m_stdio_thread, NULL, MachProcess::STDIOThread, this) == 0;
+}
+
+void
+MachProcess::SetEnableAsyncProfiling(bool enable, uint64_t interval_usec, DNBProfileDataScanType scan_type)
+{
+ m_profile_enabled = enable;
+ m_profile_interval_usec = static_cast<useconds_t>(interval_usec);
+ m_profile_scan_type = scan_type;
+
+ if (m_profile_enabled && (m_profile_thread == NULL))
+ {
+ StartProfileThread();
+ }
+ else if (!m_profile_enabled && m_profile_thread)
+ {
+ pthread_join(m_profile_thread, NULL);
+ m_profile_thread = NULL;
+ }
+}
+
+bool
+MachProcess::StartProfileThread()
+{
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::%s ( )", __FUNCTION__);
+ // Create the thread that profiles the inferior and reports back if enabled
+ return ::pthread_create (&m_profile_thread, NULL, MachProcess::ProfileThread, this) == 0;
+}
+
+
+nub_addr_t
+MachProcess::LookupSymbol(const char *name, const char *shlib)
+{
+ if (m_name_to_addr_callback != NULL && name && name[0])
+ return m_name_to_addr_callback(ProcessID(), name, shlib, m_name_to_addr_baton);
+ return INVALID_NUB_ADDRESS;
+}
+
+bool
+MachProcess::Resume (const DNBThreadResumeActions& thread_actions)
+{
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::Resume ()");
+ nub_state_t state = GetState();
+
+ if (CanResume(state))
+ {
+ m_thread_actions = thread_actions;
+ PrivateResume();
+ return true;
+ }
+ else if (state == eStateRunning)
+ {
+ DNBLog("Resume() - task 0x%x is already running, ignoring...", m_task.TaskPort());
+ return true;
+ }
+ DNBLog("Resume() - task 0x%x has state %s, can't continue...", m_task.TaskPort(), DNBStateAsString(state));
+ return false;
+}
+
+bool
+MachProcess::Kill (const struct timespec *timeout_abstime)
+{
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::Kill ()");
+ nub_state_t state = DoSIGSTOP(true, false, NULL);
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::Kill() DoSIGSTOP() state = %s", DNBStateAsString(state));
+ errno = 0;
+ DNBLog ("Sending ptrace PT_KILL to terminate inferior process.");
+ ::ptrace (PT_KILL, m_pid, 0, 0);
+ DNBError err;
+ err.SetErrorToErrno();
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::Kill() DoSIGSTOP() ::ptrace (PT_KILL, pid=%u, 0, 0) => 0x%8.8x (%s)", m_pid, err.Error(), err.AsString());
+ m_thread_actions = DNBThreadResumeActions (eStateRunning, 0);
+ PrivateResume ();
+
+ // Try and reap the process without touching our m_events since
+ // we want the code above this to still get the eStateExited event
+ const uint32_t reap_timeout_usec = 1000000; // Wait 1 second and try to reap the process
+ const uint32_t reap_interval_usec = 10000; // Check every 10 ms between reap attempts
+ uint32_t reap_time_elapsed;
+ for (reap_time_elapsed = 0;
+ reap_time_elapsed < reap_timeout_usec;
+ reap_time_elapsed += reap_interval_usec)
+ {
+ if (GetState() == eStateExited)
+ break;
+ usleep(reap_interval_usec);
+ }
+ DNBLog ("Waited %u ms for process to be reaped (state = %s)", reap_time_elapsed/1000, DNBStateAsString(GetState()));
+ return true;
+}
+
+bool
+MachProcess::Interrupt()
+{
+ nub_state_t state = GetState();
+ if (IsRunning(state))
+ {
+ if (m_sent_interrupt_signo == 0)
+ {
+ m_sent_interrupt_signo = SIGSTOP;
+ if (Signal (m_sent_interrupt_signo))
+ {
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::Interrupt() - sent %i signal to interrupt process", m_sent_interrupt_signo);
+ }
+ else
+ {
+ m_sent_interrupt_signo = 0;
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::Interrupt() - failed to send %i signal to interrupt process", m_sent_interrupt_signo);
+ }
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::Interrupt() - previously sent an interrupt signal %i that hasn't been received yet, interrupt aborted", m_sent_interrupt_signo);
+ }
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::Interrupt() - process already stopped, no interrupt sent");
+ }
+ return false;
+}
+
+bool
+MachProcess::Signal (int signal, const struct timespec *timeout_abstime)
+{
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::Signal (signal = %d, timeout = %p)", signal, timeout_abstime);
+ nub_state_t state = GetState();
+ if (::kill (ProcessID(), signal) == 0)
+ {
+ // If we were running and we have a timeout, wait for the signal to stop
+ if (IsRunning(state) && timeout_abstime)
+ {
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::Signal (signal = %d, timeout = %p) waiting for signal to stop process...", signal, timeout_abstime);
+ m_private_events.WaitForSetEvents(eEventProcessStoppedStateChanged, timeout_abstime);
+ state = GetState();
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::Signal (signal = %d, timeout = %p) state = %s", signal, timeout_abstime, DNBStateAsString(state));
+ return !IsRunning (state);
+ }
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::Signal (signal = %d, timeout = %p) not waiting...", signal, timeout_abstime);
+ return true;
+ }
+ DNBError err(errno, DNBError::POSIX);
+ err.LogThreadedIfError("kill (pid = %d, signo = %i)", ProcessID(), signal);
+ return false;
+
+}
+
+bool
+MachProcess::SendEvent (const char *event, DNBError &send_err)
+{
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::SendEvent (event = %s) to pid: %d", event, m_pid);
+ if (m_pid == INVALID_NUB_PROCESS)
+ return false;
+ // FIXME: Shouldn't we use the launch flavor we were started with?
+#if defined(WITH_FBS) || defined(WITH_BKS)
+ return BoardServiceSendEvent (event, send_err);
+#endif
+ return true;
+}
+
+nub_state_t
+MachProcess::DoSIGSTOP (bool clear_bps_and_wps, bool allow_running, uint32_t *thread_idx_ptr)
+{
+ nub_state_t state = GetState();
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::DoSIGSTOP() state = %s", DNBStateAsString (state));
+
+ if (!IsRunning(state))
+ {
+ if (clear_bps_and_wps)
+ {
+ DisableAllBreakpoints (true);
+ DisableAllWatchpoints (true);
+ clear_bps_and_wps = false;
+ }
+
+ // If we already have a thread stopped due to a SIGSTOP, we don't have
+ // to do anything...
+ uint32_t thread_idx = m_thread_list.GetThreadIndexForThreadStoppedWithSignal (SIGSTOP);
+ if (thread_idx_ptr)
+ *thread_idx_ptr = thread_idx;
+ if (thread_idx != UINT32_MAX)
+ return GetState();
+
+ // No threads were stopped with a SIGSTOP, we need to run and halt the
+ // process with a signal
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::DoSIGSTOP() state = %s -- resuming process", DNBStateAsString (state));
+ if (allow_running)
+ m_thread_actions = DNBThreadResumeActions (eStateRunning, 0);
+ else
+ m_thread_actions = DNBThreadResumeActions (eStateSuspended, 0);
+
+ PrivateResume ();
+
+ // Reset the event that says we were indeed running
+ m_events.ResetEvents(eEventProcessRunningStateChanged);
+ state = GetState();
+ }
+
+ // We need to be stopped in order to be able to detach, so we need
+ // to send the process a SIGSTOP
+
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::DoSIGSTOP() state = %s -- sending SIGSTOP", DNBStateAsString (state));
+ struct timespec sigstop_timeout;
+ DNBTimer::OffsetTimeOfDay(&sigstop_timeout, 2, 0);
+ Signal (SIGSTOP, &sigstop_timeout);
+ if (clear_bps_and_wps)
+ {
+ DisableAllBreakpoints (true);
+ DisableAllWatchpoints (true);
+ //clear_bps_and_wps = false;
+ }
+ uint32_t thread_idx = m_thread_list.GetThreadIndexForThreadStoppedWithSignal (SIGSTOP);
+ if (thread_idx_ptr)
+ *thread_idx_ptr = thread_idx;
+ return GetState();
+}
+
+bool
+MachProcess::Detach()
+{
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::Detach()");
+
+ uint32_t thread_idx = UINT32_MAX;
+ nub_state_t state = DoSIGSTOP(true, true, &thread_idx);
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::Detach() DoSIGSTOP() returned %s", DNBStateAsString(state));
+
+ {
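+ // Build a resume action for the thread that stopped with SIGSTOP so that
+ // all pending exceptions can be replied to before we detach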
+ m_thread_actions.Clear();
+ m_activities.Clear();
+ DNBThreadResumeAction thread_action;
+ thread_action.tid = m_thread_list.ThreadIDAtIndex (thread_idx);
+ thread_action.state = eStateRunning;
+ thread_action.signal = -1;
+ thread_action.addr = INVALID_NUB_ADDRESS;
+
+ m_thread_actions.Append (thread_action);
+ m_thread_actions.SetDefaultThreadActionIfNeeded (eStateRunning, 0);
+
+ PTHREAD_MUTEX_LOCKER (locker, m_exception_messages_mutex);
+
+ ReplyToAllExceptions ();
+
+ }
+
+ m_task.ShutDownExcecptionThread();
+
+ // Detach from our process
+ errno = 0;
+ nub_process_t pid = m_pid;
+ int ret = ::ptrace (PT_DETACH, pid, (caddr_t)1, 0);
+ DNBError err(errno, DNBError::POSIX);
+ if (DNBLogCheckLogBit(LOG_PROCESS) || err.Fail() || (ret != 0))
+ err.LogThreaded("::ptrace (PT_DETACH, %u, (caddr_t)1, 0)", pid);
+
+ // Resume our task
+ m_task.Resume();
+
+ // NULL our task out as we have already restored all exception ports
+ m_task.Clear();
+
+ // Clear out any notion of the process we once were
+ const bool detaching = true;
+ Clear(detaching);
+
+ SetState(eStateDetached);
+
+ return true;
+}
+
+//----------------------------------------------------------------------
+// ReadMemory from the MachProcess level will always remove any software
+// breakpoints from the memory buffer before returning. If you wish to
+// read memory and see those traps, read from the MachTask
+// (m_task.ReadMemory()) as that version will give you what is actually
+// in inferior memory.
+//----------------------------------------------------------------------
+nub_size_t
+MachProcess::ReadMemory (nub_addr_t addr, nub_size_t size, void *buf)
+{
+ // We need to remove any current software traps (enabled software
+ // breakpoints) that we may have placed in our task's memory.
+
+ // First just read the memory as is
+ nub_size_t bytes_read = m_task.ReadMemory(addr, size, buf);
+
+ // Then place any opcodes that fall into this range back into the buffer
+ // before we return this to callers.
+ if (bytes_read > 0)
+ m_breakpoints.RemoveTrapsFromBuffer (addr, bytes_read, buf);
+ return bytes_read;
+}
+
+//----------------------------------------------------------------------
+// WriteMemory from the MachProcess level will always write memory around
+// any software breakpoints. Any software breakpoints will have their
+// opcodes modified if they are enabled. Any memory that doesn't overlap
+// with software breakpoints will be written to. If you wish to write to
+// inferior memory without this interference, then write to the MachTask
+// (m_task.WriteMemory()) as that version will always modify inferior
+// memory.
+//----------------------------------------------------------------------
+nub_size_t
+MachProcess::WriteMemory (nub_addr_t addr, nub_size_t size, const void *buf)
+{
+ // We need to write around any current software traps (enabled software
+ // breakpoints) that we may have placed in our task's memory.
+
+ std::vector<DNBBreakpoint *> bps;
+
+ const size_t num_bps = m_breakpoints.FindBreakpointsThatOverlapRange(addr, size, bps);
+ if (num_bps == 0)
+ return m_task.WriteMemory(addr, size, buf);
+
+ nub_size_t bytes_written = 0;
+ nub_addr_t intersect_addr;
+ nub_size_t intersect_size;
+ nub_size_t opcode_offset;
+ const uint8_t *ubuf = (const uint8_t *)buf;
+
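+ // Walk the overlapping breakpoints in order: bytes before each breakpoint
+ // are written straight to memory, while bytes that overlap a breakpoint are
+ // redirected into that breakpoint's saved opcode buffer.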
+ for (size_t i=0; i<num_bps; ++i)
+ {
+ DNBBreakpoint *bp = bps[i];
+
+ const bool intersects = bp->IntersectsRange(addr, size, &intersect_addr, &intersect_size, &opcode_offset);
+ UNUSED_IF_ASSERT_DISABLED(intersects);
+ assert(intersects);
+ assert(addr <= intersect_addr && intersect_addr < addr + size);
+ assert(addr < intersect_addr + intersect_size && intersect_addr + intersect_size <= addr + size);
+ assert(opcode_offset + intersect_size <= bp->ByteSize());
+
+ // Check for bytes before this breakpoint
+ const nub_addr_t curr_addr = addr + bytes_written;
+ if (intersect_addr > curr_addr)
+ {
+ // There are some bytes before this breakpoint that we need to
+ // just write to memory
+ nub_size_t curr_size = intersect_addr - curr_addr;
+ nub_size_t curr_bytes_written = m_task.WriteMemory(curr_addr, curr_size, ubuf + bytes_written);
+ bytes_written += curr_bytes_written;
+ if (curr_bytes_written != curr_size)
+ {
+ // We weren't able to write all of the requested bytes, we
+ // are done looping and will return the number of bytes that
+ // we have written so far.
+ break;
+ }
+ }
+
+ // Now write any bytes that would cover up any software breakpoints
+ // directly into the breakpoint opcode buffer
+ ::memcpy(bp->SavedOpcodeBytes() + opcode_offset, ubuf + bytes_written, intersect_size);
+ bytes_written += intersect_size;
+ }
+
+ // Write any remaining bytes after the last breakpoint if we have any left
+ if (bytes_written < size)
+ bytes_written += m_task.WriteMemory(addr + bytes_written, size - bytes_written, ubuf + bytes_written);
+
+ return bytes_written;
+}
+
+void
+MachProcess::ReplyToAllExceptions ()
+{
+ PTHREAD_MUTEX_LOCKER(locker, m_exception_messages_mutex);
+ if (m_exception_messages.empty() == false)
+ {
+ MachException::Message::iterator pos;
+ MachException::Message::iterator begin = m_exception_messages.begin();
+ MachException::Message::iterator end = m_exception_messages.end();
+ for (pos = begin; pos != end; ++pos)
+ {
+ DNBLogThreadedIf(LOG_EXCEPTIONS, "Replying to exception %u...", (uint32_t)std::distance(begin, pos));
+ int thread_reply_signal = 0;
+
+ nub_thread_t tid = m_thread_list.GetThreadIDByMachPortNumber (pos->state.thread_port);
+ const DNBThreadResumeAction *action = NULL;
+ if (tid != INVALID_NUB_THREAD)
+ {
+ action = m_thread_actions.GetActionForThread (tid, false);
+ }
+
+ if (action)
+ {
+ thread_reply_signal = action->signal;
+ if (thread_reply_signal)
+ m_thread_actions.SetSignalHandledForThread (tid);
+ }
+
+ DNBError err (pos->Reply(this, thread_reply_signal));
+ if (DNBLogCheckLogBit(LOG_EXCEPTIONS))
+ err.LogThreadedIfError("Error replying to exception");
+ }
+
+ // Erase all exception messages as we should have used and replied
+ // to them all already.
+ m_exception_messages.clear();
+ }
+}
+void
+MachProcess::PrivateResume ()
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_exception_messages_mutex);
+
+ m_auto_resume_signo = m_sent_interrupt_signo;
+ if (m_auto_resume_signo)
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::PrivateResume() - task 0x%x resuming (with unhandled interrupt signal %i)...", m_task.TaskPort(), m_auto_resume_signo);
+ else
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::PrivateResume() - task 0x%x resuming...", m_task.TaskPort());
+
+ ReplyToAllExceptions ();
+// bool stepOverBreakInstruction = step;
+
+ // Let the thread prepare to resume and see if any threads want us to
+ // step over a breakpoint instruction (ProcessWillResume will modify
+ // the value of stepOverBreakInstruction).
+ m_thread_list.ProcessWillResume (this, m_thread_actions);
+
+ // Set our state accordingly
+ if (m_thread_actions.NumActionsWithState(eStateStepping))
+ SetState (eStateStepping);
+ else
+ SetState (eStateRunning);
+
+ // Now resume our task.
+ m_task.Resume();
+}
+
+DNBBreakpoint *
+MachProcess::CreateBreakpoint(nub_addr_t addr, nub_size_t length, bool hardware)
+{
+ DNBLogThreadedIf(LOG_BREAKPOINTS, "MachProcess::CreateBreakpoint ( addr = 0x%8.8llx, length = %llu, hardware = %i)", (uint64_t)addr, (uint64_t)length, hardware);
+
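+ // Breakpoints are reference counted: if one already exists at this address,
+ // just add a reference instead of creating a new one.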
+ DNBBreakpoint *bp = m_breakpoints.FindByAddress(addr);
+ if (bp)
+ bp->Retain();
+ else
+ bp = m_breakpoints.Add(addr, length, hardware);
+
+ if (EnableBreakpoint(addr))
+ {
+ DNBLogThreadedIf(LOG_BREAKPOINTS, "MachProcess::CreateBreakpoint ( addr = 0x%8.8llx, length = %llu) => %p", (uint64_t)addr, (uint64_t)length, bp);
+ return bp;
+ }
+ else if (bp->Release() == 0)
+ {
+ m_breakpoints.Remove(addr);
+ }
+ // We failed to enable the breakpoint
+ return NULL;
+}
+
+DNBBreakpoint *
+MachProcess::CreateWatchpoint(nub_addr_t addr, nub_size_t length, uint32_t watch_flags, bool hardware)
+{
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "MachProcess::CreateWatchpoint ( addr = 0x%8.8llx, length = %llu, flags = 0x%8.8x, hardware = %i)", (uint64_t)addr, (uint64_t)length, watch_flags, hardware);
+
+ DNBBreakpoint *wp = m_watchpoints.FindByAddress(addr);
+ // Since the Z packets only send an address, we can only have one watchpoint
+ // at an address. If there is already one, we must refuse to create another watchpoint.
+ if (wp)
+ return NULL;
+
+ wp = m_watchpoints.Add(addr, length, hardware);
+ wp->SetIsWatchpoint(watch_flags);
+
+ if (EnableWatchpoint(addr))
+ {
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "MachProcess::CreateWatchpoint ( addr = 0x%8.8llx, length = %llu) => %p", (uint64_t)addr, (uint64_t)length, wp);
+ return wp;
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "MachProcess::CreateWatchpoint ( addr = 0x%8.8llx, length = %llu) => FAILED", (uint64_t)addr, (uint64_t)length);
+ m_watchpoints.Remove(addr);
+ }
+ // We failed to enable the watchpoint
+ return NULL;
+}
+
+void
+MachProcess::DisableAllBreakpoints (bool remove)
+{
+ DNBLogThreadedIf(LOG_BREAKPOINTS, "MachProcess::%s (remove = %d )", __FUNCTION__, remove);
+
+ m_breakpoints.DisableAllBreakpoints (this);
+
+ if (remove)
+ m_breakpoints.RemoveDisabled();
+}
+
+void
+MachProcess::DisableAllWatchpoints(bool remove)
+{
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "MachProcess::%s (remove = %d )", __FUNCTION__, remove);
+
+ m_watchpoints.DisableAllWatchpoints(this);
+
+ if (remove)
+ m_watchpoints.RemoveDisabled();
+}
+
+bool
+MachProcess::DisableBreakpoint(nub_addr_t addr, bool remove)
+{
+ DNBBreakpoint *bp = m_breakpoints.FindByAddress(addr);
+ if (bp)
+ {
+ // After "exec" we might end up with a bunch of breakpoints that were disabled
+ // manually, just ignore them
+ if (!bp->IsEnabled())
+ {
+ // Breakpoint might have been disabled by an exec
+ if (remove && bp->Release() == 0)
+ {
+ m_thread_list.NotifyBreakpointChanged(bp);
+ m_breakpoints.Remove(addr);
+ }
+ return true;
+ }
+
+ // We have multiple references to this breakpoint, decrement the ref count
+ // and if it isn't zero, then return true;
+ if (remove && bp->Release() > 0)
+ return true;
+
+ DNBLogThreadedIf(LOG_BREAKPOINTS | LOG_VERBOSE, "MachProcess::DisableBreakpoint ( addr = 0x%8.8llx, remove = %d )", (uint64_t)addr, remove);
+
+ if (bp->IsHardware())
+ {
+ bool hw_disable_result = m_thread_list.DisableHardwareBreakpoint (bp);
+
+ if (hw_disable_result == true)
+ {
+ bp->SetEnabled(false);
+ // Let the thread list know that a breakpoint has been modified
+ if (remove)
+ {
+ m_thread_list.NotifyBreakpointChanged(bp);
+ m_breakpoints.Remove(addr);
+ }
+ DNBLogThreadedIf(LOG_BREAKPOINTS, "MachProcess::DisableBreakpoint ( addr = 0x%8.8llx, remove = %d ) (hardware) => success", (uint64_t)addr, remove);
+ return true;
+ }
+
+ return false;
+ }
+
+ const nub_size_t break_op_size = bp->ByteSize();
+ assert (break_op_size > 0);
+ const uint8_t * const break_op = DNBArchProtocol::GetBreakpointOpcode (bp->ByteSize());
+ if (break_op_size > 0)
+ {
+ // Clear a software breakpoint instruction
+ uint8_t curr_break_op[break_op_size];
+ bool break_op_found = false;
+
+ // Read the breakpoint opcode
+ if (m_task.ReadMemory(addr, break_op_size, curr_break_op) == break_op_size)
+ {
+ bool verify = false;
+ if (bp->IsEnabled())
+ {
+ // Make sure the breakpoint opcode still exists at this address
+ if (memcmp(curr_break_op, break_op, break_op_size) == 0)
+ {
+ break_op_found = true;
+ // We found a valid breakpoint opcode at this address, now restore
+ // the saved opcode.
+ if (m_task.WriteMemory(addr, break_op_size, bp->SavedOpcodeBytes()) == break_op_size)
+ {
+ verify = true;
+ }
+ else
+ {
+ DNBLogError("MachProcess::DisableBreakpoint ( addr = 0x%8.8llx, remove = %d ) memory write failed when restoring original opcode", (uint64_t)addr, remove);
+ }
+ }
+ else
+ {
+ DNBLogWarning("MachProcess::DisableBreakpoint ( addr = 0x%8.8llx, remove = %d ) expected a breakpoint opcode but didn't find one.", (uint64_t)addr, remove);
+ // Set verify to true so we can check whether the original opcode has already been restored
+ verify = true;
+ }
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_BREAKPOINTS | LOG_VERBOSE, "MachProcess::DisableBreakpoint ( addr = 0x%8.8llx, remove = %d ) is not enabled", (uint64_t)addr, remove);
+ // Set verify to true so we can check that the original opcode is there
+ verify = true;
+ }
+
+ if (verify)
+ {
+ uint8_t verify_opcode[break_op_size];
+ // Verify that our original opcode made it back to the inferior
+ if (m_task.ReadMemory(addr, break_op_size, verify_opcode) == break_op_size)
+ {
+ // compare the memory we just read with the original opcode
+ if (memcmp(bp->SavedOpcodeBytes(), verify_opcode, break_op_size) == 0)
+ {
+ // SUCCESS
+ bp->SetEnabled(false);
+ // Let the thread list know that a breakpoint has been modified
+ if (remove && bp->Release() == 0)
+ {
+ m_thread_list.NotifyBreakpointChanged(bp);
+ m_breakpoints.Remove(addr);
+ }
+ DNBLogThreadedIf(LOG_BREAKPOINTS, "MachProcess::DisableBreakpoint ( addr = 0x%8.8llx, remove = %d ) => success", (uint64_t)addr, remove);
+ return true;
+ }
+ else
+ {
+ if (break_op_found)
+ DNBLogError("MachProcess::DisableBreakpoint ( addr = 0x%8.8llx, remove = %d ) : failed to restore original opcode", (uint64_t)addr, remove);
+ else
+ DNBLogError("MachProcess::DisableBreakpoint ( addr = 0x%8.8llx, remove = %d ) : opcode changed", (uint64_t)addr, remove);
+ }
+ }
+ else
+ {
+ DNBLogWarning("MachProcess::DisableBreakpoint: unable to disable breakpoint 0x%8.8llx", (uint64_t)addr);
+ }
+ }
+ }
+ else
+ {
+ DNBLogWarning("MachProcess::DisableBreakpoint: unable to read memory at 0x%8.8llx", (uint64_t)addr);
+ }
+ }
+ }
+ else
+ {
+ DNBLogError("MachProcess::DisableBreakpoint ( addr = 0x%8.8llx, remove = %d ) invalid breakpoint address", (uint64_t)addr, remove);
+ }
+ return false;
+}
+
+bool
+MachProcess::DisableWatchpoint(nub_addr_t addr, bool remove)
+{
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "MachProcess::%s(addr = 0x%8.8llx, remove = %d)", __FUNCTION__, (uint64_t)addr, remove);
+ DNBBreakpoint *wp = m_watchpoints.FindByAddress(addr);
+ if (wp)
+ {
+ // If we have multiple references to a watchpoint, removing the watchpoint shouldn't clear it
+ if (remove && wp->Release() > 0)
+ return true;
+
+ nub_addr_t addr = wp->Address();
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "MachProcess::DisableWatchpoint ( addr = 0x%8.8llx, remove = %d )", (uint64_t)addr, remove);
+
+ if (wp->IsHardware())
+ {
+ bool hw_disable_result = m_thread_list.DisableHardwareWatchpoint (wp);
+
+ if (hw_disable_result == true)
+ {
+ wp->SetEnabled(false);
+ if (remove)
+ m_watchpoints.Remove(addr);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "MachProcess::Disablewatchpoint ( addr = 0x%8.8llx, remove = %d ) (hardware) => success", (uint64_t)addr, remove);
+ return true;
+ }
+ }
+
+ // TODO: clear software watchpoints if we implement them
+ }
+ else
+ {
+ DNBLogError("MachProcess::DisableWatchpoint ( addr = 0x%8.8llx, remove = %d ) invalid watchpoint ID", (uint64_t)addr, remove);
+ }
+ return false;
+}
+
+
+uint32_t
+MachProcess::GetNumSupportedHardwareWatchpoints () const
+{
+ return m_thread_list.NumSupportedHardwareWatchpoints();
+}
+
+bool
+MachProcess::EnableBreakpoint(nub_addr_t addr)
+{
+ DNBLogThreadedIf(LOG_BREAKPOINTS, "MachProcess::EnableBreakpoint ( addr = 0x%8.8llx )", (uint64_t)addr);
+ DNBBreakpoint *bp = m_breakpoints.FindByAddress(addr);
+ if (bp)
+ {
+ if (bp->IsEnabled())
+ {
+ DNBLogWarning("MachProcess::EnableBreakpoint ( addr = 0x%8.8llx ): breakpoint already enabled.", (uint64_t)addr);
+ return true;
+ }
+ else
+ {
+ if (bp->HardwarePreferred())
+ {
+ bp->SetHardwareIndex(m_thread_list.EnableHardwareBreakpoint(bp));
+ if (bp->IsHardware())
+ {
+ bp->SetEnabled(true);
+ return true;
+ }
+ }
+
+ const nub_size_t break_op_size = bp->ByteSize();
+ assert (break_op_size != 0);
+ const uint8_t * const break_op = DNBArchProtocol::GetBreakpointOpcode (break_op_size);
+ if (break_op_size > 0)
+ {
+ // Save the original opcode by reading it
+ if (m_task.ReadMemory(addr, break_op_size, bp->SavedOpcodeBytes()) == break_op_size)
+ {
+ // Write a software breakpoint in place of the original opcode
+ if (m_task.WriteMemory(addr, break_op_size, break_op) == break_op_size)
+ {
+ uint8_t verify_break_op[break_op_size];
+ if (m_task.ReadMemory(addr, break_op_size, verify_break_op) == break_op_size)
+ {
+ if (memcmp(break_op, verify_break_op, break_op_size) == 0)
+ {
+ bp->SetEnabled(true);
+ // Let the thread list know that a breakpoint has been modified
+ m_thread_list.NotifyBreakpointChanged(bp);
+ DNBLogThreadedIf(LOG_BREAKPOINTS, "MachProcess::EnableBreakpoint ( addr = 0x%8.8llx ) : SUCCESS.", (uint64_t)addr);
+ return true;
+ }
+ else
+ {
+ DNBLogError("MachProcess::EnableBreakpoint ( addr = 0x%8.8llx ): breakpoint opcode verification failed.", (uint64_t)addr);
+ }
+ }
+ else
+ {
+ DNBLogError("MachProcess::EnableBreakpoint ( addr = 0x%8.8llx ): unable to read memory to verify breakpoint opcode.", (uint64_t)addr);
+ }
+ }
+ else
+ {
+ DNBLogError("MachProcess::EnableBreakpoint ( addr = 0x%8.8llx ): unable to write breakpoint opcode to memory.", (uint64_t)addr);
+ }
+ }
+ else
+ {
+ DNBLogError("MachProcess::EnableBreakpoint ( addr = 0x%8.8llx ): unable to read memory at breakpoint address.", (uint64_t)addr);
+ }
+ }
+ else
+ {
+ DNBLogError("MachProcess::EnableBreakpoint ( addr = 0x%8.8llx ) no software breakpoint opcode for current architecture.", (uint64_t)addr);
+ }
+ }
+ }
+ return false;
+}
+
+bool
+MachProcess::EnableWatchpoint(nub_addr_t addr)
+{
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "MachProcess::EnableWatchpoint(addr = 0x%8.8llx)", (uint64_t)addr);
+ DNBBreakpoint *wp = m_watchpoints.FindByAddress(addr);
+ if (wp)
+ {
+ nub_addr_t addr = wp->Address();
+ if (wp->IsEnabled())
+ {
+ DNBLogWarning("MachProcess::EnableWatchpoint(addr = 0x%8.8llx): watchpoint already enabled.", (uint64_t)addr);
+ return true;
+ }
+ else
+ {
+ // Currently only try and set hardware watchpoints.
+ wp->SetHardwareIndex(m_thread_list.EnableHardwareWatchpoint(wp));
+ if (wp->IsHardware())
+ {
+ wp->SetEnabled(true);
+ return true;
+ }
+ // TODO: Add software watchpoints by doing page protection tricks.
+ }
+ }
+ return false;
+}
+
+// Called by the exception thread when an exception has been received from
+// our process. The exception message is completely filled and the exception
+// data has already been copied.
+void
+MachProcess::ExceptionMessageReceived (const MachException::Message& exceptionMessage)
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_exception_messages_mutex);
+
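+ // Suspend the task when the first exception of a bundle arrives so it stays
+ // stopped while the remaining exceptions are collected.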
+ if (m_exception_messages.empty())
+ m_task.Suspend();
+
+ DNBLogThreadedIf(LOG_EXCEPTIONS, "MachProcess::ExceptionMessageReceived ( )");
+
+ // Use a locker to automatically unlock our mutex in case of exceptions
+ // Add the exception to our internal exception stack
+ m_exception_messages.push_back(exceptionMessage);
+}
+
+task_t
+MachProcess::ExceptionMessageBundleComplete()
+{
+ // We have a complete bundle of exceptions for our child process.
+ PTHREAD_MUTEX_LOCKER (locker, m_exception_messages_mutex);
+ DNBLogThreadedIf(LOG_EXCEPTIONS, "%s: %llu exception messages.", __PRETTY_FUNCTION__, (uint64_t)m_exception_messages.size());
+ bool auto_resume = false;
+ if (!m_exception_messages.empty())
+ {
+ m_did_exec = false;
+ // First check for any SIGTRAP and make sure we didn't exec
+ const task_t task = m_task.TaskPort();
+ size_t i;
+ if (m_pid != 0)
+ {
+ bool received_interrupt = false;
+ uint32_t num_task_exceptions = 0;
+ for (i=0; i<m_exception_messages.size(); ++i)
+ {
+ if (m_exception_messages[i].state.task_port == task)
+ {
+ ++num_task_exceptions;
+ const int signo = m_exception_messages[i].state.SoftSignal();
+ if (signo == SIGTRAP)
+ {
+ // SIGTRAP could mean that we exec'ed. We need to check the
+ // dyld all_image_infos.infoArray to see if it is NULL and if
+ // so, say that we exec'ed.
+ const nub_addr_t aii_addr = GetDYLDAllImageInfosAddress();
+ if (aii_addr != INVALID_NUB_ADDRESS)
+ {
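+ // infoArrayCount is the 32-bit field that immediately follows the 32-bit
+ // version field at the start of dyld's all_image_infos structure.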
+ const nub_addr_t info_array_count_addr = aii_addr + 4;
+ uint32_t info_array_count = 0;
+ if (m_task.ReadMemory(info_array_count_addr, 4, &info_array_count) == 4)
+ {
+ if (info_array_count == 0)
+ {
+ m_did_exec = true;
+ // Force the task port to update itself in case the task port changed after exec
+ DNBError err;
+ const task_t old_task = m_task.TaskPort();
+ const task_t new_task = m_task.TaskPortForProcessID (err, true);
+ if (old_task != new_task)
+ DNBLogThreadedIf(LOG_PROCESS, "exec: task changed from 0x%4.4x to 0x%4.4x", old_task, new_task);
+ }
+ }
+ else
+ {
+ DNBLog ("error: failed to read all_image_infos.infoArrayCount from 0x%8.8llx", (uint64_t)info_array_count_addr);
+ }
+ }
+ break;
+ }
+ else if (m_sent_interrupt_signo != 0 && signo == m_sent_interrupt_signo)
+ {
+ received_interrupt = true;
+ }
+ }
+ }
+
+ if (m_did_exec)
+ {
+ cpu_type_t process_cpu_type = MachProcess::GetCPUTypeForLocalProcess (m_pid);
+ if (m_cpu_type != process_cpu_type)
+ {
+ DNBLog ("arch changed from 0x%8.8x to 0x%8.8x", m_cpu_type, process_cpu_type);
+ m_cpu_type = process_cpu_type;
+ DNBArchProtocol::SetArchitecture (process_cpu_type);
+ }
+ m_thread_list.Clear();
+ m_activities.Clear();
+ m_breakpoints.DisableAll();
+ }
+
+ if (m_sent_interrupt_signo != 0)
+ {
+ if (received_interrupt)
+ {
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::ExceptionMessageBundleComplete(): process successfully interrupted with signal %i", m_sent_interrupt_signo);
+
+ // Mark that we received the interrupt signal
+ m_sent_interrupt_signo = 0;
+ // Now check if we had a case where:
+ // 1 - We called MachProcess::Interrupt() but we stopped for another reason
+ // 2 - We called MachProcess::Resume() (but still haven't gotten the interrupt signal)
+ // 3 - We are now incorrectly stopped because we are handling the interrupt signal we missed
+ // 4 - We might need to resume if we stopped only with the interrupt signal that we never handled
+ if (m_auto_resume_signo != 0)
+ {
+ // Only auto_resume if we stopped with _only_ the interrupt signal
+ if (num_task_exceptions == 1)
+ {
+ auto_resume = true;
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::ExceptionMessageBundleComplete(): auto resuming due to unhandled interrupt signal %i", m_auto_resume_signo);
+ }
+ m_auto_resume_signo = 0;
+ }
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::ExceptionMessageBundleComplete(): didn't get signal %i after MachProcess::Interrupt()",
+ m_sent_interrupt_signo);
+ }
+ }
+ }
+
+ // Let all threads recover from stopping and do any clean up based
+ // on the previous thread state (if any).
+ m_thread_list.ProcessDidStop(this);
+ m_activities.Clear();
+
+ // Let each thread know of any exceptions
+ for (i=0; i<m_exception_messages.size(); ++i)
+ {
+ // Let the thread list use the MachProcess to forward all exceptions
+ // on down to each thread.
+ if (m_exception_messages[i].state.task_port == task)
+ m_thread_list.NotifyException(m_exception_messages[i].state);
+ if (DNBLogCheckLogBit(LOG_EXCEPTIONS))
+ m_exception_messages[i].Dump();
+ }
+
+ if (DNBLogCheckLogBit(LOG_THREAD))
+ m_thread_list.Dump();
+
+ bool step_more = false;
+ if (m_thread_list.ShouldStop(step_more) && auto_resume == false)
+ {
+ // Wait for the eEventProcessRunningStateChanged event to be reset
+ // before changing state to stopped to avoid race condition with
+ // very fast start/stops
+ struct timespec timeout;
+ //DNBTimer::OffsetTimeOfDay(&timeout, 0, 250 * 1000); // Wait for 250 ms
+ DNBTimer::OffsetTimeOfDay(&timeout, 1, 0); // Wait for 250 ms
+ m_events.WaitForEventsToReset(eEventProcessRunningStateChanged, &timeout);
+ SetState(eStateStopped);
+ }
+ else
+ {
+ // Resume without checking our current state.
+ PrivateResume ();
+ }
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_EXCEPTIONS, "%s empty exception messages bundle (%llu exceptions).", __PRETTY_FUNCTION__, (uint64_t)m_exception_messages.size());
+ }
+ return m_task.TaskPort();
+}
+
+nub_size_t
+MachProcess::CopyImageInfos ( struct DNBExecutableImageInfo **image_infos, bool only_changed)
+{
+ if (m_image_infos_callback != NULL)
+ return m_image_infos_callback(ProcessID(), image_infos, only_changed, m_image_infos_baton);
+ return 0;
+}
+
+void
+MachProcess::SharedLibrariesUpdated ( )
+{
+ uint32_t event_bits = eEventSharedLibsStateChange;
+ // Set the shared library event bit to let clients know of shared library
+ // changes
+ m_events.SetEvents(event_bits);
+ // Wait for the event bit to reset if a reset ACK is requested
+ m_events.WaitForResetAck(event_bits);
+}
+
+void
+MachProcess::SetExitInfo (const char *info)
+{
+ if (info && info[0])
+ {
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::%s(\"%s\")", __FUNCTION__, info);
+ m_exit_info.assign(info);
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::%s(NULL)", __FUNCTION__);
+ m_exit_info.clear();
+ }
+}
+
+void
+MachProcess::AppendSTDOUT (char* s, size_t len)
+{
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::%s (<%llu> %s) ...", __FUNCTION__, (uint64_t)len, s);
+ PTHREAD_MUTEX_LOCKER (locker, m_stdio_mutex);
+ m_stdout_data.append(s, len);
+ m_events.SetEvents(eEventStdioAvailable);
+
+ // Wait for the event bit to reset if a reset ACK is requested
+ m_events.WaitForResetAck(eEventStdioAvailable);
+}
+
+size_t
+MachProcess::GetAvailableSTDOUT (char *buf, size_t buf_size)
+{
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::%s (&%p[%llu]) ...", __FUNCTION__, buf, (uint64_t)buf_size);
+ PTHREAD_MUTEX_LOCKER (locker, m_stdio_mutex);
+ size_t bytes_available = m_stdout_data.size();
+ if (bytes_available > 0)
+ {
+ if (bytes_available > buf_size)
+ {
+ memcpy(buf, m_stdout_data.data(), buf_size);
+ m_stdout_data.erase(0, buf_size);
+ bytes_available = buf_size;
+ }
+ else
+ {
+ memcpy(buf, m_stdout_data.data(), bytes_available);
+ m_stdout_data.clear();
+ }
+ }
+ return bytes_available;
+}
+
+nub_addr_t
+MachProcess::GetDYLDAllImageInfosAddress ()
+{
+ DNBError err;
+ return m_task.GetDYLDAllImageInfosAddress(err);
+}
+
+size_t
+MachProcess::GetAvailableSTDERR (char *buf, size_t buf_size)
+{
+ return 0;
+}
+
+void *
+MachProcess::STDIOThread(void *arg)
+{
+ MachProcess *proc = (MachProcess*) arg;
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::%s ( arg = %p ) thread starting...", __FUNCTION__, arg);
+
+#if defined (__APPLE__)
+ pthread_setname_np ("stdio monitoring thread");
+#endif
+
+ // Loop, select()ing on the child's STDOUT and STDERR file descriptors and
+ // forwarding any output that arrives back to our client, until both
+ // descriptors reach EOF or hit an unrecoverable error.
+ DNBError err;
+ int stdout_fd = proc->GetStdoutFileDescriptor();
+ int stderr_fd = proc->GetStderrFileDescriptor();
+ if (stdout_fd == stderr_fd)
+ stderr_fd = -1;
+
+ while (stdout_fd >= 0 || stderr_fd >= 0)
+ {
+ ::pthread_testcancel ();
+
+ fd_set read_fds;
+ FD_ZERO (&read_fds);
+ if (stdout_fd >= 0)
+ FD_SET (stdout_fd, &read_fds);
+ if (stderr_fd >= 0)
+ FD_SET (stderr_fd, &read_fds);
+ int nfds = std::max<int>(stdout_fd, stderr_fd) + 1;
+
+ int num_set_fds = select (nfds, &read_fds, NULL, NULL, NULL);
+ DNBLogThreadedIf(LOG_PROCESS, "select (nfds, &read_fds, NULL, NULL, NULL) => %d", num_set_fds);
+
+ if (num_set_fds < 0)
+ {
+ int select_errno = errno;
+ if (DNBLogCheckLogBit(LOG_PROCESS))
+ {
+ err.SetError (select_errno, DNBError::POSIX);
+ err.LogThreadedIfError("select (nfds, &read_fds, NULL, NULL, NULL) => %d", num_set_fds);
+ }
+
+ switch (select_errno)
+ {
+ case EAGAIN: // The kernel was (perhaps temporarily) unable to allocate the requested number of file descriptors, or we have non-blocking IO
+ break;
+ case EBADF: // One of the descriptor sets specified an invalid descriptor.
+ return NULL;
+ break;
+ case EINTR: // A signal was delivered before the time limit expired and before any of the selected events occurred.
+ case EINVAL: // The specified time limit is invalid. One of its components is negative or too large.
+ default: // Other unknown error
+ break;
+ }
+ }
+ else if (num_set_fds == 0)
+ {
+ }
+ else
+ {
+ char s[1024];
+ s[sizeof(s)-1] = '\0'; // Ensure we have NULL termination
+ ssize_t bytes_read = 0;
+ if (stdout_fd >= 0 && FD_ISSET (stdout_fd, &read_fds))
+ {
+ do
+ {
+ bytes_read = ::read (stdout_fd, s, sizeof(s)-1);
+ if (bytes_read < 0)
+ {
+ int read_errno = errno;
+ DNBLogThreadedIf(LOG_PROCESS, "read (stdout_fd, ) => %zd errno: %d (%s)", bytes_read, read_errno, strerror(read_errno));
+ }
+ else if (bytes_read == 0)
+ {
+ // EOF...
+ DNBLogThreadedIf(LOG_PROCESS, "read (stdout_fd, ) => %zd (reached EOF for child STDOUT)", bytes_read);
+ stdout_fd = -1;
+ }
+ else if (bytes_read > 0)
+ {
+ proc->AppendSTDOUT(s, bytes_read);
+ }
+
+ } while (bytes_read > 0);
+ }
+
+ if (stderr_fd >= 0 && FD_ISSET (stderr_fd, &read_fds))
+ {
+ do
+ {
+ bytes_read = ::read (stderr_fd, s, sizeof(s)-1);
+ if (bytes_read < 0)
+ {
+ int read_errno = errno;
+ DNBLogThreadedIf(LOG_PROCESS, "read (stderr_fd, ) => %zd errno: %d (%s)", bytes_read, read_errno, strerror(read_errno));
+ }
+ else if (bytes_read == 0)
+ {
+ // EOF...
+ DNBLogThreadedIf(LOG_PROCESS, "read (stderr_fd, ) => %zd (reached EOF for child STDERR)", bytes_read);
+ stderr_fd = -1;
+ }
+ else if (bytes_read > 0)
+ {
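+ // Note: child STDERR output is folded into the same STDOUT buffer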
+ proc->AppendSTDOUT(s, bytes_read);
+ }
+
+ } while (bytes_read > 0);
+ }
+ }
+ }
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::%s (%p): thread exiting...", __FUNCTION__, arg);
+ return NULL;
+}
+
+
+void
+MachProcess::SignalAsyncProfileData (const char *info)
+{
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::%s (%s) ...", __FUNCTION__, info);
+ PTHREAD_MUTEX_LOCKER (locker, m_profile_data_mutex);
+ m_profile_data.push_back(info);
+ m_events.SetEvents(eEventProfileDataAvailable);
+
+ // Wait for the event bit to reset if a reset ACK is requested
+ m_events.WaitForResetAck(eEventProfileDataAvailable);
+}
+
+
+size_t
+MachProcess::GetAsyncProfileData (char *buf, size_t buf_size)
+{
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::%s (&%p[%llu]) ...", __FUNCTION__, buf, (uint64_t)buf_size);
+ PTHREAD_MUTEX_LOCKER (locker, m_profile_data_mutex);
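+ // Return data from the oldest queued profile chunk; a partial read leaves
+ // the remainder at the front of the queue for the next call.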
+ if (m_profile_data.empty())
+ return 0;
+
+ size_t bytes_available = m_profile_data.front().size();
+ if (bytes_available > 0)
+ {
+ if (bytes_available > buf_size)
+ {
+ memcpy(buf, m_profile_data.front().data(), buf_size);
+ m_profile_data.front().erase(0, buf_size);
+ bytes_available = buf_size;
+ }
+ else
+ {
+ memcpy(buf, m_profile_data.front().data(), bytes_available);
+ m_profile_data.erase(m_profile_data.begin());
+ }
+ }
+ return bytes_available;
+}
+
+
+void *
+MachProcess::ProfileThread(void *arg)
+{
+ MachProcess *proc = (MachProcess*) arg;
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::%s ( arg = %p ) thread starting...", __FUNCTION__, arg);
+
+#if defined (__APPLE__)
+ pthread_setname_np ("performance profiling thread");
+#endif
+
+ while (proc->IsProfilingEnabled())
+ {
+ nub_state_t state = proc->GetState();
+ if (state == eStateRunning)
+ {
+ std::string data = proc->Task().GetProfileData(proc->GetProfileScanType());
+ if (!data.empty())
+ {
+ proc->SignalAsyncProfileData(data.c_str());
+ }
+ }
+ else if ((state == eStateUnloaded) || (state == eStateDetached))
+ {
+ // Done. Get out of this thread.
+ break;
+ }
+
+ // A simple way to set up the profile interval. We can also use select() or dispatch timer source if necessary.
+ usleep(proc->ProfileInterval());
+ }
+ return NULL;
+}
+
+
+pid_t
+MachProcess::AttachForDebug (pid_t pid, char *err_str, size_t err_len)
+{
+ // Clear out and clean up from any current state
+ Clear();
+ if (pid != 0)
+ {
+ DNBError err;
+ // Make sure the process exists...
+ if (::getpgid (pid) < 0)
+ {
+ err.SetErrorToErrno();
+ const char *err_cstr = err.AsString();
+ ::snprintf (err_str, err_len, "%s", err_cstr ? err_cstr : "No such process");
+ return INVALID_NUB_PROCESS;
+ }
+
+ SetState(eStateAttaching);
+ m_pid = pid;
+ // Record which launch service (FBS, BKS, or SpringBoard) manages this process by setting the corresponding flag bit...
+#if defined (WITH_FBS) || defined (WITH_BKS)
+ bool found_app_flavor = false;
+#endif
+
+#if defined (WITH_FBS)
+ if (!found_app_flavor && IsFBSProcess (pid))
+ {
+ found_app_flavor = true;
+ m_flags |= eMachProcessFlagsUsingFBS;
+ }
+#elif defined (WITH_BKS)
+ if (!found_app_flavor && IsBKSProcess (pid))
+ {
+ found_app_flavor = true;
+ m_flags |= eMachProcessFlagsUsingBKS;
+ }
+#elif defined (WITH_SPRINGBOARD)
+ if (IsSBProcess(pid))
+ m_flags |= eMachProcessFlagsUsingSBS;
+#endif
+ if (!m_task.StartExceptionThread(err))
+ {
+ const char *err_cstr = err.AsString();
+ ::snprintf (err_str, err_len, "%s", err_cstr ? err_cstr : "unable to start the exception thread");
+ DNBLogThreadedIf(LOG_PROCESS, "error: failed to attach to pid %d", pid);
+ m_pid = INVALID_NUB_PROCESS;
+ return INVALID_NUB_PROCESS;
+ }
+
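+ // PT_ATTACHEXC attaches and delivers the inferior's signals as Mach
+ // exceptions, which the exception thread started above will receive.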
+ errno = 0;
+ if (::ptrace (PT_ATTACHEXC, pid, 0, 0))
+ err.SetError(errno);
+ else
+ err.Clear();
+
+ if (err.Success())
+ {
+ m_flags |= eMachProcessFlagsAttached;
+ // Sleep a bit to let the exception get received and set our process status
+ // to stopped.
+ ::usleep(250000);
+ DNBLogThreadedIf(LOG_PROCESS, "successfully attached to pid %d", pid);
+ return m_pid;
+ }
+ else
+ {
+ ::snprintf (err_str, err_len, "%s", err.AsString());
+ DNBLogThreadedIf(LOG_PROCESS, "error: failed to attach to pid %d", pid);
+ }
+ }
+ return INVALID_NUB_PROCESS;
+}
+
+Genealogy::ThreadActivitySP
+MachProcess::GetGenealogyInfoForThread (nub_thread_t tid, bool &timed_out)
+{
+ return m_activities.GetGenealogyInfoForThread (m_pid, tid, m_thread_list, m_task.TaskPort(), timed_out);
+}
+
+Genealogy::ProcessExecutableInfoSP
+MachProcess::GetGenealogyImageInfo (size_t idx)
+{
+ return m_activities.GetProcessExecutableInfosAtIndex (idx);
+}
+
+bool
+MachProcess::GetOSVersionNumbers (uint64_t *major, uint64_t *minor, uint64_t *patch)
+{
+ bool success = false;
+
+#if (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101000)
+ NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+
+ NSOperatingSystemVersion vers = [[NSProcessInfo processInfo] operatingSystemVersion];
+ if (major)
+ *major = vers.majorVersion;
+ if (minor)
+ *minor = vers.minorVersion;
+ if (patch)
+ *patch = vers.patchVersion;
+
+ success = true;
+
+ [pool drain];
+#endif
+
+ return success;
+}
+
+// Do the process specific setup for attach. If this returns NULL, then there's no
+// platform specific stuff to be done to wait for the attach. If you get non-null,
+// pass that token to the CheckForProcess method, and then to CleanupAfterAttach.
+
+// Call PrepareForAttach before attaching to a process that has not yet launched
+// This returns a token that can be passed to CheckForProcess, and to CleanupAfterAttach.
+// You should call CleanupAfterAttach to free the token, and do whatever other
+// cleanup seems good.
+
+const void *
+MachProcess::PrepareForAttach (const char *path, nub_launch_flavor_t launch_flavor, bool waitfor, DNBError &attach_err)
+{
+#if defined (WITH_SPRINGBOARD) || defined (WITH_BKS) || defined (WITH_FBS)
+ // Tell SpringBoard to halt the next launch of this application on startup.
+
+ if (!waitfor)
+ return NULL;
+
+ const char *app_ext = strstr(path, ".app");
+ const bool is_app = app_ext != NULL && (app_ext[4] == '\0' || app_ext[4] == '/');
+ if (!is_app)
+ {
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::PrepareForAttach(): path '%s' doesn't contain .app, "
+ "we can't tell springboard to wait for launch...",
+ path);
+ return NULL;
+ }
+
+#if defined (WITH_FBS)
+ if (launch_flavor == eLaunchFlavorDefault)
+ launch_flavor = eLaunchFlavorFBS;
+ if (launch_flavor != eLaunchFlavorFBS)
+ return NULL;
+#elif defined (WITH_BKS)
+ if (launch_flavor == eLaunchFlavorDefault)
+ launch_flavor = eLaunchFlavorBKS;
+ if (launch_flavor != eLaunchFlavorBKS)
+ return NULL;
+#elif defined (WITH_SPRINGBOARD)
+ if (launch_flavor == eLaunchFlavorDefault)
+ launch_flavor = eLaunchFlavorSpringBoard;
+ if (launch_flavor != eLaunchFlavorSpringBoard)
+ return NULL;
+#endif
+
+ std::string app_bundle_path(path, app_ext + strlen(".app"));
+
+ CFStringRef bundleIDCFStr = CopyBundleIDForPath (app_bundle_path.c_str (), attach_err);
+ std::string bundleIDStr;
+ CFString::UTF8(bundleIDCFStr, bundleIDStr);
+ DNBLogThreadedIf(LOG_PROCESS,
+ "CopyBundleIDForPath (%s, err_str) returned @\"%s\"",
+ app_bundle_path.c_str (),
+ bundleIDStr.c_str());
+
+ if (bundleIDCFStr == NULL)
+ {
+ return NULL;
+ }
+
+#if defined (WITH_FBS)
+ if (launch_flavor == eLaunchFlavorFBS)
+ {
+ NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+
+ NSString *stdio_path = nil;
+ NSFileManager *file_manager = [NSFileManager defaultManager];
+ const char *null_path = "/dev/null";
+ stdio_path = [file_manager stringWithFileSystemRepresentation: null_path length: strlen(null_path)];
+
+ NSMutableDictionary *debug_options = [NSMutableDictionary dictionary];
+ NSMutableDictionary *options = [NSMutableDictionary dictionary];
+
+ DNBLogThreadedIf(LOG_PROCESS, "Calling BKSSystemService openApplication: @\"%s\",options include stdio path: \"%s\", "
+ "BKSDebugOptionKeyDebugOnNextLaunch & BKSDebugOptionKeyWaitForDebugger )",
+ bundleIDStr.c_str(),
+ null_path);
+
+ [debug_options setObject: stdio_path forKey: FBSDebugOptionKeyStandardOutPath];
+ [debug_options setObject: stdio_path forKey: FBSDebugOptionKeyStandardErrorPath];
+ [debug_options setObject: [NSNumber numberWithBool: YES] forKey: FBSDebugOptionKeyWaitForDebugger];
+ [debug_options setObject: [NSNumber numberWithBool: YES] forKey: FBSDebugOptionKeyDebugOnNextLaunch];
+
+ [options setObject: debug_options forKey: FBSOpenApplicationOptionKeyDebuggingOptions];
+
+ FBSSystemService *system_service = [[FBSSystemService alloc] init];
+
+ mach_port_t client_port = [system_service createClientPort];
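+ // Block on a semaphore until the openApplication completion handler runs or the timeout below expires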
+ __block dispatch_semaphore_t semaphore = dispatch_semaphore_create(0);
+ __block FBSOpenApplicationErrorCode attach_error_code = FBSOpenApplicationErrorCodeNone;
+
+ NSString *bundleIDNSStr = (NSString *) bundleIDCFStr;
+
+ [system_service openApplication: bundleIDNSStr
+ options: options
+ clientPort: client_port
+ withResult: ^(NSError *error)
+ {
+ // The system service will cleanup the client port we created for us.
+ if (error)
+ attach_error_code = (FBSOpenApplicationErrorCode)[error code];
+
+ [system_service release];
+ dispatch_semaphore_signal(semaphore);
+ }
+ ];
+
+ const uint32_t timeout_secs = 9;
+
+ dispatch_time_t timeout = dispatch_time(DISPATCH_TIME_NOW, timeout_secs * NSEC_PER_SEC);
+
+ long success = dispatch_semaphore_wait(semaphore, timeout) == 0;
+
+ if (!success)
+ {
+ DNBLogError("timed out trying to launch %s.", bundleIDStr.c_str());
+ attach_err.SetErrorString("debugserver timed out waiting for openApplication to complete.");
+ attach_err.SetError (OPEN_APPLICATION_TIMEOUT_ERROR, DNBError::Generic);
+ }
+ else if (attach_error_code != FBSOpenApplicationErrorCodeNone)
+ {
+ SetFBSError (attach_error_code, attach_err);
+ DNBLogError("unable to launch the application with CFBundleIdentifier '%s' bks_error = %ld",
+ bundleIDStr.c_str(),
+ (NSInteger) attach_error_code);
+ }
+ dispatch_release(semaphore);
+ [pool drain];
+ }
+#endif
+#if defined (WITH_BKS)
+ if (launch_flavor == eLaunchFlavorBKS)
+ {
+ NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+
+ NSString *stdio_path = nil;
+ NSFileManager *file_manager = [NSFileManager defaultManager];
+ const char *null_path = "/dev/null";
+ stdio_path = [file_manager stringWithFileSystemRepresentation: null_path length: strlen(null_path)];
+
+ NSMutableDictionary *debug_options = [NSMutableDictionary dictionary];
+ NSMutableDictionary *options = [NSMutableDictionary dictionary];
+
+ DNBLogThreadedIf(LOG_PROCESS, "Calling BKSSystemService openApplication: @\"%s\",options include stdio path: \"%s\", "
+ "BKSDebugOptionKeyDebugOnNextLaunch & BKSDebugOptionKeyWaitForDebugger )",
+ bundleIDStr.c_str(),
+ null_path);
+
+ [debug_options setObject: stdio_path forKey: BKSDebugOptionKeyStandardOutPath];
+ [debug_options setObject: stdio_path forKey: BKSDebugOptionKeyStandardErrorPath];
+ [debug_options setObject: [NSNumber numberWithBool: YES] forKey: BKSDebugOptionKeyWaitForDebugger];
+ [debug_options setObject: [NSNumber numberWithBool: YES] forKey: BKSDebugOptionKeyDebugOnNextLaunch];
+
+ [options setObject: debug_options forKey: BKSOpenApplicationOptionKeyDebuggingOptions];
+
+ BKSSystemService *system_service = [[BKSSystemService alloc] init];
+
+ mach_port_t client_port = [system_service createClientPort];
+ __block dispatch_semaphore_t semaphore = dispatch_semaphore_create(0);
+ __block BKSOpenApplicationErrorCode attach_error_code = BKSOpenApplicationErrorCodeNone;
+
+ NSString *bundleIDNSStr = (NSString *) bundleIDCFStr;
+
+ [system_service openApplication: bundleIDNSStr
+ options: options
+ clientPort: client_port
+ withResult: ^(NSError *error)
+ {
+ // The system service will cleanup the client port we created for us.
+ if (error)
+ attach_error_code = (BKSOpenApplicationErrorCode)[error code];
+
+ [system_service release];
+ dispatch_semaphore_signal(semaphore);
+ }
+ ];
+
+ const uint32_t timeout_secs = 9;
+
+ dispatch_time_t timeout = dispatch_time(DISPATCH_TIME_NOW, timeout_secs * NSEC_PER_SEC);
+
+ long success = dispatch_semaphore_wait(semaphore, timeout) == 0;
+
+ if (!success)
+ {
+ DNBLogError("timed out trying to launch %s.", bundleIDStr.c_str());
+ attach_err.SetErrorString("debugserver timed out waiting for openApplication to complete.");
+ attach_err.SetError (OPEN_APPLICATION_TIMEOUT_ERROR, DNBError::Generic);
+ }
+ else if (attach_error_code != BKSOpenApplicationErrorCodeNone)
+ {
+ SetBKSError (attach_error_code, attach_err);
+ DNBLogError("unable to launch the application with CFBundleIdentifier '%s' bks_error = %ld",
+ bundleIDStr.c_str(),
+ attach_error_code);
+ }
+ dispatch_release(semaphore);
+ [pool drain];
+ }
+#endif
+
+#if defined (WITH_SPRINGBOARD)
+ if (launch_flavor == eLaunchFlavorSpringBoard)
+ {
+ SBSApplicationLaunchError sbs_error = 0;
+
+ const char *stdout_err = "/dev/null";
+ CFString stdio_path;
+ stdio_path.SetFileSystemRepresentation (stdout_err);
+
+ DNBLogThreadedIf(LOG_PROCESS, "SBSLaunchApplicationForDebugging ( @\"%s\" , NULL, NULL, NULL, @\"%s\", @\"%s\", "
+ "SBSApplicationDebugOnNextLaunch | SBSApplicationLaunchWaitForDebugger )",
+ bundleIDStr.c_str(),
+ stdout_err,
+ stdout_err);
+
+ sbs_error = SBSLaunchApplicationForDebugging (bundleIDCFStr,
+ (CFURLRef)NULL, // openURL
+ NULL, // launch_argv.get(),
+ NULL, // launch_envp.get(), // CFDictionaryRef environment
+ stdio_path.get(),
+ stdio_path.get(),
+ SBSApplicationDebugOnNextLaunch | SBSApplicationLaunchWaitForDebugger);
+
+ if (sbs_error != SBSApplicationLaunchErrorSuccess)
+ {
+ attach_err.SetError(sbs_error, DNBError::SpringBoard);
+ return NULL;
+ }
+ }
+#endif // WITH_SPRINGBOARD
+
+ DNBLogThreadedIf(LOG_PROCESS, "Successfully set DebugOnNextLaunch.");
+ return bundleIDCFStr;
+#else // !(defined (WITH_SPRINGBOARD) || defined (WITH_BKS) || defined (WITH_FBS))
+ return NULL;
+#endif
+}
+
+// Pass in the token you got from PrepareForAttach. If there is a process
+// for that token, then the pid will be returned, otherwise INVALID_NUB_PROCESS
+// will be returned.
+
+nub_process_t
+MachProcess::CheckForProcess (const void *attach_token, nub_launch_flavor_t launch_flavor)
+{
+ if (attach_token == NULL)
+ return INVALID_NUB_PROCESS;
+
+#if defined (WITH_FBS)
+ if (launch_flavor == eLaunchFlavorFBS)
+ {
+ NSString *bundleIDNSStr = (NSString *) attach_token;
+ FBSSystemService *systemService = [[FBSSystemService alloc] init];
+ pid_t pid = [systemService pidForApplication: bundleIDNSStr];
+ [systemService release];
+ if (pid == 0)
+ return INVALID_NUB_PROCESS;
+ else
+ return pid;
+ }
+#endif
+
+#if defined (WITH_BKS)
+ if (launch_flavor == eLaunchFlavorBKS)
+ {
+ NSString *bundleIDNSStr = (NSString *) attach_token;
+ BKSSystemService *systemService = [[BKSSystemService alloc] init];
+ pid_t pid = [systemService pidForApplication: bundleIDNSStr];
+ [systemService release];
+ if (pid == 0)
+ return INVALID_NUB_PROCESS;
+ else
+ return pid;
+ }
+#endif
+
+#if defined (WITH_SPRINGBOARD)
+ if (launch_flavor == eLaunchFlavorSpringBoard)
+ {
+ CFStringRef bundleIDCFStr = (CFStringRef) attach_token;
+ Boolean got_it;
+ nub_process_t attach_pid;
+ got_it = SBSProcessIDForDisplayIdentifier(bundleIDCFStr, &attach_pid);
+ if (got_it)
+ return attach_pid;
+ else
+ return INVALID_NUB_PROCESS;
+ }
+#endif
+ return INVALID_NUB_PROCESS;
+}
+
+// Call this to clean up after you have either attached or given up on the attach.
+// Pass true for success if you have attached, false if you have not.
+// The token will also be freed at this point, so you can't use it after calling
+// this method.
+
+void
+MachProcess::CleanupAfterAttach (const void *attach_token, nub_launch_flavor_t launch_flavor, bool success, DNBError &err_str)
+{
+ if (attach_token == NULL)
+ return;
+
+#if defined (WITH_FBS)
+ if (launch_flavor == eLaunchFlavorFBS)
+ {
+ if (!success)
+ {
+ FBSCleanupAfterAttach (attach_token, err_str);
+ }
+ CFRelease((CFStringRef) attach_token);
+ }
+#endif
+
+#if defined (WITH_BKS)
+
+ if (launch_flavor == eLaunchFlavorBKS)
+ {
+ if (!success)
+ {
+ BKSCleanupAfterAttach (attach_token, err_str);
+ }
+ CFRelease((CFStringRef) attach_token);
+ }
+#endif
+
+#if defined (WITH_SPRINGBOARD)
+ // Tell SpringBoard to cancel the debug on next launch of this application
+ // if we failed to attach
+ if (launch_flavor == eLaunchFlavorSpringBoard)
+ {
+ if (!success)
+ {
+ SBSApplicationLaunchError sbs_error = 0;
+ CFStringRef bundleIDCFStr = (CFStringRef) attach_token;
+
+ sbs_error = SBSLaunchApplicationForDebugging (bundleIDCFStr,
+ (CFURLRef)NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ SBSApplicationCancelDebugOnNextLaunch);
+
+ if (sbs_error != SBSApplicationLaunchErrorSuccess)
+ {
+ err_str.SetError(sbs_error, DNBError::SpringBoard);
+ return;
+ }
+ }
+
+ CFRelease((CFStringRef) attach_token);
+ }
+#endif
+}
+
+pid_t
+MachProcess::LaunchForDebug
+(
+ const char *path,
+ char const *argv[],
+ char const *envp[],
+ const char *working_directory, // NULL => don't change, non-NULL => set working directory for inferior to this
+ const char *stdin_path,
+ const char *stdout_path,
+ const char *stderr_path,
+ bool no_stdio,
+ nub_launch_flavor_t launch_flavor,
+ int disable_aslr,
+ const char *event_data,
+ DNBError &launch_err
+)
+{
+ // Clear out and clean up from any current state
+ Clear();
+
+ DNBLogThreadedIf(LOG_PROCESS, "%s( path = '%s', argv = %p, envp = %p, launch_flavor = %u, disable_aslr = %d )", __FUNCTION__, path, argv, envp, launch_flavor, disable_aslr);
+
+ // Fork a child process for debugging
+ SetState(eStateLaunching);
+
+ switch (launch_flavor)
+ {
+ case eLaunchFlavorForkExec:
+ m_pid = MachProcess::ForkChildForPTraceDebugging (path, argv, envp, this, launch_err);
+ break;
+#ifdef WITH_FBS
+ case eLaunchFlavorFBS:
+ {
+ const char *app_ext = strstr(path, ".app");
+ if (app_ext && (app_ext[4] == '\0' || app_ext[4] == '/'))
+ {
+ std::string app_bundle_path(path, app_ext + strlen(".app"));
+ m_flags |= eMachProcessFlagsUsingFBS;
+ if (BoardServiceLaunchForDebug (app_bundle_path.c_str(), argv, envp, no_stdio, disable_aslr, event_data, launch_err) != 0)
+ return m_pid; // A successful BoardServiceLaunchForDebug() returns and assigns a non-zero m_pid.
+ else
+ break; // We tried an FBS launch, but didn't succeed; let's get out
+ }
+ }
+ break;
+#endif
+#ifdef WITH_BKS
+ case eLaunchFlavorBKS:
+ {
+ const char *app_ext = strstr(path, ".app");
+ if (app_ext && (app_ext[4] == '\0' || app_ext[4] == '/'))
+ {
+ std::string app_bundle_path(path, app_ext + strlen(".app"));
+ m_flags |= eMachProcessFlagsUsingBKS;
+ if (BoardServiceLaunchForDebug (app_bundle_path.c_str(), argv, envp, no_stdio, disable_aslr, event_data, launch_err) != 0)
+ return m_pid; // A successful BoardServiceLaunchForDebug() returns and assigns a non-zero m_pid.
+ else
+ break; // We tried a BKS launch, but didn't succeed; let's get out
+ }
+ }
+ break;
+#endif
+#ifdef WITH_SPRINGBOARD
+
+ case eLaunchFlavorSpringBoard:
+ {
+ // .../whatever.app/whatever ?
+ // Or .../com.apple.whatever.app/whatever -- be careful of ".app" in "com.apple.whatever" here
+ const char *app_ext = strstr (path, ".app/");
+ if (app_ext == NULL)
+ {
+ // .../whatever.app ?
+ int len = strlen (path);
+ if (len > 5)
+ {
+ if (strcmp (path + len - 4, ".app") == 0)
+ {
+ app_ext = path + len - 4;
+ }
+ }
+ }
+ if (app_ext)
+ {
+ std::string app_bundle_path(path, app_ext + strlen(".app"));
+ if (SBLaunchForDebug (app_bundle_path.c_str(), argv, envp, no_stdio, disable_aslr, launch_err) != 0)
+ return m_pid; // A successful SBLaunchForDebug() returns and assigns a non-zero m_pid.
+ else
+ break; // We tried a SpringBoard launch, but didn't succeed; let's get out
+ }
+ }
+ break;
+
+#endif
+
+ case eLaunchFlavorPosixSpawn:
+ m_pid = MachProcess::PosixSpawnChildForPTraceDebugging (path,
+ DNBArchProtocol::GetArchitecture (),
+ argv,
+ envp,
+ working_directory,
+ stdin_path,
+ stdout_path,
+ stderr_path,
+ no_stdio,
+ this,
+ disable_aslr,
+ launch_err);
+ break;
+
+ default:
+ // Invalid launch
+ launch_err.SetError(NUB_GENERIC_ERROR, DNBError::Generic);
+ return INVALID_NUB_PROCESS;
+ }
+
+ if (m_pid == INVALID_NUB_PROCESS)
+ {
+ // If we don't have a valid process ID and no one has set the error,
+ // then return a generic error
+ if (launch_err.Success())
+ launch_err.SetError(NUB_GENERIC_ERROR, DNBError::Generic);
+ }
+ else
+ {
+ m_path = path;
+ size_t i;
+ char const *arg;
+ for (i=0; (arg = argv[i]) != NULL; i++)
+ m_args.push_back(arg);
+
+ m_task.StartExceptionThread(launch_err);
+ if (launch_err.Fail())
+ {
+ if (launch_err.AsString() == NULL)
+ launch_err.SetErrorString("unable to start the exception thread");
+ DNBLog ("Could not get inferior's Mach exception port, sending ptrace PT_KILL and exiting.");
+ ::ptrace (PT_KILL, m_pid, 0, 0);
+ m_pid = INVALID_NUB_PROCESS;
+ return INVALID_NUB_PROCESS;
+ }
+
+ StartSTDIOThread();
+
+ if (launch_flavor == eLaunchFlavorPosixSpawn)
+ {
+
+ SetState (eStateAttaching);
+ errno = 0;
+ int err = ::ptrace (PT_ATTACHEXC, m_pid, 0, 0);
+ if (err == 0)
+ {
+ m_flags |= eMachProcessFlagsAttached;
+ DNBLogThreadedIf(LOG_PROCESS, "successfully spawned pid %d", m_pid);
+ launch_err.Clear();
+ }
+ else
+ {
+ SetState (eStateExited);
+ DNBError ptrace_err(errno, DNBError::POSIX);
+ DNBLogThreadedIf(LOG_PROCESS, "error: failed to attach to spawned pid %d (err = %i, errno = %i (%s))", m_pid, err, ptrace_err.Error(), ptrace_err.AsString());
+ launch_err.SetError(NUB_GENERIC_ERROR, DNBError::Generic);
+ }
+ }
+ else
+ {
+ launch_err.Clear();
+ }
+ }
+ return m_pid;
+}
+
+pid_t
+MachProcess::PosixSpawnChildForPTraceDebugging
+(
+ const char *path,
+ cpu_type_t cpu_type,
+ char const *argv[],
+ char const *envp[],
+ const char *working_directory,
+ const char *stdin_path,
+ const char *stdout_path,
+ const char *stderr_path,
+ bool no_stdio,
+ MachProcess* process,
+ int disable_aslr,
+ DNBError& err
+)
+{
+ posix_spawnattr_t attr;
+ short flags;
+ DNBLogThreadedIf(LOG_PROCESS, "%s ( path='%s', argv=%p, envp=%p, working_dir=%s, stdin=%s, stdout=%s stderr=%s, no-stdio=%i)",
+ __FUNCTION__,
+ path,
+ argv,
+ envp,
+ working_directory,
+ stdin_path,
+ stdout_path,
+ stderr_path,
+ no_stdio);
+
+ err.SetError( ::posix_spawnattr_init (&attr), DNBError::POSIX);
+ if (err.Fail() || DNBLogCheckLogBit(LOG_PROCESS))
+ err.LogThreaded("::posix_spawnattr_init ( &attr )");
+ if (err.Fail())
+ return INVALID_NUB_PROCESS;
+
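+    // Start the child suspended so we can set up our exception port and attach
+    // with ptrace(PT_ATTACHEXC) (see MachProcess::Launch) before any of the
+    // inferior's code runs.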
+ flags = POSIX_SPAWN_START_SUSPENDED | POSIX_SPAWN_SETSIGDEF | POSIX_SPAWN_SETSIGMASK;
+ if (disable_aslr)
+ flags |= _POSIX_SPAWN_DISABLE_ASLR;
+
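+    // Give the inferior a clean signal state: block no signals and reset every
+    // signal's disposition to its default.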
+ sigset_t no_signals;
+ sigset_t all_signals;
+ sigemptyset (&no_signals);
+ sigfillset (&all_signals);
+ ::posix_spawnattr_setsigmask(&attr, &no_signals);
+ ::posix_spawnattr_setsigdefault(&attr, &all_signals);
+
+ err.SetError( ::posix_spawnattr_setflags (&attr, flags), DNBError::POSIX);
+ if (err.Fail() || DNBLogCheckLogBit(LOG_PROCESS))
+ err.LogThreaded("::posix_spawnattr_setflags ( &attr, POSIX_SPAWN_START_SUSPENDED%s )", flags & _POSIX_SPAWN_DISABLE_ASLR ? " | _POSIX_SPAWN_DISABLE_ASLR" : "");
+ if (err.Fail())
+ return INVALID_NUB_PROCESS;
+
+    // Don't do this on Snow Leopard: _sometimes_ the TASK_BASIC_INFO call will fail
+    // and we will then fail to continue with our process...
+
+    // On Snow Leopard we should set "DYLD_NO_PIE" in the inferior environment...
+
+#if !defined(__arm__)
+
+ // We don't need to do this for ARM, and we really shouldn't now that we
+ // have multiple CPU subtypes and no posix_spawnattr call that allows us
+ // to set which CPU subtype to launch...
+ if (cpu_type != 0)
+ {
+ size_t ocount = 0;
+ err.SetError( ::posix_spawnattr_setbinpref_np (&attr, 1, &cpu_type, &ocount), DNBError::POSIX);
+ if (err.Fail() || DNBLogCheckLogBit(LOG_PROCESS))
+ err.LogThreaded("::posix_spawnattr_setbinpref_np ( &attr, 1, cpu_type = 0x%8.8x, count => %llu )", cpu_type, (uint64_t)ocount);
+
+ if (err.Fail() != 0 || ocount != 1)
+ return INVALID_NUB_PROCESS;
+ }
+#endif
+
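+    // If the caller didn't redirect stdio and didn't ask for no stdio at all,
+    // we open a pseudo terminal below and point the child's stdin/out/err at
+    // its slave so our STDIO thread can relay the inferior's output.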
+ PseudoTerminal pty;
+
+ posix_spawn_file_actions_t file_actions;
+ err.SetError( ::posix_spawn_file_actions_init (&file_actions), DNBError::POSIX);
+ int file_actions_valid = err.Success();
+ if (!file_actions_valid || DNBLogCheckLogBit(LOG_PROCESS))
+ err.LogThreaded("::posix_spawn_file_actions_init ( &file_actions )");
+ int pty_error = -1;
+ pid_t pid = INVALID_NUB_PROCESS;
+ if (file_actions_valid)
+ {
+ if (stdin_path == NULL && stdout_path == NULL && stderr_path == NULL && !no_stdio)
+ {
+ pty_error = pty.OpenFirstAvailableMaster(O_RDWR|O_NOCTTY);
+ if (pty_error == PseudoTerminal::success)
+ {
+ stdin_path = stdout_path = stderr_path = pty.SlaveName();
+ }
+ }
+
+        // If no_stdio is set, or a stdio path was not supplied, route it to "/dev/null".
+ if (no_stdio || stdin_path == NULL || stdin_path[0] == '\0')
+ stdin_path = "/dev/null";
+ if (no_stdio || stdout_path == NULL || stdout_path[0] == '\0')
+ stdout_path = "/dev/null";
+ if (no_stdio || stderr_path == NULL || stderr_path[0] == '\0')
+ stderr_path = "/dev/null";
+
+ err.SetError( ::posix_spawn_file_actions_addopen (&file_actions,
+ STDIN_FILENO,
+ stdin_path,
+ O_RDONLY | O_NOCTTY,
+ 0),
+ DNBError::POSIX);
+ if (err.Fail() || DNBLogCheckLogBit (LOG_PROCESS))
+ err.LogThreaded ("::posix_spawn_file_actions_addopen (&file_actions, filedes=STDIN_FILENO, path='%s')", stdin_path);
+
+ err.SetError( ::posix_spawn_file_actions_addopen (&file_actions,
+ STDOUT_FILENO,
+ stdout_path,
+ O_WRONLY | O_NOCTTY | O_CREAT,
+ 0640),
+ DNBError::POSIX);
+ if (err.Fail() || DNBLogCheckLogBit (LOG_PROCESS))
+ err.LogThreaded ("::posix_spawn_file_actions_addopen (&file_actions, filedes=STDOUT_FILENO, path='%s')", stdout_path);
+
+ err.SetError( ::posix_spawn_file_actions_addopen (&file_actions,
+ STDERR_FILENO,
+ stderr_path,
+ O_WRONLY | O_NOCTTY | O_CREAT,
+ 0640),
+ DNBError::POSIX);
+ if (err.Fail() || DNBLogCheckLogBit (LOG_PROCESS))
+ err.LogThreaded ("::posix_spawn_file_actions_addopen (&file_actions, filedes=STDERR_FILENO, path='%s')", stderr_path);
+
+ // TODO: Verify if we can set the working directory back immediately
+ // after the posix_spawnp call without creating a race condition???
+ if (working_directory)
+ ::chdir (working_directory);
+
+ err.SetError( ::posix_spawnp (&pid, path, &file_actions, &attr, (char * const*)argv, (char * const*)envp), DNBError::POSIX);
+ if (err.Fail() || DNBLogCheckLogBit(LOG_PROCESS))
+ err.LogThreaded("::posix_spawnp ( pid => %i, path = '%s', file_actions = %p, attr = %p, argv = %p, envp = %p )", pid, path, &file_actions, &attr, argv, envp);
+ }
+ else
+ {
+ // TODO: Verify if we can set the working directory back immediately
+ // after the posix_spawnp call without creating a race condition???
+ if (working_directory)
+ ::chdir (working_directory);
+
+ err.SetError( ::posix_spawnp (&pid, path, NULL, &attr, (char * const*)argv, (char * const*)envp), DNBError::POSIX);
+ if (err.Fail() || DNBLogCheckLogBit(LOG_PROCESS))
+ err.LogThreaded("::posix_spawnp ( pid => %i, path = '%s', file_actions = %p, attr = %p, argv = %p, envp = %p )", pid, path, NULL, &attr, argv, envp);
+ }
+
+ // We have seen some cases where posix_spawnp was returning a valid
+ // looking pid even when an error was returned, so clear it out
+ if (err.Fail())
+ pid = INVALID_NUB_PROCESS;
+
+ if (pty_error == 0)
+ {
+ if (process != NULL)
+ {
+ int master_fd = pty.ReleaseMasterFD();
+ process->SetChildFileDescriptors(master_fd, master_fd, master_fd);
+ }
+ }
+ ::posix_spawnattr_destroy (&attr);
+
+ if (pid != INVALID_NUB_PROCESS)
+ {
+ cpu_type_t pid_cpu_type = MachProcess::GetCPUTypeForLocalProcess (pid);
+ DNBLogThreadedIf(LOG_PROCESS, "MachProcess::%s ( ) pid=%i, cpu_type=0x%8.8x", __FUNCTION__, pid, pid_cpu_type);
+ if (pid_cpu_type)
+ DNBArchProtocol::SetArchitecture (pid_cpu_type);
+ }
+
+ if (file_actions_valid)
+ {
+ DNBError err2;
+ err2.SetError( ::posix_spawn_file_actions_destroy (&file_actions), DNBError::POSIX);
+ if (err2.Fail() || DNBLogCheckLogBit(LOG_PROCESS))
+ err2.LogThreaded("::posix_spawn_file_actions_destroy ( &file_actions )");
+ }
+
+ return pid;
+}
+
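+// Ask the kernel for the cpu_type_t of a running process by appending the pid
+// to the "sysctl.proc_cputype" MIB. Returns 0 if the sysctl lookup fails.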
+uint32_t
+MachProcess::GetCPUTypeForLocalProcess (pid_t pid)
+{
+ int mib[CTL_MAXNAME]={0,};
+ size_t len = CTL_MAXNAME;
+ if (::sysctlnametomib("sysctl.proc_cputype", mib, &len))
+ return 0;
+
+ mib[len] = pid;
+ len++;
+
+ cpu_type_t cpu;
+ size_t cpu_len = sizeof(cpu);
+ if (::sysctl (mib, static_cast<u_int>(len), &cpu, &cpu_len, 0, 0))
+ cpu = 0;
+ return cpu;
+}
+
+pid_t
+MachProcess::ForkChildForPTraceDebugging
+(
+ const char *path,
+ char const *argv[],
+ char const *envp[],
+ MachProcess* process,
+ DNBError& launch_err
+)
+{
+ PseudoTerminal::Error pty_error = PseudoTerminal::success;
+
+ // Use a fork that ties the child process's stdin/out/err to a pseudo
+ // terminal so we can read it in our MachProcess::STDIOThread
+    // as unbuffered I/O.
+ PseudoTerminal pty;
+ pid_t pid = pty.Fork(pty_error);
+
+ if (pid < 0)
+ {
+ //--------------------------------------------------------------
+ // Error during fork.
+ //--------------------------------------------------------------
+ return pid;
+ }
+ else if (pid == 0)
+ {
+ //--------------------------------------------------------------
+ // Child process
+ //--------------------------------------------------------------
+ ::ptrace (PT_TRACE_ME, 0, 0, 0); // Debug this process
+ ::ptrace (PT_SIGEXC, 0, 0, 0); // Get BSD signals as mach exceptions
+
+        // If our parent is setgid, let's make sure we don't inherit those
+ // extra powers due to nepotism.
+ if (::setgid (getgid ()) == 0)
+ {
+
+ // Let the child have its own process group. We need to execute
+ // this call in both the child and parent to avoid a race condition
+ // between the two processes.
+ ::setpgid (0, 0); // Set the child process group to match its pid
+
+            // Sleep a bit before the exec call
+            ::sleep (1);
+
+            // Turn this process into the program we want to debug by exec'ing it
+ ::execv (path, (char * const *)argv);
+ }
+ // Exit with error code. Child process should have taken
+ // over in above exec call and if the exec fails it will
+ // exit the child process below.
+ ::exit (127);
+ }
+ else
+ {
+ //--------------------------------------------------------------
+ // Parent process
+ //--------------------------------------------------------------
+ // Let the child have its own process group. We need to execute
+ // this call in both the child and parent to avoid a race condition
+ // between the two processes.
+ ::setpgid (pid, pid); // Set the child process group to match its pid
+
+ if (process != NULL)
+ {
+ // Release our master pty file descriptor so the pty class doesn't
+ // close it and so we can continue to use it in our STDIO thread
+ int master_fd = pty.ReleaseMasterFD();
+ process->SetChildFileDescriptors(master_fd, master_fd, master_fd);
+ }
+ }
+ return pid;
+}
+
+#if defined (WITH_SPRINGBOARD) || defined (WITH_BKS) || defined (WITH_FBS)
+// This returns a CFRetained pointer to the Bundle ID for app_bundle_path,
+// or NULL if there was some problem getting the bundle id.
+static CFStringRef
+CopyBundleIDForPath (const char *app_bundle_path, DNBError &err_str)
+{
+ CFBundle bundle(app_bundle_path);
+ CFStringRef bundleIDCFStr = bundle.GetIdentifier();
+ std::string bundleID;
+ if (CFString::UTF8(bundleIDCFStr, bundleID) == NULL)
+ {
+ struct stat app_bundle_stat;
+ char err_msg[PATH_MAX];
+
+ if (::stat (app_bundle_path, &app_bundle_stat) < 0)
+ {
+ err_str.SetError(errno, DNBError::POSIX);
+ snprintf(err_msg, sizeof(err_msg), "%s: \"%s\"", err_str.AsString(), app_bundle_path);
+ err_str.SetErrorString(err_msg);
+ DNBLogThreadedIf(LOG_PROCESS, "%s() error: %s", __FUNCTION__, err_msg);
+ }
+ else
+ {
+ err_str.SetError(-1, DNBError::Generic);
+ snprintf(err_msg, sizeof(err_msg), "failed to extract CFBundleIdentifier from %s", app_bundle_path);
+ err_str.SetErrorString(err_msg);
+ DNBLogThreadedIf(LOG_PROCESS, "%s() error: failed to extract CFBundleIdentifier from '%s'", __FUNCTION__, app_bundle_path);
+ }
+ return NULL;
+ }
+
+ DNBLogThreadedIf(LOG_PROCESS, "%s() extracted CFBundleIdentifier: %s", __FUNCTION__, bundleID.c_str());
+ CFRetain (bundleIDCFStr);
+
+ return bundleIDCFStr;
+}
+#endif // #if defined (WITH_SPRINGBOARD) || defined (WITH_BKS) || defined (WITH_FBS)
+#ifdef WITH_SPRINGBOARD
+
+pid_t
+MachProcess::SBLaunchForDebug (const char *path, char const *argv[], char const *envp[], bool no_stdio, bool disable_aslr, DNBError &launch_err)
+{
+ // Clear out and clean up from any current state
+ Clear();
+
+ DNBLogThreadedIf(LOG_PROCESS, "%s( '%s', argv)", __FUNCTION__, path);
+
+ // Fork a child process for debugging
+ SetState(eStateLaunching);
+ m_pid = MachProcess::SBForkChildForPTraceDebugging(path, argv, envp, no_stdio, this, launch_err);
+ if (m_pid != 0)
+ {
+ m_flags |= eMachProcessFlagsUsingSBS;
+ m_path = path;
+ size_t i;
+ char const *arg;
+ for (i=0; (arg = argv[i]) != NULL; i++)
+ m_args.push_back(arg);
+ m_task.StartExceptionThread(launch_err);
+
+ if (launch_err.Fail())
+ {
+ if (launch_err.AsString() == NULL)
+ launch_err.SetErrorString("unable to start the exception thread");
+ DNBLog ("Could not get inferior's Mach exception port, sending ptrace PT_KILL and exiting.");
+ ::ptrace (PT_KILL, m_pid, 0, 0);
+ m_pid = INVALID_NUB_PROCESS;
+ return INVALID_NUB_PROCESS;
+ }
+
+ StartSTDIOThread();
+ SetState (eStateAttaching);
+ int err = ::ptrace (PT_ATTACHEXC, m_pid, 0, 0);
+ if (err == 0)
+ {
+ m_flags |= eMachProcessFlagsAttached;
+ DNBLogThreadedIf(LOG_PROCESS, "successfully attached to pid %d", m_pid);
+ }
+ else
+ {
+ SetState (eStateExited);
+ DNBLogThreadedIf(LOG_PROCESS, "error: failed to attach to pid %d", m_pid);
+ }
+ }
+ return m_pid;
+}
+
+#include <servers/bootstrap.h>
+
+pid_t
+MachProcess::SBForkChildForPTraceDebugging (const char *app_bundle_path, char const *argv[], char const *envp[], bool no_stdio, MachProcess* process, DNBError &launch_err)
+{
+ DNBLogThreadedIf(LOG_PROCESS, "%s( '%s', argv, %p)", __FUNCTION__, app_bundle_path, process);
+ CFAllocatorRef alloc = kCFAllocatorDefault;
+
+ if (argv[0] == NULL)
+ return INVALID_NUB_PROCESS;
+
+ size_t argc = 0;
+ // Count the number of arguments
+ while (argv[argc] != NULL)
+ argc++;
+
+ // Enumerate the arguments
+ size_t first_launch_arg_idx = 1;
+ CFReleaser<CFMutableArrayRef> launch_argv;
+
+ if (argv[first_launch_arg_idx])
+ {
+ size_t launch_argc = argc > 0 ? argc - 1 : 0;
+ launch_argv.reset (::CFArrayCreateMutable (alloc, launch_argc, &kCFTypeArrayCallBacks));
+ size_t i;
+ char const *arg;
+ CFString launch_arg;
+ for (i=first_launch_arg_idx; (i < argc) && ((arg = argv[i]) != NULL); i++)
+ {
+ launch_arg.reset(::CFStringCreateWithCString (alloc, arg, kCFStringEncodingUTF8));
+ if (launch_arg.get() != NULL)
+ CFArrayAppendValue(launch_argv.get(), launch_arg.get());
+ else
+ break;
+ }
+ }
+
+ // Next fill in the arguments dictionary. Note, the envp array is of the form
+ // Variable=value but SpringBoard wants a CF dictionary. So we have to convert
+ // this here.
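+    // For example, an entry of the form "NAME=value" becomes the dictionary key
+    // "NAME" mapped to the value "value"; entries without an '=' are skipped.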
+
+ CFReleaser<CFMutableDictionaryRef> launch_envp;
+
+ if (envp[0])
+ {
+ launch_envp.reset(::CFDictionaryCreateMutable(alloc, 0, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks));
+ const char *value;
+ int name_len;
+ CFString name_string, value_string;
+
+ for (int i = 0; envp[i] != NULL; i++)
+ {
+ value = strstr (envp[i], "=");
+
+ // If the name field is empty or there's no =, skip it. Somebody's messing with us.
+ if (value == NULL || value == envp[i])
+ continue;
+
+ name_len = value - envp[i];
+
+ // Now move value over the "="
+ value++;
+
+ name_string.reset(::CFStringCreateWithBytes(alloc, (const UInt8 *) envp[i], name_len, kCFStringEncodingUTF8, false));
+ value_string.reset(::CFStringCreateWithCString(alloc, value, kCFStringEncodingUTF8));
+ CFDictionarySetValue (launch_envp.get(), name_string.get(), value_string.get());
+ }
+ }
+
+ CFString stdio_path;
+
+ PseudoTerminal pty;
+ if (!no_stdio)
+ {
+ PseudoTerminal::Error pty_err = pty.OpenFirstAvailableMaster(O_RDWR|O_NOCTTY);
+ if (pty_err == PseudoTerminal::success)
+ {
+ const char* slave_name = pty.SlaveName();
+ DNBLogThreadedIf(LOG_PROCESS, "%s() successfully opened master pty, slave is %s", __FUNCTION__, slave_name);
+ if (slave_name && slave_name[0])
+ {
+ ::chmod (slave_name, S_IRWXU | S_IRWXG | S_IRWXO);
+ stdio_path.SetFileSystemRepresentation (slave_name);
+ }
+ }
+ }
+
+ if (stdio_path.get() == NULL)
+ {
+ stdio_path.SetFileSystemRepresentation ("/dev/null");
+ }
+
+ CFStringRef bundleIDCFStr = CopyBundleIDForPath (app_bundle_path, launch_err);
+ if (bundleIDCFStr == NULL)
+ return INVALID_NUB_PROCESS;
+
+ // This is just for logging:
+ std::string bundleID;
+ CFString::UTF8(bundleIDCFStr, bundleID);
+
+ DNBLogThreadedIf(LOG_PROCESS, "%s() serialized launch arg array", __FUNCTION__);
+
+ // Find SpringBoard
+ SBSApplicationLaunchError sbs_error = 0;
+ sbs_error = SBSLaunchApplicationForDebugging (bundleIDCFStr,
+ (CFURLRef)NULL, // openURL
+ launch_argv.get(),
+ launch_envp.get(), // CFDictionaryRef environment
+ stdio_path.get(),
+ stdio_path.get(),
+ SBSApplicationLaunchWaitForDebugger | SBSApplicationLaunchUnlockDevice);
+
+
+ launch_err.SetError(sbs_error, DNBError::SpringBoard);
+
+ if (sbs_error == SBSApplicationLaunchErrorSuccess)
+ {
+ static const useconds_t pid_poll_interval = 200000;
+ static const useconds_t pid_poll_timeout = 30000000;
+
+ useconds_t pid_poll_total = 0;
+
+ nub_process_t pid = INVALID_NUB_PROCESS;
+ Boolean pid_found = SBSProcessIDForDisplayIdentifier(bundleIDCFStr, &pid);
+ // Poll until the process is running, as long as we are getting valid responses and the timeout hasn't expired
+ // A return PID of 0 means the process is not running, which may be because it hasn't been (asynchronously) started
+ // yet, or that it died very quickly (if you weren't using waitForDebugger).
+ while (!pid_found && pid_poll_total < pid_poll_timeout)
+ {
+ usleep (pid_poll_interval);
+ pid_poll_total += pid_poll_interval;
+ DNBLogThreadedIf(LOG_PROCESS, "%s() polling Springboard for pid for %s...", __FUNCTION__, bundleID.c_str());
+ pid_found = SBSProcessIDForDisplayIdentifier(bundleIDCFStr, &pid);
+ }
+
+ CFRelease (bundleIDCFStr);
+ if (pid_found)
+ {
+ if (process != NULL)
+ {
+ // Release our master pty file descriptor so the pty class doesn't
+ // close it and so we can continue to use it in our STDIO thread
+ int master_fd = pty.ReleaseMasterFD();
+ process->SetChildFileDescriptors(master_fd, master_fd, master_fd);
+ }
+ DNBLogThreadedIf(LOG_PROCESS, "%s() => pid = %4.4x", __FUNCTION__, pid);
+ }
+ else
+ {
+ DNBLogError("failed to lookup the process ID for CFBundleIdentifier %s.", bundleID.c_str());
+ }
+ return pid;
+ }
+
+ DNBLogError("unable to launch the application with CFBundleIdentifier '%s' sbs_error = %u", bundleID.c_str(), sbs_error);
+ return INVALID_NUB_PROCESS;
+}
+
+#endif // #ifdef WITH_SPRINGBOARD
+
+
+
+#if defined (WITH_BKS) || defined (WITH_FBS)
+pid_t
+MachProcess::BoardServiceLaunchForDebug (const char *path, char const *argv[], char const *envp[], bool no_stdio, bool disable_aslr, const char *event_data, DNBError &launch_err)
+{
+ DNBLogThreadedIf(LOG_PROCESS, "%s( '%s', argv)", __FUNCTION__, path);
+
+ // Fork a child process for debugging
+ SetState(eStateLaunching);
+ m_pid = BoardServiceForkChildForPTraceDebugging(path, argv, envp, no_stdio, disable_aslr, event_data, launch_err);
+ if (m_pid != 0)
+ {
+ m_path = path;
+ size_t i;
+ char const *arg;
+ for (i=0; (arg = argv[i]) != NULL; i++)
+ m_args.push_back(arg);
+ m_task.StartExceptionThread(launch_err);
+
+ if (launch_err.Fail())
+ {
+ if (launch_err.AsString() == NULL)
+ launch_err.SetErrorString("unable to start the exception thread");
+ DNBLog ("Could not get inferior's Mach exception port, sending ptrace PT_KILL and exiting.");
+ ::ptrace (PT_KILL, m_pid, 0, 0);
+ m_pid = INVALID_NUB_PROCESS;
+ return INVALID_NUB_PROCESS;
+ }
+
+ StartSTDIOThread();
+ SetState (eStateAttaching);
+ int err = ::ptrace (PT_ATTACHEXC, m_pid, 0, 0);
+ if (err == 0)
+ {
+ m_flags |= eMachProcessFlagsAttached;
+ DNBLogThreadedIf(LOG_PROCESS, "successfully attached to pid %d", m_pid);
+ }
+ else
+ {
+ SetState (eStateExited);
+ DNBLogThreadedIf(LOG_PROCESS, "error: failed to attach to pid %d", m_pid);
+ }
+ }
+ return m_pid;
+}
+
+pid_t
+MachProcess::BoardServiceForkChildForPTraceDebugging (const char *app_bundle_path,
+ char const *argv[],
+ char const *envp[],
+ bool no_stdio,
+ bool disable_aslr,
+ const char *event_data,
+ DNBError &launch_err)
+{
+ if (argv[0] == NULL)
+ return INVALID_NUB_PROCESS;
+
+ DNBLogThreadedIf(LOG_PROCESS, "%s( '%s', argv, %p)", __FUNCTION__, app_bundle_path, this);
+
+ NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+
+ size_t argc = 0;
+ // Count the number of arguments
+ while (argv[argc] != NULL)
+ argc++;
+
+ // Enumerate the arguments
+ size_t first_launch_arg_idx = 1;
+
+ NSMutableArray *launch_argv = nil;
+
+ if (argv[first_launch_arg_idx])
+ {
+ size_t launch_argc = argc > 0 ? argc - 1 : 0;
+ launch_argv = [NSMutableArray arrayWithCapacity: launch_argc];
+ size_t i;
+ char const *arg;
+ NSString *launch_arg;
+ for (i=first_launch_arg_idx; (i < argc) && ((arg = argv[i]) != NULL); i++)
+ {
+ launch_arg = [NSString stringWithUTF8String: arg];
+ // FIXME: Should we silently eat an argument that we can't convert into a UTF8 string?
+ if (launch_arg != nil)
+ [launch_argv addObject: launch_arg];
+ else
+ break;
+ }
+ }
+
+ NSMutableDictionary *launch_envp = nil;
+ if (envp[0])
+ {
+ launch_envp = [[NSMutableDictionary alloc] init];
+ const char *value;
+ int name_len;
+ NSString *name_string, *value_string;
+
+ for (int i = 0; envp[i] != NULL; i++)
+ {
+ value = strstr (envp[i], "=");
+
+ // If the name field is empty or there's no =, skip it. Somebody's messing with us.
+ if (value == NULL || value == envp[i])
+ continue;
+
+ name_len = value - envp[i];
+
+ // Now move value over the "="
+ value++;
+ name_string = [[NSString alloc] initWithBytes: envp[i] length: name_len encoding: NSUTF8StringEncoding];
+ value_string = [NSString stringWithUTF8String: value];
+ [launch_envp setObject: value_string forKey: name_string];
+ }
+ }
+
+ NSString *stdio_path = nil;
+ NSFileManager *file_manager = [NSFileManager defaultManager];
+
+ PseudoTerminal pty;
+ if (!no_stdio)
+ {
+ PseudoTerminal::Error pty_err = pty.OpenFirstAvailableMaster(O_RDWR|O_NOCTTY);
+ if (pty_err == PseudoTerminal::success)
+ {
+ const char* slave_name = pty.SlaveName();
+ DNBLogThreadedIf(LOG_PROCESS, "%s() successfully opened master pty, slave is %s", __FUNCTION__, slave_name);
+ if (slave_name && slave_name[0])
+ {
+ ::chmod (slave_name, S_IRWXU | S_IRWXG | S_IRWXO);
+ stdio_path = [file_manager stringWithFileSystemRepresentation: slave_name length: strlen(slave_name)];
+ }
+ }
+ }
+
+ if (stdio_path == nil)
+ {
+ const char *null_path = "/dev/null";
+ stdio_path = [file_manager stringWithFileSystemRepresentation: null_path length: strlen(null_path)];
+ }
+
+ CFStringRef bundleIDCFStr = CopyBundleIDForPath (app_bundle_path, launch_err);
+ if (bundleIDCFStr == NULL)
+ {
+ [pool drain];
+ return INVALID_NUB_PROCESS;
+ }
+
+ // Instead of rewriting CopyBundleIDForPath for NSStrings, we'll just use toll-free bridging here:
+ NSString *bundleIDNSStr = (NSString *) bundleIDCFStr;
+
+    // Okay, now let's assemble all these goodies into the board services (BKS/FBS) options mega-dictionary:
+
+ NSMutableDictionary *options = nullptr;
+ pid_t return_pid = INVALID_NUB_PROCESS;
+ bool success = false;
+
+#ifdef WITH_BKS
+ if (m_flags & eMachProcessFlagsUsingBKS)
+ {
+ options = BKSCreateOptionsDictionary(app_bundle_path, launch_argv, launch_envp, stdio_path, disable_aslr, event_data);
+ success = BKSCallOpenApplicationFunction (bundleIDNSStr, options, launch_err, &return_pid);
+ }
+#endif
+#ifdef WITH_FBS
+ if (m_flags & eMachProcessFlagsUsingFBS)
+ {
+ options = FBSCreateOptionsDictionary(app_bundle_path, launch_argv, launch_envp, stdio_path, disable_aslr, event_data);
+ success = FBSCallOpenApplicationFunction (bundleIDNSStr, options, launch_err, &return_pid);
+ }
+#endif
+
+ if (success)
+ {
+ int master_fd = pty.ReleaseMasterFD();
+ SetChildFileDescriptors(master_fd, master_fd, master_fd);
+ CFString::UTF8(bundleIDCFStr, m_bundle_id);
+ }
+
+ [pool drain];
+
+ return return_pid;
+}
+
+bool
+MachProcess::BoardServiceSendEvent (const char *event_data, DNBError &send_err)
+{
+ bool return_value = true;
+
+ if (event_data == NULL || *event_data == '\0')
+ {
+        DNBLogError ("SendEvent called with NULL or empty event data.");
+ send_err.SetErrorString("SendEvent called with empty event data");
+ return false;
+ }
+
+ NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+
+ if (strcmp (event_data, "BackgroundApplication") == 0)
+ {
+ // This is an event I cooked up. What you actually do is foreground the system app, so:
+#ifdef WITH_BKS
+ if (m_flags & eMachProcessFlagsUsingBKS)
+ {
+ return_value = BKSCallOpenApplicationFunction(nil, nil, send_err, NULL);
+ }
+#endif
+#ifdef WITH_FBS
+ if (m_flags & eMachProcessFlagsUsingFBS)
+ {
+ return_value = FBSCallOpenApplicationFunction(nil, nil, send_err, NULL);
+ }
+#endif
+ if (!return_value)
+ {
+ DNBLogError ("Failed to background application, error: %s.", send_err.AsString());
+ }
+ }
+ else
+ {
+ if (m_bundle_id.empty())
+ {
+ // See if we can figure out the bundle ID for this PID:
+
+ DNBLogError ("Tried to send event \"%s\" to a process that has no bundle ID.", event_data);
+ return false;
+ }
+
+ NSString *bundleIDNSStr = [NSString stringWithUTF8String:m_bundle_id.c_str()];
+
+ NSMutableDictionary *options = [NSMutableDictionary dictionary];
+
+#ifdef WITH_BKS
+ if (m_flags & eMachProcessFlagsUsingBKS)
+ {
+ if (!BKSAddEventDataToOptions(options, event_data, send_err))
+ {
+ [pool drain];
+ return false;
+ }
+ return_value = BKSCallOpenApplicationFunction (bundleIDNSStr, options, send_err, NULL);
+ DNBLogThreadedIf (LOG_PROCESS, "Called BKSCallOpenApplicationFunction to send event.");
+
+ }
+#endif
+#ifdef WITH_FBS
+ if (m_flags & eMachProcessFlagsUsingFBS)
+ {
+ if (!FBSAddEventDataToOptions(options, event_data, send_err))
+ {
+ [pool drain];
+ return false;
+ }
+ return_value = FBSCallOpenApplicationFunction (bundleIDNSStr, options, send_err, NULL);
+ DNBLogThreadedIf (LOG_PROCESS, "Called FBSCallOpenApplicationFunction to send event.");
+ }
+#endif
+
+ if (!return_value)
+ {
+ DNBLogError ("Failed to send event: %s, error: %s.", event_data, send_err.AsString());
+ }
+ }
+
+ [pool drain];
+ return return_value;
+}
+#endif // defined(WITH_BKS) || defined (WITH_FBS)
+
+#ifdef WITH_BKS
+void
+MachProcess::BKSCleanupAfterAttach (const void *attach_token, DNBError &err_str)
+{
+ bool success;
+
+ NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+
+    // The attach token is the app's bundle ID as a CFStringRef, so we'll just use toll-free bridging here:
+    NSString *bundleIDNSStr = (NSString *) attach_token;
+
+ // Okay, now let's assemble all these goodies into the BackBoardServices options mega-dictionary:
+
+ // First we have the debug sub-dictionary:
+ NSMutableDictionary *debug_options = [NSMutableDictionary dictionary];
+ [debug_options setObject: [NSNumber numberWithBool: YES] forKey: BKSDebugOptionKeyCancelDebugOnNextLaunch];
+
+ // That will go in the overall dictionary:
+
+ NSMutableDictionary *options = [NSMutableDictionary dictionary];
+ [options setObject: debug_options forKey: BKSOpenApplicationOptionKeyDebuggingOptions];
+
+ success = BKSCallOpenApplicationFunction (bundleIDNSStr, options, err_str, NULL);
+
+ if (!success)
+ {
+ DNBLogError ("error trying to cancel debug on next launch for %s: %s", [bundleIDNSStr UTF8String], err_str.AsString());
+ }
+
+ [pool drain];
+}
+#endif // WITH_BKS
+
+#ifdef WITH_FBS
+void
+MachProcess::FBSCleanupAfterAttach (const void *attach_token, DNBError &err_str)
+{
+ bool success;
+
+ NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+
+    // The attach token is the app's bundle ID as a CFStringRef, so we'll just use toll-free bridging here:
+    NSString *bundleIDNSStr = (NSString *) attach_token;
+
+    // Okay, now let's assemble all these goodies into the FrontBoardServices options mega-dictionary:
+
+ // First we have the debug sub-dictionary:
+ NSMutableDictionary *debug_options = [NSMutableDictionary dictionary];
+ [debug_options setObject: [NSNumber numberWithBool: YES] forKey: FBSDebugOptionKeyCancelDebugOnNextLaunch];
+
+ // That will go in the overall dictionary:
+
+ NSMutableDictionary *options = [NSMutableDictionary dictionary];
+ [options setObject: debug_options forKey: FBSOpenApplicationOptionKeyDebuggingOptions];
+
+ success = FBSCallOpenApplicationFunction (bundleIDNSStr, options, err_str, NULL);
+
+ if (!success)
+ {
+ DNBLogError ("error trying to cancel debug on next launch for %s: %s", [bundleIDNSStr UTF8String], err_str.AsString());
+ }
+
+ [pool drain];
+}
+#endif // WITH_FBS
diff --git a/tools/debugserver/source/MacOSX/MachTask.h b/tools/debugserver/source/MacOSX/MachTask.h
new file mode 100644
index 000000000000..96b991478c78
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/MachTask.h
@@ -0,0 +1,134 @@
+//===-- MachTask.h ----------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//----------------------------------------------------------------------
+//
+// MachTask.h
+// debugserver
+//
+// Created by Greg Clayton on 12/5/08.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __MachTask_h__
+#define __MachTask_h__
+
+// C Includes
+#include <mach/mach.h>
+#include <sys/socket.h>
+// C++ Includes
+#include <map>
+#include <string>
+// Other libraries and framework includes
+// Project includes
+#include "DNBDefs.h"
+#include "MachException.h"
+#include "MachVMMemory.h"
+#include "PThreadMutex.h"
+
+class MachProcess;
+
+typedef uint64_t MachMallocEventId;
+
+enum MachMallocEventType
+{
+ eMachMallocEventTypeAlloc = 2,
+ eMachMallocEventTypeDealloc = 4,
+ eMachMallocEventTypeOther = 1
+};
+
+struct MachMallocEvent
+{
+ mach_vm_address_t m_base_address;
+ uint64_t m_size;
+ MachMallocEventType m_event_type;
+ MachMallocEventId m_event_id;
+};
+
+class MachTask
+{
+public:
+ //------------------------------------------------------------------
+ // Constructors and Destructors
+ //------------------------------------------------------------------
+ MachTask (MachProcess *process);
+ virtual ~MachTask ();
+
+ void Clear ();
+
+ kern_return_t Suspend ();
+ kern_return_t Resume ();
+
+ nub_size_t ReadMemory (nub_addr_t addr, nub_size_t size, void *buf);
+ nub_size_t WriteMemory (nub_addr_t addr, nub_size_t size, const void *buf);
+ int GetMemoryRegionInfo (nub_addr_t addr, DNBRegionInfo *region_info);
+ std::string GetProfileData (DNBProfileDataScanType scanType);
+
+ nub_addr_t AllocateMemory (nub_size_t size, uint32_t permissions);
+ nub_bool_t DeallocateMemory (nub_addr_t addr);
+
+ mach_port_t ExceptionPort () const;
+ bool ExceptionPortIsValid () const;
+ kern_return_t SaveExceptionPortInfo ();
+ kern_return_t RestoreExceptionPortInfo ();
+ kern_return_t ShutDownExcecptionThread ();
+
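+    // StartExceptionThread() allocates a mach receive port, installs it as the
+    // task's exception port via task_set_exception_ports(), and spawns
+    // ExceptionThread() to receive the inferior's exception messages (see MachTask.mm).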
+ bool StartExceptionThread (DNBError &err);
+ nub_addr_t GetDYLDAllImageInfosAddress (DNBError& err);
+ kern_return_t BasicInfo (struct task_basic_info *info);
+ static kern_return_t BasicInfo (task_t task, struct task_basic_info *info);
+ bool IsValid () const;
+ static bool IsValid (task_t task);
+ static void * ExceptionThread (void *arg);
+ task_t TaskPort () const { return m_task; }
+ task_t TaskPortForProcessID (DNBError &err, bool force = false);
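+    // The static overload retries ::task_for_pid() up to num_retries times,
+    // sleeping usec_interval microseconds between attempts (see MachTask.mm).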
+ static task_t TaskPortForProcessID (pid_t pid, DNBError &err, uint32_t num_retries = 10, uint32_t usec_interval = 10000);
+
+ MachProcess * Process () { return m_process; }
+ const MachProcess * Process () const { return m_process; }
+
+ nub_size_t PageSize ();
+
+ bool HasMallocLoggingEnabled ();
+
+ // enumerate the malloc records for a given address (starting with Mac OS X 10.6 Snow Leopard it should include
+ // all allocations that *include* address, rather than just those *starting* at address)
+ bool EnumerateMallocRecords (mach_vm_address_t address,
+ MachMallocEvent *event_buffer,
+ uint32_t buffer_size,
+ uint32_t *count);
+
+ // enumerate every malloc record generated by this task, no matter what the address
+ bool EnumerateMallocRecords (MachMallocEvent *event_buffer,
+ uint32_t buffer_size,
+ uint32_t *count);
+
+ // given a malloc event, report every stack frame that led to this event
+ bool EnumerateMallocFrames (MachMallocEventId event_id,
+ mach_vm_address_t *function_addresses_buffer,
+ uint32_t buffer_size,
+ uint32_t *count);
+
+protected:
+ MachProcess * m_process; // The mach process that owns this MachTask
+ task_t m_task;
+ MachVMMemory m_vm_memory; // Special mach memory reading class that will take care of watching for page and region boundaries
+ MachException::PortInfo
+ m_exc_port_info; // Saved settings for all exception ports
+ pthread_t m_exception_thread; // Thread ID for the exception thread in case we need it
+ mach_port_t m_exception_port; // Exception port on which we will receive child exceptions
+
+ typedef std::map <mach_vm_address_t, size_t> allocation_collection;
+ allocation_collection m_allocations;
+
+private:
+ MachTask(const MachTask&); // Outlaw
+ MachTask& operator=(const MachTask& rhs);// Outlaw
+};
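+
+// Illustrative sketch (not part of this change): a MachProcess typically
+// drives its MachTask roughly like this once it owns a pid --
+//
+//   DNBError err;
+//   MachTask task (process);                  // "process" is the owning MachProcess
+//   if (task.TaskPortForProcessID (err) != TASK_NULL &&
+//       task.StartExceptionThread (err))      // begin receiving mach exceptions
+//   {
+//       uint8_t buf[16];
+//       task.ReadMemory (some_addr, sizeof(buf), buf); // "some_addr" is hypothetical
+//   }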
+
+#endif // __MachTask_h__
diff --git a/tools/debugserver/source/MacOSX/MachTask.mm b/tools/debugserver/source/MacOSX/MachTask.mm
new file mode 100644
index 000000000000..9c725663d559
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/MachTask.mm
@@ -0,0 +1,1144 @@
+//===-- MachTask.cpp --------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//----------------------------------------------------------------------
+//
+// MachTask.cpp
+// debugserver
+//
+// Created by Greg Clayton on 12/5/08.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MachTask.h"
+
+// C Includes
+
+#include <mach-o/dyld_images.h>
+#include <mach/mach_vm.h>
+#import <sys/sysctl.h>
+
+#if defined (__APPLE__)
+#include <pthread.h>
+#include <sched.h>
+#endif
+
+// C++ Includes
+#include <iomanip>
+#include <sstream>
+
+// Other libraries and framework includes
+// Project includes
+#include "CFUtils.h"
+#include "DNB.h"
+#include "DNBError.h"
+#include "DNBLog.h"
+#include "MachProcess.h"
+#include "DNBDataRef.h"
+#include "stack_logging.h"
+
+#ifdef WITH_SPRINGBOARD
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <SpringBoardServices/SpringBoardServer.h>
+#include <SpringBoardServices/SBSWatchdogAssertion.h>
+
+#endif
+
+#ifdef WITH_BKS
+extern "C"
+{
+ #import <Foundation/Foundation.h>
+ #import <BackBoardServices/BackBoardServices.h>
+ #import <BackBoardServices/BKSWatchdogAssertion.h>
+}
+#endif
+
+#include <AvailabilityMacros.h>
+
+#ifdef LLDB_ENERGY
+#include <mach/mach_time.h>
+#include <pmenergy.h>
+#include <pmsample.h>
+#endif
+
+
+//----------------------------------------------------------------------
+// MachTask constructor
+//----------------------------------------------------------------------
+MachTask::MachTask(MachProcess *process) :
+ m_process (process),
+ m_task (TASK_NULL),
+ m_vm_memory (),
+ m_exception_thread (0),
+ m_exception_port (MACH_PORT_NULL)
+{
+ memset(&m_exc_port_info, 0, sizeof(m_exc_port_info));
+}
+
+//----------------------------------------------------------------------
+// Destructor
+//----------------------------------------------------------------------
+MachTask::~MachTask()
+{
+ Clear();
+}
+
+
+//----------------------------------------------------------------------
+// MachTask::Suspend
+//----------------------------------------------------------------------
+kern_return_t
+MachTask::Suspend()
+{
+ DNBError err;
+ task_t task = TaskPort();
+ err = ::task_suspend (task);
+ if (DNBLogCheckLogBit(LOG_TASK) || err.Fail())
+ err.LogThreaded("::task_suspend ( target_task = 0x%4.4x )", task);
+ return err.Error();
+}
+
+
+//----------------------------------------------------------------------
+// MachTask::Resume
+//----------------------------------------------------------------------
+kern_return_t
+MachTask::Resume()
+{
+ struct task_basic_info task_info;
+ task_t task = TaskPort();
+ if (task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ DNBError err;
+ err = BasicInfo(task, &task_info);
+
+ if (err.Success())
+ {
+        // task_resume isn't counted the way task_suspend calls are, so if the
+        // task is not suspended, don't try to resume it since it is already
+        // running.
+ if (task_info.suspend_count > 0)
+ {
+ err = ::task_resume (task);
+ if (DNBLogCheckLogBit(LOG_TASK) || err.Fail())
+ err.LogThreaded("::task_resume ( target_task = 0x%4.4x )", task);
+ }
+ }
+ return err.Error();
+}
+
+//----------------------------------------------------------------------
+// MachTask::ExceptionPort
+//----------------------------------------------------------------------
+mach_port_t
+MachTask::ExceptionPort() const
+{
+ return m_exception_port;
+}
+
+//----------------------------------------------------------------------
+// MachTask::ExceptionPortIsValid
+//----------------------------------------------------------------------
+bool
+MachTask::ExceptionPortIsValid() const
+{
+ return MACH_PORT_VALID(m_exception_port);
+}
+
+
+//----------------------------------------------------------------------
+// MachTask::Clear
+//----------------------------------------------------------------------
+void
+MachTask::Clear()
+{
+ // Do any cleanup needed for this task
+ m_task = TASK_NULL;
+ m_exception_thread = 0;
+ m_exception_port = MACH_PORT_NULL;
+
+}
+
+
+//----------------------------------------------------------------------
+// MachTask::SaveExceptionPortInfo
+//----------------------------------------------------------------------
+kern_return_t
+MachTask::SaveExceptionPortInfo()
+{
+ return m_exc_port_info.Save(TaskPort());
+}
+
+//----------------------------------------------------------------------
+// MachTask::RestoreExceptionPortInfo
+//----------------------------------------------------------------------
+kern_return_t
+MachTask::RestoreExceptionPortInfo()
+{
+ return m_exc_port_info.Restore(TaskPort());
+}
+
+
+//----------------------------------------------------------------------
+// MachTask::ReadMemory
+//----------------------------------------------------------------------
+nub_size_t
+MachTask::ReadMemory (nub_addr_t addr, nub_size_t size, void *buf)
+{
+ nub_size_t n = 0;
+ task_t task = TaskPort();
+ if (task != TASK_NULL)
+ {
+ n = m_vm_memory.Read(task, addr, buf, size);
+
+ DNBLogThreadedIf(LOG_MEMORY, "MachTask::ReadMemory ( addr = 0x%8.8llx, size = %llu, buf = %p) => %llu bytes read", (uint64_t)addr, (uint64_t)size, buf, (uint64_t)n);
+ if (DNBLogCheckLogBit(LOG_MEMORY_DATA_LONG) || (DNBLogCheckLogBit(LOG_MEMORY_DATA_SHORT) && size <= 8))
+ {
+ DNBDataRef data((uint8_t*)buf, n, false);
+ data.Dump(0, static_cast<DNBDataRef::offset_t>(n), addr, DNBDataRef::TypeUInt8, 16);
+ }
+ }
+ return n;
+}
+
+
+//----------------------------------------------------------------------
+// MachTask::WriteMemory
+//----------------------------------------------------------------------
+nub_size_t
+MachTask::WriteMemory (nub_addr_t addr, nub_size_t size, const void *buf)
+{
+ nub_size_t n = 0;
+ task_t task = TaskPort();
+ if (task != TASK_NULL)
+ {
+ n = m_vm_memory.Write(task, addr, buf, size);
+ DNBLogThreadedIf(LOG_MEMORY, "MachTask::WriteMemory ( addr = 0x%8.8llx, size = %llu, buf = %p) => %llu bytes written", (uint64_t)addr, (uint64_t)size, buf, (uint64_t)n);
+ if (DNBLogCheckLogBit(LOG_MEMORY_DATA_LONG) || (DNBLogCheckLogBit(LOG_MEMORY_DATA_SHORT) && size <= 8))
+ {
+ DNBDataRef data((uint8_t*)buf, n, false);
+ data.Dump(0, static_cast<DNBDataRef::offset_t>(n), addr, DNBDataRef::TypeUInt8, 16);
+ }
+ }
+ return n;
+}
+
+//----------------------------------------------------------------------
+// MachTask::MemoryRegionInfo
+//----------------------------------------------------------------------
+int
+MachTask::GetMemoryRegionInfo (nub_addr_t addr, DNBRegionInfo *region_info)
+{
+ task_t task = TaskPort();
+ if (task == TASK_NULL)
+ return -1;
+
+ int ret = m_vm_memory.GetMemoryRegionInfo(task, addr, region_info);
+ DNBLogThreadedIf(LOG_MEMORY, "MachTask::MemoryRegionInfo ( addr = 0x%8.8llx ) => %i (start = 0x%8.8llx, size = 0x%8.8llx, permissions = %u)",
+ (uint64_t)addr,
+ ret,
+ (uint64_t)region_info->addr,
+ (uint64_t)region_info->size,
+ region_info->permissions);
+ return ret;
+}
+
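+// Convert a mach time_value_t (seconds + microseconds) into a BSD struct timeval
+// so the timeradd() macro can be used on task and thread CPU times below.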
+#define TIME_VALUE_TO_TIMEVAL(a, r) do { \
+(r)->tv_sec = (a)->seconds; \
+(r)->tv_usec = (a)->microseconds; \
+} while (0)
+
+// We should consider moving this into each MachThread.
+static void get_threads_profile_data(DNBProfileDataScanType scanType, task_t task, nub_process_t pid, std::vector<uint64_t> &threads_id, std::vector<std::string> &threads_name, std::vector<uint64_t> &threads_used_usec)
+{
+ kern_return_t kr;
+ thread_act_array_t threads;
+ mach_msg_type_number_t tcnt;
+
+ kr = task_threads(task, &threads, &tcnt);
+ if (kr != KERN_SUCCESS)
+ return;
+
+ for (mach_msg_type_number_t i = 0; i < tcnt; i++)
+ {
+ thread_identifier_info_data_t identifier_info;
+ mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
+ kr = ::thread_info(threads[i], THREAD_IDENTIFIER_INFO, (thread_info_t)&identifier_info, &count);
+ if (kr != KERN_SUCCESS) continue;
+
+ thread_basic_info_data_t basic_info;
+ count = THREAD_BASIC_INFO_COUNT;
+ kr = ::thread_info(threads[i], THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count);
+ if (kr != KERN_SUCCESS) continue;
+
+ if ((basic_info.flags & TH_FLAGS_IDLE) == 0)
+ {
+ nub_thread_t tid = MachThread::GetGloballyUniqueThreadIDForMachPortID (threads[i]);
+ threads_id.push_back(tid);
+
+ if ((scanType & eProfileThreadName) && (identifier_info.thread_handle != 0))
+ {
+ struct proc_threadinfo proc_threadinfo;
+ int len = ::proc_pidinfo(pid, PROC_PIDTHREADINFO, identifier_info.thread_handle, &proc_threadinfo, PROC_PIDTHREADINFO_SIZE);
+ if (len && proc_threadinfo.pth_name[0])
+ {
+ threads_name.push_back(proc_threadinfo.pth_name);
+ }
+ else
+ {
+ threads_name.push_back("");
+ }
+ }
+ else
+ {
+ threads_name.push_back("");
+ }
+ struct timeval tv;
+ struct timeval thread_tv;
+ TIME_VALUE_TO_TIMEVAL(&basic_info.user_time, &thread_tv);
+ TIME_VALUE_TO_TIMEVAL(&basic_info.system_time, &tv);
+ timeradd(&thread_tv, &tv, &thread_tv);
+ uint64_t used_usec = thread_tv.tv_sec * 1000000ULL + thread_tv.tv_usec;
+ threads_used_usec.push_back(used_usec);
+ }
+
+ mach_port_deallocate(mach_task_self(), threads[i]);
+ }
+ mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)threads, tcnt * sizeof(*threads));
+}
+
+#define RAW_HEXBASE std::setfill('0') << std::hex << std::right
+#define DECIMAL std::dec << std::setfill(' ')
+std::string
+MachTask::GetProfileData (DNBProfileDataScanType scanType)
+{
+ std::string result;
+
+ static int32_t numCPU = -1;
+ struct host_cpu_load_info host_info;
+ if (scanType & eProfileHostCPU)
+ {
+ int32_t mib[] = {CTL_HW, HW_AVAILCPU};
+ size_t len = sizeof(numCPU);
+ if (numCPU == -1)
+ {
+ if (sysctl(mib, sizeof(mib) / sizeof(int32_t), &numCPU, &len, NULL, 0) != 0)
+ return result;
+ }
+
+ mach_port_t localHost = mach_host_self();
+ mach_msg_type_number_t count = HOST_CPU_LOAD_INFO_COUNT;
+ kern_return_t kr = host_statistics(localHost, HOST_CPU_LOAD_INFO, (host_info_t)&host_info, &count);
+ if (kr != KERN_SUCCESS)
+ return result;
+ }
+
+ task_t task = TaskPort();
+ if (task == TASK_NULL)
+ return result;
+
+ pid_t pid = m_process->ProcessID();
+
+ struct task_basic_info task_info;
+ DNBError err;
+ err = BasicInfo(task, &task_info);
+
+ if (!err.Success())
+ return result;
+
+ uint64_t elapsed_usec = 0;
+ uint64_t task_used_usec = 0;
+ if (scanType & eProfileCPU)
+ {
+ // Get current used time.
+ struct timeval current_used_time;
+ struct timeval tv;
+ TIME_VALUE_TO_TIMEVAL(&task_info.user_time, &current_used_time);
+ TIME_VALUE_TO_TIMEVAL(&task_info.system_time, &tv);
+ timeradd(&current_used_time, &tv, &current_used_time);
+ task_used_usec = current_used_time.tv_sec * 1000000ULL + current_used_time.tv_usec;
+
+ struct timeval current_elapsed_time;
+ int res = gettimeofday(&current_elapsed_time, NULL);
+ if (res == 0)
+ {
+ elapsed_usec = current_elapsed_time.tv_sec * 1000000ULL + current_elapsed_time.tv_usec;
+ }
+ }
+
+ std::vector<uint64_t> threads_id;
+ std::vector<std::string> threads_name;
+ std::vector<uint64_t> threads_used_usec;
+
+ if (scanType & eProfileThreadsCPU)
+ {
+ get_threads_profile_data(scanType, task, pid, threads_id, threads_name, threads_used_usec);
+ }
+
+#if defined (HOST_VM_INFO64_COUNT)
+ vm_statistics64_data_t vminfo;
+#else
+ struct vm_statistics vminfo;
+#endif
+ uint64_t physical_memory;
+ mach_vm_size_t rprvt = 0;
+ mach_vm_size_t rsize = 0;
+ mach_vm_size_t vprvt = 0;
+ mach_vm_size_t vsize = 0;
+ mach_vm_size_t dirty_size = 0;
+ mach_vm_size_t purgeable = 0;
+ mach_vm_size_t anonymous = 0;
+ if (m_vm_memory.GetMemoryProfile(scanType, task, task_info, m_process->GetCPUType(), pid, vminfo, physical_memory, rprvt, rsize, vprvt, vsize, dirty_size, purgeable, anonymous))
+ {
+ std::ostringstream profile_data_stream;
+
+ if (scanType & eProfileHostCPU)
+ {
+ profile_data_stream << "num_cpu:" << numCPU << ';';
+ profile_data_stream << "host_user_ticks:" << host_info.cpu_ticks[CPU_STATE_USER] << ';';
+ profile_data_stream << "host_sys_ticks:" << host_info.cpu_ticks[CPU_STATE_SYSTEM] << ';';
+ profile_data_stream << "host_idle_ticks:" << host_info.cpu_ticks[CPU_STATE_IDLE] << ';';
+ }
+
+ if (scanType & eProfileCPU)
+ {
+ profile_data_stream << "elapsed_usec:" << elapsed_usec << ';';
+ profile_data_stream << "task_used_usec:" << task_used_usec << ';';
+ }
+
+ if (scanType & eProfileThreadsCPU)
+ {
+ const size_t num_threads = threads_id.size();
+ for (size_t i=0; i<num_threads; i++)
+ {
+ profile_data_stream << "thread_used_id:" << std::hex << threads_id[i] << std::dec << ';';
+ profile_data_stream << "thread_used_usec:" << threads_used_usec[i] << ';';
+
+ if (scanType & eProfileThreadName)
+ {
+ profile_data_stream << "thread_used_name:";
+ const size_t len = threads_name[i].size();
+ if (len)
+ {
+ const char *thread_name = threads_name[i].c_str();
+                        // Hex-encode the thread name so it cannot contain our ';' delimiter.
+                        profile_data_stream << RAW_HEXBASE;
+                        const uint8_t *ubuf8 = (const uint8_t *)(thread_name);
+                        for (size_t j=0; j<len; j++)
+                        {
+                            // std::setw() only applies to the next insertion, so apply it per byte.
+                            profile_data_stream << std::setw(2) << (uint32_t)(ubuf8[j]);
+                        }
+ // Reset back to DECIMAL.
+ profile_data_stream << DECIMAL;
+ }
+ profile_data_stream << ';';
+ }
+ }
+ }
+
+ if (scanType & eProfileHostMemory)
+ profile_data_stream << "total:" << physical_memory << ';';
+
+ if (scanType & eProfileMemory)
+ {
+#if defined (HOST_VM_INFO64_COUNT) && defined (_VM_PAGE_SIZE_H_)
+ static vm_size_t pagesize = vm_kernel_page_size;
+#else
+ static vm_size_t pagesize;
+ static bool calculated = false;
+ if (!calculated)
+ {
+ calculated = true;
+ pagesize = PageSize();
+ }
+#endif
+
+ /* Unused values. Optimized out for transfer performance.
+ profile_data_stream << "wired:" << vminfo.wire_count * pagesize << ';';
+ profile_data_stream << "active:" << vminfo.active_count * pagesize << ';';
+ profile_data_stream << "inactive:" << vminfo.inactive_count * pagesize << ';';
+ */
+#if defined (HOST_VM_INFO64_COUNT)
+            // This mimics Activity Monitor.
+ uint64_t total_used_count = (physical_memory / pagesize) - (vminfo.free_count - vminfo.speculative_count) - vminfo.external_page_count - vminfo.purgeable_count;
+#else
+ uint64_t total_used_count = vminfo.wire_count + vminfo.inactive_count + vminfo.active_count;
+#endif
+ profile_data_stream << "used:" << total_used_count * pagesize << ';';
+ /* Unused values. Optimized out for transfer performance.
+ profile_data_stream << "free:" << vminfo.free_count * pagesize << ';';
+ */
+
+ profile_data_stream << "rprvt:" << rprvt << ';';
+ /* Unused values. Optimized out for transfer performance.
+ profile_data_stream << "rsize:" << rsize << ';';
+ profile_data_stream << "vprvt:" << vprvt << ';';
+ profile_data_stream << "vsize:" << vsize << ';';
+ */
+
+ if (scanType & eProfileMemoryDirtyPage)
+ profile_data_stream << "dirty:" << dirty_size << ';';
+
+ if (scanType & eProfileMemoryAnonymous)
+ {
+ profile_data_stream << "purgeable:" << purgeable << ';';
+ profile_data_stream << "anonymous:" << anonymous << ';';
+ }
+ }
+
+    // proc_pid_rusage, pm_sample_task_and_pid, and pm_energy_impact need to be tested for weakness in Cab
+#ifdef LLDB_ENERGY
+ if ((scanType & eProfileEnergy) && (pm_sample_task_and_pid != NULL))
+ {
+ struct rusage_info_v2 info;
+ int rc = proc_pid_rusage(pid, RUSAGE_INFO_V2, (rusage_info_t *)&info);
+ if (rc == 0)
+ {
+ uint64_t now = mach_absolute_time();
+ pm_task_energy_data_t pm_energy;
+ memset(&pm_energy, 0, sizeof(pm_energy));
+ /*
+ * Disable most features of pm_sample_pid. It will gather
+ * network/GPU/WindowServer information; fill in the rest.
+ */
+ pm_sample_task_and_pid(task, pid, &pm_energy, now, PM_SAMPLE_ALL & ~PM_SAMPLE_NAME & ~PM_SAMPLE_INTERVAL & ~PM_SAMPLE_CPU & ~PM_SAMPLE_DISK);
+ pm_energy.sti.total_user = info.ri_user_time;
+ pm_energy.sti.total_system = info.ri_system_time;
+ pm_energy.sti.task_interrupt_wakeups = info.ri_interrupt_wkups;
+ pm_energy.sti.task_platform_idle_wakeups = info.ri_pkg_idle_wkups;
+ pm_energy.diskio_bytesread = info.ri_diskio_bytesread;
+ pm_energy.diskio_byteswritten = info.ri_diskio_byteswritten;
+ pm_energy.pageins = info.ri_pageins;
+
+ uint64_t total_energy = (uint64_t)(pm_energy_impact(&pm_energy) * NSEC_PER_SEC);
+ //uint64_t process_age = now - info.ri_proc_start_abstime;
+ //uint64_t avg_energy = 100.0 * (double)total_energy / (double)process_age;
+
+ profile_data_stream << "energy:" << total_energy << ';';
+ }
+ }
+#endif
+
+ profile_data_stream << "--end--;";
+
+ result = profile_data_stream.str();
+ }
+
+ return result;
+}
+
+
+//----------------------------------------------------------------------
+// MachTask::TaskPortForProcessID
+//----------------------------------------------------------------------
+task_t
+MachTask::TaskPortForProcessID (DNBError &err, bool force)
+{
+ if (((m_task == TASK_NULL) || force) && m_process != NULL)
+ m_task = MachTask::TaskPortForProcessID(m_process->ProcessID(), err);
+ return m_task;
+}
+
+//----------------------------------------------------------------------
+// MachTask::TaskPortForProcessID
+//----------------------------------------------------------------------
+task_t
+MachTask::TaskPortForProcessID (pid_t pid, DNBError &err, uint32_t num_retries, uint32_t usec_interval)
+{
+ if (pid != INVALID_NUB_PROCESS)
+ {
+ mach_port_t task_self = mach_task_self ();
+ task_t task = TASK_NULL;
+ for (uint32_t i=0; i<num_retries; i++)
+ {
+ err = ::task_for_pid ( task_self, pid, &task);
+
+ if (DNBLogCheckLogBit(LOG_TASK) || err.Fail())
+ {
+ char str[1024];
+ ::snprintf (str,
+ sizeof(str),
+ "::task_for_pid ( target_tport = 0x%4.4x, pid = %d, &task ) => err = 0x%8.8x (%s)",
+ task_self,
+ pid,
+ err.Error(),
+ err.AsString() ? err.AsString() : "success");
+ if (err.Fail())
+ err.SetErrorString(str);
+ err.LogThreaded(str);
+ }
+
+ if (err.Success())
+ return task;
+
+ // Sleep a bit and try again
+ ::usleep (usec_interval);
+ }
+ }
+ return TASK_NULL;
+}
+
+
+//----------------------------------------------------------------------
+// MachTask::BasicInfo
+//----------------------------------------------------------------------
+kern_return_t
+MachTask::BasicInfo(struct task_basic_info *info)
+{
+ return BasicInfo (TaskPort(), info);
+}
+
+//----------------------------------------------------------------------
+// MachTask::BasicInfo
+//----------------------------------------------------------------------
+kern_return_t
+MachTask::BasicInfo(task_t task, struct task_basic_info *info)
+{
+ if (info == NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ DNBError err;
+ mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
+ err = ::task_info (task, TASK_BASIC_INFO, (task_info_t)info, &count);
+ const bool log_process = DNBLogCheckLogBit(LOG_TASK);
+ if (log_process || err.Fail())
+ err.LogThreaded("::task_info ( target_task = 0x%4.4x, flavor = TASK_BASIC_INFO, task_info_out => %p, task_info_outCnt => %u )", task, info, count);
+ if (DNBLogCheckLogBit(LOG_TASK) && DNBLogCheckLogBit(LOG_VERBOSE) && err.Success())
+ {
+ float user = (float)info->user_time.seconds + (float)info->user_time.microseconds / 1000000.0f;
+        float system = (float)info->system_time.seconds + (float)info->system_time.microseconds / 1000000.0f;
+ DNBLogThreaded ("task_basic_info = { suspend_count = %i, virtual_size = 0x%8.8llx, resident_size = 0x%8.8llx, user_time = %f, system_time = %f }",
+ info->suspend_count,
+ (uint64_t)info->virtual_size,
+ (uint64_t)info->resident_size,
+ user,
+ system);
+ }
+ return err.Error();
+}
+
+
+//----------------------------------------------------------------------
+// MachTask::IsValid
+//
+// Returns true if the task is a valid task port for a currently running process.
+//----------------------------------------------------------------------
+bool
+MachTask::IsValid () const
+{
+ return MachTask::IsValid(TaskPort());
+}
+
+//----------------------------------------------------------------------
+// MachTask::IsValid
+//
+// Returns true if the task is a valid task port for a currently running process.
+//----------------------------------------------------------------------
+bool
+MachTask::IsValid (task_t task)
+{
+ if (task != TASK_NULL)
+ {
+ struct task_basic_info task_info;
+ return BasicInfo(task, &task_info) == KERN_SUCCESS;
+ }
+ return false;
+}
+
+
+bool
+MachTask::StartExceptionThread(DNBError &err)
+{
+ DNBLogThreadedIf(LOG_EXCEPTIONS, "MachTask::%s ( )", __FUNCTION__);
+
+ task_t task = TaskPortForProcessID(err);
+ if (MachTask::IsValid(task))
+ {
+ // Got the mach port for the current process
+ mach_port_t task_self = mach_task_self ();
+
+ // Allocate an exception port that we will use to track our child process
+ err = ::mach_port_allocate (task_self, MACH_PORT_RIGHT_RECEIVE, &m_exception_port);
+ if (err.Fail())
+ return false;
+
+ // Add the ability to send messages on the new exception port
+ err = ::mach_port_insert_right (task_self, m_exception_port, m_exception_port, MACH_MSG_TYPE_MAKE_SEND);
+ if (err.Fail())
+ return false;
+
+ // Save the original state of the exception ports for our child process
+ SaveExceptionPortInfo();
+
+        // If we weren't able to save the info for our exception ports, we must stop...
+ if (m_exc_port_info.mask == 0)
+ {
+ err.SetErrorString("failed to get exception port info");
+ return false;
+ }
+
+ // Set the ability to get all exceptions on this port
+ err = ::task_set_exception_ports (task, m_exc_port_info.mask, m_exception_port, EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES, THREAD_STATE_NONE);
+ if (DNBLogCheckLogBit(LOG_EXCEPTIONS) || err.Fail())
+ {
+ err.LogThreaded("::task_set_exception_ports ( task = 0x%4.4x, exception_mask = 0x%8.8x, new_port = 0x%4.4x, behavior = 0x%8.8x, new_flavor = 0x%8.8x )",
+ task,
+ m_exc_port_info.mask,
+ m_exception_port,
+ (EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES),
+ THREAD_STATE_NONE);
+ }
+
+ if (err.Fail())
+ return false;
+
+ // Create the exception thread
+ err = ::pthread_create (&m_exception_thread, NULL, MachTask::ExceptionThread, this);
+ return err.Success();
+ }
+ else
+ {
+ DNBLogError("MachTask::%s (): task invalid, exception thread start failed.", __FUNCTION__);
+ }
+ return false;
+}
+
+kern_return_t
+MachTask::ShutDownExcecptionThread()
+{
+ DNBError err;
+
+ err = RestoreExceptionPortInfo();
+
+    // NULL out our exception port and let our exception thread exit
+ mach_port_t exception_port = m_exception_port;
+ m_exception_port = 0;
+
+ err.SetError(::pthread_cancel(m_exception_thread), DNBError::POSIX);
+ if (DNBLogCheckLogBit(LOG_TASK) || err.Fail())
+ err.LogThreaded("::pthread_cancel ( thread = %p )", m_exception_thread);
+
+ err.SetError(::pthread_join(m_exception_thread, NULL), DNBError::POSIX);
+ if (DNBLogCheckLogBit(LOG_TASK) || err.Fail())
+ err.LogThreaded("::pthread_join ( thread = %p, value_ptr = NULL)", m_exception_thread);
+
+ // Deallocate our exception port that we used to track our child process
+ mach_port_t task_self = mach_task_self ();
+ err = ::mach_port_deallocate (task_self, exception_port);
+ if (DNBLogCheckLogBit(LOG_TASK) || err.Fail())
+ err.LogThreaded("::mach_port_deallocate ( task = 0x%4.4x, name = 0x%4.4x )", task_self, exception_port);
+
+ return err.Error();
+}
+
+
+void *
+MachTask::ExceptionThread (void *arg)
+{
+ if (arg == NULL)
+ return NULL;
+
+ MachTask *mach_task = (MachTask*) arg;
+ MachProcess *mach_proc = mach_task->Process();
+ DNBLogThreadedIf(LOG_EXCEPTIONS, "MachTask::%s ( arg = %p ) starting thread...", __FUNCTION__, arg);
+
+#if defined (__APPLE__)
+ pthread_setname_np ("exception monitoring thread");
+#if defined (__arm__) || defined (__arm64__) || defined (__aarch64__)
+ struct sched_param thread_param;
+ int thread_sched_policy;
+ if (pthread_getschedparam(pthread_self(), &thread_sched_policy, &thread_param) == 0)
+ {
+ thread_param.sched_priority = 47;
+ pthread_setschedparam(pthread_self(), thread_sched_policy, &thread_param);
+ }
+#endif
+#endif
+
+ // We keep a count of the number of consecutive exceptions received so
+ // we know to grab all exceptions without a timeout. We do this to get a
+ // bunch of related exceptions on our exception port so we can process
+ // them together. When we have multiple threads, we can get an exception
+ // per thread and they will come in consecutively. The main loop in this
+ // thread can stop periodically if needed to service things related to this
+ // process.
+ // While no exceptions are pending we issue the mach_msg() read without the
+ // MACH_RCV_TIMEOUT flag set in the options (unless we need to stop
+ // periodically), so we will wait forever for an exception on our exception
+ // port. After we get one exception, we then will use the MACH_RCV_TIMEOUT
+ // option with a zero timeout to grab all other current exceptions for our
+ // process. After we have received the last pending exception, we will get
+ // a timeout which enables us to then notify our main thread that we have an
+ // exception bundle available. We then wait for the main thread to tell this
+ // exception thread to start trying to get exception messages again and we
+ // start again with a mach_msg read with infinite timeout.
+ uint32_t num_exceptions_received = 0;
+ DNBError err;
+ task_t task = mach_task->TaskPort();
+ mach_msg_timeout_t periodic_timeout = 0;
+
+#if defined (WITH_SPRINGBOARD) && !defined (WITH_BKS)
+ mach_msg_timeout_t watchdog_elapsed = 0;
+ mach_msg_timeout_t watchdog_timeout = 60 * 1000;
+ pid_t pid = mach_proc->ProcessID();
+ CFReleaser<SBSWatchdogAssertionRef> watchdog;
+
+ if (mach_proc->ProcessUsingSpringBoard())
+ {
+ // Request a renewal for every 60 seconds if we attached using SpringBoard
+ watchdog.reset(::SBSWatchdogAssertionCreateForPID(NULL, pid, 60));
+ DNBLogThreadedIf(LOG_TASK, "::SBSWatchdogAssertionCreateForPID (NULL, %4.4x, 60 ) => %p", pid, watchdog.get());
+
+ if (watchdog.get())
+ {
+ ::SBSWatchdogAssertionRenew (watchdog.get());
+
+ CFTimeInterval watchdogRenewalInterval = ::SBSWatchdogAssertionGetRenewalInterval (watchdog.get());
+ DNBLogThreadedIf(LOG_TASK, "::SBSWatchdogAssertionGetRenewalInterval ( %p ) => %g seconds", watchdog.get(), watchdogRenewalInterval);
+ if (watchdogRenewalInterval > 0.0)
+ {
+ watchdog_timeout = (mach_msg_timeout_t)watchdogRenewalInterval * 1000;
+ if (watchdog_timeout > 3000)
+ watchdog_timeout -= 1000; // Give us a second to renew our timeout
+ else if (watchdog_timeout > 1000)
+ watchdog_timeout -= 250; // Give us a quarter of a second to renew our timeout
+ }
+ }
+ if (periodic_timeout == 0 || periodic_timeout > watchdog_timeout)
+ periodic_timeout = watchdog_timeout;
+ }
+#endif // #if defined (WITH_SPRINGBOARD) && !defined (WITH_BKS)
+
+#ifdef WITH_BKS
+ CFReleaser<BKSWatchdogAssertionRef> watchdog;
+ if (mach_proc->ProcessUsingBackBoard())
+ {
+ pid_t pid = mach_proc->ProcessID();
+ CFAllocatorRef alloc = kCFAllocatorDefault;
+ watchdog.reset(::BKSWatchdogAssertionCreateForPID(alloc, pid));
+ }
+#endif // #ifdef WITH_BKS
+
+ while (mach_task->ExceptionPortIsValid())
+ {
+ ::pthread_testcancel ();
+
+ MachException::Message exception_message;
+
+
+ if (num_exceptions_received > 0)
+ {
+ // No timeout, just receive as many exceptions as we can since we already have one and we want
+ // to get all currently available exceptions for this task
+ err = exception_message.Receive(mach_task->ExceptionPort(), MACH_RCV_MSG | MACH_RCV_INTERRUPT | MACH_RCV_TIMEOUT, 0);
+ }
+ else if (periodic_timeout > 0)
+ {
+ // We need to stop periodically in this loop, so try and get a mach message with a valid timeout (ms)
+ err = exception_message.Receive(mach_task->ExceptionPort(), MACH_RCV_MSG | MACH_RCV_INTERRUPT | MACH_RCV_TIMEOUT, periodic_timeout);
+ }
+ else
+ {
+ // We don't need to parse all current exceptions or stop periodically,
+ // just wait for an exception forever.
+ err = exception_message.Receive(mach_task->ExceptionPort(), MACH_RCV_MSG | MACH_RCV_INTERRUPT, 0);
+ }
+
+ if (err.Error() == MACH_RCV_INTERRUPTED)
+ {
+ // If we have no task port we should exit this thread
+ if (!mach_task->ExceptionPortIsValid())
+ {
+ DNBLogThreadedIf(LOG_EXCEPTIONS, "thread cancelled...");
+ break;
+ }
+
+ // Make sure our task is still valid
+ if (MachTask::IsValid(task))
+ {
+ // Task is still ok
+ DNBLogThreadedIf(LOG_EXCEPTIONS, "interrupted, but task still valid, continuing...");
+ continue;
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_EXCEPTIONS, "task has exited...");
+ mach_proc->SetState(eStateExited);
+ // Our task has died, exit the thread.
+ break;
+ }
+ }
+ else if (err.Error() == MACH_RCV_TIMED_OUT)
+ {
+ if (num_exceptions_received > 0)
+ {
+ // We were receiving all current exceptions with a timeout of zero;
+ // it is time to go back to our normal looping mode
+ num_exceptions_received = 0;
+
+ // Notify our main thread we have a complete exception message
+ // bundle available and get the possibly updated task port back
+ // from the process in case we exec'ed and our task port changed
+ task = mach_proc->ExceptionMessageBundleComplete();
+
+ // In case we were using a timeout value when getting exceptions,
+ // make sure our task is still valid
+ if (MachTask::IsValid(task))
+ {
+ // Task is still ok
+ DNBLogThreadedIf(LOG_EXCEPTIONS, "got a timeout, continuing...");
+ continue;
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_EXCEPTIONS, "task has exited...");
+ mach_proc->SetState(eStateExited);
+ // Our task has died, exit the thread.
+ break;
+ }
+ }
+
+#if defined (WITH_SPRINGBOARD) && !defined (WITH_BKS)
+ if (watchdog.get())
+ {
+ watchdog_elapsed += periodic_timeout;
+ if (watchdog_elapsed >= watchdog_timeout)
+ {
+ DNBLogThreadedIf(LOG_TASK, "SBSWatchdogAssertionRenew ( %p )", watchdog.get());
+ ::SBSWatchdogAssertionRenew (watchdog.get());
+ watchdog_elapsed = 0;
+ }
+ }
+#endif
+ }
+ else if (err.Error() != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf(LOG_EXCEPTIONS, "got some other error, do something about it??? nah, continuing for now...");
+ // TODO: notify of error?
+ }
+ else
+ {
+ if (exception_message.CatchExceptionRaise(task))
+ {
+ ++num_exceptions_received;
+ mach_proc->ExceptionMessageReceived(exception_message);
+ }
+ }
+ }
+
+#if defined (WITH_SPRINGBOARD) && !defined (WITH_BKS)
+ if (watchdog.get())
+ {
+ // TODO: change SBSWatchdogAssertionRelease to SBSWatchdogAssertionCancel when we
+ // all are up and running on systems that support it. The SBS framework has a #define
+ // that will forward SBSWatchdogAssertionRelease to SBSWatchdogAssertionCancel for now
+ // so it should still build either way.
+ DNBLogThreadedIf(LOG_TASK, "::SBSWatchdogAssertionRelease(%p)", watchdog.get());
+ ::SBSWatchdogAssertionRelease (watchdog.get());
+ }
+#endif // #if defined (WITH_SPRINGBOARD) && !defined (WITH_BKS)
+
+ DNBLogThreadedIf(LOG_EXCEPTIONS, "MachTask::%s (%p): thread exiting...", __FUNCTION__, arg);
+ return NULL;
+}
+
+
+// The TASK_DYLD_INFO structure used to just return the address of the all
+// image infos as a single member called "all_image_info". Then someone
+// decided it would be a good idea to rename this first member to
+// "all_image_info_addr" and add a size member called "all_image_info_size".
+// This of course cannot be detected using code or #defines. So to hack around
+// this problem, we define our own version of the TASK_DYLD_INFO structure so
+// we can guarantee what is inside it.
+
+struct hack_task_dyld_info {
+ mach_vm_address_t all_image_info_addr;
+ mach_vm_size_t all_image_info_size;
+};
+
+nub_addr_t
+MachTask::GetDYLDAllImageInfosAddress (DNBError& err)
+{
+ struct hack_task_dyld_info dyld_info;
+ mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT;
+ // Make sure that COUNT isn't bigger than our hacked up struct hack_task_dyld_info.
+ // If it is, then make COUNT smaller to match.
+ if (count > (sizeof(struct hack_task_dyld_info) / sizeof(natural_t)))
+ count = (sizeof(struct hack_task_dyld_info) / sizeof(natural_t));
+
+ task_t task = TaskPortForProcessID (err);
+ if (err.Success())
+ {
+ err = ::task_info (task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &count);
+ if (err.Success())
+ {
+ // We now have the address of the all image infos structure
+ return dyld_info.all_image_info_addr;
+ }
+ }
+ return INVALID_NUB_ADDRESS;
+}
+
+
+//----------------------------------------------------------------------
+// MachTask::AllocateMemory
+//----------------------------------------------------------------------
+nub_addr_t
+MachTask::AllocateMemory (size_t size, uint32_t permissions)
+{
+ mach_vm_address_t addr;
+ task_t task = TaskPort();
+ if (task == TASK_NULL)
+ return INVALID_NUB_ADDRESS;
+
+ DNBError err;
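+ // The trailing TRUE asks the kernel to place the allocation anywhere in the
+ // task's address space (effectively VM_FLAGS_ANYWHERE).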
+ err = ::mach_vm_allocate (task, &addr, size, TRUE);
+ if (err.Error() == KERN_SUCCESS)
+ {
+ // Set the protections:
+ vm_prot_t mach_prot = VM_PROT_NONE;
+ if (permissions & eMemoryPermissionsReadable)
+ mach_prot |= VM_PROT_READ;
+ if (permissions & eMemoryPermissionsWritable)
+ mach_prot |= VM_PROT_WRITE;
+ if (permissions & eMemoryPermissionsExecutable)
+ mach_prot |= VM_PROT_EXECUTE;
+
+
+ err = ::mach_vm_protect (task, addr, size, 0, mach_prot);
+ if (err.Error() == KERN_SUCCESS)
+ {
+ m_allocations.insert (std::make_pair(addr, size));
+ return addr;
+ }
+ ::mach_vm_deallocate (task, addr, size);
+ }
+ return INVALID_NUB_ADDRESS;
+}
+
+//----------------------------------------------------------------------
+// MachTask::DeallocateMemory
+//----------------------------------------------------------------------
+nub_bool_t
+MachTask::DeallocateMemory (nub_addr_t addr)
+{
+ task_t task = TaskPort();
+ if (task == TASK_NULL)
+ return false;
+
+ // We have to stash away sizes for the allocations...
+ allocation_collection::iterator pos, end = m_allocations.end();
+ for (pos = m_allocations.begin(); pos != end; pos++)
+ {
+ if ((*pos).first == addr)
+ {
+ // Grab the size before erasing the entry so we don't touch the
+ // invalidated iterator afterwards.
+ const auto size = (*pos).second;
+ m_allocations.erase(pos);
+#define ALWAYS_ZOMBIE_ALLOCATIONS 0
+ if (ALWAYS_ZOMBIE_ALLOCATIONS || getenv ("DEBUGSERVER_ZOMBIE_ALLOCATIONS"))
+ {
+ ::mach_vm_protect (task, addr, size, 0, VM_PROT_NONE);
+ return true;
+ }
+ else
+ return ::mach_vm_deallocate (task, addr, size) == KERN_SUCCESS;
+ }
+
+ }
+ return false;
+}
+
+static void foundStackLog(mach_stack_logging_record_t record, void *context) {
+ *((bool*)context) = true;
+}
+
+bool
+MachTask::HasMallocLoggingEnabled ()
+{
+ bool found = false;
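+ // Malloc stack logging is considered enabled if enumerating the records
+ // fires the callback at least once.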
+
+ __mach_stack_logging_enumerate_records(m_task, 0x0, foundStackLog, &found);
+ return found;
+}
+
+struct history_enumerator_impl_data
+{
+ MachMallocEvent *buffer;
+ uint32_t *position;
+ uint32_t count;
+};
+
+static void history_enumerator_impl(mach_stack_logging_record_t record, void* enum_obj)
+{
+ history_enumerator_impl_data *data = (history_enumerator_impl_data*)enum_obj;
+
+ if (*data->position >= data->count)
+ return;
+
+ data->buffer[*data->position].m_base_address = record.address;
+ data->buffer[*data->position].m_size = record.argument;
+ data->buffer[*data->position].m_event_id = record.stack_identifier;
+ data->buffer[*data->position].m_event_type = record.type_flags == stack_logging_type_alloc ? eMachMallocEventTypeAlloc :
+ record.type_flags == stack_logging_type_dealloc ? eMachMallocEventTypeDealloc :
+ eMachMallocEventTypeOther;
+ *data->position+=1;
+}
+
+bool
+MachTask::EnumerateMallocRecords (MachMallocEvent *event_buffer,
+ uint32_t buffer_size,
+ uint32_t *count)
+{
+ return EnumerateMallocRecords(0,
+ event_buffer,
+ buffer_size,
+ count);
+}
+
+bool
+MachTask::EnumerateMallocRecords (mach_vm_address_t address,
+ MachMallocEvent *event_buffer,
+ uint32_t buffer_size,
+ uint32_t *count)
+{
+ if (!event_buffer || !count)
+ return false;
+
+ if (buffer_size == 0)
+ return false;
+
+ *count = 0;
+ history_enumerator_impl_data data = { event_buffer, count, buffer_size };
+ __mach_stack_logging_enumerate_records(m_task, address, history_enumerator_impl, &data);
+ return (*count > 0);
+}
+
+bool
+MachTask::EnumerateMallocFrames (MachMallocEventId event_id,
+ mach_vm_address_t *function_addresses_buffer,
+ uint32_t buffer_size,
+ uint32_t *count)
+{
+ if (!function_addresses_buffer || !count)
+ return false;
+
+ if (buffer_size == 0)
+ return false;
+
+ __mach_stack_logging_frames_for_uniqued_stack(m_task, event_id, &function_addresses_buffer[0], buffer_size, count);
+ *count -= 1;
+ if (function_addresses_buffer[*count-1] < PageSize())
+ *count -= 1;
+ return (*count > 0);
+}
+
+nub_size_t
+MachTask::PageSize ()
+{
+ return m_vm_memory.PageSize (m_task);
+}
diff --git a/tools/debugserver/source/MacOSX/MachThread.cpp b/tools/debugserver/source/MacOSX/MachThread.cpp
new file mode 100644
index 000000000000..897484156080
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/MachThread.cpp
@@ -0,0 +1,922 @@
+//===-- MachThread.cpp ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/19/07.
+//
+//===----------------------------------------------------------------------===//
+
+#include <inttypes.h>
+#include <mach/thread_policy.h>
+#include <dlfcn.h>
+#include "MachThread.h"
+#include "MachProcess.h"
+#include "DNBLog.h"
+#include "DNB.h"
+#include "ThreadInfo.h"
+
+static uint32_t
+GetSequenceID()
+{
+ static uint32_t g_nextID = 0;
+ return ++g_nextID;
+}
+
+MachThread::MachThread (MachProcess *process, bool is_64_bit, uint64_t unique_thread_id, thread_t mach_port_num) :
+ m_process (process),
+ m_unique_id (unique_thread_id),
+ m_mach_port_number (mach_port_num),
+ m_seq_id (GetSequenceID()),
+ m_state (eStateUnloaded),
+ m_state_mutex (PTHREAD_MUTEX_RECURSIVE),
+ m_suspend_count (0),
+ m_stop_exception (),
+ m_arch_ap (DNBArchProtocol::Create (this)),
+ m_reg_sets (NULL),
+ m_num_reg_sets (0),
+ m_ident_info(),
+ m_proc_threadinfo(),
+ m_dispatch_queue_name(),
+ m_is_64_bit(is_64_bit),
+ m_pthread_qos_class_decode (nullptr)
+{
+ nub_size_t num_reg_sets = 0;
+ m_reg_sets = m_arch_ap->GetRegisterSetInfo (&num_reg_sets);
+ m_num_reg_sets = num_reg_sets;
+
+ m_pthread_qos_class_decode = (unsigned int (*)(unsigned long, int*, unsigned long*)) dlsym (RTLD_DEFAULT, "_pthread_qos_class_decode");
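+ // _pthread_qos_class_decode is looked up at runtime because it may not be
+ // available everywhere; if the lookup fails this stays NULL and
+ // GetRequestedQoS() simply returns an empty QoS value.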
+
+ // Get the thread state so we know if a thread is in a state where we can't
+ // muck with it and also so we get the suspend count correct in case it was
+ // already suspended
+ GetBasicInfo();
+ DNBLogThreadedIf(LOG_THREAD | LOG_VERBOSE, "MachThread::MachThread ( process = %p, tid = 0x%8.8" PRIx64 ", seq_id = %u )", &m_process, m_unique_id, m_seq_id);
+}
+
+MachThread::~MachThread()
+{
+ DNBLogThreadedIf(LOG_THREAD | LOG_VERBOSE, "MachThread::~MachThread() for tid = 0x%8.8" PRIx64 " (%u)", m_unique_id, m_seq_id);
+}
+
+
+
+void
+MachThread::Suspend()
+{
+ DNBLogThreadedIf(LOG_THREAD | LOG_VERBOSE, "MachThread::%s ( )", __FUNCTION__);
+ if (MachPortNumberIsValid(m_mach_port_number))
+ {
+ DNBError err(::thread_suspend (m_mach_port_number), DNBError::MachKernel);
+ if (err.Success())
+ m_suspend_count++;
+ if (DNBLogCheckLogBit(LOG_THREAD) || err.Fail())
+ err.LogThreaded("::thread_suspend (%4.4" PRIx32 ")", m_mach_port_number);
+ }
+}
+
+void
+MachThread::Resume(bool others_stopped)
+{
+ DNBLogThreadedIf(LOG_THREAD | LOG_VERBOSE, "MachThread::%s ( )", __FUNCTION__);
+ if (MachPortNumberIsValid(m_mach_port_number))
+ {
+ SetSuspendCountBeforeResume(others_stopped);
+ }
+}
+
+bool
+MachThread::SetSuspendCountBeforeResume(bool others_stopped)
+{
+ DNBLogThreadedIf(LOG_THREAD | LOG_VERBOSE, "MachThread::%s ( )", __FUNCTION__);
+ DNBError err;
+ if (MachPortNumberIsValid(m_mach_port_number) == false)
+ return false;
+
+ integer_t times_to_resume;
+
+ if (others_stopped)
+ {
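+ // All other threads are stopped, so resume this thread all the way down to
+ // a kernel suspend count of zero and record the extra resumes as a negative
+ // m_suspend_count so RestoreSuspendCountAfterStop() can put it back later.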
+ if (GetBasicInfo())
+ {
+ times_to_resume = m_basic_info.suspend_count;
+ m_suspend_count = - (times_to_resume - m_suspend_count);
+ }
+ else
+ times_to_resume = 0;
+ }
+ else
+ {
+ times_to_resume = m_suspend_count;
+ m_suspend_count = 0;
+ }
+
+ if (times_to_resume > 0)
+ {
+ while (times_to_resume > 0)
+ {
+ err = ::thread_resume (m_mach_port_number);
+ if (DNBLogCheckLogBit(LOG_THREAD) || err.Fail())
+ err.LogThreaded("::thread_resume (%4.4" PRIx32 ")", m_mach_port_number);
+ if (err.Success())
+ --times_to_resume;
+ else
+ {
+ if (GetBasicInfo())
+ times_to_resume = m_basic_info.suspend_count;
+ else
+ times_to_resume = 0;
+ }
+ }
+ }
+ return true;
+}
+
+bool
+MachThread::RestoreSuspendCountAfterStop ()
+{
+ DNBLogThreadedIf(LOG_THREAD | LOG_VERBOSE, "MachThread::%s ( )", __FUNCTION__);
+ DNBError err;
+ if (MachPortNumberIsValid(m_mach_port_number) == false)
+ return false;
+
+ if (m_suspend_count > 0)
+ {
+ while (m_suspend_count > 0)
+ {
+ err = ::thread_resume (m_mach_port_number);
+ if (DNBLogCheckLogBit(LOG_THREAD) || err.Fail())
+ err.LogThreaded("::thread_resume (%4.4" PRIx32 ")", m_mach_port_number);
+ if (err.Success())
+ --m_suspend_count;
+ else
+ {
+ if (GetBasicInfo())
+ m_suspend_count = m_basic_info.suspend_count;
+ else
+ m_suspend_count = 0;
+ return false; // ???
+ }
+ }
+ }
+ else if (m_suspend_count < 0)
+ {
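+ // A negative count means SetSuspendCountBeforeResume() resumed this thread
+ // past its original suspend count, so suspend it back to where it was.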
+ while (m_suspend_count < 0)
+ {
+ err = ::thread_suspend (m_mach_port_number);
+ if (err.Success())
+ ++m_suspend_count;
+ if (DNBLogCheckLogBit(LOG_THREAD) || err.Fail())
+ {
+ err.LogThreaded("::thread_suspend (%4.4" PRIx32 ")", m_mach_port_number);
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+
+const char *
+MachThread::GetBasicInfoAsString () const
+{
+ static char g_basic_info_string[1024];
+ struct thread_basic_info basicInfo;
+
+ if (GetBasicInfo(m_mach_port_number, &basicInfo))
+ {
+
+// char run_state_str[32];
+// size_t run_state_str_size = sizeof(run_state_str);
+// switch (basicInfo.run_state)
+// {
+// case TH_STATE_RUNNING: strncpy(run_state_str, "running", run_state_str_size); break;
+// case TH_STATE_STOPPED: strncpy(run_state_str, "stopped", run_state_str_size); break;
+// case TH_STATE_WAITING: strncpy(run_state_str, "waiting", run_state_str_size); break;
+// case TH_STATE_UNINTERRUPTIBLE: strncpy(run_state_str, "uninterruptible", run_state_str_size); break;
+// case TH_STATE_HALTED: strncpy(run_state_str, "halted", run_state_str_size); break;
+// default: snprintf(run_state_str, run_state_str_size, "%d", basicInfo.run_state); break; // ???
+// }
+ float user = (float)basicInfo.user_time.seconds + (float)basicInfo.user_time.microseconds / 1000000.0f;
+ float system = (float)basicInfo.system_time.seconds + (float)basicInfo.system_time.microseconds / 1000000.0f;
+ snprintf(g_basic_info_string, sizeof(g_basic_info_string), "Thread 0x%8.8" PRIx64 ": user=%f system=%f cpu=%d sleep_time=%d",
+ m_unique_id,
+ user,
+ system,
+ basicInfo.cpu_usage,
+ basicInfo.sleep_time);
+
+ return g_basic_info_string;
+ }
+ return NULL;
+}
+
+// Finds the Mach port number for a given thread in the inferior process' port namespace.
+thread_t
+MachThread::InferiorThreadID() const
+{
+ mach_msg_type_number_t i;
+ mach_port_name_array_t names;
+ mach_port_type_array_t types;
+ mach_msg_type_number_t ncount, tcount;
+ thread_t inferior_tid = INVALID_NUB_THREAD;
+ task_t my_task = ::mach_task_self();
+ task_t task = m_process->Task().TaskPort();
+
+ kern_return_t kret = ::mach_port_names (task, &names, &ncount, &types, &tcount);
+ if (kret == KERN_SUCCESS)
+ {
+
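+ // Copy each send right from the inferior's port namespace into ours; when
+ // the local name we get back matches our mach port number for this thread,
+ // names[i] is the inferior's name for it.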
+ for (i = 0; i < ncount; i++)
+ {
+ mach_port_t my_name;
+ mach_msg_type_name_t my_type;
+
+ kret = ::mach_port_extract_right (task, names[i], MACH_MSG_TYPE_COPY_SEND, &my_name, &my_type);
+ if (kret == KERN_SUCCESS)
+ {
+ ::mach_port_deallocate (my_task, my_name);
+ if (my_name == m_mach_port_number)
+ {
+ inferior_tid = names[i];
+ break;
+ }
+ }
+ }
+ // Free up the names and types
+ ::vm_deallocate (my_task, (vm_address_t) names, ncount * sizeof (mach_port_name_t));
+ ::vm_deallocate (my_task, (vm_address_t) types, tcount * sizeof (mach_port_type_t));
+ }
+ return inferior_tid;
+}
+
+bool
+MachThread::IsUserReady()
+{
+ if (m_basic_info.run_state == 0)
+ GetBasicInfo ();
+
+ switch (m_basic_info.run_state)
+ {
+ default:
+ case TH_STATE_UNINTERRUPTIBLE:
+ break;
+
+ case TH_STATE_RUNNING:
+ case TH_STATE_STOPPED:
+ case TH_STATE_WAITING:
+ case TH_STATE_HALTED:
+ return true;
+ }
+ return false;
+}
+
+struct thread_basic_info *
+MachThread::GetBasicInfo ()
+{
+ if (MachThread::GetBasicInfo(m_mach_port_number, &m_basic_info))
+ return &m_basic_info;
+ return NULL;
+}
+
+
+bool
+MachThread::GetBasicInfo(thread_t thread, struct thread_basic_info *basicInfoPtr)
+{
+ if (MachPortNumberIsValid(thread))
+ {
+ unsigned int info_count = THREAD_BASIC_INFO_COUNT;
+ kern_return_t err = ::thread_info (thread, THREAD_BASIC_INFO, (thread_info_t) basicInfoPtr, &info_count);
+ if (err == KERN_SUCCESS)
+ return true;
+ }
+ ::memset (basicInfoPtr, 0, sizeof (struct thread_basic_info));
+ return false;
+}
+
+
+bool
+MachThread::ThreadIDIsValid(uint64_t thread)
+{
+ return thread != 0;
+}
+
+bool
+MachThread::MachPortNumberIsValid(thread_t thread)
+{
+ return thread != THREAD_NULL;
+}
+
+bool
+MachThread::GetRegisterState(int flavor, bool force)
+{
+ return m_arch_ap->GetRegisterState(flavor, force) == KERN_SUCCESS;
+}
+
+bool
+MachThread::SetRegisterState(int flavor)
+{
+ return m_arch_ap->SetRegisterState(flavor) == KERN_SUCCESS;
+}
+
+uint64_t
+MachThread::GetPC(uint64_t failValue)
+{
+ // Get program counter
+ return m_arch_ap->GetPC(failValue);
+}
+
+bool
+MachThread::SetPC(uint64_t value)
+{
+ // Set program counter
+ return m_arch_ap->SetPC(value);
+}
+
+uint64_t
+MachThread::GetSP(uint64_t failValue)
+{
+ // Get stack pointer
+ return m_arch_ap->GetSP(failValue);
+}
+
+nub_process_t
+MachThread::ProcessID() const
+{
+ if (m_process)
+ return m_process->ProcessID();
+ return INVALID_NUB_PROCESS;
+}
+
+void
+MachThread::Dump(uint32_t index)
+{
+ const char * thread_run_state = NULL;
+
+ switch (m_basic_info.run_state)
+ {
+ case TH_STATE_RUNNING: thread_run_state = "running"; break; // 1 thread is running normally
+ case TH_STATE_STOPPED: thread_run_state = "stopped"; break; // 2 thread is stopped
+ case TH_STATE_WAITING: thread_run_state = "waiting"; break; // 3 thread is waiting normally
+ case TH_STATE_UNINTERRUPTIBLE: thread_run_state = "uninter"; break; // 4 thread is in an uninterruptible wait
+ case TH_STATE_HALTED: thread_run_state = "halted "; break; // 5 thread is halted at a clean point
+ default: thread_run_state = "???"; break;
+ }
+
+ DNBLogThreaded("[%3u] #%3u tid: 0x%8.8" PRIx64 ", pc: 0x%16.16" PRIx64 ", sp: 0x%16.16" PRIx64 ", user: %d.%6.6d, system: %d.%6.6d, cpu: %2d, policy: %2d, run_state: %2d (%s), flags: %2d, suspend_count: %2d (current %2d), sleep_time: %d",
+ index,
+ m_seq_id,
+ m_unique_id,
+ GetPC(INVALID_NUB_ADDRESS),
+ GetSP(INVALID_NUB_ADDRESS),
+ m_basic_info.user_time.seconds, m_basic_info.user_time.microseconds,
+ m_basic_info.system_time.seconds, m_basic_info.system_time.microseconds,
+ m_basic_info.cpu_usage,
+ m_basic_info.policy,
+ m_basic_info.run_state,
+ thread_run_state,
+ m_basic_info.flags,
+ m_basic_info.suspend_count, m_suspend_count,
+ m_basic_info.sleep_time);
+ //DumpRegisterState(0);
+}
+
+void
+MachThread::ThreadWillResume(const DNBThreadResumeAction *thread_action, bool others_stopped)
+{
+ if (thread_action->addr != INVALID_NUB_ADDRESS)
+ SetPC (thread_action->addr);
+
+ SetState (thread_action->state);
+ switch (thread_action->state)
+ {
+ case eStateStopped:
+ case eStateSuspended:
+ assert (others_stopped == false);
+ Suspend();
+ break;
+
+ case eStateRunning:
+ case eStateStepping:
+ Resume(others_stopped);
+ break;
+ default:
+ break;
+ }
+ m_arch_ap->ThreadWillResume();
+ m_stop_exception.Clear();
+}
+
+DNBBreakpoint *
+MachThread::CurrentBreakpoint()
+{
+ return m_process->Breakpoints().FindByAddress(GetPC());
+}
+
+bool
+MachThread::ShouldStop(bool &step_more)
+{
+ // See if this thread is at a breakpoint?
+ DNBBreakpoint *bp = CurrentBreakpoint();
+
+ if (bp)
+ {
+ // This thread is sitting at a breakpoint, ask the breakpoint
+ // if we should be stopping here.
+ return true;
+ }
+ else
+ {
+ if (m_arch_ap->StepNotComplete())
+ {
+ step_more = true;
+ return false;
+ }
+ // The thread state is used to let us know what the thread was
+ // trying to do. MachThread::ThreadWillResume() will set the
+ // thread state to various values depending if the thread was
+ // the current thread and if it was to be single stepped, or
+ // resumed.
+ if (GetState() == eStateRunning)
+ {
+ // If our state is running, then we should continue as we are in
+ // the process of stepping over a breakpoint.
+ return false;
+ }
+ else
+ {
+ // Stop if we have any kind of valid exception for this
+ // thread.
+ if (GetStopException().IsValid())
+ return true;
+ }
+ }
+ return false;
+}
+
+bool
+MachThread::IsStepping()
+{
+ return GetState() == eStateStepping;
+}
+
+
+bool
+MachThread::ThreadDidStop()
+{
+ // This thread has existed prior to resuming under debug nub control,
+ // and has just been stopped. Do any cleanup that needs to be done
+ // after running.
+
+ // The thread state and breakpoint will still have the same values
+ // as they had prior to resuming the thread, so it makes it easy to check
+ // if we were trying to step a thread, or we tried to resume while being
+ // at a breakpoint.
+
+ // When this method gets called, the process state is still in the
+ // state it was in while running so we can act accordingly.
+ m_arch_ap->ThreadDidStop();
+
+
+ // We may have suspended this thread so the primary thread could step
+ // without worrying about race conditions, so lets restore our suspend
+ // count.
+ RestoreSuspendCountAfterStop();
+
+ // Update the basic information for a thread
+ MachThread::GetBasicInfo(m_mach_port_number, &m_basic_info);
+
+ if (m_basic_info.suspend_count > 0)
+ SetState(eStateSuspended);
+ else
+ SetState(eStateStopped);
+ return true;
+}
+
+bool
+MachThread::NotifyException(MachException::Data& exc)
+{
+ // Allow the arch specific protocol to process (MachException::Data &)exc
+ // first before possible reassignment of m_stop_exception with exc.
+ // See also MachThread::GetStopException().
+ bool handled = m_arch_ap->NotifyException(exc);
+
+ if (m_stop_exception.IsValid())
+ {
+ // We may have more than one exception for a thread, but we need to
+ // only remember the one that we will say is the reason we stopped.
+ // We may have been single stepping and also gotten a signal exception,
+ // so just remember the most pertinent one.
+ if (m_stop_exception.IsBreakpoint())
+ m_stop_exception = exc;
+ }
+ else
+ {
+ m_stop_exception = exc;
+ }
+
+ return handled;
+}
+
+
+nub_state_t
+MachThread::GetState()
+{
+ // If any other threads access this we will need a mutex for it
+ PTHREAD_MUTEX_LOCKER (locker, m_state_mutex);
+ return m_state;
+}
+
+void
+MachThread::SetState(nub_state_t state)
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_state_mutex);
+ m_state = state;
+ DNBLogThreadedIf(LOG_THREAD, "MachThread::SetState ( %s ) for tid = 0x%8.8" PRIx64 "", DNBStateAsString(state), m_unique_id);
+}
+
+nub_size_t
+MachThread::GetNumRegistersInSet(nub_size_t regSet) const
+{
+ if (regSet < m_num_reg_sets)
+ return m_reg_sets[regSet].num_registers;
+ return 0;
+}
+
+const char *
+MachThread::GetRegisterSetName(nub_size_t regSet) const
+{
+ if (regSet < m_num_reg_sets)
+ return m_reg_sets[regSet].name;
+ return NULL;
+}
+
+const DNBRegisterInfo *
+MachThread::GetRegisterInfo(nub_size_t regSet, nub_size_t regIndex) const
+{
+ if (regSet < m_num_reg_sets)
+ if (regIndex < m_reg_sets[regSet].num_registers)
+ return &m_reg_sets[regSet].registers[regIndex];
+ return NULL;
+}
+
+void
+MachThread::DumpRegisterState(nub_size_t regSet)
+{
+ if (regSet == REGISTER_SET_ALL)
+ {
+ for (regSet = 1; regSet < m_num_reg_sets; regSet++)
+ DumpRegisterState(regSet);
+ }
+ else
+ {
+ if (m_arch_ap->RegisterSetStateIsValid((int)regSet))
+ {
+ const size_t numRegisters = GetNumRegistersInSet(regSet);
+ uint32_t regIndex = 0;
+ DNBRegisterValueClass reg;
+ for (regIndex = 0; regIndex < numRegisters; ++regIndex)
+ {
+ if (m_arch_ap->GetRegisterValue((uint32_t)regSet, regIndex, &reg))
+ {
+ reg.Dump(NULL, NULL);
+ }
+ }
+ }
+ else
+ {
+ DNBLog("%s: registers are not currently valid.", GetRegisterSetName(regSet));
+ }
+ }
+}
+
+const DNBRegisterSetInfo *
+MachThread::GetRegisterSetInfo(nub_size_t *num_reg_sets ) const
+{
+ *num_reg_sets = m_num_reg_sets;
+ return &m_reg_sets[0];
+}
+
+bool
+MachThread::GetRegisterValue ( uint32_t set, uint32_t reg, DNBRegisterValue *value )
+{
+ return m_arch_ap->GetRegisterValue(set, reg, value);
+}
+
+bool
+MachThread::SetRegisterValue ( uint32_t set, uint32_t reg, const DNBRegisterValue *value )
+{
+ return m_arch_ap->SetRegisterValue(set, reg, value);
+}
+
+nub_size_t
+MachThread::GetRegisterContext (void *buf, nub_size_t buf_len)
+{
+ return m_arch_ap->GetRegisterContext(buf, buf_len);
+}
+
+nub_size_t
+MachThread::SetRegisterContext (const void *buf, nub_size_t buf_len)
+{
+ return m_arch_ap->SetRegisterContext(buf, buf_len);
+}
+
+uint32_t
+MachThread::SaveRegisterState ()
+{
+ return m_arch_ap->SaveRegisterState();
+}
+
+bool
+MachThread::RestoreRegisterState (uint32_t save_id)
+{
+ return m_arch_ap->RestoreRegisterState(save_id);
+}
+
+uint32_t
+MachThread::EnableHardwareBreakpoint (const DNBBreakpoint *bp)
+{
+ if (bp != NULL && bp->IsBreakpoint())
+ return m_arch_ap->EnableHardwareBreakpoint(bp->Address(), bp->ByteSize());
+ return INVALID_NUB_HW_INDEX;
+}
+
+uint32_t
+MachThread::EnableHardwareWatchpoint (const DNBBreakpoint *wp, bool also_set_on_task)
+{
+ if (wp != NULL && wp->IsWatchpoint())
+ return m_arch_ap->EnableHardwareWatchpoint(wp->Address(), wp->ByteSize(), wp->WatchpointRead(), wp->WatchpointWrite(), also_set_on_task);
+ return INVALID_NUB_HW_INDEX;
+}
+
+bool
+MachThread::RollbackTransForHWP()
+{
+ return m_arch_ap->RollbackTransForHWP();
+}
+
+bool
+MachThread::FinishTransForHWP()
+{
+ return m_arch_ap->FinishTransForHWP();
+}
+
+bool
+MachThread::DisableHardwareBreakpoint (const DNBBreakpoint *bp)
+{
+ if (bp != NULL && bp->IsHardware())
+ return m_arch_ap->DisableHardwareBreakpoint(bp->GetHardwareIndex());
+ return false;
+}
+
+bool
+MachThread::DisableHardwareWatchpoint (const DNBBreakpoint *wp, bool also_set_on_task)
+{
+ if (wp != NULL && wp->IsHardware())
+ return m_arch_ap->DisableHardwareWatchpoint(wp->GetHardwareIndex(), also_set_on_task);
+ return false;
+}
+
+uint32_t
+MachThread::NumSupportedHardwareWatchpoints () const
+{
+ return m_arch_ap->NumSupportedHardwareWatchpoints();
+}
+
+bool
+MachThread::GetIdentifierInfo ()
+{
+ // Don't try to get the thread info once and cache it for the life of the thread. It changes over time, for instance
+ // if the thread name changes, then the thread_handle also changes... So you have to refetch it every time.
+ mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
+ kern_return_t kret = ::thread_info (m_mach_port_number, THREAD_IDENTIFIER_INFO, (thread_info_t) &m_ident_info, &count);
+ return kret == KERN_SUCCESS;
+}
+
+
+const char *
+MachThread::GetName ()
+{
+ if (GetIdentifierInfo ())
+ {
+ int len = ::proc_pidinfo (m_process->ProcessID(), PROC_PIDTHREADINFO, m_ident_info.thread_handle, &m_proc_threadinfo, sizeof (m_proc_threadinfo));
+
+ if (len && m_proc_threadinfo.pth_name[0])
+ return m_proc_threadinfo.pth_name;
+ }
+ return NULL;
+}
+
+
+uint64_t
+MachThread::GetGloballyUniqueThreadIDForMachPortID (thread_t mach_port_id)
+{
+ kern_return_t kr;
+ thread_identifier_info_data_t tident;
+ mach_msg_type_number_t tident_count = THREAD_IDENTIFIER_INFO_COUNT;
+ kr = thread_info (mach_port_id, THREAD_IDENTIFIER_INFO,
+ (thread_info_t) &tident, &tident_count);
+ if (kr != KERN_SUCCESS)
+ {
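+ // Fall back to the mach port number itself if the kernel won't hand us a
+ // globally unique thread ID (for example if the thread is already gone).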
+ return mach_port_id;
+ }
+ return tident.thread_id;
+}
+
+nub_addr_t
+MachThread::GetPThreadT ()
+{
+ nub_addr_t pthread_t_value = INVALID_NUB_ADDRESS;
+ if (MachPortNumberIsValid (m_mach_port_number))
+ {
+ kern_return_t kr;
+ thread_identifier_info_data_t tident;
+ mach_msg_type_number_t tident_count = THREAD_IDENTIFIER_INFO_COUNT;
+ kr = thread_info (m_mach_port_number, THREAD_IDENTIFIER_INFO,
+ (thread_info_t) &tident, &tident_count);
+ if (kr == KERN_SUCCESS)
+ {
+ // Dereference thread_handle to get the pthread_t value for this thread.
+ if (m_is_64_bit)
+ {
+ uint64_t addr;
+ if (m_process->ReadMemory (tident.thread_handle, 8, &addr) == 8)
+ {
+ if (addr != 0)
+ {
+ pthread_t_value = addr;
+ }
+ }
+ }
+ else
+ {
+ uint32_t addr;
+ if (m_process->ReadMemory (tident.thread_handle, 4, &addr) == 4)
+ {
+ if (addr != 0)
+ {
+ pthread_t_value = addr;
+ }
+ }
+ }
+ }
+ }
+ return pthread_t_value;
+}
+
+// Return this thread's TSD (Thread Specific Data) address.
+// This is computed based on this thread's pthread_t value.
+//
+// We compute the TSD from the pthread_t by one of two methods.
+//
+// If plo_pthread_tsd_base_offset is non-zero, this is a simple offset that we add to
+// the pthread_t to get the TSD base address.
+//
+// Else we read a pointer from memory at pthread_t + plo_pthread_tsd_base_address_offset and
+// that gives us the TSD address.
+//
+// These plo_pthread_tsd_base values must be read out of libpthread by lldb & provided to debugserver.
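+//
+// In pseudo-code:
+//   tsd = pthread_t + plo_pthread_tsd_base_offset                (offset method)
+//   tsd = *(pthread_t + plo_pthread_tsd_base_address_offset)     (indirect method)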
+
+nub_addr_t
+MachThread::GetTSDAddressForThread (uint64_t plo_pthread_tsd_base_address_offset, uint64_t plo_pthread_tsd_base_offset, uint64_t plo_pthread_tsd_entry_size)
+{
+ nub_addr_t tsd_addr = INVALID_NUB_ADDRESS;
+ nub_addr_t pthread_t_value = GetPThreadT();
+ if (plo_pthread_tsd_base_offset != 0 && plo_pthread_tsd_base_offset != INVALID_NUB_ADDRESS)
+ {
+ tsd_addr = pthread_t_value + plo_pthread_tsd_base_offset;
+ }
+ else
+ {
+ if (plo_pthread_tsd_entry_size == 4)
+ {
+ uint32_t addr = 0;
+ if (m_process->ReadMemory (pthread_t_value + plo_pthread_tsd_base_address_offset, 4, &addr) == 4)
+ {
+ if (addr != 0)
+ {
+ tsd_addr = addr;
+ }
+ }
+ }
+ if (plo_pthread_tsd_entry_size == 8)
+ {
+ uint64_t addr = 0;
+ if (m_process->ReadMemory (pthread_t_value + plo_pthread_tsd_base_address_offset, 8, &addr) == 8)
+ {
+ if (addr != 0)
+ {
+ tsd_addr = addr;
+ }
+ }
+ }
+ }
+ return tsd_addr;
+}
+
+
+nub_addr_t
+MachThread::GetDispatchQueueT ()
+{
+ nub_addr_t dispatch_queue_t_value = INVALID_NUB_ADDRESS;
+ if (MachPortNumberIsValid (m_mach_port_number))
+ {
+ kern_return_t kr;
+ thread_identifier_info_data_t tident;
+ mach_msg_type_number_t tident_count = THREAD_IDENTIFIER_INFO_COUNT;
+ kr = thread_info (m_mach_port_number, THREAD_IDENTIFIER_INFO,
+ (thread_info_t) &tident, &tident_count);
+ if (kr == KERN_SUCCESS && tident.dispatch_qaddr != 0 && tident.dispatch_qaddr != INVALID_NUB_ADDRESS)
+ {
+ // Dereference dispatch_qaddr to get the dispatch_queue_t value for this thread's queue, if any.
+ if (m_is_64_bit)
+ {
+ uint64_t addr;
+ if (m_process->ReadMemory (tident.dispatch_qaddr, 8, &addr) == 8)
+ {
+ if (addr != 0)
+ dispatch_queue_t_value = addr;
+ }
+ }
+ else
+ {
+ uint32_t addr;
+ if (m_process->ReadMemory (tident.dispatch_qaddr, 4, &addr) == 4)
+ {
+ if (addr != 0)
+ dispatch_queue_t_value = addr;
+ }
+ }
+ }
+ }
+ return dispatch_queue_t_value;
+}
+
+
+ThreadInfo::QoS
+MachThread::GetRequestedQoS (nub_addr_t tsd, uint64_t dti_qos_class_index)
+{
+ ThreadInfo::QoS qos_value;
+ if (MachPortNumberIsValid (m_mach_port_number) && m_pthread_qos_class_decode != nullptr)
+ {
+ uint64_t pthread_priority_value = 0;
+ if (m_is_64_bit)
+ {
+ uint64_t pri;
+ if (m_process->ReadMemory (tsd + (dti_qos_class_index * 8), 8, &pri) == 8)
+ {
+ pthread_priority_value = pri;
+ }
+ }
+ else
+ {
+ uint32_t pri;
+ if (m_process->ReadMemory (tsd + (dti_qos_class_index * 4), 4, &pri) == 4)
+ {
+ pthread_priority_value = pri;
+ }
+ }
+
+ uint32_t requested_qos = m_pthread_qos_class_decode (pthread_priority_value, NULL, NULL);
+
+ switch (requested_qos)
+ {
+ // These constants from <pthread/qos.h>
+ case 0x21:
+ qos_value.enum_value = requested_qos;
+ qos_value.constant_name = "QOS_CLASS_USER_INTERACTIVE";
+ qos_value.printable_name = "User Interactive";
+ break;
+ case 0x19:
+ qos_value.enum_value = requested_qos;
+ qos_value.constant_name = "QOS_CLASS_USER_INITIATED";
+ qos_value.printable_name = "User Initiated";
+ break;
+ case 0x15:
+ qos_value.enum_value = requested_qos;
+ qos_value.constant_name = "QOS_CLASS_DEFAULT";
+ qos_value.printable_name = "Default";
+ break;
+ case 0x11:
+ qos_value.enum_value = requested_qos;
+ qos_value.constant_name = "QOS_CLASS_UTILITY";
+ qos_value.printable_name = "Utility";
+ break;
+ case 0x09:
+ qos_value.enum_value = requested_qos;
+ qos_value.constant_name = "QOS_CLASS_BACKGROUND";
+ qos_value.printable_name = "Background";
+ break;
+ case 0x00:
+ qos_value.enum_value = requested_qos;
+ qos_value.constant_name = "QOS_CLASS_UNSPECIFIED";
+ qos_value.printable_name = "Unspecified";
+ break;
+ }
+ }
+ return qos_value;
+}
diff --git a/tools/debugserver/source/MacOSX/MachThread.h b/tools/debugserver/source/MacOSX/MachThread.h
new file mode 100644
index 000000000000..a2a318172588
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/MachThread.h
@@ -0,0 +1,159 @@
+//===-- MachThread.h --------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/19/07.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __MachThread_h__
+#define __MachThread_h__
+
+#include <string>
+#include <vector>
+
+#include <libproc.h>
+#include <mach/mach.h>
+#include <pthread.h>
+#include <sys/signal.h>
+
+#include "PThreadCondition.h"
+#include "PThreadMutex.h"
+#include "MachException.h"
+#include "DNBArch.h"
+#include "DNBRegisterInfo.h"
+
+#include "ThreadInfo.h"
+
+class DNBBreakpoint;
+class MachProcess;
+class MachThreadList;
+
+class MachThread
+{
+public:
+
+ MachThread (MachProcess *process, bool is_64_bit, uint64_t unique_thread_id = 0, thread_t mach_port_number = 0);
+ ~MachThread ();
+
+ MachProcess * Process() { return m_process; }
+ const MachProcess *
+ Process() const { return m_process; }
+ nub_process_t ProcessID() const;
+ void Dump(uint32_t index);
+ uint64_t ThreadID() const { return m_unique_id; }
+ thread_t MachPortNumber() const { return m_mach_port_number; }
+ thread_t InferiorThreadID() const;
+
+ uint32_t SequenceID() const { return m_seq_id; }
+ static bool ThreadIDIsValid(uint64_t thread); // The 64-bit system-wide unique thread identifier
+ static bool MachPortNumberIsValid(thread_t thread); // The mach port # for this thread in debugserver namespace
+ void Resume(bool others_stopped);
+ void Suspend();
+ bool SetSuspendCountBeforeResume(bool others_stopped);
+ bool RestoreSuspendCountAfterStop();
+
+ bool GetRegisterState(int flavor, bool force);
+ bool SetRegisterState(int flavor);
+ uint64_t GetPC(uint64_t failValue = INVALID_NUB_ADDRESS); // Get program counter
+ bool SetPC(uint64_t value); // Set program counter
+ uint64_t GetSP(uint64_t failValue = INVALID_NUB_ADDRESS); // Get stack pointer
+
+ DNBBreakpoint * CurrentBreakpoint();
+ uint32_t EnableHardwareBreakpoint (const DNBBreakpoint *breakpoint);
+ uint32_t EnableHardwareWatchpoint (const DNBBreakpoint *watchpoint, bool also_set_on_task);
+ bool DisableHardwareBreakpoint (const DNBBreakpoint *breakpoint);
+ bool DisableHardwareWatchpoint (const DNBBreakpoint *watchpoint, bool also_set_on_task);
+ uint32_t NumSupportedHardwareWatchpoints () const;
+ bool RollbackTransForHWP();
+ bool FinishTransForHWP();
+
+ nub_state_t GetState();
+ void SetState(nub_state_t state);
+
+ void ThreadWillResume (const DNBThreadResumeAction *thread_action, bool others_stopped = false);
+ bool ShouldStop(bool &step_more);
+ bool IsStepping();
+ bool ThreadDidStop();
+ bool NotifyException(MachException::Data& exc);
+ const MachException::Data& GetStopException() { return m_stop_exception; }
+
+ nub_size_t GetNumRegistersInSet(nub_size_t regSet) const;
+ const char * GetRegisterSetName(nub_size_t regSet) const;
+ const DNBRegisterInfo *
+ GetRegisterInfo(nub_size_t regSet, nub_size_t regIndex) const;
+ void DumpRegisterState(nub_size_t regSet);
+ const DNBRegisterSetInfo *
+ GetRegisterSetInfo(nub_size_t *num_reg_sets ) const;
+ bool GetRegisterValue ( uint32_t reg_set_idx, uint32_t reg_idx, DNBRegisterValue *reg_value );
+ bool SetRegisterValue ( uint32_t reg_set_idx, uint32_t reg_idx, const DNBRegisterValue *reg_value );
+ nub_size_t GetRegisterContext (void *buf, nub_size_t buf_len);
+ nub_size_t SetRegisterContext (const void *buf, nub_size_t buf_len);
+ uint32_t SaveRegisterState ();
+ bool RestoreRegisterState (uint32_t save_id);
+
+ void NotifyBreakpointChanged (const DNBBreakpoint *bp)
+ {
+ }
+
+ bool IsUserReady();
+ struct thread_basic_info *
+ GetBasicInfo ();
+ const char * GetBasicInfoAsString () const;
+ const char * GetName ();
+
+ DNBArchProtocol*
+ GetArchProtocol()
+ {
+ return m_arch_ap.get();
+ }
+
+ ThreadInfo::QoS GetRequestedQoS (nub_addr_t tsd, uint64_t dti_qos_class_index);
+ nub_addr_t GetPThreadT();
+ nub_addr_t GetDispatchQueueT();
+ nub_addr_t GetTSDAddressForThread (uint64_t plo_pthread_tsd_base_address_offset, uint64_t plo_pthread_tsd_base_offset, uint64_t plo_pthread_tsd_entry_size);
+
+ static uint64_t GetGloballyUniqueThreadIDForMachPortID (thread_t mach_port_id);
+
+protected:
+ static bool GetBasicInfo(thread_t threadID, struct thread_basic_info *basic_info);
+
+ bool
+ GetIdentifierInfo ();
+
+// const char *
+// GetDispatchQueueName();
+//
+ MachProcess * m_process; // The process that owns this thread
+ uint64_t m_unique_id; // The globally unique ID for this thread (nub_thread_t)
+ thread_t m_mach_port_number; // The mach port # for this thread in debugserver namesp.
+ uint32_t m_seq_id; // A Sequential ID that increments with each new thread
+ nub_state_t m_state; // The state of our process
+ PThreadMutex m_state_mutex; // Multithreaded protection for m_state
+ struct thread_basic_info m_basic_info; // Basic information for a thread used to see if a thread is valid
+ int32_t m_suspend_count; // The current suspend count > 0 means we have suspended m_suspendCount times,
+ // < 0 means we have resumed it m_suspendCount times.
+ MachException::Data m_stop_exception; // The best exception that describes why this thread is stopped
+ std::unique_ptr<DNBArchProtocol> m_arch_ap; // Arch specific information for register state and more
+ const DNBRegisterSetInfo * m_reg_sets; // Register set information for this thread
+ nub_size_t m_num_reg_sets;
+ thread_identifier_info_data_t m_ident_info;
+ struct proc_threadinfo m_proc_threadinfo;
+ std::string m_dispatch_queue_name;
+ bool m_is_64_bit;
+
+ // qos_class_t _pthread_qos_class_decode(pthread_priority_t priority, int *, unsigned long *);
+ unsigned int (*m_pthread_qos_class_decode) (unsigned long priority, int*, unsigned long *);
+
+private:
+ friend class MachThreadList;
+};
+
+typedef std::shared_ptr<MachThread> MachThreadSP;
+
+#endif
diff --git a/tools/debugserver/source/MacOSX/MachThreadList.cpp b/tools/debugserver/source/MacOSX/MachThreadList.cpp
new file mode 100644
index 000000000000..8a7da6f4531e
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/MachThreadList.cpp
@@ -0,0 +1,668 @@
+//===-- MachThreadList.cpp --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/19/07.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MachThreadList.h"
+
+#include <inttypes.h>
+#include <sys/sysctl.h>
+
+#include "DNBLog.h"
+#include "DNBThreadResumeActions.h"
+#include "MachProcess.h"
+
+MachThreadList::MachThreadList() :
+ m_threads(),
+ m_threads_mutex(PTHREAD_MUTEX_RECURSIVE),
+ m_is_64_bit(false)
+{
+}
+
+MachThreadList::~MachThreadList()
+{
+}
+
+nub_state_t
+MachThreadList::GetState(nub_thread_t tid)
+{
+ MachThreadSP thread_sp (GetThreadByID (tid));
+ if (thread_sp)
+ return thread_sp->GetState();
+ return eStateInvalid;
+}
+
+const char *
+MachThreadList::GetName (nub_thread_t tid)
+{
+ MachThreadSP thread_sp (GetThreadByID (tid));
+ if (thread_sp)
+ return thread_sp->GetName();
+ return NULL;
+}
+
+ThreadInfo::QoS
+MachThreadList::GetRequestedQoS (nub_thread_t tid, nub_addr_t tsd, uint64_t dti_qos_class_index)
+{
+ MachThreadSP thread_sp (GetThreadByID (tid));
+ if (thread_sp)
+ return thread_sp->GetRequestedQoS(tsd, dti_qos_class_index);
+ return ThreadInfo::QoS();
+}
+
+nub_addr_t
+MachThreadList::GetPThreadT (nub_thread_t tid)
+{
+ MachThreadSP thread_sp (GetThreadByID (tid));
+ if (thread_sp)
+ return thread_sp->GetPThreadT();
+ return INVALID_NUB_ADDRESS;
+}
+
+nub_addr_t
+MachThreadList::GetDispatchQueueT (nub_thread_t tid)
+{
+ MachThreadSP thread_sp (GetThreadByID (tid));
+ if (thread_sp)
+ return thread_sp->GetDispatchQueueT();
+ return INVALID_NUB_ADDRESS;
+}
+
+nub_addr_t
+MachThreadList::GetTSDAddressForThread (nub_thread_t tid, uint64_t plo_pthread_tsd_base_address_offset, uint64_t plo_pthread_tsd_base_offset, uint64_t plo_pthread_tsd_entry_size)
+{
+ MachThreadSP thread_sp (GetThreadByID (tid));
+ if (thread_sp)
+ return thread_sp->GetTSDAddressForThread(plo_pthread_tsd_base_address_offset, plo_pthread_tsd_base_offset, plo_pthread_tsd_entry_size);
+ return INVALID_NUB_ADDRESS;
+}
+
+nub_thread_t
+MachThreadList::SetCurrentThread(nub_thread_t tid)
+{
+ MachThreadSP thread_sp (GetThreadByID (tid));
+ if (thread_sp)
+ {
+ m_current_thread = thread_sp;
+ return tid;
+ }
+ return INVALID_NUB_THREAD;
+}
+
+
+bool
+MachThreadList::GetThreadStoppedReason(nub_thread_t tid, struct DNBThreadStopInfo *stop_info) const
+{
+ MachThreadSP thread_sp (GetThreadByID (tid));
+ if (thread_sp)
+ return thread_sp->GetStopException().GetStopInfo(stop_info);
+ return false;
+}
+
+bool
+MachThreadList::GetIdentifierInfo (nub_thread_t tid, thread_identifier_info_data_t *ident_info)
+{
+ thread_t mach_port_number = GetMachPortNumberByThreadID (tid);
+
+ mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
+ return ::thread_info (mach_port_number, THREAD_IDENTIFIER_INFO, (thread_info_t)ident_info, &count) == KERN_SUCCESS;
+}
+
+void
+MachThreadList::DumpThreadStoppedReason (nub_thread_t tid) const
+{
+ MachThreadSP thread_sp (GetThreadByID (tid));
+ if (thread_sp)
+ thread_sp->GetStopException().DumpStopReason();
+}
+
+const char *
+MachThreadList::GetThreadInfo (nub_thread_t tid) const
+{
+ MachThreadSP thread_sp (GetThreadByID (tid));
+ if (thread_sp)
+ return thread_sp->GetBasicInfoAsString();
+ return NULL;
+}
+
+MachThreadSP
+MachThreadList::GetThreadByID (nub_thread_t tid) const
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+ MachThreadSP thread_sp;
+ const size_t num_threads = m_threads.size();
+ for (size_t idx = 0; idx < num_threads; ++idx)
+ {
+ if (m_threads[idx]->ThreadID() == tid)
+ {
+ thread_sp = m_threads[idx];
+ break;
+ }
+ }
+ return thread_sp;
+}
+
+MachThreadSP
+MachThreadList::GetThreadByMachPortNumber (thread_t mach_port_number) const
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+ MachThreadSP thread_sp;
+ const size_t num_threads = m_threads.size();
+ for (size_t idx = 0; idx < num_threads; ++idx)
+ {
+ if (m_threads[idx]->MachPortNumber() == mach_port_number)
+ {
+ thread_sp = m_threads[idx];
+ break;
+ }
+ }
+ return thread_sp;
+}
+
+nub_thread_t
+MachThreadList::GetThreadIDByMachPortNumber (thread_t mach_port_number) const
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+ MachThreadSP thread_sp;
+ const size_t num_threads = m_threads.size();
+ for (size_t idx = 0; idx < num_threads; ++idx)
+ {
+ if (m_threads[idx]->MachPortNumber() == mach_port_number)
+ {
+ return m_threads[idx]->ThreadID();
+ }
+ }
+ return INVALID_NUB_THREAD;
+}
+
+thread_t
+MachThreadList::GetMachPortNumberByThreadID (nub_thread_t globally_unique_id) const
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+ MachThreadSP thread_sp;
+ const size_t num_threads = m_threads.size();
+ for (size_t idx = 0; idx < num_threads; ++idx)
+ {
+ if (m_threads[idx]->ThreadID() == globally_unique_id)
+ {
+ return m_threads[idx]->MachPortNumber();
+ }
+ }
+ return 0;
+}
+
+bool
+MachThreadList::GetRegisterValue (nub_thread_t tid, uint32_t set, uint32_t reg, DNBRegisterValue *reg_value ) const
+{
+ MachThreadSP thread_sp (GetThreadByID (tid));
+ if (thread_sp)
+ return thread_sp->GetRegisterValue(set, reg, reg_value);
+
+ return false;
+}
+
+bool
+MachThreadList::SetRegisterValue (nub_thread_t tid, uint32_t set, uint32_t reg, const DNBRegisterValue *reg_value ) const
+{
+ MachThreadSP thread_sp (GetThreadByID (tid));
+ if (thread_sp)
+ return thread_sp->SetRegisterValue(set, reg, reg_value);
+
+ return false;
+}
+
+nub_size_t
+MachThreadList::GetRegisterContext (nub_thread_t tid, void *buf, size_t buf_len)
+{
+ MachThreadSP thread_sp (GetThreadByID (tid));
+ if (thread_sp)
+ return thread_sp->GetRegisterContext (buf, buf_len);
+ return 0;
+}
+
+nub_size_t
+MachThreadList::SetRegisterContext (nub_thread_t tid, const void *buf, size_t buf_len)
+{
+ MachThreadSP thread_sp (GetThreadByID (tid));
+ if (thread_sp)
+ return thread_sp->SetRegisterContext (buf, buf_len);
+ return 0;
+}
+
+uint32_t
+MachThreadList::SaveRegisterState (nub_thread_t tid)
+{
+ MachThreadSP thread_sp (GetThreadByID (tid));
+ if (thread_sp)
+ return thread_sp->SaveRegisterState ();
+ return 0;
+}
+
+bool
+MachThreadList::RestoreRegisterState (nub_thread_t tid, uint32_t save_id)
+{
+ MachThreadSP thread_sp (GetThreadByID (tid));
+ if (thread_sp)
+ return thread_sp->RestoreRegisterState (save_id);
+ return 0;
+}
+
+
+nub_size_t
+MachThreadList::NumThreads () const
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+ return m_threads.size();
+}
+
+nub_thread_t
+MachThreadList::ThreadIDAtIndex (nub_size_t idx) const
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+ if (idx < m_threads.size())
+ return m_threads[idx]->ThreadID();
+ return INVALID_NUB_THREAD;
+}
+
+nub_thread_t
+MachThreadList::CurrentThreadID ( )
+{
+ MachThreadSP thread_sp;
+ CurrentThread(thread_sp);
+ if (thread_sp.get())
+ return thread_sp->ThreadID();
+ return INVALID_NUB_THREAD;
+}
+
+bool
+MachThreadList::NotifyException(MachException::Data& exc)
+{
+ MachThreadSP thread_sp (GetThreadByMachPortNumber (exc.thread_port));
+ if (thread_sp)
+ {
+ thread_sp->NotifyException(exc);
+ return true;
+ }
+ return false;
+}
+
+void
+MachThreadList::Clear()
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+ m_threads.clear();
+}
+
+uint32_t
+MachThreadList::UpdateThreadList(MachProcess *process, bool update, MachThreadList::collection *new_threads)
+{
+ // locker will keep a mutex locked until it goes out of scope
+ DNBLogThreadedIf (LOG_THREAD, "MachThreadList::UpdateThreadList (pid = %4.4x, update = %u) process stop count = %u", process->ProcessID(), update, process->StopCount());
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+
+ if (process->StopCount() == 0)
+ {
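+ // On the first stop, ask the kernel whether the inferior is a 64-bit
+ // process (P_LP64) so the correct architecture is selected for its threads.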
+ int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process->ProcessID() };
+ struct kinfo_proc processInfo;
+ size_t bufsize = sizeof(processInfo);
+ if (sysctl(mib, (unsigned)(sizeof(mib)/sizeof(int)), &processInfo, &bufsize, NULL, 0) == 0 && bufsize > 0)
+ {
+ if (processInfo.kp_proc.p_flag & P_LP64)
+ m_is_64_bit = true;
+ }
+#if defined (__i386__) || defined (__x86_64__)
+ if (m_is_64_bit)
+ DNBArchProtocol::SetArchitecture(CPU_TYPE_X86_64);
+ else
+ DNBArchProtocol::SetArchitecture(CPU_TYPE_I386);
+#elif defined (__arm__) || defined (__arm64__) || defined (__aarch64__)
+ if (m_is_64_bit)
+ DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM64);
+ else
+ DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM);
+#endif
+ }
+
+ if (m_threads.empty() || update)
+ {
+ thread_array_t thread_list = NULL;
+ mach_msg_type_number_t thread_list_count = 0;
+ task_t task = process->Task().TaskPort();
+ DNBError err(::task_threads (task, &thread_list, &thread_list_count), DNBError::MachKernel);
+
+ if (DNBLogCheckLogBit(LOG_THREAD) || err.Fail())
+ err.LogThreaded("::task_threads ( task = 0x%4.4x, thread_list => %p, thread_list_count => %u )", task, thread_list, thread_list_count);
+
+ if (err.Error() == KERN_SUCCESS && thread_list_count > 0)
+ {
+ MachThreadList::collection currThreads;
+ size_t idx;
+ // Iterate through the current thread list and see which threads
+ // we already have in our list (keep them), which ones we don't
+ // (add them), and which ones are not around anymore (remove them).
+ for (idx = 0; idx < thread_list_count; ++idx)
+ {
+ const thread_t mach_port_num = thread_list[idx];
+
+ uint64_t unique_thread_id = MachThread::GetGloballyUniqueThreadIDForMachPortID (mach_port_num);
+ MachThreadSP thread_sp (GetThreadByID (unique_thread_id));
+ if (thread_sp)
+ {
+ // Keep the existing thread class
+ currThreads.push_back(thread_sp);
+ }
+ else
+ {
+ // We don't have this thread, let's add it.
+ thread_sp.reset(new MachThread(process, m_is_64_bit, unique_thread_id, mach_port_num));
+
+ // Only add the new thread if it is ready to be displayed and shown to
+ // users; threads that are not yet user ready will be picked up by a
+ // later update.
+ if (thread_sp->IsUserReady())
+ {
+ if (new_threads)
+ new_threads->push_back(thread_sp);
+
+ currThreads.push_back(thread_sp);
+ }
+ }
+ }
+
+ m_threads.swap(currThreads);
+ m_current_thread.reset();
+
+ // Free the vm memory given to us by ::task_threads()
+ vm_size_t thread_list_size = (vm_size_t) (thread_list_count * sizeof (thread_t));
+ ::vm_deallocate (::mach_task_self(),
+ (vm_address_t)thread_list,
+ thread_list_size);
+ }
+ }
+ return static_cast<uint32_t>(m_threads.size());
+}
+
+
+void
+MachThreadList::CurrentThread (MachThreadSP& thread_sp)
+{
+ // locker will keep a mutex locked until it goes out of scope
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+ if (m_current_thread.get() == NULL)
+ {
+ // Figure out which thread is going to be our current thread.
+ // This is currently done by finding the first thread in the list
+ // that has a valid exception.
+ const size_t num_threads = m_threads.size();
+ for (uint32_t idx = 0; idx < num_threads; ++idx)
+ {
+ if (m_threads[idx]->GetStopException().IsValid())
+ {
+ m_current_thread = m_threads[idx];
+ break;
+ }
+ }
+ }
+ thread_sp = m_current_thread;
+}
+
+void
+MachThreadList::Dump() const
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+ const size_t num_threads = m_threads.size();
+ for (uint32_t idx = 0; idx < num_threads; ++idx)
+ {
+ m_threads[idx]->Dump(idx);
+ }
+}
+
+
+void
+MachThreadList::ProcessWillResume(MachProcess *process, const DNBThreadResumeActions &thread_actions)
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+
+ // Update our thread list, because sometimes libdispatch or the kernel
+ // will spawn threads while a task is suspended.
+ MachThreadList::collection new_threads;
+
+ // First figure out if we were planning on running only one thread, and if so force that thread to resume.
+ bool run_one_thread;
+ nub_thread_t solo_thread = INVALID_NUB_THREAD;
+ if (thread_actions.GetSize() > 0
+ && thread_actions.NumActionsWithState(eStateStepping) + thread_actions.NumActionsWithState (eStateRunning) == 1)
+ {
+ run_one_thread = true;
+ const DNBThreadResumeAction *action_ptr = thread_actions.GetFirst();
+ size_t num_actions = thread_actions.GetSize();
+ for (size_t i = 0; i < num_actions; i++, action_ptr++)
+ {
+ if (action_ptr->state == eStateStepping || action_ptr->state == eStateRunning)
+ {
+ solo_thread = action_ptr->tid;
+ break;
+ }
+ }
+ }
+ else
+ run_one_thread = false;
+
+ UpdateThreadList(process, true, &new_threads);
+
+ DNBThreadResumeAction resume_new_threads = { -1U, eStateRunning, 0, INVALID_NUB_ADDRESS };
+ // If we are planning to run only one thread, any new threads should be suspended.
+ if (run_one_thread)
+ resume_new_threads.state = eStateSuspended;
+
+ const size_t num_new_threads = new_threads.size();
+ const size_t num_threads = m_threads.size();
+ for (uint32_t idx = 0; idx < num_threads; ++idx)
+ {
+ MachThread *thread = m_threads[idx].get();
+ bool handled = false;
+ for (uint32_t new_idx = 0; new_idx < num_new_threads; ++new_idx)
+ {
+ if (thread == new_threads[new_idx].get())
+ {
+ thread->ThreadWillResume(&resume_new_threads);
+ handled = true;
+ break;
+ }
+ }
+
+ if (!handled)
+ {
+ const DNBThreadResumeAction *thread_action = thread_actions.GetActionForThread (thread->ThreadID(), true);
+ // There must always be a thread action for every thread.
+ assert (thread_action);
+ bool others_stopped = false;
+ if (solo_thread == thread->ThreadID())
+ others_stopped = true;
+ thread->ThreadWillResume (thread_action, others_stopped);
+ }
+ }
+
+ if (new_threads.size())
+ {
+ for (uint32_t idx = 0; idx < num_new_threads; ++idx)
+ {
+            DNBLogThreadedIf (LOG_THREAD, "MachThreadList::ProcessWillResume (pid = %4.4x) stop-id=%u, resuming newly discovered thread: 0x%8.8" PRIx64 ", thread-is-user-ready=%i",
+ process->ProcessID(),
+ process->StopCount(),
+ new_threads[idx]->ThreadID(),
+ new_threads[idx]->IsUserReady());
+ }
+ }
+}
+
+uint32_t
+MachThreadList::ProcessDidStop(MachProcess *process)
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+ // Update our thread list
+ const uint32_t num_threads = UpdateThreadList(process, true);
+ for (uint32_t idx = 0; idx < num_threads; ++idx)
+ {
+ m_threads[idx]->ThreadDidStop();
+ }
+ return num_threads;
+}
+
+//----------------------------------------------------------------------
+// Check each thread in our thread list to see if we should notify our
+// client of the current halt in execution.
+//
+// Breakpoints can have callback functions associated with them that
+// can return true to stop, or false to continue executing the inferior.
+//
+// RETURNS
+// true if we should stop and notify our clients
+// false if we should resume our child process and skip notification
+//----------------------------------------------------------------------
+bool
+MachThreadList::ShouldStop(bool &step_more)
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+ uint32_t should_stop = false;
+ const size_t num_threads = m_threads.size();
+ for (uint32_t idx = 0; !should_stop && idx < num_threads; ++idx)
+ {
+ should_stop = m_threads[idx]->ShouldStop(step_more);
+ }
+ return should_stop;
+}
+
+
+void
+MachThreadList::NotifyBreakpointChanged (const DNBBreakpoint *bp)
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+ const size_t num_threads = m_threads.size();
+ for (uint32_t idx = 0; idx < num_threads; ++idx)
+ {
+ m_threads[idx]->NotifyBreakpointChanged(bp);
+ }
+}
+
+
+uint32_t
+MachThreadList::EnableHardwareBreakpoint (const DNBBreakpoint* bp) const
+{
+ if (bp != NULL)
+ {
+ const size_t num_threads = m_threads.size();
+ for (uint32_t idx = 0; idx < num_threads; ++idx)
+ m_threads[idx]->EnableHardwareBreakpoint(bp);
+ }
+ return INVALID_NUB_HW_INDEX;
+}
+
+bool
+MachThreadList::DisableHardwareBreakpoint (const DNBBreakpoint* bp) const
+{
+ if (bp != NULL)
+ {
+ const size_t num_threads = m_threads.size();
+ for (uint32_t idx = 0; idx < num_threads; ++idx)
+ m_threads[idx]->DisableHardwareBreakpoint(bp);
+ }
+ return false;
+}
+
+// DNBWatchpointSet() -> MachProcess::CreateWatchpoint() -> MachProcess::EnableWatchpoint()
+// -> MachThreadList::EnableHardwareWatchpoint().
+uint32_t
+MachThreadList::EnableHardwareWatchpoint (const DNBBreakpoint* wp) const
+{
+ uint32_t hw_index = INVALID_NUB_HW_INDEX;
+ if (wp != NULL)
+ {
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+ const size_t num_threads = m_threads.size();
+ // On Mac OS X we have to prime the control registers for new threads. We do this
+ // using the control register data for the first thread, for lack of a better way of choosing.
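+        // The loop below is effectively an all-or-nothing transaction: if any thread
+        // fails to take the watchpoint, every thread that already accepted it rolls
+        // back its pending change and INVALID_NUB_HW_INDEX is returned; only when all
+        // threads succeed is the pending transaction committed with FinishTransForHWP().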
+ bool also_set_on_task = true;
+ for (uint32_t idx = 0; idx < num_threads; ++idx)
+ {
+ if ((hw_index = m_threads[idx]->EnableHardwareWatchpoint(wp, also_set_on_task)) == INVALID_NUB_HW_INDEX)
+ {
+ // We know that idx failed for some reason. Let's rollback the transaction for [0, idx).
+ for (uint32_t i = 0; i < idx; ++i)
+ m_threads[i]->RollbackTransForHWP();
+ return INVALID_NUB_HW_INDEX;
+ }
+ also_set_on_task = false;
+ }
+ // Notify each thread to commit the pending transaction.
+ for (uint32_t idx = 0; idx < num_threads; ++idx)
+ m_threads[idx]->FinishTransForHWP();
+
+ }
+ return hw_index;
+}
+
+bool
+MachThreadList::DisableHardwareWatchpoint (const DNBBreakpoint* wp) const
+{
+ if (wp != NULL)
+ {
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+ const size_t num_threads = m_threads.size();
+
+ // On Mac OS X we have to prime the control registers for new threads. We do this
+ // using the control register data for the first thread, for lack of a better way of choosing.
+ bool also_set_on_task = true;
+ for (uint32_t idx = 0; idx < num_threads; ++idx)
+ {
+ if (!m_threads[idx]->DisableHardwareWatchpoint(wp, also_set_on_task))
+ {
+ // We know that idx failed for some reason. Let's rollback the transaction for [0, idx).
+ for (uint32_t i = 0; i < idx; ++i)
+ m_threads[i]->RollbackTransForHWP();
+ return false;
+ }
+ also_set_on_task = false;
+ }
+ // Notify each thread to commit the pending transaction.
+ for (uint32_t idx = 0; idx < num_threads; ++idx)
+ m_threads[idx]->FinishTransForHWP();
+
+ return true;
+ }
+ return false;
+}
+
+uint32_t
+MachThreadList::NumSupportedHardwareWatchpoints () const
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+ const size_t num_threads = m_threads.size();
+ // Use an arbitrary thread to retrieve the number of supported hardware watchpoints.
+ if (num_threads)
+ return m_threads[0]->NumSupportedHardwareWatchpoints();
+ return 0;
+}
+
+uint32_t
+MachThreadList::GetThreadIndexForThreadStoppedWithSignal (const int signo) const
+{
+ PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
+ uint32_t should_stop = false;
+ const size_t num_threads = m_threads.size();
+ for (uint32_t idx = 0; !should_stop && idx < num_threads; ++idx)
+ {
+ if (m_threads[idx]->GetStopException().SoftSignal () == signo)
+ return idx;
+ }
+ return UINT32_MAX;
+}
+
diff --git a/tools/debugserver/source/MacOSX/MachThreadList.h b/tools/debugserver/source/MacOSX/MachThreadList.h
new file mode 100644
index 000000000000..0ab550e83fc6
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/MachThreadList.h
@@ -0,0 +1,87 @@
+//===-- MachThreadList.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/19/07.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __MachThreadList_h__
+#define __MachThreadList_h__
+
+#include "MachThread.h"
+#include "ThreadInfo.h"
+
+class DNBThreadResumeActions;
+
+class MachThreadList
+{
+public:
+ MachThreadList ();
+ ~MachThreadList ();
+
+ void Clear ();
+ void Dump () const;
+ bool GetRegisterValue (nub_thread_t tid, uint32_t set, uint32_t reg, DNBRegisterValue *reg_value) const;
+ bool SetRegisterValue (nub_thread_t tid, uint32_t set, uint32_t reg, const DNBRegisterValue *reg_value) const;
+ nub_size_t GetRegisterContext (nub_thread_t tid, void *buf, size_t buf_len);
+ nub_size_t SetRegisterContext (nub_thread_t tid, const void *buf, size_t buf_len);
+ uint32_t SaveRegisterState (nub_thread_t tid);
+ bool RestoreRegisterState (nub_thread_t tid, uint32_t save_id);
+ const char * GetThreadInfo (nub_thread_t tid) const;
+ void ProcessWillResume (MachProcess *process, const DNBThreadResumeActions &thread_actions);
+ uint32_t ProcessDidStop (MachProcess *process);
+ bool NotifyException (MachException::Data& exc);
+ bool ShouldStop (bool &step_more);
+ const char * GetName (nub_thread_t tid);
+ nub_state_t GetState (nub_thread_t tid);
+ nub_thread_t SetCurrentThread (nub_thread_t tid);
+
+ ThreadInfo::QoS GetRequestedQoS (nub_thread_t tid, nub_addr_t tsd, uint64_t dti_qos_class_index);
+ nub_addr_t GetPThreadT (nub_thread_t tid);
+ nub_addr_t GetDispatchQueueT (nub_thread_t tid);
+ nub_addr_t GetTSDAddressForThread (nub_thread_t tid, uint64_t plo_pthread_tsd_base_address_offset, uint64_t plo_pthread_tsd_base_offset, uint64_t plo_pthread_tsd_entry_size);
+
+ bool GetThreadStoppedReason (nub_thread_t tid, struct DNBThreadStopInfo *stop_info) const;
+ void DumpThreadStoppedReason (nub_thread_t tid) const;
+ bool GetIdentifierInfo (nub_thread_t tid, thread_identifier_info_data_t *ident_info);
+ nub_size_t NumThreads () const;
+ nub_thread_t ThreadIDAtIndex (nub_size_t idx) const;
+ nub_thread_t CurrentThreadID ();
+ void CurrentThread (MachThreadSP& threadSP);
+ void NotifyBreakpointChanged (const DNBBreakpoint *bp);
+ uint32_t EnableHardwareBreakpoint (const DNBBreakpoint *bp) const;
+ bool DisableHardwareBreakpoint (const DNBBreakpoint *bp) const;
+ uint32_t EnableHardwareWatchpoint (const DNBBreakpoint *wp) const;
+ bool DisableHardwareWatchpoint (const DNBBreakpoint *wp) const;
+ uint32_t NumSupportedHardwareWatchpoints () const;
+
+ uint32_t GetThreadIndexForThreadStoppedWithSignal (const int signo) const;
+
+ MachThreadSP GetThreadByID (nub_thread_t tid) const;
+
+ MachThreadSP GetThreadByMachPortNumber (thread_t mach_port_number) const;
+ nub_thread_t GetThreadIDByMachPortNumber (thread_t mach_port_number) const;
+ thread_t GetMachPortNumberByThreadID (nub_thread_t globally_unique_id) const;
+
+protected:
+ typedef std::vector<MachThreadSP> collection;
+ typedef collection::iterator iterator;
+ typedef collection::const_iterator const_iterator;
+
+ uint32_t UpdateThreadList (MachProcess *process, bool update, collection *num_threads = NULL);
+// const_iterator FindThreadByID (thread_t tid) const;
+
+ collection m_threads;
+ mutable PThreadMutex m_threads_mutex;
+ MachThreadSP m_current_thread;
+ bool m_is_64_bit;
+};
+
+#endif // #ifndef __MachThreadList_h__
+
diff --git a/tools/debugserver/source/MacOSX/MachVMMemory.cpp b/tools/debugserver/source/MacOSX/MachVMMemory.cpp
new file mode 100644
index 000000000000..3b86a83024d9
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/MachVMMemory.cpp
@@ -0,0 +1,598 @@
+//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/26/07.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MachVMMemory.h"
+#include "MachVMRegion.h"
+#include "DNBLog.h"
+#include <mach/mach_vm.h>
+#include <mach/shared_region.h>
+#include <sys/sysctl.h>
+#include <dlfcn.h>
+
+static const vm_size_t kInvalidPageSize = ~0;
+
+MachVMMemory::MachVMMemory() :
+ m_page_size (kInvalidPageSize),
+ m_err (0)
+{
+}
+
+MachVMMemory::~MachVMMemory()
+{
+}
+
+nub_size_t
+MachVMMemory::PageSize(task_t task)
+{
+ if (m_page_size == kInvalidPageSize)
+ {
+#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
+ if (task != TASK_NULL)
+ {
+ kern_return_t kr;
+ mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
+ task_vm_info_data_t vm_info;
+ kr = task_info (task, TASK_VM_INFO, (task_info_t) &vm_info, &info_count);
+ if (kr == KERN_SUCCESS)
+ {
+ DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info returned page size of 0x%x", (int) vm_info.page_size);
+ m_page_size = vm_info.page_size;
+ return m_page_size;
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call failed to get page size, TASK_VM_INFO %d, TASK_VM_INFO_COUNT %d, kern return %d", TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
+ }
+ }
+#endif
+ m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
+ if (m_err.Fail())
+ m_page_size = 0;
+ }
+ return m_page_size;
+}
+
+nub_size_t
+MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count)
+{
+ const nub_size_t page_size = PageSize(task);
+ if (page_size > 0)
+ {
+ nub_size_t page_offset = (addr % page_size);
+ nub_size_t bytes_left_in_page = page_size - page_offset;
+ if (count > bytes_left_in_page)
+ count = bytes_left_in_page;
+ }
+ return count;
+}
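+
+// For example (illustrative numbers only): with a 16 KiB page size, a 64-byte read
+// starting at page_base + 0x3FF8 is clamped to the 8 bytes that remain in that page;
+// the caller's loop then continues the transfer on the following page.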
+
+nub_bool_t
+MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
+{
+ MachVMRegion vmRegion(task);
+
+ if (vmRegion.GetRegionForAddress(address))
+ {
+ region_info->addr = vmRegion.StartAddress();
+ region_info->size = vmRegion.GetByteSize();
+ region_info->permissions = vmRegion.GetDNBPermissions();
+ }
+ else
+ {
+ region_info->addr = address;
+ region_info->size = 0;
+ if (vmRegion.GetError().Success())
+ {
+            // vmRegion.GetRegionForAddress() returned false, indicating that "address"
+            // wasn't in a valid region, but the "vmRegion" info was successfully
+            // read from the task, which means the info describes the next valid
+            // region, from which we can infer the size of this invalid region.
+ mach_vm_address_t start_addr = vmRegion.StartAddress();
+ if (address < start_addr)
+ region_info->size = start_addr - address;
+ }
+ // If we can't get any info about the size from the next region it means
+ // we asked about an address that was past all mappings, so the size
+ // of this region will take up all remaining address space.
+ if (region_info->size == 0)
+ region_info->size = INVALID_NUB_ADDRESS - region_info->addr;
+
+ // Not readable, writeable or executable
+ region_info->permissions = 0;
+ }
+ return true;
+}
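+
+// Note that GetMemoryRegionInfo() reports success even when "address" is not backed by
+// any mapping: in that case the returned DNBRegionInfo describes the unmapped gap, with
+// zero permissions and a size that runs up to the next mapped region or, failing that,
+// to the end of the address space.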
+
+// On machines with an integrated graphics chip, this makes the accounting of 'wired' memory match what top reports more closely.
+uint64_t
+MachVMMemory::GetStolenPages(task_t task)
+{
+ static uint64_t stolenPages = 0;
+ static bool calculated = false;
+ if (calculated) return stolenPages;
+
+ static int mib_reserved[CTL_MAXNAME];
+ static int mib_unusable[CTL_MAXNAME];
+ static int mib_other[CTL_MAXNAME];
+ static size_t mib_reserved_len = 0;
+ static size_t mib_unusable_len = 0;
+ static size_t mib_other_len = 0;
+ int r;
+
+ /* This can be used for testing: */
+ //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;
+
+ if(0 == mib_reserved_len)
+ {
+ mib_reserved_len = CTL_MAXNAME;
+
+ r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
+ &mib_reserved_len);
+
+ if(-1 == r)
+ {
+ mib_reserved_len = 0;
+ return 0;
+ }
+
+ mib_unusable_len = CTL_MAXNAME;
+
+ r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
+ &mib_unusable_len);
+
+ if(-1 == r)
+ {
+ mib_reserved_len = 0;
+ return 0;
+ }
+
+
+ mib_other_len = CTL_MAXNAME;
+
+ r = sysctlnametomib("machdep.memmap.Other", mib_other,
+ &mib_other_len);
+
+ if(-1 == r)
+ {
+ mib_reserved_len = 0;
+ return 0;
+ }
+ }
+
+ if(mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
+ {
+ uint64_t reserved = 0, unusable = 0, other = 0;
+ size_t reserved_len;
+ size_t unusable_len;
+ size_t other_len;
+
+ reserved_len = sizeof(reserved);
+ unusable_len = sizeof(unusable);
+ other_len = sizeof(other);
+
+ /* These are all declared as QUAD/uint64_t sysctls in the kernel. */
+
+ if (sysctl (mib_reserved,
+ static_cast<u_int>(mib_reserved_len),
+ &reserved,
+ &reserved_len,
+ NULL,
+ 0))
+ {
+ return 0;
+ }
+
+ if (sysctl (mib_unusable,
+ static_cast<u_int>(mib_unusable_len),
+ &unusable,
+ &unusable_len,
+ NULL,
+ 0))
+ {
+ return 0;
+ }
+
+ if (sysctl (mib_other,
+ static_cast<u_int>(mib_other_len),
+ &other,
+ &other_len,
+ NULL,
+ 0))
+ {
+ return 0;
+ }
+
+ if (reserved_len == sizeof(reserved) &&
+ unusable_len == sizeof(unusable) &&
+ other_len == sizeof(other))
+ {
+ uint64_t stolen = reserved + unusable + other;
+ uint64_t mb128 = 128 * 1024 * 1024ULL;
+
+ if(stolen >= mb128)
+ {
+ stolen = (stolen & ~((128 * 1024 * 1024ULL) - 1)); // rounding down
+ stolenPages = stolen / PageSize (task);
+ }
+ }
+ }
+
+ calculated = true;
+ return stolenPages;
+}
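+
+// For example (illustrative numbers only): if reserved + unusable + other added up to
+// 192 MB, the total is rounded down to the 128 MB boundary before being divided by the
+// page size; totals below 128 MB leave stolenPages at zero.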
+
+static uint64_t GetPhysicalMemory()
+{
+ // This doesn't change often at all. No need to poll each time.
+ static uint64_t physical_memory = 0;
+ static bool calculated = false;
+ if (calculated) return physical_memory;
+
+ size_t len = sizeof(physical_memory);
+ sysctlbyname("hw.memsize", &physical_memory, &len, NULL, 0);
+
+ calculated = true;
+ return physical_memory;
+}
+
+// rsize and dirty_size are not adjusted for the dyld shared cache or for multiple
+// __LINKEDIT segments, as they are in vmmap. In practice dirty_size doesn't differ
+// much, but rsize may. There is a performance penalty for the adjustment, so for now
+// only dirty_size is used.
+void
+MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
+{
+#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
+
+ task_vm_info_data_t vm_info;
+ mach_msg_type_number_t info_count;
+ kern_return_t kr;
+
+ info_count = TASK_VM_INFO_COUNT;
+ kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
+ if (kr == KERN_SUCCESS)
+ dirty_size = vm_info.internal;
+#endif
+}
+
+// Test whether the virtual address is within the architecture's shared region.
+static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
+{
+ mach_vm_address_t base = 0, size = 0;
+
+ switch(type) {
+#if defined (CPU_TYPE_ARM64) && defined (SHARED_REGION_BASE_ARM64)
+ case CPU_TYPE_ARM64:
+ base = SHARED_REGION_BASE_ARM64;
+ size = SHARED_REGION_SIZE_ARM64;
+ break;
+#endif
+
+ case CPU_TYPE_ARM:
+ base = SHARED_REGION_BASE_ARM;
+ size = SHARED_REGION_SIZE_ARM;
+ break;
+
+ case CPU_TYPE_X86_64:
+ base = SHARED_REGION_BASE_X86_64;
+ size = SHARED_REGION_SIZE_X86_64;
+ break;
+
+ case CPU_TYPE_I386:
+ base = SHARED_REGION_BASE_I386;
+ size = SHARED_REGION_SIZE_I386;
+ break;
+
+ default: {
+        // Log an error about the unknown CPU type.
+ break;
+ }
+ }
+
+
+ return(addr >= base && addr < (base + size));
+}
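+
+// The test is over the half-open interval [base, base + size); for an unrecognized CPU
+// type both base and size remain 0, so no address is ever treated as shared.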
+
+void
+MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
+{
+ // Collecting some other info cheaply but not reporting for now.
+ mach_vm_size_t empty = 0;
+ mach_vm_size_t fw_private = 0;
+
+ mach_vm_size_t aliased = 0;
+ bool global_shared_text_data_mapped = false;
+ vm_size_t pagesize = PageSize (task);
+
+ for (mach_vm_address_t addr=0, size=0; ; addr += size)
+ {
+ vm_region_top_info_data_t info;
+ mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
+ mach_port_t object_name;
+
+ kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
+ if (kr != KERN_SUCCESS) break;
+
+ if (InSharedRegion(addr, cputype))
+ {
+ // Private Shared
+ fw_private += info.private_pages_resident * pagesize;
+
+ // Check if this process has the globally shared text and data regions mapped in. If so, set global_shared_text_data_mapped to TRUE and avoid checking again.
+ if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
+ vm_region_basic_info_data_64_t b_info;
+ mach_vm_address_t b_addr = addr;
+ mach_vm_size_t b_size = size;
+ count = VM_REGION_BASIC_INFO_COUNT_64;
+
+ kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
+ if (kr != KERN_SUCCESS) break;
+
+ if (b_info.reserved) {
+ global_shared_text_data_mapped = TRUE;
+ }
+ }
+
+ // Short circuit the loop if this isn't a shared private region, since that's the only region type we care about within the current address range.
+ if (info.share_mode != SM_PRIVATE)
+ {
+ continue;
+ }
+ }
+
+ // Update counters according to the region type.
+ if (info.share_mode == SM_COW && info.ref_count == 1)
+ {
+ // Treat single reference SM_COW as SM_PRIVATE
+ info.share_mode = SM_PRIVATE;
+ }
+
+ switch (info.share_mode)
+ {
+ case SM_LARGE_PAGE:
+ // Treat SM_LARGE_PAGE the same as SM_PRIVATE
+ // since they are not shareable and are wired.
+ case SM_PRIVATE:
+ rprvt += info.private_pages_resident * pagesize;
+ rprvt += info.shared_pages_resident * pagesize;
+ vprvt += size;
+ break;
+
+ case SM_EMPTY:
+ empty += size;
+ break;
+
+ case SM_COW:
+ case SM_SHARED:
+ {
+ if (pid == 0)
+ {
+ // Treat kernel_task specially
+ if (info.share_mode == SM_COW)
+ {
+ rprvt += info.private_pages_resident * pagesize;
+ vprvt += size;
+ }
+ break;
+ }
+
+ if (info.share_mode == SM_COW)
+ {
+ rprvt += info.private_pages_resident * pagesize;
+ vprvt += info.private_pages_resident * pagesize;
+ }
+ break;
+ }
+ default:
+ // log that something is really bad.
+ break;
+ }
+ }
+
+ rprvt += aliased;
+}
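+
+// Accounting summary for the loop above: SM_PRIVATE regions (plus SM_LARGE_PAGE and
+// single-reference SM_COW regions, which are folded into SM_PRIVATE) contribute their
+// resident pages and virtual size to rprvt/vprvt; genuinely shared SM_COW regions
+// contribute only their privately resident pages; SM_SHARED and SM_EMPTY add nothing.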
+
+static void
+GetPurgeableAndAnonymous(task_t task, uint64_t &purgeable, uint64_t &anonymous)
+{
+#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
+
+ kern_return_t kr;
+ mach_msg_type_number_t info_count;
+ task_vm_info_data_t vm_info;
+
+ info_count = TASK_VM_INFO_COUNT;
+ kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
+ if (kr == KERN_SUCCESS)
+ {
+ purgeable = vm_info.purgeable_volatile_resident;
+ anonymous = vm_info.internal + vm_info.compressed - vm_info.purgeable_volatile_pmap;
+ }
+
+#endif
+}
+
+#if defined (HOST_VM_INFO64_COUNT)
+nub_bool_t
+MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics64_data_t &vminfo, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size, mach_vm_size_t &purgeable, mach_vm_size_t &anonymous)
+#else
+nub_bool_t
+MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vminfo, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size, mach_vm_size_t &purgeable, mach_vm_size_t &anonymous)
+#endif
+{
+ if (scanType & eProfileHostMemory)
+ physical_memory = GetPhysicalMemory();
+
+ if (scanType & eProfileMemory)
+ {
+ static mach_port_t localHost = mach_host_self();
+#if defined (HOST_VM_INFO64_COUNT)
+ mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
+ host_statistics64(localHost, HOST_VM_INFO64, (host_info64_t)&vminfo, &count);
+#else
+ mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
+ host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vminfo, &count);
+ vminfo.wire_count += GetStolenPages(task);
+#endif
+
+ /* We are no longer reporting these. Let's not waste time.
+ GetMemorySizes(task, cputype, pid, rprvt, vprvt);
+ rsize = ti.resident_size;
+ vsize = ti.virtual_size;
+
+ if (scanType & eProfileMemoryDirtyPage)
+ {
+ // This uses vmmap strategy. We don't use the returned rsize for now. We prefer to match top's version since that's what we do for the rest of the metrics.
+ GetRegionSizes(task, rsize, dirty_size);
+ }
+ */
+
+ if (scanType & eProfileMemoryAnonymous)
+ {
+ GetPurgeableAndAnonymous(task, purgeable, anonymous);
+ }
+ }
+
+ return true;
+}
+
+nub_size_t
+MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
+{
+ if (data == NULL || data_count == 0)
+ return 0;
+
+ nub_size_t total_bytes_read = 0;
+ nub_addr_t curr_addr = address;
+ uint8_t *curr_data = (uint8_t*)data;
+ while (total_bytes_read < data_count)
+ {
+ mach_vm_size_t curr_size = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
+ mach_msg_type_number_t curr_bytes_read = 0;
+ vm_offset_t vm_memory = 0;
+ m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);
+
+ if (DNBLogCheckLogBit(LOG_MEMORY))
+ m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);
+
+ if (m_err.Success())
+ {
+ if (curr_bytes_read != curr_size)
+ {
+ if (DNBLogCheckLogBit(LOG_MEMORY))
+ m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
+ }
+ ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
+ ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
+ total_bytes_read += curr_bytes_read;
+ curr_addr += curr_bytes_read;
+ curr_data += curr_bytes_read;
+ }
+ else
+ {
+ break;
+ }
+ }
+ return total_bytes_read;
+}
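+
+// Usage sketch (hypothetical caller, not part of this file): read an 8-byte word from
+// the inferior task and detect a short read that stopped at an unmapped page.
+//
+//   uint64_t word = 0;
+//   MachVMMemory vm;
+//   const nub_size_t n = vm.Read (task, addr, &word, sizeof(word));
+//   if (n < sizeof(word))
+//       DNBLogThreadedIf (LOG_MEMORY, "short read: %llu of %llu bytes", (uint64_t)n, (uint64_t)sizeof(word));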
+
+
+nub_size_t
+MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
+{
+ MachVMRegion vmRegion(task);
+
+ nub_size_t total_bytes_written = 0;
+ nub_addr_t curr_addr = address;
+ const uint8_t *curr_data = (const uint8_t*)data;
+
+
+ while (total_bytes_written < data_count)
+ {
+ if (vmRegion.GetRegionForAddress(curr_addr))
+ {
+ mach_vm_size_t curr_data_count = data_count - total_bytes_written;
+ mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
+ if (region_bytes_left == 0)
+ {
+ break;
+ }
+ if (curr_data_count > region_bytes_left)
+ curr_data_count = region_bytes_left;
+
+ if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
+ {
+ nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
+ if (bytes_written <= 0)
+ {
+ // Error should have already be posted by WriteRegion...
+ break;
+ }
+ else
+ {
+ total_bytes_written += bytes_written;
+ curr_addr += bytes_written;
+ curr_data += bytes_written;
+ }
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
+ break;
+ }
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)address);
+ break;
+ }
+ }
+
+ return total_bytes_written;
+}
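+
+// The flow above is: look up the region containing the current address, make it
+// readable and writable (possibly via a copy-on-write remap), hand the page-bounded
+// chunk to WriteRegion(), and let vmRegion's destructor restore the original
+// protections when this method returns.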
+
+
+nub_size_t
+MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
+{
+ if (data == NULL || data_count == 0)
+ return 0;
+
+ nub_size_t total_bytes_written = 0;
+ nub_addr_t curr_addr = address;
+ const uint8_t *curr_data = (const uint8_t*)data;
+ while (total_bytes_written < data_count)
+ {
+ mach_msg_type_number_t curr_data_count = static_cast<mach_msg_type_number_t>(MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_written));
+ m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
+ if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
+ m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);
+
+#if !defined (__i386__) && !defined (__x86_64__)
+ vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;
+
+ m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
+ if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
+ m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
+#endif
+
+ if (m_err.Success())
+ {
+ total_bytes_written += curr_data_count;
+ curr_addr += curr_data_count;
+ curr_data += curr_data_count;
+ }
+ else
+ {
+ break;
+ }
+ }
+ return total_bytes_written;
+}
diff --git a/tools/debugserver/source/MacOSX/MachVMMemory.h b/tools/debugserver/source/MacOSX/MachVMMemory.h
new file mode 100644
index 000000000000..abaa20368a26
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/MachVMMemory.h
@@ -0,0 +1,51 @@
+//===-- MachVMMemory.h ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/26/07.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __MachVMMemory_h__
+#define __MachVMMemory_h__
+
+#include "DNBDefs.h"
+#include "DNBError.h"
+#include <mach/mach.h>
+
+class MachVMMemory
+{
+public:
+ MachVMMemory();
+ ~MachVMMemory();
+ nub_size_t Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count);
+ nub_size_t Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count);
+ nub_size_t PageSize(task_t task);
+ nub_bool_t GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info);
+#if defined (HOST_VM_INFO64_COUNT)
+ nub_bool_t GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics64_data_t &vminfo, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size, mach_vm_size_t &purgeable, mach_vm_size_t &anonymous);
+#else
+ nub_bool_t GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vminfo, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size, mach_vm_size_t &purgeable, mach_vm_size_t &anonymous);
+#endif
+
+protected:
+ nub_size_t MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count);
+
+ uint64_t GetStolenPages(task_t task);
+ void GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size);
+ void GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt);
+
+
+ nub_size_t WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count);
+
+ vm_size_t m_page_size;
+ DNBError m_err;
+};
+
+
+#endif // #ifndef __MachVMMemory_h__
diff --git a/tools/debugserver/source/MacOSX/MachVMRegion.cpp b/tools/debugserver/source/MacOSX/MachVMRegion.cpp
new file mode 100644
index 000000000000..38757595cfed
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/MachVMRegion.cpp
@@ -0,0 +1,202 @@
+//===-- MachVMRegion.cpp ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/26/07.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MachVMRegion.h"
+#include <mach/mach_vm.h>
+#include "DNBLog.h"
+#include <assert.h>
+
+MachVMRegion::MachVMRegion(task_t task) :
+ m_task(task),
+ m_addr(INVALID_NUB_ADDRESS),
+ m_err(),
+ m_start(INVALID_NUB_ADDRESS),
+ m_size(0),
+ m_depth(-1),
+ m_curr_protection(0),
+ m_protection_addr(INVALID_NUB_ADDRESS),
+ m_protection_size(0)
+{
+ memset(&m_data, 0, sizeof(m_data));
+}
+
+MachVMRegion::~MachVMRegion()
+{
+ // Restore any original protections and clear our vars
+ Clear();
+}
+
+void
+MachVMRegion::Clear()
+{
+ RestoreProtections();
+ m_addr = INVALID_NUB_ADDRESS;
+ m_err.Clear();
+ m_start = INVALID_NUB_ADDRESS;
+ m_size = 0;
+ m_depth = -1;
+ memset(&m_data, 0, sizeof(m_data));
+ m_curr_protection = 0;
+ m_protection_addr = INVALID_NUB_ADDRESS;
+ m_protection_size = 0;
+}
+
+bool
+MachVMRegion::SetProtections(mach_vm_address_t addr, mach_vm_size_t size, vm_prot_t prot)
+{
+ if (ContainsAddress(addr))
+ {
+ mach_vm_size_t prot_size = size;
+ mach_vm_address_t end_addr = EndAddress();
+ if (prot_size > (end_addr - addr))
+ prot_size = end_addr - addr;
+
+ if (prot_size > 0)
+ {
+ if (prot == (m_curr_protection & VM_PROT_ALL))
+ {
+                DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS | LOG_VERBOSE, "MachVMRegion::%s: protections (%u) already sufficient for task 0x%4.4x at address 0x%8.8llx", __FUNCTION__, prot, m_task, (uint64_t)addr);
+ // Protections are already set as requested...
+ return true;
+ }
+ else
+ {
+ m_err = ::mach_vm_protect (m_task, addr, prot_size, 0, prot);
+ if (DNBLogCheckLogBit(LOG_MEMORY_PROTECTIONS))
+ m_err.LogThreaded("::mach_vm_protect ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, set_max = %i, prot = %u )", m_task, (uint64_t)addr, (uint64_t)prot_size, 0, prot);
+ if (m_err.Fail())
+ {
+ // Try again with the ability to create a copy on write region
+ m_err = ::mach_vm_protect (m_task, addr, prot_size, 0, prot | VM_PROT_COPY);
+ if (DNBLogCheckLogBit(LOG_MEMORY_PROTECTIONS) || m_err.Fail())
+ m_err.LogThreaded("::mach_vm_protect ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, set_max = %i, prot = %u )", m_task, (uint64_t)addr, (uint64_t)prot_size, 0, prot | VM_PROT_COPY);
+ }
+ if (m_err.Success())
+ {
+ m_curr_protection = prot;
+ m_protection_addr = addr;
+ m_protection_size = prot_size;
+ return true;
+ }
+ }
+ }
+ else
+ {
+            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS | LOG_VERBOSE, "%s: Zero size for task 0x%4.4x at address 0x%8.8llx", __FUNCTION__, m_task, (uint64_t)addr);
+ }
+ }
+ return false;
+}
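+
+// The VM_PROT_COPY retry above asks the kernel to replace a mapping that cannot simply
+// be made writable (for example a read-only code page shared from the dyld shared
+// cache) with a private copy-on-write version that can be, which is what allows
+// breakpoint opcodes to be written into otherwise immutable pages.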
+
+bool
+MachVMRegion::RestoreProtections()
+{
+ if (m_curr_protection != m_data.protection && m_protection_size > 0)
+ {
+ m_err = ::mach_vm_protect (m_task, m_protection_addr, m_protection_size, 0, m_data.protection);
+ if (DNBLogCheckLogBit(LOG_MEMORY_PROTECTIONS) || m_err.Fail())
+ m_err.LogThreaded("::mach_vm_protect ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, set_max = %i, prot = %u )", m_task, (uint64_t)m_protection_addr, (uint64_t)m_protection_size, 0, m_data.protection);
+ if (m_err.Success())
+ {
+ m_protection_size = 0;
+ m_protection_addr = INVALID_NUB_ADDRESS;
+ m_curr_protection = m_data.protection;
+ return true;
+ }
+ }
+ else
+ {
+ m_err.Clear();
+ return true;
+ }
+
+ return false;
+}
+
+bool
+MachVMRegion::GetRegionForAddress(nub_addr_t addr)
+{
+ // Restore any original protections and clear our vars
+ Clear();
+ m_err.Clear();
+ m_addr = addr;
+ m_start = addr;
+ m_depth = 1024;
+ mach_msg_type_number_t info_size = kRegionInfoSize;
+ assert(sizeof(info_size) == 4);
+ m_err = ::mach_vm_region_recurse (m_task, &m_start, &m_size, &m_depth, (vm_region_recurse_info_t)&m_data, &info_size);
+
+ const bool failed = m_err.Fail();
+ const bool log_protections = DNBLogCheckLogBit(LOG_MEMORY_PROTECTIONS);
+
+ if (log_protections || failed)
+ m_err.LogThreaded("::mach_vm_region_recurse ( task = 0x%4.4x, address => 0x%8.8llx, size => %llu, nesting_depth => %d, info => %p, infoCnt => %d) addr = 0x%8.8llx ", m_task, (uint64_t)m_start, (uint64_t)m_size, m_depth, &m_data, info_size, (uint64_t)addr);
+
+ if (failed)
+ return false;
+ if (log_protections)
+ {
+ DNBLogThreaded("info = { prot = %u, "
+ "max_prot = %u, "
+ "inheritance = 0x%8.8x, "
+ "offset = 0x%8.8llx, "
+ "user_tag = 0x%8.8x, "
+ "ref_count = %u, "
+ "shadow_depth = %u, "
+ "ext_pager = %u, "
+ "share_mode = %u, "
+ "is_submap = %d, "
+ "behavior = %d, "
+ "object_id = 0x%8.8x, "
+ "user_wired_count = 0x%4.4x }",
+ m_data.protection,
+ m_data.max_protection,
+ m_data.inheritance,
+ (uint64_t)m_data.offset,
+ m_data.user_tag,
+ m_data.ref_count,
+ m_data.shadow_depth,
+ m_data.external_pager,
+ m_data.share_mode,
+ m_data.is_submap,
+ m_data.behavior,
+ m_data.object_id,
+ m_data.user_wired_count);
+ }
+ m_curr_protection = m_data.protection;
+
+    // We made a request for an address and got no error back, but this
+    // doesn't mean that "addr" is in the range. The data in this object will
+    // still be valid, though, so you can see where the next region begins. So we
+    // return false, yet leave "m_err" with a successful return code.
+ if ((addr < m_start) || (addr >= (m_start + m_size)))
+ return false;
+
+ return true;
+}
+
+uint32_t
+MachVMRegion::GetDNBPermissions () const
+{
+ if (m_addr == INVALID_NUB_ADDRESS || m_start == INVALID_NUB_ADDRESS || m_size == 0)
+ return 0;
+ uint32_t dnb_permissions = 0;
+
+ if ((m_data.protection & VM_PROT_READ) == VM_PROT_READ)
+ dnb_permissions |= eMemoryPermissionsReadable;
+ if ((m_data.protection & VM_PROT_WRITE) == VM_PROT_WRITE)
+ dnb_permissions |= eMemoryPermissionsWritable;
+ if ((m_data.protection & VM_PROT_EXECUTE) == VM_PROT_EXECUTE)
+ dnb_permissions |= eMemoryPermissionsExecutable;
+ return dnb_permissions;
+}
diff --git a/tools/debugserver/source/MacOSX/MachVMRegion.h b/tools/debugserver/source/MacOSX/MachVMRegion.h
new file mode 100644
index 000000000000..bcac60b83182
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/MachVMRegion.h
@@ -0,0 +1,77 @@
+//===-- MachVMRegion.h ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/26/07.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __MachVMRegion_h__
+#define __MachVMRegion_h__
+
+#include "DNBDefs.h"
+#include "DNBError.h"
+#include <mach/mach.h>
+
+class MachVMRegion
+{
+public:
+ MachVMRegion(task_t task);
+ ~MachVMRegion();
+
+ void Clear();
+ mach_vm_address_t StartAddress() const { return m_start; }
+ mach_vm_address_t EndAddress() const { return m_start + m_size; }
+ mach_vm_size_t GetByteSize () const { return m_size; }
+ mach_vm_address_t BytesRemaining(mach_vm_address_t addr) const
+ {
+ if (ContainsAddress(addr))
+ return m_size - (addr - m_start);
+ else
+ return 0;
+ }
+ bool ContainsAddress(mach_vm_address_t addr) const
+ {
+ return addr >= StartAddress() && addr < EndAddress();
+ }
+
+ bool SetProtections(mach_vm_address_t addr, mach_vm_size_t size, vm_prot_t prot);
+ bool RestoreProtections();
+ bool GetRegionForAddress(nub_addr_t addr);
+
+ uint32_t
+ GetDNBPermissions () const;
+
+ const DNBError &
+ GetError ()
+ {
+ return m_err;
+ }
+protected:
+#if defined (VM_REGION_SUBMAP_SHORT_INFO_COUNT_64)
+ typedef vm_region_submap_short_info_data_64_t RegionInfo;
+ enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
+#else
+ typedef vm_region_submap_info_data_64_t RegionInfo;
+ enum { kRegionInfoSize = VM_REGION_SUBMAP_INFO_COUNT_64 };
+#endif
+
+ task_t m_task;
+ mach_vm_address_t m_addr;
+ DNBError m_err;
+ mach_vm_address_t m_start;
+ mach_vm_size_t m_size;
+ natural_t m_depth;
+ RegionInfo m_data;
+    vm_prot_t           m_curr_protection;   // The current, possibly modified protections. The original value is saved in m_data.protection.
+ mach_vm_address_t m_protection_addr; // The start address at which protections were changed
+ mach_vm_size_t m_protection_size; // The size of memory that had its protections changed
+
+};
+
+#endif // #ifndef __MachVMRegion_h__
diff --git a/tools/debugserver/source/MacOSX/Makefile b/tools/debugserver/source/MacOSX/Makefile
new file mode 100644
index 000000000000..d047444a9c81
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/Makefile
@@ -0,0 +1,54 @@
+##===- tools/debugserver/source/MacOSX/Makefile ------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+LLDB_LEVEL := ../../../..
+
+DIRS := i386 x86_64
+
+TOOLNAME = debugserver
+
+CODESIGN_TOOLS := 1
+
+TOOL_CODESIGN_IDENTITY := lldb_codesign
+
+LLVMLibsOptions += -llldbDebugserverCommon -llldbUtility -llldbDebugserverMacOSX_I386 -llldbDebugserverMacOSX_X86_64 \
+ -framework Foundation -framework CoreFoundation
+
+GENERATED_MACH_SOURCES = $(PROJ_OBJ_DIR)/mach_excServer.c $(PROJ_OBJ_DIR)/mach_excUser.c
+
+SOURCES := CFBundle.cpp \
+ CFData.cpp \
+ CFString.cpp \
+ MachException.cpp \
+ MachProcess.cpp \
+ MachTask.cpp \
+ MachThread.cpp \
+ MachThreadList.cpp \
+ MachVMMemory.cpp \
+ MachVMRegion.cpp
+
+BUILT_SOURCES = $(GENERATED_MACH_SOURCES) $(PROJ_OBJ_DIR)/HasAVX.o
+
+CPP.Flags += -I$(PROJ_OBJ_DIR)/../.. -I$(PROJ_SRC_DIR)/..
+
+LD.Flags += -Wl,-sectcreate,__TEXT,__info_plist,$(PROJ_SRC_DIR)/../../resources/lldb-debugserver-Info.plist
+
+include $(LLDB_LEVEL)/Makefile
+
+ObjectsO += $(PROJ_OBJ_DIR)/HasAVX.o
+
+$(PROJ_OBJ_DIR)/HasAVX.o: $(PROJ_SRC_DIR)/HasAVX.s
+ $(Echo) "Compiling HasAVX.s for $(BuildMode) build" $(PIC_FLAG)
+ $(CC) $(TargetCommonOpts) $(CompileCommonOpts) -c $< -o $@
+
+ifeq ($(HOST_OS),Darwin)
+LLVMLibsOptions += -Wl,-rpath,@loader_path/../lib/
+endif
+
+$(GENERATED_MACH_SOURCES):
+	mig -I$(PROJ_OBJ_DIR)/../.. $(PROJ_SRC_DIR)/dbgnub-mig.defs
\ No newline at end of file
diff --git a/tools/debugserver/source/MacOSX/ThreadInfo.h b/tools/debugserver/source/MacOSX/ThreadInfo.h
new file mode 100644
index 000000000000..1fd9d5790cf0
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/ThreadInfo.h
@@ -0,0 +1,26 @@
+//===-- ThreadInfo.h -----------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __ThreadInfo_h__
+#define __ThreadInfo_h__
+
+namespace ThreadInfo {
+
+class QoS {
+public:
+ QoS () : constant_name(), printable_name(), enum_value(UINT32_MAX) { }
+ bool IsValid () { return enum_value != UINT32_MAX; }
+ std::string constant_name;
+ std::string printable_name;
+ uint32_t enum_value;
+};
+
+};
+
+#endif // __ThreadInfo_h__
diff --git a/tools/debugserver/source/MacOSX/arm/DNBArchImpl.cpp b/tools/debugserver/source/MacOSX/arm/DNBArchImpl.cpp
new file mode 100644
index 000000000000..2eac47b045c3
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/arm/DNBArchImpl.cpp
@@ -0,0 +1,2162 @@
+//===-- DNBArchImpl.cpp -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/25/07.
+//
+//===----------------------------------------------------------------------===//
+
+#if defined (__arm__) || defined (__arm64__) || defined (__aarch64__)
+
+#include "MacOSX/arm/DNBArchImpl.h"
+#include "MacOSX/MachProcess.h"
+#include "MacOSX/MachThread.h"
+#include "DNBBreakpoint.h"
+#include "DNBLog.h"
+#include "DNBRegisterInfo.h"
+#include "DNB.h"
+#include "ARM_ehframe_Registers.h"
+#include "ARM_DWARF_Registers.h"
+
+#include <inttypes.h>
+#include <sys/sysctl.h>
+
+// BCR address match type
+#define BCR_M_IMVA_MATCH ((uint32_t)(0u << 21))
+#define BCR_M_CONTEXT_ID_MATCH ((uint32_t)(1u << 21))
+#define BCR_M_IMVA_MISMATCH ((uint32_t)(2u << 21))
+#define BCR_M_RESERVED ((uint32_t)(3u << 21))
+
+// Link a BVR/BCR or WVR/WCR pair to another
+#define E_ENABLE_LINKING ((uint32_t)(1u << 20))
+
+// Byte Address Select
+#define BAS_IMVA_PLUS_0 ((uint32_t)(1u << 5))
+#define BAS_IMVA_PLUS_1 ((uint32_t)(1u << 6))
+#define BAS_IMVA_PLUS_2 ((uint32_t)(1u << 7))
+#define BAS_IMVA_PLUS_3 ((uint32_t)(1u << 8))
+#define BAS_IMVA_0_1 ((uint32_t)(3u << 5))
+#define BAS_IMVA_2_3 ((uint32_t)(3u << 7))
+#define BAS_IMVA_ALL ((uint32_t)(0xfu << 5))
+
+// Break only in privileged or user mode
+#define S_RSVD ((uint32_t)(0u << 1))
+#define S_PRIV ((uint32_t)(1u << 1))
+#define S_USER ((uint32_t)(2u << 1))
+#define S_PRIV_USER ((S_PRIV) | (S_USER))
+
+#define BCR_ENABLE ((uint32_t)(1u))
+#define WCR_ENABLE ((uint32_t)(1u))
+
+// Watchpoint load/store
+#define WCR_LOAD ((uint32_t)(1u << 3))
+#define WCR_STORE ((uint32_t)(1u << 4))
+
+// Definitions for the Debug Status and Control Register fields:
+// [5:2] => Method of debug entry
+//#define WATCHPOINT_OCCURRED ((uint32_t)(2u))
+// I'm seeing this, instead.
+#define WATCHPOINT_OCCURRED ((uint32_t)(10u))
+
+// 0xE120BE70
+static const uint8_t g_arm_breakpoint_opcode[] = { 0x70, 0xBE, 0x20, 0xE1 };
+static const uint8_t g_thumb_breakpoint_opcode[] = { 0x70, 0xBE };
+
+// A watchpoint may need to be implemented using two watchpoint registers.
+// e.g. watching an 8-byte region when the device can only watch 4 bytes.
+//
+// This stores the lo->hi mappings. It's safe to initialize to all 0's
+// since hi > lo and therefore LoHi[i] cannot be 0.
+static uint32_t LoHi[16] = { 0 };
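+
+// For example, an 8-byte watchpoint on hardware that watches at most 4 bytes per
+// register is split across two slots: slot lo covers bytes 0-3, slot hi covers bytes
+// 4-7, and LoHi[lo] = hi records the pairing so both slots can be managed together.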
+
+// ARM constants used during decoding
+#define REG_RD 0
+#define LDM_REGLIST 1
+#define PC_REG 15
+#define PC_REGLIST_BIT 0x8000
+
+// ARM conditions
+#define COND_EQ 0x0
+#define COND_NE 0x1
+#define COND_CS 0x2
+#define COND_HS 0x2
+#define COND_CC 0x3
+#define COND_LO 0x3
+#define COND_MI 0x4
+#define COND_PL 0x5
+#define COND_VS 0x6
+#define COND_VC 0x7
+#define COND_HI 0x8
+#define COND_LS 0x9
+#define COND_GE 0xA
+#define COND_LT 0xB
+#define COND_GT 0xC
+#define COND_LE 0xD
+#define COND_AL 0xE
+#define COND_UNCOND 0xF
+
+#define MASK_CPSR_T (1u << 5)
+#define MASK_CPSR_J (1u << 24)
+
+#define MNEMONIC_STRING_SIZE 32
+#define OPERAND_STRING_SIZE 128
+
+// Returns true if the first 16-bit halfword of a Thumb instruction indicates
+// that the instruction will be a 32-bit Thumb opcode.
+static bool
+IsThumb32Opcode (uint16_t opcode)
+{
+ if (((opcode & 0xE000) == 0xE000) && (opcode & 0x1800))
+ return true;
+ return false;
+}
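+
+// For example, 0xE92D (the first halfword of a 32-bit Thumb-2 PUSH.W encoding) has
+// bits [15:13] == 0b111 and a non-zero value in bits [12:11], so it is reported as the
+// start of a 32-bit opcode, whereas 0xB580 (a 16-bit PUSH) is not.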
+
+void
+DNBArchMachARM::Initialize()
+{
+ DNBArchPluginInfo arch_plugin_info =
+ {
+ CPU_TYPE_ARM,
+ DNBArchMachARM::Create,
+ DNBArchMachARM::GetRegisterSetInfo,
+ DNBArchMachARM::SoftwareBreakpointOpcode
+ };
+
+ // Register this arch plug-in with the main protocol class
+ DNBArchProtocol::RegisterArchPlugin (arch_plugin_info);
+}
+
+
+DNBArchProtocol *
+DNBArchMachARM::Create (MachThread *thread)
+{
+ DNBArchMachARM *obj = new DNBArchMachARM (thread);
+ return obj;
+}
+
+const uint8_t *
+DNBArchMachARM::SoftwareBreakpointOpcode (nub_size_t byte_size)
+{
+ switch (byte_size)
+ {
+ case 2: return g_thumb_breakpoint_opcode;
+ case 4: return g_arm_breakpoint_opcode;
+ }
+ return NULL;
+}
+
+uint32_t
+DNBArchMachARM::GetCPUType()
+{
+ return CPU_TYPE_ARM;
+}
+
+uint64_t
+DNBArchMachARM::GetPC(uint64_t failValue)
+{
+ // Get program counter
+ if (GetGPRState(false) == KERN_SUCCESS)
+ return m_state.context.gpr.__pc;
+ return failValue;
+}
+
+kern_return_t
+DNBArchMachARM::SetPC(uint64_t value)
+{
+ // Get program counter
+ kern_return_t err = GetGPRState(false);
+ if (err == KERN_SUCCESS)
+ {
+ m_state.context.gpr.__pc = (uint32_t) value;
+ err = SetGPRState();
+ }
+ return err == KERN_SUCCESS;
+}
+
+uint64_t
+DNBArchMachARM::GetSP(uint64_t failValue)
+{
+ // Get stack pointer
+ if (GetGPRState(false) == KERN_SUCCESS)
+ return m_state.context.gpr.__sp;
+ return failValue;
+}
+
+kern_return_t
+DNBArchMachARM::GetGPRState(bool force)
+{
+ int set = e_regSetGPR;
+ // Check if we have valid cached registers
+ if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
+ return KERN_SUCCESS;
+
+ // Read the registers from our thread
+ mach_msg_type_number_t count = ARM_THREAD_STATE_COUNT;
+ kern_return_t kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count);
+ uint32_t *r = &m_state.context.gpr.__r[0];
+    DNBLogThreadedIf(LOG_THREAD, "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x (count = %u) regs r0=%8.8x r1=%8.8x r2=%8.8x r3=%8.8x r4=%8.8x r5=%8.8x r6=%8.8x r7=%8.8x r8=%8.8x r9=%8.8x r10=%8.8x r11=%8.8x r12=%8.8x sp=%8.8x lr=%8.8x pc=%8.8x cpsr=%8.8x",
+ m_thread->MachPortNumber(),
+ ARM_THREAD_STATE,
+ ARM_THREAD_STATE_COUNT,
+ kret,
+ count,
+ r[0],
+ r[1],
+ r[2],
+ r[3],
+ r[4],
+ r[5],
+ r[6],
+ r[7],
+ r[8],
+ r[9],
+ r[10],
+ r[11],
+ r[12],
+ r[13],
+ r[14],
+ r[15],
+ r[16]);
+ m_state.SetError(set, Read, kret);
+ return kret;
+}
+
+kern_return_t
+DNBArchMachARM::GetVFPState(bool force)
+{
+ int set = e_regSetVFP;
+ // Check if we have valid cached registers
+ if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
+ return KERN_SUCCESS;
+
+ kern_return_t kret;
+
+#if defined (__arm64__) || defined (__aarch64__)
+ // Read the registers from our thread
+ mach_msg_type_number_t count = ARM_NEON_STATE_COUNT;
+ kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_NEON_STATE, (thread_state_t)&m_state.context.vfp, &count);
+ if (DNBLogEnabledForAny (LOG_THREAD))
+ {
+ DNBLogThreaded("thread_get_state(0x%4.4x, %u, &vfp, %u) => 0x%8.8x (count = %u) regs"
+ "\n q0 = 0x%16.16llx%16.16llx"
+ "\n q1 = 0x%16.16llx%16.16llx"
+ "\n q2 = 0x%16.16llx%16.16llx"
+ "\n q3 = 0x%16.16llx%16.16llx"
+ "\n q4 = 0x%16.16llx%16.16llx"
+ "\n q5 = 0x%16.16llx%16.16llx"
+ "\n q6 = 0x%16.16llx%16.16llx"
+ "\n q7 = 0x%16.16llx%16.16llx"
+ "\n q8 = 0x%16.16llx%16.16llx"
+ "\n q9 = 0x%16.16llx%16.16llx"
+ "\n q10 = 0x%16.16llx%16.16llx"
+ "\n q11 = 0x%16.16llx%16.16llx"
+ "\n q12 = 0x%16.16llx%16.16llx"
+ "\n q13 = 0x%16.16llx%16.16llx"
+ "\n q14 = 0x%16.16llx%16.16llx"
+ "\n q15 = 0x%16.16llx%16.16llx"
+ "\n fpsr = 0x%8.8x"
+ "\n fpcr = 0x%8.8x\n\n",
+ m_thread->MachPortNumber(),
+ ARM_NEON_STATE,
+ ARM_NEON_STATE_COUNT,
+ kret,
+ count,
+ ((uint64_t *)&m_state.context.vfp.__v[0])[0] , ((uint64_t *)&m_state.context.vfp.__v[0])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[1])[0] , ((uint64_t *)&m_state.context.vfp.__v[1])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[2])[0] , ((uint64_t *)&m_state.context.vfp.__v[2])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[3])[0] , ((uint64_t *)&m_state.context.vfp.__v[3])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[4])[0] , ((uint64_t *)&m_state.context.vfp.__v[4])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[5])[0] , ((uint64_t *)&m_state.context.vfp.__v[5])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[6])[0] , ((uint64_t *)&m_state.context.vfp.__v[6])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[7])[0] , ((uint64_t *)&m_state.context.vfp.__v[7])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[8])[0] , ((uint64_t *)&m_state.context.vfp.__v[8])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[9])[0] , ((uint64_t *)&m_state.context.vfp.__v[9])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[10])[0], ((uint64_t *)&m_state.context.vfp.__v[10])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[11])[0], ((uint64_t *)&m_state.context.vfp.__v[11])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[12])[0], ((uint64_t *)&m_state.context.vfp.__v[12])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[13])[0], ((uint64_t *)&m_state.context.vfp.__v[13])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[14])[0], ((uint64_t *)&m_state.context.vfp.__v[14])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[15])[0], ((uint64_t *)&m_state.context.vfp.__v[15])[1],
+ m_state.context.vfp.__fpsr,
+ m_state.context.vfp.__fpcr);
+
+ }
+#else
+ // Read the registers from our thread
+ mach_msg_type_number_t count = ARM_VFP_STATE_COUNT;
+ kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_VFP_STATE, (thread_state_t)&m_state.context.vfp, &count);
+
+ if (DNBLogEnabledForAny (LOG_THREAD))
+ {
+ uint32_t *r = &m_state.context.vfp.__r[0];
+        DNBLogThreaded ("thread_get_state(0x%4.4x, %u, &vfp, %u) => 0x%8.8x (count => %u)",
+                        m_thread->MachPortNumber(),
+                        ARM_VFP_STATE,
+                        ARM_VFP_STATE_COUNT,
+ kret,
+ count);
+ DNBLogThreaded(" s0=%8.8x s1=%8.8x s2=%8.8x s3=%8.8x s4=%8.8x s5=%8.8x s6=%8.8x s7=%8.8x",r[ 0],r[ 1],r[ 2],r[ 3],r[ 4],r[ 5],r[ 6],r[ 7]);
+ DNBLogThreaded(" s8=%8.8x s9=%8.8x s10=%8.8x s11=%8.8x s12=%8.8x s13=%8.8x s14=%8.8x s15=%8.8x",r[ 8],r[ 9],r[10],r[11],r[12],r[13],r[14],r[15]);
+ DNBLogThreaded(" s16=%8.8x s17=%8.8x s18=%8.8x s19=%8.8x s20=%8.8x s21=%8.8x s22=%8.8x s23=%8.8x",r[16],r[17],r[18],r[19],r[20],r[21],r[22],r[23]);
+ DNBLogThreaded(" s24=%8.8x s25=%8.8x s26=%8.8x s27=%8.8x s28=%8.8x s29=%8.8x s30=%8.8x s31=%8.8x",r[24],r[25],r[26],r[27],r[28],r[29],r[30],r[31]);
+ DNBLogThreaded(" s32=%8.8x s33=%8.8x s34=%8.8x s35=%8.8x s36=%8.8x s37=%8.8x s38=%8.8x s39=%8.8x",r[32],r[33],r[34],r[35],r[36],r[37],r[38],r[39]);
+ DNBLogThreaded(" s40=%8.8x s41=%8.8x s42=%8.8x s43=%8.8x s44=%8.8x s45=%8.8x s46=%8.8x s47=%8.8x",r[40],r[41],r[42],r[43],r[44],r[45],r[46],r[47]);
+ DNBLogThreaded(" s48=%8.8x s49=%8.8x s50=%8.8x s51=%8.8x s52=%8.8x s53=%8.8x s54=%8.8x s55=%8.8x",r[48],r[49],r[50],r[51],r[52],r[53],r[54],r[55]);
+ DNBLogThreaded(" s56=%8.8x s57=%8.8x s58=%8.8x s59=%8.8x s60=%8.8x s61=%8.8x s62=%8.8x s63=%8.8x fpscr=%8.8x",r[56],r[57],r[58],r[59],r[60],r[61],r[62],r[63],r[64]);
+ }
+
+#endif
+ m_state.SetError(set, Read, kret);
+ return kret;
+}
+
+kern_return_t
+DNBArchMachARM::GetEXCState(bool force)
+{
+ int set = e_regSetEXC;
+ // Check if we have valid cached registers
+ if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
+ return KERN_SUCCESS;
+
+ // Read the registers from our thread
+ mach_msg_type_number_t count = ARM_EXCEPTION_STATE_COUNT;
+ kern_return_t kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count);
+ m_state.SetError(set, Read, kret);
+ return kret;
+}
+
+static void
+DumpDBGState(const DNBArchMachARM::DBG& dbg)
+{
+ uint32_t i = 0;
+ for (i=0; i<16; i++)
+ {
+ DNBLogThreadedIf(LOG_STEP, "BVR%-2u/BCR%-2u = { 0x%8.8x, 0x%8.8x } WVR%-2u/WCR%-2u = { 0x%8.8x, 0x%8.8x }",
+ i, i, dbg.__bvr[i], dbg.__bcr[i],
+ i, i, dbg.__wvr[i], dbg.__wcr[i]);
+ }
+}
+
+kern_return_t
+DNBArchMachARM::GetDBGState(bool force)
+{
+ int set = e_regSetDBG;
+
+ // Check if we have valid cached registers
+ if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
+ return KERN_SUCCESS;
+
+ // Read the registers from our thread
+#if defined (ARM_DEBUG_STATE32) && (defined (__arm64__) || defined (__aarch64__))
+ mach_msg_type_number_t count = ARM_DEBUG_STATE32_COUNT;
+ kern_return_t kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE32, (thread_state_t)&m_state.dbg, &count);
+#else
+ mach_msg_type_number_t count = ARM_DEBUG_STATE_COUNT;
+ kern_return_t kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE, (thread_state_t)&m_state.dbg, &count);
+#endif
+ m_state.SetError(set, Read, kret);
+
+ return kret;
+}
+
+kern_return_t
+DNBArchMachARM::SetGPRState()
+{
+ int set = e_regSetGPR;
+ kern_return_t kret = ::thread_set_state(m_thread->MachPortNumber(), ARM_THREAD_STATE, (thread_state_t)&m_state.context.gpr, ARM_THREAD_STATE_COUNT);
+ m_state.SetError(set, Write, kret); // Set the current write error for this register set
+ m_state.InvalidateRegisterSetState(set); // Invalidate the current register state in case registers are read back differently
+ return kret; // Return the error code
+}
+
+kern_return_t
+DNBArchMachARM::SetVFPState()
+{
+ int set = e_regSetVFP;
+ kern_return_t kret;
+ mach_msg_type_number_t count;
+
+#if defined (__arm64__) || defined (__aarch64__)
+ count = ARM_NEON_STATE_COUNT;
+ kret = ::thread_set_state (m_thread->MachPortNumber(), ARM_NEON_STATE, (thread_state_t)&m_state.context.vfp, count);
+#else
+ count = ARM_VFP_STATE_COUNT;
+ kret = ::thread_set_state (m_thread->MachPortNumber(), ARM_VFP_STATE, (thread_state_t)&m_state.context.vfp, count);
+#endif
+
+#if defined (__arm64__) || defined (__aarch64__)
+ if (DNBLogEnabledForAny (LOG_THREAD))
+ {
+ DNBLogThreaded("thread_set_state(0x%4.4x, %u, &vfp, %u) => 0x%8.8x (count = %u) regs"
+ "\n q0 = 0x%16.16llx%16.16llx"
+ "\n q1 = 0x%16.16llx%16.16llx"
+ "\n q2 = 0x%16.16llx%16.16llx"
+ "\n q3 = 0x%16.16llx%16.16llx"
+ "\n q4 = 0x%16.16llx%16.16llx"
+ "\n q5 = 0x%16.16llx%16.16llx"
+ "\n q6 = 0x%16.16llx%16.16llx"
+ "\n q7 = 0x%16.16llx%16.16llx"
+ "\n q8 = 0x%16.16llx%16.16llx"
+ "\n q9 = 0x%16.16llx%16.16llx"
+ "\n q10 = 0x%16.16llx%16.16llx"
+ "\n q11 = 0x%16.16llx%16.16llx"
+ "\n q12 = 0x%16.16llx%16.16llx"
+ "\n q13 = 0x%16.16llx%16.16llx"
+ "\n q14 = 0x%16.16llx%16.16llx"
+ "\n q15 = 0x%16.16llx%16.16llx"
+ "\n fpsr = 0x%8.8x"
+ "\n fpcr = 0x%8.8x\n\n",
+ m_thread->MachPortNumber(),
+ ARM_NEON_STATE,
+ ARM_NEON_STATE_COUNT,
+ kret,
+ count,
+ ((uint64_t *)&m_state.context.vfp.__v[0])[0] , ((uint64_t *)&m_state.context.vfp.__v[0])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[1])[0] , ((uint64_t *)&m_state.context.vfp.__v[1])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[2])[0] , ((uint64_t *)&m_state.context.vfp.__v[2])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[3])[0] , ((uint64_t *)&m_state.context.vfp.__v[3])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[4])[0] , ((uint64_t *)&m_state.context.vfp.__v[4])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[5])[0] , ((uint64_t *)&m_state.context.vfp.__v[5])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[6])[0] , ((uint64_t *)&m_state.context.vfp.__v[6])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[7])[0] , ((uint64_t *)&m_state.context.vfp.__v[7])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[8])[0] , ((uint64_t *)&m_state.context.vfp.__v[8])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[9])[0] , ((uint64_t *)&m_state.context.vfp.__v[9])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[10])[0], ((uint64_t *)&m_state.context.vfp.__v[10])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[11])[0], ((uint64_t *)&m_state.context.vfp.__v[11])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[12])[0], ((uint64_t *)&m_state.context.vfp.__v[12])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[13])[0], ((uint64_t *)&m_state.context.vfp.__v[13])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[14])[0], ((uint64_t *)&m_state.context.vfp.__v[14])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[15])[0], ((uint64_t *)&m_state.context.vfp.__v[15])[1],
+ m_state.context.vfp.__fpsr,
+ m_state.context.vfp.__fpcr);
+ }
+#else
+ if (DNBLogEnabledForAny (LOG_THREAD))
+ {
+ uint32_t *r = &m_state.context.vfp.__r[0];
+ DNBLogThreaded ("thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x (count => %u)",
+ m_thread->MachPortNumber(),
+ ARM_THREAD_STATE,
+ ARM_THREAD_STATE_COUNT,
+ kret,
+ count);
+ DNBLogThreaded(" s0=%8.8x s1=%8.8x s2=%8.8x s3=%8.8x s4=%8.8x s5=%8.8x s6=%8.8x s7=%8.8x",r[ 0],r[ 1],r[ 2],r[ 3],r[ 4],r[ 5],r[ 6],r[ 7]);
+ DNBLogThreaded(" s8=%8.8x s9=%8.8x s10=%8.8x s11=%8.8x s12=%8.8x s13=%8.8x s14=%8.8x s15=%8.8x",r[ 8],r[ 9],r[10],r[11],r[12],r[13],r[14],r[15]);
+ DNBLogThreaded(" s16=%8.8x s17=%8.8x s18=%8.8x s19=%8.8x s20=%8.8x s21=%8.8x s22=%8.8x s23=%8.8x",r[16],r[17],r[18],r[19],r[20],r[21],r[22],r[23]);
+ DNBLogThreaded(" s24=%8.8x s25=%8.8x s26=%8.8x s27=%8.8x s28=%8.8x s29=%8.8x s30=%8.8x s31=%8.8x",r[24],r[25],r[26],r[27],r[28],r[29],r[30],r[31]);
+ DNBLogThreaded(" s32=%8.8x s33=%8.8x s34=%8.8x s35=%8.8x s36=%8.8x s37=%8.8x s38=%8.8x s39=%8.8x",r[32],r[33],r[34],r[35],r[36],r[37],r[38],r[39]);
+ DNBLogThreaded(" s40=%8.8x s41=%8.8x s42=%8.8x s43=%8.8x s44=%8.8x s45=%8.8x s46=%8.8x s47=%8.8x",r[40],r[41],r[42],r[43],r[44],r[45],r[46],r[47]);
+ DNBLogThreaded(" s48=%8.8x s49=%8.8x s50=%8.8x s51=%8.8x s52=%8.8x s53=%8.8x s54=%8.8x s55=%8.8x",r[48],r[49],r[50],r[51],r[52],r[53],r[54],r[55]);
+ DNBLogThreaded(" s56=%8.8x s57=%8.8x s58=%8.8x s59=%8.8x s60=%8.8x s61=%8.8x s62=%8.8x s63=%8.8x fpscr=%8.8x",r[56],r[57],r[58],r[59],r[60],r[61],r[62],r[63],r[64]);
+ }
+#endif
+
+ m_state.SetError(set, Write, kret); // Set the current write error for this register set
+ m_state.InvalidateRegisterSetState(set); // Invalidate the current register state in case registers are read back differently
+ return kret; // Return the error code
+}
+
+kern_return_t
+DNBArchMachARM::SetEXCState()
+{
+ int set = e_regSetEXC;
+ kern_return_t kret = ::thread_set_state (m_thread->MachPortNumber(), ARM_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, ARM_EXCEPTION_STATE_COUNT);
+ m_state.SetError(set, Write, kret); // Set the current write error for this register set
+ m_state.InvalidateRegisterSetState(set); // Invalidate the current register state in case registers are read back differently
+ return kret; // Return the error code
+}
+
+kern_return_t
+DNBArchMachARM::SetDBGState(bool also_set_on_task)
+{
+ int set = e_regSetDBG;
+#if defined (ARM_DEBUG_STATE32) && (defined (__arm64__) || defined (__aarch64__))
+ kern_return_t kret = ::thread_set_state (m_thread->MachPortNumber(), ARM_DEBUG_STATE32, (thread_state_t)&m_state.dbg, ARM_DEBUG_STATE32_COUNT);
+ if (also_set_on_task)
+ {
+ kern_return_t task_kret = ::task_set_state (m_thread->Process()->Task().TaskPort(), ARM_DEBUG_STATE32, (thread_state_t)&m_state.dbg, ARM_DEBUG_STATE32_COUNT);
+ if (task_kret != KERN_SUCCESS)
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::SetDBGState failed to set debug control register state: 0x%8.8x.", kret);
+ }
+#else
+ kern_return_t kret = ::thread_set_state (m_thread->MachPortNumber(), ARM_DEBUG_STATE, (thread_state_t)&m_state.dbg, ARM_DEBUG_STATE_COUNT);
+ if (also_set_on_task)
+ {
+ kern_return_t task_kret = ::task_set_state (m_thread->Process()->Task().TaskPort(), ARM_DEBUG_STATE, (thread_state_t)&m_state.dbg, ARM_DEBUG_STATE_COUNT);
+ if (task_kret != KERN_SUCCESS)
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::SetDBGState failed to set debug control register state: 0x%8.8x.", kret);
+ }
+#endif
+
+ m_state.SetError(set, Write, kret); // Set the current write error for this register set
+ m_state.InvalidateRegisterSetState(set); // Invalidate the current register state in case registers are read back differently
+ return kret; // Return the error code
+}
+
+void
+DNBArchMachARM::ThreadWillResume()
+{
+ // Do we need to step this thread? If so, let the mach thread tell us so.
+ if (m_thread->IsStepping())
+ {
+ // This is the primary thread, let the arch do anything it needs
+ if (NumSupportedHardwareBreakpoints() > 0)
+ {
+ if (EnableHardwareSingleStep(true) != KERN_SUCCESS)
+ {
+ DNBLogThreaded("DNBArchMachARM::ThreadWillResume() failed to enable hardware single step");
+ }
+ }
+ }
+
+ // Disable the triggered watchpoint temporarily before we resume.
+ // Plus, we try to enable hardware single step to execute past the instruction which triggered our watchpoint.
+ if (m_watchpoint_did_occur)
+ {
+ if (m_watchpoint_hw_index >= 0)
+ {
+ kern_return_t kret = GetDBGState(false);
+ if (kret == KERN_SUCCESS && !IsWatchpointEnabled(m_state.dbg, m_watchpoint_hw_index)) {
+ // The watchpoint might have been disabled by the user. We don't need to do anything at all
+ // to enable hardware single stepping.
+ m_watchpoint_did_occur = false;
+ m_watchpoint_hw_index = -1;
+ return;
+ }
+
+ DisableHardwareWatchpoint(m_watchpoint_hw_index, false);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() DisableHardwareWatchpoint(%d) called",
+ m_watchpoint_hw_index);
+
+ // Enable hardware single step to move past the watchpoint-triggering instruction.
+ m_watchpoint_resume_single_step_enabled = (EnableHardwareSingleStep(true) == KERN_SUCCESS);
+
+ // If we are not able to enable single step to move past the watchpoint-triggering instruction,
+ // at least we should reset the two watchpoint member variables so that the next time around
+ // this callback function is invoked, the enclosing logical branch is skipped.
+ if (!m_watchpoint_resume_single_step_enabled) {
+ // Reset the two watchpoint member variables.
+ m_watchpoint_did_occur = false;
+ m_watchpoint_hw_index = -1;
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() failed to enable single step");
+ }
+ else
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() succeeded to enable single step");
+ }
+ }
+}
+
+bool
+DNBArchMachARM::ThreadDidStop()
+{
+ bool success = true;
+
+ m_state.InvalidateRegisterSetState (e_regSetALL);
+
+ if (m_watchpoint_resume_single_step_enabled)
+ {
+ // Great! We now disable the hardware single step as well as re-enable the hardware watchpoint.
+ // See also ThreadWillResume().
+ if (EnableHardwareSingleStep(false) == KERN_SUCCESS)
+ {
+ if (m_watchpoint_did_occur && m_watchpoint_hw_index >= 0)
+ {
+ ReenableHardwareWatchpoint(m_watchpoint_hw_index);
+ m_watchpoint_resume_single_step_enabled = false;
+ m_watchpoint_did_occur = false;
+ m_watchpoint_hw_index = -1;
+ }
+ else
+ {
+ DNBLogError("internal error detected: m_watchpoint_resume_step_enabled is true but (m_watchpoint_did_occur && m_watchpoint_hw_index >= 0) does not hold!");
+ }
+ }
+ else
+ {
+ DNBLogError("internal error detected: m_watchpoint_resume_step_enabled is true but unable to disable single step!");
+ }
+ }
+
+ // Are we stepping a single instruction?
+ if (GetGPRState(true) == KERN_SUCCESS)
+ {
+ // We are single stepping, was this the primary thread?
+ if (m_thread->IsStepping())
+ {
+ success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
+ }
+ else
+ {
+ // The MachThread will automatically restore the suspend count
+ // in ThreadDidStop(), so we don't need to do anything here if
+ // we weren't the primary thread the last time
+ }
+ }
+ return success;
+}
+
+bool
+DNBArchMachARM::NotifyException(MachException::Data& exc)
+{
+ switch (exc.exc_type)
+ {
+ default:
+ break;
+ case EXC_BREAKPOINT:
+ if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_DA_DEBUG)
+ {
+ // The data break address is passed as exc_data[1].
+ nub_addr_t addr = exc.exc_data[1];
+ // Find the hardware index with the side effect of possibly massaging the
+ // addr to return the starting address as seen from the debugger side.
+ uint32_t hw_index = GetHardwareWatchpointHit(addr);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::NotifyException watchpoint %d was hit on address 0x%llx", hw_index, (uint64_t) addr);
+ const int num_watchpoints = NumSupportedHardwareWatchpoints ();
+ for (int i = 0; i < num_watchpoints; i++)
+ {
+ if (LoHi[i] != 0
+ && LoHi[i] == hw_index
+ && LoHi[i] != i
+ && GetWatchpointAddressByIndex (i) != INVALID_NUB_ADDRESS)
+ {
+ addr = GetWatchpointAddressByIndex (i);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::NotifyException It is a linked watchpoint; rewritten to index %d addr 0x%llx", LoHi[i], (uint64_t) addr);
+ }
+ }
+ if (hw_index != INVALID_NUB_HW_INDEX)
+ {
+ m_watchpoint_did_occur = true;
+ m_watchpoint_hw_index = hw_index;
+ exc.exc_data[1] = addr;
+ // Piggyback the hw_index in the exc.data.
+ exc.exc_data.push_back(hw_index);
+ }
+
+ return true;
+ }
+ break;
+ }
+ return false;
+}
+
+bool
+DNBArchMachARM::StepNotComplete ()
+{
+ if (m_hw_single_chained_step_addr != INVALID_NUB_ADDRESS)
+ {
+ kern_return_t kret = KERN_INVALID_ARGUMENT;
+ kret = GetGPRState(false);
+ if (kret == KERN_SUCCESS)
+ {
+ if (m_state.context.gpr.__pc == m_hw_single_chained_step_addr)
+ {
+ DNBLogThreadedIf(LOG_STEP, "Need to step some more at 0x%8.8llx", (uint64_t) m_hw_single_chained_step_addr);
+ return true;
+ }
+ }
+ }
+
+ m_hw_single_chained_step_addr = INVALID_NUB_ADDRESS;
+ return false;
+}
+
+// Enable or disable hardware single stepping: on arm64 this sets the MDSCR_EL1 SS bit;
+// on armv7 it programs an instruction-address-mismatch hardware breakpoint.
+kern_return_t
+DNBArchMachARM::EnableHardwareSingleStep (bool enable)
+{
+ DNBError err;
+ DNBLogThreadedIf(LOG_STEP, "%s( enable = %d )", __FUNCTION__, enable);
+
+ err = GetGPRState(false);
+
+ if (err.Fail())
+ {
+ err.LogThreaded("%s: failed to read the GPR registers", __FUNCTION__);
+ return err.Error();
+ }
+
+ err = GetDBGState(false);
+
+ if (err.Fail())
+ {
+ err.LogThreaded("%s: failed to read the DBG registers", __FUNCTION__);
+ return err.Error();
+ }
+
+// The use of __arm64__ here is not ideal. If debugserver is running on
+// an armv8 device, regardless of whether it was built for arch arm or arch arm64,
+// it needs to use the MDSCR_EL1 SS bit to single instruction step.
+
+#if defined (__arm64__) || defined (__aarch64__)
+ if (enable)
+ {
+ DNBLogThreadedIf(LOG_STEP, "%s: Setting MDSCR_EL1 Single Step bit at pc 0x%llx", __FUNCTION__, (uint64_t) m_state.context.gpr.__pc);
+ m_state.dbg.__mdscr_el1 |= 1; // Set bit 0 (single step, SS) in the MDSCR_EL1.
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_STEP, "%s: Clearing MDSCR_EL1 Single Step bit at pc 0x%llx", __FUNCTION__, (uint64_t) m_state.context.gpr.__pc);
+ m_state.dbg.__mdscr_el1 &= ~(1ULL); // Clear bit 0 (single step, SS) in the MDSCR_EL1.
+ }
+#else
+ const uint32_t i = 0;
+ if (enable)
+ {
+ m_hw_single_chained_step_addr = INVALID_NUB_ADDRESS;
+
+ // Save our previous state
+ m_dbg_save = m_state.dbg;
+ // Set a breakpoint that will stop when the PC doesn't match the current one!
+ m_state.dbg.__bvr[i] = m_state.context.gpr.__pc & 0xFFFFFFFCu; // Set the current PC as the breakpoint address
+ m_state.dbg.__bcr[i] = BCR_M_IMVA_MISMATCH | // Stop on address mismatch
+ S_USER | // Stop only in user mode
+ BCR_ENABLE; // Enable this breakpoint
+ if (m_state.context.gpr.__cpsr & 0x20)
+ {
+ // Thumb breakpoint
+ if (m_state.context.gpr.__pc & 2)
+ m_state.dbg.__bcr[i] |= BAS_IMVA_2_3;
+ else
+ m_state.dbg.__bcr[i] |= BAS_IMVA_0_1;
+
+ uint16_t opcode;
+ if (sizeof(opcode) == m_thread->Process()->Task().ReadMemory(m_state.context.gpr.__pc, sizeof(opcode), &opcode))
+ {
+ if (IsThumb32Opcode(opcode))
+ {
+ // 32 bit thumb opcode...
+ if (m_state.context.gpr.__pc & 2)
+ {
+ // We can't take care of a 32 bit thumb instruction single step
+ // with just IVA mismatching. We will need to chain an extra
+ // hardware single step in order to complete this single step...
+ m_hw_single_chained_step_addr = m_state.context.gpr.__pc + 2;
+ }
+ else
+ {
+ // Extend the number of bits to ignore for the mismatch
+ m_state.dbg.__bcr[i] |= BAS_IMVA_ALL;
+ }
+ }
+ }
+ }
+ else
+ {
+ // ARM breakpoint
+ m_state.dbg.__bcr[i] |= BAS_IMVA_ALL; // Stop when any address bits change
+ }
+
+ DNBLogThreadedIf(LOG_STEP, "%s: BVR%u=0x%8.8x BCR%u=0x%8.8x", __FUNCTION__, i, m_state.dbg.__bvr[i], i, m_state.dbg.__bcr[i]);
+
+ for (uint32_t j=i+1; j<16; ++j)
+ {
+ // Disable all others
+ m_state.dbg.__bvr[j] = 0;
+ m_state.dbg.__bcr[j] = 0;
+ }
+ }
+ else
+ {
+ // Just restore the state we had before we did single stepping
+ m_state.dbg = m_dbg_save;
+ }
+#endif
+
+ return SetDBGState(false);
+}
+
+// return 1 if bit "BIT" is set in "value"
+static inline uint32_t bit(uint32_t value, uint32_t bit)
+{
+ return (value >> bit) & 1u;
+}
+
+// return the bitfield "value[msbit:lsbit]".
+static inline uint32_t bits(uint32_t value, uint32_t msbit, uint32_t lsbit)
+{
+ assert(msbit >= lsbit);
+ uint32_t shift_left = sizeof(value) * 8 - 1 - msbit;
+ value <<= shift_left; // shift anything above the msbit off of the unsigned edge
+ value >>= (shift_left + lsbit); // shift it back again down to the lsbit (including undoing any shift from above)
+ return value; // return our result
+}
+
+bool
+DNBArchMachARM::ConditionPassed(uint8_t condition, uint32_t cpsr)
+{
+ uint32_t cpsr_n = bit(cpsr, 31); // Negative condition code flag
+ uint32_t cpsr_z = bit(cpsr, 30); // Zero condition code flag
+ uint32_t cpsr_c = bit(cpsr, 29); // Carry condition code flag
+ uint32_t cpsr_v = bit(cpsr, 28); // Overflow condition code flag
+
+ switch (condition) {
+ case COND_EQ: // (0x0)
+ if (cpsr_z == 1) return true;
+ break;
+ case COND_NE: // (0x1)
+ if (cpsr_z == 0) return true;
+ break;
+ case COND_CS: // (0x2)
+ if (cpsr_c == 1) return true;
+ break;
+ case COND_CC: // (0x3)
+ if (cpsr_c == 0) return true;
+ break;
+ case COND_MI: // (0x4)
+ if (cpsr_n == 1) return true;
+ break;
+ case COND_PL: // (0x5)
+ if (cpsr_n == 0) return true;
+ break;
+ case COND_VS: // (0x6)
+ if (cpsr_v == 1) return true;
+ break;
+ case COND_VC: // (0x7)
+ if (cpsr_v == 0) return true;
+ break;
+ case COND_HI: // (0x8)
+ if ((cpsr_c == 1) && (cpsr_z == 0)) return true;
+ break;
+ case COND_LS: // (0x9)
+ if ((cpsr_c == 0) || (cpsr_z == 1)) return true;
+ break;
+ case COND_GE: // (0xA)
+ if (cpsr_n == cpsr_v) return true;
+ break;
+ case COND_LT: // (0xB)
+ if (cpsr_n != cpsr_v) return true;
+ break;
+ case COND_GT: // (0xC)
+ if ((cpsr_z == 0) && (cpsr_n == cpsr_v)) return true;
+ break;
+ case COND_LE: // (0xD)
+ if ((cpsr_z == 1) || (cpsr_n != cpsr_v)) return true;
+ break;
+ default:
+ return true;
+ break;
+ }
+
+ return false;
+}
+
+uint32_t
+DNBArchMachARM::NumSupportedHardwareBreakpoints()
+{
+ // Set the init value to something that will let us know that we need to
+ // autodetect how many breakpoints are supported dynamically...
+ static uint32_t g_num_supported_hw_breakpoints = UINT_MAX;
+ if (g_num_supported_hw_breakpoints == UINT_MAX)
+ {
+ // Set this to zero in case we can't tell if there are any HW breakpoints
+ g_num_supported_hw_breakpoints = 0;
+
+ size_t len;
+ uint32_t n = 0;
+ len = sizeof (n);
+ if (::sysctlbyname("hw.optional.breakpoint", &n, &len, NULL, 0) == 0)
+ {
+ g_num_supported_hw_breakpoints = n;
+ DNBLogThreadedIf(LOG_THREAD, "hw.optional.breakpoint=%u", n);
+ }
+ else
+ {
+#if !defined (__arm64__) && !defined (__aarch64__)
+ // Read the DBGDIDR to get the number of available hardware breakpoints
+ // However, in some of our current armv7 processors, hardware
+ // breakpoints/watchpoints were not properly connected. So detect those
+ // cases using a field in a sysctl. For now we are using "hw.cpusubtype"
+ // field to distinguish CPU architectures. This is a hack until we can
+ // get <rdar://problem/6372672> fixed, at which point we will switch to
+ // using a different sysctl string that will tell us how many BRPs
+ // are available to us directly without having to read DBGDIDR.
+ uint32_t register_DBGDIDR;
+
+ asm("mrc p14, 0, %0, c0, c0, 0" : "=r" (register_DBGDIDR));
+ uint32_t numBRPs = bits(register_DBGDIDR, 27, 24);
+ // Zero is reserved for the BRP count, so don't increment it if it is zero
+ if (numBRPs > 0)
+ numBRPs++;
+ DNBLogThreadedIf(LOG_THREAD, "DBGDIDR=0x%8.8x (number BRP pairs = %u)", register_DBGDIDR, numBRPs);
+
+ if (numBRPs > 0)
+ {
+ uint32_t cpusubtype;
+ len = sizeof(cpusubtype);
+ // TODO: remove this hack and change to using hw.optional.xx when implemented
+ if (::sysctlbyname("hw.cpusubtype", &cpusubtype, &len, NULL, 0) == 0)
+ {
+ DNBLogThreadedIf(LOG_THREAD, "hw.cpusubtype=%d", cpusubtype);
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7)
+ DNBLogThreadedIf(LOG_THREAD, "Hardware breakpoints disabled for armv7 (rdar://problem/6372672)");
+ else
+ g_num_supported_hw_breakpoints = numBRPs;
+ }
+ }
+#endif
+ }
+ }
+ return g_num_supported_hw_breakpoints;
+}
+
+
+uint32_t
+DNBArchMachARM::NumSupportedHardwareWatchpoints()
+{
+ // Set the init value to something that will let us know that we need to
+ // autodetect how many watchpoints are supported dynamically...
+ static uint32_t g_num_supported_hw_watchpoints = UINT_MAX;
+ if (g_num_supported_hw_watchpoints == UINT_MAX)
+ {
+ // Set this to zero in case we can't tell if there are any HW watchpoints
+ g_num_supported_hw_watchpoints = 0;
+
+
+ size_t len;
+ uint32_t n = 0;
+ len = sizeof (n);
+ if (::sysctlbyname("hw.optional.watchpoint", &n, &len, NULL, 0) == 0)
+ {
+ g_num_supported_hw_watchpoints = n;
+ DNBLogThreadedIf(LOG_THREAD, "hw.optional.watchpoint=%u", n);
+ }
+ else
+ {
+#if !defined (__arm64__) && !defined (__aarch64__)
+ // Read the DBGDIDR to get the number of available hardware watchpoints.
+ // However, in some of our current armv7 processors, hardware
+ // breakpoints/watchpoints were not properly connected. So detect those
+ // cases using a field in a sysctl. For now we are using "hw.cpusubtype"
+ // field to distinguish CPU architectures. This is a hack until we can
+ // get <rdar://problem/6372672> fixed, at which point we will switch to
+ // using a different sysctl string that will tell us how many WRPs
+ // are available to us directly without having to read DBGDIDR.
+
+ uint32_t register_DBGDIDR;
+ asm("mrc p14, 0, %0, c0, c0, 0" : "=r" (register_DBGDIDR));
+ uint32_t numWRPs = bits(register_DBGDIDR, 31, 28) + 1;
+ DNBLogThreadedIf(LOG_THREAD, "DBGDIDR=0x%8.8x (number WRP pairs = %u)", register_DBGDIDR, numWRPs);
+
+ if (numWRPs > 0)
+ {
+ uint32_t cpusubtype;
+ size_t len;
+ len = sizeof(cpusubtype);
+ // TODO: remove this hack and change to using hw.optional.xx when implemented
+ if (::sysctlbyname("hw.cpusubtype", &cpusubtype, &len, NULL, 0) == 0)
+ {
+ DNBLogThreadedIf(LOG_THREAD, "hw.cpusubtype=%d", cpusubtype);
+
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7)
+ DNBLogThreadedIf(LOG_THREAD, "Hardware watchpoints disabled for armv7 (rdar://problem/6372672)");
+ else
+ g_num_supported_hw_watchpoints = numWRPs;
+ }
+ }
+#endif
+ }
+ }
+ return g_num_supported_hw_watchpoints;
+}
+
+
+uint32_t
+DNBArchMachARM::EnableHardwareBreakpoint (nub_addr_t addr, nub_size_t size)
+{
+ // Make sure our address isn't bogus
+ if (addr & 1)
+ return INVALID_NUB_HW_INDEX;
+
+ kern_return_t kret = GetDBGState(false);
+
+ if (kret == KERN_SUCCESS)
+ {
+ const uint32_t num_hw_breakpoints = NumSupportedHardwareBreakpoints();
+ uint32_t i;
+ for (i=0; i<num_hw_breakpoints; ++i)
+ {
+ if ((m_state.dbg.__bcr[i] & BCR_ENABLE) == 0)
+ break; // We found an available hw breakpoint slot (in i)
+ }
+
+ // See if we found an available hw breakpoint slot above
+ if (i < num_hw_breakpoints)
+ {
+ // Make sure bits 1:0 are clear in our address
+ m_state.dbg.__bvr[i] = addr & ~((nub_addr_t)3);
+
+ if (size == 2 || addr & 2)
+ {
+ uint32_t byte_addr_select = (addr & 2) ? BAS_IMVA_2_3 : BAS_IMVA_0_1;
+
+ // We have a thumb breakpoint
+ m_state.dbg.__bcr[i] = BCR_M_IMVA_MATCH | // Stop on address match
+ byte_addr_select | // Set the correct byte address select so we only trigger on the correct opcode
+ S_USER | // Which modes should this breakpoint stop in?
+ BCR_ENABLE; // Enable this hardware breakpoint
+ DNBLogThreadedIf (LOG_BREAKPOINTS, "DNBArchMachARM::EnableHardwareBreakpoint( addr = 0x%8.8llx, size = %llu ) - BVR%u/BCR%u = 0x%8.8x / 0x%8.8x (Thumb)",
+ (uint64_t)addr,
+ (uint64_t)size,
+ i,
+ i,
+ m_state.dbg.__bvr[i],
+ m_state.dbg.__bcr[i]);
+ }
+ else if (size == 4)
+ {
+ // We have an ARM breakpoint
+ m_state.dbg.__bcr[i] = BCR_M_IMVA_MATCH | // Stop on address match
+ BAS_IMVA_ALL | // Stop on any of the four bytes following the IMVA
+ S_USER | // Which modes should this breakpoint stop in?
+ BCR_ENABLE; // Enable this hardware breakpoint
+ DNBLogThreadedIf (LOG_BREAKPOINTS, "DNBArchMachARM::EnableHardwareBreakpoint( addr = 0x%8.8llx, size = %llu ) - BVR%u/BCR%u = 0x%8.8x / 0x%8.8x (ARM)",
+ (uint64_t)addr,
+ (uint64_t)size,
+ i,
+ i,
+ m_state.dbg.__bvr[i],
+ m_state.dbg.__bcr[i]);
+ }
+
+ kret = SetDBGState(false);
+ DNBLogThreadedIf(LOG_BREAKPOINTS, "DNBArchMachARM::EnableHardwareBreakpoint() SetDBGState() => 0x%8.8x.", kret);
+
+ if (kret == KERN_SUCCESS)
+ return i;
+ }
+ else
+ {
+ DNBLogThreadedIf (LOG_BREAKPOINTS, "DNBArchMachARM::EnableHardwareBreakpoint(addr = 0x%8.8llx, size = %llu) => all hardware breakpoint resources are being used.", (uint64_t)addr, (uint64_t)size);
+ }
+ }
+
+ return INVALID_NUB_HW_INDEX;
+}
+
+bool
+DNBArchMachARM::DisableHardwareBreakpoint (uint32_t hw_index)
+{
+ kern_return_t kret = GetDBGState(false);
+
+ const uint32_t num_hw_points = NumSupportedHardwareBreakpoints();
+ if (kret == KERN_SUCCESS)
+ {
+ if (hw_index < num_hw_points)
+ {
+ m_state.dbg.__bcr[hw_index] = 0;
+ DNBLogThreadedIf(LOG_BREAKPOINTS, "DNBArchMachARM::SetHardwareBreakpoint( %u ) - BVR%u = 0x%8.8x BCR%u = 0x%8.8x",
+ hw_index,
+ hw_index,
+ m_state.dbg.__bvr[hw_index],
+ hw_index,
+ m_state.dbg.__bcr[hw_index]);
+
+ kret = SetDBGState(false);
+
+ if (kret == KERN_SUCCESS)
+ return true;
+ }
+ }
+ return false;
+}
+
+// ARM v7 watchpoints may be either word-size or double-word-size.
+// It's implementation defined which they can handle. It looks like on an
+// armv8 device, armv7 processes can watch dwords. But on a genuine armv7
+// device I tried, only word watchpoints are supported.
+
+#if defined (__arm64__) || defined (__aarch64__)
+#define WATCHPOINTS_ARE_DWORD 1
+#else
+#undef WATCHPOINTS_ARE_DWORD
+#endif
+
+uint32_t
+DNBArchMachARM::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write, bool also_set_on_task)
+{
+
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint(addr = 0x%8.8llx, size = %zu, read = %u, write = %u)", (uint64_t)addr, size, read, write);
+
+ const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
+
+ // Can't watch zero bytes
+ if (size == 0)
+ return INVALID_NUB_HW_INDEX;
+
+ // We must watch for either read or write
+ if (read == false && write == false)
+ return INVALID_NUB_HW_INDEX;
+
+ // Otherwise, can't watch more than 8 bytes per WVR/WCR pair
+ if (size > 8)
+ return INVALID_NUB_HW_INDEX;
+
+ // Treat arm watchpoints as having an 8-byte alignment requirement. You can put a watchpoint on a 4-byte
+ // offset address but you can only watch 4 bytes with that watchpoint.
+
+ // arm watchpoints on an 8-byte (double word) aligned addr can watch any bytes in that
+ // 8-byte long region of memory. They can watch the 1st byte, the 2nd byte, 3rd byte, etc, or any
+ // combination thereof, by setting the bits in the BAS [12:5] (Byte Address Select) field of
+ // the DBGWCRn_EL1 reg for the watchpoint.
+
+ // The MASK [28:24] bits in the DBGWCRn_EL1 allow a single watchpoint to monitor a larger region
+ // of memory (16 bytes, 32 bytes, or up to 2GB); the Byte Address Select bitfield then selects a
+ // larger range of bytes, instead of individual bytes. See the ARMv8 Debug Architecture manual
+ // for details. This implementation does not currently use the MASK bits; the largest region
+ // watched by a single watchpoint right now is 8 bytes.
+
+#if defined (WATCHPOINTS_ARE_DWORD)
+ nub_addr_t aligned_wp_address = addr & ~0x7;
+ uint32_t addr_dword_offset = addr & 0x7;
+ const int max_watchpoint_size = 8;
+#else
+ nub_addr_t aligned_wp_address = addr & ~0x3;
+ uint32_t addr_dword_offset = addr & 0x3;
+ const int max_watchpoint_size = 4;
+#endif
+
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint aligned_wp_address is 0x%llx and addr_dword_offset is 0x%x", (uint64_t)aligned_wp_address, addr_dword_offset);
+
+ // Do we need to split up this logical watchpoint into two hardware watchpoint
+ // registers?
+ // e.g. a watchpoint of length 4 on address 6. We need to do this with
+ // one watchpoint on address 0 with bytes 6 & 7 being monitored and
+ // one watchpoint on address 8 with bytes 0 & 1 being monitored.
+
+ if (addr_dword_offset + size > max_watchpoint_size)
+ {
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint(addr = 0x%8.8llx, size = %zu) needs two hardware watchpoints slots to monitor", (uint64_t)addr, size);
+ int low_watchpoint_size = max_watchpoint_size - addr_dword_offset;
+ int high_watchpoint_size = addr_dword_offset + size - max_watchpoint_size;
+
+ uint32_t lo = EnableHardwareWatchpoint(addr, low_watchpoint_size, read, write, also_set_on_task);
+ if (lo == INVALID_NUB_HW_INDEX)
+ return INVALID_NUB_HW_INDEX;
+ uint32_t hi = EnableHardwareWatchpoint (aligned_wp_address + max_watchpoint_size, high_watchpoint_size, read, write, also_set_on_task);
+ if (hi == INVALID_NUB_HW_INDEX)
+ {
+ DisableHardwareWatchpoint (lo, also_set_on_task);
+ return INVALID_NUB_HW_INDEX;
+ }
+ // Tag this lo->hi mapping in our database.
+ LoHi[lo] = hi;
+ return lo;
+ }
+
+ // At this point
+ // 1 aligned_wp_address is the requested address rounded down to 8-byte alignment
+ // 2 addr_dword_offset is the offset into that double word (8-byte) region that we are watching
+ // 3 size is the number of bytes within that 8-byte region that we are watching
+
+ // Set the Byte Address Select (BAS) bits, DBGWCRn_EL1 bits [12:5], based on the above.
+ // The shift-and-subtract gives us 0b11 for 2, 0b1111 for 4, etc., up to 0b11111111 for 8.
+ // Then we shift those bits left by the offset into this dword that we are interested in,
+ // e.g. if we are watching bytes 4,5,6,7 in a dword we want a BAS of 0b11110000.
+ uint32_t byte_address_select = ((1 << size) - 1) << addr_dword_offset;
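+ // A few worked BAS values (illustrative only): size = 1 at offset 3 gives 0b00001000,
+ // size = 2 at offset 6 gives 0b11000000, and size = 8 at offset 0 gives 0b11111111.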
+
+ // Read the debug state
+ kern_return_t kret = GetDBGState(true);
+
+ if (kret == KERN_SUCCESS)
+ {
+ // Check to make sure we have the needed hardware support
+ uint32_t i = 0;
+
+ for (i=0; i<num_hw_watchpoints; ++i)
+ {
+ if ((m_state.dbg.__wcr[i] & WCR_ENABLE) == 0)
+ break; // We found an available hw watchpoint slot (in i)
+ }
+
+ // See if we found an available hw watchpoint slot above
+ if (i < num_hw_watchpoints)
+ {
+ //DumpDBGState(m_state.dbg);
+
+ // Clear any previous LoHi joined-watchpoint that may have been in use
+ LoHi[i] = 0;
+
+ // shift our Byte Address Select bits up to the correct bit range for the DBGWCRn_EL1
+ byte_address_select = byte_address_select << 5;
+
+ // Make sure bits 1:0 are clear in our address
+ m_state.dbg.__wvr[i] = aligned_wp_address; // DVA (Data Virtual Address)
+ m_state.dbg.__wcr[i] = byte_address_select | // Which bytes that follow the DVA that we will watch
+ S_USER | // Stop only in user mode
+ (read ? WCR_LOAD : 0) | // Stop on read access?
+ (write ? WCR_STORE : 0) | // Stop on write access?
+ WCR_ENABLE; // Enable this watchpoint;
+
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint() adding watchpoint on address 0x%llx with control register value 0x%x", (uint64_t) m_state.dbg.__wvr[i], (uint32_t) m_state.dbg.__wcr[i]);
+
+ // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us automatically, don't need to do it here.
+
+ kret = SetDBGState(also_set_on_task);
+ //DumpDBGState(m_state.dbg);
+
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret);
+
+ if (kret == KERN_SUCCESS)
+ return i;
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints);
+ }
+ }
+ return INVALID_NUB_HW_INDEX;
+}
+
+bool
+DNBArchMachARM::ReenableHardwareWatchpoint (uint32_t hw_index)
+{
+ // If this logical watchpoint # is actually implemented using
+ // two hardware watchpoint registers, re-enable both of them.
+
+ if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index])
+ {
+ return ReenableHardwareWatchpoint_helper (hw_index) && ReenableHardwareWatchpoint_helper (LoHi[hw_index]);
+ }
+ else
+ {
+ return ReenableHardwareWatchpoint_helper (hw_index);
+ }
+}
+
+bool
+DNBArchMachARM::ReenableHardwareWatchpoint_helper (uint32_t hw_index)
+{
+ kern_return_t kret = GetDBGState(false);
+ if (kret != KERN_SUCCESS)
+ return false;
+ const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
+ if (hw_index >= num_hw_points)
+ return false;
+
+ m_state.dbg.__wvr[hw_index] = m_disabled_watchpoints[hw_index].addr;
+ m_state.dbg.__wcr[hw_index] = m_disabled_watchpoints[hw_index].control;
+
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint( %u ) - WVR%u = 0x%8.8llx WCR%u = 0x%8.8llx",
+ hw_index,
+ hw_index,
+ (uint64_t) m_state.dbg.__wvr[hw_index],
+ hw_index,
+ (uint64_t) m_state.dbg.__wcr[hw_index]);
+
+ // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us automatically, don't need to do it here.
+
+ kret = SetDBGState(false);
+
+ return (kret == KERN_SUCCESS);
+}
+
+bool
+DNBArchMachARM::DisableHardwareWatchpoint (uint32_t hw_index, bool also_set_on_task)
+{
+ if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index])
+ {
+ return DisableHardwareWatchpoint_helper (hw_index, also_set_on_task) && DisableHardwareWatchpoint_helper (LoHi[hw_index], also_set_on_task);
+ }
+ else
+ {
+ return DisableHardwareWatchpoint_helper (hw_index, also_set_on_task);
+ }
+}
+
+bool
+DNBArchMachARM::DisableHardwareWatchpoint_helper (uint32_t hw_index, bool also_set_on_task)
+{
+ kern_return_t kret = GetDBGState(false);
+ if (kret != KERN_SUCCESS)
+ return false;
+
+ const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
+ if (hw_index >= num_hw_points)
+ return false;
+
+ m_disabled_watchpoints[hw_index].addr = m_state.dbg.__wvr[hw_index];
+ m_disabled_watchpoints[hw_index].control = m_state.dbg.__wcr[hw_index];
+
+ m_state.dbg.__wvr[hw_index] = 0;
+ m_state.dbg.__wcr[hw_index] = 0;
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::DisableHardwareWatchpoint( %u ) - WVR%u = 0x%8.8llx WCR%u = 0x%8.8llx",
+ hw_index,
+ hw_index,
+ (uint64_t) m_state.dbg.__wvr[hw_index],
+ hw_index,
+ (uint64_t) m_state.dbg.__wcr[hw_index]);
+
+ kret = SetDBGState(also_set_on_task);
+
+ return (kret == KERN_SUCCESS);
+}
+
+// Returns -1 if the trailing bit patterns are not one of:
+// { 0b???1, 0b??10, 0b?100, 0b1000 }.
+static inline
+int32_t
+LowestBitSet(uint32_t val)
+{
+ for (unsigned i = 0; i < 4; ++i) {
+ if (bit(val, i))
+ return i;
+ }
+ return -1;
+}
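+
+// For example (illustrative only): LowestBitSet(0b1000) == 3, LowestBitSet(0b0110) == 1,
+// and LowestBitSet(0) == -1 since none of the low four bits are set.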
+
+// Iterate through the debug registers; return the index of the first watchpoint whose address matches.
+// As a side effect, the starting address as understood by the debugger is returned which could be
+// different from 'addr' passed as an in/out argument.
+uint32_t
+DNBArchMachARM::GetHardwareWatchpointHit(nub_addr_t &addr)
+{
+ // Read the debug state
+ kern_return_t kret = GetDBGState(true);
+ //DumpDBGState(m_state.dbg);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", kret);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::GetHardwareWatchpointHit() addr = 0x%llx", (uint64_t)addr);
+
+ // This is the watchpoint value to match against, i.e., word address.
+#if defined (WATCHPOINTS_ARE_DWORD)
+ nub_addr_t wp_val = addr & ~((nub_addr_t)7);
+#else
+ nub_addr_t wp_val = addr & ~((nub_addr_t)3);
+#endif
+ if (kret == KERN_SUCCESS)
+ {
+ DBG &debug_state = m_state.dbg;
+ uint32_t i, num = NumSupportedHardwareWatchpoints();
+ for (i = 0; i < num; ++i)
+ {
+ nub_addr_t wp_addr = GetWatchAddress(debug_state, i);
+ DNBLogThreadedIf(LOG_WATCHPOINTS,
+ "DNBArchMachARM::GetHardwareWatchpointHit() slot: %u (addr = 0x%llx).",
+ i, (uint64_t)wp_addr);
+ if (wp_val == wp_addr) {
+#if defined (WATCHPOINTS_ARE_DWORD)
+ uint32_t byte_mask = bits(debug_state.__wcr[i], 12, 5);
+#else
+ uint32_t byte_mask = bits(debug_state.__wcr[i], 8, 5);
+#endif
+
+ // Sanity check the byte_mask, first.
+ if (LowestBitSet(byte_mask) < 0)
+ continue;
+
+ // Compute the starting address (from the point of view of the debugger).
+ addr = wp_addr + LowestBitSet(byte_mask);
+ return i;
+ }
+ }
+ }
+ return INVALID_NUB_HW_INDEX;
+}
+
+nub_addr_t
+DNBArchMachARM::GetWatchpointAddressByIndex (uint32_t hw_index)
+{
+ kern_return_t kret = GetDBGState(true);
+ if (kret != KERN_SUCCESS)
+ return INVALID_NUB_ADDRESS;
+ const uint32_t num = NumSupportedHardwareWatchpoints();
+ if (hw_index >= num)
+ return INVALID_NUB_ADDRESS;
+ if (IsWatchpointEnabled (m_state.dbg, hw_index))
+ return GetWatchAddress (m_state.dbg, hw_index);
+ return INVALID_NUB_ADDRESS;
+}
+
+bool
+DNBArchMachARM::IsWatchpointEnabled(const DBG &debug_state, uint32_t hw_index)
+{
+ // Watchpoint Control Registers, bitfield definitions
+ // ...
+ // Bits Value Description
+ // [0] 0 Watchpoint disabled
+ // 1 Watchpoint enabled.
+ return (debug_state.__wcr[hw_index] & 1u);
+}
+
+nub_addr_t
+DNBArchMachARM::GetWatchAddress(const DBG &debug_state, uint32_t hw_index)
+{
+ // Watchpoint Value Registers, bitfield definitions
+ // Bits Description
+ // [31:2] Watchpoint value (word address, i.e., 4-byte aligned)
+ // [1:0] RAZ/SBZP
+ return bits(debug_state.__wvr[hw_index], 31, 0);
+}
+
+//----------------------------------------------------------------------
+// Register information definitions for 32 bit ARMV7.
+//----------------------------------------------------------------------
+enum gpr_regnums
+{
+ gpr_r0 = 0,
+ gpr_r1,
+ gpr_r2,
+ gpr_r3,
+ gpr_r4,
+ gpr_r5,
+ gpr_r6,
+ gpr_r7,
+ gpr_r8,
+ gpr_r9,
+ gpr_r10,
+ gpr_r11,
+ gpr_r12,
+ gpr_sp,
+ gpr_lr,
+ gpr_pc,
+ gpr_cpsr
+};
+
+enum
+{
+ vfp_s0 = 0,
+ vfp_s1,
+ vfp_s2,
+ vfp_s3,
+ vfp_s4,
+ vfp_s5,
+ vfp_s6,
+ vfp_s7,
+ vfp_s8,
+ vfp_s9,
+ vfp_s10,
+ vfp_s11,
+ vfp_s12,
+ vfp_s13,
+ vfp_s14,
+ vfp_s15,
+ vfp_s16,
+ vfp_s17,
+ vfp_s18,
+ vfp_s19,
+ vfp_s20,
+ vfp_s21,
+ vfp_s22,
+ vfp_s23,
+ vfp_s24,
+ vfp_s25,
+ vfp_s26,
+ vfp_s27,
+ vfp_s28,
+ vfp_s29,
+ vfp_s30,
+ vfp_s31,
+ vfp_d0,
+ vfp_d1,
+ vfp_d2,
+ vfp_d3,
+ vfp_d4,
+ vfp_d5,
+ vfp_d6,
+ vfp_d7,
+ vfp_d8,
+ vfp_d9,
+ vfp_d10,
+ vfp_d11,
+ vfp_d12,
+ vfp_d13,
+ vfp_d14,
+ vfp_d15,
+ vfp_d16,
+ vfp_d17,
+ vfp_d18,
+ vfp_d19,
+ vfp_d20,
+ vfp_d21,
+ vfp_d22,
+ vfp_d23,
+ vfp_d24,
+ vfp_d25,
+ vfp_d26,
+ vfp_d27,
+ vfp_d28,
+ vfp_d29,
+ vfp_d30,
+ vfp_d31,
+ vfp_q0,
+ vfp_q1,
+ vfp_q2,
+ vfp_q3,
+ vfp_q4,
+ vfp_q5,
+ vfp_q6,
+ vfp_q7,
+ vfp_q8,
+ vfp_q9,
+ vfp_q10,
+ vfp_q11,
+ vfp_q12,
+ vfp_q13,
+ vfp_q14,
+ vfp_q15,
+#if defined (__arm64__) || defined (__aarch64__)
+ vfp_fpsr,
+ vfp_fpcr,
+#else
+ vfp_fpscr
+#endif
+};
+
+enum
+{
+ exc_exception,
+ exc_fsr,
+ exc_far,
+};
+
+#define GPR_OFFSET_IDX(idx) (offsetof (DNBArchMachARM::GPR, __r[idx]))
+#define GPR_OFFSET_NAME(reg) (offsetof (DNBArchMachARM::GPR, __##reg))
+
+#define EXC_OFFSET(reg) (offsetof (DNBArchMachARM::EXC, __##reg) + offsetof (DNBArchMachARM::Context, exc))
+
+// These macros will auto define the register name, alt name, register size,
+// register offset, encoding, format and native register. This ensures that
+// the register state structures are defined correctly and have the correct
+// sizes and offsets.
+#define DEFINE_GPR_IDX(idx, reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 4, GPR_OFFSET_IDX(idx), ehframe_##reg, dwarf_##reg, gen, INVALID_NUB_REGNUM, NULL, NULL}
+#define DEFINE_GPR_NAME(reg, alt, gen, inval) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 4, GPR_OFFSET_NAME(reg), ehframe_##reg, dwarf_##reg, gen, INVALID_NUB_REGNUM, NULL, inval}
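+
+// For example, DEFINE_GPR_IDX(0, r0, "arg1", GENERIC_REGNUM_ARG1) expands to
+// { e_regSetGPR, gpr_r0, "r0", "arg1", Uint, Hex, 4, GPR_OFFSET_IDX(0),
+//   ehframe_r0, dwarf_r0, GENERIC_REGNUM_ARG1, INVALID_NUB_REGNUM, NULL, NULL }.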
+
+// In case we are debugging a target that has the ability to
+// change into the protected modes with folded registers (ABT, IRQ,
+// FIQ, SYS, USR, etc.), we should invalidate r8-r14 if the CPSR
+// gets modified.
+
+const char * g_invalidate_cpsr[] = { "r8", "r9", "r10", "r11", "r12", "sp", "lr", NULL };
+
+// General purpose registers
+const DNBRegisterInfo
+DNBArchMachARM::g_gpr_registers[] =
+{
+ DEFINE_GPR_IDX ( 0, r0,"arg1", GENERIC_REGNUM_ARG1 ),
+ DEFINE_GPR_IDX ( 1, r1,"arg2", GENERIC_REGNUM_ARG2 ),
+ DEFINE_GPR_IDX ( 2, r2,"arg3", GENERIC_REGNUM_ARG3 ),
+ DEFINE_GPR_IDX ( 3, r3,"arg4", GENERIC_REGNUM_ARG4 ),
+ DEFINE_GPR_IDX ( 4, r4, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX ( 5, r5, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX ( 6, r6, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX ( 7, r7, "fp", GENERIC_REGNUM_FP ),
+ DEFINE_GPR_IDX ( 8, r8, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX ( 9, r9, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (10, r10, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (11, r11, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (12, r12, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_NAME (sp, "r13", GENERIC_REGNUM_SP, NULL),
+ DEFINE_GPR_NAME (lr, "r14", GENERIC_REGNUM_RA, NULL),
+ DEFINE_GPR_NAME (pc, "r15", GENERIC_REGNUM_PC, NULL),
+ DEFINE_GPR_NAME (cpsr, "flags", GENERIC_REGNUM_FLAGS, g_invalidate_cpsr)
+};
+
+const char *g_contained_q0 [] { "q0", NULL };
+const char *g_contained_q1 [] { "q1", NULL };
+const char *g_contained_q2 [] { "q2", NULL };
+const char *g_contained_q3 [] { "q3", NULL };
+const char *g_contained_q4 [] { "q4", NULL };
+const char *g_contained_q5 [] { "q5", NULL };
+const char *g_contained_q6 [] { "q6", NULL };
+const char *g_contained_q7 [] { "q7", NULL };
+const char *g_contained_q8 [] { "q8", NULL };
+const char *g_contained_q9 [] { "q9", NULL };
+const char *g_contained_q10[] { "q10", NULL };
+const char *g_contained_q11[] { "q11", NULL };
+const char *g_contained_q12[] { "q12", NULL };
+const char *g_contained_q13[] { "q13", NULL };
+const char *g_contained_q14[] { "q14", NULL };
+const char *g_contained_q15[] { "q15", NULL };
+
+const char *g_invalidate_q0[] { "q0", "d0" , "d1" , "s0" , "s1" , "s2" , "s3" , NULL };
+const char *g_invalidate_q1[] { "q1", "d2" , "d3" , "s4" , "s5" , "s6" , "s7" , NULL };
+const char *g_invalidate_q2[] { "q2", "d4" , "d5" , "s8" , "s9" , "s10", "s11", NULL };
+const char *g_invalidate_q3[] { "q3", "d6" , "d7" , "s12", "s13", "s14", "s15", NULL };
+const char *g_invalidate_q4[] { "q4", "d8" , "d9" , "s16", "s17", "s18", "s19", NULL };
+const char *g_invalidate_q5[] { "q5", "d10", "d11", "s20", "s21", "s22", "s23", NULL };
+const char *g_invalidate_q6[] { "q6", "d12", "d13", "s24", "s25", "s26", "s27", NULL };
+const char *g_invalidate_q7[] { "q7", "d14", "d15", "s28", "s29", "s30", "s31", NULL };
+const char *g_invalidate_q8[] { "q8", "d16", "d17", NULL };
+const char *g_invalidate_q9[] { "q9", "d18", "d19", NULL };
+const char *g_invalidate_q10[] { "q10", "d20", "d21", NULL };
+const char *g_invalidate_q11[] { "q11", "d22", "d23", NULL };
+const char *g_invalidate_q12[] { "q12", "d24", "d25", NULL };
+const char *g_invalidate_q13[] { "q13", "d26", "d27", NULL };
+const char *g_invalidate_q14[] { "q14", "d28", "d29", NULL };
+const char *g_invalidate_q15[] { "q15", "d30", "d31", NULL };
+
+#define VFP_S_OFFSET_IDX(idx) (((idx) % 4) * 4) // offset into q reg: 0, 4, 8, 12
+#define VFP_D_OFFSET_IDX(idx) (((idx) % 2) * 8) // offset into q reg: 0, 8
+#define VFP_Q_OFFSET_IDX(idx) (VFP_S_OFFSET_IDX ((idx) * 4))
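+
+// For example, s5 lives in q1 at byte offset VFP_S_OFFSET_IDX(5) == 4, d3 lives in q1 at
+// byte offset VFP_D_OFFSET_IDX(3) == 8, and each q register starts at offset 0 within itself.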
+
+#define VFP_OFFSET_NAME(reg) (offsetof (DNBArchMachARM::FPU, __##reg) + offsetof (DNBArchMachARM::Context, vfp))
+
+#define FLOAT_FORMAT Float
+
+#define DEFINE_VFP_S_IDX(idx) e_regSetVFP, vfp_s##idx, "s" #idx, NULL, IEEE754, FLOAT_FORMAT, 4, VFP_S_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_s##idx, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM
+#define DEFINE_VFP_D_IDX(idx) e_regSetVFP, vfp_d##idx, "d" #idx, NULL, IEEE754, FLOAT_FORMAT, 8, VFP_D_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_d##idx, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM
+#define DEFINE_VFP_Q_IDX(idx) e_regSetVFP, vfp_q##idx, "q" #idx, NULL, Vector, VectorOfUInt8, 16, VFP_Q_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_q##idx, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM
+
+// Floating point registers
+const DNBRegisterInfo
+DNBArchMachARM::g_vfp_registers[] =
+{
+ { DEFINE_VFP_S_IDX ( 0), g_contained_q0, g_invalidate_q0 },
+ { DEFINE_VFP_S_IDX ( 1), g_contained_q0, g_invalidate_q0 },
+ { DEFINE_VFP_S_IDX ( 2), g_contained_q0, g_invalidate_q0 },
+ { DEFINE_VFP_S_IDX ( 3), g_contained_q0, g_invalidate_q0 },
+ { DEFINE_VFP_S_IDX ( 4), g_contained_q1, g_invalidate_q1 },
+ { DEFINE_VFP_S_IDX ( 5), g_contained_q1, g_invalidate_q1 },
+ { DEFINE_VFP_S_IDX ( 6), g_contained_q1, g_invalidate_q1 },
+ { DEFINE_VFP_S_IDX ( 7), g_contained_q1, g_invalidate_q1 },
+ { DEFINE_VFP_S_IDX ( 8), g_contained_q2, g_invalidate_q2 },
+ { DEFINE_VFP_S_IDX ( 9), g_contained_q2, g_invalidate_q2 },
+ { DEFINE_VFP_S_IDX (10), g_contained_q2, g_invalidate_q2 },
+ { DEFINE_VFP_S_IDX (11), g_contained_q2, g_invalidate_q2 },
+ { DEFINE_VFP_S_IDX (12), g_contained_q3, g_invalidate_q3 },
+ { DEFINE_VFP_S_IDX (13), g_contained_q3, g_invalidate_q3 },
+ { DEFINE_VFP_S_IDX (14), g_contained_q3, g_invalidate_q3 },
+ { DEFINE_VFP_S_IDX (15), g_contained_q3, g_invalidate_q3 },
+ { DEFINE_VFP_S_IDX (16), g_contained_q4, g_invalidate_q4 },
+ { DEFINE_VFP_S_IDX (17), g_contained_q4, g_invalidate_q4 },
+ { DEFINE_VFP_S_IDX (18), g_contained_q4, g_invalidate_q4 },
+ { DEFINE_VFP_S_IDX (19), g_contained_q4, g_invalidate_q4 },
+ { DEFINE_VFP_S_IDX (20), g_contained_q5, g_invalidate_q5 },
+ { DEFINE_VFP_S_IDX (21), g_contained_q5, g_invalidate_q5 },
+ { DEFINE_VFP_S_IDX (22), g_contained_q5, g_invalidate_q5 },
+ { DEFINE_VFP_S_IDX (23), g_contained_q5, g_invalidate_q5 },
+ { DEFINE_VFP_S_IDX (24), g_contained_q6, g_invalidate_q6 },
+ { DEFINE_VFP_S_IDX (25), g_contained_q6, g_invalidate_q6 },
+ { DEFINE_VFP_S_IDX (26), g_contained_q6, g_invalidate_q6 },
+ { DEFINE_VFP_S_IDX (27), g_contained_q6, g_invalidate_q6 },
+ { DEFINE_VFP_S_IDX (28), g_contained_q7, g_invalidate_q7 },
+ { DEFINE_VFP_S_IDX (29), g_contained_q7, g_invalidate_q7 },
+ { DEFINE_VFP_S_IDX (30), g_contained_q7, g_invalidate_q7 },
+ { DEFINE_VFP_S_IDX (31), g_contained_q7, g_invalidate_q7 },
+
+ { DEFINE_VFP_D_IDX (0), g_contained_q0, g_invalidate_q0 },
+ { DEFINE_VFP_D_IDX (1), g_contained_q0, g_invalidate_q0 },
+ { DEFINE_VFP_D_IDX (2), g_contained_q1, g_invalidate_q1 },
+ { DEFINE_VFP_D_IDX (3), g_contained_q1, g_invalidate_q1 },
+ { DEFINE_VFP_D_IDX (4), g_contained_q2, g_invalidate_q2 },
+ { DEFINE_VFP_D_IDX (5), g_contained_q2, g_invalidate_q2 },
+ { DEFINE_VFP_D_IDX (6), g_contained_q3, g_invalidate_q3 },
+ { DEFINE_VFP_D_IDX (7), g_contained_q3, g_invalidate_q3 },
+ { DEFINE_VFP_D_IDX (8), g_contained_q4, g_invalidate_q4 },
+ { DEFINE_VFP_D_IDX (9), g_contained_q4, g_invalidate_q4 },
+ { DEFINE_VFP_D_IDX (10), g_contained_q5, g_invalidate_q5 },
+ { DEFINE_VFP_D_IDX (11), g_contained_q5, g_invalidate_q5 },
+ { DEFINE_VFP_D_IDX (12), g_contained_q6, g_invalidate_q6 },
+ { DEFINE_VFP_D_IDX (13), g_contained_q6, g_invalidate_q6 },
+ { DEFINE_VFP_D_IDX (14), g_contained_q7, g_invalidate_q7 },
+ { DEFINE_VFP_D_IDX (15), g_contained_q7, g_invalidate_q7 },
+ { DEFINE_VFP_D_IDX (16), g_contained_q8, g_invalidate_q8 },
+ { DEFINE_VFP_D_IDX (17), g_contained_q8, g_invalidate_q8 },
+ { DEFINE_VFP_D_IDX (18), g_contained_q9, g_invalidate_q9 },
+ { DEFINE_VFP_D_IDX (19), g_contained_q9, g_invalidate_q9 },
+ { DEFINE_VFP_D_IDX (20), g_contained_q10, g_invalidate_q10 },
+ { DEFINE_VFP_D_IDX (21), g_contained_q10, g_invalidate_q10 },
+ { DEFINE_VFP_D_IDX (22), g_contained_q11, g_invalidate_q11 },
+ { DEFINE_VFP_D_IDX (23), g_contained_q11, g_invalidate_q11 },
+ { DEFINE_VFP_D_IDX (24), g_contained_q12, g_invalidate_q12 },
+ { DEFINE_VFP_D_IDX (25), g_contained_q12, g_invalidate_q12 },
+ { DEFINE_VFP_D_IDX (26), g_contained_q13, g_invalidate_q13 },
+ { DEFINE_VFP_D_IDX (27), g_contained_q13, g_invalidate_q13 },
+ { DEFINE_VFP_D_IDX (28), g_contained_q14, g_invalidate_q14 },
+ { DEFINE_VFP_D_IDX (29), g_contained_q14, g_invalidate_q14 },
+ { DEFINE_VFP_D_IDX (30), g_contained_q15, g_invalidate_q15 },
+ { DEFINE_VFP_D_IDX (31), g_contained_q15, g_invalidate_q15 },
+
+ { DEFINE_VFP_Q_IDX (0), NULL, g_invalidate_q0 },
+ { DEFINE_VFP_Q_IDX (1), NULL, g_invalidate_q1 },
+ { DEFINE_VFP_Q_IDX (2), NULL, g_invalidate_q2 },
+ { DEFINE_VFP_Q_IDX (3), NULL, g_invalidate_q3 },
+ { DEFINE_VFP_Q_IDX (4), NULL, g_invalidate_q4 },
+ { DEFINE_VFP_Q_IDX (5), NULL, g_invalidate_q5 },
+ { DEFINE_VFP_Q_IDX (6), NULL, g_invalidate_q6 },
+ { DEFINE_VFP_Q_IDX (7), NULL, g_invalidate_q7 },
+ { DEFINE_VFP_Q_IDX (8), NULL, g_invalidate_q8 },
+ { DEFINE_VFP_Q_IDX (9), NULL, g_invalidate_q9 },
+ { DEFINE_VFP_Q_IDX (10), NULL, g_invalidate_q10 },
+ { DEFINE_VFP_Q_IDX (11), NULL, g_invalidate_q11 },
+ { DEFINE_VFP_Q_IDX (12), NULL, g_invalidate_q12 },
+ { DEFINE_VFP_Q_IDX (13), NULL, g_invalidate_q13 },
+ { DEFINE_VFP_Q_IDX (14), NULL, g_invalidate_q14 },
+ { DEFINE_VFP_Q_IDX (15), NULL, g_invalidate_q15 },
+
+#if defined (__arm64__) || defined (__aarch64__)
+ { e_regSetVFP, vfp_fpsr, "fpsr", NULL, Uint, Hex, 4, VFP_OFFSET_NAME(fpsr), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+ { e_regSetVFP, vfp_fpcr, "fpcr", NULL, Uint, Hex, 4, VFP_OFFSET_NAME(fpcr), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL }
+#else
+ { e_regSetVFP, vfp_fpscr, "fpscr", NULL, Uint, Hex, 4, VFP_OFFSET_NAME(fpscr), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL }
+#endif
+};
+
+// Exception registers
+
+const DNBRegisterInfo
+DNBArchMachARM::g_exc_registers[] =
+{
+ { e_regSetVFP, exc_exception , "exception" , NULL, Uint, Hex, 4, EXC_OFFSET(exception) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM },
+ { e_regSetVFP, exc_fsr , "fsr" , NULL, Uint, Hex, 4, EXC_OFFSET(fsr) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM },
+ { e_regSetVFP, exc_far , "far" , NULL, Uint, Hex, 4, EXC_OFFSET(far) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM }
+};
+
+// Number of registers in each register set
+const size_t DNBArchMachARM::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
+const size_t DNBArchMachARM::k_num_vfp_registers = sizeof(g_vfp_registers)/sizeof(DNBRegisterInfo);
+const size_t DNBArchMachARM::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
+const size_t DNBArchMachARM::k_num_all_registers = k_num_gpr_registers + k_num_vfp_registers + k_num_exc_registers;
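+
+// For reference, with the definitions above k_num_gpr_registers works out to 17
+// (r0-r12, sp, lr, pc, cpsr) and k_num_exc_registers to 3 (exception, fsr, far).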
+
+//----------------------------------------------------------------------
+// Register set definitions. The first definition, at register set index
+// zero, is for all registers, followed by the other register sets. The
+// register information for the "all registers" set need not be filled in.
+//----------------------------------------------------------------------
+const DNBRegisterSetInfo
+DNBArchMachARM::g_reg_sets[] =
+{
+ { "ARM Registers", NULL, k_num_all_registers },
+ { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers },
+ { "Floating Point Registers", g_vfp_registers, k_num_vfp_registers },
+ { "Exception State Registers", g_exc_registers, k_num_exc_registers }
+};
+// Total number of register sets for this architecture
+const size_t DNBArchMachARM::k_num_register_sets = sizeof(g_reg_sets)/sizeof(DNBRegisterSetInfo);
+
+
+const DNBRegisterSetInfo *
+DNBArchMachARM::GetRegisterSetInfo(nub_size_t *num_reg_sets)
+{
+ *num_reg_sets = k_num_register_sets;
+ return g_reg_sets;
+}
+
+bool
+DNBArchMachARM::GetRegisterValue(uint32_t set, uint32_t reg, DNBRegisterValue *value)
+{
+ if (set == REGISTER_SET_GENERIC)
+ {
+ switch (reg)
+ {
+ case GENERIC_REGNUM_PC: // Program Counter
+ set = e_regSetGPR;
+ reg = gpr_pc;
+ break;
+
+ case GENERIC_REGNUM_SP: // Stack Pointer
+ set = e_regSetGPR;
+ reg = gpr_sp;
+ break;
+
+ case GENERIC_REGNUM_FP: // Frame Pointer
+ set = e_regSetGPR;
+ reg = gpr_r7; // is this the right reg?
+ break;
+
+ case GENERIC_REGNUM_RA: // Return Address
+ set = e_regSetGPR;
+ reg = gpr_lr;
+ break;
+
+ case GENERIC_REGNUM_FLAGS: // Processor flags register
+ set = e_regSetGPR;
+ reg = gpr_cpsr;
+ break;
+
+ default:
+ return false;
+ }
+ }
+
+ if (GetRegisterState(set, false) != KERN_SUCCESS)
+ return false;
+
+ const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
+ if (regInfo)
+ {
+ value->info = *regInfo;
+ switch (set)
+ {
+ case e_regSetGPR:
+ if (reg < k_num_gpr_registers)
+ {
+ value->value.uint32 = m_state.context.gpr.__r[reg];
+ return true;
+ }
+ break;
+
+ case e_regSetVFP:
+ // "reg" is an index into the floating point register set at this point.
+ // We need to translate it up so entry 0 in the fp reg set is the same as vfp_s0
+ // in the enumerated values for case statement below.
+ if (reg >= vfp_s0 && reg <= vfp_s31)
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ uint32_t *s_reg = ((uint32_t *) &m_state.context.vfp.__v[0]) + (reg - vfp_s0);
+ memcpy (&value->value.v_uint8, s_reg, 4);
+#else
+ value->value.uint32 = m_state.context.vfp.__r[reg];
+#endif
+ return true;
+ }
+ else if (reg >= vfp_d0 && reg <= vfp_d31)
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ uint64_t *d_reg = ((uint64_t *) &m_state.context.vfp.__v[0]) + (reg - vfp_d0);
+ memcpy (&value->value.v_uint8, d_reg, 8);
+#else
+ uint32_t d_reg_idx = reg - vfp_d0;
+ uint32_t s_reg_idx = d_reg_idx * 2;
+ value->value.v_sint32[0] = m_state.context.vfp.__r[s_reg_idx + 0];
+ value->value.v_sint32[1] = m_state.context.vfp.__r[s_reg_idx + 1];
+#endif
+ return true;
+ }
+ else if (reg >= vfp_q0 && reg <= vfp_q15)
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ memcpy (&value->value.v_uint8, (uint8_t *) &m_state.context.vfp.__v[reg - vfp_q0], 16);
+#else
+ uint32_t s_reg_idx = (reg - vfp_q0) * 4;
+ memcpy (&value->value.v_uint8, (uint8_t *) &m_state.context.vfp.__r[s_reg_idx], 16);
+#endif
+ return true;
+ }
+#if defined (__arm64__) || defined (__aarch64__)
+ else if (reg == vfp_fpsr)
+ {
+ value->value.uint32 = m_state.context.vfp.__fpsr;
+ return true;
+ }
+ else if (reg == vfp_fpcr)
+ {
+ value->value.uint32 = m_state.context.vfp.__fpcr;
+ return true;
+ }
+#else
+ else if (reg == vfp_fpscr)
+ {
+ value->value.uint32 = m_state.context.vfp.__fpscr;
+ return true;
+ }
+#endif
+ break;
+
+ case e_regSetEXC:
+ if (reg < k_num_exc_registers)
+ {
+ value->value.uint32 = (&m_state.context.exc.__exception)[reg];
+ return true;
+ }
+ break;
+ }
+ }
+ return false;
+}
+
+bool
+DNBArchMachARM::SetRegisterValue(uint32_t set, uint32_t reg, const DNBRegisterValue *value)
+{
+ if (set == REGISTER_SET_GENERIC)
+ {
+ switch (reg)
+ {
+ case GENERIC_REGNUM_PC: // Program Counter
+ set = e_regSetGPR;
+ reg = gpr_pc;
+ break;
+
+ case GENERIC_REGNUM_SP: // Stack Pointer
+ set = e_regSetGPR;
+ reg = gpr_sp;
+ break;
+
+ case GENERIC_REGNUM_FP: // Frame Pointer
+ set = e_regSetGPR;
+ reg = gpr_r7;
+ break;
+
+ case GENERIC_REGNUM_RA: // Return Address
+ set = e_regSetGPR;
+ reg = gpr_lr;
+ break;
+
+ case GENERIC_REGNUM_FLAGS: // Processor flags register
+ set = e_regSetGPR;
+ reg = gpr_cpsr;
+ break;
+
+ default:
+ return false;
+ }
+ }
+
+ if (GetRegisterState(set, false) != KERN_SUCCESS)
+ return false;
+
+ bool success = false;
+ const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
+ if (regInfo)
+ {
+ switch (set)
+ {
+ case e_regSetGPR:
+ if (reg < k_num_gpr_registers)
+ {
+ m_state.context.gpr.__r[reg] = value->value.uint32;
+ success = true;
+ }
+ break;
+
+ case e_regSetVFP:
+ // "reg" is an index into the floating point register set at this point.
+ // We need to translate it up so entry 0 in the fp reg set is the same as vfp_s0
+ // in the enumerated values for case statement below.
+ if (reg >= vfp_s0 && reg <= vfp_s31)
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ uint32_t *s_reg = ((uint32_t *) &m_state.context.vfp.__v[0]) + (reg - vfp_s0);
+ memcpy (s_reg, &value->value.v_uint8, 4);
+#else
+ m_state.context.vfp.__r[reg] = value->value.uint32;
+#endif
+ success = true;
+ }
+ else if (reg >= vfp_d0 && reg <= vfp_d31)
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ uint64_t *d_reg = ((uint64_t *) &m_state.context.vfp.__v[0]) + (reg - vfp_d0);
+ memcpy (d_reg, &value->value.v_uint8, 8);
+#else
+ uint32_t d_reg_idx = reg - vfp_d0;
+ uint32_t s_reg_idx = d_reg_idx * 2;
+ m_state.context.vfp.__r[s_reg_idx + 0] = value->value.v_sint32[0];
+ m_state.context.vfp.__r[s_reg_idx + 1] = value->value.v_sint32[1];
+#endif
+ success = true;
+ }
+ else if (reg >= vfp_q0 && reg <= vfp_q15)
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ memcpy ((uint8_t *) &m_state.context.vfp.__v[reg - vfp_q0], &value->value.v_uint8, 16);
+#else
+ uint32_t s_reg_idx = (reg - vfp_q0) * 4;
+ memcpy ((uint8_t *) &m_state.context.vfp.__r[s_reg_idx], &value->value.v_uint8, 16);
+#endif
+ success = true;
+ }
+#if defined (__arm64__) || defined (__aarch64__)
+ else if (reg == vfp_fpsr)
+ {
+ m_state.context.vfp.__fpsr = value->value.uint32;
+ success = true;
+ }
+ else if (reg == vfp_fpcr)
+ {
+ m_state.context.vfp.__fpcr = value->value.uint32;
+ success = true;
+ }
+#else
+ else if (reg == vfp_fpscr)
+ {
+ m_state.context.vfp.__fpscr = value->value.uint32;
+ success = true;
+ }
+#endif
+ break;
+
+ case e_regSetEXC:
+ if (reg < k_num_exc_registers)
+ {
+ (&m_state.context.exc.__exception)[reg] = value->value.uint32;
+ success = true;
+ }
+ break;
+ }
+
+ }
+ if (success)
+ return SetRegisterState(set) == KERN_SUCCESS;
+ return false;
+}
+
+kern_return_t
+DNBArchMachARM::GetRegisterState(int set, bool force)
+{
+ switch (set)
+ {
+ case e_regSetALL: return GetGPRState(force) |
+ GetVFPState(force) |
+ GetEXCState(force) |
+ GetDBGState(force);
+ case e_regSetGPR: return GetGPRState(force);
+ case e_regSetVFP: return GetVFPState(force);
+ case e_regSetEXC: return GetEXCState(force);
+ case e_regSetDBG: return GetDBGState(force);
+ default: break;
+ }
+ return KERN_INVALID_ARGUMENT;
+}
+
+kern_return_t
+DNBArchMachARM::SetRegisterState(int set)
+{
+ // Make sure we have a valid context to set.
+ kern_return_t err = GetRegisterState(set, false);
+ if (err != KERN_SUCCESS)
+ return err;
+
+ switch (set)
+ {
+ case e_regSetALL: return SetGPRState() |
+ SetVFPState() |
+ SetEXCState() |
+ SetDBGState(false);
+ case e_regSetGPR: return SetGPRState();
+ case e_regSetVFP: return SetVFPState();
+ case e_regSetEXC: return SetEXCState();
+ case e_regSetDBG: return SetDBGState(false);
+ default: break;
+ }
+ return KERN_INVALID_ARGUMENT;
+}
+
+bool
+DNBArchMachARM::RegisterSetStateIsValid (int set) const
+{
+ return m_state.RegsAreValid(set);
+}
+
+
+nub_size_t
+DNBArchMachARM::GetRegisterContext (void *buf, nub_size_t buf_len)
+{
+ nub_size_t size = sizeof (m_state.context.gpr) +
+ sizeof (m_state.context.vfp) +
+ sizeof (m_state.context.exc);
+
+ if (buf && buf_len)
+ {
+ if (size > buf_len)
+ size = buf_len;
+
+ bool force = false;
+ if (GetGPRState(force) | GetVFPState(force) | GetEXCState(force))
+ return 0;
+
+ // Copy each struct individually to avoid any padding that might be between the structs in m_state.context
+ uint8_t *p = (uint8_t *)buf;
+ ::memcpy (p, &m_state.context.gpr, sizeof(m_state.context.gpr));
+ p += sizeof(m_state.context.gpr);
+ ::memcpy (p, &m_state.context.vfp, sizeof(m_state.context.vfp));
+ p += sizeof(m_state.context.vfp);
+ ::memcpy (p, &m_state.context.exc, sizeof(m_state.context.exc));
+ p += sizeof(m_state.context.exc);
+
+ size_t bytes_written = p - (uint8_t *)buf;
+ UNUSED_IF_ASSERT_DISABLED(bytes_written);
+ assert (bytes_written == size);
+
+ }
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM::GetRegisterContext (buf = %p, len = %llu) => %llu", buf, (uint64_t)buf_len, (uint64_t)size);
+ // Return the size of the register context even if NULL was passed in
+ return size;
+}
+
+nub_size_t
+DNBArchMachARM::SetRegisterContext (const void *buf, nub_size_t buf_len)
+{
+ nub_size_t size = sizeof (m_state.context.gpr) +
+ sizeof (m_state.context.vfp) +
+ sizeof (m_state.context.exc);
+
+ if (buf == NULL || buf_len == 0)
+ size = 0;
+
+ if (size)
+ {
+ if (size > buf_len)
+ size = buf_len;
+
+ // Copy each struct individually to avoid any padding that might be between the structs in m_state.context
+ uint8_t *p = (uint8_t *)buf;
+ ::memcpy (&m_state.context.gpr, p, sizeof(m_state.context.gpr));
+ p += sizeof(m_state.context.gpr);
+ ::memcpy (&m_state.context.vfp, p, sizeof(m_state.context.vfp));
+ p += sizeof(m_state.context.vfp);
+ ::memcpy (&m_state.context.exc, p, sizeof(m_state.context.exc));
+ p += sizeof(m_state.context.exc);
+
+ size_t bytes_written = p - (uint8_t *)buf;
+ UNUSED_IF_ASSERT_DISABLED(bytes_written);
+ assert (bytes_written == size);
+
+ if (SetGPRState() | SetVFPState() | SetEXCState())
+ return 0;
+ }
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM::SetRegisterContext (buf = %p, len = %llu) => %llu", buf, (uint64_t)buf_len, (uint64_t)size);
+ return size;
+}
+
+
+uint32_t
+DNBArchMachARM::SaveRegisterState ()
+{
+ kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
+ DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (SetGPRState() for stop_count = %u)", m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());
+
+ // Always re-read the registers because above we call thread_abort_safely();
+ bool force = true;
+
+ if ((kret = GetGPRState(force)) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM::SaveRegisterState () error: GPR regs failed to read: %u ", kret);
+ }
+ else if ((kret = GetVFPState(force)) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM::SaveRegisterState () error: %s regs failed to read: %u", "VFP", kret);
+ }
+ else
+ {
+ const uint32_t save_id = GetNextRegisterStateSaveID ();
+ m_saved_register_states[save_id] = m_state.context;
+ return save_id;
+ }
+ return UINT32_MAX;
+}
+
+bool
+DNBArchMachARM::RestoreRegisterState (uint32_t save_id)
+{
+ SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id);
+ if (pos != m_saved_register_states.end())
+ {
+ m_state.context.gpr = pos->second.gpr;
+ m_state.context.vfp = pos->second.vfp;
+ kern_return_t kret;
+ bool success = true;
+ if ((kret = SetGPRState()) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM::RestoreRegisterState (save_id = %u) error: GPR regs failed to write: %u", save_id, kret);
+ success = false;
+ }
+ else if ((kret = SetVFPState()) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM::RestoreRegisterState (save_id = %u) error: %s regs failed to write: %u", save_id, "VFP", kret);
+ success = false;
+ }
+ m_saved_register_states.erase(pos);
+ return success;
+ }
+ return false;
+}
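+
+// Illustrative usage sketch (hypothetical caller shown only for clarity): a client such as
+// the owning MachThread, reached here through an assumed `arch` pointer, pairs these two
+// calls around running code in the inferior, e.g.
+//
+//   uint32_t save_id = arch->SaveRegisterState();   // caches the GPR and VFP context
+//   // ... let the inferior run an expression ...
+//   arch->RestoreRegisterState (save_id);           // writes the context back and erases
+//                                                   // the saved copy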
+
+
+#endif // #if defined (__arm__)
+
diff --git a/tools/debugserver/source/MacOSX/arm/DNBArchImpl.h b/tools/debugserver/source/MacOSX/arm/DNBArchImpl.h
new file mode 100644
index 000000000000..ae8974855238
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/arm/DNBArchImpl.h
@@ -0,0 +1,282 @@
+//===-- DNBArchImpl.h -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/25/07.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __DebugNubArchMachARM_h__
+#define __DebugNubArchMachARM_h__
+
+#if defined (__arm__) || defined (__arm64__) || defined (__aarch64__)
+
+#include "DNBArch.h"
+
+#include <map>
+
+class MachThread;
+
+class DNBArchMachARM : public DNBArchProtocol
+{
+public:
+ enum { kMaxNumThumbITBreakpoints = 4 };
+
+ DNBArchMachARM(MachThread *thread) :
+ m_thread(thread),
+ m_state(),
+ m_disabled_watchpoints(),
+ m_hw_single_chained_step_addr(INVALID_NUB_ADDRESS),
+ m_last_decode_pc(INVALID_NUB_ADDRESS),
+ m_watchpoint_hw_index(-1),
+ m_watchpoint_did_occur(false),
+ m_watchpoint_resume_single_step_enabled(false),
+ m_saved_register_states()
+ {
+ m_disabled_watchpoints.resize (16);
+ memset(&m_dbg_save, 0, sizeof(m_dbg_save));
+#if defined (USE_ARM_DISASSEMBLER_FRAMEWORK)
+ ThumbStaticsInit(&m_last_decode_thumb);
+#endif
+ }
+
+ virtual ~DNBArchMachARM()
+ {
+ }
+
+ static void Initialize();
+ static const DNBRegisterSetInfo *
+ GetRegisterSetInfo(nub_size_t *num_reg_sets);
+
+ virtual bool GetRegisterValue(uint32_t set, uint32_t reg, DNBRegisterValue *value);
+ virtual bool SetRegisterValue(uint32_t set, uint32_t reg, const DNBRegisterValue *value);
+ virtual nub_size_t GetRegisterContext (void *buf, nub_size_t buf_len);
+ virtual nub_size_t SetRegisterContext (const void *buf, nub_size_t buf_len);
+ virtual uint32_t SaveRegisterState ();
+ virtual bool RestoreRegisterState (uint32_t save_id);
+
+ virtual kern_return_t GetRegisterState (int set, bool force);
+ virtual kern_return_t SetRegisterState (int set);
+ virtual bool RegisterSetStateIsValid (int set) const;
+
+ virtual uint64_t GetPC(uint64_t failValue); // Get program counter
+ virtual kern_return_t SetPC(uint64_t value);
+ virtual uint64_t GetSP(uint64_t failValue); // Get stack pointer
+ virtual void ThreadWillResume();
+ virtual bool ThreadDidStop();
+ virtual bool NotifyException(MachException::Data& exc);
+
+ static DNBArchProtocol *Create (MachThread *thread);
+ static const uint8_t * SoftwareBreakpointOpcode (nub_size_t byte_size);
+ static uint32_t GetCPUType();
+
+ virtual uint32_t NumSupportedHardwareBreakpoints();
+ virtual uint32_t NumSupportedHardwareWatchpoints();
+ virtual uint32_t EnableHardwareBreakpoint (nub_addr_t addr, nub_size_t size);
+ virtual bool DisableHardwareBreakpoint (uint32_t hw_break_index);
+
+ virtual uint32_t EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write, bool also_set_on_task);
+ virtual bool DisableHardwareWatchpoint (uint32_t hw_break_index, bool also_set_on_task);
+ virtual bool DisableHardwareWatchpoint_helper (uint32_t hw_break_index, bool also_set_on_task);
+ virtual bool ReenableHardwareWatchpoint (uint32_t hw_break_index);
+ virtual bool ReenableHardwareWatchpoint_helper (uint32_t hw_break_index);
+
+ virtual bool StepNotComplete ();
+ virtual uint32_t GetHardwareWatchpointHit(nub_addr_t &addr);
+
+#if defined (ARM_DEBUG_STATE32) && (defined (__arm64__) || defined (__aarch64__))
+ typedef arm_debug_state32_t DBG;
+#else
+ typedef arm_debug_state_t DBG;
+#endif
+
+protected:
+
+
+ kern_return_t EnableHardwareSingleStep (bool enable);
+ kern_return_t SetSingleStepSoftwareBreakpoints ();
+
+ bool ConditionPassed(uint8_t condition, uint32_t cpsr);
+#if defined (USE_ARM_DISASSEMBLER_FRAMEWORK)
+ bool ComputeNextPC(nub_addr_t currentPC, arm_decoded_instruction_t decodedInstruction, bool currentPCIsThumb, nub_addr_t *targetPC);
+ arm_error_t DecodeInstructionUsingDisassembler(nub_addr_t curr_pc, uint32_t curr_cpsr, arm_decoded_instruction_t *decodedInstruction, thumb_static_data_t *thumbStaticData, nub_addr_t *next_pc);
+ void DecodeITBlockInstructions(nub_addr_t curr_pc);
+#endif
+ void EvaluateNextInstructionForSoftwareBreakpointSetup(nub_addr_t currentPC, uint32_t cpsr, bool currentPCIsThumb, nub_addr_t *nextPC, bool *nextPCIsThumb);
+
+
+ typedef enum RegisterSetTag
+ {
+ e_regSetALL = REGISTER_SET_ALL,
+ e_regSetGPR, // ARM_THREAD_STATE
+ e_regSetVFP, // ARM_VFP_STATE (ARM_NEON_STATE if defined __arm64__)
+ e_regSetEXC, // ARM_EXCEPTION_STATE
+ e_regSetDBG, // ARM_DEBUG_STATE (ARM_DEBUG_STATE32 if defined __arm64__)
+ kNumRegisterSets
+ } RegisterSet;
+
+ enum
+ {
+ Read = 0,
+ Write = 1,
+ kNumErrors = 2
+ };
+
+ typedef arm_thread_state_t GPR;
+#if defined (__arm64__) || defined (__aarch64__)
+ typedef arm_neon_state_t FPU;
+#else
+ typedef arm_vfp_state_t FPU;
+#endif
+ typedef arm_exception_state_t EXC;
+
+ static const DNBRegisterInfo g_gpr_registers[];
+ static const DNBRegisterInfo g_vfp_registers[];
+ static const DNBRegisterInfo g_exc_registers[];
+ static const DNBRegisterSetInfo g_reg_sets[];
+
+ static const size_t k_num_gpr_registers;
+ static const size_t k_num_vfp_registers;
+ static const size_t k_num_exc_registers;
+ static const size_t k_num_all_registers;
+ static const size_t k_num_register_sets;
+
+ struct Context
+ {
+ GPR gpr;
+ FPU vfp;
+ EXC exc;
+ };
+
+ struct State
+ {
+ Context context;
+ DBG dbg;
+ kern_return_t gpr_errs[2]; // Read/Write errors
+ kern_return_t vfp_errs[2]; // Read/Write errors
+ kern_return_t exc_errs[2]; // Read/Write errors
+ kern_return_t dbg_errs[2]; // Read/Write errors
+ State()
+ {
+ uint32_t i;
+ for (i=0; i<kNumErrors; i++)
+ {
+ gpr_errs[i] = -1;
+ vfp_errs[i] = -1;
+ exc_errs[i] = -1;
+ dbg_errs[i] = -1;
+ }
+ }
+ void InvalidateRegisterSetState(int set)
+ {
+ SetError (set, Read, -1);
+ }
+ kern_return_t GetError (int set, uint32_t err_idx) const
+ {
+ if (err_idx < kNumErrors)
+ {
+ switch (set)
+ {
+                // When getting all errors, just OR all values together to see if
+                // we got any kind of error: KERN_SUCCESS is 0, so the result is
+                // nonzero iff at least one register set had an error.
+ case e_regSetALL: return gpr_errs[err_idx] |
+ vfp_errs[err_idx] |
+ exc_errs[err_idx] |
+ dbg_errs[err_idx] ;
+ case e_regSetGPR: return gpr_errs[err_idx];
+ case e_regSetVFP: return vfp_errs[err_idx];
+ case e_regSetEXC: return exc_errs[err_idx];
+ case e_regSetDBG: return dbg_errs[err_idx];
+ default: break;
+ }
+ }
+ return -1;
+ }
+ bool SetError (int set, uint32_t err_idx, kern_return_t err)
+ {
+ if (err_idx < kNumErrors)
+ {
+ switch (set)
+ {
+ case e_regSetALL:
+ gpr_errs[err_idx] = err;
+ vfp_errs[err_idx] = err;
+ dbg_errs[err_idx] = err;
+ exc_errs[err_idx] = err;
+ return true;
+
+ case e_regSetGPR:
+ gpr_errs[err_idx] = err;
+ return true;
+
+ case e_regSetVFP:
+ vfp_errs[err_idx] = err;
+ return true;
+
+ case e_regSetEXC:
+ exc_errs[err_idx] = err;
+ return true;
+
+ case e_regSetDBG:
+ dbg_errs[err_idx] = err;
+ return true;
+ default: break;
+ }
+ }
+ return false;
+ }
+ bool RegsAreValid (int set) const
+ {
+ return GetError(set, Read) == KERN_SUCCESS;
+ }
+ };
+
+ kern_return_t GetGPRState (bool force);
+ kern_return_t GetVFPState (bool force);
+ kern_return_t GetEXCState (bool force);
+ kern_return_t GetDBGState (bool force);
+
+ kern_return_t SetGPRState ();
+ kern_return_t SetVFPState ();
+ kern_return_t SetEXCState ();
+ kern_return_t SetDBGState (bool also_set_on_task);
+
+ bool IsWatchpointEnabled(const DBG &debug_state, uint32_t hw_index);
+ nub_addr_t GetWatchpointAddressByIndex (uint32_t hw_index);
+ nub_addr_t GetWatchAddress(const DBG &debug_state, uint32_t hw_index);
+
+ class disabled_watchpoint {
+ public:
+ disabled_watchpoint () { addr = 0; control = 0; }
+ nub_addr_t addr;
+ uint32_t control;
+ };
+
+protected:
+ MachThread * m_thread;
+ State m_state;
+ DBG m_dbg_save;
+
+ // armv8 doesn't keep the disabled watchpoint values in the debug register context like armv7;
+ // we need to save them aside when we disable them temporarily.
+ std::vector<disabled_watchpoint> m_disabled_watchpoints;
+
+ nub_addr_t m_hw_single_chained_step_addr;
+ nub_addr_t m_last_decode_pc;
+
+ // The following member variables should be updated atomically.
+ int32_t m_watchpoint_hw_index;
+ bool m_watchpoint_did_occur;
+ bool m_watchpoint_resume_single_step_enabled;
+
+ typedef std::map<uint32_t, Context> SaveRegisterStates;
+ SaveRegisterStates m_saved_register_states;
+};
+
+#endif // #if defined (__arm__) || defined (__arm64__) || defined (__aarch64__)
+#endif // #ifndef __DebugNubArchMachARM_h__
diff --git a/tools/debugserver/source/MacOSX/arm64/DNBArchImplARM64.cpp b/tools/debugserver/source/MacOSX/arm64/DNBArchImplARM64.cpp
new file mode 100644
index 000000000000..e79d3d52e8f8
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/arm64/DNBArchImplARM64.cpp
@@ -0,0 +1,2095 @@
+//===-- DNBArchMachARM64.cpp ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/25/07.
+//
+//===----------------------------------------------------------------------===//
+
+#if defined (__arm__) || defined (__arm64__) || defined (__aarch64__)
+
+#include "MacOSX/arm64/DNBArchImplARM64.h"
+
+#if defined (ARM_THREAD_STATE64_COUNT)
+
+#include "MacOSX/MachProcess.h"
+#include "MacOSX/MachThread.h"
+#include "DNBBreakpoint.h"
+#include "DNBLog.h"
+#include "DNBRegisterInfo.h"
+#include "DNB.h"
+
+#include <inttypes.h>
+#include <sys/sysctl.h>
+
+// Break only in user mode (EL0)
+// (PAC bits in the DBGWCRn_EL1 watchpoint control register)
+#define S_USER ((uint32_t)(2u << 1))
+
+#define BCR_ENABLE ((uint32_t)(1u))
+#define WCR_ENABLE ((uint32_t)(1u))
+
+// Watchpoint load/store
+// (LSC bits in the DBGWVRn_EL1 watchpoint control register)
+#define WCR_LOAD ((uint32_t)(1u << 3))
+#define WCR_STORE ((uint32_t)(1u << 4))
+
+// Enable breakpoint, watchpoint, and vector catch debug exceptions.
+// (MDE bit in the MDSCR_EL1 register. Equivalent to the MDBGen bit in DBGDSCRext in AArch32)
+#define MDE_ENABLE ((uint32_t)(1u << 15))
+
+// Single instruction step
+// (SS bit in the MDSCR_EL1 register)
+#define SS_ENABLE ((uint32_t)(1u))
+
+static const uint8_t g_arm64_breakpoint_opcode[] = { 0x00, 0x00, 0x20, 0xD4 };  // "brk #0", 0xd4200000 stored in little-endian byte order
+static const uint8_t g_arm_breakpoint_opcode[] = { 0xFE, 0xDE, 0xFF, 0xE7 }; // this armv7 insn also works in arm64
+
+// If we need to set one logical watchpoint by using
+// two hardware watchpoint registers, the watchpoint
+// will be split into a "high" and "low" watchpoint.
+// Record both of them in the LoHi array.
+
+// It's safe to initialize to all 0's since
+// hi > lo and therefore LoHi[i] cannot be 0.
+static uint32_t LoHi[16] = { 0 };
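+
+// For illustration: a 4-byte watchpoint at address 0x1006 spans two doublewords, so
+// EnableHardwareWatchpoint() sets up one hardware slot for bytes 6-7 of the doubleword at
+// 0x1000 and a second slot for bytes 0-1 of the doubleword at 0x1008; if those slots are
+// (say) 0 and 1, then LoHi[0] = 1 records the pairing.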
+
+
+void
+DNBArchMachARM64::Initialize()
+{
+ DNBArchPluginInfo arch_plugin_info =
+ {
+ CPU_TYPE_ARM64,
+ DNBArchMachARM64::Create,
+ DNBArchMachARM64::GetRegisterSetInfo,
+ DNBArchMachARM64::SoftwareBreakpointOpcode
+ };
+
+ // Register this arch plug-in with the main protocol class
+ DNBArchProtocol::RegisterArchPlugin (arch_plugin_info);
+}
+
+
+DNBArchProtocol *
+DNBArchMachARM64::Create (MachThread *thread)
+{
+ DNBArchMachARM64 *obj = new DNBArchMachARM64 (thread);
+
+ return obj;
+}
+
+const uint8_t *
+DNBArchMachARM64::SoftwareBreakpointOpcode (nub_size_t byte_size)
+{
+ return g_arm_breakpoint_opcode;
+}
+
+uint32_t
+DNBArchMachARM64::GetCPUType()
+{
+ return CPU_TYPE_ARM64;
+}
+
+uint64_t
+DNBArchMachARM64::GetPC(uint64_t failValue)
+{
+ // Get program counter
+ if (GetGPRState(false) == KERN_SUCCESS)
+ return m_state.context.gpr.__pc;
+ return failValue;
+}
+
+kern_return_t
+DNBArchMachARM64::SetPC(uint64_t value)
+{
+    // Set the program counter (read the GPR state first so only the PC is modified)
+ kern_return_t err = GetGPRState(false);
+ if (err == KERN_SUCCESS)
+ {
+ m_state.context.gpr.__pc = value;
+ err = SetGPRState();
+ }
+ return err == KERN_SUCCESS;
+}
+
+uint64_t
+DNBArchMachARM64::GetSP(uint64_t failValue)
+{
+ // Get stack pointer
+ if (GetGPRState(false) == KERN_SUCCESS)
+ return m_state.context.gpr.__sp;
+ return failValue;
+}
+
+kern_return_t
+DNBArchMachARM64::GetGPRState(bool force)
+{
+ int set = e_regSetGPR;
+ // Check if we have valid cached registers
+ if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
+ return KERN_SUCCESS;
+
+ // Read the registers from our thread
+ mach_msg_type_number_t count = e_regSetGPRCount;
+ kern_return_t kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_THREAD_STATE64, (thread_state_t)&m_state.context.gpr, &count);
+ if (DNBLogEnabledForAny (LOG_THREAD))
+ {
+ uint64_t *x = &m_state.context.gpr.__x[0];
+ DNBLogThreaded("thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x (count = %u) regs"
+ "\n x0=%16.16llx"
+ "\n x1=%16.16llx"
+ "\n x2=%16.16llx"
+ "\n x3=%16.16llx"
+ "\n x4=%16.16llx"
+ "\n x5=%16.16llx"
+ "\n x6=%16.16llx"
+ "\n x7=%16.16llx"
+ "\n x8=%16.16llx"
+ "\n x9=%16.16llx"
+ "\n x10=%16.16llx"
+ "\n x11=%16.16llx"
+ "\n x12=%16.16llx"
+ "\n x13=%16.16llx"
+ "\n x14=%16.16llx"
+ "\n x15=%16.16llx"
+ "\n x16=%16.16llx"
+ "\n x17=%16.16llx"
+ "\n x18=%16.16llx"
+ "\n x19=%16.16llx"
+ "\n x20=%16.16llx"
+ "\n x21=%16.16llx"
+ "\n x22=%16.16llx"
+ "\n x23=%16.16llx"
+ "\n x24=%16.16llx"
+ "\n x25=%16.16llx"
+ "\n x26=%16.16llx"
+ "\n x27=%16.16llx"
+ "\n x28=%16.16llx"
+ "\n fp=%16.16llx"
+ "\n lr=%16.16llx"
+ "\n sp=%16.16llx"
+ "\n pc=%16.16llx"
+ "\n cpsr=%8.8x",
+ m_thread->MachPortNumber(),
+ e_regSetGPR,
+ e_regSetGPRCount,
+ kret,
+ count,
+ x[0],
+ x[1],
+ x[2],
+ x[3],
+ x[4],
+ x[5],
+ x[6],
+ x[7],
+ x[8],
+ x[9],
+                       x[10],
+ x[11],
+ x[12],
+ x[13],
+ x[14],
+ x[15],
+ x[16],
+ x[17],
+ x[18],
+ x[19],
+ x[20],
+ x[21],
+ x[22],
+ x[23],
+ x[24],
+ x[25],
+ x[26],
+ x[27],
+ x[28],
+ m_state.context.gpr.__fp,
+ m_state.context.gpr.__lr,
+ m_state.context.gpr.__sp,
+ m_state.context.gpr.__pc,
+ m_state.context.gpr.__cpsr);
+ }
+ m_state.SetError(set, Read, kret);
+ return kret;
+}
+
+kern_return_t
+DNBArchMachARM64::GetVFPState(bool force)
+{
+ int set = e_regSetVFP;
+ // Check if we have valid cached registers
+ if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
+ return KERN_SUCCESS;
+
+ // Read the registers from our thread
+ mach_msg_type_number_t count = e_regSetVFPCount;
+ kern_return_t kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_NEON_STATE64, (thread_state_t)&m_state.context.vfp, &count);
+ if (DNBLogEnabledForAny (LOG_THREAD))
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ DNBLogThreaded("thread_get_state(0x%4.4x, %u, &vfp, %u) => 0x%8.8x (count = %u) regs"
+ "\n q0 = 0x%16.16llx%16.16llx"
+ "\n q1 = 0x%16.16llx%16.16llx"
+ "\n q2 = 0x%16.16llx%16.16llx"
+ "\n q3 = 0x%16.16llx%16.16llx"
+ "\n q4 = 0x%16.16llx%16.16llx"
+ "\n q5 = 0x%16.16llx%16.16llx"
+ "\n q6 = 0x%16.16llx%16.16llx"
+ "\n q7 = 0x%16.16llx%16.16llx"
+ "\n q8 = 0x%16.16llx%16.16llx"
+ "\n q9 = 0x%16.16llx%16.16llx"
+ "\n q10 = 0x%16.16llx%16.16llx"
+ "\n q11 = 0x%16.16llx%16.16llx"
+ "\n q12 = 0x%16.16llx%16.16llx"
+ "\n q13 = 0x%16.16llx%16.16llx"
+ "\n q14 = 0x%16.16llx%16.16llx"
+ "\n q15 = 0x%16.16llx%16.16llx"
+ "\n q16 = 0x%16.16llx%16.16llx"
+ "\n q17 = 0x%16.16llx%16.16llx"
+ "\n q18 = 0x%16.16llx%16.16llx"
+ "\n q19 = 0x%16.16llx%16.16llx"
+ "\n q20 = 0x%16.16llx%16.16llx"
+ "\n q21 = 0x%16.16llx%16.16llx"
+ "\n q22 = 0x%16.16llx%16.16llx"
+ "\n q23 = 0x%16.16llx%16.16llx"
+ "\n q24 = 0x%16.16llx%16.16llx"
+ "\n q25 = 0x%16.16llx%16.16llx"
+ "\n q26 = 0x%16.16llx%16.16llx"
+ "\n q27 = 0x%16.16llx%16.16llx"
+ "\n q28 = 0x%16.16llx%16.16llx"
+ "\n q29 = 0x%16.16llx%16.16llx"
+ "\n q30 = 0x%16.16llx%16.16llx"
+ "\n q31 = 0x%16.16llx%16.16llx"
+ "\n fpsr = 0x%8.8x"
+ "\n fpcr = 0x%8.8x\n\n",
+ m_thread->MachPortNumber(),
+ e_regSetVFP,
+ e_regSetVFPCount,
+ kret,
+ count,
+ ((uint64_t *)&m_state.context.vfp.__v[0])[0] , ((uint64_t *)&m_state.context.vfp.__v[0])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[1])[0] , ((uint64_t *)&m_state.context.vfp.__v[1])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[2])[0] , ((uint64_t *)&m_state.context.vfp.__v[2])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[3])[0] , ((uint64_t *)&m_state.context.vfp.__v[3])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[4])[0] , ((uint64_t *)&m_state.context.vfp.__v[4])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[5])[0] , ((uint64_t *)&m_state.context.vfp.__v[5])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[6])[0] , ((uint64_t *)&m_state.context.vfp.__v[6])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[7])[0] , ((uint64_t *)&m_state.context.vfp.__v[7])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[8])[0] , ((uint64_t *)&m_state.context.vfp.__v[8])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[9])[0] , ((uint64_t *)&m_state.context.vfp.__v[9])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[10])[0], ((uint64_t *)&m_state.context.vfp.__v[10])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[11])[0], ((uint64_t *)&m_state.context.vfp.__v[11])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[12])[0], ((uint64_t *)&m_state.context.vfp.__v[12])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[13])[0], ((uint64_t *)&m_state.context.vfp.__v[13])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[14])[0], ((uint64_t *)&m_state.context.vfp.__v[14])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[15])[0], ((uint64_t *)&m_state.context.vfp.__v[15])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[16])[0], ((uint64_t *)&m_state.context.vfp.__v[16])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[17])[0], ((uint64_t *)&m_state.context.vfp.__v[17])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[18])[0], ((uint64_t *)&m_state.context.vfp.__v[18])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[19])[0], ((uint64_t *)&m_state.context.vfp.__v[19])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[20])[0], ((uint64_t *)&m_state.context.vfp.__v[20])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[21])[0], ((uint64_t *)&m_state.context.vfp.__v[21])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[22])[0], ((uint64_t *)&m_state.context.vfp.__v[22])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[23])[0], ((uint64_t *)&m_state.context.vfp.__v[23])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[24])[0], ((uint64_t *)&m_state.context.vfp.__v[24])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[25])[0], ((uint64_t *)&m_state.context.vfp.__v[25])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[26])[0], ((uint64_t *)&m_state.context.vfp.__v[26])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[27])[0], ((uint64_t *)&m_state.context.vfp.__v[27])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[28])[0], ((uint64_t *)&m_state.context.vfp.__v[28])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[29])[0], ((uint64_t *)&m_state.context.vfp.__v[29])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[30])[0], ((uint64_t *)&m_state.context.vfp.__v[30])[1],
+ ((uint64_t *)&m_state.context.vfp.__v[31])[0], ((uint64_t *)&m_state.context.vfp.__v[31])[1],
+ m_state.context.vfp.__fpsr,
+ m_state.context.vfp.__fpcr);
+#endif
+ }
+ m_state.SetError(set, Read, kret);
+ return kret;
+}
+
+kern_return_t
+DNBArchMachARM64::GetEXCState(bool force)
+{
+ int set = e_regSetEXC;
+ // Check if we have valid cached registers
+ if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
+ return KERN_SUCCESS;
+
+ // Read the registers from our thread
+ mach_msg_type_number_t count = e_regSetEXCCount;
+ kern_return_t kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64, (thread_state_t)&m_state.context.exc, &count);
+ m_state.SetError(set, Read, kret);
+ return kret;
+}
+
+static void
+DumpDBGState(const arm_debug_state_t& dbg)
+{
+ uint32_t i = 0;
+ for (i=0; i<16; i++)
+ DNBLogThreadedIf(LOG_STEP, "BVR%-2u/BCR%-2u = { 0x%8.8x, 0x%8.8x } WVR%-2u/WCR%-2u = { 0x%8.8x, 0x%8.8x }",
+ i, i, dbg.__bvr[i], dbg.__bcr[i],
+ i, i, dbg.__wvr[i], dbg.__wcr[i]);
+}
+
+kern_return_t
+DNBArchMachARM64::GetDBGState(bool force)
+{
+ int set = e_regSetDBG;
+
+ // Check if we have valid cached registers
+ if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
+ return KERN_SUCCESS;
+
+ // Read the registers from our thread
+ mach_msg_type_number_t count = e_regSetDBGCount;
+ kern_return_t kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64, (thread_state_t)&m_state.dbg, &count);
+ m_state.SetError(set, Read, kret);
+
+ return kret;
+}
+
+kern_return_t
+DNBArchMachARM64::SetGPRState()
+{
+ int set = e_regSetGPR;
+ kern_return_t kret = ::thread_set_state(m_thread->MachPortNumber(), ARM_THREAD_STATE64, (thread_state_t)&m_state.context.gpr, e_regSetGPRCount);
+ m_state.SetError(set, Write, kret); // Set the current write error for this register set
+ m_state.InvalidateRegisterSetState(set); // Invalidate the current register state in case registers are read back differently
+ return kret; // Return the error code
+}
+
+kern_return_t
+DNBArchMachARM64::SetVFPState()
+{
+ int set = e_regSetVFP;
+ kern_return_t kret = ::thread_set_state (m_thread->MachPortNumber(), ARM_NEON_STATE64, (thread_state_t)&m_state.context.vfp, e_regSetVFPCount);
+ m_state.SetError(set, Write, kret); // Set the current write error for this register set
+ m_state.InvalidateRegisterSetState(set); // Invalidate the current register state in case registers are read back differently
+ return kret; // Return the error code
+}
+
+kern_return_t
+DNBArchMachARM64::SetEXCState()
+{
+ int set = e_regSetEXC;
+ kern_return_t kret = ::thread_set_state (m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64, (thread_state_t)&m_state.context.exc, e_regSetEXCCount);
+ m_state.SetError(set, Write, kret); // Set the current write error for this register set
+ m_state.InvalidateRegisterSetState(set); // Invalidate the current register state in case registers are read back differently
+ return kret; // Return the error code
+}
+
+kern_return_t
+DNBArchMachARM64::SetDBGState(bool also_set_on_task)
+{
+ int set = e_regSetDBG;
+ kern_return_t kret = ::thread_set_state (m_thread->MachPortNumber(), ARM_DEBUG_STATE64, (thread_state_t)&m_state.dbg, e_regSetDBGCount);
+ if (also_set_on_task)
+ {
+ kern_return_t task_kret = task_set_state (m_thread->Process()->Task().TaskPort(), ARM_DEBUG_STATE64, (thread_state_t)&m_state.dbg, e_regSetDBGCount);
+ if (task_kret != KERN_SUCCESS)
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::SetDBGState failed to set debug control register state: 0x%8.8x.", task_kret);
+ }
+ m_state.SetError(set, Write, kret); // Set the current write error for this register set
+ m_state.InvalidateRegisterSetState(set); // Invalidate the current register state in case registers are read back differently
+
+ return kret; // Return the error code
+}
+
+void
+DNBArchMachARM64::ThreadWillResume()
+{
+ // Do we need to step this thread? If so, let the mach thread tell us so.
+ if (m_thread->IsStepping())
+ {
+ EnableHardwareSingleStep(true);
+ }
+
+ // Disable the triggered watchpoint temporarily before we resume.
+ // Plus, we try to enable hardware single step to execute past the instruction which triggered our watchpoint.
+ if (m_watchpoint_did_occur)
+ {
+ if (m_watchpoint_hw_index >= 0)
+ {
+ kern_return_t kret = GetDBGState(false);
+ if (kret == KERN_SUCCESS && !IsWatchpointEnabled(m_state.dbg, m_watchpoint_hw_index)) {
+ // The watchpoint might have been disabled by the user. We don't need to do anything at all
+ // to enable hardware single stepping.
+ m_watchpoint_did_occur = false;
+ m_watchpoint_hw_index = -1;
+ return;
+ }
+
+ DisableHardwareWatchpoint(m_watchpoint_hw_index, false);
+            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::ThreadWillResume() DisableHardwareWatchpoint(%d) called",
+ m_watchpoint_hw_index);
+
+ // Enable hardware single step to move past the watchpoint-triggering instruction.
+ m_watchpoint_resume_single_step_enabled = (EnableHardwareSingleStep(true) == KERN_SUCCESS);
+
+ // If we are not able to enable single step to move past the watchpoint-triggering instruction,
+ // at least we should reset the two watchpoint member variables so that the next time around
+ // this callback function is invoked, the enclosing logical branch is skipped.
+ if (!m_watchpoint_resume_single_step_enabled) {
+ // Reset the two watchpoint member variables.
+ m_watchpoint_did_occur = false;
+ m_watchpoint_hw_index = -1;
+                DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::ThreadWillResume() failed to enable single step");
+            }
+            else
+                DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::ThreadWillResume() succeeded in enabling single step");
+ }
+ }
+}
+
+bool
+DNBArchMachARM64::NotifyException(MachException::Data& exc)
+{
+
+ switch (exc.exc_type)
+ {
+ default:
+ break;
+ case EXC_BREAKPOINT:
+ if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_DA_DEBUG)
+ {
+ // The data break address is passed as exc_data[1].
+ nub_addr_t addr = exc.exc_data[1];
+ // Find the hardware index with the side effect of possibly massaging the
+ // addr to return the starting address as seen from the debugger side.
+ uint32_t hw_index = GetHardwareWatchpointHit(addr);
+
+ // One logical watchpoint was split into two watchpoint locations because
+ // it was too big. If the watchpoint exception is indicating the 2nd half
+ // of the two-parter, find the address of the 1st half and report that --
+ // that's what lldb is going to expect to see.
+            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::NotifyException watchpoint %d was hit on address 0x%llx", hw_index, (uint64_t) addr);
+ const int num_watchpoints = NumSupportedHardwareWatchpoints ();
+ for (int i = 0; i < num_watchpoints; i++)
+ {
+ if (LoHi[i] != 0
+ && LoHi[i] == hw_index
+ && LoHi[i] != i
+ && GetWatchpointAddressByIndex (i) != INVALID_NUB_ADDRESS)
+ {
+ addr = GetWatchpointAddressByIndex (i);
+                    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::NotifyException It is a linked watchpoint; rewritten to index %d addr 0x%llx", LoHi[i], (uint64_t) addr);
+ }
+ }
+
+ if (hw_index != INVALID_NUB_HW_INDEX)
+ {
+ m_watchpoint_did_occur = true;
+ m_watchpoint_hw_index = hw_index;
+ exc.exc_data[1] = addr;
+ // Piggyback the hw_index in the exc.data.
+ exc.exc_data.push_back(hw_index);
+ }
+
+ return true;
+ }
+ break;
+ }
+ return false;
+}
+
+bool
+DNBArchMachARM64::ThreadDidStop()
+{
+ bool success = true;
+
+ m_state.InvalidateAllRegisterStates();
+
+ if (m_watchpoint_resume_single_step_enabled)
+ {
+ // Great! We now disable the hardware single step as well as re-enable the hardware watchpoint.
+ // See also ThreadWillResume().
+ if (EnableHardwareSingleStep(false) == KERN_SUCCESS)
+ {
+ if (m_watchpoint_did_occur && m_watchpoint_hw_index >= 0)
+ {
+ ReenableHardwareWatchpoint(m_watchpoint_hw_index);
+ m_watchpoint_resume_single_step_enabled = false;
+ m_watchpoint_did_occur = false;
+ m_watchpoint_hw_index = -1;
+ }
+ else
+ {
+                DNBLogError("internal error detected: m_watchpoint_resume_single_step_enabled is true but (m_watchpoint_did_occur && m_watchpoint_hw_index >= 0) does not hold!");
+ }
+ }
+ else
+ {
+            DNBLogError("internal error detected: m_watchpoint_resume_single_step_enabled is true but unable to disable single step!");
+ }
+ }
+
+ // Are we stepping a single instruction?
+ if (GetGPRState(true) == KERN_SUCCESS)
+ {
+ // We are single stepping, was this the primary thread?
+ if (m_thread->IsStepping())
+ {
+ // This was the primary thread, we need to clear the trace
+ // bit if so.
+ success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
+ }
+ else
+ {
+ // The MachThread will automatically restore the suspend count
+ // in ThreadDidStop(), so we don't need to do anything here if
+ // we weren't the primary thread the last time
+ }
+ }
+ return success;
+}
+
+// Set the single step bit in the processor status register.
+kern_return_t
+DNBArchMachARM64::EnableHardwareSingleStep (bool enable)
+{
+ DNBError err;
+ DNBLogThreadedIf(LOG_STEP, "%s( enable = %d )", __FUNCTION__, enable);
+
+ err = GetGPRState(false);
+
+ if (err.Fail())
+ {
+ err.LogThreaded("%s: failed to read the GPR registers", __FUNCTION__);
+ return err.Error();
+ }
+
+ err = GetDBGState(false);
+
+ if (err.Fail())
+ {
+ err.LogThreaded("%s: failed to read the DBG registers", __FUNCTION__);
+ return err.Error();
+ }
+
+ if (enable)
+ {
+ DNBLogThreadedIf(LOG_STEP, "%s: Setting MDSCR_EL1 Single Step bit at pc 0x%llx", __FUNCTION__, (uint64_t) m_state.context.gpr.__pc);
+ m_state.dbg.__mdscr_el1 |= SS_ENABLE;
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_STEP, "%s: Clearing MDSCR_EL1 Single Step bit at pc 0x%llx", __FUNCTION__, (uint64_t) m_state.context.gpr.__pc);
+ m_state.dbg.__mdscr_el1 &= ~(SS_ENABLE);
+ }
+
+ return SetDBGState(false);
+}
+
+// return 1 if bit "BIT" is set in "value"
+static inline uint32_t bit(uint32_t value, uint32_t bit)
+{
+ return (value >> bit) & 1u;
+}
+
+// return the bitfield "value[msbit:lsbit]".
+static inline uint64_t bits(uint64_t value, uint32_t msbit, uint32_t lsbit)
+{
+ assert(msbit >= lsbit);
+ uint64_t shift_left = sizeof(value) * 8 - 1 - msbit;
+ value <<= shift_left; // shift anything above the msbit off of the unsigned edge
+ value >>= shift_left + lsbit; // shift it back again down to the lsbit (including undoing any shift from above)
+ return value; // return our result
+}
+
+uint32_t
+DNBArchMachARM64::NumSupportedHardwareWatchpoints()
+{
+ // Set the init value to something that will let us know that we need to
+ // autodetect how many watchpoints are supported dynamically...
+ static uint32_t g_num_supported_hw_watchpoints = UINT_MAX;
+ if (g_num_supported_hw_watchpoints == UINT_MAX)
+ {
+        // Set this to zero in case we can't tell if there are any HW watchpoints
+ g_num_supported_hw_watchpoints = 0;
+
+
+ size_t len;
+ uint32_t n = 0;
+ len = sizeof (n);
+ if (::sysctlbyname("hw.optional.watchpoint", &n, &len, NULL, 0) == 0)
+ {
+ g_num_supported_hw_watchpoints = n;
+ DNBLogThreadedIf(LOG_THREAD, "hw.optional.watchpoint=%u", n);
+ }
+ else
+ {
+ // For AArch64 we would need to look at ID_AA64DFR0_EL1 but debugserver runs in EL0 so it can't
+ // access that reg. The kernel should have filled in the sysctls based on it though.
+#if defined (__arm__)
+ uint32_t register_DBGDIDR;
+
+ asm("mrc p14, 0, %0, c0, c0, 0" : "=r" (register_DBGDIDR));
+ uint32_t numWRPs = bits(register_DBGDIDR, 31, 28);
+ // Zero is reserved for the WRP count, so don't increment it if it is zero
+ if (numWRPs > 0)
+ numWRPs++;
+ g_num_supported_hw_watchpoints = numWRPs;
+ DNBLogThreadedIf(LOG_THREAD, "Number of supported hw watchpoints via asm(): %d", g_num_supported_hw_watchpoints);
+#endif
+ }
+ }
+ return g_num_supported_hw_watchpoints;
+}
+
+uint32_t
+DNBArchMachARM64::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write, bool also_set_on_task)
+{
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::EnableHardwareWatchpoint(addr = 0x%8.8llx, size = %zu, read = %u, write = %u)", (uint64_t)addr, size, read, write);
+
+ const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
+
+ // Can't watch zero bytes
+ if (size == 0)
+ return INVALID_NUB_HW_INDEX;
+
+ // We must watch for either read or write
+ if (read == false && write == false)
+ return INVALID_NUB_HW_INDEX;
+
+ // Otherwise, can't watch more than 8 bytes per WVR/WCR pair
+ if (size > 8)
+ return INVALID_NUB_HW_INDEX;
+
+ // arm64 watchpoints really have an 8-byte alignment requirement. You can put a watchpoint on a 4-byte
+ // offset address but you can only watch 4 bytes with that watchpoint.
+
+ // arm64 watchpoints on an 8-byte (double word) aligned addr can watch any bytes in that
+ // 8-byte long region of memory. They can watch the 1st byte, the 2nd byte, 3rd byte, etc, or any
+ // combination therein by setting the bits in the BAS [12:5] (Byte Address Select) field of
+ // the DBGWCRn_EL1 reg for the watchpoint.
+
+    // The MASK [28:24] bits in the DBGWCRn_EL1 allow a single watchpoint to monitor a larger region
+    // of memory (16 bytes, 32 bytes, and so on up to 2GB); in that mode the Byte Address Select bitfield
+    // selects that larger range of bytes instead of individual bytes.  See the ARMv8 Debug Architecture manual for details.
+ // This implementation does not currently use the MASK bits; the largest single region watched by a single
+ // watchpoint right now is 8-bytes.
+
+ nub_addr_t aligned_wp_address = addr & ~0x7;
+ uint32_t addr_dword_offset = addr & 0x7;
+
+ // Do we need to split up this logical watchpoint into two hardware watchpoint
+ // registers?
+ // e.g. a watchpoint of length 4 on address 6. We need do this with
+ // one watchpoint on address 0 with bytes 6 & 7 being monitored
+ // one watchpoint on address 8 with bytes 0, 1, 2, 3 being monitored
+
+ if (addr_dword_offset + size > 8)
+ {
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::EnableHardwareWatchpoint(addr = 0x%8.8llx, size = %zu) needs two hardware watchpoints slots to monitor", (uint64_t)addr, size);
+ int low_watchpoint_size = 8 - addr_dword_offset;
+ int high_watchpoint_size = addr_dword_offset + size - 8;
+
+ uint32_t lo = EnableHardwareWatchpoint(addr, low_watchpoint_size, read, write, also_set_on_task);
+ if (lo == INVALID_NUB_HW_INDEX)
+ return INVALID_NUB_HW_INDEX;
+ uint32_t hi = EnableHardwareWatchpoint (aligned_wp_address + 8, high_watchpoint_size, read, write, also_set_on_task);
+ if (hi == INVALID_NUB_HW_INDEX)
+ {
+ DisableHardwareWatchpoint (lo, also_set_on_task);
+ return INVALID_NUB_HW_INDEX;
+ }
+ // Tag this lo->hi mapping in our database.
+ LoHi[lo] = hi;
+ return lo;
+ }
+
+ // At this point
+ // 1 aligned_wp_address is the requested address rounded down to 8-byte alignment
+ // 2 addr_dword_offset is the offset into that double word (8-byte) region that we are watching
+ // 3 size is the number of bytes within that 8-byte region that we are watching
+
+ // Set the Byte Address Selects bits DBGWCRn_EL1 bits [12:5] based on the above.
+ // The bit shift and negation operation will give us 0b11 for 2, 0b1111 for 4, etc, up to 0b11111111 for 8.
+ // then we shift those bits left by the offset into this dword that we are interested in.
+ // e.g. if we are watching bytes 4,5,6,7 in a dword we want a BAS of 0b11110000.
+ uint32_t byte_address_select = ((1 << size) - 1) << addr_dword_offset;
+
+ // Read the debug state
+ kern_return_t kret = GetDBGState(false);
+
+ if (kret == KERN_SUCCESS)
+ {
+ // Check to make sure we have the needed hardware support
+ uint32_t i = 0;
+
+ for (i=0; i<num_hw_watchpoints; ++i)
+ {
+ if ((m_state.dbg.__wcr[i] & WCR_ENABLE) == 0)
+ break; // We found an available hw watchpoint slot (in i)
+ }
+
+ // See if we found an available hw watchpoint slot above
+ if (i < num_hw_watchpoints)
+ {
+ //DumpDBGState(m_state.dbg);
+
+ // Clear any previous LoHi joined-watchpoint that may have been in use
+ LoHi[i] = 0;
+
+ // shift our Byte Address Select bits up to the correct bit range for the DBGWCRn_EL1
+ byte_address_select = byte_address_select << 5;
+
+ // Make sure bits 1:0 are clear in our address
+ m_state.dbg.__wvr[i] = aligned_wp_address; // DVA (Data Virtual Address)
+ m_state.dbg.__wcr[i] = byte_address_select | // Which bytes that follow the DVA that we will watch
+ S_USER | // Stop only in user mode
+ (read ? WCR_LOAD : 0) | // Stop on read access?
+ (write ? WCR_STORE : 0) | // Stop on write access?
+ WCR_ENABLE; // Enable this watchpoint;
+
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::EnableHardwareWatchpoint() adding watchpoint on address 0x%llx with control register value 0x%x", (uint64_t) m_state.dbg.__wvr[i], (uint32_t) m_state.dbg.__wcr[i]);
+
+ // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us automatically, don't need to do it here.
+
+ kret = SetDBGState(also_set_on_task);
+ //DumpDBGState(m_state.dbg);
+
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret);
+
+ if (kret == KERN_SUCCESS)
+ return i;
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints);
+ }
+ }
+ return INVALID_NUB_HW_INDEX;
+}
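+
+// Worked example of the logic above (illustrative values): EnableHardwareWatchpoint(0x100c, 4,
+// false, true, ...) gives aligned_wp_address = 0x1008 and addr_dword_offset = 4, so
+// byte_address_select = 0b11110000; the chosen slot then gets __wvr[i] = 0x1008 and
+// __wcr[i] = (0b11110000 << 5) | S_USER | WCR_STORE | WCR_ENABLE == 0x1e15.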
+
+bool
+DNBArchMachARM64::ReenableHardwareWatchpoint (uint32_t hw_index)
+{
+ // If this logical watchpoint # is actually implemented using
+ // two hardware watchpoint registers, re-enable both of them.
+
+ if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index])
+ {
+ return ReenableHardwareWatchpoint_helper (hw_index) && ReenableHardwareWatchpoint_helper (LoHi[hw_index]);
+ }
+ else
+ {
+ return ReenableHardwareWatchpoint_helper (hw_index);
+ }
+}
+
+bool
+DNBArchMachARM64::ReenableHardwareWatchpoint_helper (uint32_t hw_index)
+{
+ kern_return_t kret = GetDBGState(false);
+ if (kret != KERN_SUCCESS)
+ return false;
+
+ const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
+ if (hw_index >= num_hw_points)
+ return false;
+
+ m_state.dbg.__wvr[hw_index] = m_disabled_watchpoints[hw_index].addr;
+ m_state.dbg.__wcr[hw_index] = m_disabled_watchpoints[hw_index].control;
+
+    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::ReenableHardwareWatchpoint_helper( %u ) - WVR%u = 0x%8.8llx WCR%u = 0x%8.8llx",
+ hw_index,
+ hw_index,
+ (uint64_t) m_state.dbg.__wvr[hw_index],
+ hw_index,
+ (uint64_t) m_state.dbg.__wcr[hw_index]);
+
+ // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us automatically, don't need to do it here.
+
+ kret = SetDBGState(false);
+
+ return (kret == KERN_SUCCESS);
+}
+
+bool
+DNBArchMachARM64::DisableHardwareWatchpoint (uint32_t hw_index, bool also_set_on_task)
+{
+ if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index])
+ {
+ return DisableHardwareWatchpoint_helper (hw_index, also_set_on_task) && DisableHardwareWatchpoint_helper (LoHi[hw_index], also_set_on_task);
+ }
+ else
+ {
+ return DisableHardwareWatchpoint_helper (hw_index, also_set_on_task);
+ }
+}
+
+bool
+DNBArchMachARM64::DisableHardwareWatchpoint_helper (uint32_t hw_index, bool also_set_on_task)
+{
+ kern_return_t kret = GetDBGState(false);
+ if (kret != KERN_SUCCESS)
+ return false;
+
+ const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
+ if (hw_index >= num_hw_points)
+ return false;
+
+ m_disabled_watchpoints[hw_index].addr = m_state.dbg.__wvr[hw_index];
+ m_disabled_watchpoints[hw_index].control = m_state.dbg.__wcr[hw_index];
+
+ m_state.dbg.__wcr[hw_index] &= ~((nub_addr_t)WCR_ENABLE);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::DisableHardwareWatchpoint( %u ) - WVR%u = 0x%8.8llx WCR%u = 0x%8.8llx",
+ hw_index,
+ hw_index,
+ (uint64_t) m_state.dbg.__wvr[hw_index],
+ hw_index,
+ (uint64_t) m_state.dbg.__wcr[hw_index]);
+
+ kret = SetDBGState(also_set_on_task);
+
+ return (kret == KERN_SUCCESS);
+}
+
+// This is for checking the Byte Address Select bits in the DBGWCRn_EL1 control register.
+// Returns -1 if the trailing bit patterns are not one of:
+// { 0b???????1, 0b??????10, 0b?????100, 0b????1000, 0b???10000, 0b??100000, 0b?1000000, 0b10000000 }.
+static inline
+int32_t
+LowestBitSet(uint32_t val)
+{
+ for (unsigned i = 0; i < 8; ++i) {
+ if (bit(val, i))
+ return i;
+ }
+ return -1;
+}
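+
+// For example, a watchpoint that covers bytes 4-7 of its doubleword has BAS == 0b11110000,
+// so LowestBitSet(0b11110000) == 4 and GetHardwareWatchpointHit() below reports the watched
+// start address as wp_addr + 4.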
+
+// Iterate through the debug registers; return the index of the first watchpoint whose address matches.
+// As a side effect, the starting address as understood by the debugger is returned which could be
+// different from 'addr' passed as an in/out argument.
+uint32_t
+DNBArchMachARM64::GetHardwareWatchpointHit(nub_addr_t &addr)
+{
+ // Read the debug state
+ kern_return_t kret = GetDBGState(true);
+ //DumpDBGState(m_state.dbg);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", kret);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::GetHardwareWatchpointHit() addr = 0x%llx", (uint64_t)addr);
+
+ // This is the watchpoint value to match against, i.e., word address.
+ nub_addr_t wp_val = addr & ~((nub_addr_t)3);
+ if (kret == KERN_SUCCESS)
+ {
+ DBG &debug_state = m_state.dbg;
+ uint32_t i, num = NumSupportedHardwareWatchpoints();
+ for (i = 0; i < num; ++i)
+ {
+ nub_addr_t wp_addr = GetWatchAddress(debug_state, i);
+ DNBLogThreadedIf(LOG_WATCHPOINTS,
+ "DNBArchMachARM64::GetHardwareWatchpointHit() slot: %u (addr = 0x%llx).",
+ i, (uint64_t)wp_addr);
+ if (wp_val == wp_addr) {
+ uint32_t byte_mask = bits(debug_state.__wcr[i], 12, 5);
+
+ // Sanity check the byte_mask, first.
+ if (LowestBitSet(byte_mask) < 0)
+ continue;
+
+ // Check that the watchpoint is enabled.
+ if (!IsWatchpointEnabled(debug_state, i))
+ continue;
+
+ // Compute the starting address (from the point of view of the debugger).
+ addr = wp_addr + LowestBitSet(byte_mask);
+ return i;
+ }
+ }
+ }
+ return INVALID_NUB_HW_INDEX;
+}
+
+nub_addr_t
+DNBArchMachARM64::GetWatchpointAddressByIndex (uint32_t hw_index)
+{
+ kern_return_t kret = GetDBGState(true);
+ if (kret != KERN_SUCCESS)
+ return INVALID_NUB_ADDRESS;
+ const uint32_t num = NumSupportedHardwareWatchpoints();
+ if (hw_index >= num)
+ return INVALID_NUB_ADDRESS;
+ if (IsWatchpointEnabled (m_state.dbg, hw_index))
+ return GetWatchAddress (m_state.dbg, hw_index);
+ return INVALID_NUB_ADDRESS;
+}
+
+bool
+DNBArchMachARM64::IsWatchpointEnabled(const DBG &debug_state, uint32_t hw_index)
+{
+ // Watchpoint Control Registers, bitfield definitions
+ // ...
+ // Bits Value Description
+ // [0] 0 Watchpoint disabled
+ // 1 Watchpoint enabled.
+ return (debug_state.__wcr[hw_index] & 1u);
+}
+
+nub_addr_t
+DNBArchMachARM64::GetWatchAddress(const DBG &debug_state, uint32_t hw_index)
+{
+    // Watchpoint Value Registers (DBGWVRn_EL1), bitfield definitions
+    // Bits        Description
+    // [63:2]      Watchpoint value (data virtual address)
+    // [1:0]       RES0
+ return bits(debug_state.__wvr[hw_index], 63, 0);
+}
+
+//----------------------------------------------------------------------
+// Register information definitions for 64 bit ARMv8.
+//----------------------------------------------------------------------
+enum gpr_regnums
+{
+ gpr_x0 = 0,
+ gpr_x1,
+ gpr_x2,
+ gpr_x3,
+ gpr_x4,
+ gpr_x5,
+ gpr_x6,
+ gpr_x7,
+ gpr_x8,
+ gpr_x9,
+ gpr_x10,
+ gpr_x11,
+ gpr_x12,
+ gpr_x13,
+ gpr_x14,
+ gpr_x15,
+ gpr_x16,
+ gpr_x17,
+ gpr_x18,
+ gpr_x19,
+ gpr_x20,
+ gpr_x21,
+ gpr_x22,
+ gpr_x23,
+ gpr_x24,
+ gpr_x25,
+ gpr_x26,
+ gpr_x27,
+ gpr_x28,
+ gpr_fp, gpr_x29 = gpr_fp,
+ gpr_lr, gpr_x30 = gpr_lr,
+ gpr_sp, gpr_x31 = gpr_sp,
+ gpr_pc,
+ gpr_cpsr,
+ gpr_w0,
+ gpr_w1,
+ gpr_w2,
+ gpr_w3,
+ gpr_w4,
+ gpr_w5,
+ gpr_w6,
+ gpr_w7,
+ gpr_w8,
+ gpr_w9,
+ gpr_w10,
+ gpr_w11,
+ gpr_w12,
+ gpr_w13,
+ gpr_w14,
+ gpr_w15,
+ gpr_w16,
+ gpr_w17,
+ gpr_w18,
+ gpr_w19,
+ gpr_w20,
+ gpr_w21,
+ gpr_w22,
+ gpr_w23,
+ gpr_w24,
+ gpr_w25,
+ gpr_w26,
+ gpr_w27,
+ gpr_w28
+
+};
+
+enum
+{
+ vfp_v0 = 0,
+ vfp_v1,
+ vfp_v2,
+ vfp_v3,
+ vfp_v4,
+ vfp_v5,
+ vfp_v6,
+ vfp_v7,
+ vfp_v8,
+ vfp_v9,
+ vfp_v10,
+ vfp_v11,
+ vfp_v12,
+ vfp_v13,
+ vfp_v14,
+ vfp_v15,
+ vfp_v16,
+ vfp_v17,
+ vfp_v18,
+ vfp_v19,
+ vfp_v20,
+ vfp_v21,
+ vfp_v22,
+ vfp_v23,
+ vfp_v24,
+ vfp_v25,
+ vfp_v26,
+ vfp_v27,
+ vfp_v28,
+ vfp_v29,
+ vfp_v30,
+ vfp_v31,
+ vfp_fpsr,
+ vfp_fpcr,
+
+ // lower 32 bits of the corresponding vfp_v<n> reg.
+ vfp_s0,
+ vfp_s1,
+ vfp_s2,
+ vfp_s3,
+ vfp_s4,
+ vfp_s5,
+ vfp_s6,
+ vfp_s7,
+ vfp_s8,
+ vfp_s9,
+ vfp_s10,
+ vfp_s11,
+ vfp_s12,
+ vfp_s13,
+ vfp_s14,
+ vfp_s15,
+ vfp_s16,
+ vfp_s17,
+ vfp_s18,
+ vfp_s19,
+ vfp_s20,
+ vfp_s21,
+ vfp_s22,
+ vfp_s23,
+ vfp_s24,
+ vfp_s25,
+ vfp_s26,
+ vfp_s27,
+ vfp_s28,
+ vfp_s29,
+ vfp_s30,
+ vfp_s31,
+
+ // lower 64 bits of the corresponding vfp_v<n> reg.
+ vfp_d0,
+ vfp_d1,
+ vfp_d2,
+ vfp_d3,
+ vfp_d4,
+ vfp_d5,
+ vfp_d6,
+ vfp_d7,
+ vfp_d8,
+ vfp_d9,
+ vfp_d10,
+ vfp_d11,
+ vfp_d12,
+ vfp_d13,
+ vfp_d14,
+ vfp_d15,
+ vfp_d16,
+ vfp_d17,
+ vfp_d18,
+ vfp_d19,
+ vfp_d20,
+ vfp_d21,
+ vfp_d22,
+ vfp_d23,
+ vfp_d24,
+ vfp_d25,
+ vfp_d26,
+ vfp_d27,
+ vfp_d28,
+ vfp_d29,
+ vfp_d30,
+ vfp_d31
+};
+
+enum
+{
+ exc_far = 0,
+ exc_esr,
+ exc_exception
+};
+
+// These numbers come from the "DWARF for the ARM 64-bit Architecture (AArch64)" document.
+
+enum
+{
+ dwarf_x0 = 0,
+ dwarf_x1,
+ dwarf_x2,
+ dwarf_x3,
+ dwarf_x4,
+ dwarf_x5,
+ dwarf_x6,
+ dwarf_x7,
+ dwarf_x8,
+ dwarf_x9,
+ dwarf_x10,
+ dwarf_x11,
+ dwarf_x12,
+ dwarf_x13,
+ dwarf_x14,
+ dwarf_x15,
+ dwarf_x16,
+ dwarf_x17,
+ dwarf_x18,
+ dwarf_x19,
+ dwarf_x20,
+ dwarf_x21,
+ dwarf_x22,
+ dwarf_x23,
+ dwarf_x24,
+ dwarf_x25,
+ dwarf_x26,
+ dwarf_x27,
+ dwarf_x28,
+ dwarf_x29,
+ dwarf_x30,
+ dwarf_x31,
+ dwarf_pc = 32,
+ dwarf_elr_mode = 33,
+ dwarf_fp = dwarf_x29,
+ dwarf_lr = dwarf_x30,
+ dwarf_sp = dwarf_x31,
+ // 34-63 reserved
+
+ // V0-V31 (128 bit vector registers)
+ dwarf_v0 = 64,
+ dwarf_v1,
+ dwarf_v2,
+ dwarf_v3,
+ dwarf_v4,
+ dwarf_v5,
+ dwarf_v6,
+ dwarf_v7,
+ dwarf_v8,
+ dwarf_v9,
+ dwarf_v10,
+ dwarf_v11,
+ dwarf_v12,
+ dwarf_v13,
+ dwarf_v14,
+ dwarf_v15,
+ dwarf_v16,
+ dwarf_v17,
+ dwarf_v18,
+ dwarf_v19,
+ dwarf_v20,
+ dwarf_v21,
+ dwarf_v22,
+ dwarf_v23,
+ dwarf_v24,
+ dwarf_v25,
+ dwarf_v26,
+ dwarf_v27,
+ dwarf_v28,
+ dwarf_v29,
+ dwarf_v30,
+ dwarf_v31
+
+ // 96-127 reserved
+};
+
+enum
+{
+ debugserver_gpr_x0 = 0,
+ debugserver_gpr_x1,
+ debugserver_gpr_x2,
+ debugserver_gpr_x3,
+ debugserver_gpr_x4,
+ debugserver_gpr_x5,
+ debugserver_gpr_x6,
+ debugserver_gpr_x7,
+ debugserver_gpr_x8,
+ debugserver_gpr_x9,
+ debugserver_gpr_x10,
+ debugserver_gpr_x11,
+ debugserver_gpr_x12,
+ debugserver_gpr_x13,
+ debugserver_gpr_x14,
+ debugserver_gpr_x15,
+ debugserver_gpr_x16,
+ debugserver_gpr_x17,
+ debugserver_gpr_x18,
+ debugserver_gpr_x19,
+ debugserver_gpr_x20,
+ debugserver_gpr_x21,
+ debugserver_gpr_x22,
+ debugserver_gpr_x23,
+ debugserver_gpr_x24,
+ debugserver_gpr_x25,
+ debugserver_gpr_x26,
+ debugserver_gpr_x27,
+ debugserver_gpr_x28,
+ debugserver_gpr_fp, // x29
+ debugserver_gpr_lr, // x30
+ debugserver_gpr_sp, // sp aka xsp
+ debugserver_gpr_pc,
+ debugserver_gpr_cpsr,
+ debugserver_vfp_v0,
+ debugserver_vfp_v1,
+ debugserver_vfp_v2,
+ debugserver_vfp_v3,
+ debugserver_vfp_v4,
+ debugserver_vfp_v5,
+ debugserver_vfp_v6,
+ debugserver_vfp_v7,
+ debugserver_vfp_v8,
+ debugserver_vfp_v9,
+ debugserver_vfp_v10,
+ debugserver_vfp_v11,
+ debugserver_vfp_v12,
+ debugserver_vfp_v13,
+ debugserver_vfp_v14,
+ debugserver_vfp_v15,
+ debugserver_vfp_v16,
+ debugserver_vfp_v17,
+ debugserver_vfp_v18,
+ debugserver_vfp_v19,
+ debugserver_vfp_v20,
+ debugserver_vfp_v21,
+ debugserver_vfp_v22,
+ debugserver_vfp_v23,
+ debugserver_vfp_v24,
+ debugserver_vfp_v25,
+ debugserver_vfp_v26,
+ debugserver_vfp_v27,
+ debugserver_vfp_v28,
+ debugserver_vfp_v29,
+ debugserver_vfp_v30,
+ debugserver_vfp_v31,
+ debugserver_vfp_fpsr,
+ debugserver_vfp_fpcr
+};
+
+const char *g_contained_x0[] {"x0", NULL };
+const char *g_contained_x1[] {"x1", NULL };
+const char *g_contained_x2[] {"x2", NULL };
+const char *g_contained_x3[] {"x3", NULL };
+const char *g_contained_x4[] {"x4", NULL };
+const char *g_contained_x5[] {"x5", NULL };
+const char *g_contained_x6[] {"x6", NULL };
+const char *g_contained_x7[] {"x7", NULL };
+const char *g_contained_x8[] {"x8", NULL };
+const char *g_contained_x9[] {"x9", NULL };
+const char *g_contained_x10[] {"x10", NULL };
+const char *g_contained_x11[] {"x11", NULL };
+const char *g_contained_x12[] {"x12", NULL };
+const char *g_contained_x13[] {"x13", NULL };
+const char *g_contained_x14[] {"x14", NULL };
+const char *g_contained_x15[] {"x15", NULL };
+const char *g_contained_x16[] {"x16", NULL };
+const char *g_contained_x17[] {"x17", NULL };
+const char *g_contained_x18[] {"x18", NULL };
+const char *g_contained_x19[] {"x19", NULL };
+const char *g_contained_x20[] {"x20", NULL };
+const char *g_contained_x21[] {"x21", NULL };
+const char *g_contained_x22[] {"x22", NULL };
+const char *g_contained_x23[] {"x23", NULL };
+const char *g_contained_x24[] {"x24", NULL };
+const char *g_contained_x25[] {"x25", NULL };
+const char *g_contained_x26[] {"x26", NULL };
+const char *g_contained_x27[] {"x27", NULL };
+const char *g_contained_x28[] {"x28", NULL };
+
+const char *g_invalidate_x0[] {"x0", "w0", NULL };
+const char *g_invalidate_x1[] {"x1", "w1", NULL };
+const char *g_invalidate_x2[] {"x2", "w2", NULL };
+const char *g_invalidate_x3[] {"x3", "w3", NULL };
+const char *g_invalidate_x4[] {"x4", "w4", NULL };
+const char *g_invalidate_x5[] {"x5", "w5", NULL };
+const char *g_invalidate_x6[] {"x6", "w6", NULL };
+const char *g_invalidate_x7[] {"x7", "w7", NULL };
+const char *g_invalidate_x8[] {"x8", "w8", NULL };
+const char *g_invalidate_x9[] {"x9", "w9", NULL };
+const char *g_invalidate_x10[] {"x10", "w10", NULL };
+const char *g_invalidate_x11[] {"x11", "w11", NULL };
+const char *g_invalidate_x12[] {"x12", "w12", NULL };
+const char *g_invalidate_x13[] {"x13", "w13", NULL };
+const char *g_invalidate_x14[] {"x14", "w14", NULL };
+const char *g_invalidate_x15[] {"x15", "w15", NULL };
+const char *g_invalidate_x16[] {"x16", "w16", NULL };
+const char *g_invalidate_x17[] {"x17", "w17", NULL };
+const char *g_invalidate_x18[] {"x18", "w18", NULL };
+const char *g_invalidate_x19[] {"x19", "w19", NULL };
+const char *g_invalidate_x20[] {"x20", "w20", NULL };
+const char *g_invalidate_x21[] {"x21", "w21", NULL };
+const char *g_invalidate_x22[] {"x22", "w22", NULL };
+const char *g_invalidate_x23[] {"x23", "w23", NULL };
+const char *g_invalidate_x24[] {"x24", "w24", NULL };
+const char *g_invalidate_x25[] {"x25", "w25", NULL };
+const char *g_invalidate_x26[] {"x26", "w26", NULL };
+const char *g_invalidate_x27[] {"x27", "w27", NULL };
+const char *g_invalidate_x28[] {"x28", "w28", NULL };
+
+#define GPR_OFFSET_IDX(idx) (offsetof (DNBArchMachARM64::GPR, __x[idx]))
+
+#define GPR_OFFSET_NAME(reg) (offsetof (DNBArchMachARM64::GPR , __##reg))
+
+// These macros will auto define the register name, alt name, register size,
+// register offset, encoding, format and native register. This ensures that
+// the register state structures are defined correctly and have the correct
+// sizes and offsets.
+#define DEFINE_GPR_IDX(idx, reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_IDX(idx) , dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL, g_invalidate_x##idx }
+#define DEFINE_GPR_NAME(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_NAME(reg), dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL, NULL }
+#define DEFINE_PSEUDO_GPR_IDX(idx, reg) { e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, 4, 0, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_x##idx, g_invalidate_x##idx }
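+
+// For illustration (expansion sketch only): DEFINE_GPR_IDX (0, x0, "arg1", GENERIC_REGNUM_ARG1)
+// produces an 8-byte register named "x0" (alt name "arg1") at offset GPR_OFFSET_IDX(0), with
+// dwarf_x0 for both external register-number slots, GENERIC_REGNUM_ARG1 as the generic kind,
+// and g_invalidate_x0 ("x0", "w0") listing the registers to invalidate when it is written.
+// The w<n> pseudo registers defined with DEFINE_PSEUDO_GPR_IDX are 4 bytes wide and read
+// their value out of the containing x<n> register via g_contained_x<n>.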
+
+//_STRUCT_ARM_THREAD_STATE64
+//{
+// uint64_t x[29]; /* General purpose registers x0-x28 */
+// uint64_t fp; /* Frame pointer x29 */
+// uint64_t lr; /* Link register x30 */
+// uint64_t sp; /* Stack pointer x31 */
+// uint64_t pc; /* Program counter */
+// uint32_t cpsr; /* Current program status register */
+//};
+
+
+// General purpose registers
+const DNBRegisterInfo
+DNBArchMachARM64::g_gpr_registers[] =
+{
+ DEFINE_GPR_IDX ( 0, x0, "arg1", GENERIC_REGNUM_ARG1 ),
+ DEFINE_GPR_IDX ( 1, x1, "arg2", GENERIC_REGNUM_ARG2 ),
+ DEFINE_GPR_IDX ( 2, x2, "arg3", GENERIC_REGNUM_ARG3 ),
+ DEFINE_GPR_IDX ( 3, x3, "arg4", GENERIC_REGNUM_ARG4 ),
+ DEFINE_GPR_IDX ( 4, x4, "arg5", GENERIC_REGNUM_ARG5 ),
+ DEFINE_GPR_IDX ( 5, x5, "arg6", GENERIC_REGNUM_ARG6 ),
+ DEFINE_GPR_IDX ( 6, x6, "arg7", GENERIC_REGNUM_ARG7 ),
+ DEFINE_GPR_IDX ( 7, x7, "arg8", GENERIC_REGNUM_ARG8 ),
+ DEFINE_GPR_IDX ( 8, x8, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX ( 9, x9, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (10, x10, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (11, x11, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (12, x12, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (13, x13, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (14, x14, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (15, x15, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (16, x16, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (17, x17, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (18, x18, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (19, x19, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (20, x20, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (21, x21, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (22, x22, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (23, x23, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (24, x24, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (25, x25, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (26, x26, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (27, x27, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_IDX (28, x28, NULL, INVALID_NUB_REGNUM ),
+ DEFINE_GPR_NAME (fp, "x29", GENERIC_REGNUM_FP),
+ DEFINE_GPR_NAME (lr, "x30", GENERIC_REGNUM_RA),
+ DEFINE_GPR_NAME (sp, "xsp", GENERIC_REGNUM_SP),
+ DEFINE_GPR_NAME (pc, NULL, GENERIC_REGNUM_PC),
+
+    // In armv7 we specify that writing to the CPSR should invalidate r8-r12, sp, and lr.
+    // This should be specified for arm64 too, even though debugserver is only used for
+    // userland debugging.
+ { e_regSetGPR, gpr_cpsr, "cpsr", "flags", Uint, Hex, 4, GPR_OFFSET_NAME(cpsr), dwarf_elr_mode, dwarf_elr_mode, INVALID_NUB_REGNUM, debugserver_gpr_cpsr, NULL, NULL },
+
+ DEFINE_PSEUDO_GPR_IDX ( 0, w0),
+ DEFINE_PSEUDO_GPR_IDX ( 1, w1),
+ DEFINE_PSEUDO_GPR_IDX ( 2, w2),
+ DEFINE_PSEUDO_GPR_IDX ( 3, w3),
+ DEFINE_PSEUDO_GPR_IDX ( 4, w4),
+ DEFINE_PSEUDO_GPR_IDX ( 5, w5),
+ DEFINE_PSEUDO_GPR_IDX ( 6, w6),
+ DEFINE_PSEUDO_GPR_IDX ( 7, w7),
+ DEFINE_PSEUDO_GPR_IDX ( 8, w8),
+ DEFINE_PSEUDO_GPR_IDX ( 9, w9),
+ DEFINE_PSEUDO_GPR_IDX (10, w10),
+ DEFINE_PSEUDO_GPR_IDX (11, w11),
+ DEFINE_PSEUDO_GPR_IDX (12, w12),
+ DEFINE_PSEUDO_GPR_IDX (13, w13),
+ DEFINE_PSEUDO_GPR_IDX (14, w14),
+ DEFINE_PSEUDO_GPR_IDX (15, w15),
+ DEFINE_PSEUDO_GPR_IDX (16, w16),
+ DEFINE_PSEUDO_GPR_IDX (17, w17),
+ DEFINE_PSEUDO_GPR_IDX (18, w18),
+ DEFINE_PSEUDO_GPR_IDX (19, w19),
+ DEFINE_PSEUDO_GPR_IDX (20, w20),
+ DEFINE_PSEUDO_GPR_IDX (21, w21),
+ DEFINE_PSEUDO_GPR_IDX (22, w22),
+ DEFINE_PSEUDO_GPR_IDX (23, w23),
+ DEFINE_PSEUDO_GPR_IDX (24, w24),
+ DEFINE_PSEUDO_GPR_IDX (25, w25),
+ DEFINE_PSEUDO_GPR_IDX (26, w26),
+ DEFINE_PSEUDO_GPR_IDX (27, w27),
+ DEFINE_PSEUDO_GPR_IDX (28, w28)
+};
+
+const char *g_contained_v0[] {"v0", NULL };
+const char *g_contained_v1[] {"v1", NULL };
+const char *g_contained_v2[] {"v2", NULL };
+const char *g_contained_v3[] {"v3", NULL };
+const char *g_contained_v4[] {"v4", NULL };
+const char *g_contained_v5[] {"v5", NULL };
+const char *g_contained_v6[] {"v6", NULL };
+const char *g_contained_v7[] {"v7", NULL };
+const char *g_contained_v8[] {"v8", NULL };
+const char *g_contained_v9[] {"v9", NULL };
+const char *g_contained_v10[] {"v10", NULL };
+const char *g_contained_v11[] {"v11", NULL };
+const char *g_contained_v12[] {"v12", NULL };
+const char *g_contained_v13[] {"v13", NULL };
+const char *g_contained_v14[] {"v14", NULL };
+const char *g_contained_v15[] {"v15", NULL };
+const char *g_contained_v16[] {"v16", NULL };
+const char *g_contained_v17[] {"v17", NULL };
+const char *g_contained_v18[] {"v18", NULL };
+const char *g_contained_v19[] {"v19", NULL };
+const char *g_contained_v20[] {"v20", NULL };
+const char *g_contained_v21[] {"v21", NULL };
+const char *g_contained_v22[] {"v22", NULL };
+const char *g_contained_v23[] {"v23", NULL };
+const char *g_contained_v24[] {"v24", NULL };
+const char *g_contained_v25[] {"v25", NULL };
+const char *g_contained_v26[] {"v26", NULL };
+const char *g_contained_v27[] {"v27", NULL };
+const char *g_contained_v28[] {"v28", NULL };
+const char *g_contained_v29[] {"v29", NULL };
+const char *g_contained_v30[] {"v30", NULL };
+const char *g_contained_v31[] {"v31", NULL };
+
+const char *g_invalidate_v0[] {"v0", "d0", "s0", NULL };
+const char *g_invalidate_v1[] {"v1", "d1", "s1", NULL };
+const char *g_invalidate_v2[] {"v2", "d2", "s2", NULL };
+const char *g_invalidate_v3[] {"v3", "d3", "s3", NULL };
+const char *g_invalidate_v4[] {"v4", "d4", "s4", NULL };
+const char *g_invalidate_v5[] {"v5", "d5", "s5", NULL };
+const char *g_invalidate_v6[] {"v6", "d6", "s6", NULL };
+const char *g_invalidate_v7[] {"v7", "d7", "s7", NULL };
+const char *g_invalidate_v8[] {"v8", "d8", "s8", NULL };
+const char *g_invalidate_v9[] {"v9", "d9", "s9", NULL };
+const char *g_invalidate_v10[] {"v10", "d10", "s10", NULL };
+const char *g_invalidate_v11[] {"v11", "d11", "s11", NULL };
+const char *g_invalidate_v12[] {"v12", "d12", "s12", NULL };
+const char *g_invalidate_v13[] {"v13", "d13", "s13", NULL };
+const char *g_invalidate_v14[] {"v14", "d14", "s14", NULL };
+const char *g_invalidate_v15[] {"v15", "d15", "s15", NULL };
+const char *g_invalidate_v16[] {"v16", "d16", "s16", NULL };
+const char *g_invalidate_v17[] {"v17", "d17", "s17", NULL };
+const char *g_invalidate_v18[] {"v18", "d18", "s18", NULL };
+const char *g_invalidate_v19[] {"v19", "d19", "s19", NULL };
+const char *g_invalidate_v20[] {"v20", "d20", "s20", NULL };
+const char *g_invalidate_v21[] {"v21", "d21", "s21", NULL };
+const char *g_invalidate_v22[] {"v22", "d22", "s22", NULL };
+const char *g_invalidate_v23[] {"v23", "d23", "s23", NULL };
+const char *g_invalidate_v24[] {"v24", "d24", "s24", NULL };
+const char *g_invalidate_v25[] {"v25", "d25", "s25", NULL };
+const char *g_invalidate_v26[] {"v26", "d26", "s26", NULL };
+const char *g_invalidate_v27[] {"v27", "d27", "s27", NULL };
+const char *g_invalidate_v28[] {"v28", "d28", "s28", NULL };
+const char *g_invalidate_v29[] {"v29", "d29", "s29", NULL };
+const char *g_invalidate_v30[] {"v30", "d30", "s30", NULL };
+const char *g_invalidate_v31[] {"v31", "d31", "s31", NULL };
+
+#if defined (__arm64__) || defined (__aarch64__)
+#define VFP_V_OFFSET_IDX(idx) (offsetof (DNBArchMachARM64::FPU, __v) + (idx * 16) + offsetof (DNBArchMachARM64::Context, vfp))
+#else
+#define VFP_V_OFFSET_IDX(idx) (offsetof (DNBArchMachARM64::FPU, opaque) + (idx * 16) + offsetof (DNBArchMachARM64::Context, vfp))
+#endif
+#define VFP_OFFSET_NAME(reg) (offsetof (DNBArchMachARM64::FPU, reg) + offsetof (DNBArchMachARM64::Context, vfp))
+#define EXC_OFFSET(reg) (offsetof (DNBArchMachARM64::EXC, reg) + offsetof (DNBArchMachARM64::Context, exc))
+
+//#define FLOAT_FORMAT Float
+#define DEFINE_VFP_V_IDX(idx) { e_regSetVFP, vfp_v##idx, "v" #idx, "q" #idx, Vector, VectorOfUInt8, 16, VFP_V_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_v##idx, INVALID_NUB_REGNUM, debugserver_vfp_v##idx, NULL, g_invalidate_v##idx }
+#define DEFINE_PSEUDO_VFP_S_IDX(idx) { e_regSetVFP, vfp_s##idx, "s" #idx, NULL, IEEE754, Float, 4, 0, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_v##idx, g_invalidate_v##idx }
+#define DEFINE_PSEUDO_VFP_D_IDX(idx) { e_regSetVFP, vfp_d##idx, "d" #idx, NULL, IEEE754, Float, 8, 0, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_v##idx, g_invalidate_v##idx }
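+// Layout note (illustrative): each v register is 16 bytes wide, so for example
+// VFP_V_OFFSET_IDX(1) lands 16 bytes past v0 within the NEON state, and the
+// 4-byte fpsr/fpcr values are read from VFP_V_OFFSET_IDX(32) + 0 and + 4,
+// i.e. immediately past v31 (see the fpsr/fpcr table entries below).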
+
+// Floating point registers
+const DNBRegisterInfo
+DNBArchMachARM64::g_vfp_registers[] =
+{
+ DEFINE_VFP_V_IDX ( 0),
+ DEFINE_VFP_V_IDX ( 1),
+ DEFINE_VFP_V_IDX ( 2),
+ DEFINE_VFP_V_IDX ( 3),
+ DEFINE_VFP_V_IDX ( 4),
+ DEFINE_VFP_V_IDX ( 5),
+ DEFINE_VFP_V_IDX ( 6),
+ DEFINE_VFP_V_IDX ( 7),
+ DEFINE_VFP_V_IDX ( 8),
+ DEFINE_VFP_V_IDX ( 9),
+ DEFINE_VFP_V_IDX (10),
+ DEFINE_VFP_V_IDX (11),
+ DEFINE_VFP_V_IDX (12),
+ DEFINE_VFP_V_IDX (13),
+ DEFINE_VFP_V_IDX (14),
+ DEFINE_VFP_V_IDX (15),
+ DEFINE_VFP_V_IDX (16),
+ DEFINE_VFP_V_IDX (17),
+ DEFINE_VFP_V_IDX (18),
+ DEFINE_VFP_V_IDX (19),
+ DEFINE_VFP_V_IDX (20),
+ DEFINE_VFP_V_IDX (21),
+ DEFINE_VFP_V_IDX (22),
+ DEFINE_VFP_V_IDX (23),
+ DEFINE_VFP_V_IDX (24),
+ DEFINE_VFP_V_IDX (25),
+ DEFINE_VFP_V_IDX (26),
+ DEFINE_VFP_V_IDX (27),
+ DEFINE_VFP_V_IDX (28),
+ DEFINE_VFP_V_IDX (29),
+ DEFINE_VFP_V_IDX (30),
+ DEFINE_VFP_V_IDX (31),
+ { e_regSetVFP, vfp_fpsr, "fpsr", NULL, Uint, Hex, 4, VFP_V_OFFSET_IDX (32) + 0, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+ { e_regSetVFP, vfp_fpcr, "fpcr", NULL, Uint, Hex, 4, VFP_V_OFFSET_IDX (32) + 4, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+
+ DEFINE_PSEUDO_VFP_S_IDX (0),
+ DEFINE_PSEUDO_VFP_S_IDX (1),
+ DEFINE_PSEUDO_VFP_S_IDX (2),
+ DEFINE_PSEUDO_VFP_S_IDX (3),
+ DEFINE_PSEUDO_VFP_S_IDX (4),
+ DEFINE_PSEUDO_VFP_S_IDX (5),
+ DEFINE_PSEUDO_VFP_S_IDX (6),
+ DEFINE_PSEUDO_VFP_S_IDX (7),
+ DEFINE_PSEUDO_VFP_S_IDX (8),
+ DEFINE_PSEUDO_VFP_S_IDX (9),
+ DEFINE_PSEUDO_VFP_S_IDX (10),
+ DEFINE_PSEUDO_VFP_S_IDX (11),
+ DEFINE_PSEUDO_VFP_S_IDX (12),
+ DEFINE_PSEUDO_VFP_S_IDX (13),
+ DEFINE_PSEUDO_VFP_S_IDX (14),
+ DEFINE_PSEUDO_VFP_S_IDX (15),
+ DEFINE_PSEUDO_VFP_S_IDX (16),
+ DEFINE_PSEUDO_VFP_S_IDX (17),
+ DEFINE_PSEUDO_VFP_S_IDX (18),
+ DEFINE_PSEUDO_VFP_S_IDX (19),
+ DEFINE_PSEUDO_VFP_S_IDX (20),
+ DEFINE_PSEUDO_VFP_S_IDX (21),
+ DEFINE_PSEUDO_VFP_S_IDX (22),
+ DEFINE_PSEUDO_VFP_S_IDX (23),
+ DEFINE_PSEUDO_VFP_S_IDX (24),
+ DEFINE_PSEUDO_VFP_S_IDX (25),
+ DEFINE_PSEUDO_VFP_S_IDX (26),
+ DEFINE_PSEUDO_VFP_S_IDX (27),
+ DEFINE_PSEUDO_VFP_S_IDX (28),
+ DEFINE_PSEUDO_VFP_S_IDX (29),
+ DEFINE_PSEUDO_VFP_S_IDX (30),
+ DEFINE_PSEUDO_VFP_S_IDX (31),
+
+ DEFINE_PSEUDO_VFP_D_IDX (0),
+ DEFINE_PSEUDO_VFP_D_IDX (1),
+ DEFINE_PSEUDO_VFP_D_IDX (2),
+ DEFINE_PSEUDO_VFP_D_IDX (3),
+ DEFINE_PSEUDO_VFP_D_IDX (4),
+ DEFINE_PSEUDO_VFP_D_IDX (5),
+ DEFINE_PSEUDO_VFP_D_IDX (6),
+ DEFINE_PSEUDO_VFP_D_IDX (7),
+ DEFINE_PSEUDO_VFP_D_IDX (8),
+ DEFINE_PSEUDO_VFP_D_IDX (9),
+ DEFINE_PSEUDO_VFP_D_IDX (10),
+ DEFINE_PSEUDO_VFP_D_IDX (11),
+ DEFINE_PSEUDO_VFP_D_IDX (12),
+ DEFINE_PSEUDO_VFP_D_IDX (13),
+ DEFINE_PSEUDO_VFP_D_IDX (14),
+ DEFINE_PSEUDO_VFP_D_IDX (15),
+ DEFINE_PSEUDO_VFP_D_IDX (16),
+ DEFINE_PSEUDO_VFP_D_IDX (17),
+ DEFINE_PSEUDO_VFP_D_IDX (18),
+ DEFINE_PSEUDO_VFP_D_IDX (19),
+ DEFINE_PSEUDO_VFP_D_IDX (20),
+ DEFINE_PSEUDO_VFP_D_IDX (21),
+ DEFINE_PSEUDO_VFP_D_IDX (22),
+ DEFINE_PSEUDO_VFP_D_IDX (23),
+ DEFINE_PSEUDO_VFP_D_IDX (24),
+ DEFINE_PSEUDO_VFP_D_IDX (25),
+ DEFINE_PSEUDO_VFP_D_IDX (26),
+ DEFINE_PSEUDO_VFP_D_IDX (27),
+ DEFINE_PSEUDO_VFP_D_IDX (28),
+ DEFINE_PSEUDO_VFP_D_IDX (29),
+ DEFINE_PSEUDO_VFP_D_IDX (30),
+ DEFINE_PSEUDO_VFP_D_IDX (31)
+
+};
+
+
+//_STRUCT_ARM_EXCEPTION_STATE64
+//{
+// uint64_t far; /* Virtual Fault Address */
+// uint32_t esr; /* Exception syndrome */
+// uint32_t exception; /* number of arm exception taken */
+//};
+
+// Exception registers
+const DNBRegisterInfo
+DNBArchMachARM64::g_exc_registers[] =
+{
+ { e_regSetEXC, exc_far , "far" , NULL, Uint, Hex, 8, EXC_OFFSET(__far) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+ { e_regSetEXC, exc_esr , "esr" , NULL, Uint, Hex, 4, EXC_OFFSET(__esr) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+ { e_regSetEXC, exc_exception , "exception" , NULL, Uint, Hex, 4, EXC_OFFSET(__exception) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL }
+};
+
+// Number of registers in each register set
+const size_t DNBArchMachARM64::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
+const size_t DNBArchMachARM64::k_num_vfp_registers = sizeof(g_vfp_registers)/sizeof(DNBRegisterInfo);
+const size_t DNBArchMachARM64::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
+const size_t DNBArchMachARM64::k_num_all_registers = k_num_gpr_registers + k_num_vfp_registers + k_num_exc_registers;
+
+//----------------------------------------------------------------------
+// Register set definitions. The first definition, at register set index
+// zero, is for all registers, followed by the other register sets. The
+// register information for the "all registers" set need not be filled in.
+//----------------------------------------------------------------------
+const DNBRegisterSetInfo
+DNBArchMachARM64::g_reg_sets[] =
+{
+ { "ARM64 Registers", NULL, k_num_all_registers },
+ { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers },
+ { "Floating Point Registers", g_vfp_registers, k_num_vfp_registers },
+ { "Exception State Registers", g_exc_registers, k_num_exc_registers }
+};
+// Total number of register sets for this architecture
+const size_t DNBArchMachARM64::k_num_register_sets = sizeof(g_reg_sets)/sizeof(DNBRegisterSetInfo);
+
+
+const DNBRegisterSetInfo *
+DNBArchMachARM64::GetRegisterSetInfo(nub_size_t *num_reg_sets)
+{
+ *num_reg_sets = k_num_register_sets;
+ return g_reg_sets;
+}
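+// A minimal usage sketch (illustrative; assumes the DNBRegisterSetInfo field
+// names declared in DNBDefs.h):
+//
+//   nub_size_t num_sets = 0;
+//   const DNBRegisterSetInfo *sets = DNBArchMachARM64::GetRegisterSetInfo (&num_sets);
+//   for (nub_size_t i = 0; i < num_sets; ++i)
+//       printf ("%s: %llu registers\n", sets[i].name, (uint64_t)sets[i].num_registers);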
+
+bool
+DNBArchMachARM64::FixGenericRegisterNumber (uint32_t &set, uint32_t &reg)
+{
+ if (set == REGISTER_SET_GENERIC)
+ {
+ switch (reg)
+ {
+ case GENERIC_REGNUM_PC: // Program Counter
+ set = e_regSetGPR;
+ reg = gpr_pc;
+ break;
+
+ case GENERIC_REGNUM_SP: // Stack Pointer
+ set = e_regSetGPR;
+ reg = gpr_sp;
+ break;
+
+ case GENERIC_REGNUM_FP: // Frame Pointer
+ set = e_regSetGPR;
+ reg = gpr_fp;
+ break;
+
+ case GENERIC_REGNUM_RA: // Return Address
+ set = e_regSetGPR;
+ reg = gpr_lr;
+ break;
+
+ case GENERIC_REGNUM_FLAGS: // Processor flags register
+ set = e_regSetGPR;
+ reg = gpr_cpsr;
+ break;
+
+ case GENERIC_REGNUM_ARG1:
+ case GENERIC_REGNUM_ARG2:
+ case GENERIC_REGNUM_ARG3:
+ case GENERIC_REGNUM_ARG4:
+ case GENERIC_REGNUM_ARG5:
+ case GENERIC_REGNUM_ARG6:
+ set = e_regSetGPR;
+ reg = gpr_x0 + reg - GENERIC_REGNUM_ARG1;
+ break;
+
+ default:
+ return false;
+ }
+ }
+ return true;
+}
+bool
+DNBArchMachARM64::GetRegisterValue(uint32_t set, uint32_t reg, DNBRegisterValue *value)
+{
+ if (!FixGenericRegisterNumber (set, reg))
+ return false;
+
+ if (GetRegisterState(set, false) != KERN_SUCCESS)
+ return false;
+
+ const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
+ if (regInfo)
+ {
+ value->info = *regInfo;
+ switch (set)
+ {
+ case e_regSetGPR:
+ if (reg <= gpr_pc)
+ {
+ value->value.uint64 = m_state.context.gpr.__x[reg];
+ return true;
+ }
+ else if (reg == gpr_cpsr)
+ {
+ value->value.uint32 = m_state.context.gpr.__cpsr;
+ return true;
+ }
+ break;
+
+ case e_regSetVFP:
+
+ if (reg >= vfp_v0 && reg <= vfp_v31)
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ memcpy (&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_v0], 16);
+#else
+ memcpy (&value->value.v_uint8, ((uint8_t *) &m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16), 16);
+#endif
+ return true;
+ }
+ else if (reg == vfp_fpsr)
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ memcpy (&value->value.uint32, &m_state.context.vfp.__fpsr, 4);
+#else
+ memcpy (&value->value.uint32, ((uint8_t *) &m_state.context.vfp.opaque) + (32 * 16) + 0, 4);
+#endif
+ return true;
+ }
+ else if (reg == vfp_fpcr)
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ memcpy (&value->value.uint32, &m_state.context.vfp.__fpcr, 4);
+#else
+ memcpy (&value->value.uint32, ((uint8_t *) &m_state.context.vfp.opaque) + (32 * 16) + 4, 4);
+#endif
+ return true;
+ }
+ else if (reg >= vfp_s0 && reg <= vfp_s31)
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ memcpy (&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_s0], 4);
+#else
+ memcpy (&value->value.v_uint8, ((uint8_t *) &m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16), 4);
+#endif
+ return true;
+ }
+ else if (reg >= vfp_d0 && reg <= vfp_d31)
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ memcpy (&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_d0], 8);
+#else
+ memcpy (&value->value.v_uint8, ((uint8_t *) &m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16), 8);
+#endif
+ return true;
+ }
+ break;
+
+ case e_regSetEXC:
+ if (reg == exc_far)
+ {
+ value->value.uint64 = m_state.context.exc.__far;
+ return true;
+ }
+ else if (reg == exc_esr)
+ {
+ value->value.uint32 = m_state.context.exc.__esr;
+ return true;
+ }
+ else if (reg == exc_exception)
+ {
+ value->value.uint32 = m_state.context.exc.__exception;
+ return true;
+ }
+ break;
+ }
+ }
+ return false;
+}
+
+bool
+DNBArchMachARM64::SetRegisterValue(uint32_t set, uint32_t reg, const DNBRegisterValue *value)
+{
+ if (!FixGenericRegisterNumber (set, reg))
+ return false;
+
+ if (GetRegisterState(set, false) != KERN_SUCCESS)
+ return false;
+
+ bool success = false;
+ const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
+ if (regInfo)
+ {
+ switch (set)
+ {
+ case e_regSetGPR:
+ if (reg <= gpr_pc)
+ {
+ m_state.context.gpr.__x[reg] = value->value.uint64;
+ success = true;
+ }
+ else if (reg == gpr_cpsr)
+ {
+ m_state.context.gpr.__cpsr = value->value.uint32;
+ success = true;
+ }
+ break;
+
+ case e_regSetVFP:
+ if (reg >= vfp_v0 && reg <= vfp_v31)
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ memcpy (&m_state.context.vfp.__v[reg - vfp_v0], &value->value.v_uint8, 16);
+#else
+ memcpy (((uint8_t *) &m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16), &value->value.v_uint8, 16);
+#endif
+ success = true;
+ }
+ else if (reg == vfp_fpsr)
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ memcpy (&m_state.context.vfp.__fpsr, &value->value.uint32, 4);
+#else
+ memcpy (((uint8_t *) &m_state.context.vfp.opaque) + (32 * 16) + 0, &value->value.uint32, 4);
+#endif
+ success = true;
+ }
+ else if (reg == vfp_fpcr)
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ memcpy (&m_state.context.vfp.__fpcr, &value->value.uint32, 4);
+#else
+ memcpy (((uint8_t *) m_state.context.vfp.opaque) + (32 * 16) + 4, &value->value.uint32, 4);
+#endif
+ success = true;
+ }
+ else if (reg >= vfp_s0 && reg <= vfp_s31)
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ memcpy (&m_state.context.vfp.__v[reg - vfp_s0], &value->value.v_uint8, 4);
+#else
+ memcpy (((uint8_t *) &m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16), &value->value.v_uint8, 4);
+#endif
+ success = true;
+ }
+ else if (reg >= vfp_d0 && reg <= vfp_d31)
+ {
+#if defined (__arm64__) || defined (__aarch64__)
+ memcpy (&m_state.context.vfp.__v[reg - vfp_d0], &value->value.v_uint8, 8);
+#else
+ memcpy (((uint8_t *) &m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16), &value->value.v_uint8, 8);
+#endif
+ success = true;
+ }
+ break;
+
+ case e_regSetEXC:
+ if (reg == exc_far)
+ {
+ m_state.context.exc.__far = value->value.uint64;
+ success = true;
+ }
+ else if (reg == exc_esr)
+ {
+ m_state.context.exc.__esr = value->value.uint32;
+ success = true;
+ }
+ else if (reg == exc_exception)
+ {
+ m_state.context.exc.__exception = value->value.uint32;
+ success = true;
+ }
+ break;
+ }
+
+ }
+ if (success)
+ return SetRegisterState(set) == KERN_SUCCESS;
+ return false;
+}
+
+kern_return_t
+DNBArchMachARM64::GetRegisterState(int set, bool force)
+{
+ switch (set)
+ {
+ case e_regSetALL: return GetGPRState(force) |
+ GetVFPState(force) |
+ GetEXCState(force) |
+ GetDBGState(force);
+ case e_regSetGPR: return GetGPRState(force);
+ case e_regSetVFP: return GetVFPState(force);
+ case e_regSetEXC: return GetEXCState(force);
+ case e_regSetDBG: return GetDBGState(force);
+ default: break;
+ }
+ return KERN_INVALID_ARGUMENT;
+}
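+// Note (illustrative): OR-ing the individual results in e_regSetALL above works
+// because KERN_SUCCESS is 0, so the combined value is zero only if every
+// register set was fetched successfully; a nonzero result only signals "some
+// error", not a specific kern_return_t.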
+
+kern_return_t
+DNBArchMachARM64::SetRegisterState(int set)
+{
+ // Make sure we have a valid context to set.
+ kern_return_t err = GetRegisterState(set, false);
+ if (err != KERN_SUCCESS)
+ return err;
+
+ switch (set)
+ {
+ case e_regSetALL: return SetGPRState() |
+ SetVFPState() |
+ SetEXCState() |
+ SetDBGState(false);
+ case e_regSetGPR: return SetGPRState();
+ case e_regSetVFP: return SetVFPState();
+ case e_regSetEXC: return SetEXCState();
+ case e_regSetDBG: return SetDBGState(false);
+ default: break;
+ }
+ return KERN_INVALID_ARGUMENT;
+}
+
+bool
+DNBArchMachARM64::RegisterSetStateIsValid (int set) const
+{
+ return m_state.RegsAreValid(set);
+}
+
+
+nub_size_t
+DNBArchMachARM64::GetRegisterContext (void *buf, nub_size_t buf_len)
+{
+ nub_size_t size = sizeof (m_state.context.gpr) +
+ sizeof (m_state.context.vfp) +
+ sizeof (m_state.context.exc);
+
+ if (buf && buf_len)
+ {
+ if (size > buf_len)
+ size = buf_len;
+
+ bool force = false;
+ if (GetGPRState(force) | GetVFPState(force) | GetEXCState(force))
+ return 0;
+
+ // Copy each struct individually to avoid any padding that might be between the structs in m_state.context
+ uint8_t *p = (uint8_t *)buf;
+ ::memcpy (p, &m_state.context.gpr, sizeof(m_state.context.gpr));
+ p += sizeof(m_state.context.gpr);
+ ::memcpy (p, &m_state.context.vfp, sizeof(m_state.context.vfp));
+ p += sizeof(m_state.context.vfp);
+ ::memcpy (p, &m_state.context.exc, sizeof(m_state.context.exc));
+ p += sizeof(m_state.context.exc);
+
+ size_t bytes_written = p - (uint8_t *)buf;
+ UNUSED_IF_ASSERT_DISABLED(bytes_written);
+ assert (bytes_written == size);
+ }
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size);
+ // Return the size of the register context even if NULL was passed in
+ return size;
+}
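+// Usage sketch (illustrative, arch being some DNBArchMachARM64 instance): a
+// caller can size the buffer by passing NULL first, since the full size is
+// returned either way:
+//
+//   nub_size_t len = arch.GetRegisterContext (NULL, 0);
+//   std::vector<uint8_t> buf (len);
+//   arch.GetRegisterContext (buf.data(), buf.size());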
+
+nub_size_t
+DNBArchMachARM64::SetRegisterContext (const void *buf, nub_size_t buf_len)
+{
+ nub_size_t size = sizeof (m_state.context.gpr) +
+ sizeof (m_state.context.vfp) +
+ sizeof (m_state.context.exc);
+
+ if (buf == NULL || buf_len == 0)
+ size = 0;
+
+ if (size)
+ {
+ if (size > buf_len)
+ size = buf_len;
+
+ // Copy each struct individually to avoid any padding that might be between the structs in m_state.context
+ uint8_t *p = (uint8_t *)buf;
+ ::memcpy (&m_state.context.gpr, p, sizeof(m_state.context.gpr));
+ p += sizeof(m_state.context.gpr);
+ ::memcpy (&m_state.context.vfp, p, sizeof(m_state.context.vfp));
+ p += sizeof(m_state.context.vfp);
+ ::memcpy (&m_state.context.exc, p, sizeof(m_state.context.exc));
+ p += sizeof(m_state.context.exc);
+
+ size_t bytes_written = p - (uint8_t *)buf;
+ UNUSED_IF_ASSERT_DISABLED(bytes_written);
+ assert (bytes_written == size);
+ SetGPRState();
+ SetVFPState();
+ SetEXCState();
+ }
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf, buf_len, size);
+ return size;
+}
+
+uint32_t
+DNBArchMachARM64::SaveRegisterState ()
+{
+ kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
+ DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (SetGPRState() for stop_count = %u)", m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());
+
+ // Always re-read the registers because above we call thread_abort_safely();
+ bool force = true;
+
+ if ((kret = GetGPRState(force)) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () error: GPR regs failed to read: %u ", kret);
+ }
+ else if ((kret = GetVFPState(force)) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () error: %s regs failed to read: %u", "VFP", kret);
+ }
+ else
+ {
+ const uint32_t save_id = GetNextRegisterStateSaveID ();
+ m_saved_register_states[save_id] = m_state.context;
+ return save_id;
+ }
+ return UINT32_MAX;
+}
+
+bool
+DNBArchMachARM64::RestoreRegisterState (uint32_t save_id)
+{
+ SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id);
+ if (pos != m_saved_register_states.end())
+ {
+ m_state.context.gpr = pos->second.gpr;
+ m_state.context.vfp = pos->second.vfp;
+ kern_return_t kret;
+ bool success = true;
+ if ((kret = SetGPRState()) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState (save_id = %u) error: GPR regs failed to write: %u", save_id, kret);
+ success = false;
+ }
+ else if ((kret = SetVFPState()) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState (save_id = %u) error: %s regs failed to write: %u", save_id, "VFP", kret);
+ success = false;
+ }
+ m_saved_register_states.erase(pos);
+ return success;
+ }
+ return false;
+}
+
+
+#endif // #if defined (ARM_THREAD_STATE64_COUNT)
+#endif // #if defined (__arm__) || defined (__arm64__) || defined (__aarch64__)
diff --git a/tools/debugserver/source/MacOSX/arm64/DNBArchImplARM64.h b/tools/debugserver/source/MacOSX/arm64/DNBArchImplARM64.h
new file mode 100644
index 000000000000..7e68e411a765
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/arm64/DNBArchImplARM64.h
@@ -0,0 +1,272 @@
+//===-- DNBArchMachARM64.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef __DNBArchImplARM64_h__
+#define __DNBArchImplARM64_h__
+
+#if defined (__arm__) || defined (__arm64__) || defined (__aarch64__)
+
+#include <map>
+#include <mach/thread_status.h>
+
+#if defined (ARM_THREAD_STATE64_COUNT)
+
+#include "DNBArch.h"
+
+class MachThread;
+
+class DNBArchMachARM64 : public DNBArchProtocol
+{
+public:
+ enum { kMaxNumThumbITBreakpoints = 4 };
+
+ DNBArchMachARM64(MachThread *thread) :
+ m_thread(thread),
+ m_state(),
+ m_disabled_watchpoints(),
+ m_watchpoint_hw_index(-1),
+ m_watchpoint_did_occur(false),
+ m_watchpoint_resume_single_step_enabled(false),
+ m_saved_register_states()
+ {
+ m_disabled_watchpoints.resize (16);
+ memset(&m_dbg_save, 0, sizeof(m_dbg_save));
+ }
+
+ virtual ~DNBArchMachARM64()
+ {
+ }
+
+ static void Initialize();
+ static const DNBRegisterSetInfo *
+ GetRegisterSetInfo(nub_size_t *num_reg_sets);
+
+ virtual bool GetRegisterValue(uint32_t set, uint32_t reg, DNBRegisterValue *value);
+ virtual bool SetRegisterValue(uint32_t set, uint32_t reg, const DNBRegisterValue *value);
+ virtual nub_size_t GetRegisterContext (void *buf, nub_size_t buf_len);
+ virtual nub_size_t SetRegisterContext (const void *buf, nub_size_t buf_len);
+ virtual uint32_t SaveRegisterState ();
+ virtual bool RestoreRegisterState (uint32_t save_id);
+
+ virtual kern_return_t GetRegisterState (int set, bool force);
+ virtual kern_return_t SetRegisterState (int set);
+ virtual bool RegisterSetStateIsValid (int set) const;
+
+ virtual uint64_t GetPC(uint64_t failValue); // Get program counter
+ virtual kern_return_t SetPC(uint64_t value);
+ virtual uint64_t GetSP(uint64_t failValue); // Get stack pointer
+ virtual void ThreadWillResume();
+ virtual bool ThreadDidStop();
+ virtual bool NotifyException(MachException::Data& exc);
+
+ static DNBArchProtocol *Create (MachThread *thread);
+ static const uint8_t * SoftwareBreakpointOpcode (nub_size_t byte_size);
+ static uint32_t GetCPUType();
+
+ virtual uint32_t NumSupportedHardwareWatchpoints();
+ virtual uint32_t EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write, bool also_set_on_task);
+ virtual bool DisableHardwareWatchpoint (uint32_t hw_break_index, bool also_set_on_task);
+ virtual bool DisableHardwareWatchpoint_helper (uint32_t hw_break_index, bool also_set_on_task);
+
+protected:
+
+
+ kern_return_t EnableHardwareSingleStep (bool enable);
+ static bool FixGenericRegisterNumber (uint32_t &set, uint32_t &reg);
+
+ typedef enum RegisterSetTag
+ {
+ e_regSetALL = REGISTER_SET_ALL,
+ e_regSetGPR, // ARM_THREAD_STATE64,
+ e_regSetVFP, // ARM_NEON_STATE64,
+ e_regSetEXC, // ARM_EXCEPTION_STATE64,
+ e_regSetDBG, // ARM_DEBUG_STATE64,
+ kNumRegisterSets
+ } RegisterSet;
+
+ enum
+ {
+ e_regSetGPRCount = ARM_THREAD_STATE64_COUNT,
+ e_regSetVFPCount = ARM_NEON_STATE64_COUNT,
+ e_regSetEXCCount = ARM_EXCEPTION_STATE64_COUNT,
+ e_regSetDBGCount = ARM_DEBUG_STATE64_COUNT,
+ };
+
+ enum
+ {
+ Read = 0,
+ Write = 1,
+ kNumErrors = 2
+ };
+
+ typedef arm_thread_state64_t GPR;
+ typedef arm_neon_state64_t FPU;
+ typedef arm_exception_state64_t EXC;
+
+ static const DNBRegisterInfo g_gpr_registers[];
+ static const DNBRegisterInfo g_vfp_registers[];
+ static const DNBRegisterInfo g_exc_registers[];
+ static const DNBRegisterSetInfo g_reg_sets[];
+
+ static const size_t k_num_gpr_registers;
+ static const size_t k_num_vfp_registers;
+ static const size_t k_num_exc_registers;
+ static const size_t k_num_all_registers;
+ static const size_t k_num_register_sets;
+
+ struct Context
+ {
+ GPR gpr;
+ FPU vfp;
+ EXC exc;
+ };
+
+ struct State
+ {
+ Context context;
+ arm_debug_state64_t dbg;
+ kern_return_t gpr_errs[2]; // Read/Write errors
+ kern_return_t vfp_errs[2]; // Read/Write errors
+ kern_return_t exc_errs[2]; // Read/Write errors
+ kern_return_t dbg_errs[2]; // Read/Write errors
+ State()
+ {
+ uint32_t i;
+ for (i=0; i<kNumErrors; i++)
+ {
+ gpr_errs[i] = -1;
+ vfp_errs[i] = -1;
+ exc_errs[i] = -1;
+ dbg_errs[i] = -1;
+ }
+ }
+ void InvalidateRegisterSetState(int set)
+ {
+ SetError (set, Read, -1);
+ }
+
+ void
+ InvalidateAllRegisterStates()
+ {
+ SetError (e_regSetALL, Read, -1);
+ }
+
+ kern_return_t GetError (int set, uint32_t err_idx) const
+ {
+ if (err_idx < kNumErrors)
+ {
+ switch (set)
+ {
+ // When getting all errors, just OR all values together to see if
+ // we got any kind of error.
+ case e_regSetALL: return gpr_errs[err_idx] |
+ vfp_errs[err_idx] |
+ exc_errs[err_idx] |
+ dbg_errs[err_idx] ;
+ case e_regSetGPR: return gpr_errs[err_idx];
+ case e_regSetVFP: return vfp_errs[err_idx];
+ case e_regSetEXC: return exc_errs[err_idx];
+ //case e_regSetDBG: return dbg_errs[err_idx];
+ default: break;
+ }
+ }
+ return -1;
+ }
+ bool SetError (int set, uint32_t err_idx, kern_return_t err)
+ {
+ if (err_idx < kNumErrors)
+ {
+ switch (set)
+ {
+ case e_regSetALL:
+ gpr_errs[err_idx] = err;
+ vfp_errs[err_idx] = err;
+ dbg_errs[err_idx] = err;
+ exc_errs[err_idx] = err;
+ return true;
+
+ case e_regSetGPR:
+ gpr_errs[err_idx] = err;
+ return true;
+
+ case e_regSetVFP:
+ vfp_errs[err_idx] = err;
+ return true;
+
+ case e_regSetEXC:
+ exc_errs[err_idx] = err;
+ return true;
+
+// case e_regSetDBG:
+// dbg_errs[err_idx] = err;
+// return true;
+ default: break;
+ }
+ }
+ return false;
+ }
+ bool RegsAreValid (int set) const
+ {
+ return GetError(set, Read) == KERN_SUCCESS;
+ }
+ };
+
+ kern_return_t GetGPRState (bool force);
+ kern_return_t GetVFPState (bool force);
+ kern_return_t GetEXCState (bool force);
+ kern_return_t GetDBGState (bool force);
+
+ kern_return_t SetGPRState ();
+ kern_return_t SetVFPState ();
+ kern_return_t SetEXCState ();
+ kern_return_t SetDBGState (bool also_set_on_task);
+
+    // Helper functions for watchpoint implementations.
+
+ typedef arm_debug_state64_t DBG;
+
+ void ClearWatchpointOccurred();
+ bool HasWatchpointOccurred();
+ bool IsWatchpointEnabled(const DBG &debug_state, uint32_t hw_index);
+ nub_addr_t GetWatchpointAddressByIndex (uint32_t hw_index);
+ nub_addr_t GetWatchAddress(const DBG &debug_state, uint32_t hw_index);
+ virtual bool ReenableHardwareWatchpoint (uint32_t hw_break_index);
+ virtual bool ReenableHardwareWatchpoint_helper (uint32_t hw_break_index);
+ virtual uint32_t GetHardwareWatchpointHit(nub_addr_t &addr);
+
+
+ class disabled_watchpoint {
+ public:
+ disabled_watchpoint () { addr = 0; control = 0; }
+ nub_addr_t addr;
+ uint32_t control;
+ };
+
+protected:
+ MachThread * m_thread;
+ State m_state;
+ arm_debug_state64_t m_dbg_save;
+
+ // arm64 doesn't keep the disabled watchpoint values in the debug register context like armv7;
+ // we need to save them aside when we disable them temporarily.
+ std::vector<disabled_watchpoint> m_disabled_watchpoints;
+
+ // The following member variables should be updated atomically.
+ int32_t m_watchpoint_hw_index;
+ bool m_watchpoint_did_occur;
+ bool m_watchpoint_resume_single_step_enabled;
+
+ typedef std::map<uint32_t, Context> SaveRegisterStates;
+ SaveRegisterStates m_saved_register_states;
+};
+
+#endif // #if defined (ARM_THREAD_STATE64_COUNT)
+#endif // #if defined (__arm__)
+#endif // #ifndef __DNBArchImplARM64_h__
diff --git a/tools/debugserver/source/MacOSX/dbgnub-mig.defs b/tools/debugserver/source/MacOSX/dbgnub-mig.defs
new file mode 100644
index 000000000000..cd5be1700704
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/dbgnub-mig.defs
@@ -0,0 +1,5 @@
+/*
+ * nub.defs
+ */
+
+#import <mach/mach_exc.defs>
diff --git a/tools/debugserver/source/MacOSX/i386/CMakeLists.txt b/tools/debugserver/source/MacOSX/i386/CMakeLists.txt
new file mode 100644
index 000000000000..dee2c1ea96b0
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/i386/CMakeLists.txt
@@ -0,0 +1,4 @@
+include_directories(${LLDB_SOURCE_DIR}/tools/debugserver/source)
+add_library(lldbDebugserverMacOSX_I386
+ DNBArchImplI386.cpp
+ )
diff --git a/tools/debugserver/source/MacOSX/i386/DNBArchImplI386.cpp b/tools/debugserver/source/MacOSX/i386/DNBArchImplI386.cpp
new file mode 100644
index 000000000000..93d4d894300b
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/i386/DNBArchImplI386.cpp
@@ -0,0 +1,1901 @@
+//===-- DNBArchImplI386.cpp -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/25/07.
+//
+//===----------------------------------------------------------------------===//
+
+#if defined (__i386__) || defined (__x86_64__)
+
+#include <sys/cdefs.h>
+
+#include "MacOSX/i386/DNBArchImplI386.h"
+#include "DNBLog.h"
+#include "MachThread.h"
+#include "MachProcess.h"
+
+extern "C" bool CPUHasAVX(); // Defined over in DNBArchImplX86_64.cpp
+
+#if defined (LLDB_DEBUGSERVER_RELEASE) || defined (LLDB_DEBUGSERVER_DEBUG)
+enum debugState {
+ debugStateUnknown,
+ debugStateOff,
+ debugStateOn
+};
+
+static debugState sFPUDebugState = debugStateUnknown;
+static debugState sAVXForceState = debugStateUnknown;
+
+static bool DebugFPURegs ()
+{
+ if (sFPUDebugState == debugStateUnknown)
+ {
+ if (getenv("DNB_DEBUG_FPU_REGS"))
+ sFPUDebugState = debugStateOn;
+ else
+ sFPUDebugState = debugStateOff;
+ }
+
+ return (sFPUDebugState == debugStateOn);
+}
+
+static bool ForceAVXRegs ()
+{
+    if (sAVXForceState == debugStateUnknown)
+ {
+ if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS"))
+ sAVXForceState = debugStateOn;
+ else
+ sAVXForceState = debugStateOff;
+ }
+
+ return (sAVXForceState == debugStateOn);
+}
+
+#define DEBUG_FPU_REGS (DebugFPURegs())
+#define FORCE_AVX_REGS (ForceAVXRegs())
+#else
+#define DEBUG_FPU_REGS (0)
+#define FORCE_AVX_REGS (0)
+#endif
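+// Note (illustrative): the DNB_DEBUG_FPU_REGS and DNB_DEBUG_X86_FORCE_AVX_REGS
+// environment variables are only honored when debugserver is built with
+// LLDB_DEBUGSERVER_RELEASE or LLDB_DEBUGSERVER_DEBUG defined; otherwise both
+// macros are hard-wired to 0 above.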
+
+enum
+{
+ gpr_eax = 0,
+ gpr_ebx = 1,
+ gpr_ecx = 2,
+ gpr_edx = 3,
+ gpr_edi = 4,
+ gpr_esi = 5,
+ gpr_ebp = 6,
+ gpr_esp = 7,
+ gpr_ss = 8,
+ gpr_eflags = 9,
+ gpr_eip = 10,
+ gpr_cs = 11,
+ gpr_ds = 12,
+ gpr_es = 13,
+ gpr_fs = 14,
+ gpr_gs = 15,
+ gpr_ax ,
+ gpr_bx ,
+ gpr_cx ,
+ gpr_dx ,
+ gpr_di ,
+ gpr_si ,
+ gpr_bp ,
+ gpr_sp ,
+ gpr_ah ,
+ gpr_bh ,
+ gpr_ch ,
+ gpr_dh ,
+ gpr_al ,
+ gpr_bl ,
+ gpr_cl ,
+ gpr_dl ,
+ gpr_dil,
+ gpr_sil,
+ gpr_bpl,
+ gpr_spl,
+ k_num_gpr_regs
+};
+
+enum {
+ fpu_fcw,
+ fpu_fsw,
+ fpu_ftw,
+ fpu_fop,
+ fpu_ip,
+ fpu_cs,
+ fpu_dp,
+ fpu_ds,
+ fpu_mxcsr,
+ fpu_mxcsrmask,
+ fpu_stmm0,
+ fpu_stmm1,
+ fpu_stmm2,
+ fpu_stmm3,
+ fpu_stmm4,
+ fpu_stmm5,
+ fpu_stmm6,
+ fpu_stmm7,
+ fpu_xmm0,
+ fpu_xmm1,
+ fpu_xmm2,
+ fpu_xmm3,
+ fpu_xmm4,
+ fpu_xmm5,
+ fpu_xmm6,
+ fpu_xmm7,
+ fpu_ymm0,
+ fpu_ymm1,
+ fpu_ymm2,
+ fpu_ymm3,
+ fpu_ymm4,
+ fpu_ymm5,
+ fpu_ymm6,
+ fpu_ymm7,
+ k_num_fpu_regs,
+
+ // Aliases
+ fpu_fctrl = fpu_fcw,
+ fpu_fstat = fpu_fsw,
+ fpu_ftag = fpu_ftw,
+ fpu_fiseg = fpu_cs,
+ fpu_fioff = fpu_ip,
+ fpu_foseg = fpu_ds,
+ fpu_fooff = fpu_dp
+};
+
+enum {
+ exc_trapno,
+ exc_err,
+ exc_faultvaddr,
+ k_num_exc_regs,
+};
+
+
+enum
+{
+ ehframe_eax = 0,
+ ehframe_ecx,
+ ehframe_edx,
+ ehframe_ebx,
+
+ // On i386 Darwin the eh_frame register numbers for ebp and esp are reversed from DWARF.
+ // It's due to an ancient compiler bug in the output of the eh_frame.
+ // Specifically, on i386 darwin eh_frame, 4 is ebp, 5 is esp.
+ // On i386 darwin debug_frame (and debug_info), 4 is esp, 5 is ebp.
+ ehframe_ebp,
+ ehframe_esp,
+ ehframe_esi,
+ ehframe_edi,
+ ehframe_eip,
+ ehframe_eflags
+};
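+// Concrete effect (illustrative): i386 Darwin eh_frame uses 4 for ebp and 5 for
+// esp, whereas DWARF debug_frame/debug_info uses 4 for esp and 5 for ebp, which
+// is why this enum and the dwarf_* enum below diverge only in those two slots.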
+
+enum
+{
+ dwarf_eax = 0,
+ dwarf_ecx,
+ dwarf_edx,
+ dwarf_ebx,
+ dwarf_esp,
+ dwarf_ebp,
+ dwarf_esi,
+ dwarf_edi,
+ dwarf_eip,
+ dwarf_eflags,
+ dwarf_stmm0 = 11,
+ dwarf_stmm1,
+ dwarf_stmm2,
+ dwarf_stmm3,
+ dwarf_stmm4,
+ dwarf_stmm5,
+ dwarf_stmm6,
+ dwarf_stmm7,
+ dwarf_xmm0 = 21,
+ dwarf_xmm1,
+ dwarf_xmm2,
+ dwarf_xmm3,
+ dwarf_xmm4,
+ dwarf_xmm5,
+ dwarf_xmm6,
+ dwarf_xmm7,
+ dwarf_ymm0 = dwarf_xmm0,
+ dwarf_ymm1 = dwarf_xmm1,
+ dwarf_ymm2 = dwarf_xmm2,
+ dwarf_ymm3 = dwarf_xmm3,
+ dwarf_ymm4 = dwarf_xmm4,
+ dwarf_ymm5 = dwarf_xmm5,
+ dwarf_ymm6 = dwarf_xmm6,
+ dwarf_ymm7 = dwarf_xmm7,
+};
+
+enum
+{
+ debugserver_eax = 0,
+ debugserver_ecx = 1,
+ debugserver_edx = 2,
+ debugserver_ebx = 3,
+ debugserver_esp = 4,
+ debugserver_ebp = 5,
+ debugserver_esi = 6,
+ debugserver_edi = 7,
+ debugserver_eip = 8,
+ debugserver_eflags = 9,
+ debugserver_cs = 10,
+ debugserver_ss = 11,
+ debugserver_ds = 12,
+ debugserver_es = 13,
+ debugserver_fs = 14,
+ debugserver_gs = 15,
+ debugserver_stmm0 = 16,
+ debugserver_stmm1 = 17,
+ debugserver_stmm2 = 18,
+ debugserver_stmm3 = 19,
+ debugserver_stmm4 = 20,
+ debugserver_stmm5 = 21,
+ debugserver_stmm6 = 22,
+ debugserver_stmm7 = 23,
+ debugserver_fctrl = 24, debugserver_fcw = debugserver_fctrl,
+ debugserver_fstat = 25, debugserver_fsw = debugserver_fstat,
+ debugserver_ftag = 26, debugserver_ftw = debugserver_ftag,
+ debugserver_fiseg = 27, debugserver_fpu_cs = debugserver_fiseg,
+ debugserver_fioff = 28, debugserver_ip = debugserver_fioff,
+ debugserver_foseg = 29, debugserver_fpu_ds = debugserver_foseg,
+ debugserver_fooff = 30, debugserver_dp = debugserver_fooff,
+ debugserver_fop = 31,
+ debugserver_xmm0 = 32,
+ debugserver_xmm1 = 33,
+ debugserver_xmm2 = 34,
+ debugserver_xmm3 = 35,
+ debugserver_xmm4 = 36,
+ debugserver_xmm5 = 37,
+ debugserver_xmm6 = 38,
+ debugserver_xmm7 = 39,
+ debugserver_mxcsr = 40,
+ debugserver_mm0 = 41,
+ debugserver_mm1 = 42,
+ debugserver_mm2 = 43,
+ debugserver_mm3 = 44,
+ debugserver_mm4 = 45,
+ debugserver_mm5 = 46,
+ debugserver_mm6 = 47,
+ debugserver_mm7 = 48,
+ debugserver_ymm0 = debugserver_xmm0,
+ debugserver_ymm1 = debugserver_xmm1,
+ debugserver_ymm2 = debugserver_xmm2,
+ debugserver_ymm3 = debugserver_xmm3,
+ debugserver_ymm4 = debugserver_xmm4,
+ debugserver_ymm5 = debugserver_xmm5,
+ debugserver_ymm6 = debugserver_xmm6,
+ debugserver_ymm7 = debugserver_xmm7
+};
+
+uint64_t
+DNBArchImplI386::GetPC(uint64_t failValue)
+{
+ // Get program counter
+ if (GetGPRState(false) == KERN_SUCCESS)
+ return m_state.context.gpr.__eip;
+ return failValue;
+}
+
+kern_return_t
+DNBArchImplI386::SetPC(uint64_t value)
+{
+    // Read the current GPR state so we can update the program counter.
+ kern_return_t err = GetGPRState(false);
+ if (err == KERN_SUCCESS)
+ {
+ m_state.context.gpr.__eip = static_cast<uint32_t>(value);
+ err = SetGPRState();
+ }
+ return err == KERN_SUCCESS;
+}
+
+uint64_t
+DNBArchImplI386::GetSP(uint64_t failValue)
+{
+ // Get stack pointer
+ if (GetGPRState(false) == KERN_SUCCESS)
+ return m_state.context.gpr.__esp;
+ return failValue;
+}
+
+// Uncomment the value below to verify the values in the debugger.
+//#define DEBUG_GPR_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED
+//#define SET_GPR(reg) m_state.context.gpr.__##reg = gpr_##reg
+
+kern_return_t
+DNBArchImplI386::GetGPRState(bool force)
+{
+ if (force || m_state.GetError(e_regSetGPR, Read))
+ {
+#if DEBUG_GPR_VALUES
+ SET_GPR(eax);
+ SET_GPR(ebx);
+ SET_GPR(ecx);
+ SET_GPR(edx);
+ SET_GPR(edi);
+ SET_GPR(esi);
+ SET_GPR(ebp);
+ SET_GPR(esp);
+ SET_GPR(ss);
+ SET_GPR(eflags);
+ SET_GPR(eip);
+ SET_GPR(cs);
+ SET_GPR(ds);
+ SET_GPR(es);
+ SET_GPR(fs);
+ SET_GPR(gs);
+ m_state.SetError(e_regSetGPR, Read, 0);
+#else
+ mach_msg_type_number_t count = e_regSetWordSizeGPR;
+ m_state.SetError(e_regSetGPR, Read, ::thread_get_state(m_thread->MachPortNumber(), __i386_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count));
+#endif
+ }
+ return m_state.GetError(e_regSetGPR, Read);
+}
+
+// Uncomment the value below to verify the values in the debugger.
+//#define DEBUG_FPU_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED
+
+kern_return_t
+DNBArchImplI386::GetFPUState(bool force)
+{
+ if (force || m_state.GetError(e_regSetFPU, Read))
+ {
+ if (DEBUG_FPU_REGS)
+ {
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ {
+ m_state.context.fpu.avx.__fpu_reserved[0] = -1;
+ m_state.context.fpu.avx.__fpu_reserved[1] = -1;
+ *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fcw) = 0x1234;
+ *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fsw) = 0x5678;
+ m_state.context.fpu.avx.__fpu_ftw = 1;
+ m_state.context.fpu.avx.__fpu_rsrv1 = UINT8_MAX;
+ m_state.context.fpu.avx.__fpu_fop = 2;
+ m_state.context.fpu.avx.__fpu_ip = 3;
+ m_state.context.fpu.avx.__fpu_cs = 4;
+ m_state.context.fpu.avx.__fpu_rsrv2 = 5;
+ m_state.context.fpu.avx.__fpu_dp = 6;
+ m_state.context.fpu.avx.__fpu_ds = 7;
+ m_state.context.fpu.avx.__fpu_rsrv3 = UINT16_MAX;
+ m_state.context.fpu.avx.__fpu_mxcsr = 8;
+ m_state.context.fpu.avx.__fpu_mxcsrmask = 9;
+ int i;
+ for (i=0; i<16; ++i)
+ {
+ if (i<10)
+ {
+ m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = 'a';
+ m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = 'b';
+ m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = 'c';
+ m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = 'd';
+ m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = 'e';
+ m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = 'f';
+ m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = 'g';
+ m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = 'h';
+ }
+ else
+ {
+ m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
+ }
+
+ m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg[i] = '0';
+ m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg[i] = '1';
+ m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg[i] = '2';
+ m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg[i] = '3';
+ m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg[i] = '4';
+ m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg[i] = '5';
+ m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg[i] = '6';
+ m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg[i] = '7';
+ }
+ for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i)
+ m_state.context.fpu.avx.__fpu_rsrv4[i] = INT8_MIN;
+ m_state.context.fpu.avx.__fpu_reserved1 = -1;
+ for (i=0; i<sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i)
+ m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN;
+
+ for (i = 0; i < 16; ++i)
+ {
+ m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0';
+ m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1';
+ m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2';
+ m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3';
+ m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4';
+ m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5';
+ m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6';
+ m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7';
+ }
+ }
+ else
+ {
+ m_state.context.fpu.no_avx.__fpu_reserved[0] = -1;
+ m_state.context.fpu.no_avx.__fpu_reserved[1] = -1;
+ *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234;
+ *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678;
+ m_state.context.fpu.no_avx.__fpu_ftw = 1;
+ m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX;
+ m_state.context.fpu.no_avx.__fpu_fop = 2;
+ m_state.context.fpu.no_avx.__fpu_ip = 3;
+ m_state.context.fpu.no_avx.__fpu_cs = 4;
+ m_state.context.fpu.no_avx.__fpu_rsrv2 = 5;
+ m_state.context.fpu.no_avx.__fpu_dp = 6;
+ m_state.context.fpu.no_avx.__fpu_ds = 7;
+ m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX;
+ m_state.context.fpu.no_avx.__fpu_mxcsr = 8;
+ m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9;
+ int i;
+ for (i=0; i<16; ++i)
+ {
+ if (i<10)
+ {
+ m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a';
+ m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b';
+ m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c';
+ m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd';
+ m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e';
+ m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f';
+ m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g';
+ m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h';
+ }
+ else
+ {
+ m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
+ }
+
+ m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0';
+ m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1';
+ m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2';
+ m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3';
+ m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4';
+ m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5';
+ m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6';
+ m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7';
+ }
+ for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i)
+ m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN;
+ m_state.context.fpu.no_avx.__fpu_reserved1 = -1;
+ }
+ m_state.SetError(e_regSetFPU, Read, 0);
+ }
+ else
+ {
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ {
+ mach_msg_type_number_t count = e_regSetWordSizeAVX;
+ m_state.SetError (e_regSetFPU, Read, ::thread_get_state(m_thread->MachPortNumber(), __i386_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, &count));
+ DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &avx, %u (%u passed in)) => 0x%8.8x",
+ m_thread->MachPortNumber(), __i386_AVX_STATE, count, e_regSetWordSizeAVX,
+ m_state.GetError(e_regSetFPU, Read));
+ }
+ else
+ {
+ mach_msg_type_number_t count = e_regSetWordSizeFPU;
+ m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->MachPortNumber(), __i386_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, &count));
+            DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &fpu, %u (%u passed in)) => 0x%8.8x",
+ m_thread->MachPortNumber(), __i386_FLOAT_STATE, count, e_regSetWordSizeFPU,
+ m_state.GetError(e_regSetFPU, Read));
+ }
+ }
+ }
+ return m_state.GetError(e_regSetFPU, Read);
+}
+
+kern_return_t
+DNBArchImplI386::GetEXCState(bool force)
+{
+ if (force || m_state.GetError(e_regSetEXC, Read))
+ {
+ mach_msg_type_number_t count = e_regSetWordSizeEXC;
+ m_state.SetError(e_regSetEXC, Read, ::thread_get_state(m_thread->MachPortNumber(), __i386_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count));
+ }
+ return m_state.GetError(e_regSetEXC, Read);
+}
+
+kern_return_t
+DNBArchImplI386::SetGPRState()
+{
+ kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
+ DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (SetGPRState() for stop_count = %u)", m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());
+
+
+ m_state.SetError(e_regSetGPR, Write, ::thread_set_state(m_thread->MachPortNumber(), __i386_THREAD_STATE, (thread_state_t)&m_state.context.gpr, e_regSetWordSizeGPR));
+ return m_state.GetError(e_regSetGPR, Write);
+}
+
+kern_return_t
+DNBArchImplI386::SetFPUState()
+{
+ if (DEBUG_FPU_REGS)
+ {
+ m_state.SetError(e_regSetFPU, Write, 0);
+ return m_state.GetError(e_regSetFPU, Write);
+ }
+ else
+ {
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->MachPortNumber(), __i386_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, e_regSetWordSizeAVX));
+ else
+ m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->MachPortNumber(), __i386_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, e_regSetWordSizeFPU));
+ return m_state.GetError(e_regSetFPU, Write);
+ }
+}
+
+kern_return_t
+DNBArchImplI386::SetEXCState()
+{
+ m_state.SetError(e_regSetEXC, Write, ::thread_set_state(m_thread->MachPortNumber(), __i386_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, e_regSetWordSizeEXC));
+ return m_state.GetError(e_regSetEXC, Write);
+}
+
+kern_return_t
+DNBArchImplI386::GetDBGState(bool force)
+{
+ if (force || m_state.GetError(e_regSetDBG, Read))
+ {
+ mach_msg_type_number_t count = e_regSetWordSizeDBG;
+ m_state.SetError(e_regSetDBG, Read, ::thread_get_state(m_thread->MachPortNumber(), __i386_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, &count));
+ }
+ return m_state.GetError(e_regSetDBG, Read);
+}
+
+kern_return_t
+DNBArchImplI386::SetDBGState(bool also_set_on_task)
+{
+ m_state.SetError(e_regSetDBG, Write, ::thread_set_state(m_thread->MachPortNumber(), __i386_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG));
+ if (also_set_on_task)
+ {
+ kern_return_t kret = ::task_set_state(m_thread->Process()->Task().TaskPort(), __i386_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG);
+ if (kret != KERN_SUCCESS)
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::SetDBGState failed to set debug control register state: 0x%8.8x.", kret);
+
+ }
+ return m_state.GetError(e_regSetDBG, Write);
+}
+
+void
+DNBArchImplI386::ThreadWillResume()
+{
+ // Do we need to step this thread? If so, let the mach thread tell us so.
+ if (m_thread->IsStepping())
+ {
+ // This is the primary thread, let the arch do anything it needs
+ EnableHardwareSingleStep(true);
+ }
+
+ // Reset the debug status register, if necessary, before we resume.
+ kern_return_t kret = GetDBGState(false);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret);
+ if (kret != KERN_SUCCESS)
+ return;
+
+ DBG &debug_state = m_state.context.dbg;
+ bool need_reset = false;
+ uint32_t i, num = NumSupportedHardwareWatchpoints();
+ for (i = 0; i < num; ++i)
+ if (IsWatchpointHit(debug_state, i))
+ need_reset = true;
+
+ if (need_reset)
+ {
+ ClearWatchpointHits(debug_state);
+ kret = SetDBGState(false);
+ DNBLogThreadedIf(LOG_WATCHPOINTS,"DNBArchImplI386::ThreadWillResume() SetDBGState() => 0x%8.8x.", kret);
+ }
+}
+
+bool
+DNBArchImplI386::ThreadDidStop()
+{
+ bool success = true;
+
+ m_state.InvalidateAllRegisterStates();
+
+ // Are we stepping a single instruction?
+ if (GetGPRState(true) == KERN_SUCCESS)
+ {
+ // We are single stepping, was this the primary thread?
+ if (m_thread->IsStepping())
+ {
+ // This was the primary thread, we need to clear the trace
+ // bit if so.
+ success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
+ }
+ else
+ {
+ // The MachThread will automatically restore the suspend count
+ // in ThreadDidStop(), so we don't need to do anything here if
+ // we weren't the primary thread the last time
+ }
+ }
+ return success;
+}
+
+bool
+DNBArchImplI386::NotifyException(MachException::Data& exc)
+{
+ switch (exc.exc_type)
+ {
+ case EXC_BAD_ACCESS:
+ break;
+ case EXC_BAD_INSTRUCTION:
+ break;
+ case EXC_ARITHMETIC:
+ break;
+ case EXC_EMULATION:
+ break;
+ case EXC_SOFTWARE:
+ break;
+ case EXC_BREAKPOINT:
+ if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2)
+ {
+ // exc_code = EXC_I386_BPT
+ //
+ nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
+ if (pc != INVALID_NUB_ADDRESS && pc > 0)
+ {
+ pc -= 1;
+ // Check for a breakpoint at one byte prior to the current PC value
+ // since the PC will be just past the trap.
+
+ DNBBreakpoint *bp = m_thread->Process()->Breakpoints().FindByAddress(pc);
+ if (bp)
+ {
+                    // Back up the PC for i386 since the trap was taken and the PC
+                    // is at the address following the single-byte trap instruction.
+ if (m_state.context.gpr.__eip > 0)
+ {
+ m_state.context.gpr.__eip = static_cast<uint32_t>(pc);
+ // Write the new PC back out
+ SetGPRState ();
+ }
+ }
+ return true;
+ }
+ }
+ else if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 1)
+ {
+ // exc_code = EXC_I386_SGL
+ //
+ // Check whether this corresponds to a watchpoint hit event.
+ // If yes, set the exc_sub_code to the data break address.
+ nub_addr_t addr = 0;
+ uint32_t hw_index = GetHardwareWatchpointHit(addr);
+ if (hw_index != INVALID_NUB_HW_INDEX)
+ {
+ exc.exc_data[1] = addr;
+ // Piggyback the hw_index in the exc.data.
+ exc.exc_data.push_back(hw_index);
+ }
+
+ return true;
+ }
+ break;
+ case EXC_SYSCALL:
+ break;
+ case EXC_MACH_SYSCALL:
+ break;
+ case EXC_RPC_ALERT:
+ break;
+ }
+ return false;
+}
+
+uint32_t
+DNBArchImplI386::NumSupportedHardwareWatchpoints()
+{
+ // Available debug address registers: dr0, dr1, dr2, dr3.
+ return 4;
+}
+
+static uint32_t
+size_and_rw_bits(nub_size_t size, bool read, bool write)
+{
+ uint32_t rw;
+ if (read) {
+ rw = 0x3; // READ or READ/WRITE
+ } else if (write) {
+ rw = 0x1; // WRITE
+ } else {
+ assert(0 && "read and write cannot both be false");
+ }
+
+ switch (size) {
+ case 1:
+ return rw;
+ case 2:
+ return (0x1 << 2) | rw;
+ case 4:
+ return (0x3 << 2) | rw;
+ case 8:
+ return (0x2 << 2) | rw;
+ }
+ assert(0 && "invalid size, must be one of 1, 2, 4, or 8");
+ return 0;
+}
+
+void
+DNBArchImplI386::SetWatchpoint(DBG &debug_state, uint32_t hw_index, nub_addr_t addr, nub_size_t size, bool read, bool write)
+{
+    // Set both dr7 (debug control register) and dr0-dr3 (debug address registers).
+
+    // dr7{7-0} encodes the local/global enable bits:
+ // global enable --. .-- local enable
+ // | |
+ // v v
+ // dr0 -> bits{1-0}
+ // dr1 -> bits{3-2}
+ // dr2 -> bits{5-4}
+ // dr3 -> bits{7-6}
+ //
+ // dr7{31-16} encodes the rw/len bits:
+ // b_x+3, b_x+2, b_x+1, b_x
+ // where bits{x+1, x} => rw
+ // 0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io read-or-write (unused)
+ // and bits{x+3, x+2} => len
+ // 0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
+ //
+ // dr0 -> bits{19-16}
+ // dr1 -> bits{23-20}
+ // dr2 -> bits{27-24}
+ // dr3 -> bits{31-28}
+ debug_state.__dr7 |= (1 << (2*hw_index) |
+ size_and_rw_bits(size, read, write) << (16+4*hw_index));
+ uint32_t addr_32 = addr & 0xffffffff;
+ switch (hw_index) {
+ case 0:
+ debug_state.__dr0 = addr_32; break;
+ case 1:
+ debug_state.__dr1 = addr_32; break;
+ case 2:
+ debug_state.__dr2 = addr_32; break;
+ case 3:
+ debug_state.__dr3 = addr_32; break;
+ default:
+ assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
+ }
+ return;
+}
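+// Worked example (illustrative): a 4-byte write watchpoint in slot 1 computes
+// size_and_rw_bits(4, false, true) == ((0x3 << 2) | 0x1) == 0xd, so dr7 gets
+// bit 2 set (local enable for dr1) and 0xd written into bits 23-20 (its rw/len
+// field), while dr1 itself receives the 32-bit watch address.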
+
+void
+DNBArchImplI386::ClearWatchpoint(DBG &debug_state, uint32_t hw_index)
+{
+ debug_state.__dr7 &= ~(3 << (2*hw_index));
+ switch (hw_index) {
+ case 0:
+ debug_state.__dr0 = 0; break;
+ case 1:
+ debug_state.__dr1 = 0; break;
+ case 2:
+ debug_state.__dr2 = 0; break;
+ case 3:
+ debug_state.__dr3 = 0; break;
+ default:
+ assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
+ }
+ return;
+}
+
+bool
+DNBArchImplI386::IsWatchpointVacant(const DBG &debug_state, uint32_t hw_index)
+{
+ // Check dr7 (debug control register) for local/global enable bits:
+ // global enable --. .-- local enable
+ // | |
+ // v v
+ // dr0 -> bits{1-0}
+ // dr1 -> bits{3-2}
+ // dr2 -> bits{5-4}
+ // dr3 -> bits{7-6}
+ return (debug_state.__dr7 & (3 << (2*hw_index))) == 0;
+}
+
+// Resets local copy of debug status register to wait for the next debug exception.
+void
+DNBArchImplI386::ClearWatchpointHits(DBG &debug_state)
+{
+ // See also IsWatchpointHit().
+ debug_state.__dr6 = 0;
+ return;
+}
+
+bool
+DNBArchImplI386::IsWatchpointHit(const DBG &debug_state, uint32_t hw_index)
+{
+    // Check dr6 (debug status register) to see whether a watchpoint was hit:
+ // is watchpoint hit?
+ // |
+ // v
+ // dr0 -> bits{0}
+ // dr1 -> bits{1}
+ // dr2 -> bits{2}
+ // dr3 -> bits{3}
+ return (debug_state.__dr6 & (1 << hw_index));
+}
+
+nub_addr_t
+DNBArchImplI386::GetWatchAddress(const DBG &debug_state, uint32_t hw_index)
+{
+ switch (hw_index) {
+ case 0:
+ return debug_state.__dr0;
+ case 1:
+ return debug_state.__dr1;
+ case 2:
+ return debug_state.__dr2;
+ case 3:
+ return debug_state.__dr3;
+ }
+ assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
+ return 0;
+}
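+// Example (illustrative): if dr6 reads back as 0x2 after a debug exception,
+// IsWatchpointHit() reports slot 1 as hit and GetWatchAddress() returns the
+// address stored in dr1.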
+
+bool
+DNBArchImplI386::StartTransForHWP()
+{
+ if (m_2pc_trans_state != Trans_Done && m_2pc_trans_state != Trans_Rolled_Back)
+ DNBLogError ("%s inconsistent state detected, expected %d or %d, got: %d", __FUNCTION__, Trans_Done, Trans_Rolled_Back, m_2pc_trans_state);
+ m_2pc_dbg_checkpoint = m_state.context.dbg;
+ m_2pc_trans_state = Trans_Pending;
+ return true;
+}
+bool
+DNBArchImplI386::RollbackTransForHWP()
+{
+ m_state.context.dbg = m_2pc_dbg_checkpoint;
+ if (m_2pc_trans_state != Trans_Pending)
+ DNBLogError ("%s inconsistent state detected, expected %d, got: %d", __FUNCTION__, Trans_Pending, m_2pc_trans_state);
+ m_2pc_trans_state = Trans_Rolled_Back;
+ kern_return_t kret = SetDBGState(false);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::RollbackTransForHWP() SetDBGState() => 0x%8.8x.", kret);
+
+ if (kret == KERN_SUCCESS)
+ return true;
+ else
+ return false;
+}
+bool
+DNBArchImplI386::FinishTransForHWP()
+{
+ m_2pc_trans_state = Trans_Done;
+ return true;
+}
+DNBArchImplI386::DBG
+DNBArchImplI386::GetDBGCheckpoint()
+{
+ return m_2pc_dbg_checkpoint;
+}
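+// Transaction sketch (illustrative): EnableHardwareWatchpoint() and
+// DisableHardwareWatchpoint() below call StartTransForHWP() to checkpoint the
+// debug state before modifying it, and revert to that checkpoint themselves if
+// SetDBGState() fails; an outer transaction coordinator is then responsible
+// for calling FinishTransForHWP() or RollbackTransForHWP().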
+
+uint32_t
+DNBArchImplI386::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write, bool also_set_on_task)
+{
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::EnableHardwareWatchpoint(addr = 0x%llx, size = %llu, read = %u, write = %u)", (uint64_t)addr, (uint64_t)size, read, write);
+
+ const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
+
+ // Can only watch 1, 2, 4, or 8 bytes.
+ if (!(size == 1 || size == 2 || size == 4 || size == 8))
+ return INVALID_NUB_HW_INDEX;
+
+ // We must watch for either read or write
+ if (read == false && write == false)
+ return INVALID_NUB_HW_INDEX;
+
+ // Read the debug state
+ kern_return_t kret = GetDBGState(false);
+
+ if (kret == KERN_SUCCESS)
+ {
+ // Check to make sure we have the needed hardware support
+ uint32_t i = 0;
+
+ DBG &debug_state = m_state.context.dbg;
+ for (i = 0; i < num_hw_watchpoints; ++i)
+ {
+ if (IsWatchpointVacant(debug_state, i))
+ break;
+ }
+
+ // See if we found an available hw breakpoint slot above
+ if (i < num_hw_watchpoints)
+ {
+ StartTransForHWP();
+
+ // Modify our local copy of the debug state, first.
+ SetWatchpoint(debug_state, i, addr, size, read, write);
+ // Now set the watch point in the inferior.
+ kret = SetDBGState(also_set_on_task);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret);
+
+ if (kret == KERN_SUCCESS)
+ return i;
+ else // Revert to the previous debug state voluntarily. The transaction coordinator knows that we have failed.
+ m_state.context.dbg = GetDBGCheckpoint();
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints);
+ }
+ }
+ return INVALID_NUB_HW_INDEX;
+}
+
+bool
+DNBArchImplI386::DisableHardwareWatchpoint (uint32_t hw_index, bool also_set_on_task)
+{
+ kern_return_t kret = GetDBGState(false);
+
+ const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
+ if (kret == KERN_SUCCESS)
+ {
+ DBG &debug_state = m_state.context.dbg;
+ if (hw_index < num_hw_points && !IsWatchpointVacant(debug_state, hw_index))
+ {
+ StartTransForHWP();
+
+ // Modify our local copy of the debug state, first.
+ ClearWatchpoint(debug_state, hw_index);
+ // Now disable the watch point in the inferior.
+ kret = SetDBGState(also_set_on_task);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::DisableHardwareWatchpoint( %u )",
+ hw_index);
+
+ if (kret == KERN_SUCCESS)
+ return true;
+ else // Revert to the previous debug state voluntarily. The transaction coordinator knows that we have failed.
+ m_state.context.dbg = GetDBGCheckpoint();
+ }
+ }
+ return false;
+}
+
+// Iterate through the debug status register; return the index of the first hit.
+uint32_t
+DNBArchImplI386::GetHardwareWatchpointHit(nub_addr_t &addr)
+{
+ // Read the debug state
+ kern_return_t kret = GetDBGState(true);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", kret);
+ if (kret == KERN_SUCCESS)
+ {
+ DBG &debug_state = m_state.context.dbg;
+ uint32_t i, num = NumSupportedHardwareWatchpoints();
+ for (i = 0; i < num; ++i)
+ {
+ if (IsWatchpointHit(debug_state, i))
+ {
+ addr = GetWatchAddress(debug_state, i);
+ DNBLogThreadedIf(LOG_WATCHPOINTS,
+ "DNBArchImplI386::GetHardwareWatchpointHit() found => %u (addr = 0x%llx).",
+ i, (uint64_t)addr);
+ return i;
+ }
+ }
+ }
+ return INVALID_NUB_HW_INDEX;
+}
+
+// Set the single step bit in the processor status register.
+kern_return_t
+DNBArchImplI386::EnableHardwareSingleStep (bool enable)
+{
+ if (GetGPRState(false) == KERN_SUCCESS)
+ {
+ const uint32_t trace_bit = 0x100u;
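+        // trace_bit is the Trap Flag (TF, bit 8) of EFLAGS; when set, the CPU
+        // raises a single-step exception after each instruction.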
+ if (enable)
+ m_state.context.gpr.__eflags |= trace_bit;
+ else
+ m_state.context.gpr.__eflags &= ~trace_bit;
+ return SetGPRState();
+ }
+ return m_state.GetError(e_regSetGPR, Read);
+}
+
+
+//----------------------------------------------------------------------
+// Register information definitions
+//----------------------------------------------------------------------
+
+#define DEFINE_GPR_PSEUDO_16(reg16,reg32) { e_regSetGPR, gpr_##reg16, #reg16, NULL, Uint, Hex, 2, 0,INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_##reg32, g_invalidate_##reg32 }
+#define DEFINE_GPR_PSEUDO_8H(reg8,reg32) { e_regSetGPR, gpr_##reg8 , #reg8 , NULL, Uint, Hex, 1, 1,INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_##reg32, g_invalidate_##reg32 }
+#define DEFINE_GPR_PSEUDO_8L(reg8,reg32) { e_regSetGPR, gpr_##reg8 , #reg8 , NULL, Uint, Hex, 1, 0,INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_##reg32, g_invalidate_##reg32 }
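+// Each pseudo register overlays part of its parent 32-bit register: the
+// 16-bit and low 8-bit forms start at byte offset 0, the high 8-bit forms at
+// byte offset 1, and modifying any of them invalidates the parent register
+// and its other sub-registers via the g_invalidate_* lists.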
+
+
+#define GPR_OFFSET(reg) (offsetof (DNBArchImplI386::GPR, __##reg))
+#define FPU_OFFSET(reg) (offsetof (DNBArchImplI386::FPU, __fpu_##reg) + offsetof (DNBArchImplI386::Context, fpu.no_avx))
+#define AVX_OFFSET(reg) (offsetof (DNBArchImplI386::AVX, __fpu_##reg) + offsetof (DNBArchImplI386::Context, fpu.avx))
+#define EXC_OFFSET(reg) (offsetof (DNBArchImplI386::EXC, __##reg) + offsetof (DNBArchImplI386::Context, exc))
+
+#define GPR_SIZE(reg) (sizeof(((DNBArchImplI386::GPR *)NULL)->__##reg))
+#define FPU_SIZE_UINT(reg) (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg))
+#define FPU_SIZE_MMST(reg) (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg.__mmst_reg))
+#define FPU_SIZE_XMM(reg) (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg.__xmm_reg))
+#define FPU_SIZE_YMM(reg) (32)
+#define EXC_SIZE(reg) (sizeof(((DNBArchImplI386::EXC *)NULL)->__##reg))
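+// Note: the null-pointer member accesses above appear only inside sizeof(),
+// an unevaluated context, so they are resolved at compile time and never
+// dereference anything.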
+
+// These offsets do not identify the actual location of ymm0..7 within
+// Context.fpu.avx, because Context.fpu.avx contains padding that we do not
+// transmit. The offset macros describe the register layout that debugserver
+// sends to the debugger, not the layout returned by thread_get_state.
+#define AVX_OFFSET_YMM(n) (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * n))
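+// For example, AVX_OFFSET_YMM(0) resolves to AVX_OFFSET(xmm7) + 16, and each
+// subsequent ymm register follows at a 32-byte stride in the transmitted
+// register context.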
+
+// These macros automatically define the register name, alternate name,
+// register size, register offset, encoding, format and native register
+// number, so the register state structures end up with the correct sizes
+// and offsets.
+
+const char * g_contained_eax[] = { "eax", NULL };
+const char * g_contained_ebx[] = { "ebx", NULL };
+const char * g_contained_ecx[] = { "ecx", NULL };
+const char * g_contained_edx[] = { "edx", NULL };
+const char * g_contained_edi[] = { "edi", NULL };
+const char * g_contained_esi[] = { "esi", NULL };
+const char * g_contained_ebp[] = { "ebp", NULL };
+const char * g_contained_esp[] = { "esp", NULL };
+
+const char * g_invalidate_eax[] = { "eax", "ax", "ah", "al", NULL };
+const char * g_invalidate_ebx[] = { "ebx", "bx", "bh", "bl", NULL };
+const char * g_invalidate_ecx[] = { "ecx", "cx", "ch", "cl", NULL };
+const char * g_invalidate_edx[] = { "edx", "dx", "dh", "dl", NULL };
+const char * g_invalidate_edi[] = { "edi", "di", "dil", NULL };
+const char * g_invalidate_esi[] = { "esi", "si", "sil", NULL };
+const char * g_invalidate_ebp[] = { "ebp", "bp", "bpl", NULL };
+const char * g_invalidate_esp[] = { "esp", "sp", "spl", NULL };
+
+// General purpose registers for 32 bit
+const DNBRegisterInfo
+DNBArchImplI386::g_gpr_registers[] =
+{
+{ e_regSetGPR, gpr_eax, "eax" , NULL , Uint, Hex, GPR_SIZE(eax), GPR_OFFSET(eax) , ehframe_eax , dwarf_eax , INVALID_NUB_REGNUM , debugserver_eax , NULL, g_invalidate_eax },
+{ e_regSetGPR, gpr_ebx, "ebx" , NULL , Uint, Hex, GPR_SIZE(ebx), GPR_OFFSET(ebx) , ehframe_ebx , dwarf_ebx , INVALID_NUB_REGNUM , debugserver_ebx , NULL, g_invalidate_ebx },
+{ e_regSetGPR, gpr_ecx, "ecx" , NULL , Uint, Hex, GPR_SIZE(ecx), GPR_OFFSET(ecx) , ehframe_ecx , dwarf_ecx , INVALID_NUB_REGNUM , debugserver_ecx , NULL, g_invalidate_ecx },
+{ e_regSetGPR, gpr_edx, "edx" , NULL , Uint, Hex, GPR_SIZE(edx), GPR_OFFSET(edx) , ehframe_edx , dwarf_edx , INVALID_NUB_REGNUM , debugserver_edx , NULL, g_invalidate_edx },
+{ e_regSetGPR, gpr_edi, "edi" , NULL , Uint, Hex, GPR_SIZE(edi), GPR_OFFSET(edi) , ehframe_edi , dwarf_edi , INVALID_NUB_REGNUM , debugserver_edi , NULL, g_invalidate_edi },
+{ e_regSetGPR, gpr_esi, "esi" , NULL , Uint, Hex, GPR_SIZE(esi), GPR_OFFSET(esi) , ehframe_esi , dwarf_esi , INVALID_NUB_REGNUM , debugserver_esi , NULL, g_invalidate_esi },
+{ e_regSetGPR, gpr_ebp, "ebp" , "fp" , Uint, Hex, GPR_SIZE(ebp), GPR_OFFSET(ebp) , ehframe_ebp , dwarf_ebp , GENERIC_REGNUM_FP , debugserver_ebp , NULL, g_invalidate_ebp },
+{ e_regSetGPR, gpr_esp, "esp" , "sp" , Uint, Hex, GPR_SIZE(esp), GPR_OFFSET(esp) , ehframe_esp , dwarf_esp , GENERIC_REGNUM_SP , debugserver_esp , NULL, g_invalidate_esp },
+{ e_regSetGPR, gpr_ss, "ss" , NULL , Uint, Hex, GPR_SIZE(ss), GPR_OFFSET(ss) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM , debugserver_ss , NULL, NULL},
+{ e_regSetGPR, gpr_eflags, "eflags", "flags" , Uint, Hex, GPR_SIZE(eflags), GPR_OFFSET(eflags) , ehframe_eflags , dwarf_eflags , GENERIC_REGNUM_FLAGS , debugserver_eflags, NULL, NULL},
+{ e_regSetGPR, gpr_eip, "eip" , "pc" , Uint, Hex, GPR_SIZE(eip), GPR_OFFSET(eip) , ehframe_eip , dwarf_eip , GENERIC_REGNUM_PC , debugserver_eip , NULL, NULL},
+{ e_regSetGPR, gpr_cs, "cs" , NULL , Uint, Hex, GPR_SIZE(cs), GPR_OFFSET(cs) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM , debugserver_cs , NULL, NULL},
+{ e_regSetGPR, gpr_ds, "ds" , NULL , Uint, Hex, GPR_SIZE(ds), GPR_OFFSET(ds) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM , debugserver_ds , NULL, NULL},
+{ e_regSetGPR, gpr_es, "es" , NULL , Uint, Hex, GPR_SIZE(es), GPR_OFFSET(es) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM , debugserver_es , NULL, NULL},
+{ e_regSetGPR, gpr_fs, "fs" , NULL , Uint, Hex, GPR_SIZE(fs), GPR_OFFSET(fs) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM , debugserver_fs , NULL, NULL},
+{ e_regSetGPR, gpr_gs, "gs" , NULL , Uint, Hex, GPR_SIZE(gs), GPR_OFFSET(gs) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM , debugserver_gs , NULL, NULL},
+DEFINE_GPR_PSEUDO_16 (ax , eax),
+DEFINE_GPR_PSEUDO_16 (bx , ebx),
+DEFINE_GPR_PSEUDO_16 (cx , ecx),
+DEFINE_GPR_PSEUDO_16 (dx , edx),
+DEFINE_GPR_PSEUDO_16 (di , edi),
+DEFINE_GPR_PSEUDO_16 (si , esi),
+DEFINE_GPR_PSEUDO_16 (bp , ebp),
+DEFINE_GPR_PSEUDO_16 (sp , esp),
+DEFINE_GPR_PSEUDO_8H (ah , eax),
+DEFINE_GPR_PSEUDO_8H (bh , ebx),
+DEFINE_GPR_PSEUDO_8H (ch , ecx),
+DEFINE_GPR_PSEUDO_8H (dh , edx),
+DEFINE_GPR_PSEUDO_8L (al , eax),
+DEFINE_GPR_PSEUDO_8L (bl , ebx),
+DEFINE_GPR_PSEUDO_8L (cl , ecx),
+DEFINE_GPR_PSEUDO_8L (dl , edx),
+DEFINE_GPR_PSEUDO_8L (dil, edi),
+DEFINE_GPR_PSEUDO_8L (sil, esi),
+DEFINE_GPR_PSEUDO_8L (bpl, ebp),
+DEFINE_GPR_PSEUDO_8L (spl, esp)
+};
+
+
+const DNBRegisterInfo
+DNBArchImplI386::g_fpu_registers_no_avx[] =
+{
+{ e_regSetFPU, fpu_fcw , "fctrl" , NULL, Uint, Hex, FPU_SIZE_UINT(fcw) , FPU_OFFSET(fcw) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_fsw , "fstat" , NULL, Uint, Hex, FPU_SIZE_UINT(fsw) , FPU_OFFSET(fsw) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_ftw , "ftag" , NULL, Uint, Hex, FPU_SIZE_UINT(ftw) , FPU_OFFSET(ftw) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_fop , "fop" , NULL, Uint, Hex, FPU_SIZE_UINT(fop) , FPU_OFFSET(fop) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_ip , "fioff" , NULL, Uint, Hex, FPU_SIZE_UINT(ip) , FPU_OFFSET(ip) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_cs , "fiseg" , NULL, Uint, Hex, FPU_SIZE_UINT(cs) , FPU_OFFSET(cs) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_dp , "fooff" , NULL, Uint, Hex, FPU_SIZE_UINT(dp) , FPU_OFFSET(dp) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_ds , "foseg" , NULL, Uint, Hex, FPU_SIZE_UINT(ds) , FPU_OFFSET(ds) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_mxcsr , "mxcsr" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr) , FPU_OFFSET(mxcsr) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_mxcsrmask, "mxcsrmask" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , FPU_OFFSET(mxcsrmask) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+
+{ e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), INVALID_NUB_REGNUM, dwarf_stmm0, INVALID_NUB_REGNUM, debugserver_stmm0, NULL, NULL },
+{ e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), INVALID_NUB_REGNUM, dwarf_stmm1, INVALID_NUB_REGNUM, debugserver_stmm1, NULL, NULL },
+{ e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), INVALID_NUB_REGNUM, dwarf_stmm2, INVALID_NUB_REGNUM, debugserver_stmm2, NULL, NULL },
+{ e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), INVALID_NUB_REGNUM, dwarf_stmm3, INVALID_NUB_REGNUM, debugserver_stmm3, NULL, NULL },
+{ e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), INVALID_NUB_REGNUM, dwarf_stmm4, INVALID_NUB_REGNUM, debugserver_stmm4, NULL, NULL },
+{ e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), INVALID_NUB_REGNUM, dwarf_stmm5, INVALID_NUB_REGNUM, debugserver_stmm5, NULL, NULL },
+{ e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), INVALID_NUB_REGNUM, dwarf_stmm6, INVALID_NUB_REGNUM, debugserver_stmm6, NULL, NULL },
+{ e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), INVALID_NUB_REGNUM, dwarf_stmm7, INVALID_NUB_REGNUM, debugserver_stmm7, NULL, NULL },
+
+{ e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0), FPU_OFFSET(xmm0), INVALID_NUB_REGNUM, dwarf_xmm0, INVALID_NUB_REGNUM, debugserver_xmm0, NULL, NULL },
+{ e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1), FPU_OFFSET(xmm1), INVALID_NUB_REGNUM, dwarf_xmm1, INVALID_NUB_REGNUM, debugserver_xmm1, NULL, NULL },
+{ e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2), FPU_OFFSET(xmm2), INVALID_NUB_REGNUM, dwarf_xmm2, INVALID_NUB_REGNUM, debugserver_xmm2, NULL, NULL },
+{ e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3), FPU_OFFSET(xmm3), INVALID_NUB_REGNUM, dwarf_xmm3, INVALID_NUB_REGNUM, debugserver_xmm3, NULL, NULL },
+{ e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4), FPU_OFFSET(xmm4), INVALID_NUB_REGNUM, dwarf_xmm4, INVALID_NUB_REGNUM, debugserver_xmm4, NULL, NULL },
+{ e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5), FPU_OFFSET(xmm5), INVALID_NUB_REGNUM, dwarf_xmm5, INVALID_NUB_REGNUM, debugserver_xmm5, NULL, NULL },
+{ e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6), FPU_OFFSET(xmm6), INVALID_NUB_REGNUM, dwarf_xmm6, INVALID_NUB_REGNUM, debugserver_xmm6, NULL, NULL },
+{ e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7), FPU_OFFSET(xmm7), INVALID_NUB_REGNUM, dwarf_xmm7, INVALID_NUB_REGNUM, debugserver_xmm7, NULL, NULL }
+};
+
+
+static const char *g_contained_ymm0 [] = { "ymm0", NULL };
+static const char *g_contained_ymm1 [] = { "ymm1", NULL };
+static const char *g_contained_ymm2 [] = { "ymm2", NULL };
+static const char *g_contained_ymm3 [] = { "ymm3", NULL };
+static const char *g_contained_ymm4 [] = { "ymm4", NULL };
+static const char *g_contained_ymm5 [] = { "ymm5", NULL };
+static const char *g_contained_ymm6 [] = { "ymm6", NULL };
+static const char *g_contained_ymm7 [] = { "ymm7", NULL };
+
+
+const DNBRegisterInfo
+DNBArchImplI386::g_fpu_registers_avx[] =
+{
+{ e_regSetFPU, fpu_fcw , "fctrl" , NULL, Uint, Hex, FPU_SIZE_UINT(fcw) , AVX_OFFSET(fcw) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_fsw , "fstat" , NULL, Uint, Hex, FPU_SIZE_UINT(fsw) , AVX_OFFSET(fsw) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_ftw , "ftag" , NULL, Uint, Hex, FPU_SIZE_UINT(ftw) , AVX_OFFSET(ftw) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_fop , "fop" , NULL, Uint, Hex, FPU_SIZE_UINT(fop) , AVX_OFFSET(fop) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_ip , "fioff" , NULL, Uint, Hex, FPU_SIZE_UINT(ip) , AVX_OFFSET(ip) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_cs , "fiseg" , NULL, Uint, Hex, FPU_SIZE_UINT(cs) , AVX_OFFSET(cs) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_dp , "fooff" , NULL, Uint, Hex, FPU_SIZE_UINT(dp) , AVX_OFFSET(dp) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_ds , "foseg" , NULL, Uint, Hex, FPU_SIZE_UINT(ds) , AVX_OFFSET(ds) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_mxcsr , "mxcsr" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr) , AVX_OFFSET(mxcsr) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetFPU, fpu_mxcsrmask, "mxcsrmask" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , AVX_OFFSET(mxcsrmask) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+
+{ e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), INVALID_NUB_REGNUM, dwarf_stmm0, INVALID_NUB_REGNUM, debugserver_stmm0, NULL, NULL },
+{ e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), INVALID_NUB_REGNUM, dwarf_stmm1, INVALID_NUB_REGNUM, debugserver_stmm1, NULL, NULL },
+{ e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), INVALID_NUB_REGNUM, dwarf_stmm2, INVALID_NUB_REGNUM, debugserver_stmm2, NULL, NULL },
+{ e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), INVALID_NUB_REGNUM, dwarf_stmm3, INVALID_NUB_REGNUM, debugserver_stmm3, NULL, NULL },
+{ e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), INVALID_NUB_REGNUM, dwarf_stmm4, INVALID_NUB_REGNUM, debugserver_stmm4, NULL, NULL },
+{ e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), INVALID_NUB_REGNUM, dwarf_stmm5, INVALID_NUB_REGNUM, debugserver_stmm5, NULL, NULL },
+{ e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), INVALID_NUB_REGNUM, dwarf_stmm6, INVALID_NUB_REGNUM, debugserver_stmm6, NULL, NULL },
+{ e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), INVALID_NUB_REGNUM, dwarf_stmm7, INVALID_NUB_REGNUM, debugserver_stmm7, NULL, NULL },
+
+{ e_regSetFPU, fpu_ymm0, "ymm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm0), AVX_OFFSET_YMM(0), INVALID_NUB_REGNUM, dwarf_ymm0, INVALID_NUB_REGNUM, debugserver_ymm0, NULL, NULL },
+{ e_regSetFPU, fpu_ymm1, "ymm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm1), AVX_OFFSET_YMM(1), INVALID_NUB_REGNUM, dwarf_ymm1, INVALID_NUB_REGNUM, debugserver_ymm1, NULL, NULL },
+{ e_regSetFPU, fpu_ymm2, "ymm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm2), AVX_OFFSET_YMM(2), INVALID_NUB_REGNUM, dwarf_ymm2, INVALID_NUB_REGNUM, debugserver_ymm2, NULL, NULL },
+{ e_regSetFPU, fpu_ymm3, "ymm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm3), AVX_OFFSET_YMM(3), INVALID_NUB_REGNUM, dwarf_ymm3, INVALID_NUB_REGNUM, debugserver_ymm3, NULL, NULL },
+{ e_regSetFPU, fpu_ymm4, "ymm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm4), AVX_OFFSET_YMM(4), INVALID_NUB_REGNUM, dwarf_ymm4, INVALID_NUB_REGNUM, debugserver_ymm4, NULL, NULL },
+{ e_regSetFPU, fpu_ymm5, "ymm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm5), AVX_OFFSET_YMM(5), INVALID_NUB_REGNUM, dwarf_ymm5, INVALID_NUB_REGNUM, debugserver_ymm5, NULL, NULL },
+{ e_regSetFPU, fpu_ymm6, "ymm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm6), AVX_OFFSET_YMM(6), INVALID_NUB_REGNUM, dwarf_ymm6, INVALID_NUB_REGNUM, debugserver_ymm6, NULL, NULL },
+{ e_regSetFPU, fpu_ymm7, "ymm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm7), AVX_OFFSET_YMM(7), INVALID_NUB_REGNUM, dwarf_ymm7, INVALID_NUB_REGNUM, debugserver_ymm7, NULL, NULL },
+
+{ e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0), 0, INVALID_NUB_REGNUM, dwarf_xmm0, INVALID_NUB_REGNUM, debugserver_xmm0, g_contained_ymm0, NULL },
+{ e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1), 0, INVALID_NUB_REGNUM, dwarf_xmm1, INVALID_NUB_REGNUM, debugserver_xmm1, g_contained_ymm1, NULL },
+{ e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2), 0, INVALID_NUB_REGNUM, dwarf_xmm2, INVALID_NUB_REGNUM, debugserver_xmm2, g_contained_ymm2, NULL },
+{ e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3), 0, INVALID_NUB_REGNUM, dwarf_xmm3, INVALID_NUB_REGNUM, debugserver_xmm3, g_contained_ymm3, NULL },
+{ e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4), 0, INVALID_NUB_REGNUM, dwarf_xmm4, INVALID_NUB_REGNUM, debugserver_xmm4, g_contained_ymm4, NULL },
+{ e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5), 0, INVALID_NUB_REGNUM, dwarf_xmm5, INVALID_NUB_REGNUM, debugserver_xmm5, g_contained_ymm5, NULL },
+{ e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6), 0, INVALID_NUB_REGNUM, dwarf_xmm6, INVALID_NUB_REGNUM, debugserver_xmm6, g_contained_ymm6, NULL },
+{ e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7), 0, INVALID_NUB_REGNUM, dwarf_xmm7, INVALID_NUB_REGNUM, debugserver_xmm7, g_contained_ymm7, NULL },
+
+};
+
+const DNBRegisterInfo
+DNBArchImplI386::g_exc_registers[] =
+{
+{ e_regSetEXC, exc_trapno, "trapno" , NULL, Uint, Hex, EXC_SIZE (trapno) , EXC_OFFSET (trapno) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetEXC, exc_err, "err" , NULL, Uint, Hex, EXC_SIZE (err) , EXC_OFFSET (err) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL },
+{ e_regSetEXC, exc_faultvaddr, "faultvaddr", NULL, Uint, Hex, EXC_SIZE (faultvaddr), EXC_OFFSET (faultvaddr) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL }
+};
+
+// Number of registers in each register set
+const size_t DNBArchImplI386::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
+const size_t DNBArchImplI386::k_num_fpu_registers_no_avx = sizeof(g_fpu_registers_no_avx)/sizeof(DNBRegisterInfo);
+const size_t DNBArchImplI386::k_num_fpu_registers_avx = sizeof(g_fpu_registers_avx)/sizeof(DNBRegisterInfo);
+const size_t DNBArchImplI386::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
+const size_t DNBArchImplI386::k_num_all_registers_no_avx = k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers;
+const size_t DNBArchImplI386::k_num_all_registers_avx = k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;
+
+//----------------------------------------------------------------------
+// Register set definitions. The first definition, at register set index
+// zero, is for all registers; the other register sets follow. The
+// register information for the "all registers" set need not be filled in.
+//----------------------------------------------------------------------
+const DNBRegisterSetInfo
+DNBArchImplI386::g_reg_sets_no_avx[] =
+{
+ { "i386 Registers", NULL, k_num_all_registers_no_avx },
+ { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers },
+ { "Floating Point Registers", g_fpu_registers_no_avx, k_num_fpu_registers_no_avx },
+ { "Exception State Registers", g_exc_registers, k_num_exc_registers }
+};
+
+const DNBRegisterSetInfo
+DNBArchImplI386::g_reg_sets_avx[] =
+{
+ { "i386 Registers", NULL, k_num_all_registers_avx },
+ { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers },
+ { "Floating Point Registers", g_fpu_registers_avx, k_num_fpu_registers_avx },
+ { "Exception State Registers", g_exc_registers, k_num_exc_registers }
+};
+
+// Total number of register sets for this architecture
+const size_t DNBArchImplI386::k_num_register_sets = sizeof(g_reg_sets_no_avx)/sizeof(DNBRegisterSetInfo);
+
+DNBArchProtocol *
+DNBArchImplI386::Create (MachThread *thread)
+{
+ DNBArchImplI386 *obj = new DNBArchImplI386 (thread);
+ return obj;
+}
+
+const uint8_t *
+DNBArchImplI386::SoftwareBreakpointOpcode (nub_size_t byte_size)
+{
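+    // 0xCC is the one-byte INT3 instruction, the standard x86 software
+    // breakpoint opcode.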
+ static const uint8_t g_breakpoint_opcode[] = { 0xCC };
+ if (byte_size == 1)
+ return g_breakpoint_opcode;
+ return NULL;
+}
+
+const DNBRegisterSetInfo *
+DNBArchImplI386::GetRegisterSetInfo(nub_size_t *num_reg_sets)
+{
+ *num_reg_sets = k_num_register_sets;
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ return g_reg_sets_avx;
+ else
+ return g_reg_sets_no_avx;
+}
+
+
+void
+DNBArchImplI386::Initialize()
+{
+ DNBArchPluginInfo arch_plugin_info =
+ {
+ CPU_TYPE_I386,
+ DNBArchImplI386::Create,
+ DNBArchImplI386::GetRegisterSetInfo,
+ DNBArchImplI386::SoftwareBreakpointOpcode
+ };
+
+ // Register this arch plug-in with the main protocol class
+ DNBArchProtocol::RegisterArchPlugin (arch_plugin_info);
+}
+
+bool
+DNBArchImplI386::GetRegisterValue(uint32_t set, uint32_t reg, DNBRegisterValue *value)
+{
+ if (set == REGISTER_SET_GENERIC)
+ {
+ switch (reg)
+ {
+ case GENERIC_REGNUM_PC: // Program Counter
+ set = e_regSetGPR;
+ reg = gpr_eip;
+ break;
+
+ case GENERIC_REGNUM_SP: // Stack Pointer
+ set = e_regSetGPR;
+ reg = gpr_esp;
+ break;
+
+ case GENERIC_REGNUM_FP: // Frame Pointer
+ set = e_regSetGPR;
+ reg = gpr_ebp;
+ break;
+
+ case GENERIC_REGNUM_FLAGS: // Processor flags register
+ set = e_regSetGPR;
+ reg = gpr_eflags;
+ break;
+
+ case GENERIC_REGNUM_RA: // Return Address
+ default:
+ return false;
+ }
+ }
+
+ if (GetRegisterState(set, false) != KERN_SUCCESS)
+ return false;
+
+ const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
+ if (regInfo)
+ {
+ value->info = *regInfo;
+ switch (set)
+ {
+ case e_regSetGPR:
+ if (reg < k_num_gpr_registers)
+ {
+ value->value.uint32 = ((uint32_t*)(&m_state.context.gpr))[reg];
+ return true;
+ }
+ break;
+
+ case e_regSetFPU:
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ {
+ switch (reg)
+ {
+ case fpu_fcw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)); return true;
+ case fpu_fsw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)); return true;
+ case fpu_ftw: value->value.uint8 = m_state.context.fpu.avx.__fpu_ftw; return true;
+ case fpu_fop: value->value.uint16 = m_state.context.fpu.avx.__fpu_fop; return true;
+ case fpu_ip: value->value.uint32 = m_state.context.fpu.avx.__fpu_ip; return true;
+ case fpu_cs: value->value.uint16 = m_state.context.fpu.avx.__fpu_cs; return true;
+ case fpu_dp: value->value.uint32 = m_state.context.fpu.avx.__fpu_dp; return true;
+ case fpu_ds: value->value.uint16 = m_state.context.fpu.avx.__fpu_ds; return true;
+ case fpu_mxcsr: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsr; return true;
+ case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsrmask; return true;
+
+ case fpu_stmm0: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg, 10); return true;
+ case fpu_stmm1: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg, 10); return true;
+ case fpu_stmm2: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg, 10); return true;
+ case fpu_stmm3: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg, 10); return true;
+ case fpu_stmm4: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg, 10); return true;
+ case fpu_stmm5: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg, 10); return true;
+ case fpu_stmm6: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg, 10); return true;
+ case fpu_stmm7: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg, 10); return true;
+
+ case fpu_xmm0: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg, 16); return true;
+ case fpu_xmm1: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg, 16); return true;
+ case fpu_xmm2: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg, 16); return true;
+ case fpu_xmm3: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg, 16); return true;
+ case fpu_xmm4: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg, 16); return true;
+ case fpu_xmm5: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg, 16); return true;
+ case fpu_xmm6: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg, 16); return true;
+ case fpu_xmm7: memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg, 16); return true;
+
+#define MEMCPY_YMM(n) \
+ memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm##n.__xmm_reg, 16); \
+ memcpy((&value->value.uint8) + 16, m_state.context.fpu.avx.__fpu_ymmh##n.__xmm_reg, 16);
+ case fpu_ymm0: MEMCPY_YMM(0); return true;
+ case fpu_ymm1: MEMCPY_YMM(1); return true;
+ case fpu_ymm2: MEMCPY_YMM(2); return true;
+ case fpu_ymm3: MEMCPY_YMM(3); return true;
+ case fpu_ymm4: MEMCPY_YMM(4); return true;
+ case fpu_ymm5: MEMCPY_YMM(5); return true;
+ case fpu_ymm6: MEMCPY_YMM(6); return true;
+ case fpu_ymm7: MEMCPY_YMM(7); return true;
+#undef MEMCPY_YMM
+ }
+ }
+ else
+ {
+ switch (reg)
+ {
+ case fpu_fcw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)); return true;
+ case fpu_fsw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)); return true;
+ case fpu_ftw: value->value.uint8 = m_state.context.fpu.no_avx.__fpu_ftw; return true;
+ case fpu_fop: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop; return true;
+ case fpu_ip: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip; return true;
+ case fpu_cs: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs; return true;
+ case fpu_dp: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp; return true;
+ case fpu_ds: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds; return true;
+ case fpu_mxcsr: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr; return true;
+ case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask; return true;
+
+ case fpu_stmm0: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg, 10); return true;
+ case fpu_stmm1: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg, 10); return true;
+ case fpu_stmm2: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg, 10); return true;
+ case fpu_stmm3: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg, 10); return true;
+ case fpu_stmm4: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg, 10); return true;
+ case fpu_stmm5: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg, 10); return true;
+ case fpu_stmm6: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg, 10); return true;
+ case fpu_stmm7: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg, 10); return true;
+
+ case fpu_xmm0: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg, 16); return true;
+ case fpu_xmm1: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg, 16); return true;
+ case fpu_xmm2: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg, 16); return true;
+ case fpu_xmm3: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg, 16); return true;
+ case fpu_xmm4: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg, 16); return true;
+ case fpu_xmm5: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg, 16); return true;
+ case fpu_xmm6: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg, 16); return true;
+ case fpu_xmm7: memcpy(&value->value.uint8, m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg, 16); return true;
+ }
+ }
+ break;
+
+ case e_regSetEXC:
+ if (reg < k_num_exc_registers)
+ {
+ value->value.uint32 = (&m_state.context.exc.__trapno)[reg];
+ return true;
+ }
+ break;
+ }
+ }
+ return false;
+}
+
+
+bool
+DNBArchImplI386::SetRegisterValue(uint32_t set, uint32_t reg, const DNBRegisterValue *value)
+{
+ if (set == REGISTER_SET_GENERIC)
+ {
+ switch (reg)
+ {
+ case GENERIC_REGNUM_PC: // Program Counter
+ set = e_regSetGPR;
+ reg = gpr_eip;
+ break;
+
+ case GENERIC_REGNUM_SP: // Stack Pointer
+ set = e_regSetGPR;
+ reg = gpr_esp;
+ break;
+
+ case GENERIC_REGNUM_FP: // Frame Pointer
+ set = e_regSetGPR;
+ reg = gpr_ebp;
+ break;
+
+ case GENERIC_REGNUM_FLAGS: // Processor flags register
+ set = e_regSetGPR;
+ reg = gpr_eflags;
+ break;
+
+ case GENERIC_REGNUM_RA: // Return Address
+ default:
+ return false;
+ }
+ }
+
+ if (GetRegisterState(set, false) != KERN_SUCCESS)
+ return false;
+
+ bool success = false;
+ const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
+ if (regInfo)
+ {
+ switch (set)
+ {
+ case e_regSetGPR:
+ if (reg < k_num_gpr_registers)
+ {
+ ((uint32_t*)(&m_state.context.gpr))[reg] = value->value.uint32;
+ success = true;
+ }
+ break;
+
+ case e_regSetFPU:
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ {
+ switch (reg)
+ {
+ case fpu_fcw: *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)) = value->value.uint16; success = true; break;
+ case fpu_fsw: *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)) = value->value.uint16; success = true; break;
+ case fpu_ftw: m_state.context.fpu.avx.__fpu_ftw = value->value.uint8; success = true; break;
+ case fpu_fop: m_state.context.fpu.avx.__fpu_fop = value->value.uint16; success = true; break;
+ case fpu_ip: m_state.context.fpu.avx.__fpu_ip = value->value.uint32; success = true; break;
+ case fpu_cs: m_state.context.fpu.avx.__fpu_cs = value->value.uint16; success = true; break;
+ case fpu_dp: m_state.context.fpu.avx.__fpu_dp = value->value.uint32; success = true; break;
+ case fpu_ds: m_state.context.fpu.avx.__fpu_ds = value->value.uint16; success = true; break;
+ case fpu_mxcsr: m_state.context.fpu.avx.__fpu_mxcsr = value->value.uint32; success = true; break;
+ case fpu_mxcsrmask: m_state.context.fpu.avx.__fpu_mxcsrmask = value->value.uint32; success = true; break;
+
+ case fpu_stmm0: memcpy (m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg, &value->value.uint8, 10); success = true; break;
+ case fpu_stmm1: memcpy (m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg, &value->value.uint8, 10); success = true; break;
+ case fpu_stmm2: memcpy (m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg, &value->value.uint8, 10); success = true; break;
+ case fpu_stmm3: memcpy (m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg, &value->value.uint8, 10); success = true; break;
+ case fpu_stmm4: memcpy (m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg, &value->value.uint8, 10); success = true; break;
+ case fpu_stmm5: memcpy (m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg, &value->value.uint8, 10); success = true; break;
+ case fpu_stmm6: memcpy (m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg, &value->value.uint8, 10); success = true; break;
+ case fpu_stmm7: memcpy (m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg, &value->value.uint8, 10); success = true; break;
+
+ case fpu_xmm0: memcpy(m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg, &value->value.uint8, 16); success = true; break;
+ case fpu_xmm1: memcpy(m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg, &value->value.uint8, 16); success = true; break;
+ case fpu_xmm2: memcpy(m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg, &value->value.uint8, 16); success = true; break;
+ case fpu_xmm3: memcpy(m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg, &value->value.uint8, 16); success = true; break;
+ case fpu_xmm4: memcpy(m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg, &value->value.uint8, 16); success = true; break;
+ case fpu_xmm5: memcpy(m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg, &value->value.uint8, 16); success = true; break;
+ case fpu_xmm6: memcpy(m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg, &value->value.uint8, 16); success = true; break;
+ case fpu_xmm7: memcpy(m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg, &value->value.uint8, 16); success = true; break;
+
+#define MEMCPY_YMM(n) \
+ memcpy(m_state.context.fpu.avx.__fpu_xmm##n.__xmm_reg, &value->value.uint8, 16); \
+ memcpy(m_state.context.fpu.avx.__fpu_ymmh##n.__xmm_reg, (&value->value.uint8) + 16, 16);
+ case fpu_ymm0: MEMCPY_YMM(0); return true;
+ case fpu_ymm1: MEMCPY_YMM(1); return true;
+ case fpu_ymm2: MEMCPY_YMM(2); return true;
+ case fpu_ymm3: MEMCPY_YMM(3); return true;
+ case fpu_ymm4: MEMCPY_YMM(4); return true;
+ case fpu_ymm5: MEMCPY_YMM(5); return true;
+ case fpu_ymm6: MEMCPY_YMM(6); return true;
+ case fpu_ymm7: MEMCPY_YMM(7); return true;
+#undef MEMCPY_YMM
+ }
+ }
+ else
+ {
+ switch (reg)
+ {
+ case fpu_fcw: *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = value->value.uint16; success = true; break;
+ case fpu_fsw: *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) = value->value.uint16; success = true; break;
+ case fpu_ftw: m_state.context.fpu.no_avx.__fpu_ftw = value->value.uint8; success = true; break;
+ case fpu_fop: m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16; success = true; break;
+ case fpu_ip: m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32; success = true; break;
+ case fpu_cs: m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16; success = true; break;
+ case fpu_dp: m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32; success = true; break;
+ case fpu_ds: m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16; success = true; break;
+ case fpu_mxcsr: m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32; success = true; break;
+ case fpu_mxcsrmask: m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32; success = true; break;
+
+ case fpu_stmm0: memcpy (m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg, &value->value.uint8, 10); success = true; break;
+ case fpu_stmm1: memcpy (m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg, &value->value.uint8, 10); success = true; break;
+ case fpu_stmm2: memcpy (m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg, &value->value.uint8, 10); success = true; break;
+ case fpu_stmm3: memcpy (m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg, &value->value.uint8, 10); success = true; break;
+ case fpu_stmm4: memcpy (m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg, &value->value.uint8, 10); success = true; break;
+ case fpu_stmm5: memcpy (m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg, &value->value.uint8, 10); success = true; break;
+ case fpu_stmm6: memcpy (m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg, &value->value.uint8, 10); success = true; break;
+ case fpu_stmm7: memcpy (m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg, &value->value.uint8, 10); success = true; break;
+
+ case fpu_xmm0: memcpy(m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg, &value->value.uint8, 16); success = true; break;
+ case fpu_xmm1: memcpy(m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg, &value->value.uint8, 16); success = true; break;
+ case fpu_xmm2: memcpy(m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg, &value->value.uint8, 16); success = true; break;
+ case fpu_xmm3: memcpy(m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg, &value->value.uint8, 16); success = true; break;
+ case fpu_xmm4: memcpy(m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg, &value->value.uint8, 16); success = true; break;
+ case fpu_xmm5: memcpy(m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg, &value->value.uint8, 16); success = true; break;
+ case fpu_xmm6: memcpy(m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg, &value->value.uint8, 16); success = true; break;
+ case fpu_xmm7: memcpy(m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg, &value->value.uint8, 16); success = true; break;
+ }
+ }
+ break;
+
+ case e_regSetEXC:
+ if (reg < k_num_exc_registers)
+ {
+ (&m_state.context.exc.__trapno)[reg] = value->value.uint32;
+ success = true;
+ }
+ break;
+ }
+ }
+
+ if (success)
+ return SetRegisterState(set) == KERN_SUCCESS;
+ return false;
+}
+
+
+uint32_t
+DNBArchImplI386::GetRegisterContextSize()
+{
+ static uint32_t g_cached_size = 0;
+ if (g_cached_size == 0)
+ {
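+        // Only registers without a value_regs list contribute bytes here;
+        // registers contained in another register (e.g. xmm inside ymm in the
+        // AVX set) share storage that is already counted.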
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ {
+ for (size_t i=0; i<k_num_fpu_registers_avx; ++i)
+ {
+ if (g_fpu_registers_avx[i].value_regs == NULL)
+ g_cached_size += g_fpu_registers_avx[i].size;
+ }
+ }
+ else
+ {
+ for (size_t i=0; i<k_num_fpu_registers_no_avx; ++i)
+ {
+ if (g_fpu_registers_no_avx[i].value_regs == NULL)
+ g_cached_size += g_fpu_registers_no_avx[i].size;
+ }
+ }
+ DNBLogThreaded ("DNBArchImplX86_64::GetRegisterContextSize() - GPR = %zu, FPU = %u, EXC = %zu", sizeof(GPR), g_cached_size, sizeof(EXC));
+ g_cached_size += sizeof(GPR);
+ g_cached_size += sizeof(EXC);
+ DNBLogThreaded ("DNBArchImplX86_64::GetRegisterContextSize() - GPR + FPU + EXC = %u", g_cached_size);
+ }
+ return g_cached_size;
+}
+
+
+nub_size_t
+DNBArchImplI386::GetRegisterContext (void *buf, nub_size_t buf_len)
+{
+ uint32_t size = GetRegisterContextSize();
+
+ if (buf && buf_len)
+ {
+ if (size > buf_len)
+ size = static_cast<uint32_t>(buf_len);
+
+ bool force = false;
+ kern_return_t kret;
+ if ((kret = GetGPRState(force)) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = %llu) error: GPR regs failed to read: %u ", buf, (uint64_t)buf_len, kret);
+ size = 0;
+ }
+ else if ((kret = GetFPUState(force)) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = %llu) error: %s regs failed to read: %u", buf, (uint64_t)buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
+ size = 0;
+ }
+ else if ((kret = GetEXCState(force)) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = %llu) error: EXC regs failed to read: %u", buf, (uint64_t)buf_len, kret);
+ size = 0;
+ }
+ else
+ {
+ uint8_t *p = (uint8_t *)buf;
+ // Copy the GPR registers
+ memcpy(p, &m_state.context.gpr, sizeof(GPR));
+ p += sizeof(GPR);
+
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ {
+ // Walk around the gaps in the FPU regs
+ memcpy(p, &m_state.context.fpu.avx.__fpu_fcw, 5);
+ p += 5;
+ memcpy(p, &m_state.context.fpu.avx.__fpu_fop, 8);
+ p += 8;
+ memcpy(p, &m_state.context.fpu.avx.__fpu_dp, 6);
+ p += 6;
+ memcpy(p, &m_state.context.fpu.avx.__fpu_mxcsr, 8);
+ p += 8;
+
+ // Work around the padding between the stmm registers as they are 16
+ // byte structs with 10 bytes of the value in each
+ for (size_t i=0; i<8; ++i)
+ {
+ memcpy(p, &m_state.context.fpu.avx.__fpu_stmm0 + i, 10);
+ p += 10;
+ }
+
+ // Interleave the XMM and YMMH registers to make the YMM registers
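+                // Each transmitted ymmN is 32 bytes: the 16-byte xmmN low half
+                // followed by the 16-byte ymmhN high half.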
+ for (size_t i=0; i<8; ++i)
+ {
+ memcpy(p, &m_state.context.fpu.avx.__fpu_xmm0 + i, 16);
+ p += 16;
+ memcpy(p, &m_state.context.fpu.avx.__fpu_ymmh0 + i, 16);
+ p += 16;
+ }
+ }
+ else
+ {
+ // Walk around the gaps in the FPU regs
+ memcpy(p, &m_state.context.fpu.no_avx.__fpu_fcw, 5);
+ p += 5;
+ memcpy(p, &m_state.context.fpu.no_avx.__fpu_fop, 8);
+ p += 8;
+ memcpy(p, &m_state.context.fpu.no_avx.__fpu_dp, 6);
+ p += 6;
+ memcpy(p, &m_state.context.fpu.no_avx.__fpu_mxcsr, 8);
+ p += 8;
+
+ // Work around the padding between the stmm registers as they are 16
+ // byte structs with 10 bytes of the value in each
+ for (size_t i=0; i<8; ++i)
+ {
+ memcpy(p, &m_state.context.fpu.no_avx.__fpu_stmm0 + i, 10);
+ p += 10;
+ }
+
+ // Copy the XMM registers in a single block
+ memcpy(p, &m_state.context.fpu.no_avx.__fpu_xmm0, 8 * 16);
+ p += 8 * 16;
+ }
+
+ // Copy the exception registers
+ memcpy(p, &m_state.context.exc, sizeof(EXC));
+ p += sizeof(EXC);
+
+ // make sure we end up with exactly what we think we should have
+ size_t bytes_written = p - (uint8_t *)buf;
+ UNUSED_IF_ASSERT_DISABLED(bytes_written);
+ assert (bytes_written == size);
+ }
+ }
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = %llu) => %llu", buf, (uint64_t)buf_len, (uint64_t)size);
+ // Return the size of the register context even if NULL was passed in
+ return size;
+}
+
+nub_size_t
+DNBArchImplI386::SetRegisterContext (const void *buf, nub_size_t buf_len)
+{
+ nub_size_t size = sizeof (m_state.context);
+ if (buf == NULL || buf_len == 0)
+ size = 0;
+
+ if (size)
+ {
+ if (size > buf_len)
+ size = buf_len;
+
+ uint8_t *p = (uint8_t *)buf;
+ // Copy the GPR registers
+ memcpy(&m_state.context.gpr, p, sizeof(GPR));
+ p += sizeof(GPR);
+
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ {
+ // Walk around the gaps in the FPU regs
+ memcpy(&m_state.context.fpu.avx.__fpu_fcw, p, 5);
+ p += 5;
+ memcpy(&m_state.context.fpu.avx.__fpu_fop, p, 8);
+ p += 8;
+ memcpy(&m_state.context.fpu.avx.__fpu_dp, p, 6);
+ p += 6;
+ memcpy(&m_state.context.fpu.avx.__fpu_mxcsr, p, 8);
+ p += 8;
+
+ // Work around the padding between the stmm registers as they are 16
+ // byte structs with 10 bytes of the value in each
+ for (size_t i=0; i<8; ++i)
+ {
+ memcpy(&m_state.context.fpu.avx.__fpu_stmm0 + i, p, 10);
+ p += 10;
+ }
+
+ // Interleave the XMM and YMMH registers to make the YMM registers
+ for (size_t i=0; i<8; ++i)
+ {
+ memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + i, p, 16);
+ p += 16;
+ memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + i, p, 16);
+ p += 16;
+ }
+ }
+ else
+ {
+            // Walk around the gaps in the FPU regs
+ memcpy(&m_state.context.fpu.no_avx.__fpu_fcw, p, 5);
+ p += 5;
+ memcpy(&m_state.context.fpu.no_avx.__fpu_fop, p, 8);
+ p += 8;
+ memcpy(&m_state.context.fpu.no_avx.__fpu_dp, p, 6);
+ p += 6;
+ memcpy(&m_state.context.fpu.no_avx.__fpu_mxcsr, p, 8);
+ p += 8;
+
+ // Work around the padding between the stmm registers as they are 16
+ // byte structs with 10 bytes of the value in each
+ for (size_t i=0; i<8; ++i)
+ {
+ memcpy(&m_state.context.fpu.no_avx.__fpu_stmm0 + i, p, 10);
+ p += 10;
+ }
+
+ // Copy the XMM registers in a single block
+ memcpy(&m_state.context.fpu.no_avx.__fpu_xmm0, p, 8 * 16);
+ p += 8 * 16;
+ }
+
+ // Copy the exception registers
+ memcpy(&m_state.context.exc, p, sizeof(EXC));
+ p += sizeof(EXC);
+
+ // make sure we end up with exactly what we think we should have
+ size_t bytes_written = p - (uint8_t *)buf;
+ UNUSED_IF_ASSERT_DISABLED(bytes_written);
+ assert (bytes_written == size);
+ kern_return_t kret;
+ if ((kret = SetGPRState()) != KERN_SUCCESS)
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = %llu) error: GPR regs failed to write: %u", buf, (uint64_t)buf_len, kret);
+ if ((kret = SetFPUState()) != KERN_SUCCESS)
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = %llu) error: %s regs failed to write: %u", buf, (uint64_t)buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
+ if ((kret = SetEXCState()) != KERN_SUCCESS)
+            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = %llu) error: EXC regs failed to write: %u", buf, (uint64_t)buf_len, kret);
+ }
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = %llu) => %llu", buf, (uint64_t)buf_len, (uint64_t)size);
+ return size;
+}
+
+
+uint32_t
+DNBArchImplI386::SaveRegisterState ()
+{
+ kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
+ DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (SetGPRState() for stop_count = %u)", m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());
+
+ bool force = true;
+
+ if ((kret = GetGPRState(force)) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::SaveRegisterState () error: GPR regs failed to read: %u ", kret);
+ }
+ else if ((kret = GetFPUState(force)) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::SaveRegisterState () error: %s regs failed to read: %u", CPUHasAVX() ? "AVX" : "FPU", kret);
+ }
+ else
+ {
+ const uint32_t save_id = GetNextRegisterStateSaveID ();
+ m_saved_register_states[save_id] = m_state.context;
+ return save_id;
+ }
+ return 0;
+}
+bool
+DNBArchImplI386::RestoreRegisterState (uint32_t save_id)
+{
+ SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id);
+ if (pos != m_saved_register_states.end())
+ {
+ m_state.context.gpr = pos->second.gpr;
+ m_state.context.fpu = pos->second.fpu;
+ m_state.context.exc = pos->second.exc;
+ m_state.SetError(e_regSetGPR, Read, 0);
+ m_state.SetError(e_regSetFPU, Read, 0);
+ m_state.SetError(e_regSetEXC, Read, 0);
+ kern_return_t kret;
+ bool success = true;
+ if ((kret = SetGPRState()) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::RestoreRegisterState (save_id = %u) error: GPR regs failed to write: %u", save_id, kret);
+ success = false;
+ }
+ else if ((kret = SetFPUState()) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplI386::RestoreRegisterState (save_id = %u) error: %s regs failed to write: %u", save_id, CPUHasAVX() ? "AVX" : "FPU", kret);
+ success = false;
+ }
+ m_saved_register_states.erase(pos);
+ return success;
+ }
+ return false;
+}
+
+
+kern_return_t
+DNBArchImplI386::GetRegisterState(int set, bool force)
+{
+ switch (set)
+ {
+ case e_regSetALL: return GetGPRState(force) | GetFPUState(force) | GetEXCState(force);
+ case e_regSetGPR: return GetGPRState(force);
+ case e_regSetFPU: return GetFPUState(force);
+ case e_regSetEXC: return GetEXCState(force);
+ default: break;
+ }
+ return KERN_INVALID_ARGUMENT;
+}
+
+kern_return_t
+DNBArchImplI386::SetRegisterState(int set)
+{
+ // Make sure we have a valid context to set.
+ if (RegisterSetStateIsValid(set))
+ {
+ switch (set)
+ {
+ case e_regSetALL: return SetGPRState() | SetFPUState() | SetEXCState();
+ case e_regSetGPR: return SetGPRState();
+ case e_regSetFPU: return SetFPUState();
+ case e_regSetEXC: return SetEXCState();
+ default: break;
+ }
+ }
+ return KERN_INVALID_ARGUMENT;
+}
+
+bool
+DNBArchImplI386::RegisterSetStateIsValid (int set) const
+{
+ return m_state.RegsAreValid(set);
+}
+
+#endif // #if defined (__i386__)
diff --git a/tools/debugserver/source/MacOSX/i386/DNBArchImplI386.h b/tools/debugserver/source/MacOSX/i386/DNBArchImplI386.h
new file mode 100644
index 000000000000..6b4252151feb
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/i386/DNBArchImplI386.h
@@ -0,0 +1,255 @@
+//===-- DNBArchImplI386.h ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/25/07.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __DNBArchImplI386_h__
+#define __DNBArchImplI386_h__
+
+#if defined (__i386__) || defined (__x86_64__)
+
+#include "DNBArch.h"
+#include "../HasAVX.h"
+#include "MachRegisterStatesI386.h"
+
+#include <map>
+
+class MachThread;
+
+class DNBArchImplI386 : public DNBArchProtocol
+{
+public:
+ DNBArchImplI386(MachThread *thread) :
+ DNBArchProtocol(),
+ m_thread(thread),
+ m_state(),
+ m_2pc_dbg_checkpoint(),
+ m_2pc_trans_state(Trans_Done),
+ m_saved_register_states()
+ {
+ }
+ virtual ~DNBArchImplI386()
+ {
+ }
+
+ static void Initialize();
+
+ virtual bool GetRegisterValue(uint32_t set, uint32_t reg, DNBRegisterValue *value);
+ virtual bool SetRegisterValue(uint32_t set, uint32_t reg, const DNBRegisterValue *value);
+ virtual nub_size_t GetRegisterContext (void *buf, nub_size_t buf_len);
+ virtual nub_size_t SetRegisterContext (const void *buf, nub_size_t buf_len);
+ virtual uint32_t SaveRegisterState ();
+ virtual bool RestoreRegisterState (uint32_t save_id);
+
+ virtual kern_return_t GetRegisterState (int set, bool force);
+ virtual kern_return_t SetRegisterState (int set);
+ virtual bool RegisterSetStateIsValid (int set) const;
+
+ virtual uint64_t GetPC(uint64_t failValue); // Get program counter
+ virtual kern_return_t SetPC(uint64_t value);
+ virtual uint64_t GetSP(uint64_t failValue); // Get stack pointer
+ virtual void ThreadWillResume();
+ virtual bool ThreadDidStop();
+ virtual bool NotifyException(MachException::Data& exc);
+
+ virtual uint32_t NumSupportedHardwareWatchpoints();
+ virtual uint32_t EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write, bool also_set_on_task);
+ virtual bool DisableHardwareWatchpoint (uint32_t hw_break_index, bool also_set_on_task);
+ virtual uint32_t GetHardwareWatchpointHit(nub_addr_t &addr);
+
+protected:
+ kern_return_t EnableHardwareSingleStep (bool enable);
+
+ typedef __i386_thread_state_t GPR;
+ typedef __i386_float_state_t FPU;
+ typedef __i386_exception_state_t EXC;
+ typedef __i386_avx_state_t AVX;
+ typedef __i386_debug_state_t DBG;
+
+ static const DNBRegisterInfo g_gpr_registers[];
+ static const DNBRegisterInfo g_fpu_registers_no_avx[];
+ static const DNBRegisterInfo g_fpu_registers_avx[];
+ static const DNBRegisterInfo g_exc_registers[];
+ static const DNBRegisterSetInfo g_reg_sets_no_avx[];
+ static const DNBRegisterSetInfo g_reg_sets_avx[];
+ static const size_t k_num_gpr_registers;
+ static const size_t k_num_fpu_registers_no_avx;
+ static const size_t k_num_fpu_registers_avx;
+ static const size_t k_num_exc_registers;
+ static const size_t k_num_all_registers_no_avx;
+ static const size_t k_num_all_registers_avx;
+ static const size_t k_num_register_sets;
+
+ typedef enum RegisterSetTag
+ {
+ e_regSetALL = REGISTER_SET_ALL,
+ e_regSetGPR,
+ e_regSetFPU,
+ e_regSetEXC,
+ e_regSetDBG,
+ kNumRegisterSets
+ } RegisterSet;
+
+ typedef enum RegisterSetWordSizeTag
+ {
+ e_regSetWordSizeGPR = sizeof(GPR) / sizeof(int),
+ e_regSetWordSizeFPU = sizeof(FPU) / sizeof(int),
+ e_regSetWordSizeEXC = sizeof(EXC) / sizeof(int),
+ e_regSetWordSizeAVX = sizeof(AVX) / sizeof(int),
+ e_regSetWordSizeDBG = sizeof(DBG) / sizeof(int)
+ } RegisterSetWordSize;
+
+ enum
+ {
+ Read = 0,
+ Write = 1,
+ kNumErrors = 2
+ };
+
+ struct Context
+ {
+ GPR gpr;
+ union {
+ FPU no_avx;
+ AVX avx;
+ } fpu;
+ EXC exc;
+ DBG dbg;
+ };
+
+ struct State
+ {
+ Context context;
+ kern_return_t gpr_errs[2]; // Read/Write errors
+ kern_return_t fpu_errs[2]; // Read/Write errors
+ kern_return_t exc_errs[2]; // Read/Write errors
+ kern_return_t dbg_errs[2]; // Read/Write errors
+
+ State()
+ {
+ uint32_t i;
+ for (i=0; i<kNumErrors; i++)
+ {
+ gpr_errs[i] = -1;
+ fpu_errs[i] = -1;
+ exc_errs[i] = -1;
+ dbg_errs[i] = -1;
+ }
+ }
+ void InvalidateAllRegisterStates()
+ {
+ SetError (e_regSetALL, Read, -1);
+ }
+ kern_return_t GetError (int flavor, uint32_t err_idx) const
+ {
+ if (err_idx < kNumErrors)
+ {
+ switch (flavor)
+ {
+ // When getting all errors, just OR all values together to see if
+ // we got any kind of error.
+ case e_regSetALL: return gpr_errs[err_idx] |
+ fpu_errs[err_idx] |
+ exc_errs[err_idx];
+ case e_regSetGPR: return gpr_errs[err_idx];
+ case e_regSetFPU: return fpu_errs[err_idx];
+ case e_regSetEXC: return exc_errs[err_idx];
+ case e_regSetDBG: return dbg_errs[err_idx];
+ default: break;
+ }
+ }
+ return -1;
+ }
+ bool SetError (int flavor, uint32_t err_idx, kern_return_t err)
+ {
+ if (err_idx < kNumErrors)
+ {
+ switch (flavor)
+ {
+ case e_regSetALL:
+ gpr_errs[err_idx] =
+ fpu_errs[err_idx] =
+ exc_errs[err_idx] =
+ dbg_errs[err_idx] = err;
+ return true;
+
+ case e_regSetGPR:
+ gpr_errs[err_idx] = err;
+ return true;
+
+ case e_regSetFPU:
+ fpu_errs[err_idx] = err;
+ return true;
+
+ case e_regSetEXC:
+ exc_errs[err_idx] = err;
+ return true;
+
+ case e_regSetDBG:
+ dbg_errs[err_idx] = err;
+ return true;
+
+ default: break;
+ }
+ }
+ return false;
+ }
+ bool RegsAreValid (int flavor) const
+ {
+ return GetError(flavor, Read) == KERN_SUCCESS;
+ }
+ };
+
+ kern_return_t GetGPRState (bool force);
+ kern_return_t GetFPUState (bool force);
+ kern_return_t GetEXCState (bool force);
+ kern_return_t GetDBGState (bool force);
+
+ kern_return_t SetGPRState ();
+ kern_return_t SetFPUState ();
+ kern_return_t SetEXCState ();
+ kern_return_t SetDBGState (bool also_set_on_task);
+
+ static DNBArchProtocol *
+ Create (MachThread *thread);
+
+ static const uint8_t *
+ SoftwareBreakpointOpcode (nub_size_t byte_size);
+
+ static const DNBRegisterSetInfo *
+ GetRegisterSetInfo(nub_size_t *num_reg_sets);
+
+ static uint32_t
+ GetRegisterContextSize();
+
+ // Helper functions for watchpoint manipulations.
+ static void SetWatchpoint(DBG &debug_state, uint32_t hw_index, nub_addr_t addr, nub_size_t size, bool read, bool write);
+ static void ClearWatchpoint(DBG &debug_state, uint32_t hw_index);
+ static bool IsWatchpointVacant(const DBG &debug_state, uint32_t hw_index);
+ static void ClearWatchpointHits(DBG &debug_state);
+ static bool IsWatchpointHit(const DBG &debug_state, uint32_t hw_index);
+ static nub_addr_t GetWatchAddress(const DBG &debug_state, uint32_t hw_index);
+
+ virtual bool StartTransForHWP();
+ virtual bool RollbackTransForHWP();
+ virtual bool FinishTransForHWP();
+ DBG GetDBGCheckpoint();
+
+ MachThread *m_thread;
+ State m_state;
+ DBG m_2pc_dbg_checkpoint;
+    uint32_t m_2pc_trans_state; // State of the two-phase DBG transaction: Pending (0), Done (1), or Rolled Back (2).
+ typedef std::map<uint32_t, Context> SaveRegisterStates;
+ SaveRegisterStates m_saved_register_states;
+};
+
+#endif // #if defined (__i386__) || defined (__x86_64__)
+#endif // #ifndef __DNBArchImplI386_h__
diff --git a/tools/debugserver/source/MacOSX/i386/MachRegisterStatesI386.h b/tools/debugserver/source/MacOSX/i386/MachRegisterStatesI386.h
new file mode 100644
index 000000000000..59cfbe055a36
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/i386/MachRegisterStatesI386.h
@@ -0,0 +1,180 @@
+//===-- MachRegisterStatesI386.h --------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Sean Callanan on 3/16/11.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __MachRegisterStatesI386_h__
+#define __MachRegisterStatesI386_h__
+
+#include <inttypes.h>
+
+#define __i386_THREAD_STATE 1
+#define __i386_FLOAT_STATE 2
+#define __i386_EXCEPTION_STATE 3
+#define __i386_DEBUG_STATE 10
+#define __i386_AVX_STATE 16
+
+typedef struct {
+ uint32_t __eax;
+ uint32_t __ebx;
+ uint32_t __ecx;
+ uint32_t __edx;
+ uint32_t __edi;
+ uint32_t __esi;
+ uint32_t __ebp;
+ uint32_t __esp;
+ uint32_t __ss;
+ uint32_t __eflags;
+ uint32_t __eip;
+ uint32_t __cs;
+ uint32_t __ds;
+ uint32_t __es;
+ uint32_t __fs;
+ uint32_t __gs;
+} __i386_thread_state_t;
+
+typedef struct {
+ uint16_t __invalid : 1;
+ uint16_t __denorm : 1;
+ uint16_t __zdiv : 1;
+ uint16_t __ovrfl : 1;
+ uint16_t __undfl : 1;
+ uint16_t __precis : 1;
+ uint16_t __PAD1 : 2;
+ uint16_t __pc : 2;
+ uint16_t __rc : 2;
+ uint16_t __PAD2 : 1;
+ uint16_t __PAD3 : 3;
+} __i386_fp_control_t;
+
+typedef struct {
+ uint16_t __invalid : 1;
+ uint16_t __denorm : 1;
+ uint16_t __zdiv : 1;
+ uint16_t __ovrfl : 1;
+ uint16_t __undfl : 1;
+ uint16_t __precis : 1;
+ uint16_t __stkflt : 1;
+ uint16_t __errsumm : 1;
+ uint16_t __c0 : 1;
+ uint16_t __c1 : 1;
+ uint16_t __c2 : 1;
+ uint16_t __tos : 3;
+ uint16_t __c3 : 1;
+ uint16_t __busy : 1;
+} __i386_fp_status_t;
+
+typedef struct {
+ uint8_t __mmst_reg[10];
+ uint8_t __mmst_rsrv[6];
+} __i386_mmst_reg;
+
+typedef struct {
+ uint8_t __xmm_reg[16];
+} __i386_xmm_reg;
+
+typedef struct {
+ uint32_t __fpu_reserved[2];
+ __i386_fp_control_t __fpu_fcw;
+ __i386_fp_status_t __fpu_fsw;
+ uint8_t __fpu_ftw;
+ uint8_t __fpu_rsrv1;
+ uint16_t __fpu_fop;
+ uint32_t __fpu_ip;
+ uint16_t __fpu_cs;
+ uint16_t __fpu_rsrv2;
+ uint32_t __fpu_dp;
+ uint16_t __fpu_ds;
+ uint16_t __fpu_rsrv3;
+ uint32_t __fpu_mxcsr;
+ uint32_t __fpu_mxcsrmask;
+ __i386_mmst_reg __fpu_stmm0;
+ __i386_mmst_reg __fpu_stmm1;
+ __i386_mmst_reg __fpu_stmm2;
+ __i386_mmst_reg __fpu_stmm3;
+ __i386_mmst_reg __fpu_stmm4;
+ __i386_mmst_reg __fpu_stmm5;
+ __i386_mmst_reg __fpu_stmm6;
+ __i386_mmst_reg __fpu_stmm7;
+ __i386_xmm_reg __fpu_xmm0;
+ __i386_xmm_reg __fpu_xmm1;
+ __i386_xmm_reg __fpu_xmm2;
+ __i386_xmm_reg __fpu_xmm3;
+ __i386_xmm_reg __fpu_xmm4;
+ __i386_xmm_reg __fpu_xmm5;
+ __i386_xmm_reg __fpu_xmm6;
+ __i386_xmm_reg __fpu_xmm7;
+ uint8_t __fpu_rsrv4[14*16];
+ uint32_t __fpu_reserved1;
+} __i386_float_state_t;
+
+typedef struct {
+ uint32_t __fpu_reserved[2];
+ __i386_fp_control_t __fpu_fcw;
+ __i386_fp_status_t __fpu_fsw;
+ uint8_t __fpu_ftw;
+ uint8_t __fpu_rsrv1;
+ uint16_t __fpu_fop;
+ uint32_t __fpu_ip;
+ uint16_t __fpu_cs;
+ uint16_t __fpu_rsrv2;
+ uint32_t __fpu_dp;
+ uint16_t __fpu_ds;
+ uint16_t __fpu_rsrv3;
+ uint32_t __fpu_mxcsr;
+ uint32_t __fpu_mxcsrmask;
+ __i386_mmst_reg __fpu_stmm0;
+ __i386_mmst_reg __fpu_stmm1;
+ __i386_mmst_reg __fpu_stmm2;
+ __i386_mmst_reg __fpu_stmm3;
+ __i386_mmst_reg __fpu_stmm4;
+ __i386_mmst_reg __fpu_stmm5;
+ __i386_mmst_reg __fpu_stmm6;
+ __i386_mmst_reg __fpu_stmm7;
+ __i386_xmm_reg __fpu_xmm0;
+ __i386_xmm_reg __fpu_xmm1;
+ __i386_xmm_reg __fpu_xmm2;
+ __i386_xmm_reg __fpu_xmm3;
+ __i386_xmm_reg __fpu_xmm4;
+ __i386_xmm_reg __fpu_xmm5;
+ __i386_xmm_reg __fpu_xmm6;
+ __i386_xmm_reg __fpu_xmm7;
+ uint8_t __fpu_rsrv4[14*16];
+ uint32_t __fpu_reserved1;
+ uint8_t __avx_reserved1[64];
+ __i386_xmm_reg __fpu_ymmh0;
+ __i386_xmm_reg __fpu_ymmh1;
+ __i386_xmm_reg __fpu_ymmh2;
+ __i386_xmm_reg __fpu_ymmh3;
+ __i386_xmm_reg __fpu_ymmh4;
+ __i386_xmm_reg __fpu_ymmh5;
+ __i386_xmm_reg __fpu_ymmh6;
+ __i386_xmm_reg __fpu_ymmh7;
+} __i386_avx_state_t;
+
+typedef struct {
+ uint32_t __trapno;
+ uint32_t __err;
+ uint32_t __faultvaddr;
+} __i386_exception_state_t;
+
+typedef struct {
+ uint32_t __dr0;
+ uint32_t __dr1;
+ uint32_t __dr2;
+ uint32_t __dr3;
+ uint32_t __dr4;
+ uint32_t __dr5;
+ uint32_t __dr6;
+ uint32_t __dr7;
+} __i386_debug_state_t;
+
+#endif
diff --git a/tools/debugserver/source/MacOSX/i386/Makefile b/tools/debugserver/source/MacOSX/i386/Makefile
new file mode 100644
index 000000000000..f770b19834bb
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/i386/Makefile
@@ -0,0 +1,19 @@
+##===- tools/debugserver/source/MacOSX/i386/Makefile -------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LLDB_LEVEL := ../../../../..
+
+LIBRARYNAME := lldbDebugserverMacOSX_I386
+BUILD_ARCHIVE = 1
+
+SOURCES := DNBArchImplI386.cpp
+
+include $(LLDB_LEVEL)/Makefile
+
+CPP.Flags += -I$(PROJ_SRC_DIR)/.. -I$(PROJ_SRC_DIR)/../.. -I$(PROJ_OBJ_DIR)/../../..
\ No newline at end of file
diff --git a/tools/debugserver/source/MacOSX/ppc/DNBArchImpl.cpp b/tools/debugserver/source/MacOSX/ppc/DNBArchImpl.cpp
new file mode 100644
index 000000000000..c6f1a718ac92
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/ppc/DNBArchImpl.cpp
@@ -0,0 +1,569 @@
+//===-- DNBArchImpl.cpp -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/25/07.
+//
+//===----------------------------------------------------------------------===//
+
+#if defined (__powerpc__) || defined (__ppc__) || defined (__ppc64__)
+
+#if __DARWIN_UNIX03
+#define PREFIX_DOUBLE_UNDERSCORE_DARWIN_UNIX03(reg) __##reg
+#else
+#define PREFIX_DOUBLE_UNDERSCORE_DARWIN_UNIX03(reg) reg
+#endif
+
+#include "MacOSX/ppc/DNBArchImpl.h"
+#include "MacOSX/MachThread.h"
+#include "DNBBreakpoint.h"
+#include "DNBLog.h"
+#include "DNBRegisterInfo.h"
+
+static const uint8_t g_breakpoint_opcode[] = { 0x7F, 0xC0, 0x00, 0x08 };
+
+const uint8_t *
+DNBArchMachPPC::SoftwareBreakpointOpcode (nub_size_t size)
+{
+ if (size == 4)
+ return g_breakpoint_opcode;
+ return NULL;
+}
+
+uint32_t
+DNBArchMachPPC::GetCPUType()
+{
+ return CPU_TYPE_POWERPC;
+}
+
+uint64_t
+DNBArchMachPPC::GetPC(uint64_t failValue)
+{
+ // Get program counter
+ if (GetGPRState(false) == KERN_SUCCESS)
+ return m_state.gpr.PREFIX_DOUBLE_UNDERSCORE_DARWIN_UNIX03(srr0);
+ return failValue;
+}
+
+kern_return_t
+DNBArchMachPPC::SetPC(uint64_t value)
+{
+ // Get program counter
+ kern_return_t err = GetGPRState(false);
+ if (err == KERN_SUCCESS)
+ {
+ m_state.gpr.PREFIX_DOUBLE_UNDERSCORE_DARWIN_UNIX03(srr0) = value;
+ err = SetGPRState();
+ }
+ return err;
+}
+
+uint64_t
+DNBArchMachPPC::GetSP(uint64_t failValue)
+{
+ // Get stack pointer
+ if (GetGPRState(false) == KERN_SUCCESS)
+ return m_state.gpr.PREFIX_DOUBLE_UNDERSCORE_DARWIN_UNIX03(r1);
+ return failValue;
+}
+
+kern_return_t
+DNBArchMachPPC::GetGPRState(bool force)
+{
+ if (force || m_state.GetError(e_regSetGPR, Read))
+ {
+ mach_msg_type_number_t count = e_regSetWordSizeGPR;
+ m_state.SetError(e_regSetGPR, Read, ::thread_get_state(m_thread->MachPortNumber(), e_regSetGPR, (thread_state_t)&m_state.gpr, &count));
+ }
+ return m_state.GetError(e_regSetGPR, Read);
+}
+
+kern_return_t
+DNBArchMachPPC::GetFPRState(bool force)
+{
+ if (force || m_state.GetError(e_regSetFPR, Read))
+ {
+ mach_msg_type_number_t count = e_regSetWordSizeFPR;
+ m_state.SetError(e_regSetFPR, Read, ::thread_get_state(m_thread->MachPortNumber(), e_regSetFPR, (thread_state_t)&m_state.fpr, &count));
+ }
+ return m_state.GetError(e_regSetFPR, Read);
+}
+
+kern_return_t
+DNBArchMachPPC::GetEXCState(bool force)
+{
+ if (force || m_state.GetError(e_regSetEXC, Read))
+ {
+ mach_msg_type_number_t count = e_regSetWordSizeEXC;
+ m_state.SetError(e_regSetEXC, Read, ::thread_get_state(m_thread->MachPortNumber(), e_regSetEXC, (thread_state_t)&m_state.exc, &count));
+ }
+ return m_state.GetError(e_regSetEXC, Read);
+}
+
+kern_return_t
+DNBArchMachPPC::GetVECState(bool force)
+{
+ if (force || m_state.GetError(e_regSetVEC, Read))
+ {
+ mach_msg_type_number_t count = e_regSetWordSizeVEC;
+ m_state.SetError(e_regSetVEC, Read, ::thread_get_state(m_thread->MachPortNumber(), e_regSetVEC, (thread_state_t)&m_state.vec, &count));
+ }
+ return m_state.GetError(e_regSetVEC, Read);
+}
+
+kern_return_t
+DNBArchMachPPC::SetGPRState()
+{
+ m_state.SetError(e_regSetGPR, Write, ::thread_set_state(m_thread->MachPortNumber(), e_regSetGPR, (thread_state_t)&m_state.gpr, e_regSetWordSizeGPR));
+ return m_state.GetError(e_regSetGPR, Write);
+}
+
+kern_return_t
+DNBArchMachPPC::SetFPRState()
+{
+ m_state.SetError(e_regSetFPR, Write, ::thread_set_state(m_thread->MachPortNumber(), e_regSetFPR, (thread_state_t)&m_state.fpr, e_regSetWordSizeFPR));
+ return m_state.GetError(e_regSetFPR, Write);
+}
+
+kern_return_t
+DNBArchMachPPC::SetEXCState()
+{
+ m_state.SetError(e_regSetEXC, Write, ::thread_set_state(m_thread->MachPortNumber(), e_regSetEXC, (thread_state_t)&m_state.exc, e_regSetWordSizeEXC));
+ return m_state.GetError(e_regSetEXC, Write);
+}
+
+kern_return_t
+DNBArchMachPPC::SetVECState()
+{
+ m_state.SetError(e_regSetVEC, Write, ::thread_set_state(m_thread->MachPortNumber(), e_regSetVEC, (thread_state_t)&m_state.vec, e_regSetWordSizeVEC));
+ return m_state.GetError(e_regSetVEC, Write);
+}
+
+bool
+DNBArchMachPPC::ThreadWillResume()
+{
+ bool success = true;
+
+ // Do we need to step this thread? If so, let the mach thread tell us so.
+ if (m_thread->IsStepping())
+ {
+ // This is the primary thread, let the arch do anything it needs
+ success = EnableHardwareSingleStep(true) == KERN_SUCCESS;
+ }
+ return success;
+}
+
+bool
+DNBArchMachPPC::ThreadDidStop()
+{
+ bool success = true;
+
+ m_state.InvalidateAllRegisterStates();
+
+ // Are we stepping a single instruction?
+ if (GetGPRState(true) == KERN_SUCCESS)
+ {
+ // We are single stepping, was this the primary thread?
+ if (m_thread->IsStepping())
+ {
+ // This was the primary thread, we need to clear the trace
+ // bit if so.
+ success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
+ }
+ else
+ {
+ // The MachThread will automatically restore the suspend count
+ // in ThreadDidStop(), so we don't need to do anything here if
+ // we weren't the primary thread the last time
+ }
+ }
+ return success;
+}
+
+
+// Set the single step bit in the processor status register.
+kern_return_t
+DNBArchMachPPC::EnableHardwareSingleStep (bool enable)
+{
+ DNBLogThreadedIf(LOG_STEP, "DNBArchMachPPC::EnableHardwareSingleStep( enable = %d )", enable);
+ if (GetGPRState(false) == KERN_SUCCESS)
+ {
+ const uint32_t trace_bit = 0x400;
+ if (enable)
+ m_state.gpr.PREFIX_DOUBLE_UNDERSCORE_DARWIN_UNIX03(srr1) |= trace_bit;
+ else
+ m_state.gpr.PREFIX_DOUBLE_UNDERSCORE_DARWIN_UNIX03(srr1) &= ~trace_bit;
+ return SetGPRState();
+ }
+ return m_state.GetError(e_regSetGPR, Read);
+}
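+
+// Illustrative note (an assumption, not part of the original sources): the
+// 0x400 trace_bit above appears to correspond to the single-step enable (SE)
+// bit of the PowerPC machine state register image held in srr1, so:
+//   srr1 |= 0x400;    // trap after every instruction
+//   srr1 &= ~0x400;   // resume normal execution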
+
+//----------------------------------------------------------------------
+// Register information definitions for 32 bit PowerPC.
+//----------------------------------------------------------------------
+
+enum gpr_regnums
+{
+ e_regNumGPR_srr0,
+ e_regNumGPR_srr1,
+ e_regNumGPR_r0,
+ e_regNumGPR_r1,
+ e_regNumGPR_r2,
+ e_regNumGPR_r3,
+ e_regNumGPR_r4,
+ e_regNumGPR_r5,
+ e_regNumGPR_r6,
+ e_regNumGPR_r7,
+ e_regNumGPR_r8,
+ e_regNumGPR_r9,
+ e_regNumGPR_r10,
+ e_regNumGPR_r11,
+ e_regNumGPR_r12,
+ e_regNumGPR_r13,
+ e_regNumGPR_r14,
+ e_regNumGPR_r15,
+ e_regNumGPR_r16,
+ e_regNumGPR_r17,
+ e_regNumGPR_r18,
+ e_regNumGPR_r19,
+ e_regNumGPR_r20,
+ e_regNumGPR_r21,
+ e_regNumGPR_r22,
+ e_regNumGPR_r23,
+ e_regNumGPR_r24,
+ e_regNumGPR_r25,
+ e_regNumGPR_r26,
+ e_regNumGPR_r27,
+ e_regNumGPR_r28,
+ e_regNumGPR_r29,
+ e_regNumGPR_r30,
+ e_regNumGPR_r31,
+ e_regNumGPR_cr,
+ e_regNumGPR_xer,
+ e_regNumGPR_lr,
+ e_regNumGPR_ctr,
+ e_regNumGPR_mq,
+ e_regNumGPR_vrsave
+};
+
+
+
+
+// General purpose registers
+static DNBRegisterInfo g_gpr_registers[] =
+{
+ { "srr0" , Uint, 4, Hex },
+ { "srr1" , Uint, 4, Hex },
+ { "r0" , Uint, 4, Hex },
+ { "r1" , Uint, 4, Hex },
+ { "r2" , Uint, 4, Hex },
+ { "r3" , Uint, 4, Hex },
+ { "r4" , Uint, 4, Hex },
+ { "r5" , Uint, 4, Hex },
+ { "r6" , Uint, 4, Hex },
+ { "r7" , Uint, 4, Hex },
+ { "r8" , Uint, 4, Hex },
+ { "r9" , Uint, 4, Hex },
+ { "r10" , Uint, 4, Hex },
+ { "r11" , Uint, 4, Hex },
+ { "r12" , Uint, 4, Hex },
+ { "r13" , Uint, 4, Hex },
+ { "r14" , Uint, 4, Hex },
+ { "r15" , Uint, 4, Hex },
+ { "r16" , Uint, 4, Hex },
+ { "r17" , Uint, 4, Hex },
+ { "r18" , Uint, 4, Hex },
+ { "r19" , Uint, 4, Hex },
+ { "r20" , Uint, 4, Hex },
+ { "r21" , Uint, 4, Hex },
+ { "r22" , Uint, 4, Hex },
+ { "r23" , Uint, 4, Hex },
+ { "r24" , Uint, 4, Hex },
+ { "r25" , Uint, 4, Hex },
+ { "r26" , Uint, 4, Hex },
+ { "r27" , Uint, 4, Hex },
+ { "r28" , Uint, 4, Hex },
+ { "r29" , Uint, 4, Hex },
+ { "r30" , Uint, 4, Hex },
+ { "r31" , Uint, 4, Hex },
+ { "cr" , Uint, 4, Hex },
+ { "xer" , Uint, 4, Hex },
+ { "lr" , Uint, 4, Hex },
+ { "ctr" , Uint, 4, Hex },
+ { "mq" , Uint, 4, Hex },
+ { "vrsave", Uint, 4, Hex },
+};
+
+// Floating point registers
+static DNBRegisterInfo g_fpr_registers[] =
+{
+ { "fp0" , IEEE754, 8, Float },
+ { "fp1" , IEEE754, 8, Float },
+ { "fp2" , IEEE754, 8, Float },
+ { "fp3" , IEEE754, 8, Float },
+ { "fp4" , IEEE754, 8, Float },
+ { "fp5" , IEEE754, 8, Float },
+ { "fp6" , IEEE754, 8, Float },
+ { "fp7" , IEEE754, 8, Float },
+ { "fp8" , IEEE754, 8, Float },
+ { "fp9" , IEEE754, 8, Float },
+ { "fp10" , IEEE754, 8, Float },
+ { "fp11" , IEEE754, 8, Float },
+ { "fp12" , IEEE754, 8, Float },
+ { "fp13" , IEEE754, 8, Float },
+ { "fp14" , IEEE754, 8, Float },
+ { "fp15" , IEEE754, 8, Float },
+ { "fp16" , IEEE754, 8, Float },
+ { "fp17" , IEEE754, 8, Float },
+ { "fp18" , IEEE754, 8, Float },
+ { "fp19" , IEEE754, 8, Float },
+ { "fp20" , IEEE754, 8, Float },
+ { "fp21" , IEEE754, 8, Float },
+ { "fp22" , IEEE754, 8, Float },
+ { "fp23" , IEEE754, 8, Float },
+ { "fp24" , IEEE754, 8, Float },
+ { "fp25" , IEEE754, 8, Float },
+ { "fp26" , IEEE754, 8, Float },
+ { "fp27" , IEEE754, 8, Float },
+ { "fp28" , IEEE754, 8, Float },
+ { "fp29" , IEEE754, 8, Float },
+ { "fp30" , IEEE754, 8, Float },
+ { "fp31" , IEEE754, 8, Float },
+ { "fpscr" , Uint, 4, Hex }
+};
+
+// Exception registers
+
+static DNBRegisterInfo g_exc_registers[] =
+{
+ { "dar" , Uint, 4, Hex },
+ { "dsisr" , Uint, 4, Hex },
+ { "exception" , Uint, 4, Hex }
+};
+
+// Altivec registers
+static DNBRegisterInfo g_vec_registers[] =
+{
+ { "vr0" , Vector, 16, VectorOfFloat32 },
+ { "vr1" , Vector, 16, VectorOfFloat32 },
+ { "vr2" , Vector, 16, VectorOfFloat32 },
+ { "vr3" , Vector, 16, VectorOfFloat32 },
+ { "vr4" , Vector, 16, VectorOfFloat32 },
+ { "vr5" , Vector, 16, VectorOfFloat32 },
+ { "vr6" , Vector, 16, VectorOfFloat32 },
+ { "vr7" , Vector, 16, VectorOfFloat32 },
+ { "vr8" , Vector, 16, VectorOfFloat32 },
+ { "vr9" , Vector, 16, VectorOfFloat32 },
+ { "vr10" , Vector, 16, VectorOfFloat32 },
+ { "vr11" , Vector, 16, VectorOfFloat32 },
+ { "vr12" , Vector, 16, VectorOfFloat32 },
+ { "vr13" , Vector, 16, VectorOfFloat32 },
+ { "vr14" , Vector, 16, VectorOfFloat32 },
+ { "vr15" , Vector, 16, VectorOfFloat32 },
+ { "vr16" , Vector, 16, VectorOfFloat32 },
+ { "vr17" , Vector, 16, VectorOfFloat32 },
+ { "vr18" , Vector, 16, VectorOfFloat32 },
+ { "vr19" , Vector, 16, VectorOfFloat32 },
+ { "vr20" , Vector, 16, VectorOfFloat32 },
+ { "vr21" , Vector, 16, VectorOfFloat32 },
+ { "vr22" , Vector, 16, VectorOfFloat32 },
+ { "vr23" , Vector, 16, VectorOfFloat32 },
+ { "vr24" , Vector, 16, VectorOfFloat32 },
+ { "vr25" , Vector, 16, VectorOfFloat32 },
+ { "vr26" , Vector, 16, VectorOfFloat32 },
+ { "vr27" , Vector, 16, VectorOfFloat32 },
+ { "vr28" , Vector, 16, VectorOfFloat32 },
+ { "vr29" , Vector, 16, VectorOfFloat32 },
+ { "vr30" , Vector, 16, VectorOfFloat32 },
+ { "vr31" , Vector, 16, VectorOfFloat32 },
+ { "vscr" , Uint, 16, Hex },
+ { "vrvalid" , Uint, 4, Hex }
+};
+
+// Number of registers in each register set
+const size_t k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
+const size_t k_num_fpr_registers = sizeof(g_fpr_registers)/sizeof(DNBRegisterInfo);
+const size_t k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
+const size_t k_num_vec_registers = sizeof(g_vec_registers)/sizeof(DNBRegisterInfo);
+// Total number of registers for this architecture
+const size_t k_num_ppc_registers = k_num_gpr_registers + k_num_fpr_registers + k_num_exc_registers + k_num_vec_registers;
+
+//----------------------------------------------------------------------
+// Register set definitions. The first definition, at register set index
+// zero, is for all registers, followed by the other register sets. The
+// register information for the "all" register set need not be filled in.
+//----------------------------------------------------------------------
+static const DNBRegisterSetInfo g_reg_sets[] =
+{
+ { "PowerPC Registers", NULL, k_num_ppc_registers },
+ { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers },
+ { "Floating Point Registers", g_fpr_registers, k_num_fpr_registers },
+ { "Exception State Registers", g_exc_registers, k_num_exc_registers },
+ { "Altivec Registers", g_vec_registers, k_num_vec_registers }
+};
+// Total number of register sets for this architecture
+const size_t k_num_register_sets = sizeof(g_reg_sets)/sizeof(DNBRegisterSetInfo);
+
+
+const DNBRegisterSetInfo *
+DNBArchMachPPC::GetRegisterSetInfo(nub_size_t *num_reg_sets) const
+{
+ *num_reg_sets = k_num_register_sets;
+ return g_reg_sets;
+}
+
+bool
+DNBArchMachPPC::GetRegisterValue(uint32_t set, uint32_t reg, DNBRegisterValue *value) const
+{
+ if (set == REGISTER_SET_GENERIC)
+ {
+ switch (reg)
+ {
+ case GENERIC_REGNUM_PC: // Program Counter
+ set = e_regSetGPR;
+ reg = e_regNumGPR_srr0;
+ break;
+
+ case GENERIC_REGNUM_SP: // Stack Pointer
+ set = e_regSetGPR;
+ reg = e_regNumGPR_r1;
+ break;
+
+ case GENERIC_REGNUM_FP: // Frame Pointer
+ // Return false for now instead of returning r30, since gcc 3.x
+ // used a variety of registers for the FP and we would have to
+ // inspect the stack to confirm there is a frame pointer before
+ // we could determine the FP.
+ return false;
+
+ case GENERIC_REGNUM_RA: // Return Address
+ set = e_regSetGPR;
+ reg = e_regNumGPR_lr;
+ break;
+
+ case GENERIC_REGNUM_FLAGS: // Processor flags register
+ set = e_regSetGPR;
+ reg = e_regNumGPR_srr1;
+ break;
+
+ default:
+ return false;
+ }
+ }
+
+ if (!m_state.RegsAreValid(set))
+ return false;
+
+ const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
+ if (regInfo)
+ {
+ value->info = *regInfo;
+ switch (set)
+ {
+ case e_regSetGPR:
+ if (reg < k_num_gpr_registers)
+ {
+ value->value.uint32 = (&m_state.gpr.PREFIX_DOUBLE_UNDERSCORE_DARWIN_UNIX03(srr0))[reg];
+ return true;
+ }
+ break;
+
+ case e_regSetFPR:
+ if (reg < 32)
+ {
+ value->value.float64 = m_state.fpr.PREFIX_DOUBLE_UNDERSCORE_DARWIN_UNIX03(fpregs)[reg];
+ return true;
+ }
+ else if (reg == 32)
+ {
+ value->value.uint32 = m_state.fpr.PREFIX_DOUBLE_UNDERSCORE_DARWIN_UNIX03(fpscr);
+ return true;
+ }
+ break;
+
+ case e_regSetEXC:
+ if (reg < k_num_exc_registers)
+ {
+ value->value.uint32 = (&m_state.exc.PREFIX_DOUBLE_UNDERSCORE_DARWIN_UNIX03(dar))[reg];
+ return true;
+ }
+ break;
+
+ case e_regSetVEC:
+ if (reg < k_num_vec_registers)
+ {
+ if (reg < 33) // VR0 - VR31 and VSCR
+ {
+ // Copy all 4 uint32 values for this vector register
+ value->value.v_uint32[0] = m_state.vec.PREFIX_DOUBLE_UNDERSCORE_DARWIN_UNIX03(save_vr)[reg][0];
+ value->value.v_uint32[1] = m_state.vec.PREFIX_DOUBLE_UNDERSCORE_DARWIN_UNIX03(save_vr)[reg][1];
+ value->value.v_uint32[2] = m_state.vec.PREFIX_DOUBLE_UNDERSCORE_DARWIN_UNIX03(save_vr)[reg][2];
+ value->value.v_uint32[3] = m_state.vec.PREFIX_DOUBLE_UNDERSCORE_DARWIN_UNIX03(save_vr)[reg][3];
+ return true;
+ }
+ else if (reg == 33) // VRVALID
+ {
+ value->value.uint32 = m_state.vec.PREFIX_DOUBLE_UNDERSCORE_DARWIN_UNIX03(save_vrvalid);
+ return true;
+ }
+ }
+ break;
+ }
+ }
+ return false;
+}
+
+
+kern_return_t
+DNBArchMachPPC::GetRegisterState(int set, bool force)
+{
+ switch (set)
+ {
+ case e_regSetALL:
+ return GetGPRState(force) |
+ GetFPRState(force) |
+ GetEXCState(force) |
+ GetVECState(force);
+ case e_regSetGPR: return GetGPRState(force);
+ case e_regSetFPR: return GetFPRState(force);
+ case e_regSetEXC: return GetEXCState(force);
+ case e_regSetVEC: return GetVECState(force);
+ default: break;
+ }
+ return KERN_INVALID_ARGUMENT;
+}
+
+kern_return_t
+DNBArchMachPPC::SetRegisterState(int set)
+{
+ // Make sure we have a valid context to set.
+ kern_return_t err = GetRegisterState(set, false);
+ if (err != KERN_SUCCESS)
+ return err;
+
+ switch (set)
+ {
+ case e_regSetALL: return SetGPRState() | SetFPRState() | SetEXCState() | SetVECState();
+ case e_regSetGPR: return SetGPRState();
+ case e_regSetFPR: return SetFPRState();
+ case e_regSetEXC: return SetEXCState();
+ case e_regSetVEC: return SetVECState();
+ default: break;
+ }
+ return KERN_INVALID_ARGUMENT;
+}
+
+bool
+DNBArchMachPPC::RegisterSetStateIsValid (int set) const
+{
+ return m_state.RegsAreValid(set);
+}
+
+
+#endif // #if defined (__powerpc__) || defined (__ppc__) || defined (__ppc64__)
+
diff --git a/tools/debugserver/source/MacOSX/ppc/DNBArchImpl.h b/tools/debugserver/source/MacOSX/ppc/DNBArchImpl.h
new file mode 100644
index 000000000000..8ea81538dc48
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/ppc/DNBArchImpl.h
@@ -0,0 +1,179 @@
+//===-- DNBArchImpl.h -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/25/07.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __DebugNubArchMachPPC_h__
+#define __DebugNubArchMachPPC_h__
+
+#if defined (__powerpc__) || defined (__ppc__) || defined (__ppc64__)
+
+#include "DNBArch.h"
+
+class MachThread;
+
+class DNBArchMachPPC : public DNBArchProtocol
+{
+public:
+ DNBArchMachPPC(MachThread *thread) :
+ m_thread(thread),
+ m_state()
+ {
+ }
+
+ virtual ~DNBArchMachPPC()
+ {
+ }
+
+ virtual const DNBRegisterSetInfo *
+ GetRegisterSetInfo(nub_size_t *num_reg_sets) const;
+ virtual bool GetRegisterValue(uint32_t set, uint32_t reg, DNBRegisterValue *value) const;
+ virtual kern_return_t GetRegisterState (int set, bool force);
+ virtual kern_return_t SetRegisterState (int set);
+ virtual bool RegisterSetStateIsValid (int set) const;
+
+ virtual uint64_t GetPC(uint64_t failValue); // Get program counter
+ virtual kern_return_t SetPC(uint64_t value);
+ virtual uint64_t GetSP(uint64_t failValue); // Get stack pointer
+ virtual bool ThreadWillResume();
+ virtual bool ThreadDidStop();
+
+ static const uint8_t * SoftwareBreakpointOpcode (nub_size_t byte_size);
+ static uint32_t GetCPUType();
+
+protected:
+
+
+ kern_return_t EnableHardwareSingleStep (bool enable);
+
+ typedef enum RegisterSetTag
+ {
+ e_regSetALL = REGISTER_SET_ALL,
+ e_regSetGPR,
+ e_regSetFPR,
+ e_regSetEXC,
+ e_regSetVEC,
+ kNumRegisterSets
+ } RegisterSet;
+
+ typedef enum RegisterSetWordSizeTag
+ {
+ e_regSetWordSizeGPR = PPC_THREAD_STATE_COUNT,
+ e_regSetWordSizeFPR = PPC_FLOAT_STATE_COUNT,
+ e_regSetWordSizeEXC = PPC_EXCEPTION_STATE_COUNT,
+ e_regSetWordSizeVEC = PPC_VECTOR_STATE_COUNT
+ } RegisterSetWordSize;
+
+ enum
+ {
+ Read = 0,
+ Write = 1,
+ kNumErrors = 2
+ };
+
+ struct State
+ {
+ ppc_thread_state_t gpr;
+ ppc_float_state_t fpr;
+ ppc_exception_state_t exc;
+ ppc_vector_state_t vec;
+ kern_return_t gpr_errs[2]; // Read/Write errors
+ kern_return_t fpr_errs[2]; // Read/Write errors
+ kern_return_t exc_errs[2]; // Read/Write errors
+ kern_return_t vec_errs[2]; // Read/Write errors
+
+ State()
+ {
+ uint32_t i;
+ for (i=0; i<kNumErrors; i++)
+ {
+ gpr_errs[i] = -1;
+ fpr_errs[i] = -1;
+ exc_errs[i] = -1;
+ vec_errs[i] = -1;
+ }
+ }
+ void InvalidateAllRegisterStates()
+ {
+ SetError (e_regSetALL, Read, -1);
+ }
+ kern_return_t GetError (int set, uint32_t err_idx) const
+ {
+ if (err_idx < kNumErrors)
+ {
+ switch (set)
+ {
+ // When getting all errors, just OR all values together to see if
+ // we got any kind of error.
+ case e_regSetALL: return gpr_errs[err_idx] | fpr_errs[err_idx] | exc_errs[err_idx] | vec_errs[err_idx];
+ case e_regSetGPR: return gpr_errs[err_idx];
+ case e_regSetFPR: return fpr_errs[err_idx];
+ case e_regSetEXC: return exc_errs[err_idx];
+ case e_regSetVEC: return vec_errs[err_idx];
+ default: break;
+ }
+ }
+ return -1;
+ }
+ bool SetError (int set, uint32_t err_idx, kern_return_t err)
+ {
+ if (err_idx < kNumErrors)
+ {
+ switch (set)
+ {
+ case e_regSetALL:
+ gpr_errs[err_idx] = fpr_errs[err_idx] = exc_errs[err_idx] = vec_errs[err_idx] = err;
+ return true;
+
+ case e_regSetGPR:
+ gpr_errs[err_idx] = err;
+ return true;
+
+ case e_regSetFPR:
+ fpr_errs[err_idx] = err;
+ return true;
+
+ case e_regSetEXC:
+ exc_errs[err_idx] = err;
+ return true;
+
+ case e_regSetVEC:
+ vec_errs[err_idx] = err;
+ return true;
+
+ default: break;
+ }
+ }
+ return false;
+ }
+ bool RegsAreValid (int set) const
+ {
+ return GetError(set, Read) == KERN_SUCCESS;
+ }
+ };
+
+ kern_return_t GetGPRState (bool force);
+ kern_return_t GetFPRState (bool force);
+ kern_return_t GetEXCState (bool force);
+ kern_return_t GetVECState (bool force);
+
+ kern_return_t SetGPRState ();
+ kern_return_t SetFPRState ();
+ kern_return_t SetEXCState ();
+ kern_return_t SetVECState ();
+
+protected:
+ MachThread * m_thread;
+ State m_state;
+};
+
+#endif // #if defined (__powerpc__) || defined (__ppc__) || defined (__ppc64__)
+#endif // #ifndef __DebugNubArchMachPPC_h__
diff --git a/tools/debugserver/source/MacOSX/stack_logging.h b/tools/debugserver/source/MacOSX/stack_logging.h
new file mode 100644
index 000000000000..5b0a3080349b
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/stack_logging.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 1999-2007 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef malloc_history_test_stack_logging_h
+#define malloc_history_test_stack_logging_h
+
+#import <malloc/malloc.h>
+
+#define stack_logging_type_free 0
+#define stack_logging_type_generic 1 /* anything that is not allocation/deallocation */
+#define stack_logging_type_alloc 2 /* malloc, realloc, etc... */
+#define stack_logging_type_dealloc 4 /* free, realloc, etc... */
+
+// Following flags are absorbed by stack_logging_log_stack()
+#define stack_logging_flag_zone 8 /* NSZoneMalloc, etc... */
+#define stack_logging_flag_calloc 16 /* multiply arguments to get the size */
+#define stack_logging_flag_object 32 /* NSAllocateObject(Class, extraBytes, zone) */
+#define stack_logging_flag_cleared 64 /* for NewEmptyHandle */
+#define stack_logging_flag_handle 128 /* for Handle (de-)allocation routines */
+#define stack_logging_flag_set_handle_size 256 /* (Handle, newSize) treated specially */
+
+/* Macro used to disguise addresses so that leak finding can work */
+#define STACK_LOGGING_DISGUISE(address) ((address) ^ 0x00005555) /* nicely idempotent */
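+/* Illustrative note (not part of the original header): because XOR with a
+   fixed constant is its own inverse, applying the macro twice recovers the
+   original address:
+     STACK_LOGGING_DISGUISE(STACK_LOGGING_DISGUISE(addr)) == addr
+   which is what lets leak finders undo the disguise when scanning logs. */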
+
+extern "C" int stack_logging_enable_logging; /* when clear, no logging takes place */
+extern "C" int stack_logging_dontcompact; /* default is to compact; when set does not compact alloc/free logs; useful for tracing history */
+
+
+extern "C" void stack_logging_log_stack(unsigned type, unsigned arg1, unsigned arg2, unsigned arg3, unsigned result, unsigned num_hot_to_skip);
+/* This is the old log-to-memory logger, which is now deprecated. It remains for compatibility with performance tools that haven't been updated to disk_stack_logging_log_stack() yet. */
+
+extern "C" void __disk_stack_logging_log_stack(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t size, uintptr_t ptr_arg, uintptr_t return_val, uint32_t num_hot_to_skip);
+/* Fits as the malloc_logger; logs malloc/free/realloc events and can log custom events if called directly */
+
+
+/* 64-bit-aware stack log access. */
+typedef struct {
+ uint32_t type_flags;
+ uint64_t stack_identifier;
+ uint64_t argument;
+ mach_vm_address_t address;
+} mach_stack_logging_record_t;
+
+extern "C" kern_return_t __mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count);
+/* Gets the last allocation record (malloc, realloc, or free) about address */
+
+extern "C" kern_return_t __mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context);
+/* Applies enumerator to all records involving address sending context as enumerator's second parameter; if !address, applies enumerator to all records */
+
+extern "C" kern_return_t __mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count);
+/* Given a uniqued_stack fills stack_frames_buffer */
+
+
+#pragma mark -
+#pragma mark Legacy
+
+/* The following is the old 32-bit-only, in-process-memory stack logging. This is deprecated and clients should move to the above 64-bit-aware disk stack logging SPI. */
+
+typedef struct {
+ unsigned type;
+ unsigned uniqued_stack;
+ unsigned argument;
+ unsigned address; /* disguised, to avoid confusing leaks */
+} stack_logging_record_t;
+
+typedef struct {
+ unsigned overall_num_bytes;
+ unsigned num_records;
+ unsigned lock; /* 0 means OK to lock; used for inter-process locking */
+ unsigned *uniquing_table; /* allocated using vm_allocate() */
+ /* hashtable organized as (PC, uniqued parent);
+ only the second half of the table is active,
+ to enable us to grow dynamically */
+ unsigned uniquing_table_num_pages; /* number of pages of the table */
+ unsigned extra_retain_count; /* not used by stack_logging_log_stack */
+ unsigned filler[2]; /* align to cache lines for better performance */
+ stack_logging_record_t records[0]; /* records follow here */
+} stack_logging_record_list_t;
+
+extern "C" stack_logging_record_list_t *stack_logging_the_record_list;
+/* This is the global variable containing all logs */
+
+extern "C" kern_return_t stack_logging_get_frames(task_t task, memory_reader_t reader, vm_address_t address, vm_address_t *stack_frames_buffer, unsigned max_stack_frames, unsigned *num_frames);
+/* Gets the last record in stack_logging_the_record_list about address */
+
+#define STACK_LOGGING_ENUMERATION_PROVIDED 1 // temporary to avoid dependencies between projects
+
+extern "C" kern_return_t stack_logging_enumerate_records(task_t task, memory_reader_t reader, vm_address_t address, void enumerator(stack_logging_record_t, void *), void *context);
+/* Gets all the records about address;
+ If !address, gets all records */
+
+extern "C" kern_return_t stack_logging_frames_for_uniqued_stack(task_t task, memory_reader_t reader, unsigned uniqued_stack, vm_address_t *stack_frames_buffer, unsigned max_stack_frames, unsigned *num_frames);
+/* Given a uniqued_stack fills stack_frames_buffer */
+
+
+
+extern "C" void thread_stack_pcs(vm_address_t *buffer, unsigned max, unsigned *num);
+/* Convenience to fill buffer with the PCs of the frames, starting with the hot frames;
+ num: returned number of frames
+ */
+
+#endif
diff --git a/tools/debugserver/source/MacOSX/x86_64/CMakeLists.txt b/tools/debugserver/source/MacOSX/x86_64/CMakeLists.txt
new file mode 100644
index 000000000000..bb41b04d9d9e
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/x86_64/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Due to sources including headers like:
+# #include "MacOSX/i386/DNBArchImplI386.h"
+# we must include the grandparent directory...
+include_directories(${LLDB_SOURCE_DIR}/tools/debugserver/source)
+
+add_library(lldbDebugserverMacOSX_X86_64
+ DNBArchImplX86_64.cpp
+ )
diff --git a/tools/debugserver/source/MacOSX/x86_64/DNBArchImplX86_64.cpp b/tools/debugserver/source/MacOSX/x86_64/DNBArchImplX86_64.cpp
new file mode 100644
index 000000000000..3d2805cddb91
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/x86_64/DNBArchImplX86_64.cpp
@@ -0,0 +1,2289 @@
+//===-- DNBArchImplX86_64.cpp -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/25/07.
+//
+//===----------------------------------------------------------------------===//
+
+#if defined (__i386__) || defined (__x86_64__)
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+
+#include "MacOSX/x86_64/DNBArchImplX86_64.h"
+#include "../HasAVX.h"
+#include "DNBLog.h"
+#include "MachThread.h"
+#include "MachProcess.h"
+#include <mach/mach.h>
+#include <stdlib.h>
+
+#if defined (LLDB_DEBUGSERVER_RELEASE) || defined (LLDB_DEBUGSERVER_DEBUG)
+enum debugState {
+ debugStateUnknown,
+ debugStateOff,
+ debugStateOn
+};
+
+static debugState sFPUDebugState = debugStateUnknown;
+static debugState sAVXForceState = debugStateUnknown;
+
+static bool DebugFPURegs ()
+{
+ if (sFPUDebugState == debugStateUnknown)
+ {
+ if (getenv("DNB_DEBUG_FPU_REGS"))
+ sFPUDebugState = debugStateOn;
+ else
+ sFPUDebugState = debugStateOff;
+ }
+
+ return (sFPUDebugState == debugStateOn);
+}
+
+static bool ForceAVXRegs ()
+{
+ if (sAVXForceState == debugStateUnknown)
+ {
+ if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS"))
+ sAVXForceState = debugStateOn;
+ else
+ sAVXForceState = debugStateOff;
+ }
+
+ return (sAVXForceState == debugStateOn);
+}
+
+#define DEBUG_FPU_REGS (DebugFPURegs())
+#define FORCE_AVX_REGS (ForceAVXRegs())
+#else
+#define DEBUG_FPU_REGS (0)
+#define FORCE_AVX_REGS (0)
+#endif
+
+
+extern "C" bool
+CPUHasAVX()
+{
+ enum AVXPresence
+ {
+ eAVXUnknown = -1,
+ eAVXNotPresent = 0,
+ eAVXPresent = 1
+ };
+
+ static AVXPresence g_has_avx = eAVXUnknown;
+ if (g_has_avx == eAVXUnknown)
+ {
+ g_has_avx = eAVXNotPresent;
+
+ // Only xnu-2020 or later has AVX support; any version before that
+ // has a busted thread_get_state RPC that truncates the thread state
+ // buffer (<rdar://problem/10122874>). So we need to verify the
+ // kernel version number manually or disable AVX support.
+ int mib[2];
+ char buffer[1024];
+ size_t length = sizeof(buffer);
+ uint64_t xnu_version = 0;
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_VERSION;
+ int err = ::sysctl(mib, 2, &buffer, &length, NULL, 0);
+ if (err == 0)
+ {
+ const char *xnu = strstr (buffer, "xnu-");
+ if (xnu)
+ {
+ const char *xnu_version_cstr = xnu + 4;
+ xnu_version = strtoull (xnu_version_cstr, NULL, 0);
+ if (xnu_version >= 2020 && xnu_version != ULLONG_MAX)
+ {
+ if (::HasAVX())
+ {
+ g_has_avx = eAVXPresent;
+ }
+ }
+ }
+ }
+ DNBLogThreadedIf (LOG_THREAD, "CPUHasAVX(): g_has_avx = %i (err = %i, errno = %i, xnu_version = %llu)", g_has_avx, err, errno, xnu_version);
+ }
+
+ return (g_has_avx == eAVXPresent);
+}
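+
+// Illustrative note (an assumption, not from the original sources): on a
+// machine whose kernel supports AVX, the CTL_KERN/KERN_VERSION string read
+// above looks roughly like
+//   "Darwin Kernel Version 13.0.0: ...; root:xnu-2422.1.72~6/RELEASE_X86_64"
+// so strstr(buffer, "xnu-") + 4 points at "2422...", strtoull() yields 2422,
+// and the xnu_version >= 2020 check passes.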
+
+uint64_t
+DNBArchImplX86_64::GetPC(uint64_t failValue)
+{
+ // Get program counter
+ if (GetGPRState(false) == KERN_SUCCESS)
+ return m_state.context.gpr.__rip;
+ return failValue;
+}
+
+kern_return_t
+DNBArchImplX86_64::SetPC(uint64_t value)
+{
+ // Get program counter
+ kern_return_t err = GetGPRState(false);
+ if (err == KERN_SUCCESS)
+ {
+ m_state.context.gpr.__rip = value;
+ err = SetGPRState();
+ }
+ return err;
+}
+
+uint64_t
+DNBArchImplX86_64::GetSP(uint64_t failValue)
+{
+ // Get stack pointer
+ if (GetGPRState(false) == KERN_SUCCESS)
+ return m_state.context.gpr.__rsp;
+ return failValue;
+}
+
+// Uncomment the value below to verify the values in the debugger.
+//#define DEBUG_GPR_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED
+
+kern_return_t
+DNBArchImplX86_64::GetGPRState(bool force)
+{
+ if (force || m_state.GetError(e_regSetGPR, Read))
+ {
+#if DEBUG_GPR_VALUES
+ m_state.context.gpr.__rax = ('a' << 8) + 'x';
+ m_state.context.gpr.__rbx = ('b' << 8) + 'x';
+ m_state.context.gpr.__rcx = ('c' << 8) + 'x';
+ m_state.context.gpr.__rdx = ('d' << 8) + 'x';
+ m_state.context.gpr.__rdi = ('d' << 8) + 'i';
+ m_state.context.gpr.__rsi = ('s' << 8) + 'i';
+ m_state.context.gpr.__rbp = ('b' << 8) + 'p';
+ m_state.context.gpr.__rsp = ('s' << 8) + 'p';
+ m_state.context.gpr.__r8 = ('r' << 8) + '8';
+ m_state.context.gpr.__r9 = ('r' << 8) + '9';
+ m_state.context.gpr.__r10 = ('r' << 8) + 'a';
+ m_state.context.gpr.__r11 = ('r' << 8) + 'b';
+ m_state.context.gpr.__r12 = ('r' << 8) + 'c';
+ m_state.context.gpr.__r13 = ('r' << 8) + 'd';
+ m_state.context.gpr.__r14 = ('r' << 8) + 'e';
+ m_state.context.gpr.__r15 = ('r' << 8) + 'f';
+ m_state.context.gpr.__rip = ('i' << 8) + 'p';
+ m_state.context.gpr.__rflags = ('f' << 8) + 'l';
+ m_state.context.gpr.__cs = ('c' << 8) + 's';
+ m_state.context.gpr.__fs = ('f' << 8) + 's';
+ m_state.context.gpr.__gs = ('g' << 8) + 's';
+ m_state.SetError(e_regSetGPR, Read, 0);
+#else
+ mach_msg_type_number_t count = e_regSetWordSizeGPR;
+ m_state.SetError(e_regSetGPR, Read, ::thread_get_state(m_thread->MachPortNumber(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count));
+ DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
+ "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
+ "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
+ "\n\t r8 = %16.16llx r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
+ "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
+ "\n\trip = %16.16llx"
+ "\n\tflg = %16.16llx cs = %16.16llx fs = %16.16llx gs = %16.16llx",
+ m_thread->MachPortNumber(), x86_THREAD_STATE64, x86_THREAD_STATE64_COUNT,
+ m_state.GetError(e_regSetGPR, Read),
+ m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx,
+ m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi,
+ m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8,
+ m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11,
+ m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14,
+ m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags,
+ m_state.context.gpr.__cs,m_state.context.gpr.__fs, m_state.context.gpr.__gs);
+
+ // DNBLogThreadedIf (LOG_THREAD, "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
+ // "\n\trax = %16.16llx"
+ // "\n\trbx = %16.16llx"
+ // "\n\trcx = %16.16llx"
+ // "\n\trdx = %16.16llx"
+ // "\n\trdi = %16.16llx"
+ // "\n\trsi = %16.16llx"
+ // "\n\trbp = %16.16llx"
+ // "\n\trsp = %16.16llx"
+ // "\n\t r8 = %16.16llx"
+ // "\n\t r9 = %16.16llx"
+ // "\n\tr10 = %16.16llx"
+ // "\n\tr11 = %16.16llx"
+ // "\n\tr12 = %16.16llx"
+ // "\n\tr13 = %16.16llx"
+ // "\n\tr14 = %16.16llx"
+ // "\n\tr15 = %16.16llx"
+ // "\n\trip = %16.16llx"
+ // "\n\tflg = %16.16llx"
+ // "\n\t cs = %16.16llx"
+ // "\n\t fs = %16.16llx"
+ // "\n\t gs = %16.16llx",
+ // m_thread->MachPortNumber(),
+ // x86_THREAD_STATE64,
+ // x86_THREAD_STATE64_COUNT,
+ // m_state.GetError(e_regSetGPR, Read),
+ // m_state.context.gpr.__rax,
+ // m_state.context.gpr.__rbx,
+ // m_state.context.gpr.__rcx,
+ // m_state.context.gpr.__rdx,
+ // m_state.context.gpr.__rdi,
+ // m_state.context.gpr.__rsi,
+ // m_state.context.gpr.__rbp,
+ // m_state.context.gpr.__rsp,
+ // m_state.context.gpr.__r8,
+ // m_state.context.gpr.__r9,
+ // m_state.context.gpr.__r10,
+ // m_state.context.gpr.__r11,
+ // m_state.context.gpr.__r12,
+ // m_state.context.gpr.__r13,
+ // m_state.context.gpr.__r14,
+ // m_state.context.gpr.__r15,
+ // m_state.context.gpr.__rip,
+ // m_state.context.gpr.__rflags,
+ // m_state.context.gpr.__cs,
+ // m_state.context.gpr.__fs,
+ // m_state.context.gpr.__gs);
+#endif
+ }
+ return m_state.GetError(e_regSetGPR, Read);
+}
+
+// Uncomment the value below to verify the values in the debugger.
+//#define DEBUG_FPU_REGS 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED
+
+kern_return_t
+DNBArchImplX86_64::GetFPUState(bool force)
+{
+ if (force || m_state.GetError(e_regSetFPU, Read))
+ {
+ if (DEBUG_FPU_REGS) {
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ {
+ m_state.context.fpu.avx.__fpu_reserved[0] = -1;
+ m_state.context.fpu.avx.__fpu_reserved[1] = -1;
+ *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fcw) = 0x1234;
+ *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fsw) = 0x5678;
+ m_state.context.fpu.avx.__fpu_ftw = 1;
+ m_state.context.fpu.avx.__fpu_rsrv1 = UINT8_MAX;
+ m_state.context.fpu.avx.__fpu_fop = 2;
+ m_state.context.fpu.avx.__fpu_ip = 3;
+ m_state.context.fpu.avx.__fpu_cs = 4;
+ m_state.context.fpu.avx.__fpu_rsrv2 = UINT8_MAX;
+ m_state.context.fpu.avx.__fpu_dp = 5;
+ m_state.context.fpu.avx.__fpu_ds = 6;
+ m_state.context.fpu.avx.__fpu_rsrv3 = UINT16_MAX;
+ m_state.context.fpu.avx.__fpu_mxcsr = 8;
+ m_state.context.fpu.avx.__fpu_mxcsrmask = 9;
+ int i;
+ for (i=0; i<16; ++i)
+ {
+ if (i<10)
+ {
+ m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = 'a';
+ m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = 'b';
+ m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = 'c';
+ m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = 'd';
+ m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = 'e';
+ m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = 'f';
+ m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = 'g';
+ m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = 'h';
+ }
+ else
+ {
+ m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
+ }
+
+ m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg[i] = '0' + 2 * i;
+ m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg[i] = '1' + 2 * i;
+ m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg[i] = '2' + 2 * i;
+ m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg[i] = '3' + 2 * i;
+ m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg[i] = '4' + 2 * i;
+ m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg[i] = '5' + 2 * i;
+ m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg[i] = '6' + 2 * i;
+ m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg[i] = '7' + 2 * i;
+ m_state.context.fpu.avx.__fpu_xmm8.__xmm_reg[i] = '8' + 2 * i;
+ m_state.context.fpu.avx.__fpu_xmm9.__xmm_reg[i] = '9' + 2 * i;
+ m_state.context.fpu.avx.__fpu_xmm10.__xmm_reg[i] = 'A' + 2 * i;
+ m_state.context.fpu.avx.__fpu_xmm11.__xmm_reg[i] = 'B' + 2 * i;
+ m_state.context.fpu.avx.__fpu_xmm12.__xmm_reg[i] = 'C' + 2 * i;
+ m_state.context.fpu.avx.__fpu_xmm13.__xmm_reg[i] = 'D' + 2 * i;
+ m_state.context.fpu.avx.__fpu_xmm14.__xmm_reg[i] = 'E' + 2 * i;
+ m_state.context.fpu.avx.__fpu_xmm15.__xmm_reg[i] = 'F' + 2 * i;
+
+ m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0' + i;
+ m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1' + i;
+ m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2' + i;
+ m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3' + i;
+ m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4' + i;
+ m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5' + i;
+ m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6' + i;
+ m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7' + i;
+ m_state.context.fpu.avx.__fpu_ymmh8.__xmm_reg[i] = '8' + i;
+ m_state.context.fpu.avx.__fpu_ymmh9.__xmm_reg[i] = '9' + i;
+ m_state.context.fpu.avx.__fpu_ymmh10.__xmm_reg[i] = 'A' + i;
+ m_state.context.fpu.avx.__fpu_ymmh11.__xmm_reg[i] = 'B' + i;
+ m_state.context.fpu.avx.__fpu_ymmh12.__xmm_reg[i] = 'C' + i;
+ m_state.context.fpu.avx.__fpu_ymmh13.__xmm_reg[i] = 'D' + i;
+ m_state.context.fpu.avx.__fpu_ymmh14.__xmm_reg[i] = 'E' + i;
+ m_state.context.fpu.avx.__fpu_ymmh15.__xmm_reg[i] = 'F' + i;
+ }
+ for (i=0; i<sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i)
+ m_state.context.fpu.avx.__fpu_rsrv4[i] = INT8_MIN;
+ m_state.context.fpu.avx.__fpu_reserved1 = -1;
+ for (i=0; i<sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i)
+ m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN;
+ m_state.SetError(e_regSetFPU, Read, 0);
+ }
+ else
+ {
+ m_state.context.fpu.no_avx.__fpu_reserved[0] = -1;
+ m_state.context.fpu.no_avx.__fpu_reserved[1] = -1;
+ *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234;
+ *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678;
+ m_state.context.fpu.no_avx.__fpu_ftw = 1;
+ m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX;
+ m_state.context.fpu.no_avx.__fpu_fop = 2;
+ m_state.context.fpu.no_avx.__fpu_ip = 3;
+ m_state.context.fpu.no_avx.__fpu_cs = 4;
+ m_state.context.fpu.no_avx.__fpu_rsrv2 = 5;
+ m_state.context.fpu.no_avx.__fpu_dp = 6;
+ m_state.context.fpu.no_avx.__fpu_ds = 7;
+ m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX;
+ m_state.context.fpu.no_avx.__fpu_mxcsr = 8;
+ m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9;
+ int i;
+ for (i=0; i<16; ++i)
+ {
+ if (i<10)
+ {
+ m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a';
+ m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b';
+ m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c';
+ m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd';
+ m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e';
+ m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f';
+ m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g';
+ m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h';
+ }
+ else
+ {
+ m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
+ m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
+ }
+
+ m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0';
+ m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1';
+ m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2';
+ m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3';
+ m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4';
+ m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5';
+ m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6';
+ m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7';
+ m_state.context.fpu.no_avx.__fpu_xmm8.__xmm_reg[i] = '8';
+ m_state.context.fpu.no_avx.__fpu_xmm9.__xmm_reg[i] = '9';
+ m_state.context.fpu.no_avx.__fpu_xmm10.__xmm_reg[i] = 'A';
+ m_state.context.fpu.no_avx.__fpu_xmm11.__xmm_reg[i] = 'B';
+ m_state.context.fpu.no_avx.__fpu_xmm12.__xmm_reg[i] = 'C';
+ m_state.context.fpu.no_avx.__fpu_xmm13.__xmm_reg[i] = 'D';
+ m_state.context.fpu.no_avx.__fpu_xmm14.__xmm_reg[i] = 'E';
+ m_state.context.fpu.no_avx.__fpu_xmm15.__xmm_reg[i] = 'F';
+ }
+ for (i=0; i<sizeof(m_state.context.fpu.no_avx.__fpu_rsrv4); ++i)
+ m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN;
+ m_state.context.fpu.no_avx.__fpu_reserved1 = -1;
+ m_state.SetError(e_regSetFPU, Read, 0);
+ }
+ }
+ else
+ {
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ {
+ mach_msg_type_number_t count = e_regSetWordSizeAVX;
+ m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->MachPortNumber(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, &count));
+ DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &avx, %u (%u passed in) carp) => 0x%8.8x",
+ m_thread->MachPortNumber(), __x86_64_AVX_STATE, (uint32_t)count,
+ e_regSetWordSizeAVX, m_state.GetError(e_regSetFPU, Read));
+ }
+ else
+ {
+ mach_msg_type_number_t count = e_regSetWordSizeFPU;
+ m_state.SetError(e_regSetFPU, Read, ::thread_get_state(m_thread->MachPortNumber(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, &count));
+ DNBLogThreadedIf (LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &fpu, %u (%u passed in) => 0x%8.8x",
+ m_thread->MachPortNumber(), __x86_64_FLOAT_STATE, (uint32_t)count,
+ e_regSetWordSizeFPU, m_state.GetError(e_regSetFPU, Read));
+ }
+ }
+ }
+ return m_state.GetError(e_regSetFPU, Read);
+}
+
+kern_return_t
+DNBArchImplX86_64::GetEXCState(bool force)
+{
+ if (force || m_state.GetError(e_regSetEXC, Read))
+ {
+ mach_msg_type_number_t count = e_regSetWordSizeEXC;
+ m_state.SetError(e_regSetEXC, Read, ::thread_get_state(m_thread->MachPortNumber(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count));
+ }
+ return m_state.GetError(e_regSetEXC, Read);
+}
+
+kern_return_t
+DNBArchImplX86_64::SetGPRState()
+{
+ kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
+ DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (SetGPRState() for stop_count = %u)", m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());
+
+ m_state.SetError(e_regSetGPR, Write, ::thread_set_state(m_thread->MachPortNumber(), __x86_64_THREAD_STATE, (thread_state_t)&m_state.context.gpr, e_regSetWordSizeGPR));
+ DNBLogThreadedIf (LOG_THREAD, "::thread_set_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x"
+ "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
+ "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
+ "\n\t r8 = %16.16llx r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
+ "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
+ "\n\trip = %16.16llx"
+ "\n\tflg = %16.16llx cs = %16.16llx fs = %16.16llx gs = %16.16llx",
+ m_thread->MachPortNumber(), __x86_64_THREAD_STATE, e_regSetWordSizeGPR,
+ m_state.GetError(e_regSetGPR, Write),
+ m_state.context.gpr.__rax,m_state.context.gpr.__rbx,m_state.context.gpr.__rcx,
+ m_state.context.gpr.__rdx,m_state.context.gpr.__rdi,m_state.context.gpr.__rsi,
+ m_state.context.gpr.__rbp,m_state.context.gpr.__rsp,m_state.context.gpr.__r8,
+ m_state.context.gpr.__r9, m_state.context.gpr.__r10,m_state.context.gpr.__r11,
+ m_state.context.gpr.__r12,m_state.context.gpr.__r13,m_state.context.gpr.__r14,
+ m_state.context.gpr.__r15,m_state.context.gpr.__rip,m_state.context.gpr.__rflags,
+ m_state.context.gpr.__cs, m_state.context.gpr.__fs, m_state.context.gpr.__gs);
+ return m_state.GetError(e_regSetGPR, Write);
+}
+
+kern_return_t
+DNBArchImplX86_64::SetFPUState()
+{
+ if (DEBUG_FPU_REGS)
+ {
+ m_state.SetError(e_regSetFPU, Write, 0);
+ return m_state.GetError(e_regSetFPU, Write);
+ }
+ else
+ {
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ {
+ m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->MachPortNumber(), __x86_64_AVX_STATE, (thread_state_t)&m_state.context.fpu.avx, e_regSetWordSizeAVX));
+ return m_state.GetError(e_regSetFPU, Write);
+ }
+ else
+ {
+ m_state.SetError(e_regSetFPU, Write, ::thread_set_state(m_thread->MachPortNumber(), __x86_64_FLOAT_STATE, (thread_state_t)&m_state.context.fpu.no_avx, e_regSetWordSizeFPU));
+ return m_state.GetError(e_regSetFPU, Write);
+ }
+ }
+}
+
+kern_return_t
+DNBArchImplX86_64::SetEXCState()
+{
+ m_state.SetError(e_regSetEXC, Write, ::thread_set_state(m_thread->MachPortNumber(), __x86_64_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, e_regSetWordSizeEXC));
+ return m_state.GetError(e_regSetEXC, Write);
+}
+
+kern_return_t
+DNBArchImplX86_64::GetDBGState(bool force)
+{
+ if (force || m_state.GetError(e_regSetDBG, Read))
+ {
+ mach_msg_type_number_t count = e_regSetWordSizeDBG;
+ m_state.SetError(e_regSetDBG, Read, ::thread_get_state(m_thread->MachPortNumber(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, &count));
+ }
+ return m_state.GetError(e_regSetDBG, Read);
+}
+
+kern_return_t
+DNBArchImplX86_64::SetDBGState(bool also_set_on_task)
+{
+ m_state.SetError(e_regSetDBG, Write, ::thread_set_state(m_thread->MachPortNumber(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG));
+ if (also_set_on_task)
+ {
+ kern_return_t kret = ::task_set_state(m_thread->Process()->Task().TaskPort(), __x86_64_DEBUG_STATE, (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG);
+ if (kret != KERN_SUCCESS)
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::SetDBGState failed to set debug control register state: 0x%8.8x.", kret);
+ }
+ return m_state.GetError(e_regSetDBG, Write);
+}
+
+void
+DNBArchImplX86_64::ThreadWillResume()
+{
+ // Do we need to step this thread? If so, let the mach thread tell us so.
+ if (m_thread->IsStepping())
+ {
+ // This is the primary thread, let the arch do anything it needs
+ EnableHardwareSingleStep(true);
+ }
+
+ // Reset the debug status register, if necessary, before we resume.
+ kern_return_t kret = GetDBGState(false);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret);
+ if (kret != KERN_SUCCESS)
+ return;
+
+ DBG &debug_state = m_state.context.dbg;
+ bool need_reset = false;
+ uint32_t i, num = NumSupportedHardwareWatchpoints();
+ for (i = 0; i < num; ++i)
+ if (IsWatchpointHit(debug_state, i))
+ need_reset = true;
+
+ if (need_reset)
+ {
+ ClearWatchpointHits(debug_state);
+ kret = SetDBGState(false);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::ThreadWillResume() SetDBGState() => 0x%8.8x.", kret);
+ }
+}
+
+bool
+DNBArchImplX86_64::ThreadDidStop()
+{
+ bool success = true;
+
+ m_state.InvalidateAllRegisterStates();
+
+ // Are we stepping a single instruction?
+ if (GetGPRState(true) == KERN_SUCCESS)
+ {
+ // We are single stepping, was this the primary thread?
+ if (m_thread->IsStepping())
+ {
+ // This was the primary thread, we need to clear the trace
+ // bit if so.
+ success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
+ }
+ else
+ {
+ // The MachThread will automatically restore the suspend count
+ // in ThreadDidStop(), so we don't need to do anything here if
+ // we weren't the primary thread the last time
+ }
+ }
+ return success;
+}
+
+bool
+DNBArchImplX86_64::NotifyException(MachException::Data& exc)
+{
+ switch (exc.exc_type)
+ {
+ case EXC_BAD_ACCESS:
+ break;
+ case EXC_BAD_INSTRUCTION:
+ break;
+ case EXC_ARITHMETIC:
+ break;
+ case EXC_EMULATION:
+ break;
+ case EXC_SOFTWARE:
+ break;
+ case EXC_BREAKPOINT:
+ if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2)
+ {
+ // exc_code = EXC_I386_BPT
+ //
+ nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
+ if (pc != INVALID_NUB_ADDRESS && pc > 0)
+ {
+ pc -= 1;
+ // Check for a breakpoint at one byte prior to the current PC value
+ // since the PC will be just past the trap.
+
+ DNBBreakpoint *bp = m_thread->Process()->Breakpoints().FindByAddress(pc);
+ if (bp)
+ {
+ // Backup the PC for i386 since the trap was taken and the PC
+ // is at the address following the single byte trap instruction.
+ if (m_state.context.gpr.__rip > 0)
+ {
+ m_state.context.gpr.__rip = pc;
+ // Write the new PC back out
+ SetGPRState ();
+ }
+ }
+ return true;
+ }
+ }
+ else if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 1)
+ {
+ // exc_code = EXC_I386_SGL
+ //
+ // Check whether this corresponds to a watchpoint hit event.
+ // If yes, set the exc_sub_code to the data break address.
+ nub_addr_t addr = 0;
+ uint32_t hw_index = GetHardwareWatchpointHit(addr);
+ if (hw_index != INVALID_NUB_HW_INDEX)
+ {
+ exc.exc_data[1] = addr;
+ // Piggyback the hw_index in the exc.data.
+ exc.exc_data.push_back(hw_index);
+ }
+
+ return true;
+ }
+ break;
+ case EXC_SYSCALL:
+ break;
+ case EXC_MACH_SYSCALL:
+ break;
+ case EXC_RPC_ALERT:
+ break;
+ }
+ return false;
+}
+
+uint32_t
+DNBArchImplX86_64::NumSupportedHardwareWatchpoints()
+{
+ // Available debug address registers: dr0, dr1, dr2, dr3.
+ return 4;
+}
+
+static uint32_t
+size_and_rw_bits(nub_size_t size, bool read, bool write)
+{
+ uint32_t rw;
+ if (read) {
+ rw = 0x3; // READ or READ/WRITE
+ } else if (write) {
+ rw = 0x1; // WRITE
+ } else {
+ assert(0 && "read and write cannot both be false");
+ }
+
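+    // For example, a 2-byte read/write watchpoint encodes as
+    // (0x1 << 2) | 0x3 == 0x7.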
+ switch (size) {
+ case 1:
+ return rw;
+ case 2:
+ return (0x1 << 2) | rw;
+ case 4:
+ return (0x3 << 2) | rw;
+ case 8:
+ return (0x2 << 2) | rw;
+ }
+ assert(0 && "invalid size, must be one of 1, 2, 4, or 8");
+ return 0;
+}
+
+void
+DNBArchImplX86_64::SetWatchpoint(DBG &debug_state, uint32_t hw_index, nub_addr_t addr, nub_size_t size, bool read, bool write)
+{
+    // Set both dr7 (the debug control register) and dr[hw_index] (the debug address register).
+
+    // dr7{7-0} encodes the local/global enable bits:
+ // global enable --. .-- local enable
+ // | |
+ // v v
+ // dr0 -> bits{1-0}
+ // dr1 -> bits{3-2}
+ // dr2 -> bits{5-4}
+ // dr3 -> bits{7-6}
+ //
+ // dr7{31-16} encodes the rw/len bits:
+ // b_x+3, b_x+2, b_x+1, b_x
+ // where bits{x+1, x} => rw
+ // 0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io read-or-write (unused)
+ // and bits{x+3, x+2} => len
+ // 0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
+ //
+ // dr0 -> bits{19-16}
+ // dr1 -> bits{23-20}
+ // dr2 -> bits{27-24}
+ // dr3 -> bits{31-28}
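+    //
+    // For example, a 4-byte write-only watchpoint in slot 1 ORs in
+    //   local enable bit: 1 << (2*1)                       = 0x00000004
+    //   rw/len bits:      ((0x3 << 2) | 0x1) << (16 + 4*1) = 0x00d00000
+    // i.e. dr7 |= 0x00d00004.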
+ debug_state.__dr7 |= (1 << (2*hw_index) |
+ size_and_rw_bits(size, read, write) << (16+4*hw_index));
+ switch (hw_index) {
+ case 0:
+ debug_state.__dr0 = addr; break;
+ case 1:
+ debug_state.__dr1 = addr; break;
+ case 2:
+ debug_state.__dr2 = addr; break;
+ case 3:
+ debug_state.__dr3 = addr; break;
+ default:
+ assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
+ }
+ return;
+}
+
+void
+DNBArchImplX86_64::ClearWatchpoint(DBG &debug_state, uint32_t hw_index)
+{
+ debug_state.__dr7 &= ~(3 << (2*hw_index));
+ switch (hw_index) {
+ case 0:
+ debug_state.__dr0 = 0; break;
+ case 1:
+ debug_state.__dr1 = 0; break;
+ case 2:
+ debug_state.__dr2 = 0; break;
+ case 3:
+ debug_state.__dr3 = 0; break;
+ default:
+ assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
+ }
+ return;
+}
+
+bool
+DNBArchImplX86_64::IsWatchpointVacant(const DBG &debug_state, uint32_t hw_index)
+{
+ // Check dr7 (debug control register) for local/global enable bits:
+ // global enable --. .-- local enable
+ // | |
+ // v v
+ // dr0 -> bits{1-0}
+ // dr1 -> bits{3-2}
+ // dr2 -> bits{5-4}
+ // dr3 -> bits{7-6}
+ return (debug_state.__dr7 & (3 << (2*hw_index))) == 0;
+}
+
+// Resets local copy of debug status register to wait for the next debug exception.
+void
+DNBArchImplX86_64::ClearWatchpointHits(DBG &debug_state)
+{
+ // See also IsWatchpointHit().
+ debug_state.__dr6 = 0;
+ return;
+}
+
+bool
+DNBArchImplX86_64::IsWatchpointHit(const DBG &debug_state, uint32_t hw_index)
+{
+    // Check dr6 (the debug status register) to see whether the given watchpoint has been hit:
+ // is watchpoint hit?
+ // |
+ // v
+ // dr0 -> bits{0}
+ // dr1 -> bits{1}
+ // dr2 -> bits{2}
+ // dr3 -> bits{3}
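+    //
+    // For example, dr6 == 0x2 means only the watchpoint in dr1 has fired.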
+ return (debug_state.__dr6 & (1 << hw_index));
+}
+
+nub_addr_t
+DNBArchImplX86_64::GetWatchAddress(const DBG &debug_state, uint32_t hw_index)
+{
+ switch (hw_index) {
+ case 0:
+ return debug_state.__dr0;
+ case 1:
+ return debug_state.__dr1;
+ case 2:
+ return debug_state.__dr2;
+ case 3:
+ return debug_state.__dr3;
+ }
+ assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
+ return 0;
+}
+
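+// Two-phase transaction support for hardware watchpoint changes: the
+// transaction coordinator snapshots this thread's debug state with
+// StartTransForHWP(), applies the change, and then either finishes the
+// transaction or rolls it back to the m_2pc_dbg_checkpoint snapshot when a
+// SetDBGState() call fails (see EnableHardwareWatchpoint() below).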
+bool
+DNBArchImplX86_64::StartTransForHWP()
+{
+ if (m_2pc_trans_state != Trans_Done && m_2pc_trans_state != Trans_Rolled_Back)
+ DNBLogError ("%s inconsistent state detected, expected %d or %d, got: %d", __FUNCTION__, Trans_Done, Trans_Rolled_Back, m_2pc_trans_state);
+ m_2pc_dbg_checkpoint = m_state.context.dbg;
+ m_2pc_trans_state = Trans_Pending;
+ return true;
+}
+
+bool
+DNBArchImplX86_64::RollbackTransForHWP()
+{
+ m_state.context.dbg = m_2pc_dbg_checkpoint;
+ if (m_2pc_trans_state != Trans_Pending)
+ DNBLogError ("%s inconsistent state detected, expected %d, got: %d", __FUNCTION__, Trans_Pending, m_2pc_trans_state);
+ m_2pc_trans_state = Trans_Rolled_Back;
+ kern_return_t kret = SetDBGState(false);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::RollbackTransForHWP() SetDBGState() => 0x%8.8x.", kret);
+
+ if (kret == KERN_SUCCESS)
+ return true;
+ else
+ return false;
+}
+
+bool
+DNBArchImplX86_64::FinishTransForHWP()
+{
+ m_2pc_trans_state = Trans_Done;
+ return true;
+}
+
+DNBArchImplX86_64::DBG
+DNBArchImplX86_64::GetDBGCheckpoint()
+{
+ return m_2pc_dbg_checkpoint;
+}
+
+uint32_t
+DNBArchImplX86_64::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write, bool also_set_on_task)
+{
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(addr = 0x%llx, size = %llu, read = %u, write = %u)", (uint64_t)addr, (uint64_t)size, read, write);
+
+ const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
+
+ // Can only watch 1, 2, 4, or 8 bytes.
+ if (!(size == 1 || size == 2 || size == 4 || size == 8))
+ return INVALID_NUB_HW_INDEX;
+
+ // We must watch for either read or write
+ if (read == false && write == false)
+ return INVALID_NUB_HW_INDEX;
+
+ // Read the debug state
+ kern_return_t kret = GetDBGState(false);
+
+ if (kret == KERN_SUCCESS)
+ {
+ // Check to make sure we have the needed hardware support
+ uint32_t i = 0;
+
+ DBG &debug_state = m_state.context.dbg;
+ for (i = 0; i < num_hw_watchpoints; ++i)
+ {
+ if (IsWatchpointVacant(debug_state, i))
+ break;
+ }
+
+        // See if we found an available hw watchpoint slot above
+ if (i < num_hw_watchpoints)
+ {
+ StartTransForHWP();
+
+ // Modify our local copy of the debug state, first.
+ SetWatchpoint(debug_state, i, addr, size, read, write);
+ // Now set the watch point in the inferior.
+ kret = SetDBGState(also_set_on_task);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret);
+
+ if (kret == KERN_SUCCESS)
+ return i;
+ else // Revert to the previous debug state voluntarily. The transaction coordinator knows that we have failed.
+ m_state.context.dbg = GetDBGCheckpoint();
+ }
+ else
+ {
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints);
+ }
+ }
+ return INVALID_NUB_HW_INDEX;
+}
+
+bool
+DNBArchImplX86_64::DisableHardwareWatchpoint (uint32_t hw_index, bool also_set_on_task)
+{
+ kern_return_t kret = GetDBGState(false);
+
+ const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
+ if (kret == KERN_SUCCESS)
+ {
+ DBG &debug_state = m_state.context.dbg;
+ if (hw_index < num_hw_points && !IsWatchpointVacant(debug_state, hw_index))
+ {
+ StartTransForHWP();
+
+ // Modify our local copy of the debug state, first.
+ ClearWatchpoint(debug_state, hw_index);
+ // Now disable the watch point in the inferior.
+ kret = SetDBGState(also_set_on_task);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::DisableHardwareWatchpoint( %u )",
+ hw_index);
+
+ if (kret == KERN_SUCCESS)
+ return true;
+ else // Revert to the previous debug state voluntarily. The transaction coordinator knows that we have failed.
+ m_state.context.dbg = GetDBGCheckpoint();
+ }
+ }
+ return false;
+}
+
+// Iterate through the debug status register; return the index of the first hit.
+uint32_t
+DNBArchImplX86_64::GetHardwareWatchpointHit(nub_addr_t &addr)
+{
+ // Read the debug state
+ kern_return_t kret = GetDBGState(true);
+ DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", kret);
+ if (kret == KERN_SUCCESS)
+ {
+ DBG &debug_state = m_state.context.dbg;
+ uint32_t i, num = NumSupportedHardwareWatchpoints();
+ for (i = 0; i < num; ++i)
+ {
+ if (IsWatchpointHit(debug_state, i))
+ {
+ addr = GetWatchAddress(debug_state, i);
+ DNBLogThreadedIf(LOG_WATCHPOINTS,
+ "DNBArchImplX86_64::GetHardwareWatchpointHit() found => %u (addr = 0x%llx).",
+ i,
+ (uint64_t)addr);
+ return i;
+ }
+ }
+ }
+ return INVALID_NUB_HW_INDEX;
+}
+
+// Set the single step bit in the processor status register.
+kern_return_t
+DNBArchImplX86_64::EnableHardwareSingleStep (bool enable)
+{
+ if (GetGPRState(false) == KERN_SUCCESS)
+ {
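+        // Bit 8 (0x100) of RFLAGS is the TF (trap) flag; while it is set the
+        // CPU raises a single-step debug exception after each instruction.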
+ const uint32_t trace_bit = 0x100u;
+ if (enable)
+ m_state.context.gpr.__rflags |= trace_bit;
+ else
+ m_state.context.gpr.__rflags &= ~trace_bit;
+ return SetGPRState();
+ }
+ return m_state.GetError(e_regSetGPR, Read);
+}
+
+
+//----------------------------------------------------------------------
+// Register information definitions
+//----------------------------------------------------------------------
+
+enum
+{
+ gpr_rax = 0,
+ gpr_rbx,
+ gpr_rcx,
+ gpr_rdx,
+ gpr_rdi,
+ gpr_rsi,
+ gpr_rbp,
+ gpr_rsp,
+ gpr_r8,
+ gpr_r9,
+ gpr_r10,
+ gpr_r11,
+ gpr_r12,
+ gpr_r13,
+ gpr_r14,
+ gpr_r15,
+ gpr_rip,
+ gpr_rflags,
+ gpr_cs,
+ gpr_fs,
+ gpr_gs,
+ gpr_eax,
+ gpr_ebx,
+ gpr_ecx,
+ gpr_edx,
+ gpr_edi,
+ gpr_esi,
+ gpr_ebp,
+ gpr_esp,
+    gpr_r8d,    // Low 32 bits of r8
+    gpr_r9d,    // Low 32 bits of r9
+    gpr_r10d,   // Low 32 bits of r10
+    gpr_r11d,   // Low 32 bits of r11
+    gpr_r12d,   // Low 32 bits of r12
+    gpr_r13d,   // Low 32 bits of r13
+    gpr_r14d,   // Low 32 bits of r14
+    gpr_r15d,   // Low 32 bits of r15
+ gpr_ax ,
+ gpr_bx ,
+ gpr_cx ,
+ gpr_dx ,
+ gpr_di ,
+ gpr_si ,
+ gpr_bp ,
+ gpr_sp ,
+    gpr_r8w,    // Low 16 bits of r8
+    gpr_r9w,    // Low 16 bits of r9
+    gpr_r10w,   // Low 16 bits of r10
+    gpr_r11w,   // Low 16 bits of r11
+    gpr_r12w,   // Low 16 bits of r12
+    gpr_r13w,   // Low 16 bits of r13
+    gpr_r14w,   // Low 16 bits of r14
+    gpr_r15w,   // Low 16 bits of r15
+ gpr_ah ,
+ gpr_bh ,
+ gpr_ch ,
+ gpr_dh ,
+ gpr_al ,
+ gpr_bl ,
+ gpr_cl ,
+ gpr_dl ,
+ gpr_dil,
+ gpr_sil,
+ gpr_bpl,
+ gpr_spl,
+    gpr_r8l,    // Low 8 bits of r8
+    gpr_r9l,    // Low 8 bits of r9
+    gpr_r10l,   // Low 8 bits of r10
+    gpr_r11l,   // Low 8 bits of r11
+    gpr_r12l,   // Low 8 bits of r12
+    gpr_r13l,   // Low 8 bits of r13
+    gpr_r14l,   // Low 8 bits of r14
+    gpr_r15l,   // Low 8 bits of r15
+ k_num_gpr_regs
+};
+
+enum {
+ fpu_fcw,
+ fpu_fsw,
+ fpu_ftw,
+ fpu_fop,
+ fpu_ip,
+ fpu_cs,
+ fpu_dp,
+ fpu_ds,
+ fpu_mxcsr,
+ fpu_mxcsrmask,
+ fpu_stmm0,
+ fpu_stmm1,
+ fpu_stmm2,
+ fpu_stmm3,
+ fpu_stmm4,
+ fpu_stmm5,
+ fpu_stmm6,
+ fpu_stmm7,
+ fpu_xmm0,
+ fpu_xmm1,
+ fpu_xmm2,
+ fpu_xmm3,
+ fpu_xmm4,
+ fpu_xmm5,
+ fpu_xmm6,
+ fpu_xmm7,
+ fpu_xmm8,
+ fpu_xmm9,
+ fpu_xmm10,
+ fpu_xmm11,
+ fpu_xmm12,
+ fpu_xmm13,
+ fpu_xmm14,
+ fpu_xmm15,
+ fpu_ymm0,
+ fpu_ymm1,
+ fpu_ymm2,
+ fpu_ymm3,
+ fpu_ymm4,
+ fpu_ymm5,
+ fpu_ymm6,
+ fpu_ymm7,
+ fpu_ymm8,
+ fpu_ymm9,
+ fpu_ymm10,
+ fpu_ymm11,
+ fpu_ymm12,
+ fpu_ymm13,
+ fpu_ymm14,
+ fpu_ymm15,
+ k_num_fpu_regs,
+
+ // Aliases
+ fpu_fctrl = fpu_fcw,
+ fpu_fstat = fpu_fsw,
+ fpu_ftag = fpu_ftw,
+ fpu_fiseg = fpu_cs,
+ fpu_fioff = fpu_ip,
+ fpu_foseg = fpu_ds,
+ fpu_fooff = fpu_dp
+};
+
+enum {
+ exc_trapno,
+ exc_err,
+ exc_faultvaddr,
+ k_num_exc_regs,
+};
+
+
+enum ehframe_dwarf_regnums
+{
+ ehframe_dwarf_rax = 0,
+ ehframe_dwarf_rdx = 1,
+ ehframe_dwarf_rcx = 2,
+ ehframe_dwarf_rbx = 3,
+ ehframe_dwarf_rsi = 4,
+ ehframe_dwarf_rdi = 5,
+ ehframe_dwarf_rbp = 6,
+ ehframe_dwarf_rsp = 7,
+ ehframe_dwarf_r8,
+ ehframe_dwarf_r9,
+ ehframe_dwarf_r10,
+ ehframe_dwarf_r11,
+ ehframe_dwarf_r12,
+ ehframe_dwarf_r13,
+ ehframe_dwarf_r14,
+ ehframe_dwarf_r15,
+ ehframe_dwarf_rip,
+ ehframe_dwarf_xmm0,
+ ehframe_dwarf_xmm1,
+ ehframe_dwarf_xmm2,
+ ehframe_dwarf_xmm3,
+ ehframe_dwarf_xmm4,
+ ehframe_dwarf_xmm5,
+ ehframe_dwarf_xmm6,
+ ehframe_dwarf_xmm7,
+ ehframe_dwarf_xmm8,
+ ehframe_dwarf_xmm9,
+ ehframe_dwarf_xmm10,
+ ehframe_dwarf_xmm11,
+ ehframe_dwarf_xmm12,
+ ehframe_dwarf_xmm13,
+ ehframe_dwarf_xmm14,
+ ehframe_dwarf_xmm15,
+ ehframe_dwarf_stmm0,
+ ehframe_dwarf_stmm1,
+ ehframe_dwarf_stmm2,
+ ehframe_dwarf_stmm3,
+ ehframe_dwarf_stmm4,
+ ehframe_dwarf_stmm5,
+ ehframe_dwarf_stmm6,
+ ehframe_dwarf_stmm7,
+ ehframe_dwarf_ymm0 = ehframe_dwarf_xmm0,
+ ehframe_dwarf_ymm1 = ehframe_dwarf_xmm1,
+ ehframe_dwarf_ymm2 = ehframe_dwarf_xmm2,
+ ehframe_dwarf_ymm3 = ehframe_dwarf_xmm3,
+ ehframe_dwarf_ymm4 = ehframe_dwarf_xmm4,
+ ehframe_dwarf_ymm5 = ehframe_dwarf_xmm5,
+ ehframe_dwarf_ymm6 = ehframe_dwarf_xmm6,
+ ehframe_dwarf_ymm7 = ehframe_dwarf_xmm7,
+ ehframe_dwarf_ymm8 = ehframe_dwarf_xmm8,
+ ehframe_dwarf_ymm9 = ehframe_dwarf_xmm9,
+ ehframe_dwarf_ymm10 = ehframe_dwarf_xmm10,
+ ehframe_dwarf_ymm11 = ehframe_dwarf_xmm11,
+ ehframe_dwarf_ymm12 = ehframe_dwarf_xmm12,
+ ehframe_dwarf_ymm13 = ehframe_dwarf_xmm13,
+ ehframe_dwarf_ymm14 = ehframe_dwarf_xmm14,
+ ehframe_dwarf_ymm15 = ehframe_dwarf_xmm15
+};
+
+enum debugserver_regnums
+{
+ debugserver_rax = 0,
+ debugserver_rbx = 1,
+ debugserver_rcx = 2,
+ debugserver_rdx = 3,
+ debugserver_rsi = 4,
+ debugserver_rdi = 5,
+ debugserver_rbp = 6,
+ debugserver_rsp = 7,
+ debugserver_r8 = 8,
+ debugserver_r9 = 9,
+ debugserver_r10 = 10,
+ debugserver_r11 = 11,
+ debugserver_r12 = 12,
+ debugserver_r13 = 13,
+ debugserver_r14 = 14,
+ debugserver_r15 = 15,
+ debugserver_rip = 16,
+ debugserver_rflags = 17,
+ debugserver_cs = 18,
+ debugserver_ss = 19,
+ debugserver_ds = 20,
+ debugserver_es = 21,
+ debugserver_fs = 22,
+ debugserver_gs = 23,
+ debugserver_stmm0 = 24,
+ debugserver_stmm1 = 25,
+ debugserver_stmm2 = 26,
+ debugserver_stmm3 = 27,
+ debugserver_stmm4 = 28,
+ debugserver_stmm5 = 29,
+ debugserver_stmm6 = 30,
+ debugserver_stmm7 = 31,
+ debugserver_fctrl = 32, debugserver_fcw = debugserver_fctrl,
+ debugserver_fstat = 33, debugserver_fsw = debugserver_fstat,
+ debugserver_ftag = 34, debugserver_ftw = debugserver_ftag,
+ debugserver_fiseg = 35, debugserver_fpu_cs = debugserver_fiseg,
+ debugserver_fioff = 36, debugserver_ip = debugserver_fioff,
+ debugserver_foseg = 37, debugserver_fpu_ds = debugserver_foseg,
+ debugserver_fooff = 38, debugserver_dp = debugserver_fooff,
+ debugserver_fop = 39,
+ debugserver_xmm0 = 40,
+ debugserver_xmm1 = 41,
+ debugserver_xmm2 = 42,
+ debugserver_xmm3 = 43,
+ debugserver_xmm4 = 44,
+ debugserver_xmm5 = 45,
+ debugserver_xmm6 = 46,
+ debugserver_xmm7 = 47,
+ debugserver_xmm8 = 48,
+ debugserver_xmm9 = 49,
+ debugserver_xmm10 = 50,
+ debugserver_xmm11 = 51,
+ debugserver_xmm12 = 52,
+ debugserver_xmm13 = 53,
+ debugserver_xmm14 = 54,
+ debugserver_xmm15 = 55,
+ debugserver_mxcsr = 56,
+ debugserver_ymm0 = debugserver_xmm0,
+ debugserver_ymm1 = debugserver_xmm1,
+ debugserver_ymm2 = debugserver_xmm2,
+ debugserver_ymm3 = debugserver_xmm3,
+ debugserver_ymm4 = debugserver_xmm4,
+ debugserver_ymm5 = debugserver_xmm5,
+ debugserver_ymm6 = debugserver_xmm6,
+ debugserver_ymm7 = debugserver_xmm7,
+ debugserver_ymm8 = debugserver_xmm8,
+ debugserver_ymm9 = debugserver_xmm9,
+ debugserver_ymm10 = debugserver_xmm10,
+ debugserver_ymm11 = debugserver_xmm11,
+ debugserver_ymm12 = debugserver_xmm12,
+ debugserver_ymm13 = debugserver_xmm13,
+ debugserver_ymm14 = debugserver_xmm14,
+ debugserver_ymm15 = debugserver_xmm15
+};
+
+#define GPR_OFFSET(reg) (offsetof (DNBArchImplX86_64::GPR, __##reg))
+#define FPU_OFFSET(reg) (offsetof (DNBArchImplX86_64::FPU, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.no_avx))
+#define AVX_OFFSET(reg) (offsetof (DNBArchImplX86_64::AVX, __fpu_##reg) + offsetof (DNBArchImplX86_64::Context, fpu.avx))
+#define EXC_OFFSET(reg) (offsetof (DNBArchImplX86_64::EXC, __##reg) + offsetof (DNBArchImplX86_64::Context, exc))
+#define AVX_OFFSET_YMM(n) (AVX_OFFSET(ymmh0) + (32 * n))
+
+#define GPR_SIZE(reg) (sizeof(((DNBArchImplX86_64::GPR *)NULL)->__##reg))
+#define FPU_SIZE_UINT(reg) (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg))
+#define FPU_SIZE_MMST(reg) (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__mmst_reg))
+#define FPU_SIZE_XMM(reg) (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__xmm_reg))
+#define FPU_SIZE_YMM(reg) (32)
+#define EXC_SIZE(reg) (sizeof(((DNBArchImplX86_64::EXC *)NULL)->__##reg))
+
+// These macros will auto define the register name, alt name, register size,
+// register offset, encoding, format and native register. This ensures that
+// the register state structures are defined correctly and have the correct
+// sizes and offsets.
+#define DEFINE_GPR(reg) { e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), ehframe_dwarf_##reg, ehframe_dwarf_##reg, INVALID_NUB_REGNUM, debugserver_##reg, NULL, g_invalidate_##reg }
+#define DEFINE_GPR_ALT(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), ehframe_dwarf_##reg, ehframe_dwarf_##reg, gen, debugserver_##reg, NULL, g_invalidate_##reg }
+#define DEFINE_GPR_ALT2(reg, alt) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_##reg, NULL, NULL }
+#define DEFINE_GPR_ALT3(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gen, debugserver_##reg, NULL, NULL }
+#define DEFINE_GPR_ALT4(reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), GPR_OFFSET(reg), ehframe_dwarf_##reg, ehframe_dwarf_##reg, gen, debugserver_##reg, NULL, NULL }
+
+#define DEFINE_GPR_PSEUDO_32(reg32,reg64) { e_regSetGPR, gpr_##reg32, #reg32, NULL, Uint, Hex, 4, 0,INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_##reg64, g_invalidate_##reg64 }
+#define DEFINE_GPR_PSEUDO_16(reg16,reg64) { e_regSetGPR, gpr_##reg16, #reg16, NULL, Uint, Hex, 2, 0,INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_##reg64, g_invalidate_##reg64 }
+#define DEFINE_GPR_PSEUDO_8H(reg8,reg64) { e_regSetGPR, gpr_##reg8 , #reg8 , NULL, Uint, Hex, 1, 1,INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_##reg64, g_invalidate_##reg64 }
+#define DEFINE_GPR_PSEUDO_8L(reg8,reg64) { e_regSetGPR, gpr_##reg8 , #reg8 , NULL, Uint, Hex, 1, 0,INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, g_contained_##reg64, g_invalidate_##reg64 }
+
+// General purpose registers for 64-bit x86_64
+
+const char *g_contained_rax[] = { "rax", NULL };
+const char *g_contained_rbx[] = { "rbx", NULL };
+const char *g_contained_rcx[] = { "rcx", NULL };
+const char *g_contained_rdx[] = { "rdx", NULL };
+const char *g_contained_rdi[] = { "rdi", NULL };
+const char *g_contained_rsi[] = { "rsi", NULL };
+const char *g_contained_rbp[] = { "rbp", NULL };
+const char *g_contained_rsp[] = { "rsp", NULL };
+const char *g_contained_r8[] = { "r8", NULL };
+const char *g_contained_r9[] = { "r9", NULL };
+const char *g_contained_r10[] = { "r10", NULL };
+const char *g_contained_r11[] = { "r11", NULL };
+const char *g_contained_r12[] = { "r12", NULL };
+const char *g_contained_r13[] = { "r13", NULL };
+const char *g_contained_r14[] = { "r14", NULL };
+const char *g_contained_r15[] = { "r15", NULL };
+
+const char *g_invalidate_rax[] = { "rax", "eax", "ax", "ah", "al", NULL };
+const char *g_invalidate_rbx[] = { "rbx", "ebx", "bx", "bh", "bl", NULL };
+const char *g_invalidate_rcx[] = { "rcx", "ecx", "cx", "ch", "cl", NULL };
+const char *g_invalidate_rdx[] = { "rdx", "edx", "dx", "dh", "dl", NULL };
+const char *g_invalidate_rdi[] = { "rdi", "edi", "di", "dil", NULL };
+const char *g_invalidate_rsi[] = { "rsi", "esi", "si", "sil", NULL };
+const char *g_invalidate_rbp[] = { "rbp", "ebp", "bp", "bpl", NULL };
+const char *g_invalidate_rsp[] = { "rsp", "esp", "sp", "spl", NULL };
+const char *g_invalidate_r8 [] = { "r8", "r8d", "r8w", "r8l", NULL };
+const char *g_invalidate_r9 [] = { "r9", "r9d", "r9w", "r9l", NULL };
+const char *g_invalidate_r10[] = { "r10", "r10d", "r10w", "r10l", NULL };
+const char *g_invalidate_r11[] = { "r11", "r11d", "r11w", "r11l", NULL };
+const char *g_invalidate_r12[] = { "r12", "r12d", "r12w", "r12l", NULL };
+const char *g_invalidate_r13[] = { "r13", "r13d", "r13w", "r13l", NULL };
+const char *g_invalidate_r14[] = { "r14", "r14d", "r14w", "r14l", NULL };
+const char *g_invalidate_r15[] = { "r15", "r15d", "r15w", "r15l", NULL };
+
+const DNBRegisterInfo
+DNBArchImplX86_64::g_gpr_registers[] =
+{
+ DEFINE_GPR (rax),
+ DEFINE_GPR (rbx),
+ DEFINE_GPR_ALT (rcx , "arg4", GENERIC_REGNUM_ARG4),
+ DEFINE_GPR_ALT (rdx , "arg3", GENERIC_REGNUM_ARG3),
+ DEFINE_GPR_ALT (rdi , "arg1", GENERIC_REGNUM_ARG1),
+ DEFINE_GPR_ALT (rsi , "arg2", GENERIC_REGNUM_ARG2),
+ DEFINE_GPR_ALT (rbp , "fp" , GENERIC_REGNUM_FP),
+ DEFINE_GPR_ALT (rsp , "sp" , GENERIC_REGNUM_SP),
+ DEFINE_GPR_ALT (r8 , "arg5", GENERIC_REGNUM_ARG5),
+ DEFINE_GPR_ALT (r9 , "arg6", GENERIC_REGNUM_ARG6),
+ DEFINE_GPR (r10),
+ DEFINE_GPR (r11),
+ DEFINE_GPR (r12),
+ DEFINE_GPR (r13),
+ DEFINE_GPR (r14),
+ DEFINE_GPR (r15),
+ DEFINE_GPR_ALT4 (rip , "pc", GENERIC_REGNUM_PC),
+ DEFINE_GPR_ALT3 (rflags, "flags", GENERIC_REGNUM_FLAGS),
+ DEFINE_GPR_ALT2 (cs, NULL),
+ DEFINE_GPR_ALT2 (fs, NULL),
+ DEFINE_GPR_ALT2 (gs, NULL),
+ DEFINE_GPR_PSEUDO_32 (eax, rax),
+ DEFINE_GPR_PSEUDO_32 (ebx, rbx),
+ DEFINE_GPR_PSEUDO_32 (ecx, rcx),
+ DEFINE_GPR_PSEUDO_32 (edx, rdx),
+ DEFINE_GPR_PSEUDO_32 (edi, rdi),
+ DEFINE_GPR_PSEUDO_32 (esi, rsi),
+ DEFINE_GPR_PSEUDO_32 (ebp, rbp),
+ DEFINE_GPR_PSEUDO_32 (esp, rsp),
+ DEFINE_GPR_PSEUDO_32 (r8d, r8),
+ DEFINE_GPR_PSEUDO_32 (r9d, r9),
+ DEFINE_GPR_PSEUDO_32 (r10d, r10),
+ DEFINE_GPR_PSEUDO_32 (r11d, r11),
+ DEFINE_GPR_PSEUDO_32 (r12d, r12),
+ DEFINE_GPR_PSEUDO_32 (r13d, r13),
+ DEFINE_GPR_PSEUDO_32 (r14d, r14),
+ DEFINE_GPR_PSEUDO_32 (r15d, r15),
+ DEFINE_GPR_PSEUDO_16 (ax , rax),
+ DEFINE_GPR_PSEUDO_16 (bx , rbx),
+ DEFINE_GPR_PSEUDO_16 (cx , rcx),
+ DEFINE_GPR_PSEUDO_16 (dx , rdx),
+ DEFINE_GPR_PSEUDO_16 (di , rdi),
+ DEFINE_GPR_PSEUDO_16 (si , rsi),
+ DEFINE_GPR_PSEUDO_16 (bp , rbp),
+ DEFINE_GPR_PSEUDO_16 (sp , rsp),
+ DEFINE_GPR_PSEUDO_16 (r8w, r8),
+ DEFINE_GPR_PSEUDO_16 (r9w, r9),
+ DEFINE_GPR_PSEUDO_16 (r10w, r10),
+ DEFINE_GPR_PSEUDO_16 (r11w, r11),
+ DEFINE_GPR_PSEUDO_16 (r12w, r12),
+ DEFINE_GPR_PSEUDO_16 (r13w, r13),
+ DEFINE_GPR_PSEUDO_16 (r14w, r14),
+ DEFINE_GPR_PSEUDO_16 (r15w, r15),
+ DEFINE_GPR_PSEUDO_8H (ah , rax),
+ DEFINE_GPR_PSEUDO_8H (bh , rbx),
+ DEFINE_GPR_PSEUDO_8H (ch , rcx),
+ DEFINE_GPR_PSEUDO_8H (dh , rdx),
+ DEFINE_GPR_PSEUDO_8L (al , rax),
+ DEFINE_GPR_PSEUDO_8L (bl , rbx),
+ DEFINE_GPR_PSEUDO_8L (cl , rcx),
+ DEFINE_GPR_PSEUDO_8L (dl , rdx),
+ DEFINE_GPR_PSEUDO_8L (dil, rdi),
+ DEFINE_GPR_PSEUDO_8L (sil, rsi),
+ DEFINE_GPR_PSEUDO_8L (bpl, rbp),
+ DEFINE_GPR_PSEUDO_8L (spl, rsp),
+ DEFINE_GPR_PSEUDO_8L (r8l, r8),
+ DEFINE_GPR_PSEUDO_8L (r9l, r9),
+ DEFINE_GPR_PSEUDO_8L (r10l, r10),
+ DEFINE_GPR_PSEUDO_8L (r11l, r11),
+ DEFINE_GPR_PSEUDO_8L (r12l, r12),
+ DEFINE_GPR_PSEUDO_8L (r13l, r13),
+ DEFINE_GPR_PSEUDO_8L (r14l, r14),
+ DEFINE_GPR_PSEUDO_8L (r15l, r15)
+};
+
+// Floating point registers for 64-bit x86_64
+const DNBRegisterInfo
+DNBArchImplX86_64::g_fpu_registers_no_avx[] =
+{
+ { e_regSetFPU, fpu_fcw , "fctrl" , NULL, Uint, Hex, FPU_SIZE_UINT(fcw) , FPU_OFFSET(fcw) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_fsw , "fstat" , NULL, Uint, Hex, FPU_SIZE_UINT(fsw) , FPU_OFFSET(fsw) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_ftw , "ftag" , NULL, Uint, Hex, FPU_SIZE_UINT(ftw) , FPU_OFFSET(ftw) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_fop , "fop" , NULL, Uint, Hex, FPU_SIZE_UINT(fop) , FPU_OFFSET(fop) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_ip , "fioff" , NULL, Uint, Hex, FPU_SIZE_UINT(ip) , FPU_OFFSET(ip) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_cs , "fiseg" , NULL, Uint, Hex, FPU_SIZE_UINT(cs) , FPU_OFFSET(cs) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_dp , "fooff" , NULL, Uint, Hex, FPU_SIZE_UINT(dp) , FPU_OFFSET(dp) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_ds , "foseg" , NULL, Uint, Hex, FPU_SIZE_UINT(ds) , FPU_OFFSET(ds) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_mxcsr , "mxcsr" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr) , FPU_OFFSET(mxcsr) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , FPU_OFFSET(mxcsrmask) , -1U, -1U, -1U, -1U, NULL, NULL },
+
+ { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), ehframe_dwarf_stmm0, ehframe_dwarf_stmm0, -1U, debugserver_stmm0, NULL, NULL },
+ { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), ehframe_dwarf_stmm1, ehframe_dwarf_stmm1, -1U, debugserver_stmm1, NULL, NULL },
+ { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), ehframe_dwarf_stmm2, ehframe_dwarf_stmm2, -1U, debugserver_stmm2, NULL, NULL },
+ { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), ehframe_dwarf_stmm3, ehframe_dwarf_stmm3, -1U, debugserver_stmm3, NULL, NULL },
+ { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), ehframe_dwarf_stmm4, ehframe_dwarf_stmm4, -1U, debugserver_stmm4, NULL, NULL },
+ { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), ehframe_dwarf_stmm5, ehframe_dwarf_stmm5, -1U, debugserver_stmm5, NULL, NULL },
+ { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), ehframe_dwarf_stmm6, ehframe_dwarf_stmm6, -1U, debugserver_stmm6, NULL, NULL },
+ { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), ehframe_dwarf_stmm7, ehframe_dwarf_stmm7, -1U, debugserver_stmm7, NULL, NULL },
+
+ { e_regSetFPU, fpu_xmm0 , "xmm0" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0) , FPU_OFFSET(xmm0) , ehframe_dwarf_xmm0 , ehframe_dwarf_xmm0 , -1U, debugserver_xmm0 , NULL, NULL },
+ { e_regSetFPU, fpu_xmm1 , "xmm1" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1) , FPU_OFFSET(xmm1) , ehframe_dwarf_xmm1 , ehframe_dwarf_xmm1 , -1U, debugserver_xmm1 , NULL, NULL },
+ { e_regSetFPU, fpu_xmm2 , "xmm2" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2) , FPU_OFFSET(xmm2) , ehframe_dwarf_xmm2 , ehframe_dwarf_xmm2 , -1U, debugserver_xmm2 , NULL, NULL },
+ { e_regSetFPU, fpu_xmm3 , "xmm3" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3) , FPU_OFFSET(xmm3) , ehframe_dwarf_xmm3 , ehframe_dwarf_xmm3 , -1U, debugserver_xmm3 , NULL, NULL },
+ { e_regSetFPU, fpu_xmm4 , "xmm4" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4) , FPU_OFFSET(xmm4) , ehframe_dwarf_xmm4 , ehframe_dwarf_xmm4 , -1U, debugserver_xmm4 , NULL, NULL },
+ { e_regSetFPU, fpu_xmm5 , "xmm5" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5) , FPU_OFFSET(xmm5) , ehframe_dwarf_xmm5 , ehframe_dwarf_xmm5 , -1U, debugserver_xmm5 , NULL, NULL },
+ { e_regSetFPU, fpu_xmm6 , "xmm6" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6) , FPU_OFFSET(xmm6) , ehframe_dwarf_xmm6 , ehframe_dwarf_xmm6 , -1U, debugserver_xmm6 , NULL, NULL },
+ { e_regSetFPU, fpu_xmm7 , "xmm7" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7) , FPU_OFFSET(xmm7) , ehframe_dwarf_xmm7 , ehframe_dwarf_xmm7 , -1U, debugserver_xmm7 , NULL, NULL },
+ { e_regSetFPU, fpu_xmm8 , "xmm8" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8) , FPU_OFFSET(xmm8) , ehframe_dwarf_xmm8 , ehframe_dwarf_xmm8 , -1U, debugserver_xmm8 , NULL, NULL },
+ { e_regSetFPU, fpu_xmm9 , "xmm9" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9) , FPU_OFFSET(xmm9) , ehframe_dwarf_xmm9 , ehframe_dwarf_xmm9 , -1U, debugserver_xmm9 , NULL, NULL },
+ { e_regSetFPU, fpu_xmm10, "xmm10" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10) , FPU_OFFSET(xmm10), ehframe_dwarf_xmm10, ehframe_dwarf_xmm10, -1U, debugserver_xmm10, NULL, NULL },
+ { e_regSetFPU, fpu_xmm11, "xmm11" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11) , FPU_OFFSET(xmm11), ehframe_dwarf_xmm11, ehframe_dwarf_xmm11, -1U, debugserver_xmm11, NULL, NULL },
+ { e_regSetFPU, fpu_xmm12, "xmm12" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12) , FPU_OFFSET(xmm12), ehframe_dwarf_xmm12, ehframe_dwarf_xmm12, -1U, debugserver_xmm12, NULL, NULL },
+ { e_regSetFPU, fpu_xmm13, "xmm13" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13) , FPU_OFFSET(xmm13), ehframe_dwarf_xmm13, ehframe_dwarf_xmm13, -1U, debugserver_xmm13, NULL, NULL },
+ { e_regSetFPU, fpu_xmm14, "xmm14" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14) , FPU_OFFSET(xmm14), ehframe_dwarf_xmm14, ehframe_dwarf_xmm14, -1U, debugserver_xmm14, NULL, NULL },
+ { e_regSetFPU, fpu_xmm15, "xmm15" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15) , FPU_OFFSET(xmm15), ehframe_dwarf_xmm15, ehframe_dwarf_xmm15, -1U, debugserver_xmm15, NULL, NULL },
+};
+
+static const char *g_contained_ymm0 [] = { "ymm0", NULL };
+static const char *g_contained_ymm1 [] = { "ymm1", NULL };
+static const char *g_contained_ymm2 [] = { "ymm2", NULL };
+static const char *g_contained_ymm3 [] = { "ymm3", NULL };
+static const char *g_contained_ymm4 [] = { "ymm4", NULL };
+static const char *g_contained_ymm5 [] = { "ymm5", NULL };
+static const char *g_contained_ymm6 [] = { "ymm6", NULL };
+static const char *g_contained_ymm7 [] = { "ymm7", NULL };
+static const char *g_contained_ymm8 [] = { "ymm8", NULL };
+static const char *g_contained_ymm9 [] = { "ymm9", NULL };
+static const char *g_contained_ymm10[] = { "ymm10", NULL };
+static const char *g_contained_ymm11[] = { "ymm11", NULL };
+static const char *g_contained_ymm12[] = { "ymm12", NULL };
+static const char *g_contained_ymm13[] = { "ymm13", NULL };
+static const char *g_contained_ymm14[] = { "ymm14", NULL };
+static const char *g_contained_ymm15[] = { "ymm15", NULL };
+
+const DNBRegisterInfo
+DNBArchImplX86_64::g_fpu_registers_avx[] =
+{
+ { e_regSetFPU, fpu_fcw , "fctrl" , NULL, Uint, Hex, FPU_SIZE_UINT(fcw) , AVX_OFFSET(fcw) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_fsw , "fstat" , NULL, Uint, Hex, FPU_SIZE_UINT(fsw) , AVX_OFFSET(fsw) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_ftw , "ftag" , NULL, Uint, Hex, FPU_SIZE_UINT(ftw) , AVX_OFFSET(ftw) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_fop , "fop" , NULL, Uint, Hex, FPU_SIZE_UINT(fop) , AVX_OFFSET(fop) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_ip , "fioff" , NULL, Uint, Hex, FPU_SIZE_UINT(ip) , AVX_OFFSET(ip) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_cs , "fiseg" , NULL, Uint, Hex, FPU_SIZE_UINT(cs) , AVX_OFFSET(cs) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_dp , "fooff" , NULL, Uint, Hex, FPU_SIZE_UINT(dp) , AVX_OFFSET(dp) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_ds , "foseg" , NULL, Uint, Hex, FPU_SIZE_UINT(ds) , AVX_OFFSET(ds) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_mxcsr , "mxcsr" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr) , AVX_OFFSET(mxcsr) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetFPU, fpu_mxcsrmask, "mxcsrmask" , NULL, Uint, Hex, FPU_SIZE_UINT(mxcsrmask) , AVX_OFFSET(mxcsrmask) , -1U, -1U, -1U, -1U, NULL, NULL },
+
+ { e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), ehframe_dwarf_stmm0, ehframe_dwarf_stmm0, -1U, debugserver_stmm0, NULL, NULL },
+ { e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), ehframe_dwarf_stmm1, ehframe_dwarf_stmm1, -1U, debugserver_stmm1, NULL, NULL },
+ { e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), ehframe_dwarf_stmm2, ehframe_dwarf_stmm2, -1U, debugserver_stmm2, NULL, NULL },
+ { e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), ehframe_dwarf_stmm3, ehframe_dwarf_stmm3, -1U, debugserver_stmm3, NULL, NULL },
+ { e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), ehframe_dwarf_stmm4, ehframe_dwarf_stmm4, -1U, debugserver_stmm4, NULL, NULL },
+ { e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), ehframe_dwarf_stmm5, ehframe_dwarf_stmm5, -1U, debugserver_stmm5, NULL, NULL },
+ { e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), ehframe_dwarf_stmm6, ehframe_dwarf_stmm6, -1U, debugserver_stmm6, NULL, NULL },
+ { e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), ehframe_dwarf_stmm7, ehframe_dwarf_stmm7, -1U, debugserver_stmm7, NULL, NULL },
+
+ { e_regSetFPU, fpu_ymm0 , "ymm0" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm0) , AVX_OFFSET_YMM(0) , ehframe_dwarf_ymm0 , ehframe_dwarf_ymm0 , -1U, debugserver_ymm0, NULL, NULL },
+ { e_regSetFPU, fpu_ymm1 , "ymm1" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm1) , AVX_OFFSET_YMM(1) , ehframe_dwarf_ymm1 , ehframe_dwarf_ymm1 , -1U, debugserver_ymm1, NULL, NULL },
+ { e_regSetFPU, fpu_ymm2 , "ymm2" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm2) , AVX_OFFSET_YMM(2) , ehframe_dwarf_ymm2 , ehframe_dwarf_ymm2 , -1U, debugserver_ymm2, NULL, NULL },
+ { e_regSetFPU, fpu_ymm3 , "ymm3" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm3) , AVX_OFFSET_YMM(3) , ehframe_dwarf_ymm3 , ehframe_dwarf_ymm3 , -1U, debugserver_ymm3, NULL, NULL },
+ { e_regSetFPU, fpu_ymm4 , "ymm4" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm4) , AVX_OFFSET_YMM(4) , ehframe_dwarf_ymm4 , ehframe_dwarf_ymm4 , -1U, debugserver_ymm4, NULL, NULL },
+ { e_regSetFPU, fpu_ymm5 , "ymm5" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm5) , AVX_OFFSET_YMM(5) , ehframe_dwarf_ymm5 , ehframe_dwarf_ymm5 , -1U, debugserver_ymm5, NULL, NULL },
+ { e_regSetFPU, fpu_ymm6 , "ymm6" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm6) , AVX_OFFSET_YMM(6) , ehframe_dwarf_ymm6 , ehframe_dwarf_ymm6 , -1U, debugserver_ymm6, NULL, NULL },
+ { e_regSetFPU, fpu_ymm7 , "ymm7" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm7) , AVX_OFFSET_YMM(7) , ehframe_dwarf_ymm7 , ehframe_dwarf_ymm7 , -1U, debugserver_ymm7, NULL, NULL },
+ { e_regSetFPU, fpu_ymm8 , "ymm8" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm8) , AVX_OFFSET_YMM(8) , ehframe_dwarf_ymm8 , ehframe_dwarf_ymm8 , -1U, debugserver_ymm8 , NULL, NULL },
+ { e_regSetFPU, fpu_ymm9 , "ymm9" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm9) , AVX_OFFSET_YMM(9) , ehframe_dwarf_ymm9 , ehframe_dwarf_ymm9 , -1U, debugserver_ymm9 , NULL, NULL },
+ { e_regSetFPU, fpu_ymm10, "ymm10" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm10) , AVX_OFFSET_YMM(10), ehframe_dwarf_ymm10, ehframe_dwarf_ymm10, -1U, debugserver_ymm10, NULL, NULL },
+ { e_regSetFPU, fpu_ymm11, "ymm11" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm11) , AVX_OFFSET_YMM(11), ehframe_dwarf_ymm11, ehframe_dwarf_ymm11, -1U, debugserver_ymm11, NULL, NULL },
+ { e_regSetFPU, fpu_ymm12, "ymm12" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm12) , AVX_OFFSET_YMM(12), ehframe_dwarf_ymm12, ehframe_dwarf_ymm12, -1U, debugserver_ymm12, NULL, NULL },
+ { e_regSetFPU, fpu_ymm13, "ymm13" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm13) , AVX_OFFSET_YMM(13), ehframe_dwarf_ymm13, ehframe_dwarf_ymm13, -1U, debugserver_ymm13, NULL, NULL },
+ { e_regSetFPU, fpu_ymm14, "ymm14" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm14) , AVX_OFFSET_YMM(14), ehframe_dwarf_ymm14, ehframe_dwarf_ymm14, -1U, debugserver_ymm14, NULL, NULL },
+ { e_regSetFPU, fpu_ymm15, "ymm15" , NULL, Vector, VectorOfUInt8, FPU_SIZE_YMM(ymm15) , AVX_OFFSET_YMM(15), ehframe_dwarf_ymm15, ehframe_dwarf_ymm15, -1U, debugserver_ymm15, NULL, NULL },
+
+ { e_regSetFPU, fpu_xmm0 , "xmm0" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm0) , 0, ehframe_dwarf_xmm0 , ehframe_dwarf_xmm0 , -1U, debugserver_xmm0 , g_contained_ymm0 , NULL },
+ { e_regSetFPU, fpu_xmm1 , "xmm1" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm1) , 0, ehframe_dwarf_xmm1 , ehframe_dwarf_xmm1 , -1U, debugserver_xmm1 , g_contained_ymm1 , NULL },
+ { e_regSetFPU, fpu_xmm2 , "xmm2" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm2) , 0, ehframe_dwarf_xmm2 , ehframe_dwarf_xmm2 , -1U, debugserver_xmm2 , g_contained_ymm2 , NULL },
+ { e_regSetFPU, fpu_xmm3 , "xmm3" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm3) , 0, ehframe_dwarf_xmm3 , ehframe_dwarf_xmm3 , -1U, debugserver_xmm3 , g_contained_ymm3 , NULL },
+ { e_regSetFPU, fpu_xmm4 , "xmm4" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm4) , 0, ehframe_dwarf_xmm4 , ehframe_dwarf_xmm4 , -1U, debugserver_xmm4 , g_contained_ymm4 , NULL },
+ { e_regSetFPU, fpu_xmm5 , "xmm5" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm5) , 0, ehframe_dwarf_xmm5 , ehframe_dwarf_xmm5 , -1U, debugserver_xmm5 , g_contained_ymm5 , NULL },
+ { e_regSetFPU, fpu_xmm6 , "xmm6" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm6) , 0, ehframe_dwarf_xmm6 , ehframe_dwarf_xmm6 , -1U, debugserver_xmm6 , g_contained_ymm6 , NULL },
+ { e_regSetFPU, fpu_xmm7 , "xmm7" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm7) , 0, ehframe_dwarf_xmm7 , ehframe_dwarf_xmm7 , -1U, debugserver_xmm7 , g_contained_ymm7 , NULL },
+ { e_regSetFPU, fpu_xmm8 , "xmm8" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm8) , 0, ehframe_dwarf_xmm8 , ehframe_dwarf_xmm8 , -1U, debugserver_xmm8 , g_contained_ymm8 , NULL },
+ { e_regSetFPU, fpu_xmm9 , "xmm9" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm9) , 0, ehframe_dwarf_xmm9 , ehframe_dwarf_xmm9 , -1U, debugserver_xmm9 , g_contained_ymm9 , NULL },
+ { e_regSetFPU, fpu_xmm10, "xmm10" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm10) , 0, ehframe_dwarf_xmm10, ehframe_dwarf_xmm10, -1U, debugserver_xmm10, g_contained_ymm10, NULL },
+ { e_regSetFPU, fpu_xmm11, "xmm11" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm11) , 0, ehframe_dwarf_xmm11, ehframe_dwarf_xmm11, -1U, debugserver_xmm11, g_contained_ymm11, NULL },
+ { e_regSetFPU, fpu_xmm12, "xmm12" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm12) , 0, ehframe_dwarf_xmm12, ehframe_dwarf_xmm12, -1U, debugserver_xmm12, g_contained_ymm12, NULL },
+ { e_regSetFPU, fpu_xmm13, "xmm13" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm13) , 0, ehframe_dwarf_xmm13, ehframe_dwarf_xmm13, -1U, debugserver_xmm13, g_contained_ymm13, NULL },
+ { e_regSetFPU, fpu_xmm14, "xmm14" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm14) , 0, ehframe_dwarf_xmm14, ehframe_dwarf_xmm14, -1U, debugserver_xmm14, g_contained_ymm14, NULL },
+ { e_regSetFPU, fpu_xmm15, "xmm15" , NULL, Vector, VectorOfUInt8, FPU_SIZE_XMM(xmm15) , 0, ehframe_dwarf_xmm15, ehframe_dwarf_xmm15, -1U, debugserver_xmm15, g_contained_ymm15, NULL }
+};
+
+// Exception registers
+
+const DNBRegisterInfo
+DNBArchImplX86_64::g_exc_registers[] =
+{
+ { e_regSetEXC, exc_trapno, "trapno" , NULL, Uint, Hex, EXC_SIZE (trapno) , EXC_OFFSET (trapno) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetEXC, exc_err, "err" , NULL, Uint, Hex, EXC_SIZE (err) , EXC_OFFSET (err) , -1U, -1U, -1U, -1U, NULL, NULL },
+ { e_regSetEXC, exc_faultvaddr, "faultvaddr", NULL, Uint, Hex, EXC_SIZE (faultvaddr), EXC_OFFSET (faultvaddr) , -1U, -1U, -1U, -1U, NULL, NULL }
+};
+
+// Number of registers in each register set
+const size_t DNBArchImplX86_64::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo);
+const size_t DNBArchImplX86_64::k_num_fpu_registers_no_avx = sizeof(g_fpu_registers_no_avx)/sizeof(DNBRegisterInfo);
+const size_t DNBArchImplX86_64::k_num_fpu_registers_avx = sizeof(g_fpu_registers_avx)/sizeof(DNBRegisterInfo);
+const size_t DNBArchImplX86_64::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo);
+const size_t DNBArchImplX86_64::k_num_all_registers_no_avx = k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers;
+const size_t DNBArchImplX86_64::k_num_all_registers_avx = k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;
+
+//----------------------------------------------------------------------
+// Register set definitions. The first definition, at register set index
+// zero, covers all registers and is followed by the other register sets.
+// The register information for the all-registers set need not be filled in.
+//----------------------------------------------------------------------
+const DNBRegisterSetInfo
+DNBArchImplX86_64::g_reg_sets_no_avx[] =
+{
+ { "x86_64 Registers", NULL, k_num_all_registers_no_avx },
+ { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers },
+ { "Floating Point Registers", g_fpu_registers_no_avx, k_num_fpu_registers_no_avx },
+ { "Exception State Registers", g_exc_registers, k_num_exc_registers }
+};
+
+const DNBRegisterSetInfo
+DNBArchImplX86_64::g_reg_sets_avx[] =
+{
+ { "x86_64 Registers", NULL, k_num_all_registers_avx },
+ { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers },
+ { "Floating Point Registers", g_fpu_registers_avx, k_num_fpu_registers_avx },
+ { "Exception State Registers", g_exc_registers, k_num_exc_registers }
+};
+
+// Total number of register sets for this architecture
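+// (The AVX and no-AVX tables define the same number of sets, so either one
+// can be used to compute the count.)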
+const size_t DNBArchImplX86_64::k_num_register_sets = sizeof(g_reg_sets_avx)/sizeof(DNBRegisterSetInfo);
+
+
+DNBArchProtocol *
+DNBArchImplX86_64::Create (MachThread *thread)
+{
+ DNBArchImplX86_64 *obj = new DNBArchImplX86_64 (thread);
+ return obj;
+}
+
+const uint8_t *
+DNBArchImplX86_64::SoftwareBreakpointOpcode (nub_size_t byte_size)
+{
+ static const uint8_t g_breakpoint_opcode[] = { 0xCC };
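+    // 0xCC is the one-byte INT3 breakpoint instruction.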
+ if (byte_size == 1)
+ return g_breakpoint_opcode;
+ return NULL;
+}
+
+const DNBRegisterSetInfo *
+DNBArchImplX86_64::GetRegisterSetInfo(nub_size_t *num_reg_sets)
+{
+ *num_reg_sets = k_num_register_sets;
+
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ return g_reg_sets_avx;
+ else
+ return g_reg_sets_no_avx;
+}
+
+void
+DNBArchImplX86_64::Initialize()
+{
+ DNBArchPluginInfo arch_plugin_info =
+ {
+ CPU_TYPE_X86_64,
+ DNBArchImplX86_64::Create,
+ DNBArchImplX86_64::GetRegisterSetInfo,
+ DNBArchImplX86_64::SoftwareBreakpointOpcode
+ };
+
+ // Register this arch plug-in with the main protocol class
+ DNBArchProtocol::RegisterArchPlugin (arch_plugin_info);
+}
+
+bool
+DNBArchImplX86_64::GetRegisterValue(uint32_t set, uint32_t reg, DNBRegisterValue *value)
+{
+ if (set == REGISTER_SET_GENERIC)
+ {
+ switch (reg)
+ {
+ case GENERIC_REGNUM_PC: // Program Counter
+ set = e_regSetGPR;
+ reg = gpr_rip;
+ break;
+
+ case GENERIC_REGNUM_SP: // Stack Pointer
+ set = e_regSetGPR;
+ reg = gpr_rsp;
+ break;
+
+ case GENERIC_REGNUM_FP: // Frame Pointer
+ set = e_regSetGPR;
+ reg = gpr_rbp;
+ break;
+
+ case GENERIC_REGNUM_FLAGS: // Processor flags register
+ set = e_regSetGPR;
+ reg = gpr_rflags;
+ break;
+
+ case GENERIC_REGNUM_RA: // Return Address
+ default:
+ return false;
+ }
+ }
+
+ if (GetRegisterState(set, false) != KERN_SUCCESS)
+ return false;
+
+ const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
+ if (regInfo)
+ {
+ value->info = *regInfo;
+ switch (set)
+ {
+ case e_regSetGPR:
+ if (reg < k_num_gpr_registers)
+ {
+ value->value.uint64 = ((uint64_t*)(&m_state.context.gpr))[reg];
+ return true;
+ }
+ break;
+
+ case e_regSetFPU:
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ {
+ switch (reg)
+ {
+ case fpu_fcw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)); return true;
+ case fpu_fsw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)); return true;
+ case fpu_ftw: value->value.uint8 = m_state.context.fpu.avx.__fpu_ftw; return true;
+ case fpu_fop: value->value.uint16 = m_state.context.fpu.avx.__fpu_fop; return true;
+ case fpu_ip: value->value.uint32 = m_state.context.fpu.avx.__fpu_ip; return true;
+ case fpu_cs: value->value.uint16 = m_state.context.fpu.avx.__fpu_cs; return true;
+ case fpu_dp: value->value.uint32 = m_state.context.fpu.avx.__fpu_dp; return true;
+ case fpu_ds: value->value.uint16 = m_state.context.fpu.avx.__fpu_ds; return true;
+ case fpu_mxcsr: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsr; return true;
+ case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsrmask; return true;
+
+ case fpu_stmm0:
+ case fpu_stmm1:
+ case fpu_stmm2:
+ case fpu_stmm3:
+ case fpu_stmm4:
+ case fpu_stmm5:
+ case fpu_stmm6:
+ case fpu_stmm7:
+ memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
+ return true;
+
+ case fpu_xmm0:
+ case fpu_xmm1:
+ case fpu_xmm2:
+ case fpu_xmm3:
+ case fpu_xmm4:
+ case fpu_xmm5:
+ case fpu_xmm6:
+ case fpu_xmm7:
+ case fpu_xmm8:
+ case fpu_xmm9:
+ case fpu_xmm10:
+ case fpu_xmm11:
+ case fpu_xmm12:
+ case fpu_xmm13:
+ case fpu_xmm14:
+ case fpu_xmm15:
+ memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
+ return true;
+
+ case fpu_ymm0:
+ case fpu_ymm1:
+ case fpu_ymm2:
+ case fpu_ymm3:
+ case fpu_ymm4:
+ case fpu_ymm5:
+ case fpu_ymm6:
+ case fpu_ymm7:
+ case fpu_ymm8:
+ case fpu_ymm9:
+ case fpu_ymm10:
+ case fpu_ymm11:
+ case fpu_ymm12:
+ case fpu_ymm13:
+ case fpu_ymm14:
+ case fpu_ymm15:
+ memcpy(&value->value.uint8, &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), 16);
+ memcpy((&value->value.uint8) + 16, &m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), 16);
+ return true;
+ }
+ }
+ else
+ {
+ switch (reg)
+ {
+ case fpu_fcw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)); return true;
+ case fpu_fsw: value->value.uint16 = *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)); return true;
+ case fpu_ftw: value->value.uint8 = m_state.context.fpu.no_avx.__fpu_ftw; return true;
+ case fpu_fop: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop; return true;
+ case fpu_ip: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip; return true;
+ case fpu_cs: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs; return true;
+ case fpu_dp: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp; return true;
+ case fpu_ds: value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds; return true;
+ case fpu_mxcsr: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr; return true;
+ case fpu_mxcsrmask: value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask; return true;
+
+ case fpu_stmm0:
+ case fpu_stmm1:
+ case fpu_stmm2:
+ case fpu_stmm3:
+ case fpu_stmm4:
+ case fpu_stmm5:
+ case fpu_stmm6:
+ case fpu_stmm7:
+ memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
+ return true;
+
+ case fpu_xmm0:
+ case fpu_xmm1:
+ case fpu_xmm2:
+ case fpu_xmm3:
+ case fpu_xmm4:
+ case fpu_xmm5:
+ case fpu_xmm6:
+ case fpu_xmm7:
+ case fpu_xmm8:
+ case fpu_xmm9:
+ case fpu_xmm10:
+ case fpu_xmm11:
+ case fpu_xmm12:
+ case fpu_xmm13:
+ case fpu_xmm14:
+ case fpu_xmm15:
+ memcpy(&value->value.uint8, &m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
+ return true;
+ }
+ }
+ break;
+
+ case e_regSetEXC:
+ switch (reg)
+ {
+ case exc_trapno: value->value.uint32 = m_state.context.exc.__trapno; return true;
+ case exc_err: value->value.uint32 = m_state.context.exc.__err; return true;
+ case exc_faultvaddr:value->value.uint64 = m_state.context.exc.__faultvaddr; return true;
+ }
+ break;
+ }
+ }
+ return false;
+}
+
+
+bool
+DNBArchImplX86_64::SetRegisterValue(uint32_t set, uint32_t reg, const DNBRegisterValue *value)
+{
+ if (set == REGISTER_SET_GENERIC)
+ {
+ switch (reg)
+ {
+ case GENERIC_REGNUM_PC: // Program Counter
+ set = e_regSetGPR;
+ reg = gpr_rip;
+ break;
+
+ case GENERIC_REGNUM_SP: // Stack Pointer
+ set = e_regSetGPR;
+ reg = gpr_rsp;
+ break;
+
+ case GENERIC_REGNUM_FP: // Frame Pointer
+ set = e_regSetGPR;
+ reg = gpr_rbp;
+ break;
+
+ case GENERIC_REGNUM_FLAGS: // Processor flags register
+ set = e_regSetGPR;
+ reg = gpr_rflags;
+ break;
+
+ case GENERIC_REGNUM_RA: // Return Address
+ default:
+ return false;
+ }
+ }
+
+ if (GetRegisterState(set, false) != KERN_SUCCESS)
+ return false;
+
+ bool success = false;
+ const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
+ if (regInfo)
+ {
+ switch (set)
+ {
+ case e_regSetGPR:
+ if (reg < k_num_gpr_registers)
+ {
+ ((uint64_t*)(&m_state.context.gpr))[reg] = value->value.uint64;
+ success = true;
+ }
+ break;
+
+ case e_regSetFPU:
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ {
+ switch (reg)
+ {
+ case fpu_fcw: *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)) = value->value.uint16; success = true; break;
+ case fpu_fsw: *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)) = value->value.uint16; success = true; break;
+ case fpu_ftw: m_state.context.fpu.avx.__fpu_ftw = value->value.uint8; success = true; break;
+ case fpu_fop: m_state.context.fpu.avx.__fpu_fop = value->value.uint16; success = true; break;
+ case fpu_ip: m_state.context.fpu.avx.__fpu_ip = value->value.uint32; success = true; break;
+ case fpu_cs: m_state.context.fpu.avx.__fpu_cs = value->value.uint16; success = true; break;
+ case fpu_dp: m_state.context.fpu.avx.__fpu_dp = value->value.uint32; success = true; break;
+ case fpu_ds: m_state.context.fpu.avx.__fpu_ds = value->value.uint16; success = true; break;
+ case fpu_mxcsr: m_state.context.fpu.avx.__fpu_mxcsr = value->value.uint32; success = true; break;
+ case fpu_mxcsrmask: m_state.context.fpu.avx.__fpu_mxcsrmask = value->value.uint32; success = true; break;
+
+ case fpu_stmm0:
+ case fpu_stmm1:
+ case fpu_stmm2:
+ case fpu_stmm3:
+ case fpu_stmm4:
+ case fpu_stmm5:
+ case fpu_stmm6:
+ case fpu_stmm7:
+ memcpy (&m_state.context.fpu.avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
+ success = true;
+ break;
+
+ case fpu_xmm0:
+ case fpu_xmm1:
+ case fpu_xmm2:
+ case fpu_xmm3:
+ case fpu_xmm4:
+ case fpu_xmm5:
+ case fpu_xmm6:
+ case fpu_xmm7:
+ case fpu_xmm8:
+ case fpu_xmm9:
+ case fpu_xmm10:
+ case fpu_xmm11:
+ case fpu_xmm12:
+ case fpu_xmm13:
+ case fpu_xmm14:
+ case fpu_xmm15:
+ memcpy (&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
+ success = true;
+ break;
+
+ case fpu_ymm0:
+ case fpu_ymm1:
+ case fpu_ymm2:
+ case fpu_ymm3:
+ case fpu_ymm4:
+ case fpu_ymm5:
+ case fpu_ymm6:
+ case fpu_ymm7:
+ case fpu_ymm8:
+ case fpu_ymm9:
+ case fpu_ymm10:
+ case fpu_ymm11:
+ case fpu_ymm12:
+ case fpu_ymm13:
+ case fpu_ymm14:
+ case fpu_ymm15:
+ memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), &value->value.uint8, 16);
+ memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), (&value->value.uint8) + 16, 16);
+ return true;
+ }
+ }
+ else
+ {
+ switch (reg)
+ {
+ case fpu_fcw: *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = value->value.uint16; success = true; break;
+ case fpu_fsw: *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) = value->value.uint16; success = true; break;
+ case fpu_ftw: m_state.context.fpu.no_avx.__fpu_ftw = value->value.uint8; success = true; break;
+ case fpu_fop: m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16; success = true; break;
+ case fpu_ip: m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32; success = true; break;
+ case fpu_cs: m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16; success = true; break;
+ case fpu_dp: m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32; success = true; break;
+ case fpu_ds: m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16; success = true; break;
+ case fpu_mxcsr: m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32; success = true; break;
+ case fpu_mxcsrmask: m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32; success = true; break;
+
+ case fpu_stmm0:
+ case fpu_stmm1:
+ case fpu_stmm2:
+ case fpu_stmm3:
+ case fpu_stmm4:
+ case fpu_stmm5:
+ case fpu_stmm6:
+ case fpu_stmm7:
+ memcpy (&m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), &value->value.uint8, 10);
+ success = true;
+ break;
+
+ case fpu_xmm0:
+ case fpu_xmm1:
+ case fpu_xmm2:
+ case fpu_xmm3:
+ case fpu_xmm4:
+ case fpu_xmm5:
+ case fpu_xmm6:
+ case fpu_xmm7:
+ case fpu_xmm8:
+ case fpu_xmm9:
+ case fpu_xmm10:
+ case fpu_xmm11:
+ case fpu_xmm12:
+ case fpu_xmm13:
+ case fpu_xmm14:
+ case fpu_xmm15:
+ memcpy (&m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), &value->value.uint8, 16);
+ success = true;
+ break;
+ }
+ }
+ break;
+
+ case e_regSetEXC:
+ switch (reg)
+ {
+ case exc_trapno: m_state.context.exc.__trapno = value->value.uint32; success = true; break;
+ case exc_err: m_state.context.exc.__err = value->value.uint32; success = true; break;
+ case exc_faultvaddr:m_state.context.exc.__faultvaddr = value->value.uint64; success = true; break;
+ }
+ break;
+ }
+ }
+
+ if (success)
+ return SetRegisterState(set) == KERN_SUCCESS;
+ return false;
+}
+
+uint32_t
+DNBArchImplX86_64::GetRegisterContextSize()
+{
+ static uint32_t g_cached_size = 0;
+ if (g_cached_size == 0)
+ {
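+        // Note: registers whose value_regs field is non-NULL are contained
+        // within another register (e.g. the xmm registers inside the ymm
+        // registers in the AVX table), so they don't add any bytes here.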
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ {
+ for (size_t i=0; i<k_num_fpu_registers_avx; ++i)
+ {
+ if (g_fpu_registers_avx[i].value_regs == NULL)
+ g_cached_size += g_fpu_registers_avx[i].size;
+ }
+ }
+ else
+ {
+ for (size_t i=0; i<k_num_fpu_registers_no_avx; ++i)
+ {
+ if (g_fpu_registers_no_avx[i].value_regs == NULL)
+ g_cached_size += g_fpu_registers_no_avx[i].size;
+ }
+ }
+ DNBLogThreaded ("DNBArchImplX86_64::GetRegisterContextSize() - GPR = %zu, FPU = %u, EXC = %zu", sizeof(GPR), g_cached_size, sizeof(EXC));
+ g_cached_size += sizeof(GPR);
+ g_cached_size += sizeof(EXC);
+ DNBLogThreaded ("DNBArchImplX86_64::GetRegisterContextSize() - GPR + FPU + EXC = %u", g_cached_size);
+ }
+ return g_cached_size;
+}
+
+nub_size_t
+DNBArchImplX86_64::GetRegisterContext (void *buf, nub_size_t buf_len)
+{
+ uint32_t size = GetRegisterContextSize();
+
+ if (buf && buf_len)
+ {
+ bool force = false;
+ kern_return_t kret;
+
+ if ((kret = GetGPRState(force)) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %llu) error: GPR regs failed to read: %u ", buf, (uint64_t)buf_len, kret);
+ size = 0;
+ }
+ else
+ if ((kret = GetFPUState(force)) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %llu) error: %s regs failed to read: %u", buf, (uint64_t)buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
+ size = 0;
+ }
+ else
+ if ((kret = GetEXCState(force)) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %llu) error: EXC regs failed to read: %u", buf, (uint64_t)buf_len, kret);
+ size = 0;
+ }
+ else
+ {
+ uint8_t *p = (uint8_t *)buf;
+ // Copy the GPR registers
+ memcpy(p, &m_state.context.gpr, sizeof(GPR));
+ p += sizeof(GPR);
+
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ {
+ // Walk around the gaps in the FPU regs
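+                // The chunk sizes below (5, 8, 6 and 8 bytes) cover fcw..ftw,
+                // fop..cs, dp..ds and mxcsr..mxcsrmask respectively, skipping
+                // the reserved bytes that sit between those groups in the
+                // thread state structure.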
+ memcpy(p, &m_state.context.fpu.avx.__fpu_fcw, 5);
+ p += 5;
+ memcpy(p, &m_state.context.fpu.avx.__fpu_fop, 8);
+ p += 8;
+ memcpy(p, &m_state.context.fpu.avx.__fpu_dp, 6);
+ p += 6;
+ memcpy(p, &m_state.context.fpu.avx.__fpu_mxcsr, 8);
+ p += 8;
+
+                // Work around the padding between the stmm registers since
+                // each is a 16-byte struct holding only 10 bytes of value.
+ for (size_t i=0; i<8; ++i)
+ {
+ memcpy(p, &m_state.context.fpu.avx.__fpu_stmm0 + i, 10);
+ p += 10;
+ }
+
+ // Interleave the XMM and YMMH registers to make the YMM registers
+ for (size_t i=0; i<16; ++i)
+ {
+ memcpy(p, &m_state.context.fpu.avx.__fpu_xmm0 + i, 16);
+ p += 16;
+ memcpy(p, &m_state.context.fpu.avx.__fpu_ymmh0 + i, 16);
+ p += 16;
+ }
+ }
+ else
+ {
+ // Walk around the gaps in the FPU regs
+ memcpy(p, &m_state.context.fpu.no_avx.__fpu_fcw, 5);
+ p += 5;
+ memcpy(p, &m_state.context.fpu.no_avx.__fpu_fop, 8);
+ p += 8;
+ memcpy(p, &m_state.context.fpu.no_avx.__fpu_dp, 6);
+ p += 6;
+ memcpy(p, &m_state.context.fpu.no_avx.__fpu_mxcsr, 8);
+ p += 8;
+
+                // Work around the padding between the stmm registers since
+                // each is a 16-byte struct holding only 10 bytes of value.
+ for (size_t i=0; i<8; ++i)
+ {
+ memcpy(p, &m_state.context.fpu.no_avx.__fpu_stmm0 + i, 10);
+ p += 10;
+ }
+
+ // Copy the XMM registers in a single block
+ memcpy(p, &m_state.context.fpu.no_avx.__fpu_xmm0, 16 * 16);
+ p += 16 * 16;
+ }
+
+ // Copy the exception registers
+ memcpy(p, &m_state.context.exc, sizeof(EXC));
+ p += sizeof(EXC);
+
+ // make sure we end up with exactly what we think we should have
+ size_t bytes_written = p - (uint8_t *)buf;
+ UNUSED_IF_ASSERT_DISABLED(bytes_written);
+ assert (bytes_written == size);
+ }
+
+ }
+
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %llu) => %u", buf, (uint64_t)buf_len, size);
+ // Return the size of the register context even if NULL was passed in
+ return size;
+}
+
+nub_size_t
+DNBArchImplX86_64::SetRegisterContext (const void *buf, nub_size_t buf_len)
+{
+ uint32_t size = GetRegisterContextSize();
+ if (buf == NULL || buf_len == 0)
+ size = 0;
+
+ if (size)
+ {
+ if (size > buf_len)
+ size = static_cast<uint32_t>(buf_len);
+
+ uint8_t *p = (uint8_t *)buf;
+ // Copy the GPR registers
+ memcpy(&m_state.context.gpr, p, sizeof(GPR));
+ p += sizeof(GPR);
+
+ if (CPUHasAVX() || FORCE_AVX_REGS)
+ {
+ // Walk around the gaps in the FPU regs
+ memcpy(&m_state.context.fpu.avx.__fpu_fcw, p, 5);
+ p += 5;
+ memcpy(&m_state.context.fpu.avx.__fpu_fop, p, 8);
+ p += 8;
+ memcpy(&m_state.context.fpu.avx.__fpu_dp, p, 6);
+ p += 6;
+ memcpy(&m_state.context.fpu.avx.__fpu_mxcsr, p, 8);
+ p += 8;
+
+ // Work around the padding between the stmm registers as they are 16
+ // byte structs with 10 bytes of the value in each
+ for (size_t i=0; i<8; ++i)
+ {
+ memcpy(&m_state.context.fpu.avx.__fpu_stmm0 + i, p, 10);
+ p += 10;
+ }
+
+ // Interleave the XMM and YMMH registers to make the YMM registers
+ for (size_t i=0; i<16; ++i)
+ {
+ memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + i, p, 16);
+ p += 16;
+ memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + i, p, 16);
+ p += 16;
+ }
+ }
+ else
+ {
+            // Walk around the gaps in the FPU regs
+ memcpy(&m_state.context.fpu.no_avx.__fpu_fcw, p, 5);
+ p += 5;
+ memcpy(&m_state.context.fpu.no_avx.__fpu_fop, p, 8);
+ p += 8;
+ memcpy(&m_state.context.fpu.no_avx.__fpu_dp, p, 6);
+ p += 6;
+ memcpy(&m_state.context.fpu.no_avx.__fpu_mxcsr, p, 8);
+ p += 8;
+
+ // Work around the padding between the stmm registers as they are 16
+ // byte structs with 10 bytes of the value in each
+ for (size_t i=0; i<8; ++i)
+ {
+ memcpy(&m_state.context.fpu.no_avx.__fpu_stmm0 + i, p, 10);
+ p += 10;
+ }
+
+ // Copy the XMM registers in a single block
+ memcpy(&m_state.context.fpu.no_avx.__fpu_xmm0, p, 16 * 16);
+ p += 16 * 16;
+ }
+
+ // Copy the exception registers
+ memcpy(&m_state.context.exc, p, sizeof(EXC));
+ p += sizeof(EXC);
+
+ // make sure we end up with exactly what we think we should have
+ size_t bytes_written = p - (uint8_t *)buf;
+ UNUSED_IF_ASSERT_DISABLED(bytes_written);
+ assert (bytes_written == size);
+
+ kern_return_t kret;
+ if ((kret = SetGPRState()) != KERN_SUCCESS)
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %llu) error: GPR regs failed to write: %u", buf, (uint64_t)buf_len, kret);
+ if ((kret = SetFPUState()) != KERN_SUCCESS)
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %llu) error: %s regs failed to write: %u", buf, (uint64_t)buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
+ if ((kret = SetEXCState()) != KERN_SUCCESS)
+            DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %llu) error: EXC regs failed to write: %u", buf, (uint64_t)buf_len, kret);
+ }
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %llu) => %llu", buf, (uint64_t)buf_len, (uint64_t)size);
+ return size;
+}
+
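+// Interrupt any outstanding kernel trap for the thread with
+// thread_abort_safely(), force a re-read of the GPR and FPU state, and cache
+// the current context under a new save ID. Returns 0 if the registers could
+// not be read.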
+uint32_t
+DNBArchImplX86_64::SaveRegisterState ()
+{
+ kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
+ DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (SetGPRState() for stop_count = %u)", m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());
+
+ // Always re-read the registers because above we call thread_abort_safely();
+ bool force = true;
+
+ if ((kret = GetGPRState(force)) != KERN_SUCCESS)
+ {
+        DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SaveRegisterState () error: GPR regs failed to read: %u", kret);
+ }
+ else if ((kret = GetFPUState(force)) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::SaveRegisterState () error: %s regs failed to read: %u", CPUHasAVX() ? "AVX" : "FPU", kret);
+ }
+ else
+ {
+ const uint32_t save_id = GetNextRegisterStateSaveID ();
+ m_saved_register_states[save_id] = m_state.context;
+ return save_id;
+ }
+ return 0;
+}
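+
+// Restore the GPR and FPU state cached under save_id by SaveRegisterState()
+// and write it back to the thread. The cached entry is removed whether or not
+// the writes succeed.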
+bool
+DNBArchImplX86_64::RestoreRegisterState (uint32_t save_id)
+{
+ SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id);
+ if (pos != m_saved_register_states.end())
+ {
+ m_state.context.gpr = pos->second.gpr;
+ m_state.context.fpu = pos->second.fpu;
+ m_state.SetError(e_regSetGPR, Read, 0);
+ m_state.SetError(e_regSetFPU, Read, 0);
+ kern_return_t kret;
+ bool success = true;
+ if ((kret = SetGPRState()) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::RestoreRegisterState (save_id = %u) error: GPR regs failed to write: %u", save_id, kret);
+ success = false;
+ }
+ else if ((kret = SetFPUState()) != KERN_SUCCESS)
+ {
+ DNBLogThreadedIf (LOG_THREAD, "DNBArchImplX86_64::RestoreRegisterState (save_id = %u) error: %s regs failed to write: %u", save_id, CPUHasAVX() ? "AVX" : "FPU", kret);
+ success = false;
+ }
+ m_saved_register_states.erase(pos);
+ return success;
+ }
+ return false;
+}
+
+
+kern_return_t
+DNBArchImplX86_64::GetRegisterState(int set, bool force)
+{
+ switch (set)
+ {
+ case e_regSetALL: return GetGPRState(force) | GetFPUState(force) | GetEXCState(force);
+ case e_regSetGPR: return GetGPRState(force);
+ case e_regSetFPU: return GetFPUState(force);
+ case e_regSetEXC: return GetEXCState(force);
+ default: break;
+ }
+ return KERN_INVALID_ARGUMENT;
+}
+
+kern_return_t
+DNBArchImplX86_64::SetRegisterState(int set)
+{
+ // Make sure we have a valid context to set.
+ if (RegisterSetStateIsValid(set))
+ {
+ switch (set)
+ {
+ case e_regSetALL: return SetGPRState() | SetFPUState() | SetEXCState();
+ case e_regSetGPR: return SetGPRState();
+ case e_regSetFPU: return SetFPUState();
+ case e_regSetEXC: return SetEXCState();
+ default: break;
+ }
+ }
+ return KERN_INVALID_ARGUMENT;
+}
+
+bool
+DNBArchImplX86_64::RegisterSetStateIsValid (int set) const
+{
+ return m_state.RegsAreValid(set);
+}
+
+
+
+#endif // #if defined (__i386__) || defined (__x86_64__)
diff --git a/tools/debugserver/source/MacOSX/x86_64/DNBArchImplX86_64.h b/tools/debugserver/source/MacOSX/x86_64/DNBArchImplX86_64.h
new file mode 100644
index 000000000000..20844951261b
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/x86_64/DNBArchImplX86_64.h
@@ -0,0 +1,260 @@
+//===-- DNBArchImplX86_64.h -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Greg Clayton on 6/25/07.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __DNBArchImplX86_64_h__
+#define __DNBArchImplX86_64_h__
+
+#if defined (__i386__) || defined (__x86_64__)
+#include "DNBArch.h"
+#include "MachRegisterStatesX86_64.h"
+
+#include <map>
+
+class MachThread;
+
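+// x86_64 implementation of DNBArchProtocol: register access, register context
+// save/restore, single stepping, and hardware watchpoint support for a single
+// MachThread.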
+class DNBArchImplX86_64 : public DNBArchProtocol
+{
+public:
+ DNBArchImplX86_64(MachThread *thread) :
+ DNBArchProtocol(),
+ m_thread(thread),
+ m_state(),
+ m_2pc_dbg_checkpoint(),
+ m_2pc_trans_state(Trans_Done),
+ m_saved_register_states()
+ {
+ }
+ virtual ~DNBArchImplX86_64()
+ {
+ }
+
+ static void Initialize();
+
+ virtual bool GetRegisterValue(uint32_t set, uint32_t reg, DNBRegisterValue *value);
+ virtual bool SetRegisterValue(uint32_t set, uint32_t reg, const DNBRegisterValue *value);
+ virtual nub_size_t GetRegisterContext (void *buf, nub_size_t buf_len);
+ virtual nub_size_t SetRegisterContext (const void *buf, nub_size_t buf_len);
+ virtual uint32_t SaveRegisterState ();
+ virtual bool RestoreRegisterState (uint32_t save_id);
+
+ virtual kern_return_t GetRegisterState (int set, bool force);
+ virtual kern_return_t SetRegisterState (int set);
+ virtual bool RegisterSetStateIsValid (int set) const;
+
+ virtual uint64_t GetPC(uint64_t failValue); // Get program counter
+ virtual kern_return_t SetPC(uint64_t value);
+ virtual uint64_t GetSP(uint64_t failValue); // Get stack pointer
+ virtual void ThreadWillResume();
+ virtual bool ThreadDidStop();
+ virtual bool NotifyException(MachException::Data& exc);
+
+ virtual uint32_t NumSupportedHardwareWatchpoints();
+ virtual uint32_t EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write, bool also_set_on_task);
+ virtual bool DisableHardwareWatchpoint (uint32_t hw_break_index, bool also_set_on_task);
+ virtual uint32_t GetHardwareWatchpointHit(nub_addr_t &addr);
+
+protected:
+ kern_return_t EnableHardwareSingleStep (bool enable);
+
+ typedef __x86_64_thread_state_t GPR;
+ typedef __x86_64_float_state_t FPU;
+ typedef __x86_64_exception_state_t EXC;
+ typedef __x86_64_avx_state_t AVX;
+ typedef __x86_64_debug_state_t DBG;
+
+ static const DNBRegisterInfo g_gpr_registers[];
+ static const DNBRegisterInfo g_fpu_registers_no_avx[];
+ static const DNBRegisterInfo g_fpu_registers_avx[];
+ static const DNBRegisterInfo g_exc_registers[];
+ static const DNBRegisterSetInfo g_reg_sets_no_avx[];
+ static const DNBRegisterSetInfo g_reg_sets_avx[];
+ static const size_t k_num_gpr_registers;
+ static const size_t k_num_fpu_registers_no_avx;
+ static const size_t k_num_fpu_registers_avx;
+ static const size_t k_num_exc_registers;
+ static const size_t k_num_all_registers_no_avx;
+ static const size_t k_num_all_registers_avx;
+ static const size_t k_num_register_sets;
+
+ typedef enum RegisterSetTag
+ {
+ e_regSetALL = REGISTER_SET_ALL,
+ e_regSetGPR,
+ e_regSetFPU,
+ e_regSetEXC,
+ e_regSetDBG,
+ kNumRegisterSets
+ } RegisterSet;
+
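+    // Size of each register set flavor in 32-bit words, the unit that
+    // thread_get_state()/thread_set_state() use for their state counts.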
+ typedef enum RegisterSetWordSizeTag
+ {
+ e_regSetWordSizeGPR = sizeof(GPR) / sizeof(int),
+ e_regSetWordSizeFPU = sizeof(FPU) / sizeof(int),
+ e_regSetWordSizeEXC = sizeof(EXC) / sizeof(int),
+ e_regSetWordSizeAVX = sizeof(AVX) / sizeof(int),
+ e_regSetWordSizeDBG = sizeof(DBG) / sizeof(int)
+ } RegisterSetWordSize;
+
+ enum
+ {
+ Read = 0,
+ Write = 1,
+ kNumErrors = 2
+ };
+
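+    // Complete register context for one thread. The fpu member is a union
+    // because the same storage holds either the plain FPU state or the wider
+    // AVX state, depending on what the CPU supports.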
+ struct Context
+ {
+ GPR gpr;
+ union {
+ FPU no_avx;
+ AVX avx;
+ } fpu;
+ EXC exc;
+ DBG dbg;
+ };
+
+ struct State
+ {
+ Context context;
+ kern_return_t gpr_errs[2]; // Read/Write errors
+ kern_return_t fpu_errs[2]; // Read/Write errors
+ kern_return_t exc_errs[2]; // Read/Write errors
+ kern_return_t dbg_errs[2]; // Read/Write errors
+
+ State()
+ {
+ uint32_t i;
+ for (i=0; i<kNumErrors; i++)
+ {
+ gpr_errs[i] = -1;
+ fpu_errs[i] = -1;
+ exc_errs[i] = -1;
+ dbg_errs[i] = -1;
+ }
+ }
+
+ void
+ InvalidateAllRegisterStates()
+ {
+ SetError (e_regSetALL, Read, -1);
+ }
+
+ kern_return_t
+ GetError (int flavor, uint32_t err_idx) const
+ {
+ if (err_idx < kNumErrors)
+ {
+ switch (flavor)
+ {
+ // When getting all errors, just OR all values together to see if
+ // we got any kind of error.
+ case e_regSetALL: return gpr_errs[err_idx] |
+ fpu_errs[err_idx] |
+ exc_errs[err_idx];
+ case e_regSetGPR: return gpr_errs[err_idx];
+ case e_regSetFPU: return fpu_errs[err_idx];
+ case e_regSetEXC: return exc_errs[err_idx];
+ case e_regSetDBG: return dbg_errs[err_idx];
+ default: break;
+ }
+ }
+ return -1;
+ }
+
+ bool
+ SetError (int flavor, uint32_t err_idx, kern_return_t err)
+ {
+ if (err_idx < kNumErrors)
+ {
+ switch (flavor)
+ {
+ case e_regSetALL:
+ gpr_errs[err_idx] =
+ fpu_errs[err_idx] =
+ exc_errs[err_idx] =
+ dbg_errs[err_idx] = err;
+ return true;
+
+ case e_regSetGPR:
+ gpr_errs[err_idx] = err;
+ return true;
+
+ case e_regSetFPU:
+ fpu_errs[err_idx] = err;
+ return true;
+
+ case e_regSetEXC:
+ exc_errs[err_idx] = err;
+ return true;
+
+ case e_regSetDBG:
+ dbg_errs[err_idx] = err;
+ return true;
+
+ default: break;
+ }
+ }
+ return false;
+ }
+
+ bool
+ RegsAreValid (int flavor) const
+ {
+ return GetError(flavor, Read) == KERN_SUCCESS;
+ }
+ };
+
+ kern_return_t GetGPRState (bool force);
+ kern_return_t GetFPUState (bool force);
+ kern_return_t GetEXCState (bool force);
+ kern_return_t GetDBGState (bool force);
+
+ kern_return_t SetGPRState ();
+ kern_return_t SetFPUState ();
+ kern_return_t SetEXCState ();
+ kern_return_t SetDBGState (bool also_set_on_task);
+
+ static DNBArchProtocol *
+ Create (MachThread *thread);
+
+ static const uint8_t *
+ SoftwareBreakpointOpcode (nub_size_t byte_size);
+
+ static const DNBRegisterSetInfo *
+ GetRegisterSetInfo(nub_size_t *num_reg_sets);
+
+ static uint32_t GetRegisterContextSize();
+
+ // Helper functions for watchpoint manipulations.
+ static void SetWatchpoint(DBG &debug_state, uint32_t hw_index, nub_addr_t addr, nub_size_t size, bool read, bool write);
+ static void ClearWatchpoint(DBG &debug_state, uint32_t hw_index);
+ static bool IsWatchpointVacant(const DBG &debug_state, uint32_t hw_index);
+ static void ClearWatchpointHits(DBG &debug_state);
+ static bool IsWatchpointHit(const DBG &debug_state, uint32_t hw_index);
+ static nub_addr_t GetWatchAddress(const DBG &debug_state, uint32_t hw_index);
+
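+    // Two-phase commit of DBG (debug register) changes when manipulating
+    // hardware watchpoints: start a transaction, then either finish it or
+    // roll back to the checkpoint held in m_2pc_dbg_checkpoint.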
+ virtual bool StartTransForHWP();
+ virtual bool RollbackTransForHWP();
+ virtual bool FinishTransForHWP();
+ DBG GetDBGCheckpoint();
+
+ MachThread *m_thread;
+ State m_state;
+ DBG m_2pc_dbg_checkpoint;
+    uint32_t m_2pc_trans_state; // State of the DBG state-change transaction: Pending (0), Done (1), or Rolled Back (2)
+ typedef std::map<uint32_t, Context> SaveRegisterStates;
+ SaveRegisterStates m_saved_register_states;
+};
+
+#endif // #if defined (__i386__) || defined (__x86_64__)
+#endif // #ifndef __DNBArchImplX86_64_h__
diff --git a/tools/debugserver/source/MacOSX/x86_64/MachRegisterStatesX86_64.h b/tools/debugserver/source/MacOSX/x86_64/MachRegisterStatesX86_64.h
new file mode 100644
index 000000000000..4e48e9645dd5
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/x86_64/MachRegisterStatesX86_64.h
@@ -0,0 +1,210 @@
+//===-- MachRegisterStatesX86_64.h --------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Created by Sean Callanan on 3/16/11.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __MachRegisterStatesX86_64_h__
+#define __MachRegisterStatesX86_64_h__
+
+#include <inttypes.h>
+
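+// Local definitions of the x86_64 Mach thread state flavors and their
+// register structures as used by debugserver; the flavor values and layouts
+// below are assumed to correspond to the kernel's x86_THREAD_STATE64 family.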
+#define __x86_64_THREAD_STATE 4
+#define __x86_64_FLOAT_STATE 5
+#define __x86_64_EXCEPTION_STATE 6
+#define __x86_64_DEBUG_STATE 11
+#define __x86_64_AVX_STATE 17
+
+typedef struct {
+ uint64_t __rax;
+ uint64_t __rbx;
+ uint64_t __rcx;
+ uint64_t __rdx;
+ uint64_t __rdi;
+ uint64_t __rsi;
+ uint64_t __rbp;
+ uint64_t __rsp;
+ uint64_t __r8;
+ uint64_t __r9;
+ uint64_t __r10;
+ uint64_t __r11;
+ uint64_t __r12;
+ uint64_t __r13;
+ uint64_t __r14;
+ uint64_t __r15;
+ uint64_t __rip;
+ uint64_t __rflags;
+ uint64_t __cs;
+ uint64_t __fs;
+ uint64_t __gs;
+} __x86_64_thread_state_t;
+
+typedef struct {
+ uint16_t __invalid : 1;
+ uint16_t __denorm : 1;
+ uint16_t __zdiv : 1;
+ uint16_t __ovrfl : 1;
+ uint16_t __undfl : 1;
+ uint16_t __precis : 1;
+ uint16_t __PAD1 : 2;
+ uint16_t __pc : 2;
+ uint16_t __rc : 2;
+ uint16_t __PAD2 : 1;
+ uint16_t __PAD3 : 3;
+} __x86_64_fp_control_t;
+
+typedef struct {
+ uint16_t __invalid : 1;
+ uint16_t __denorm : 1;
+ uint16_t __zdiv : 1;
+ uint16_t __ovrfl : 1;
+ uint16_t __undfl : 1;
+ uint16_t __precis : 1;
+ uint16_t __stkflt : 1;
+ uint16_t __errsumm : 1;
+ uint16_t __c0 : 1;
+ uint16_t __c1 : 1;
+ uint16_t __c2 : 1;
+ uint16_t __tos : 3;
+ uint16_t __c3 : 1;
+ uint16_t __busy : 1;
+} __x86_64_fp_status_t;
+
+typedef struct {
+ uint8_t __mmst_reg[10];
+ uint8_t __mmst_rsrv[6];
+} __x86_64_mmst_reg;
+
+typedef struct {
+ uint8_t __xmm_reg[16];
+} __x86_64_xmm_reg;
+
+typedef struct {
+ int32_t __fpu_reserved[2];
+ __x86_64_fp_control_t __fpu_fcw;
+ __x86_64_fp_status_t __fpu_fsw;
+ uint8_t __fpu_ftw;
+ uint8_t __fpu_rsrv1;
+ uint16_t __fpu_fop;
+ uint32_t __fpu_ip;
+ uint16_t __fpu_cs;
+ uint16_t __fpu_rsrv2;
+ uint32_t __fpu_dp;
+ uint16_t __fpu_ds;
+ uint16_t __fpu_rsrv3;
+ uint32_t __fpu_mxcsr;
+ uint32_t __fpu_mxcsrmask;
+ __x86_64_mmst_reg __fpu_stmm0;
+ __x86_64_mmst_reg __fpu_stmm1;
+ __x86_64_mmst_reg __fpu_stmm2;
+ __x86_64_mmst_reg __fpu_stmm3;
+ __x86_64_mmst_reg __fpu_stmm4;
+ __x86_64_mmst_reg __fpu_stmm5;
+ __x86_64_mmst_reg __fpu_stmm6;
+ __x86_64_mmst_reg __fpu_stmm7;
+ __x86_64_xmm_reg __fpu_xmm0;
+ __x86_64_xmm_reg __fpu_xmm1;
+ __x86_64_xmm_reg __fpu_xmm2;
+ __x86_64_xmm_reg __fpu_xmm3;
+ __x86_64_xmm_reg __fpu_xmm4;
+ __x86_64_xmm_reg __fpu_xmm5;
+ __x86_64_xmm_reg __fpu_xmm6;
+ __x86_64_xmm_reg __fpu_xmm7;
+ __x86_64_xmm_reg __fpu_xmm8;
+ __x86_64_xmm_reg __fpu_xmm9;
+ __x86_64_xmm_reg __fpu_xmm10;
+ __x86_64_xmm_reg __fpu_xmm11;
+ __x86_64_xmm_reg __fpu_xmm12;
+ __x86_64_xmm_reg __fpu_xmm13;
+ __x86_64_xmm_reg __fpu_xmm14;
+ __x86_64_xmm_reg __fpu_xmm15;
+ uint8_t __fpu_rsrv4[6*16];
+ int32_t __fpu_reserved1;
+} __x86_64_float_state_t;
+
+typedef struct {
+ uint32_t __fpu_reserved[2];
+ __x86_64_fp_control_t __fpu_fcw;
+ __x86_64_fp_status_t __fpu_fsw;
+ uint8_t __fpu_ftw;
+ uint8_t __fpu_rsrv1;
+ uint16_t __fpu_fop;
+ uint32_t __fpu_ip;
+ uint16_t __fpu_cs;
+ uint16_t __fpu_rsrv2;
+ uint32_t __fpu_dp;
+ uint16_t __fpu_ds;
+ uint16_t __fpu_rsrv3;
+ uint32_t __fpu_mxcsr;
+ uint32_t __fpu_mxcsrmask;
+ __x86_64_mmst_reg __fpu_stmm0;
+ __x86_64_mmst_reg __fpu_stmm1;
+ __x86_64_mmst_reg __fpu_stmm2;
+ __x86_64_mmst_reg __fpu_stmm3;
+ __x86_64_mmst_reg __fpu_stmm4;
+ __x86_64_mmst_reg __fpu_stmm5;
+ __x86_64_mmst_reg __fpu_stmm6;
+ __x86_64_mmst_reg __fpu_stmm7;
+ __x86_64_xmm_reg __fpu_xmm0;
+ __x86_64_xmm_reg __fpu_xmm1;
+ __x86_64_xmm_reg __fpu_xmm2;
+ __x86_64_xmm_reg __fpu_xmm3;
+ __x86_64_xmm_reg __fpu_xmm4;
+ __x86_64_xmm_reg __fpu_xmm5;
+ __x86_64_xmm_reg __fpu_xmm6;
+ __x86_64_xmm_reg __fpu_xmm7;
+ __x86_64_xmm_reg __fpu_xmm8;
+ __x86_64_xmm_reg __fpu_xmm9;
+ __x86_64_xmm_reg __fpu_xmm10;
+ __x86_64_xmm_reg __fpu_xmm11;
+ __x86_64_xmm_reg __fpu_xmm12;
+ __x86_64_xmm_reg __fpu_xmm13;
+ __x86_64_xmm_reg __fpu_xmm14;
+ __x86_64_xmm_reg __fpu_xmm15;
+ uint8_t __fpu_rsrv4[6*16];
+ uint32_t __fpu_reserved1;
+ uint8_t __avx_reserved1[64];
+ __x86_64_xmm_reg __fpu_ymmh0;
+ __x86_64_xmm_reg __fpu_ymmh1;
+ __x86_64_xmm_reg __fpu_ymmh2;
+ __x86_64_xmm_reg __fpu_ymmh3;
+ __x86_64_xmm_reg __fpu_ymmh4;
+ __x86_64_xmm_reg __fpu_ymmh5;
+ __x86_64_xmm_reg __fpu_ymmh6;
+ __x86_64_xmm_reg __fpu_ymmh7;
+ __x86_64_xmm_reg __fpu_ymmh8;
+ __x86_64_xmm_reg __fpu_ymmh9;
+ __x86_64_xmm_reg __fpu_ymmh10;
+ __x86_64_xmm_reg __fpu_ymmh11;
+ __x86_64_xmm_reg __fpu_ymmh12;
+ __x86_64_xmm_reg __fpu_ymmh13;
+ __x86_64_xmm_reg __fpu_ymmh14;
+ __x86_64_xmm_reg __fpu_ymmh15;
+} __x86_64_avx_state_t;
+
+typedef struct {
+ uint32_t __trapno;
+ uint32_t __err;
+ uint64_t __faultvaddr;
+} __x86_64_exception_state_t;
+
+
+typedef struct {
+ uint64_t __dr0;
+ uint64_t __dr1;
+ uint64_t __dr2;
+ uint64_t __dr3;
+ uint64_t __dr4;
+ uint64_t __dr5;
+ uint64_t __dr6;
+ uint64_t __dr7;
+} __x86_64_debug_state_t;
+
+#endif
diff --git a/tools/debugserver/source/MacOSX/x86_64/Makefile b/tools/debugserver/source/MacOSX/x86_64/Makefile
new file mode 100644
index 000000000000..bf488c859242
--- /dev/null
+++ b/tools/debugserver/source/MacOSX/x86_64/Makefile
@@ -0,0 +1,19 @@
+##===- tools/debugserver/source/MacOSX/x86_64/Makefile -----*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LLDB_LEVEL := ../../../../..
+
+LIBRARYNAME := lldbDebugserverMacOSX_X86_64
+BUILD_ARCHIVE = 1
+
+SOURCES := DNBArchImplX86_64.cpp
+
+include $(LLDB_LEVEL)/Makefile
+
+CPP.Flags += -I$(PROJ_SRC_DIR)/.. -I$(PROJ_SRC_DIR)/../.. -I$(PROJ_OBJ_DIR)/../../..
\ No newline at end of file