1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 *
25 * Copyright 2018 Joyent, Inc.
26 */
27
28 /*
29 * MDB Target Layer
30 *
31 * The *target* is the program being inspected by the debugger. The MDB target
32 * layer provides a set of functions that insulate common debugger code,
33 * including the MDB Module API, from the implementation details of how the
34 * debugger accesses information from a given target. Each target exports a
35 * standard set of properties, including one or more address spaces, one or
36 * more symbol tables, a set of load objects, and a set of threads that can be
37 * examined using the interfaces in <mdb/mdb_target.h>. This technique has
38 * been employed successfully in other debuggers, including [1], primarily
39 * to improve portability, although the term "target" often refers to the
40 * encapsulation of architectural or operating system-specific details. The
41 * target abstraction is useful for MDB because it allows us to easily extend
42 * the debugger to examine a variety of different program forms. Primarily,
43 * the target functions validate input arguments and then call an appropriate
44 * function in the target ops vector, defined in <mdb/mdb_target_impl.h>.
45 * However, this interface layer provides a very high level of flexibility for
46 * separating the debugger interface from instrumentation details. Experience
47 * has shown this kind of design can facilitate separating out debugger
48 * instrumentation into an external agent [2] and enable the development of
49 * advanced instrumentation frameworks [3]. We want MDB to be an ideal
50 * extensible framework for the development of such applications.
51 *
52 * Aside from a set of wrapper functions, the target layer also provides event
53 * management for targets that represent live executing programs. Our model of
54 * events is also extensible, and is based upon work in [3] and [4]. We define
55 * a *software event* as a state transition in the target program (for example,
56 * the transition of the program counter to a location of interest) that is
57 * observed by the debugger or its agent. A *software event specifier* is a
58 * description of a class of software events that is used by the debugger to
59 * instrument the target so that the corresponding software events can be
60 * observed. In MDB, software event specifiers are represented by the
61 * mdb_sespec_t structure, defined in <mdb/mdb_target_impl.h>. As the user,
62 * the internal debugger code, and MDB modules may all wish to observe software
63 * events and receive appropriate notification and callbacks, we do not expose
64 * software event specifiers directly as part of the user interface. Instead,
65 * clients of the target layer request that events be observed by creating
66 * new *virtual event specifiers*. Each virtual specifier is named by a unique
67 * non-zero integer (the VID), and is represented by a mdb_vespec_t structure.
68 * One or more virtual specifiers are then associated with each underlying
69 * software event specifier. This design enforces the constraint that the
70 * target must only insert one set of instrumentation, regardless of how many
71 * times the target layer was asked to trace a given event. For example, if
72 * multiple clients request a breakpoint at a particular address, the virtual
73 * specifiers will map to the same sespec, ensuring that only one breakpoint
74 * trap instruction is actually planted at the given target address. When no
75 * virtual specifiers refer to an sespec, it is no longer needed and can be
76 * removed, along with the corresponding instrumentation.
77 *
78 * The following state transition diagram illustrates the life cycle of a
79 * software event specifier and example transitions:
80 *
81 * cont/
82 * +--------+ delete +--------+ stop +-------+
83 * (|( DEAD )|) <------- ( ACTIVE ) <------> ( ARMED )
84 * +--------+ +--------+ +-------+
85 * ^ load/unload ^ ^ failure/ |
86 * delete | object / \ reset | failure
87 * | v v |
88 * | +--------+ +-------+ |
89 * +---- ( IDLE ) ( ERR ) <----+
90 * | +--------+ +-------+
91 * | |
92 * +------------------------------+
93 *
94 * The MDB execution control model is based upon the synchronous debugging
95 * model exported by Solaris proc(4). A target program is set running or the
96 * debugger is attached to a running target. On ISTOP (stop on event of
97 * interest), one target thread is selected as the representative. The
98 * algorithm for selecting the representative is target-specific, but we assume
99 * that if an observed software event has occurred, the target will select the
100 * thread that triggered the state transition of interest. The other threads
101 * are stopped in sympathy with the representative as soon as possible. Prior
102 * to continuing the target, we plant our instrumentation, transitioning event
103 * specifiers from the ACTIVE to the ARMED state, and then back again when the
104 * target stops. We then query each active event specifier to learn which ones
105 * are matched, and then invoke the callbacks associated with their vespecs.
106 * If an OS error occurs while attempting to arm or disarm a specifier, the
107 * specifier is transitioned to the ERROR state; we will attempt to arm it
108 * again at the next continue. If no target process is under our control or
109 * if an event is not currently applicable (e.g. a deferred breakpoint on an
110 * object that is not yet loaded), it remains in the IDLE state. The target
111 * implementation should intercept object load events and then transition the
112 * specifier to the ACTIVE state when the corresponding object is loaded.
113 *
114 * To simplify the debugger implementation and allow targets to easily provide
115 * new types of observable events, most of the event specifier management is
116 * done by the target layer. Each software event specifier provides an ops
117 * vector of subroutines that the target layer can call to perform the
118 * various state transitions described above. The target maintains two lists
119 * of mdb_sespec_t's: the t_idle list (IDLE state) and the t_active list
120 * (ACTIVE, ARMED, and ERROR states). Each mdb_sespec_t maintains a list of
121 * associated mdb_vespec_t's. If an sespec is IDLE or ERROR, its se_errno
122 * field will have an errno value specifying the reason for its inactivity.
123 * The vespec stores the client's callback function and private data, and the
124 * arguments used to construct the sespec. All objects are reference counted
125 * so we can destroy an object when it is no longer needed. The mdb_sespec_t
126 * invariants for the respective states are as follows:
127 *
128 * IDLE: on t_idle list, se_data == NULL, se_errno != 0, se_ctor not called
129 * ACTIVE: on t_active list, se_data valid, se_errno == 0, se_ctor called
130 * ARMED: on t_active list, se_data valid, se_errno == 0, se_ctor called
131 * ERROR: on t_active list, se_data valid, se_errno != 0, se_ctor called
132 *
133 * Additional commentary on specific state transitions and issues involving
134 * event management can be found below near the target layer functions.
135 *
136 * References
137 *
138 * [1] John Gilmore, "Working in GDB", Technical Report, Cygnus Support,
139 * 1.84 edition, 1994.
140 *
141 * [2] David R. Hanson and Mukund Raghavachari, "A Machine-Independent
142 * Debugger", Software--Practice and Experience, 26(11), 1277-1299(1996).
143 *
144 * [3] Michael W. Shapiro, "RDB: A System for Incremental Replay Debugging",
145 * Technical Report CS-97-12, Department of Computer Science,
146 * Brown University.
147 *
148 * [4] Daniel B. Price, "New Techniques for Replay Debugging", Technical
149 * Report CS-98-05, Department of Computer Science, Brown University.
150 */
151
152 #include <mdb/mdb_target_impl.h>
153 #include <mdb/mdb_debug.h>
154 #include <mdb/mdb_modapi.h>
155 #include <mdb/mdb_err.h>
156 #include <mdb/mdb_callb.h>
157 #include <mdb/mdb_gelf.h>
158 #include <mdb/mdb_io_impl.h>
159 #include <mdb/mdb_string.h>
160 #include <mdb/mdb_signal.h>
161 #include <mdb/mdb_frame.h>
162 #include <mdb/mdb.h>
163
164 #include <sys/stat.h>
165 #include <sys/param.h>
166 #include <sys/signal.h>
167 #include <strings.h>
168 #include <stdlib.h>
169 #include <errno.h>
170
171 /*
172 * Define convenience macros for referencing the set of vespec flag bits that
173 * are preserved by the target implementation, and the set of bits that
174 * determine automatic ve_hits == ve_limit behavior.
175 */
176 #define T_IMPL_BITS \
177 (MDB_TGT_SPEC_INTERNAL | MDB_TGT_SPEC_SILENT | MDB_TGT_SPEC_MATCHED | \
178 MDB_TGT_SPEC_DELETED)
179
180 #define T_AUTO_BITS \
181 (MDB_TGT_SPEC_AUTOSTOP | MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS)
182
183 /*
184 * Define convenience macro for referencing target flag pending continue bits.
185 */
186 #define T_CONT_BITS \
187 (MDB_TGT_F_STEP | MDB_TGT_F_STEP_OUT | MDB_TGT_F_NEXT | MDB_TGT_F_CONT)
188
/*
 * Create a new target, invoking the implementation-supplied constructor.
 * On success the target is appended to the global target list and its
 * cached status is primed; on failure NULL is returned with errno set.
 */
mdb_tgt_t *
mdb_tgt_create(mdb_tgt_ctor_f *ctor, int flags, int argc, const char *argv[])
{
	mdb_module_t *mp;
	mdb_tgt_t *t;

	/* Reject any flag bits outside the public MDB_TGT_F_ALL set. */
	if (flags & ~MDB_TGT_F_ALL) {
		(void) set_errno(EINVAL);
		return (NULL);
	}

	t = mdb_zalloc(sizeof (mdb_tgt_t), UM_SLEEP);
	mdb_list_append(&mdb.m_tgtlist, t);

	t->t_module = &mdb.m_rmod;	/* default owner: the root module */
	t->t_matched = T_SE_END;	/* empty matched-sespec list */
	t->t_flags = flags;
	t->t_vepos = 1;			/* next positive vespec id */
	t->t_veneg = 1;			/* next negative (internal) vespec id */

	/*
	 * Search the module list for the module that registered this
	 * constructor, so the target is attributed to the dmod that
	 * provides it; otherwise the root module default stands.
	 */
	for (mp = mdb.m_mhead; mp != NULL; mp = mp->mod_next) {
		if (ctor == mp->mod_tgt_ctor) {
			t->t_module = mp;
			break;
		}
	}

	/* If the constructor fails, unwind our list insertion and free. */
	if (ctor(t, argc, argv) != 0) {
		mdb_list_delete(&mdb.m_tgtlist, t);
		mdb_free(t, sizeof (mdb_tgt_t));
		return (NULL);
	}

	mdb_dprintf(MDB_DBG_TGT, "t_create %s (%p)\n",
	    t->t_module->mod_name, (void *)t);

	/* Prime the cached status so mdb_tgt_status() returns valid data. */
	(void) t->t_ops->t_status(t, &t->t_status);
	return (t);
}
228
/*
 * Return the target's current flag bits (MDB_TGT_F_* values).
 */
int
mdb_tgt_getflags(mdb_tgt_t *t)
{
	return (t->t_flags);
}
234
235 int
mdb_tgt_setflags(mdb_tgt_t * t,int flags)236 mdb_tgt_setflags(mdb_tgt_t *t, int flags)
237 {
238 if (flags & ~MDB_TGT_F_ALL)
239 return (set_errno(EINVAL));
240
241 return (t->t_ops->t_setflags(t, flags));
242 }
243
/*
 * Change the target's context (e.g. the entity being examined); the
 * interpretation of `context' is left to the target implementation.
 */
int
mdb_tgt_setcontext(mdb_tgt_t *t, void *context)
{
	return (t->t_ops->t_setcontext(t, context));
}
249
250 /*ARGSUSED*/
/*
 * Vespec iteration callback used by mdb_tgt_destroy() to delete every
 * remaining vespec on the target.  Always returns 0 to continue iterating.
 */
static int
tgt_delete_vespec(mdb_tgt_t *t, void *private, int vid, void *data)
{
	(void) mdb_tgt_vespec_delete(t, vid);
	return (0);
}
257
/*
 * Destroy a target: deactivate it if it is current, free its external
 * data records, idle and delete all event specifiers, invoke the
 * implementation's destructor, and finally free the target itself.  If
 * this left the debugger without a current target, activate the most
 * recently created remaining one.
 */
void
mdb_tgt_destroy(mdb_tgt_t *t)
{
	mdb_xdata_t *xdp, *nxdp;

	/* If this is the current target, deactivate it first. */
	if (mdb.m_target == t) {
		mdb_dprintf(MDB_DBG_TGT, "t_deactivate %s (%p)\n",
		    t->t_module->mod_name, (void *)t);
		t->t_ops->t_deactivate(t);
		mdb.m_target = NULL;
	}

	mdb_dprintf(MDB_DBG_TGT, "t_destroy %s (%p)\n",
	    t->t_module->mod_name, (void *)t);

	/* Free every external data record hanging off the target. */
	for (xdp = mdb_list_next(&t->t_xdlist); xdp != NULL; xdp = nxdp) {
		nxdp = mdb_list_next(xdp);
		mdb_list_delete(&t->t_xdlist, xdp);
		mdb_free(xdp, sizeof (mdb_xdata_t));
	}

	/*
	 * Idle every sespec (removing instrumentation), then delete every
	 * vespec, before handing off to the implementation's destructor.
	 */
	mdb_tgt_sespec_idle_all(t, EBUSY, TRUE);
	(void) mdb_tgt_vespec_iter(t, tgt_delete_vespec, NULL);
	t->t_ops->t_destroy(t);

	mdb_list_delete(&mdb.m_tgtlist, t);
	mdb_free(t, sizeof (mdb_tgt_t));

	/* Fall back to the newest remaining target, if any. */
	if (mdb.m_target == NULL)
		mdb_tgt_activate(mdb_list_prev(&mdb.m_tgtlist));
}
289
/*
 * Make `t' the current target, deactivating any previous one.  Passing
 * NULL simply deactivates the current target.  When a new target is
 * activated we may also re-expand a %V token in the root path and
 * re-evaluate the macro and dmod search paths.
 */
void
mdb_tgt_activate(mdb_tgt_t *t)
{
	mdb_tgt_t *otgt = mdb.m_target;

	if (mdb.m_target != NULL) {
		mdb_dprintf(MDB_DBG_TGT, "t_deactivate %s (%p)\n",
		    mdb.m_target->t_module->mod_name, (void *)mdb.m_target);
		mdb.m_target->t_ops->t_deactivate(mdb.m_target);
	}

	if ((mdb.m_target = t) != NULL) {
		const char *v = strstr(mdb.m_root, "%V");

		mdb_dprintf(MDB_DBG_TGT, "t_activate %s (%p)\n",
		    t->t_module->mod_name, (void *)t);

		/*
		 * If the root was explicitly set with -R and contains %V,
		 * expand it like a path.  If the resulting directory is
		 * not present, then replace %V with "latest" and re-evaluate.
		 */
		if (v != NULL) {
			char old_root[MAXPATHLEN];
			const char **p;
#ifndef _KMDB
			struct stat s;
#endif
			size_t len;

			/* First expansion: %V as-is. */
			p = mdb_path_alloc(mdb.m_root, &len);
			(void) strcpy(old_root, mdb.m_root);
			(void) strncpy(mdb.m_root, p[0], MAXPATHLEN);
			mdb.m_root[MAXPATHLEN - 1] = '\0';
			mdb_path_free(p, len);

#ifndef _KMDB
			/*
			 * If the expanded directory does not exist, set
			 * MDB_FL_LATEST and re-expand the original root
			 * (so %V now maps to "latest").
			 */
			if (stat(mdb.m_root, &s) == -1 && errno == ENOENT) {
				mdb.m_flags |= MDB_FL_LATEST;
				p = mdb_path_alloc(old_root, &len);
				(void) strncpy(mdb.m_root, p[0], MAXPATHLEN);
				mdb.m_root[MAXPATHLEN - 1] = '\0';
				mdb_path_free(p, len);
			}
#endif
		}

		/*
		 * Re-evaluate the macro and dmod paths now that we have the
		 * new target set and m_root figured out.
		 */
		if (otgt == NULL) {
			mdb_set_ipath(mdb.m_ipathstr);
			mdb_set_lpath(mdb.m_lpathstr);
		}

		t->t_ops->t_activate(t);
	}
}
349
/*
 * Invoke the target implementation's periodic housekeeping entry point.
 */
void
mdb_tgt_periodic(mdb_tgt_t *t)
{
	t->t_ops->t_periodic(t);
}
355
/*
 * Return the target's name, as reported by the implementation.
 */
const char *
mdb_tgt_name(mdb_tgt_t *t)
{
	return (t->t_ops->t_name(t));
}
361
/*
 * Return the target's instruction-set-architecture string.
 */
const char *
mdb_tgt_isa(mdb_tgt_t *t)
{
	return (t->t_ops->t_isa(t));
}
367
/*
 * Return the target's platform string.
 */
const char *
mdb_tgt_platform(mdb_tgt_t *t)
{
	return (t->t_ops->t_platform(t));
}
373
/*
 * Fill in *utsp with the target's utsname information.
 */
int
mdb_tgt_uname(mdb_tgt_t *t, struct utsname *utsp)
{
	return (t->t_ops->t_uname(t, utsp));
}
379
/*
 * Return the target's data model, as reported by the implementation.
 */
int
mdb_tgt_dmodel(mdb_tgt_t *t)
{
	return (t->t_ops->t_dmodel(t));
}
385
/*
 * Retrieve a pointer to the target's auxiliary vector, if available.
 */
int
mdb_tgt_auxv(mdb_tgt_t *t, const auxv_t **auxvp)
{
	return (t->t_ops->t_auxv(t, auxvp));
}
391
392 ssize_t
mdb_tgt_aread(mdb_tgt_t * t,mdb_tgt_as_t as,void * buf,size_t n,mdb_tgt_addr_t addr)393 mdb_tgt_aread(mdb_tgt_t *t, mdb_tgt_as_t as,
394 void *buf, size_t n, mdb_tgt_addr_t addr)
395 {
396 if (t->t_flags & MDB_TGT_F_ASIO)
397 return (t->t_ops->t_aread(t, as, buf, n, addr));
398
399 switch ((uintptr_t)as) {
400 case (uintptr_t)MDB_TGT_AS_VIRT:
401 case (uintptr_t)MDB_TGT_AS_VIRT_I:
402 case (uintptr_t)MDB_TGT_AS_VIRT_S:
403 return (t->t_ops->t_vread(t, buf, n, addr));
404 case (uintptr_t)MDB_TGT_AS_PHYS:
405 return (t->t_ops->t_pread(t, buf, n, addr));
406 case (uintptr_t)MDB_TGT_AS_FILE:
407 return (t->t_ops->t_fread(t, buf, n, addr));
408 case (uintptr_t)MDB_TGT_AS_IO:
409 return (t->t_ops->t_ioread(t, buf, n, addr));
410 }
411 return (t->t_ops->t_aread(t, as, buf, n, addr));
412 }
413
414 ssize_t
mdb_tgt_awrite(mdb_tgt_t * t,mdb_tgt_as_t as,const void * buf,size_t n,mdb_tgt_addr_t addr)415 mdb_tgt_awrite(mdb_tgt_t *t, mdb_tgt_as_t as,
416 const void *buf, size_t n, mdb_tgt_addr_t addr)
417 {
418 if (!(t->t_flags & MDB_TGT_F_RDWR))
419 return (set_errno(EMDB_TGTRDONLY));
420
421 if (t->t_flags & MDB_TGT_F_ASIO)
422 return (t->t_ops->t_awrite(t, as, buf, n, addr));
423
424 switch ((uintptr_t)as) {
425 case (uintptr_t)MDB_TGT_AS_VIRT:
426 case (uintptr_t)MDB_TGT_AS_VIRT_I:
427 case (uintptr_t)MDB_TGT_AS_VIRT_S:
428 return (t->t_ops->t_vwrite(t, buf, n, addr));
429 case (uintptr_t)MDB_TGT_AS_PHYS:
430 return (t->t_ops->t_pwrite(t, buf, n, addr));
431 case (uintptr_t)MDB_TGT_AS_FILE:
432 return (t->t_ops->t_fwrite(t, buf, n, addr));
433 case (uintptr_t)MDB_TGT_AS_IO:
434 return (t->t_ops->t_iowrite(t, buf, n, addr));
435 }
436 return (t->t_ops->t_awrite(t, as, buf, n, addr));
437 }
438
/*
 * Read n bytes from the target's virtual address space at addr.
 */
ssize_t
mdb_tgt_vread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
{
	return (t->t_ops->t_vread(t, buf, n, addr));
}
444
445 ssize_t
mdb_tgt_vwrite(mdb_tgt_t * t,const void * buf,size_t n,uintptr_t addr)446 mdb_tgt_vwrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
447 {
448 if (t->t_flags & MDB_TGT_F_RDWR)
449 return (t->t_ops->t_vwrite(t, buf, n, addr));
450
451 return (set_errno(EMDB_TGTRDONLY));
452 }
453
/*
 * Read n bytes from the target's physical address space at addr.
 */
ssize_t
mdb_tgt_pread(mdb_tgt_t *t, void *buf, size_t n, physaddr_t addr)
{
	return (t->t_ops->t_pread(t, buf, n, addr));
}
459
460 ssize_t
mdb_tgt_pwrite(mdb_tgt_t * t,const void * buf,size_t n,physaddr_t addr)461 mdb_tgt_pwrite(mdb_tgt_t *t, const void *buf, size_t n, physaddr_t addr)
462 {
463 if (t->t_flags & MDB_TGT_F_RDWR)
464 return (t->t_ops->t_pwrite(t, buf, n, addr));
465
466 return (set_errno(EMDB_TGTRDONLY));
467 }
468
/*
 * Read n bytes from the target's file address space at addr.
 */
ssize_t
mdb_tgt_fread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
{
	return (t->t_ops->t_fread(t, buf, n, addr));
}
474
475 ssize_t
mdb_tgt_fwrite(mdb_tgt_t * t,const void * buf,size_t n,uintptr_t addr)476 mdb_tgt_fwrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
477 {
478 if (t->t_flags & MDB_TGT_F_RDWR)
479 return (t->t_ops->t_fwrite(t, buf, n, addr));
480
481 return (set_errno(EMDB_TGTRDONLY));
482 }
483
/*
 * Read n bytes from the target's I/O address space at addr.
 */
ssize_t
mdb_tgt_ioread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
{
	return (t->t_ops->t_ioread(t, buf, n, addr));
}
489
490 ssize_t
mdb_tgt_iowrite(mdb_tgt_t * t,const void * buf,size_t n,uintptr_t addr)491 mdb_tgt_iowrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
492 {
493 if (t->t_flags & MDB_TGT_F_RDWR)
494 return (t->t_ops->t_iowrite(t, buf, n, addr));
495
496 return (set_errno(EMDB_TGTRDONLY));
497 }
498
/*
 * Translate virtual address va in address space `as' to a physical
 * address, storing the result in *pap.
 */
int
mdb_tgt_vtop(mdb_tgt_t *t, mdb_tgt_as_t as, uintptr_t va, physaddr_t *pap)
{
	return (t->t_ops->t_vtop(t, as, va, pap));
}
504
/*
 * Read a NUL-terminated string of at most nbytes from the given address
 * space into buf.  We first attempt a single bulk read; if that fails
 * (e.g. the string straddles an unmapped page), we fall back to reading
 * one byte at a time.  The result is always NUL-terminated when
 * nbytes != 0.  Returns the string length (not counting the NUL), or -1
 * if nothing at all could be read.
 */
ssize_t
mdb_tgt_readstr(mdb_tgt_t *t, mdb_tgt_as_t as, char *buf,
    size_t nbytes, mdb_tgt_addr_t addr)
{
	ssize_t n = -1, nread = mdb_tgt_aread(t, as, buf, nbytes, addr);
	char *p;

	/* Fast path: bulk read worked; trim at the first NUL, if any. */
	if (nread >= 0) {
		if ((p = memchr(buf, '\0', nread)) != NULL)
			nread = (size_t)(p - buf);
		goto done;
	}

	/* Slow path: read byte-by-byte until NUL, error, or nbytes. */
	nread = 0;
	p = &buf[0];

	while (nread < nbytes && (n = mdb_tgt_aread(t, as, p, 1, addr)) == 1) {
		if (*p == '\0')
			return (nread);
		nread++;
		addr++;
		p++;
	}

	if (nread == 0 && n == -1)
		return (-1); /* If we can't even read a byte, return -1 */

done:
	/* Guarantee termination within the caller's buffer. */
	if (nbytes != 0)
		buf[MIN(nread, nbytes - 1)] = '\0';

	return (nread);
}
538
539 ssize_t
mdb_tgt_writestr(mdb_tgt_t * t,mdb_tgt_as_t as,const char * buf,mdb_tgt_addr_t addr)540 mdb_tgt_writestr(mdb_tgt_t *t, mdb_tgt_as_t as,
541 const char *buf, mdb_tgt_addr_t addr)
542 {
543 ssize_t nwritten = mdb_tgt_awrite(t, as, buf, strlen(buf) + 1, addr);
544 return (nwritten > 0 ? nwritten - 1 : nwritten);
545 }
546
547 int
mdb_tgt_lookup_by_name(mdb_tgt_t * t,const char * obj,const char * name,GElf_Sym * symp,mdb_syminfo_t * sip)548 mdb_tgt_lookup_by_name(mdb_tgt_t *t, const char *obj,
549 const char *name, GElf_Sym *symp, mdb_syminfo_t *sip)
550 {
551 mdb_syminfo_t info;
552 GElf_Sym sym;
553 uint_t id;
554
555 if (name == NULL || t == NULL)
556 return (set_errno(EINVAL));
557
558 if (obj == MDB_TGT_OBJ_EVERY &&
559 mdb_gelf_symtab_lookup_by_name(mdb.m_prsym, name, &sym, &id) == 0) {
560 info.sym_table = MDB_TGT_PRVSYM;
561 info.sym_id = id;
562 goto found;
563 }
564
565 if (t->t_ops->t_lookup_by_name(t, obj, name, &sym, &info) == 0)
566 goto found;
567
568 return (-1);
569
570 found:
571 if (symp != NULL)
572 *symp = sym;
573 if (sip != NULL)
574 *sip = info;
575 return (0);
576 }
577
578 int
mdb_tgt_lookup_by_addr(mdb_tgt_t * t,uintptr_t addr,uint_t flags,char * buf,size_t len,GElf_Sym * symp,mdb_syminfo_t * sip)579 mdb_tgt_lookup_by_addr(mdb_tgt_t *t, uintptr_t addr, uint_t flags,
580 char *buf, size_t len, GElf_Sym *symp, mdb_syminfo_t *sip)
581 {
582 mdb_syminfo_t info;
583 GElf_Sym sym;
584
585 if (t == NULL)
586 return (set_errno(EINVAL));
587
588 if (t->t_ops->t_lookup_by_addr(t, addr, flags,
589 buf, len, &sym, &info) == 0) {
590 if (symp != NULL)
591 *symp = sym;
592 if (sip != NULL)
593 *sip = info;
594 return (0);
595 }
596
597 return (-1);
598 }
599
600 /*
601 * The mdb_tgt_lookup_by_scope function is a convenience routine for code that
602 * wants to look up a scoped symbol name such as "object`symbol". It is
603 * implemented as a simple wrapper around mdb_tgt_lookup_by_name. Note that
604 * we split on the *last* occurrence of "`", so the object name itself may
605 * contain additional scopes whose evaluation is left to the target. This
606 * allows targets to implement additional scopes, such as source files,
607 * function names, link map identifiers, etc.
608 */
int
mdb_tgt_lookup_by_scope(mdb_tgt_t *t, const char *s, GElf_Sym *symp,
    mdb_syminfo_t *sip)
{
	const char *object = MDB_TGT_OBJ_EVERY;
	const char *name = s;
	char buf[MDB_TGT_SYM_NAMLEN];

	if (t == NULL)
		return (set_errno(EINVAL));

	if (strchr(name, '`') != NULL) {

		/* Work on a bounded private copy so strrsplit can mutate. */
		(void) strncpy(buf, s, sizeof (buf));
		buf[sizeof (buf) - 1] = '\0';
		name = buf;

		/*
		 * strrsplit terminates buf at the last '`' and returns a
		 * pointer just past it, so buf becomes the object name and
		 * s the symbol name.  Reject empty components.
		 */
		if ((s = strrsplit(buf, '`')) != NULL) {
			object = buf;
			name = s;
			if (*object == '\0')
				return (set_errno(EMDB_NOOBJ));
			if (*name == '\0')
				return (set_errno(EMDB_NOSYM));
		}
	}

	return (mdb_tgt_lookup_by_name(t, object, name, symp, sip));
}
638
639 int
mdb_tgt_symbol_iter(mdb_tgt_t * t,const char * obj,uint_t which,uint_t type,mdb_tgt_sym_f * cb,void * p)640 mdb_tgt_symbol_iter(mdb_tgt_t *t, const char *obj, uint_t which,
641 uint_t type, mdb_tgt_sym_f *cb, void *p)
642 {
643 if ((which != MDB_TGT_SYMTAB && which != MDB_TGT_DYNSYM) ||
644 (type & ~(MDB_TGT_BIND_ANY | MDB_TGT_TYPE_ANY)) != 0)
645 return (set_errno(EINVAL));
646
647 return (t->t_ops->t_symbol_iter(t, obj, which, type, cb, p));
648 }
649
650 ssize_t
mdb_tgt_readsym(mdb_tgt_t * t,mdb_tgt_as_t as,void * buf,size_t nbytes,const char * obj,const char * name)651 mdb_tgt_readsym(mdb_tgt_t *t, mdb_tgt_as_t as, void *buf, size_t nbytes,
652 const char *obj, const char *name)
653 {
654 GElf_Sym sym;
655
656 if (mdb_tgt_lookup_by_name(t, obj, name, &sym, NULL) == 0)
657 return (mdb_tgt_aread(t, as, buf, nbytes, sym.st_value));
658
659 return (-1);
660 }
661
662 ssize_t
mdb_tgt_writesym(mdb_tgt_t * t,mdb_tgt_as_t as,const void * buf,size_t nbytes,const char * obj,const char * name)663 mdb_tgt_writesym(mdb_tgt_t *t, mdb_tgt_as_t as, const void *buf,
664 size_t nbytes, const char *obj, const char *name)
665 {
666 GElf_Sym sym;
667
668 if (mdb_tgt_lookup_by_name(t, obj, name, &sym, NULL) == 0)
669 return (mdb_tgt_awrite(t, as, buf, nbytes, sym.st_value));
670
671 return (-1);
672 }
673
/*
 * Iterate over the target's address-space mappings, invoking cb for each.
 */
int
mdb_tgt_mapping_iter(mdb_tgt_t *t, mdb_tgt_map_f *cb, void *p)
{
	return (t->t_ops->t_mapping_iter(t, cb, p));
}
679
/*
 * Iterate over the target's load objects, invoking cb for each.
 */
int
mdb_tgt_object_iter(mdb_tgt_t *t, mdb_tgt_map_f *cb, void *p)
{
	return (t->t_ops->t_object_iter(t, cb, p));
}
685
/*
 * Return the mapping containing the given address, or NULL with errno set.
 */
const mdb_map_t *
mdb_tgt_addr_to_map(mdb_tgt_t *t, uintptr_t addr)
{
	return (t->t_ops->t_addr_to_map(t, addr));
}
691
/*
 * Return the mapping for the named object, or NULL with errno set.
 */
const mdb_map_t *
mdb_tgt_name_to_map(mdb_tgt_t *t, const char *name)
{
	return (t->t_ops->t_name_to_map(t, name));
}
697
/*
 * Return the CTF container for the object containing the given address.
 */
struct ctf_file *
mdb_tgt_addr_to_ctf(mdb_tgt_t *t, uintptr_t addr)
{
	return (t->t_ops->t_addr_to_ctf(t, addr));
}
703
/*
 * Return the CTF container for the named object.
 */
struct ctf_file *
mdb_tgt_name_to_ctf(mdb_tgt_t *t, const char *name)
{
	return (t->t_ops->t_name_to_ctf(t, name));
}
709
710 /*
711 * Return the latest target status. We just copy out our cached copy. The
712 * status only needs to change when the target is run, stepped, or continued.
713 */
int
mdb_tgt_status(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	/* Snapshot the cached flags/state for the invariant checks below. */
	uint_t dstop = (t->t_status.st_flags & MDB_TGT_DSTOP);
	uint_t istop = (t->t_status.st_flags & MDB_TGT_ISTOP);
	uint_t state = t->t_status.st_state;

	if (tsp == NULL)
		return (set_errno(EINVAL));

	/*
	 * If we're called with the address of the target's internal status,
	 * then call down to update it; otherwise copy out the saved status.
	 */
	if (tsp == &t->t_status && t->t_ops->t_status(t, &t->t_status) != 0)
		return (-1); /* errno is set for us */

	/*
	 * Assert that our state is valid before returning it.  The state must
	 * be valid, and DSTOP and ISTOP cannot be set simultaneously.  ISTOP
	 * is only valid when stopped.  DSTOP is only valid when running or
	 * stopped.  If any test fails, abort the debugger.
	 */
	if (state > MDB_TGT_LOST)
		fail("invalid target state (%u)\n", state);
	if (state != MDB_TGT_STOPPED && istop)
		fail("target state is (%u) and ISTOP is set\n", state);
	if (state != MDB_TGT_STOPPED && state != MDB_TGT_RUNNING && dstop)
		fail("target state is (%u) and DSTOP is set\n", state);
	if (istop && dstop)
		fail("target has ISTOP and DSTOP set simultaneously\n");

	/* Copy out the cached status for external callers. */
	if (tsp != &t->t_status)
		bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));

	return (0);
}
751
752 /*
753 * For the given sespec, scan its list of vespecs for ones that are marked
754 * temporary and delete them. We use the same method as vespec_delete below.
755 */
756 /*ARGSUSED*/
757 void
mdb_tgt_sespec_prune_one(mdb_tgt_t * t,mdb_sespec_t * sep)758 mdb_tgt_sespec_prune_one(mdb_tgt_t *t, mdb_sespec_t *sep)
759 {
760 mdb_vespec_t *vep, *nvep;
761
762 for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
763 nvep = mdb_list_next(vep);
764
765 if ((vep->ve_flags & (MDB_TGT_SPEC_DELETED |
766 MDB_TGT_SPEC_TEMPORARY)) == MDB_TGT_SPEC_TEMPORARY) {
767 vep->ve_flags |= MDB_TGT_SPEC_DELETED;
768 mdb_tgt_vespec_rele(t, vep);
769 }
770 }
771 }
772
773 /*
774 * Prune each sespec on the active list of temporary vespecs. This function
775 * is called, for example, after the target finishes a continue operation.
776 */
777 void
mdb_tgt_sespec_prune_all(mdb_tgt_t * t)778 mdb_tgt_sespec_prune_all(mdb_tgt_t *t)
779 {
780 mdb_sespec_t *sep, *nsep;
781
782 for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
783 nsep = mdb_list_next(sep);
784 mdb_tgt_sespec_prune_one(t, sep);
785 }
786 }
787
788 /*
789 * Transition the given sespec to the IDLE state. We invoke the destructor,
790 * and then move the sespec from the active list to the idle list.
791 */
void
mdb_tgt_sespec_idle_one(mdb_tgt_t *t, mdb_sespec_t *sep, int reason)
{
	ASSERT(sep->se_state != MDB_TGT_SPEC_IDLE);

	/* Remove the instrumentation before destroying the sespec's data. */
	if (sep->se_state == MDB_TGT_SPEC_ARMED)
		(void) sep->se_ops->se_disarm(t, sep);

	sep->se_ops->se_dtor(t, sep);
	sep->se_data = NULL;

	/* Record why the sespec is idle (see IDLE invariants above). */
	sep->se_state = MDB_TGT_SPEC_IDLE;
	sep->se_errno = reason;

	mdb_list_delete(&t->t_active, sep);
	mdb_list_append(&t->t_idle, sep);

	/* Temporary vespecs do not survive the transition to IDLE. */
	mdb_tgt_sespec_prune_one(t, sep);
}
811
812 /*
813 * Transition each sespec on the active list to the IDLE state. This function
814 * is called, for example, after the target terminates execution.
815 */
void
mdb_tgt_sespec_idle_all(mdb_tgt_t *t, int reason, int clear_matched)
{
	mdb_sespec_t *sep, *nsep;
	mdb_vespec_t *vep;

	/*
	 * If requested, unwind the matched-sespec list first: clear the
	 * MATCHED flag on each matched sespec's vespecs and release the
	 * hold that the matched list held on the sespec.
	 */
	while ((sep = t->t_matched) != T_SE_END && clear_matched) {
		for (vep = mdb_list_next(&sep->se_velist); vep != NULL; ) {
			vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
			vep = mdb_list_next(vep);
		}

		t->t_matched = sep->se_matched;
		sep->se_matched = NULL;
		mdb_tgt_sespec_rele(t, sep);
	}

	/* Idle every remaining sespec on the active list. */
	for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
		nsep = mdb_list_next(sep);
		mdb_tgt_sespec_idle_one(t, sep, reason);
	}
}
838
839 /*
840 * Attempt to transition the given sespec from the IDLE to ACTIVE state. We
841 * do this by invoking se_ctor -- if this fails, we save the reason in se_errno
842 * and return -1 with errno set. One strange case we need to deal with here is
843 * the possibility that a given vespec is sitting on the idle list with its
844 * corresponding sespec, but it is actually a duplicate of another sespec on the
845 * active list. This can happen if the sespec is associated with a
846 * MDB_TGT_SPEC_DISABLED vespec that was just enabled, and is now ready to be
847 * activated. A more interesting reason this situation might arise is the case
848 * where a virtual address breakpoint is set at an address just mmap'ed by
849 * dlmopen. Since no symbol table information is available for this mapping
850 * yet, a pre-existing deferred symbolic breakpoint may already exist for this
851 * address, but it is on the idle list. When the symbol table is ready and the
852 * DLACTIVITY event occurs, we now discover that the virtual address obtained by
853 * evaluating the symbolic breakpoint matches the explicit virtual address of
854 * the active virtual breakpoint. To resolve this conflict in either case, we
855 * destroy the idle sespec, and attach its list of vespecs to the existing
856 * active sespec.
857 */
int
mdb_tgt_sespec_activate_one(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	mdb_vespec_t *vep = mdb_list_next(&sep->se_velist);

	mdb_vespec_t *nvep;
	mdb_sespec_t *dup;

	ASSERT(sep->se_state == MDB_TGT_SPEC_IDLE);
	ASSERT(vep != NULL);

	if (vep->ve_flags & MDB_TGT_SPEC_DISABLED)
		return (0); /* cannot be activated while disabled bit set */

	/*
	 * First search the active list for an existing, duplicate sespec to
	 * handle the special case described above.
	 */
	for (dup = mdb_list_next(&t->t_active); dup; dup = mdb_list_next(dup)) {
		if (dup->se_ops == sep->se_ops &&
		    dup->se_ops->se_secmp(t, dup, vep->ve_args)) {
			ASSERT(dup != sep);
			break;
		}
	}

	/*
	 * If a duplicate is found, destroy the existing, idle sespec, and
	 * attach all of its vespecs to the duplicate sespec.
	 */
	if (dup != NULL) {
		for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
			mdb_dprintf(MDB_DBG_TGT, "merge [ %d ] to sespec %p\n",
			    vep->ve_id, (void *)dup);

			/* Vespecs joining a matched sespec become matched. */
			if (dup->se_matched != NULL)
				vep->ve_flags |= MDB_TGT_SPEC_MATCHED;

			nvep = mdb_list_next(vep);
			vep->ve_hits = 0;

			/*
			 * Move the vespec's reference from the idle sespec
			 * to the duplicate; releasing the last reference
			 * destroys the idle sespec.
			 */
			mdb_list_delete(&sep->se_velist, vep);
			mdb_tgt_sespec_rele(t, sep);

			mdb_list_append(&dup->se_velist, vep);
			mdb_tgt_sespec_hold(t, dup);
			vep->ve_se = dup;
		}

		mdb_dprintf(MDB_DBG_TGT, "merged idle sespec %p with %p\n",
		    (void *)sep, (void *)dup);
		return (0);
	}

	/*
	 * If no duplicate is found, call the sespec's constructor.  If this
	 * is successful, move the sespec to the active list.
	 */
	if (sep->se_ops->se_ctor(t, sep, vep->ve_args) < 0) {
		sep->se_errno = errno;	/* preserve the reason for failure */
		sep->se_data = NULL;

		return (-1);
	}

	/* Reset hit counts now that the sespec is freshly activated. */
	for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
		nvep = mdb_list_next(vep);
		vep->ve_hits = 0;
	}
	mdb_list_delete(&t->t_idle, sep);
	mdb_list_append(&t->t_active, sep);
	sep->se_state = MDB_TGT_SPEC_ACTIVE;
	sep->se_errno = 0;

	return (0);
}
934
935 /*
936 * Transition each sespec on the idle list to the ACTIVE state. This function
937 * is called, for example, after the target's t_run() function returns. If
938 * the se_ctor() function fails, the specifier is not yet applicable; it will
939 * remain on the idle list and can be activated later.
940 *
941 * Returns 1 if there weren't any unexpected activation failures; 0 if there
942 * were.
943 */
944 int
mdb_tgt_sespec_activate_all(mdb_tgt_t * t)945 mdb_tgt_sespec_activate_all(mdb_tgt_t *t)
946 {
947 mdb_sespec_t *sep, *nsep;
948 int rc = 1;
949
950 for (sep = mdb_list_next(&t->t_idle); sep != NULL; sep = nsep) {
951 nsep = mdb_list_next(sep);
952
953 if (mdb_tgt_sespec_activate_one(t, sep) < 0 &&
954 sep->se_errno != EMDB_NOOBJ)
955 rc = 0;
956 }
957
958 return (rc);
959 }
960
961 /*
962 * Transition the given sespec to the ARMED state. Note that we attempt to
963 * re-arm sespecs previously in the ERROR state. If se_arm() fails the sespec
964 * transitions to the ERROR state but stays on the active list.
965 */
966 void
mdb_tgt_sespec_arm_one(mdb_tgt_t * t,mdb_sespec_t * sep)967 mdb_tgt_sespec_arm_one(mdb_tgt_t *t, mdb_sespec_t *sep)
968 {
969 ASSERT(sep->se_state != MDB_TGT_SPEC_IDLE);
970
971 if (sep->se_state == MDB_TGT_SPEC_ARMED)
972 return; /* do not arm sespecs more than once */
973
974 if (sep->se_ops->se_arm(t, sep) == -1) {
975 sep->se_state = MDB_TGT_SPEC_ERROR;
976 sep->se_errno = errno;
977 } else {
978 sep->se_state = MDB_TGT_SPEC_ARMED;
979 sep->se_errno = 0;
980 }
981 }
982
983 /*
984 * Transition each sespec on the active list (except matched specs) to the
985 * ARMED state. This function is called prior to continuing the target.
986 */
987 void
mdb_tgt_sespec_arm_all(mdb_tgt_t * t)988 mdb_tgt_sespec_arm_all(mdb_tgt_t *t)
989 {
990 mdb_sespec_t *sep, *nsep;
991
992 for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
993 nsep = mdb_list_next(sep);
994 if (sep->se_matched == NULL)
995 mdb_tgt_sespec_arm_one(t, sep);
996 }
997 }
998
999 /*
1000 * Transition each sespec on the active list that is in the ARMED state to
1001 * the ACTIVE state. If se_disarm() fails, the sespec is transitioned to
1002 * the ERROR state instead, but left on the active list.
1003 */
1004 static void
tgt_disarm_sespecs(mdb_tgt_t * t)1005 tgt_disarm_sespecs(mdb_tgt_t *t)
1006 {
1007 mdb_sespec_t *sep;
1008
1009 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
1010 if (sep->se_state != MDB_TGT_SPEC_ARMED)
1011 continue; /* do not disarm if in ERROR state */
1012
1013 if (sep->se_ops->se_disarm(t, sep) == -1) {
1014 sep->se_state = MDB_TGT_SPEC_ERROR;
1015 sep->se_errno = errno;
1016 } else {
1017 sep->se_state = MDB_TGT_SPEC_ACTIVE;
1018 sep->se_errno = 0;
1019 }
1020 }
1021 }
1022
1023 /*
1024 * Determine if the software event that triggered the most recent stop matches
1025 * any of the active event specifiers. If 'all' is TRUE, we consider all
1026 * sespecs in our search. If 'all' is FALSE, we only consider ARMED sespecs.
1027 * If we successfully match an event, we add it to the t_matched list and
1028 * place an additional hold on it.
1029 */
1030 static mdb_sespec_t *
tgt_match_sespecs(mdb_tgt_t * t,int all)1031 tgt_match_sespecs(mdb_tgt_t *t, int all)
1032 {
1033 mdb_sespec_t *sep;
1034
1035 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
1036 if (all == FALSE && sep->se_state != MDB_TGT_SPEC_ARMED)
1037 continue; /* restrict search to ARMED sespecs */
1038
1039 if (sep->se_state != MDB_TGT_SPEC_ERROR &&
1040 sep->se_ops->se_match(t, sep, &t->t_status)) {
1041 mdb_dprintf(MDB_DBG_TGT, "match se %p\n", (void *)sep);
1042 mdb_tgt_sespec_hold(t, sep);
1043 sep->se_matched = t->t_matched;
1044 t->t_matched = sep;
1045 }
1046 }
1047
1048 return (t->t_matched);
1049 }
1050
1051 /*
1052 * This function provides the low-level target continue algorithm. We proceed
1053 * in three phases: (1) we arm the active sespecs, except the specs matched at
1054 * the time we last stopped, (2) we call se_cont() on any matched sespecs to
1055 * step over these event transitions, and then arm the corresponding sespecs,
1056 * and (3) we call the appropriate low-level continue routine. Once the
1057 * target stops again, we determine which sespecs were matched, and invoke the
1058 * appropriate vespec callbacks and perform other vespec maintenance.
1059 */
static int
tgt_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp,
    int (*t_cont)(mdb_tgt_t *, mdb_tgt_status_t *))
{
	/* 'hitv' is the debugger's named variable "hits" (updated per event) */
	mdb_var_t *hitv = mdb_nv_lookup(&mdb.m_nv, "hits");
	uintptr_t pc = t->t_status.st_pc;
	int error = 0;

	mdb_sespec_t *sep, *nsep, *matched;
	mdb_vespec_t *vep, *nvep;
	uintptr_t addr;

	uint_t cbits = 0;	/* union of pending continue bits */
	uint_t ncont = 0;	/* # of callbacks that requested cont */
	uint_t n = 0;		/* # of callbacks */

	/*
	 * If the target is undead, dead, or lost, we no longer allow continue.
	 * This effectively forces the user to use ::kill or ::run after death.
	 */
	if (t->t_status.st_state == MDB_TGT_UNDEAD)
		return (set_errno(EMDB_TGTZOMB));
	if (t->t_status.st_state == MDB_TGT_DEAD)
		return (set_errno(EMDB_TGTCORE));
	if (t->t_status.st_state == MDB_TGT_LOST)
		return (set_errno(EMDB_TGTLOST));

	/*
	 * If any of single-step, step-over, or step-out is pending, it takes
	 * precedence over an explicit or pending continue, because these are
	 * all different specialized forms of continue.
	 */
	if (t->t_flags & MDB_TGT_F_STEP)
		t_cont = t->t_ops->t_step;
	else if (t->t_flags & MDB_TGT_F_NEXT)
		t_cont = t->t_ops->t_step;
	else if (t->t_flags & MDB_TGT_F_STEP_OUT)
		t_cont = t->t_ops->t_cont;

	/*
	 * To handle step-over, we ask the target to find the address past the
	 * next control transfer instruction.  If an address is found, we plant
	 * a temporary breakpoint there and continue; otherwise just step.
	 */
	if ((t->t_flags & MDB_TGT_F_NEXT) && !(t->t_flags & MDB_TGT_F_STEP)) {
		if (t->t_ops->t_next(t, &addr) == -1 || mdb_tgt_add_vbrkpt(t,
		    addr, MDB_TGT_SPEC_HIDDEN | MDB_TGT_SPEC_TEMPORARY,
		    no_se_f, NULL) == 0) {
			mdb_dprintf(MDB_DBG_TGT, "next falling back to step: "
			    "%s\n", mdb_strerror(errno));
		} else
			t_cont = t->t_ops->t_cont;
	}

	/*
	 * To handle step-out, we ask the target to find the return address of
	 * the current frame, plant a temporary breakpoint there, and continue.
	 */
	if (t->t_flags & MDB_TGT_F_STEP_OUT) {
		if (t->t_ops->t_step_out(t, &addr) == -1)
			return (-1); /* errno is set for us */

		if (mdb_tgt_add_vbrkpt(t, addr, MDB_TGT_SPEC_HIDDEN |
		    MDB_TGT_SPEC_TEMPORARY, no_se_f, NULL) == 0)
			return (-1); /* errno is set for us */
	}

	/*
	 * Block job-control signals and disable interrupts while the target
	 * runs, so the debugger itself is not torn down mid-continue.
	 */
	(void) mdb_signal_block(SIGHUP);
	(void) mdb_signal_block(SIGTERM);
	mdb_intr_disable();

	t->t_flags &= ~T_CONT_BITS;
	t->t_flags |= MDB_TGT_F_BUSY;
	mdb_tgt_sespec_arm_all(t);

	/* Detach the current matched chain; T_SE_END is the list terminator */
	ASSERT(t->t_matched != NULL);
	matched = t->t_matched;
	t->t_matched = T_SE_END;

	if (mdb.m_term != NULL)
		IOP_SUSPEND(mdb.m_term);

	/*
	 * Iterate over the matched sespec list, performing autostop processing
	 * and clearing the matched bit for each associated vespec.  We then
	 * invoke each sespec's se_cont callback in order to continue past
	 * the corresponding event.  If the matched list has more than one
	 * sespec, we assume that the se_cont callbacks are non-interfering.
	 */
	for (sep = matched; sep != T_SE_END; sep = sep->se_matched) {
		for (vep = mdb_list_next(&sep->se_velist); vep != NULL; ) {
			/*
			 * An AUTOSTOP vespec that reached its limit gets its
			 * hit count reset so it can trigger again.
			 */
			if ((vep->ve_flags & MDB_TGT_SPEC_AUTOSTOP) &&
			    (vep->ve_limit && vep->ve_hits == vep->ve_limit))
				vep->ve_hits = 0;

			vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
			vep = mdb_list_next(vep);
		}

		if (sep->se_ops->se_cont(t, sep, &t->t_status) == -1) {
			/* preserve errno if set; otherwise use -1 marker */
			error = errno ? errno : -1;
			tgt_disarm_sespecs(t);
			break;
		}

		if (!(t->t_status.st_flags & MDB_TGT_ISTOP)) {
			tgt_disarm_sespecs(t);
			if (t->t_status.st_state == MDB_TGT_UNDEAD)
				mdb_tgt_sespec_idle_all(t, EMDB_TGTZOMB, TRUE);
			else if (t->t_status.st_state == MDB_TGT_LOST)
				mdb_tgt_sespec_idle_all(t, EMDB_TGTLOST, TRUE);
			break;
		}
	}

	/*
	 * Clear the se_matched field for each matched sespec, and drop the
	 * reference count since the sespec is no longer on the matched list.
	 */
	for (sep = matched; sep != T_SE_END; sep = nsep) {
		nsep = sep->se_matched;
		sep->se_matched = NULL;
		mdb_tgt_sespec_rele(t, sep);
	}

	/*
	 * If the matched list was non-empty, see if we hit another event while
	 * performing se_cont() processing.  If so, don't bother continuing any
	 * further.  If not, arm the sespecs on the old matched list by calling
	 * mdb_tgt_sespec_arm_all() again and then continue by calling t_cont.
	 */
	if (matched != T_SE_END) {
		if (error != 0 || !(t->t_status.st_flags & MDB_TGT_ISTOP))
			goto out; /* abort now if se_cont() failed */

		if ((t->t_matched = tgt_match_sespecs(t, FALSE)) != T_SE_END) {
			tgt_disarm_sespecs(t);
			goto out;
		}

		mdb_tgt_sespec_arm_all(t);
	}

	/*
	 * Only invoke the low-level continue if we are not single-stepping,
	 * or if we are stepping and the pc has not already moved during the
	 * se_cont() processing above.
	 */
	if (t_cont != t->t_ops->t_step || pc == t->t_status.st_pc) {
		if (t_cont(t, &t->t_status) != 0)
			error = errno ? errno : -1;
	}

	tgt_disarm_sespecs(t);

	if (t->t_flags & MDB_TGT_F_UNLOAD)
		longjmp(mdb.m_frame->f_pcb, MDB_ERR_QUIT);

	if (t->t_status.st_state == MDB_TGT_UNDEAD)
		mdb_tgt_sespec_idle_all(t, EMDB_TGTZOMB, TRUE);
	else if (t->t_status.st_state == MDB_TGT_LOST)
		mdb_tgt_sespec_idle_all(t, EMDB_TGTLOST, TRUE);
	else if (t->t_status.st_flags & MDB_TGT_ISTOP)
		t->t_matched = tgt_match_sespecs(t, TRUE);
out:
	if (mdb.m_term != NULL)
		IOP_RESUME(mdb.m_term);

	(void) mdb_signal_unblock(SIGTERM);
	(void) mdb_signal_unblock(SIGHUP);
	mdb_intr_enable();

	for (sep = t->t_matched; sep != T_SE_END; sep = sep->se_matched) {
		/*
		 * When we invoke a ve_callback, it may in turn request that the
		 * target continue immediately after callback processing is
		 * complete.  We only allow this to occur if *all* callbacks
		 * agree to continue.  To implement this behavior, we keep a
		 * count (ncont) of such requests, and only apply the cumulative
		 * continue bits (cbits) to the target if ncont is equal to the
		 * total number of callbacks that are invoked (n).
		 */
		for (vep = mdb_list_next(&sep->se_velist);
		    vep != NULL; vep = nvep, n++) {
			/*
			 * Place an extra hold on the current vespec and pick
			 * up the next pointer before invoking the callback: we
			 * must be prepared for the vespec to be deleted or
			 * moved to a different list by the callback.
			 */
			mdb_tgt_vespec_hold(t, vep);
			nvep = mdb_list_next(vep);

			vep->ve_flags |= MDB_TGT_SPEC_MATCHED;
			vep->ve_hits++;

			/* Expose '.' (dot) and "hits" to the callback/user */
			mdb_nv_set_value(mdb.m_dot, t->t_status.st_pc);
			mdb_nv_set_value(hitv, vep->ve_hits);

			ASSERT((t->t_flags & T_CONT_BITS) == 0);
			vep->ve_callback(t, vep->ve_id, vep->ve_data);

			ncont += (t->t_flags & T_CONT_BITS) != 0;
			cbits |= (t->t_flags & T_CONT_BITS);
			t->t_flags &= ~T_CONT_BITS;

			/* Apply auto-delete/auto-disable at the hit limit */
			if (vep->ve_limit && vep->ve_hits == vep->ve_limit) {
				if (vep->ve_flags & MDB_TGT_SPEC_AUTODEL)
					(void) mdb_tgt_vespec_delete(t,
					    vep->ve_id);
				else if (vep->ve_flags & MDB_TGT_SPEC_AUTODIS)
					(void) mdb_tgt_vespec_disable(t,
					    vep->ve_id);
			}

			/* Below the limit, an AUTOSTOP vespec keeps going */
			if (vep->ve_limit && vep->ve_hits < vep->ve_limit) {
				if (vep->ve_flags & MDB_TGT_SPEC_AUTOSTOP)
					(void) mdb_tgt_continue(t, NULL);
			}

			mdb_tgt_vespec_rele(t, vep);
		}
	}

	if (t->t_matched != T_SE_END && ncont == n)
		t->t_flags |= cbits; /* apply continues (see above) */

	mdb_tgt_sespec_prune_all(t);

	t->t_status.st_flags &= ~MDB_TGT_BUSY;
	t->t_flags &= ~MDB_TGT_F_BUSY;

	if (tsp != NULL)
		bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));

	if (error != 0)
		return (set_errno(error));

	return (0);
}
1295
1296 /*
1297 * This function is the common glue that connects the high-level target layer
1298 * continue functions (e.g. step and cont below) with the low-level
1299 * tgt_continue() function above. Since vespec callbacks may perform any
1300 * actions, including attempting to continue the target itself, we must be
1301 * prepared to be called while the target is still marked F_BUSY. In this
1302 * case, we just set a pending bit and return. When we return from the call
1303 * to tgt_continue() that made us busy into the tgt_request_continue() call
1304 * that is still on the stack, we will loop around and call tgt_continue()
1305 * again. This allows vespecs to continue the target without recursion.
1306 */
static int
tgt_request_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp, uint_t tflag,
    int (*t_cont)(mdb_tgt_t *, mdb_tgt_status_t *))
{
	mdb_tgt_spec_desc_t desc;
	mdb_sespec_t *sep;
	char buf[BUFSIZ];
	int status;

	/*
	 * If the target is already busy (we are being re-entered from a
	 * vespec callback), just record the pending continue bit and return;
	 * the outer tgt_request_continue() loop below will act on it.
	 */
	if (t->t_flags & MDB_TGT_F_BUSY) {
		t->t_flags |= tflag;
		return (0);
	}

	/* Keep continuing while callbacks keep requesting continues. */
	do {
		status = tgt_continue(t, tsp, t_cont);
	} while (status == 0 && (t->t_flags & T_CONT_BITS));

	if (status == 0) {
		/*
		 * Report each non-SILENT vespec on the matched chain using
		 * the sespec's se_info() description string.
		 */
		for (sep = t->t_matched; sep != T_SE_END;
		    sep = sep->se_matched) {
			mdb_vespec_t *vep;

			for (vep = mdb_list_next(&sep->se_velist); vep;
			    vep = mdb_list_next(vep)) {
				if (vep->ve_flags & MDB_TGT_SPEC_SILENT)
					continue;
				warn("%s\n", sep->se_ops->se_info(t, sep,
				    vep, &desc, buf, sizeof (buf)));
			}
		}

		/* Notify interested parties of the state change. */
		mdb_callb_fire(MDB_CALLB_STCHG);
	}

	t->t_flags &= ~T_CONT_BITS;
	return (status);
}
1345
1346 /*
1347 * Restart target execution: we rely upon the underlying target implementation
1348 * to do most of the work for us. In particular, we assume it will properly
1349 * preserve the state of our event lists if the run fails for some reason,
1350 * and that it will reset all events to the IDLE state if the run succeeds.
1351 * If it is successful, we attempt to activate all of the idle sespecs. The
1352 * t_run() operation is defined to leave the target stopped at the earliest
1353 * possible point in execution, and then return control to the debugger,
1354 * awaiting a step or continue operation to set it running again.
1355 */
1356 int
mdb_tgt_run(mdb_tgt_t * t,int argc,const mdb_arg_t * argv)1357 mdb_tgt_run(mdb_tgt_t *t, int argc, const mdb_arg_t *argv)
1358 {
1359 int i;
1360
1361 for (i = 0; i < argc; i++) {
1362 if (argv->a_type != MDB_TYPE_STRING)
1363 return (set_errno(EINVAL));
1364 }
1365
1366 if (t->t_ops->t_run(t, argc, argv) == -1)
1367 return (-1); /* errno is set for us */
1368
1369 t->t_flags &= ~T_CONT_BITS;
1370 (void) mdb_tgt_sespec_activate_all(t);
1371
1372 if (mdb.m_term != NULL)
1373 IOP_CTL(mdb.m_term, MDB_IOC_CTTY, NULL);
1374
1375 return (0);
1376 }
1377
/*
 * Single-step: request a continue specialized by the MDB_TGT_F_STEP flag,
 * using the target's t_step operation.
 */
int
mdb_tgt_step(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	return (tgt_request_continue(t, tsp, MDB_TGT_F_STEP, t->t_ops->t_step));
}
1383
/*
 * Step-out: run until the current function returns.  The STEP_OUT flag is
 * set unconditionally so that a busy target still honors the request; the
 * actual return-address breakpoint is planted in tgt_continue().
 */
int
mdb_tgt_step_out(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	t->t_flags |= MDB_TGT_F_STEP_OUT; /* set flag even if tgt not busy */
	return (tgt_request_continue(t, tsp, 0, t->t_ops->t_cont));
}
1390
/*
 * Step-over ("next"): like step, but tgt_continue() will try to breakpoint
 * past the next control-transfer instruction, falling back to a plain step.
 */
int
mdb_tgt_next(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	t->t_flags |= MDB_TGT_F_NEXT; /* set flag even if tgt not busy */
	return (tgt_request_continue(t, tsp, 0, t->t_ops->t_step));
}
1397
/*
 * Plain continue: resume target execution via the t_cont operation, with
 * MDB_TGT_F_CONT recorded as the pending bit if the target is busy.
 */
int
mdb_tgt_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	return (tgt_request_continue(t, tsp, MDB_TGT_F_CONT, t->t_ops->t_cont));
}
1403
/*
 * Deliver signal 'sig' to the target; delegates to the t_signal operation.
 */
int
mdb_tgt_signal(mdb_tgt_t *t, int sig)
{
	return (t->t_ops->t_signal(t, sig));
}
1409
1410 void *
mdb_tgt_vespec_data(mdb_tgt_t * t,int vid)1411 mdb_tgt_vespec_data(mdb_tgt_t *t, int vid)
1412 {
1413 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, vid);
1414
1415 if (vep == NULL) {
1416 (void) set_errno(EMDB_NOSESPEC);
1417 return (NULL);
1418 }
1419
1420 return (vep->ve_data);
1421 }
1422
1423 /*
1424 * Return a structured description and comment string for the given vespec.
1425 * We fill in the common information from the vespec, and then call down to
1426 * the underlying sespec to provide the comment string and modify any
1427 * event type-specific information.
1428 */
1429 char *
mdb_tgt_vespec_info(mdb_tgt_t * t,int vid,mdb_tgt_spec_desc_t * sp,char * buf,size_t nbytes)1430 mdb_tgt_vespec_info(mdb_tgt_t *t, int vid, mdb_tgt_spec_desc_t *sp,
1431 char *buf, size_t nbytes)
1432 {
1433 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, vid);
1434
1435 mdb_tgt_spec_desc_t desc;
1436 mdb_sespec_t *sep;
1437
1438 if (vep == NULL) {
1439 if (sp != NULL)
1440 bzero(sp, sizeof (mdb_tgt_spec_desc_t));
1441 (void) set_errno(EMDB_NOSESPEC);
1442 return (NULL);
1443 }
1444
1445 if (sp == NULL)
1446 sp = &desc;
1447
1448 sep = vep->ve_se;
1449
1450 sp->spec_id = vep->ve_id;
1451 sp->spec_flags = vep->ve_flags;
1452 sp->spec_hits = vep->ve_hits;
1453 sp->spec_limit = vep->ve_limit;
1454 sp->spec_state = sep->se_state;
1455 sp->spec_errno = sep->se_errno;
1456 sp->spec_base = 0;
1457 sp->spec_size = 0;
1458 sp->spec_data = vep->ve_data;
1459
1460 return (sep->se_ops->se_info(t, sep, vep, sp, buf, nbytes));
1461 }
1462
1463 /*
1464 * Qsort callback for sorting vespecs by VID, used below.
1465 */
1466 static int
tgt_vespec_compare(const mdb_vespec_t ** lp,const mdb_vespec_t ** rp)1467 tgt_vespec_compare(const mdb_vespec_t **lp, const mdb_vespec_t **rp)
1468 {
1469 return ((*lp)->ve_id - (*rp)->ve_id);
1470 }
1471
1472 /*
1473 * Iterate over all vespecs and call the specified callback function with the
1474 * corresponding VID and caller data pointer. We want the callback function
1475 * to see a consistent, sorted snapshot of the vespecs, and allow the callback
1476 * to take actions such as deleting the vespec itself, so we cannot simply
1477 * iterate over the lists. Instead, we pre-allocate an array of vespec
1478 * pointers, fill it in and place an additional hold on each vespec, and then
1479 * sort it. After the callback has been executed on each vespec in the
1480 * sorted array, we remove our hold and free the temporary array.
1481 */
int
mdb_tgt_vespec_iter(mdb_tgt_t *t, mdb_tgt_vespec_f *func, void *p)
{
	mdb_vespec_t **veps, **vepp, **vend;
	mdb_vespec_t *vep, *nvep;
	mdb_sespec_t *sep;

	/* Snapshot the count up front; it must match what the lists hold. */
	uint_t vecnt = t->t_vecnt;

	veps = mdb_alloc(sizeof (mdb_vespec_t *) * vecnt, UM_SLEEP);
	vend = veps + vecnt;
	vepp = veps;

	/*
	 * Collect every vespec from the active and idle sespec lists,
	 * placing a hold on each so the callback can safely delete or
	 * move vespecs without invalidating our snapshot.
	 */
	for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
		for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
			mdb_tgt_vespec_hold(t, vep);
			nvep = mdb_list_next(vep);
			*vepp++ = vep;
		}
	}

	for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
		for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
			mdb_tgt_vespec_hold(t, vep);
			nvep = mdb_list_next(vep);
			*vepp++ = vep;
		}
	}

	/* t_vecnt disagreeing with the lists is a fatal internal error. */
	if (vepp != vend) {
		fail("target has %u vespecs on list but vecnt shows %u\n",
		    (uint_t)(vepp - veps), vecnt);
	}

	/* Present the callback with a stable, VID-sorted view. */
	qsort(veps, vecnt, sizeof (mdb_vespec_t *),
	    (int (*)(const void *, const void *))tgt_vespec_compare);

	for (vepp = veps; vepp < vend; vepp++) {
		if (func(t, p, (*vepp)->ve_id, (*vepp)->ve_data) != 0)
			break;
	}

	/* Drop the holds taken above (may free deleted vespecs). */
	for (vepp = veps; vepp < vend; vepp++)
		mdb_tgt_vespec_rele(t, *vepp);

	mdb_free(veps, sizeof (mdb_vespec_t *) * vecnt);
	return (0);
}
1530
1531 /*
1532 * Reset the vespec flags, match limit, and callback data to the specified
1533 * values. We silently correct invalid parameters, except for the VID.
1534 * The caller is required to query the existing properties and pass back
1535 * the existing values for any properties that should not be modified.
1536 * If the callback data is modified, the caller is responsible for cleaning
1537 * up any state associated with the previous value.
1538 */
int
mdb_tgt_vespec_modify(mdb_tgt_t *t, int id, uint_t flags,
    uint_t limit, void *data)
{
	mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);

	if (vep == NULL)
		return (set_errno(EMDB_NOSESPEC));

	/*
	 * If the value of the MDB_TGT_SPEC_DISABLED bit is changing, call the
	 * appropriate vespec function to do the enable/disable work.
	 */
	if ((flags & MDB_TGT_SPEC_DISABLED) !=
	    (vep->ve_flags & MDB_TGT_SPEC_DISABLED)) {
		if (flags & MDB_TGT_SPEC_DISABLED)
			(void) mdb_tgt_vespec_disable(t, id);
		else
			(void) mdb_tgt_vespec_enable(t, id);
	}

	/*
	 * Make sure that only one MDB_TGT_SPEC_AUTO* bit is set in the new
	 * flags value: extra bits are cleared according to order of
	 * precedence (AUTOSTOP over AUTODEL over AUTODIS).
	 */
	if (flags & MDB_TGT_SPEC_AUTOSTOP)
		flags &= ~(MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS);
	else if (flags & MDB_TGT_SPEC_AUTODEL)
		flags &= ~MDB_TGT_SPEC_AUTODIS;

	/*
	 * The TEMPORARY property always takes precedence over STICKY.
	 */
	if (flags & MDB_TGT_SPEC_TEMPORARY)
		flags &= ~MDB_TGT_SPEC_STICKY;

	/*
	 * If any MDB_TGT_SPEC_AUTO* bits are changing, reset the hit count
	 * back to zero and clear all of the old auto bits.
	 */
	if ((flags & T_AUTO_BITS) != (vep->ve_flags & T_AUTO_BITS)) {
		vep->ve_flags &= ~T_AUTO_BITS;
		vep->ve_hits = 0;
	}

	/* Preserve implementation bits; take the rest from the caller. */
	vep->ve_flags = (vep->ve_flags & T_IMPL_BITS) | (flags & ~T_IMPL_BITS);
	vep->ve_data = data;

	/*
	 * If any MDB_TGT_SPEC_AUTO* flags are set, make sure the limit is at
	 * least one.  If none are set, reset it back to zero.
	 */
	if (vep->ve_flags & T_AUTO_BITS)
		vep->ve_limit = MAX(limit, 1);
	else
		vep->ve_limit = 0;

	/*
	 * As a convenience, we allow the caller to specify SPEC_DELETED in
	 * the flags field as indication that the event should be deleted.
	 */
	if (flags & MDB_TGT_SPEC_DELETED)
		(void) mdb_tgt_vespec_delete(t, id);

	return (0);
}
1605
1606 /*
1607 * Remove the user disabled bit from the specified vespec, and attempt to
1608 * activate the underlying sespec and move it to the active list if possible.
1609 */
1610 int
mdb_tgt_vespec_enable(mdb_tgt_t * t,int id)1611 mdb_tgt_vespec_enable(mdb_tgt_t *t, int id)
1612 {
1613 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);
1614
1615 if (vep == NULL)
1616 return (set_errno(EMDB_NOSESPEC));
1617
1618 if (vep->ve_flags & MDB_TGT_SPEC_DISABLED) {
1619 ASSERT(mdb_list_next(vep) == NULL);
1620 vep->ve_flags &= ~MDB_TGT_SPEC_DISABLED;
1621 if (mdb_tgt_sespec_activate_one(t, vep->ve_se) < 0)
1622 return (-1); /* errno is set for us */
1623 }
1624
1625 return (0);
1626 }
1627
1628 /*
1629 * Set the user disabled bit on the specified vespec, and move it to the idle
1630 * list. If the vespec is not alone with its sespec or if it is a currently
1631 * matched event, we must always create a new idle sespec and move the vespec
1632 * there. If the vespec was alone and active, we can simply idle the sespec.
1633 */
int
mdb_tgt_vespec_disable(mdb_tgt_t *t, int id)
{
	mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);
	mdb_sespec_t *sep;

	if (vep == NULL)
		return (set_errno(EMDB_NOSESPEC));

	if (vep->ve_flags & MDB_TGT_SPEC_DISABLED)
		return (0); /* already disabled */

	/*
	 * If the vespec shares its sespec with others (has list neighbors)
	 * or the sespec is currently matched, we cannot idle the sespec in
	 * place: create a fresh idle sespec and migrate the vespec onto it.
	 */
	if (mdb_list_prev(vep) != NULL || mdb_list_next(vep) != NULL ||
	    vep->ve_se->se_matched != NULL) {

		sep = mdb_tgt_sespec_insert(t, vep->ve_se->se_ops, &t->t_idle);

		/* Transfer the vespec's reference from the old sespec ... */
		mdb_list_delete(&vep->ve_se->se_velist, vep);
		mdb_tgt_sespec_rele(t, vep->ve_se);

		/* ... to the new idle sespec. */
		mdb_list_append(&sep->se_velist, vep);
		mdb_tgt_sespec_hold(t, sep);

		vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
		vep->ve_se = sep;

	} else if (vep->ve_se->se_state != MDB_TGT_SPEC_IDLE)
		/* Vespec is alone and unmatched: idle its sespec directly. */
		mdb_tgt_sespec_idle_one(t, vep->ve_se, EMDB_SPECDIS);

	vep->ve_flags |= MDB_TGT_SPEC_DISABLED;
	return (0);
}
1666
1667 /*
1668 * Delete the given vespec. We use the MDB_TGT_SPEC_DELETED flag to ensure that
1669 * multiple calls to mdb_tgt_vespec_delete to not attempt to decrement the
1670 * reference count on the vespec more than once. This is because the vespec
1671 * may remain referenced if it is currently held by another routine (e.g.
1672 * vespec_iter), and so the user could attempt to delete it more than once
1673 * since it reference count will be >= 2 prior to the first delete call.
1674 */
1675 int
mdb_tgt_vespec_delete(mdb_tgt_t * t,int id)1676 mdb_tgt_vespec_delete(mdb_tgt_t *t, int id)
1677 {
1678 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);
1679
1680 if (vep == NULL)
1681 return (set_errno(EMDB_NOSESPEC));
1682
1683 if (vep->ve_flags & MDB_TGT_SPEC_DELETED)
1684 return (set_errno(EBUSY));
1685
1686 vep->ve_flags |= MDB_TGT_SPEC_DELETED;
1687 mdb_tgt_vespec_rele(t, vep);
1688 return (0);
1689 }
1690
/*
 * Create a virtual-address breakpoint at 'addr'; delegates to t_add_vbrkpt.
 * Returns the new vespec id, or 0 on failure per the t_ops contract
 * (see the watchpoint wrappers below, which return 0 with errno set).
 */
int
mdb_tgt_add_vbrkpt(mdb_tgt_t *t, uintptr_t addr,
    int spec_flags, mdb_tgt_se_f *func, void *p)
{
	return (t->t_ops->t_add_vbrkpt(t, addr, spec_flags, func, p));
}
1697
/*
 * Create a symbolic (deferred-capable) breakpoint on 'symbol'; delegates
 * to the target's t_add_sbrkpt operation.
 */
int
mdb_tgt_add_sbrkpt(mdb_tgt_t *t, const char *symbol,
    int spec_flags, mdb_tgt_se_f *func, void *p)
{
	return (t->t_ops->t_add_sbrkpt(t, symbol, spec_flags, func, p));
}
1704
1705 int
mdb_tgt_add_pwapt(mdb_tgt_t * t,physaddr_t pa,size_t n,uint_t flags,int spec_flags,mdb_tgt_se_f * func,void * p)1706 mdb_tgt_add_pwapt(mdb_tgt_t *t, physaddr_t pa, size_t n, uint_t flags,
1707 int spec_flags, mdb_tgt_se_f *func, void *p)
1708 {
1709 if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) {
1710 (void) set_errno(EINVAL);
1711 return (0);
1712 }
1713
1714 if (pa + n < pa) {
1715 (void) set_errno(EMDB_WPRANGE);
1716 return (0);
1717 }
1718
1719 return (t->t_ops->t_add_pwapt(t, pa, n, flags, spec_flags, func, p));
1720 }
1721
1722 int
mdb_tgt_add_vwapt(mdb_tgt_t * t,uintptr_t va,size_t n,uint_t flags,int spec_flags,mdb_tgt_se_f * func,void * p)1723 mdb_tgt_add_vwapt(mdb_tgt_t *t, uintptr_t va, size_t n, uint_t flags,
1724 int spec_flags, mdb_tgt_se_f *func, void *p)
1725 {
1726 if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) {
1727 (void) set_errno(EINVAL);
1728 return (0);
1729 }
1730
1731 if (va + n < va) {
1732 (void) set_errno(EMDB_WPRANGE);
1733 return (0);
1734 }
1735
1736 return (t->t_ops->t_add_vwapt(t, va, n, flags, spec_flags, func, p));
1737 }
1738
1739 int
mdb_tgt_add_iowapt(mdb_tgt_t * t,uintptr_t addr,size_t n,uint_t flags,int spec_flags,mdb_tgt_se_f * func,void * p)1740 mdb_tgt_add_iowapt(mdb_tgt_t *t, uintptr_t addr, size_t n, uint_t flags,
1741 int spec_flags, mdb_tgt_se_f *func, void *p)
1742 {
1743 if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) {
1744 (void) set_errno(EINVAL);
1745 return (0);
1746 }
1747
1748 if (addr + n < addr) {
1749 (void) set_errno(EMDB_WPRANGE);
1750 return (0);
1751 }
1752
1753 return (t->t_ops->t_add_iowapt(t, addr, n, flags, spec_flags, func, p));
1754 }
1755
/*
 * Trace entry to system call 'sysnum'; delegates to t_add_sysenter.
 */
int
mdb_tgt_add_sysenter(mdb_tgt_t *t, int sysnum,
    int spec_flags, mdb_tgt_se_f *func, void *p)
{
	return (t->t_ops->t_add_sysenter(t, sysnum, spec_flags, func, p));
}
1762
/*
 * Trace return from system call 'sysnum'; delegates to t_add_sysexit.
 */
int
mdb_tgt_add_sysexit(mdb_tgt_t *t, int sysnum,
    int spec_flags, mdb_tgt_se_f *func, void *p)
{
	return (t->t_ops->t_add_sysexit(t, sysnum, spec_flags, func, p));
}
1769
/*
 * Trace delivery of signal 'sig' to the target; delegates to t_add_signal.
 */
int
mdb_tgt_add_signal(mdb_tgt_t *t, int sig,
    int spec_flags, mdb_tgt_se_f *func, void *p)
{
	return (t->t_ops->t_add_signal(t, sig, spec_flags, func, p));
}
1776
/*
 * Trace machine fault 'flt' in the target; delegates to t_add_fault.
 */
int
mdb_tgt_add_fault(mdb_tgt_t *t, int flt,
    int spec_flags, mdb_tgt_se_f *func, void *p)
{
	return (t->t_ops->t_add_fault(t, flt, spec_flags, func, p));
}
1783
/*
 * Read register 'rname' of thread 'tid' into *rp; delegates to t_getareg.
 */
int
mdb_tgt_getareg(mdb_tgt_t *t, mdb_tgt_tid_t tid,
    const char *rname, mdb_tgt_reg_t *rp)
{
	return (t->t_ops->t_getareg(t, tid, rname, rp));
}
1790
/*
 * Write value 'r' to register 'rname' of thread 'tid'; delegates to
 * t_putareg.
 */
int
mdb_tgt_putareg(mdb_tgt_t *t, mdb_tgt_tid_t tid,
    const char *rname, mdb_tgt_reg_t r)
{
	return (t->t_ops->t_putareg(t, tid, rname, r));
}
1797
/*
 * Iterate over the stack frames implied by register set 'gregs', invoking
 * 'cb' with 'p' for each frame; delegates to t_stack_iter.
 */
int
mdb_tgt_stack_iter(mdb_tgt_t *t, const mdb_tgt_gregset_t *gregs,
    mdb_tgt_stack_f *cb, void *p)
{
	return (t->t_ops->t_stack_iter(t, gregs, cb, p));
}
1804
1805 int
mdb_tgt_xdata_iter(mdb_tgt_t * t,mdb_tgt_xdata_f * func,void * private)1806 mdb_tgt_xdata_iter(mdb_tgt_t *t, mdb_tgt_xdata_f *func, void *private)
1807 {
1808 mdb_xdata_t *xdp;
1809
1810 for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1811 if (func(private, xdp->xd_name, xdp->xd_desc,
1812 xdp->xd_copy(t, NULL, 0)) != 0)
1813 break;
1814 }
1815
1816 return (0);
1817 }
1818
1819 ssize_t
mdb_tgt_getxdata(mdb_tgt_t * t,const char * name,void * buf,size_t nbytes)1820 mdb_tgt_getxdata(mdb_tgt_t *t, const char *name, void *buf, size_t nbytes)
1821 {
1822 mdb_xdata_t *xdp;
1823
1824 for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1825 if (strcmp(xdp->xd_name, name) == 0)
1826 return (xdp->xd_copy(t, buf, nbytes));
1827 }
1828
1829 return (set_errno(ENODATA));
1830 }
1831
/*
 * Stub target op for unsupported operations: set errno to EMDB_TGTNOTSUP
 * and return set_errno()'s error return value.
 */
long
mdb_tgt_notsup()
{
	return (set_errno(EMDB_TGTNOTSUP));
}
1837
/*
 * Stub target op for unsupported pointer-returning operations: set errno
 * to EMDB_TGTNOTSUP and return NULL.
 */
void *
mdb_tgt_null()
{
	(void) set_errno(EMDB_TGTNOTSUP);
	return (NULL);
}
1844
/*
 * Stub target op that does nothing and reports success.
 */
long
mdb_tgt_nop()
{
	return (0L);
}
1850
1851 int
mdb_tgt_xdata_insert(mdb_tgt_t * t,const char * name,const char * desc,ssize_t (* copy)(mdb_tgt_t *,void *,size_t))1852 mdb_tgt_xdata_insert(mdb_tgt_t *t, const char *name, const char *desc,
1853 ssize_t (*copy)(mdb_tgt_t *, void *, size_t))
1854 {
1855 mdb_xdata_t *xdp;
1856
1857 for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1858 if (strcmp(xdp->xd_name, name) == 0)
1859 return (set_errno(EMDB_XDEXISTS));
1860 }
1861
1862 xdp = mdb_alloc(sizeof (mdb_xdata_t), UM_SLEEP);
1863 mdb_list_append(&t->t_xdlist, xdp);
1864
1865 xdp->xd_name = name;
1866 xdp->xd_desc = desc;
1867 xdp->xd_copy = copy;
1868
1869 return (0);
1870 }
1871
1872 int
mdb_tgt_xdata_delete(mdb_tgt_t * t,const char * name)1873 mdb_tgt_xdata_delete(mdb_tgt_t *t, const char *name)
1874 {
1875 mdb_xdata_t *xdp;
1876
1877 for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1878 if (strcmp(xdp->xd_name, name) == 0) {
1879 mdb_list_delete(&t->t_xdlist, xdp);
1880 mdb_free(xdp, sizeof (mdb_xdata_t));
1881 return (0);
1882 }
1883 }
1884
1885 return (set_errno(EMDB_NOXD));
1886 }
1887
1888 int
mdb_tgt_sym_match(const GElf_Sym * sym,uint_t mask)1889 mdb_tgt_sym_match(const GElf_Sym *sym, uint_t mask)
1890 {
1891 #if STT_NUM != (STT_TLS + 1)
1892 #error "STT_NUM has grown. update mdb_tgt_sym_match()"
1893 #endif
1894
1895 uchar_t s_bind = GELF_ST_BIND(sym->st_info);
1896 uchar_t s_type = GELF_ST_TYPE(sym->st_info);
1897
1898 /*
1899 * In case you haven't already guessed, this relies on the bitmask
1900 * used by <mdb/mdb_target.h> and <libproc.h> for encoding symbol
1901 * type and binding matching the order of STB and STT constants
1902 * in <sys/elf.h>. Changes to ELF must maintain binary
1903 * compatibility, so I think this is reasonably fair game.
1904 */
1905 if (s_bind < STB_NUM && s_type < STT_NUM) {
1906 uint_t type = (1 << (s_type + 8)) | (1 << s_bind);
1907 return ((type & ~mask) == 0);
1908 }
1909
1910 return (0); /* Unknown binding or type; fail to match */
1911 }
1912
1913 void
mdb_tgt_elf_export(mdb_gelf_file_t * gf)1914 mdb_tgt_elf_export(mdb_gelf_file_t *gf)
1915 {
1916 GElf_Xword d = 0, t = 0;
1917 GElf_Addr b = 0, e = 0;
1918 uint32_t m = 0;
1919 mdb_var_t *v;
1920
1921 /*
1922 * Reset legacy adb variables based on the specified ELF object file
1923 * provided by the target. We define these variables:
1924 *
1925 * b - the address of the data segment (first writeable Phdr)
1926 * d - the size of the data segment
1927 * e - the address of the entry point
1928 * m - the magic number identifying the file
1929 * t - the address of the text segment (first executable Phdr)
1930 */
1931 if (gf != NULL) {
1932 const GElf_Phdr *text = NULL, *data = NULL;
1933 size_t i;
1934
1935 e = gf->gf_ehdr.e_entry;
1936 bcopy(&gf->gf_ehdr.e_ident[EI_MAG0], &m, sizeof (m));
1937
1938 for (i = 0; i < gf->gf_npload; i++) {
1939 if (text == NULL && (gf->gf_phdrs[i].p_flags & PF_X))
1940 text = &gf->gf_phdrs[i];
1941 if (data == NULL && (gf->gf_phdrs[i].p_flags & PF_W))
1942 data = &gf->gf_phdrs[i];
1943 }
1944
1945 if (text != NULL)
1946 t = text->p_memsz;
1947 if (data != NULL) {
1948 b = data->p_vaddr;
1949 d = data->p_memsz;
1950 }
1951 }
1952
1953 if ((v = mdb_nv_lookup(&mdb.m_nv, "b")) != NULL)
1954 mdb_nv_set_value(v, b);
1955 if ((v = mdb_nv_lookup(&mdb.m_nv, "d")) != NULL)
1956 mdb_nv_set_value(v, d);
1957 if ((v = mdb_nv_lookup(&mdb.m_nv, "e")) != NULL)
1958 mdb_nv_set_value(v, e);
1959 if ((v = mdb_nv_lookup(&mdb.m_nv, "m")) != NULL)
1960 mdb_nv_set_value(v, m);
1961 if ((v = mdb_nv_lookup(&mdb.m_nv, "t")) != NULL)
1962 mdb_nv_set_value(v, t);
1963 }
1964
/*
 * Take an additional reference on the given software event specifier.
 */
/*ARGSUSED*/
void
mdb_tgt_sespec_hold(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	/* The ASSERT catches reference-count wrap-around. */
	sep->se_refs++;
	ASSERT(sep->se_refs != 0);
}
1972
1973 void
mdb_tgt_sespec_rele(mdb_tgt_t * t,mdb_sespec_t * sep)1974 mdb_tgt_sespec_rele(mdb_tgt_t *t, mdb_sespec_t *sep)
1975 {
1976 ASSERT(sep->se_refs != 0);
1977
1978 if (--sep->se_refs == 0) {
1979 mdb_dprintf(MDB_DBG_TGT, "destroying sespec %p\n", (void *)sep);
1980 ASSERT(mdb_list_next(&sep->se_velist) == NULL);
1981
1982 if (sep->se_state != MDB_TGT_SPEC_IDLE) {
1983 sep->se_ops->se_dtor(t, sep);
1984 mdb_list_delete(&t->t_active, sep);
1985 } else
1986 mdb_list_delete(&t->t_idle, sep);
1987
1988 mdb_free(sep, sizeof (mdb_sespec_t));
1989 }
1990 }
1991
1992 mdb_sespec_t *
mdb_tgt_sespec_insert(mdb_tgt_t * t,const mdb_se_ops_t * ops,mdb_list_t * list)1993 mdb_tgt_sespec_insert(mdb_tgt_t *t, const mdb_se_ops_t *ops, mdb_list_t *list)
1994 {
1995 mdb_sespec_t *sep = mdb_zalloc(sizeof (mdb_sespec_t), UM_SLEEP);
1996
1997 if (list == &t->t_active)
1998 sep->se_state = MDB_TGT_SPEC_ACTIVE;
1999 else
2000 sep->se_state = MDB_TGT_SPEC_IDLE;
2001
2002 mdb_list_append(list, sep);
2003 sep->se_ops = ops;
2004 return (sep);
2005 }
2006
2007 mdb_sespec_t *
mdb_tgt_sespec_lookup_active(mdb_tgt_t * t,const mdb_se_ops_t * ops,void * args)2008 mdb_tgt_sespec_lookup_active(mdb_tgt_t *t, const mdb_se_ops_t *ops, void *args)
2009 {
2010 mdb_sespec_t *sep;
2011
2012 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
2013 if (sep->se_ops == ops && sep->se_ops->se_secmp(t, sep, args))
2014 break;
2015 }
2016
2017 return (sep);
2018 }
2019
2020 mdb_sespec_t *
mdb_tgt_sespec_lookup_idle(mdb_tgt_t * t,const mdb_se_ops_t * ops,void * args)2021 mdb_tgt_sespec_lookup_idle(mdb_tgt_t *t, const mdb_se_ops_t *ops, void *args)
2022 {
2023 mdb_sespec_t *sep;
2024
2025 for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
2026 if (sep->se_ops == ops && sep->se_ops->se_vecmp(t,
2027 mdb_list_next(&sep->se_velist), args))
2028 break;
2029 }
2030
2031 return (sep);
2032 }
2033
/*
 * Take an additional reference on the given virtual event specifier.
 */
/*ARGSUSED*/
void
mdb_tgt_vespec_hold(mdb_tgt_t *t, mdb_vespec_t *vep)
{
	/* The ASSERT catches reference-count wrap-around. */
	vep->ve_refs++;
	ASSERT(vep->ve_refs != 0);
}
2041
2042 void
mdb_tgt_vespec_rele(mdb_tgt_t * t,mdb_vespec_t * vep)2043 mdb_tgt_vespec_rele(mdb_tgt_t *t, mdb_vespec_t *vep)
2044 {
2045 ASSERT(vep->ve_refs != 0);
2046
2047 if (--vep->ve_refs == 0) {
2048 /*
2049 * Remove this vespec from the sespec's velist and decrement
2050 * the reference count on the sespec.
2051 */
2052 mdb_list_delete(&vep->ve_se->se_velist, vep);
2053 mdb_tgt_sespec_rele(t, vep->ve_se);
2054
2055 /*
2056 * If we are deleting the most recently assigned VID, reset
2057 * t_vepos or t_veneg as appropriate to re-use that number.
2058 * This could be enhanced to re-use any free number by
2059 * maintaining a bitmap or hash of the allocated IDs.
2060 */
2061 if (vep->ve_id > 0 && t->t_vepos == vep->ve_id + 1)
2062 t->t_vepos = vep->ve_id;
2063 else if (vep->ve_id < 0 && t->t_veneg == -vep->ve_id + 1)
2064 t->t_veneg = -vep->ve_id;
2065
2066 /*
2067 * Call the destructor to clean up ve_args, and then free
2068 * the actual vespec structure.
2069 */
2070 vep->ve_dtor(vep);
2071 mdb_free(vep, sizeof (mdb_vespec_t));
2072
2073 ASSERT(t->t_vecnt != 0);
2074 t->t_vecnt--;
2075 }
2076 }
2077
2078 int
mdb_tgt_vespec_insert(mdb_tgt_t * t,const mdb_se_ops_t * ops,int flags,mdb_tgt_se_f * func,void * data,void * args,void (* dtor)(mdb_vespec_t *))2079 mdb_tgt_vespec_insert(mdb_tgt_t *t, const mdb_se_ops_t *ops, int flags,
2080 mdb_tgt_se_f *func, void *data, void *args, void (*dtor)(mdb_vespec_t *))
2081 {
2082 mdb_vespec_t *vep = mdb_zalloc(sizeof (mdb_vespec_t), UM_SLEEP);
2083
2084 int id, mult, *seqp;
2085 mdb_sespec_t *sep;
2086
2087 /*
2088 * Make that only one MDB_TGT_SPEC_AUTO* bit is set in the new flags
2089 * value: extra bits are cleared according to order of precedence.
2090 */
2091 if (flags & MDB_TGT_SPEC_AUTOSTOP)
2092 flags &= ~(MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS);
2093 else if (flags & MDB_TGT_SPEC_AUTODEL)
2094 flags &= ~MDB_TGT_SPEC_AUTODIS;
2095
2096 /*
2097 * The TEMPORARY property always takes precedence over STICKY.
2098 */
2099 if (flags & MDB_TGT_SPEC_TEMPORARY)
2100 flags &= ~MDB_TGT_SPEC_STICKY;
2101
2102 /*
2103 * Find a matching sespec or create a new one on the appropriate list.
2104 * We always create a new sespec if the vespec is created disabled.
2105 */
2106 if (flags & MDB_TGT_SPEC_DISABLED)
2107 sep = mdb_tgt_sespec_insert(t, ops, &t->t_idle);
2108 else if ((sep = mdb_tgt_sespec_lookup_active(t, ops, args)) == NULL &&
2109 (sep = mdb_tgt_sespec_lookup_idle(t, ops, args)) == NULL)
2110 sep = mdb_tgt_sespec_insert(t, ops, &t->t_active);
2111
2112 /*
2113 * Generate a new ID for the vespec. Increasing positive integers are
2114 * assigned to visible vespecs; decreasing negative integers are
2115 * assigned to hidden vespecs. The target saves our most recent choice.
2116 */
2117 if (flags & MDB_TGT_SPEC_INTERNAL) {
2118 seqp = &t->t_veneg;
2119 mult = -1;
2120 } else {
2121 seqp = &t->t_vepos;
2122 mult = 1;
2123 }
2124
2125 id = *seqp;
2126
2127 while (mdb_tgt_vespec_lookup(t, id * mult) != NULL)
2128 id = MAX(id + 1, 1);
2129
2130 *seqp = MAX(id + 1, 1);
2131
2132 vep->ve_id = id * mult;
2133 vep->ve_flags = flags & ~(MDB_TGT_SPEC_MATCHED | MDB_TGT_SPEC_DELETED);
2134 vep->ve_se = sep;
2135 vep->ve_callback = func;
2136 vep->ve_data = data;
2137 vep->ve_args = args;
2138 vep->ve_dtor = dtor;
2139
2140 mdb_list_append(&sep->se_velist, vep);
2141 mdb_tgt_sespec_hold(t, sep);
2142
2143 mdb_tgt_vespec_hold(t, vep);
2144 t->t_vecnt++;
2145
2146 /*
2147 * If this vespec is the first reference to the sespec and it's active,
2148 * then it is newly created and we should attempt to initialize it.
2149 * If se_ctor fails, then move the sespec back to the idle list.
2150 */
2151 if (sep->se_refs == 1 && sep->se_state == MDB_TGT_SPEC_ACTIVE &&
2152 sep->se_ops->se_ctor(t, sep, vep->ve_args) == -1) {
2153
2154 mdb_list_delete(&t->t_active, sep);
2155 mdb_list_append(&t->t_idle, sep);
2156
2157 sep->se_state = MDB_TGT_SPEC_IDLE;
2158 sep->se_errno = errno;
2159 sep->se_data = NULL;
2160 }
2161
2162 /*
2163 * If the sespec is active and the target is currently running (because
2164 * we grabbed it using PGRAB_NOSTOP), then go ahead and attempt to arm
2165 * the sespec so it will take effect immediately.
2166 */
2167 if (sep->se_state == MDB_TGT_SPEC_ACTIVE &&
2168 t->t_status.st_state == MDB_TGT_RUNNING)
2169 mdb_tgt_sespec_arm_one(t, sep);
2170
2171 mdb_dprintf(MDB_DBG_TGT, "inserted [ %d ] sep=%p refs=%u state=%d\n",
2172 vep->ve_id, (void *)sep, sep->se_refs, sep->se_state);
2173
2174 return (vep->ve_id);
2175 }
2176
2177 /*
2178 * Search the target's active, idle, and disabled lists for the vespec matching
2179 * the specified VID, and return a pointer to it, or NULL if no match is found.
2180 */
2181 mdb_vespec_t *
mdb_tgt_vespec_lookup(mdb_tgt_t * t,int vid)2182 mdb_tgt_vespec_lookup(mdb_tgt_t *t, int vid)
2183 {
2184 mdb_sespec_t *sep;
2185 mdb_vespec_t *vep;
2186
2187 if (vid == 0)
2188 return (NULL); /* 0 is never a valid VID */
2189
2190 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
2191 for (vep = mdb_list_next(&sep->se_velist); vep;
2192 vep = mdb_list_next(vep)) {
2193 if (vep->ve_id == vid)
2194 return (vep);
2195 }
2196 }
2197
2198 for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
2199 for (vep = mdb_list_next(&sep->se_velist); vep;
2200 vep = mdb_list_next(vep)) {
2201 if (vep->ve_id == vid)
2202 return (vep);
2203 }
2204 }
2205
2206 return (NULL);
2207 }
2208
/*
 * Default vespec destructor: used when ve_args requires no cleanup.
 */
/*ARGSUSED*/
void
no_ve_dtor(mdb_vespec_t *vep)
{
	/* default destructor does nothing */
}
2215
/*
 * Default event callback: used when no action is needed on a match.
 */
/*ARGSUSED*/
void
no_se_f(mdb_tgt_t *t, int vid, void *data)
{
	/* default callback does nothing */
}
2222
/*
 * Default sespec destructor: used when se_data requires no cleanup.
 */
/*ARGSUSED*/
void
no_se_dtor(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	/* default destructor does nothing */
}
2229
2230 /*ARGSUSED*/
2231 int
no_se_secmp(mdb_tgt_t * t,mdb_sespec_t * sep,void * args)2232 no_se_secmp(mdb_tgt_t *t, mdb_sespec_t *sep, void *args)
2233 {
2234 return (sep->se_data == args);
2235 }
2236
2237 /*ARGSUSED*/
2238 int
no_se_vecmp(mdb_tgt_t * t,mdb_vespec_t * vep,void * args)2239 no_se_vecmp(mdb_tgt_t *t, mdb_vespec_t *vep, void *args)
2240 {
2241 return (vep->ve_args == args);
2242 }
2243
/*
 * Default arm op: nothing to install in the target; always succeed.
 */
/*ARGSUSED*/
int
no_se_arm(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	return (0); /* return success */
}
2250
/*
 * Default disarm op: nothing to remove from the target; always succeed.
 */
/*ARGSUSED*/
int
no_se_disarm(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	return (0); /* return success */
}
2257
2258 /*ARGSUSED*/
2259 int
no_se_cont(mdb_tgt_t * t,mdb_sespec_t * sep,mdb_tgt_status_t * tsp)2260 no_se_cont(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_tgt_status_t *tsp)
2261 {
2262 if (tsp != &t->t_status)
2263 bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));
2264
2265 return (0); /* return success */
2266 }
2267
2268 int
mdb_tgt_register_dcmds(mdb_tgt_t * t,const mdb_dcmd_t * dcp,int flags)2269 mdb_tgt_register_dcmds(mdb_tgt_t *t, const mdb_dcmd_t *dcp, int flags)
2270 {
2271 int fail = 0;
2272
2273 for (; dcp->dc_name != NULL; dcp++) {
2274 if (mdb_module_add_dcmd(t->t_module, dcp, flags) == -1) {
2275 warn("failed to add dcmd %s", dcp->dc_name);
2276 fail++;
2277 }
2278 }
2279
2280 return (fail > 0 ? -1 : 0);
2281 }
2282
2283 int
mdb_tgt_register_walkers(mdb_tgt_t * t,const mdb_walker_t * wp,int flags)2284 mdb_tgt_register_walkers(mdb_tgt_t *t, const mdb_walker_t *wp, int flags)
2285 {
2286 int fail = 0;
2287
2288 for (; wp->walk_name != NULL; wp++) {
2289 if (mdb_module_add_walker(t->t_module, wp, flags) == -1) {
2290 warn("failed to add walk %s", wp->walk_name);
2291 fail++;
2292 }
2293 }
2294
2295 return (fail > 0 ? -1 : 0);
2296 }
2297
2298 void
mdb_tgt_register_regvars(mdb_tgt_t * t,const mdb_tgt_regdesc_t * rdp,const mdb_nv_disc_t * disc,int flags)2299 mdb_tgt_register_regvars(mdb_tgt_t *t, const mdb_tgt_regdesc_t *rdp,
2300 const mdb_nv_disc_t *disc, int flags)
2301 {
2302 for (; rdp->rd_name != NULL; rdp++) {
2303 if (!(rdp->rd_flags & MDB_TGT_R_EXPORT))
2304 continue; /* Don't export register as a variable */
2305
2306 if (rdp->rd_flags & MDB_TGT_R_RDONLY)
2307 flags |= MDB_NV_RDONLY;
2308
2309 (void) mdb_nv_insert(&mdb.m_nv, rdp->rd_name, disc,
2310 (uintptr_t)t, MDB_NV_PERSIST | flags);
2311 }
2312 }
2313