Line data Source code
1 : /*
2 : SSSD
3 :
4 : NSS Responder - Mmap Cache
5 :
6 : Copyright (C) Simo Sorce <ssorce@redhat.com> 2011
7 :
8 : This program is free software; you can redistribute it and/or modify
9 : it under the terms of the GNU General Public License as published by
10 : the Free Software Foundation; either version 3 of the License, or
11 : (at your option) any later version.
12 :
13 : This program is distributed in the hope that it will be useful,
14 : but WITHOUT ANY WARRANTY; without even the implied warranty of
15 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 : GNU General Public License for more details.
17 :
18 : You should have received a copy of the GNU General Public License
19 : along with this program. If not, see <http://www.gnu.org/licenses/>.
20 : */
21 :
22 : #include "util/util.h"
23 : #include "confdb/confdb.h"
24 : #include <sys/mman.h>
25 : #include <fcntl.h>
26 : #include "util/mmap_cache.h"
27 : #include "responder/nss/nsssrv.h"
28 : #include "responder/nss/nsssrv_mmap_cache.h"
29 :
30 : /* arbitrary (avg of my /etc/passwd) */
31 : #define SSS_AVG_PASSWD_PAYLOAD (MC_SLOT_SIZE * 4)
32 : /* short group name and no gids (private user group) */
33 : #define SSS_AVG_GROUP_PAYLOAD (MC_SLOT_SIZE * 3)
34 : /* average space for 40 supplementary groups + 2 names */
35 : #define SSS_AVG_INITGROUP_PAYLOAD (MC_SLOT_SIZE * 5)
36 :
37 : #define MC_NEXT_BARRIER(val) ((((val) + 1) & 0x00ffffff) | 0xf0000000)
38 :
39 : #define MC_RAISE_BARRIER(m) do { \
40 : m->b2 = MC_NEXT_BARRIER(m->b1); \
41 : __sync_synchronize(); \
42 : } while (0)
43 :
44 : #define MC_LOWER_BARRIER(m) do { \
45 : __sync_synchronize(); \
46 : m->b1 = m->b2; \
47 : } while (0)
48 :
49 : #define MC_RAISE_INVALID_BARRIER(m) do { \
50 : m->b2 = MC_INVALID_VAL; \
51 : __sync_synchronize(); \
52 : } while (0)
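/*
 * A rough sketch of the reader side of this protocol (not code from this
 * file; the function name is made up for illustration).  The writer bumps
 * b2, mutates the record, then copies b2 back into b1, so a reader only
 * trusts a record whose barriers are equal and valid:
 *
 *     static bool rec_looks_settled(const struct sss_mc_rec *rec)
 *     {
 *         uint32_t b1 = rec->b1;
 *         __sync_synchronize();
 *         uint32_t b2 = rec->b2;
 *         return b1 != MC_INVALID_VAL && b1 == b2;
 *     }
 *
 * The real consumers are the NSS client library routines that map this
 * file read-only.
 */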
53 :
54 : struct sss_mc_ctx {
55 : char *name; /* mmap cache name */
56 : enum sss_mc_type type; /* mmap cache type */
57 : char *file; /* mmap cache file name */
58 : int fd; /* file descriptor */
59 :
60 : uint32_t seed; /* pseudo-random seed to avoid collision attacks */
61 : time_t valid_time_slot; /* maximum time the entry is valid in seconds */
62 :
63 : void *mmap_base; /* base address of mmap */
64 : size_t mmap_size; /* total size of mmap */
65 :
66 : uint32_t *hash_table; /* hash table address (in mmap) */
67 : uint32_t ht_size; /* size of hash table */
68 :
69 : uint8_t *free_table; /* free list bitmaps */
70 : uint32_t ft_size; /* size of free table */
71 : uint32_t next_slot; /* the next slot after last allocation */
72 :
73 : uint8_t *data_table; /* data table address (in mmap) */
74 : uint32_t dt_size; /* size of data table */
75 : };
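/*
 * Layout note (derived from sss_mmap_cache_init() below): the fields above
 * all point into a single mmap'd file arranged as
 *
 *     [ header | data table (dt_size) | free table (ft_size) | hash table (ht_size) ]
 *
 * with each region after the header padded to a 64-bit boundary via
 * MC_ALIGN64().  The free table is a bitmap holding one bit per data slot,
 * and the hash table is an array of slot numbers indexed by hash value.
 */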
76 :
77 : #define MC_FIND_BIT(base, num) \
78 : uint32_t n = (num); \
79 : uint8_t *b = (base) + n / 8; \
80 : uint8_t c = 0x80 >> (n % 8);
81 :
82 : #define MC_SET_BIT(base, num) do { \
83 : MC_FIND_BIT(base, num) \
84 : *b |= c; \
85 : } while (0)
86 :
87 : #define MC_CLEAR_BIT(base, num) do { \
88 : MC_FIND_BIT(base, num) \
89 : *b &= ~c; \
90 : } while (0)
91 :
92 : #define MC_PROBE_BIT(base, num, used) do { \
93 : MC_FIND_BIT(base, num) \
94 : if (*b & c) used = true; \
95 : else used = false; \
96 : } while (0)
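/*
 * Worked example: bits are assigned MSB-first within each byte, so slot 11
 * lands in byte 11 / 8 == 1 under mask 0x80 >> (11 % 8) == 0x10:
 *
 *     MC_SET_BIT(mcc->free_table, 11);           // free_table[1] |= 0x10
 *     MC_PROBE_BIT(mcc->free_table, 11, used);   // used == true
 *     MC_CLEAR_BIT(mcc->free_table, 11);         // free_table[1] &= ~0x10
 */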
97 :
98 : static inline
99 0 : uint32_t sss_mc_next_slot_with_hash(struct sss_mc_rec *rec,
100 : uint32_t hash)
101 : {
102 0 : if (rec->hash1 == hash) {
103 0 : return rec->next1;
104 0 : } else if (rec->hash2 == hash) {
105 0 : return rec->next2;
106 : } else {
107 : /* it should never happen. */
108 0 : return MC_INVALID_VAL;
109 : }
110 : }
111 :
112 : static inline
113 0 : void sss_mc_chain_slot_to_record_with_hash(struct sss_mc_rec *rec,
114 : uint32_t hash,
115 : uint32_t slot)
116 : {
117 : /* changing a single uint32_t is atomic, so there is no
118 : * need to use barriers in this case */
119 0 : if (rec->hash1 == hash) {
120 0 : rec->next1 = slot;
121 0 : } else if (rec->hash2 == hash) {
122 0 : rec->next2 = slot;
123 : }
124 0 : }
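/*
 * Each record belongs to up to two hash chains: one rooted at
 * hash_table[rec->hash1] (keyed by name) and one at hash_table[rec->hash2]
 * (keyed by uid/gid, or by a second name for initgroups).  The chains are
 * singly linked through rec->next1 and rec->next2, which is why the two
 * helpers above must pick the link matching the hash being followed.
 */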
125 :
126 : /* This function stores the corrupted memcache to disk for later
127 : * analysis. */
128 0 : static void sss_mc_save_corrupted(struct sss_mc_ctx *mc_ctx)
129 : {
130 : int err;
131 0 : int fd = -1;
132 0 : ssize_t written = -1;
133 0 : char *file = NULL;
134 : TALLOC_CTX *tmp_ctx;
135 :
136 0 : if (mc_ctx == NULL) {
137 0 : DEBUG(SSSDBG_TRACE_FUNC,
138 : "Cannot store uninitialized cache. Nothing to do.\n");
139 0 : return;
140 : }
141 :
142 0 : tmp_ctx = talloc_new(NULL);
143 0 : if (tmp_ctx == NULL) {
144 0 : DEBUG(SSSDBG_CRIT_FAILURE, "Out of memory.\n");
145 0 : return;
146 : }
147 :
148 0 : file = talloc_asprintf(tmp_ctx, "%s_%s",
149 : mc_ctx->file, "corrupted");
150 0 : if (file == NULL) {
151 0 : DEBUG(SSSDBG_CRIT_FAILURE, "Out of memory.\n");
152 0 : goto done;
153 : }
154 :
155 : /* We will always store only the last problematic cache state */
156 0 : fd = creat(file, 0600);
157 0 : if (fd == -1) {
158 0 : err = errno;
159 0 : DEBUG(SSSDBG_CRIT_FAILURE,
160 : "Failed to open file '%s' [%d]: %s\n",
161 : file, err, strerror(err));
162 0 : goto done;
163 : }
164 :
165 0 : written = sss_atomic_write_s(fd, mc_ctx->mmap_base, mc_ctx->mmap_size);
166 0 : if (written != mc_ctx->mmap_size) {
167 0 : if (written == -1) {
168 0 : err = errno;
169 0 : DEBUG(SSSDBG_CRIT_FAILURE,
170 : "write() failed [%d]: %s\n", err, strerror(err));
171 : } else {
172 0 : DEBUG(SSSDBG_CRIT_FAILURE,
173 : "write() returned %zd (expected %zu)\n",
174 : written, mc_ctx->mmap_size);
175 : }
176 0 : goto done;
177 : }
178 :
179 0 : sss_log(SSS_LOG_NOTICE,
180 : "Stored copy of corrupted mmap cache in file '%s'\n", file);
181 : done:
182 0 : if (fd != -1) {
183 0 : close(fd);
184 0 : if (written == -1) {
185 0 : err = unlink(file);
186 0 : if (err != 0) {
187 0 : err = errno;
188 0 : DEBUG(SSSDBG_CRIT_FAILURE,
189 : "Failed to remove file '%s': %s.\n", file,
190 : strerror(err));
191 : }
192 : }
193 : }
194 0 : talloc_free(tmp_ctx);
195 : }
196 :
197 0 : static uint32_t sss_mc_hash(struct sss_mc_ctx *mcc,
198 : const char *key, size_t len)
199 : {
200 0 : return murmurhash3(key, len, mcc->seed) % MC_HT_ELEMS(mcc->ht_size);
201 : }
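/*
 * Keys are hashed including their terminating NUL byte (callers pass
 * sized_string lengths or strlen() + 1), and the result is already reduced
 * modulo the number of hash buckets, so it can be used directly as an
 * index into mcc->hash_table.  Both the name key and the stringified
 * uid/gid key of a record go through this same function.
 */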
202 :
203 0 : static void sss_mc_add_rec_to_chain(struct sss_mc_ctx *mcc,
204 : struct sss_mc_rec *rec,
205 : uint32_t hash)
206 : {
207 : struct sss_mc_rec *cur;
208 : uint32_t slot;
209 :
210 0 : if (hash > MC_HT_ELEMS(mcc->ht_size)) {
211 : /* Invalid hash. This should never happen, but better
212 : * return than trying to access out of bounds memory */
213 0 : return;
214 : }
215 :
216 0 : slot = mcc->hash_table[hash];
217 0 : if (slot == MC_INVALID_VAL) {
218 : /* no previous record/collision, just add to hash table */
219 0 : mcc->hash_table[hash] = MC_PTR_TO_SLOT(mcc->data_table, rec);
220 0 : return;
221 : }
222 :
223 : do {
224 0 : cur = MC_SLOT_TO_PTR(mcc->data_table, slot, struct sss_mc_rec);
225 0 : if (cur == rec) {
226 : /* rec already stored in hash chain */
227 0 : return;
228 : }
229 0 : slot = sss_mc_next_slot_with_hash(cur, hash);
230 0 : } while (slot != MC_INVALID_VAL);
231 : /* end of chain, append our record here */
232 :
233 0 : slot = MC_PTR_TO_SLOT(mcc->data_table, rec);
234 0 : sss_mc_chain_slot_to_record_with_hash(cur, hash, slot);
235 : }
236 :
237 0 : static void sss_mc_rm_rec_from_chain(struct sss_mc_ctx *mcc,
238 : struct sss_mc_rec *rec,
239 : uint32_t hash)
240 : {
241 0 : struct sss_mc_rec *prev = NULL;
242 0 : struct sss_mc_rec *cur = NULL;
243 : uint32_t slot;
244 :
245 0 : if (hash > MC_HT_ELEMS(mcc->ht_size)) {
246 : /* This can happen if rec->hash1 and rec->hash2 were the same,
247 : * or if the hash is invalid. It is better to return than
248 : * to try to access out-of-bounds memory
249 : */
250 0 : return;
251 : }
252 :
253 0 : slot = mcc->hash_table[hash];
254 0 : if (slot == MC_INVALID_VAL) {
255 : /* The record has already been removed. This may happen if rec->hash1
256 : * and rec->hash2 are the same. (It is not very likely.)
257 : */
258 0 : return;
259 : }
260 0 : cur = MC_SLOT_TO_PTR(mcc->data_table, slot, struct sss_mc_rec);
261 0 : if (cur == rec) {
262 0 : mcc->hash_table[hash] = sss_mc_next_slot_with_hash(rec, hash);
263 : } else {
264 0 : slot = sss_mc_next_slot_with_hash(cur, hash);
265 0 : while (slot != MC_INVALID_VAL) {
266 0 : prev = cur;
267 0 : cur = MC_SLOT_TO_PTR(mcc->data_table, slot, struct sss_mc_rec);
268 0 : if (cur == rec) {
269 0 : slot = sss_mc_next_slot_with_hash(cur, hash);
270 :
271 0 : sss_mc_chain_slot_to_record_with_hash(prev, hash, slot);
272 0 : slot = MC_INVALID_VAL;
273 : } else {
274 0 : slot = sss_mc_next_slot_with_hash(cur, hash);
275 : }
276 : }
277 : }
278 : }
279 :
280 0 : static void sss_mc_free_slots(struct sss_mc_ctx *mcc, struct sss_mc_rec *rec)
281 : {
282 : uint32_t slot;
283 : uint32_t num;
284 : uint32_t i;
285 :
286 0 : slot = MC_PTR_TO_SLOT(mcc->data_table, rec);
287 0 : num = MC_SIZE_TO_SLOTS(rec->len);
288 0 : for (i = 0; i < num; i++) {
289 0 : MC_CLEAR_BIT(mcc->free_table, slot + i);
290 : }
291 0 : }
292 :
293 0 : static void sss_mc_invalidate_rec(struct sss_mc_ctx *mcc,
294 : struct sss_mc_rec *rec)
295 : {
296 0 : if (rec->b1 == MC_INVALID_VAL) {
297 : /* record already invalid */
298 0 : return;
299 : }
300 :
301 : /* Remove from hash chains */
302 : /* hash chain 1 */
303 0 : sss_mc_rm_rec_from_chain(mcc, rec, rec->hash1);
304 : /* hash chain 2 */
305 0 : sss_mc_rm_rec_from_chain(mcc, rec, rec->hash2);
306 :
307 : /* Clear from free_table */
308 0 : sss_mc_free_slots(mcc, rec);
309 :
310 : /* Invalidate record fields */
311 0 : MC_RAISE_INVALID_BARRIER(rec);
312 0 : memset(rec->data, MC_INVALID_VAL8, ((MC_SLOT_SIZE * MC_SIZE_TO_SLOTS(rec->len))
313 : - sizeof(struct sss_mc_rec)));
314 0 : rec->len = MC_INVALID_VAL32;
315 0 : rec->expire = MC_INVALID_VAL64;
316 0 : rec->next1 = MC_INVALID_VAL32;
317 0 : rec->next2 = MC_INVALID_VAL32;
318 0 : rec->hash1 = MC_INVALID_VAL32;
319 0 : rec->hash2 = MC_INVALID_VAL32;
320 0 : MC_LOWER_BARRIER(rec);
321 : }
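/*
 * Note the invalidation order above: the record is first unlinked from
 * both hash chains and its slots are cleared in the free table, so new
 * lookups and allocations stop finding it; only then is the payload wiped
 * under an invalid barrier, so a reader that still holds a pointer to the
 * record sees b1 != b2 (or MC_INVALID_VAL) and discards it.
 */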
322 :
323 0 : static bool sss_mc_is_valid_rec(struct sss_mc_ctx *mcc, struct sss_mc_rec *rec)
324 : {
325 : struct sss_mc_rec *self;
326 : uint32_t slot;
327 :
328 0 : if (((uint8_t *)rec < mcc->data_table) ||
329 0 : ((uint8_t *)rec > (mcc->data_table + mcc->dt_size - MC_SLOT_SIZE))) {
330 0 : return false;
331 : }
332 :
333 0 : if ((rec->b1 == MC_INVALID_VAL) ||
334 0 : (rec->b1 != rec->b2)) {
335 0 : return false;
336 : }
337 :
338 0 : if (!MC_CHECK_RECORD_LENGTH(mcc, rec)) {
339 0 : return false;
340 : }
341 :
342 0 : if (rec->expire == MC_INVALID_VAL64) {
343 0 : return false;
344 : }
345 :
346 : /* next record can be invalid if there are no next records */
347 :
348 0 : if (rec->hash1 == MC_INVALID_VAL32) {
349 0 : return false;
350 : } else {
351 0 : self = NULL;
352 0 : slot = mcc->hash_table[rec->hash1];
353 0 : while (slot != MC_INVALID_VAL32 && self != rec) {
354 0 : self = MC_SLOT_TO_PTR(mcc->data_table, slot, struct sss_mc_rec);
355 0 : slot = sss_mc_next_slot_with_hash(self, rec->hash1);
356 : }
357 0 : if (self != rec) {
358 0 : return false;
359 : }
360 : }
361 0 : if (rec->hash2 != MC_INVALID_VAL32) {
362 0 : self = NULL;
363 0 : slot = mcc->hash_table[rec->hash2];
364 0 : while (slot != MC_INVALID_VAL32 && self != rec) {
365 0 : self = MC_SLOT_TO_PTR(mcc->data_table, slot, struct sss_mc_rec);
366 0 : slot = sss_mc_next_slot_with_hash(self, rec->hash2);
367 : }
368 0 : if (self != rec) {
369 0 : return false;
370 : }
371 : }
372 :
373 : /* all tests passed */
374 0 : return true;
375 : }
376 :
377 : /* FIXME: This is a very simplistic, inefficient memory allocator;
378 : * it will just free the oldest entries regardless of expiration if it
379 : * has cycled the whole freebits map and found no empty slot */
380 0 : static errno_t sss_mc_find_free_slots(struct sss_mc_ctx *mcc,
381 : int num_slots, uint32_t *free_slot)
382 : {
383 : struct sss_mc_rec *rec;
384 : uint32_t tot_slots;
385 : uint32_t cur;
386 : uint32_t i;
387 : uint32_t t;
388 : bool used;
389 :
390 0 : tot_slots = mcc->ft_size * 8;
391 :
392 : /* Try to find a free slot w/o removing anything first */
393 : /* FIXME: is it really worth it? Maybe it is easier to
394 : * just recycle the next set of slots? */
395 0 : if ((mcc->next_slot + num_slots) > tot_slots) {
396 0 : cur = 0;
397 : } else {
398 0 : cur = mcc->next_slot;
399 : }
400 :
401 : /* search for enough (num_slots) consecutive zero bits, indicating
402 : * consecutive empty slots */
403 0 : for (i = 0; i < mcc->ft_size; i++) {
404 0 : t = cur / 8;
405 : /* if all full in this byte skip directly to the next */
406 0 : if (mcc->free_table[t] == 0xff) {
407 0 : cur = ((cur + 8) & ~7);
408 0 : if (cur >= tot_slots) {
409 0 : cur = 0;
410 : }
411 0 : continue;
412 : }
413 :
414 : /* at least one bit in this byte is marked as empty */
415 0 : for (t = ((cur + 8) & ~7) ; cur < t; cur++) {
416 0 : MC_PROBE_BIT(mcc->free_table, cur, used);
417 0 : if (!used) break;
418 : }
419 : /* check if we have enough slots before hitting the table end */
420 0 : if ((cur + num_slots) > tot_slots) {
421 0 : cur = 0;
422 0 : continue;
423 : }
424 :
425 : /* check if we have at least num_slots empty starting from the first
426 : * we found in the previous steps */
427 0 : for (t = cur + num_slots; cur < t; cur++) {
428 0 : MC_PROBE_BIT(mcc->free_table, cur, used);
429 0 : if (used) break;
430 : }
431 0 : if (cur == t) {
432 : /* ok found num_slots consecutive free bits */
433 0 : *free_slot = cur - num_slots;
434 0 : return EOK;
435 : }
436 : }
437 :
438 : /* no free slots found, free occupied slots after next_slot */
439 0 : if ((mcc->next_slot + num_slots) > tot_slots) {
440 0 : cur = 0;
441 : } else {
442 0 : cur = mcc->next_slot;
443 : }
444 0 : for (i = 0; i < num_slots; i++) {
445 0 : MC_PROBE_BIT(mcc->free_table, cur + i, used);
446 0 : if (used) {
447 : /* the first used slot should be a record header; however, we
448 : * carefully check that it is a valid header and hard-fail if not */
449 0 : rec = MC_SLOT_TO_PTR(mcc->data_table, cur + i, struct sss_mc_rec);
450 0 : if (!sss_mc_is_valid_rec(mcc, rec)) {
451 : /* this is a fatal error, the caller should probably just
452 : * invalidate the whole cache */
453 0 : return EFAULT;
454 : }
455 : /* next loop skip the whole record */
456 0 : i += MC_SIZE_TO_SLOTS(rec->len) - 1;
457 :
458 : /* finally invalidate record completely */
459 0 : sss_mc_invalidate_rec(mcc, rec);
460 : }
461 : }
462 :
463 0 : mcc->next_slot = cur + num_slots;
464 0 : *free_slot = cur;
465 0 : return EOK;
466 : }
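/*
 * Simplified sketch of the first pass above (illustrative only): with the
 * wrap-around and the whole-byte fast path stripped away, the scan is just
 * a search for `num` consecutive clear bits in an MSB-first bitmap:
 *
 *     static int find_clear_run(const uint8_t *map, uint32_t nbits, int num)
 *     {
 *         int run = 0;
 *         for (uint32_t i = 0; i < nbits; i++) {
 *             bool used = map[i / 8] & (0x80 >> (i % 8));
 *             run = used ? 0 : run + 1;
 *             if (run == num) {
 *                 return i - num + 1;   // first slot of the free run
 *             }
 *         }
 *         return -1;   // caller falls back to evicting records
 *     }
 *
 * The real code additionally starts at mcc->next_slot, skips fully used
 * (0xff) bytes in one step, and on failure invalidates whole records
 * starting at next_slot after validating each header.
 */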
467 :
468 0 : static errno_t sss_mc_get_strs_offset(struct sss_mc_ctx *mcc,
469 : size_t *_offset)
470 : {
471 0 : switch (mcc->type) {
472 : case SSS_MC_PASSWD:
473 0 : *_offset = offsetof(struct sss_mc_pwd_data, strs);
474 0 : return EOK;
475 : case SSS_MC_GROUP:
476 0 : *_offset = offsetof(struct sss_mc_grp_data, strs);
477 0 : return EOK;
478 : case SSS_MC_INITGROUPS:
479 0 : *_offset = offsetof(struct sss_mc_initgr_data, gids);
480 0 : return EOK;
481 : default:
482 0 : DEBUG(SSSDBG_FATAL_FAILURE, "Unknown memory cache type.\n");
483 0 : return EINVAL;
484 : }
485 : }
486 :
487 0 : static errno_t sss_mc_get_strs_len(struct sss_mc_ctx *mcc,
488 : struct sss_mc_rec *rec,
489 : size_t *_len)
490 : {
491 0 : switch (mcc->type) {
492 : case SSS_MC_PASSWD:
493 0 : *_len = ((struct sss_mc_pwd_data *)&rec->data)->strs_len;
494 0 : return EOK;
495 : case SSS_MC_GROUP:
496 0 : *_len = ((struct sss_mc_grp_data *)&rec->data)->strs_len;
497 0 : return EOK;
498 : case SSS_MC_INITGROUPS:
499 0 : *_len = ((struct sss_mc_initgr_data *)&rec->data)->data_len;
500 0 : return EOK;
501 : default:
502 0 : DEBUG(SSSDBG_FATAL_FAILURE, "Unknown memory cache type.\n");
503 0 : return EINVAL;
504 : }
505 : }
506 :
507 0 : static struct sss_mc_rec *sss_mc_find_record(struct sss_mc_ctx *mcc,
508 : struct sized_string *key)
509 : {
510 : struct sss_mc_rec *rec;
511 : uint32_t hash;
512 : uint32_t slot;
513 : rel_ptr_t name_ptr;
514 : char *t_key;
515 : size_t strs_offset;
516 : size_t strs_len;
517 : uint8_t *max_addr;
518 : errno_t ret;
519 :
520 0 : hash = sss_mc_hash(mcc, key->str, key->len);
521 :
522 0 : slot = mcc->hash_table[hash];
523 0 : if (!MC_SLOT_WITHIN_BOUNDS(slot, mcc->dt_size)) {
524 0 : return NULL;
525 : }
526 :
527 : /* Get max address of data table. */
528 0 : max_addr = mcc->data_table + mcc->dt_size;
529 :
530 0 : ret = sss_mc_get_strs_offset(mcc, &strs_offset);
531 0 : if (ret != EOK) {
532 0 : return NULL;
533 : }
534 :
535 0 : while (slot != MC_INVALID_VAL) {
536 0 : if (!MC_SLOT_WITHIN_BOUNDS(slot, mcc->dt_size)) {
537 0 : DEBUG(SSSDBG_FATAL_FAILURE,
538 : "Corrupted fastcache. Slot number too big.\n");
539 0 : sss_mc_save_corrupted(mcc);
540 0 : sss_mmap_cache_reset(mcc);
541 0 : return NULL;
542 : }
543 :
544 0 : rec = MC_SLOT_TO_PTR(mcc->data_table, slot, struct sss_mc_rec);
545 0 : ret = sss_mc_get_strs_len(mcc, rec, &strs_len);
546 0 : if (ret != EOK) {
547 0 : return NULL;
548 : }
549 :
550 0 : safealign_memcpy(&name_ptr, rec->data, sizeof(rel_ptr_t), NULL);
551 0 : if (key->len > strs_len
552 0 : || (name_ptr + key->len) > (strs_offset + strs_len)
553 0 : || (uint8_t *)rec->data + strs_offset + strs_len > max_addr) {
554 0 : DEBUG(SSSDBG_FATAL_FAILURE,
555 : "Corrupted fastcache. name_ptr value is %u.\n", name_ptr);
556 0 : sss_mc_save_corrupted(mcc);
557 0 : sss_mmap_cache_reset(mcc);
558 0 : return NULL;
559 : }
560 :
561 0 : t_key = (char *)rec->data + name_ptr;
562 0 : if (strcmp(key->str, t_key) == 0) {
563 0 : break;
564 : }
565 :
566 0 : slot = sss_mc_next_slot_with_hash(rec, hash);
567 : }
568 :
569 0 : if (slot == MC_INVALID_VAL) {
570 0 : return NULL;
571 : }
572 :
573 0 : return rec;
574 : }
575 :
576 0 : static errno_t sss_mc_get_record(struct sss_mc_ctx **_mcc,
577 : size_t rec_len,
578 : struct sized_string *key,
579 : struct sss_mc_rec **_rec)
580 : {
581 0 : struct sss_mc_ctx *mcc = *_mcc;
582 0 : struct sss_mc_rec *old_rec = NULL;
583 : struct sss_mc_rec *rec;
584 : int old_slots;
585 : int num_slots;
586 : uint32_t base_slot;
587 : errno_t ret;
588 : int i;
589 :
590 0 : num_slots = MC_SIZE_TO_SLOTS(rec_len);
591 :
592 0 : old_rec = sss_mc_find_record(mcc, key);
593 0 : if (old_rec) {
594 0 : old_slots = MC_SIZE_TO_SLOTS(old_rec->len);
595 :
596 0 : if (old_slots == num_slots) {
597 0 : *_rec = old_rec;
598 0 : return EOK;
599 : }
600 :
601 : /* slot size changed, invalidate record and fall through to get a
602 : * fully new record */
603 0 : sss_mc_invalidate_rec(mcc, old_rec);
604 : }
605 :
606 : /* we are going to use more space, find enough free slots */
607 0 : ret = sss_mc_find_free_slots(mcc, num_slots, &base_slot);
608 0 : if (ret != EOK) {
609 0 : if (ret == EFAULT) {
610 0 : DEBUG(SSSDBG_CRIT_FAILURE,
611 : "Fatal internal mmap cache error, invalidating cache!\n");
612 0 : (void)sss_mmap_cache_reinit(talloc_parent(mcc), -1, -1, _mcc);
613 : }
614 0 : return ret;
615 : }
616 :
617 0 : rec = MC_SLOT_TO_PTR(mcc->data_table, base_slot, struct sss_mc_rec);
618 :
619 : /* mark as not valid yet */
620 0 : MC_RAISE_INVALID_BARRIER(rec);
621 0 : rec->len = rec_len;
622 0 : rec->next1 = MC_INVALID_VAL;
623 0 : rec->next2 = MC_INVALID_VAL;
624 0 : rec->padding = MC_INVALID_VAL;
625 0 : MC_LOWER_BARRIER(rec);
626 :
627 : /* and now mark slots as used */
628 0 : for (i = 0; i < num_slots; i++) {
629 0 : MC_SET_BIT(mcc->free_table, base_slot + i);
630 : }
631 :
632 0 : *_rec = rec;
633 0 : return EOK;
634 : }
635 :
636 0 : static inline void sss_mmap_set_rec_header(struct sss_mc_ctx *mcc,
637 : struct sss_mc_rec *rec,
638 : size_t len, int ttl,
639 : const char *key1, size_t key1_len,
640 : const char *key2, size_t key2_len)
641 : {
642 0 : rec->len = len;
643 0 : rec->expire = time(NULL) + ttl;
644 0 : rec->hash1 = sss_mc_hash(mcc, key1, key1_len);
645 0 : rec->hash2 = sss_mc_hash(mcc, key2, key2_len);
646 0 : }
647 :
648 0 : static inline void sss_mmap_chain_in_rec(struct sss_mc_ctx *mcc,
649 : struct sss_mc_rec *rec)
650 : {
651 : /* name first */
652 0 : sss_mc_add_rec_to_chain(mcc, rec, rec->hash1);
653 : /* then uid/gid */
654 0 : sss_mc_add_rec_to_chain(mcc, rec, rec->hash2);
655 0 : }
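/*
 * Write-path summary for the *_store() functions below: sss_mc_get_record()
 * reserves the slots, the header and payload are filled in between
 * MC_RAISE_BARRIER() and MC_LOWER_BARRIER(), and only afterwards is the
 * record chained into its two hash buckets here.  A record therefore never
 * becomes reachable through the hash table while it is still being written.
 */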
656 :
657 : /***************************************************************************
658 : * generic invalidation
659 : ***************************************************************************/
660 :
661 4 : static errno_t sss_mmap_cache_invalidate(struct sss_mc_ctx *mcc,
662 : struct sized_string *key)
663 : {
664 : struct sss_mc_rec *rec;
665 :
666 4 : if (mcc == NULL) {
667 : /* cache not initialized ? */
668 4 : return EINVAL;
669 : }
670 :
671 0 : rec = sss_mc_find_record(mcc, key);
672 0 : if (rec == NULL) {
673 : /* nothing to invalidate */
674 0 : return ENOENT;
675 : }
676 :
677 0 : sss_mc_invalidate_rec(mcc, rec);
678 :
679 0 : return EOK;
680 : }
681 :
682 : /***************************************************************************
683 : * passwd map
684 : ***************************************************************************/
685 :
686 0 : errno_t sss_mmap_cache_pw_store(struct sss_mc_ctx **_mcc,
687 : struct sized_string *name,
688 : struct sized_string *pw,
689 : uid_t uid, gid_t gid,
690 : struct sized_string *gecos,
691 : struct sized_string *homedir,
692 : struct sized_string *shell)
693 : {
694 0 : struct sss_mc_ctx *mcc = *_mcc;
695 : struct sss_mc_rec *rec;
696 : struct sss_mc_pwd_data *data;
697 : struct sized_string uidkey;
698 : char uidstr[11];
699 : size_t data_len;
700 : size_t rec_len;
701 : size_t pos;
702 : int ret;
703 :
704 0 : if (mcc == NULL) {
705 : /* cache not initialized ? */
706 0 : return EINVAL;
707 : }
708 :
709 0 : ret = snprintf(uidstr, 11, "%ld", (long)uid);
710 0 : if (ret > 10) {
711 0 : return EINVAL;
712 : }
713 0 : to_sized_string(&uidkey, uidstr);
714 :
715 0 : data_len = name->len + pw->len + gecos->len + homedir->len + shell->len;
716 0 : rec_len = sizeof(struct sss_mc_rec) +
717 : sizeof(struct sss_mc_pwd_data) +
718 : data_len;
719 0 : if (rec_len > mcc->dt_size) {
720 0 : return ENOMEM;
721 : }
722 :
723 0 : ret = sss_mc_get_record(_mcc, rec_len, name, &rec);
724 0 : if (ret != EOK) {
725 0 : return ret;
726 : }
727 :
728 0 : data = (struct sss_mc_pwd_data *)rec->data;
729 0 : pos = 0;
730 :
731 0 : MC_RAISE_BARRIER(rec);
732 :
733 : /* header */
734 0 : sss_mmap_set_rec_header(mcc, rec, rec_len, mcc->valid_time_slot,
735 : name->str, name->len, uidkey.str, uidkey.len);
736 :
737 : /* passwd struct */
738 0 : data->name = MC_PTR_DIFF(data->strs, data);
739 0 : data->uid = uid;
740 0 : data->gid = gid;
741 0 : data->strs_len = data_len;
742 0 : memcpy(&data->strs[pos], name->str, name->len);
743 0 : pos += name->len;
744 0 : memcpy(&data->strs[pos], pw->str, pw->len);
745 0 : pos += pw->len;
746 0 : memcpy(&data->strs[pos], gecos->str, gecos->len);
747 0 : pos += gecos->len;
748 0 : memcpy(&data->strs[pos], homedir->str, homedir->len);
749 0 : pos += homedir->len;
750 0 : memcpy(&data->strs[pos], shell->str, shell->len);
751 0 : pos += shell->len;
752 :
753 0 : MC_LOWER_BARRIER(rec);
754 :
755 : /* finally chain the rec in the hash table */
756 0 : sss_mmap_chain_in_rec(mcc, rec);
757 :
758 0 : return EOK;
759 : }
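/*
 * Hedged usage sketch (the nctx field and the literal values are
 * illustrative, not taken from this file): a responder that has just
 * resolved a user would populate the fast cache roughly like this, with
 * every field wrapped in a sized_string so the stored lengths include the
 * terminating NUL:
 *
 *     struct sized_string name, pw, gecos, homedir, shell;
 *
 *     to_sized_string(&name, "alice");
 *     to_sized_string(&pw, "*");           // typically a placeholder
 *     to_sized_string(&gecos, "Alice Example");
 *     to_sized_string(&homedir, "/home/alice");
 *     to_sized_string(&shell, "/bin/bash");
 *
 *     ret = sss_mmap_cache_pw_store(&nctx->pwd_mc_ctx, &name, &pw,
 *                                   1000, 1000, &gecos, &homedir, &shell);
 *
 * ENOMEM from this call only means the whole record would not fit in the
 * data table.
 */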
760 :
761 2 : errno_t sss_mmap_cache_pw_invalidate(struct sss_mc_ctx *mcc,
762 : struct sized_string *name)
763 : {
764 2 : return sss_mmap_cache_invalidate(mcc, name);
765 : }
766 :
767 0 : errno_t sss_mmap_cache_pw_invalidate_uid(struct sss_mc_ctx *mcc, uid_t uid)
768 : {
769 : struct sss_mc_rec *rec;
770 : struct sss_mc_pwd_data *data;
771 : uint32_t hash;
772 : uint32_t slot;
773 : char *uidstr;
774 : errno_t ret;
775 :
776 0 : if (mcc == NULL) {
777 : /* cache not initialized ? */
778 0 : return EINVAL;
779 : }
780 :
781 0 : uidstr = talloc_asprintf(NULL, "%ld", (long)uid);
782 0 : if (!uidstr) {
783 0 : return ENOMEM;
784 : }
785 :
786 0 : hash = sss_mc_hash(mcc, uidstr, strlen(uidstr) + 1);
787 :
788 0 : slot = mcc->hash_table[hash];
789 0 : if (!MC_SLOT_WITHIN_BOUNDS(slot, mcc->dt_size)) {
790 0 : ret = ENOENT;
791 0 : goto done;
792 : }
793 :
794 0 : while (slot != MC_INVALID_VAL) {
795 0 : if (!MC_SLOT_WITHIN_BOUNDS(slot, mcc->dt_size)) {
796 0 : DEBUG(SSSDBG_FATAL_FAILURE, "Corrupted fastcache.\n");
797 0 : sss_mc_save_corrupted(mcc);
798 0 : sss_mmap_cache_reset(mcc);
799 0 : ret = ENOENT;
800 0 : goto done;
801 : }
802 :
803 0 : rec = MC_SLOT_TO_PTR(mcc->data_table, slot, struct sss_mc_rec);
804 0 : data = (struct sss_mc_pwd_data *)(&rec->data);
805 :
806 0 : if (uid == data->uid) {
807 0 : break;
808 : }
809 :
810 0 : slot = sss_mc_next_slot_with_hash(rec, hash);
811 : }
812 :
813 0 : if (slot == MC_INVALID_VAL) {
814 0 : ret = ENOENT;
815 0 : goto done;
816 : }
817 :
818 0 : sss_mc_invalidate_rec(mcc, rec);
819 :
820 0 : ret = EOK;
821 :
822 : done:
823 0 : talloc_zfree(uidstr);
824 0 : return ret;
825 : }
826 :
827 : /***************************************************************************
828 : * group map
829 : ***************************************************************************/
830 :
831 0 : int sss_mmap_cache_gr_store(struct sss_mc_ctx **_mcc,
832 : struct sized_string *name,
833 : struct sized_string *pw,
834 : gid_t gid, size_t memnum,
835 : char *membuf, size_t memsize)
836 : {
837 0 : struct sss_mc_ctx *mcc = *_mcc;
838 : struct sss_mc_rec *rec;
839 : struct sss_mc_grp_data *data;
840 : struct sized_string gidkey;
841 : char gidstr[11];
842 : size_t data_len;
843 : size_t rec_len;
844 : size_t pos;
845 : int ret;
846 :
847 0 : if (mcc == NULL) {
848 : /* cache not initialized ? */
849 0 : return EINVAL;
850 : }
851 :
852 0 : ret = snprintf(gidstr, 11, "%ld", (long)gid);
853 0 : if (ret > 10) {
854 0 : return EINVAL;
855 : }
856 0 : to_sized_string(&gidkey, gidstr);
857 :
858 0 : data_len = name->len + pw->len + memsize;
859 0 : rec_len = sizeof(struct sss_mc_rec) +
860 : sizeof(struct sss_mc_grp_data) +
861 : data_len;
862 0 : if (rec_len > mcc->dt_size) {
863 0 : return ENOMEM;
864 : }
865 :
866 0 : ret = sss_mc_get_record(_mcc, rec_len, name, &rec);
867 0 : if (ret != EOK) {
868 0 : return ret;
869 : }
870 :
871 0 : data = (struct sss_mc_grp_data *)rec->data;
872 0 : pos = 0;
873 :
874 0 : MC_RAISE_BARRIER(rec);
875 :
876 : /* header */
877 0 : sss_mmap_set_rec_header(mcc, rec, rec_len, mcc->valid_time_slot,
878 : name->str, name->len, gidkey.str, gidkey.len);
879 :
880 : /* group struct */
881 0 : data->name = MC_PTR_DIFF(data->strs, data);
882 0 : data->gid = gid;
883 0 : data->members = memnum;
884 0 : data->strs_len = data_len;
885 0 : memcpy(&data->strs[pos], name->str, name->len);
886 0 : pos += name->len;
887 0 : memcpy(&data->strs[pos], pw->str, pw->len);
888 0 : pos += pw->len;
889 0 : memcpy(&data->strs[pos], membuf, memsize);
890 0 : pos += memsize;
891 :
892 0 : MC_LOWER_BARRIER(rec);
893 :
894 : /* finally chain the rec in the hash table */
895 0 : sss_mmap_chain_in_rec(mcc, rec);
896 :
897 0 : return EOK;
898 : }
899 :
900 0 : errno_t sss_mmap_cache_gr_invalidate(struct sss_mc_ctx *mcc,
901 : struct sized_string *name)
902 : {
903 0 : return sss_mmap_cache_invalidate(mcc, name);
904 : }
905 :
906 0 : errno_t sss_mmap_cache_gr_invalidate_gid(struct sss_mc_ctx *mcc, gid_t gid)
907 : {
908 : struct sss_mc_rec *rec;
909 : struct sss_mc_grp_data *data;
910 : uint32_t hash;
911 : uint32_t slot;
912 : char *gidstr;
913 : errno_t ret;
914 :
915 0 : if (mcc == NULL) {
916 : /* cache not initialized ? */
917 0 : return EINVAL;
918 : }
919 :
920 0 : gidstr = talloc_asprintf(NULL, "%ld", (long)gid);
921 0 : if (!gidstr) {
922 0 : return ENOMEM;
923 : }
924 :
925 0 : hash = sss_mc_hash(mcc, gidstr, strlen(gidstr) + 1);
926 :
927 0 : slot = mcc->hash_table[hash];
928 0 : if (!MC_SLOT_WITHIN_BOUNDS(slot, mcc->dt_size)) {
929 0 : ret = ENOENT;
930 0 : goto done;
931 : }
932 :
933 0 : while (slot != MC_INVALID_VAL) {
934 0 : if (!MC_SLOT_WITHIN_BOUNDS(slot, mcc->dt_size)) {
935 0 : DEBUG(SSSDBG_FATAL_FAILURE, "Corrupted fastcache.\n");
936 0 : sss_mc_save_corrupted(mcc);
937 0 : sss_mmap_cache_reset(mcc);
938 0 : ret = ENOENT;
939 0 : goto done;
940 : }
941 :
942 0 : rec = MC_SLOT_TO_PTR(mcc->data_table, slot, struct sss_mc_rec);
943 0 : data = (struct sss_mc_grp_data *)(&rec->data);
944 :
945 0 : if (gid == data->gid) {
946 0 : break;
947 : }
948 :
949 0 : slot = sss_mc_next_slot_with_hash(rec, hash);
950 : }
951 :
952 0 : if (slot == MC_INVALID_VAL) {
953 0 : ret = ENOENT;
954 0 : goto done;
955 : }
956 :
957 0 : sss_mc_invalidate_rec(mcc, rec);
958 :
959 0 : ret = EOK;
960 :
961 : done:
962 0 : talloc_zfree(gidstr);
963 0 : return ret;
964 : }
965 :
966 0 : errno_t sss_mmap_cache_initgr_store(struct sss_mc_ctx **_mcc,
967 : struct sized_string *name,
968 : struct sized_string *unique_name,
969 : uint32_t num_groups,
970 : uint8_t *gids_buf)
971 : {
972 0 : struct sss_mc_ctx *mcc = *_mcc;
973 : struct sss_mc_rec *rec;
974 : struct sss_mc_initgr_data *data;
975 : size_t data_len;
976 : size_t rec_len;
977 : size_t pos;
978 : int ret;
979 :
980 0 : if (mcc == NULL) {
981 : /* cache not initialized ? */
982 0 : return EINVAL;
983 : }
984 :
985 : /* array of gids + name + unique_name */
986 0 : data_len = num_groups * sizeof(uint32_t) + name->len + unique_name->len;
987 0 : rec_len = sizeof(struct sss_mc_rec) + sizeof(struct sss_mc_initgr_data)
988 : + data_len;
989 0 : if (rec_len > mcc->dt_size) {
990 0 : return ENOMEM;
991 : }
992 :
993 : /* use unique name for searching potential old records */
994 0 : ret = sss_mc_get_record(_mcc, rec_len, unique_name, &rec);
995 0 : if (ret != EOK) {
996 0 : return ret;
997 : }
998 :
999 0 : data = (struct sss_mc_initgr_data *)rec->data;
1000 0 : pos = 0;
1001 :
1002 0 : MC_RAISE_BARRIER(rec);
1003 :
1004 : /* We cannot use two keys for searching in the initgroups cache.
1005 : * Use the first key twice.
1006 : */
1007 0 : sss_mmap_set_rec_header(mcc, rec, rec_len, mcc->valid_time_slot,
1008 : name->str, name->len,
1009 : unique_name->str, unique_name->len);
1010 :
1011 : /* initgroups struct */
1012 0 : data->strs_len = name->len + unique_name->len;
1013 0 : data->data_len = data_len;
1014 0 : data->num_groups = num_groups;
1015 0 : memcpy((char *)data->gids + pos, gids_buf, num_groups * sizeof(uint32_t));
1016 0 : pos += num_groups * sizeof(uint32_t);
1017 :
1018 0 : memcpy((char *)data->gids + pos, unique_name->str, unique_name->len);
1019 0 : data->strs = data->unique_name = MC_PTR_DIFF((char *)data->gids + pos, data);
1020 0 : pos += unique_name->len;
1021 :
1022 0 : memcpy((char *)data->gids + pos, name->str, name->len);
1023 0 : data->name = MC_PTR_DIFF((char *)data->gids + pos, data);
1024 :
1025 0 : MC_LOWER_BARRIER(rec);
1026 :
1027 : /* finally chain the rec in the hash table */
1028 0 : sss_mmap_chain_in_rec(mcc, rec);
1029 :
1030 0 : return EOK;
1031 : }
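/*
 * Layout note for the record written above: the payload starts with
 * num_groups GIDs, followed by the unique name and then the requested
 * name.  data->strs, data->unique_name and data->name are byte offsets
 * relative to the start of the struct (MC_PTR_DIFF against data), not
 * absolute pointers, so readers can resolve them inside their own mapping.
 */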
1032 :
1033 2 : errno_t sss_mmap_cache_initgr_invalidate(struct sss_mc_ctx *mcc,
1034 : struct sized_string *name)
1035 : {
1036 2 : return sss_mmap_cache_invalidate(mcc, name);
1037 : }
1038 :
1039 : /***************************************************************************
1040 : * initialization
1041 : ***************************************************************************/
1042 :
1043 : /* A copy of sss_mc_set_recycled is present in src/tools/tools_mc_util.c.
1044 : * If you modify this function, you should modify the duplicated function
1045 : * too. */
1046 0 : static errno_t sss_mc_set_recycled(int fd)
1047 : {
1048 0 : uint32_t w = SSS_MC_HEADER_RECYCLED;
1049 : struct sss_mc_header h;
1050 : off_t offset;
1051 : off_t pos;
1052 : ssize_t written;
1053 :
1054 0 : offset = MC_PTR_DIFF(&h.status, &h);
1055 :
1056 0 : pos = lseek(fd, offset, SEEK_SET);
1057 0 : if (pos == -1) {
1058 : /* What do we do now ? */
1059 0 : return errno;
1060 : }
1061 :
1062 0 : errno = 0;
1063 0 : written = sss_atomic_write_s(fd, (uint8_t *)&w, sizeof(h.status));
1064 0 : if (written == -1) {
1065 0 : return errno;
1066 : }
1067 :
1068 0 : if (written != sizeof(h.status)) {
1069 : /* Write error */
1070 0 : return EIO;
1071 : }
1072 :
1073 0 : return EOK;
1074 : }
1075 :
1076 : /*
1077 : * When we (re)create a new file we must mark the current file as recycled
1078 : * so active clients will abandon its use ASAP.
1079 : * We then unlink the current file and create a new one.
1080 : */
1081 0 : static errno_t sss_mc_create_file(struct sss_mc_ctx *mc_ctx)
1082 : {
1083 : mode_t old_mask;
1084 : int ofd;
1085 : int ret, uret;
1086 0 : useconds_t t = 50000;
1087 0 : int retries = 3;
1088 :
1089 0 : ofd = open(mc_ctx->file, O_RDWR);
1090 0 : if (ofd != -1) {
1091 0 : ret = sss_br_lock_file(ofd, 0, 1, retries, t);
1092 0 : if (ret != EOK) {
1093 0 : DEBUG(SSSDBG_FATAL_FAILURE,
1094 : "Failed to lock file %s.\n", mc_ctx->file);
1095 : }
1096 0 : ret = sss_mc_set_recycled(ofd);
1097 0 : if (ret) {
1098 0 : DEBUG(SSSDBG_FATAL_FAILURE, "Failed to mark mmap file %s as"
1099 : " recycled: %d(%s)\n",
1100 : mc_ctx->file, ret, strerror(ret));
1101 : }
1102 :
1103 0 : close(ofd);
1104 0 : } else if (errno != ENOENT) {
1105 0 : ret = errno;
1106 0 : DEBUG(SSSDBG_CRIT_FAILURE,
1107 : "Failed to open old memory cache file %s: %d(%s).\n",
1108 : mc_ctx->file, ret, strerror(ret));
1109 : }
1110 :
1111 0 : errno = 0;
1112 0 : ret = unlink(mc_ctx->file);
1113 0 : if (ret == -1 && errno != ENOENT) {
1114 0 : ret = errno;
1115 0 : DEBUG(SSSDBG_TRACE_FUNC, "Failed to rm mmap file %s: %d(%s)\n",
1116 : mc_ctx->file, ret, strerror(ret));
1117 : }
1118 :
1119 : /* temporarily relax umask as we need the file to be readable
1120 : * by everyone for now */
1121 0 : old_mask = umask(0022);
1122 :
1123 0 : errno = 0;
1124 0 : mc_ctx->fd = open(mc_ctx->file, O_CREAT | O_EXCL | O_RDWR, 0644);
1125 0 : umask(old_mask);
1126 0 : if (mc_ctx->fd == -1) {
1127 0 : ret = errno;
1128 0 : DEBUG(SSSDBG_CRIT_FAILURE, "Failed to open mmap file %s: %d(%s)\n",
1129 : mc_ctx->file, ret, strerror(ret));
1130 0 : return ret;
1131 : }
1132 :
1133 0 : ret = sss_br_lock_file(mc_ctx->fd, 0, 1, retries, t);
1134 0 : if (ret != EOK) {
1135 0 : DEBUG(SSSDBG_FATAL_FAILURE,
1136 : "Failed to lock file %s.\n", mc_ctx->file);
1137 0 : close(mc_ctx->fd);
1138 0 : mc_ctx->fd = -1;
1139 :
1140 : /* Report on unlink failures but don't overwrite the errno
1141 : * from sss_br_lock_file
1142 : */
1143 0 : errno = 0;
1144 0 : uret = unlink(mc_ctx->file);
1145 0 : if (uret == -1) {
1146 0 : uret = errno;
1147 0 : DEBUG(SSSDBG_TRACE_FUNC, "Failed to rm mmap file %s: %d(%s)\n",
1148 : mc_ctx->file, uret, strerror(uret));
1149 : }
1150 :
1151 0 : return ret;
1152 : }
1153 :
1154 0 : return ret;
1155 : }
1156 :
1157 0 : static void sss_mc_header_update(struct sss_mc_ctx *mc_ctx, int status)
1158 : {
1159 : struct sss_mc_header *h;
1160 :
1161 : /* update header using barriers */
1162 0 : h = (struct sss_mc_header *)mc_ctx->mmap_base;
1163 0 : MC_RAISE_BARRIER(h);
1164 0 : if (status == SSS_MC_HEADER_ALIVE) {
1165 : /* no reason to update anything else if the file is recycled or
1166 : * right before reset */
1167 0 : h->hash_table = MC_PTR_DIFF(mc_ctx->hash_table, mc_ctx->mmap_base);
1168 0 : h->free_table = MC_PTR_DIFF(mc_ctx->free_table, mc_ctx->mmap_base);
1169 0 : h->data_table = MC_PTR_DIFF(mc_ctx->data_table, mc_ctx->mmap_base);
1170 0 : h->ht_size = mc_ctx->ht_size;
1171 0 : h->ft_size = mc_ctx->ft_size;
1172 0 : h->dt_size = mc_ctx->dt_size;
1173 0 : h->major_vno = SSS_MC_MAJOR_VNO;
1174 0 : h->minor_vno = SSS_MC_MINOR_VNO;
1175 0 : h->seed = mc_ctx->seed;
1176 0 : h->reserved = 0;
1177 : }
1178 0 : h->status = status;
1179 0 : MC_LOWER_BARRIER(h);
1180 0 : }
1181 :
1182 0 : static int mc_ctx_destructor(struct sss_mc_ctx *mc_ctx)
1183 : {
1184 : int ret;
1185 :
1186 : /* Print a debug message to the logs if munmap() or close()
1187 : * fail, but always return 0 */
1188 :
1189 0 : if (mc_ctx->mmap_base != NULL) {
1190 0 : ret = munmap(mc_ctx->mmap_base, mc_ctx->mmap_size);
1191 0 : if (ret == -1) {
1192 0 : ret = errno;
1193 0 : DEBUG(SSSDBG_CRIT_FAILURE,
1194 : "Failed to unmap old memory cache file."
1195 : "Failed to unmap old memory cache file. "
1196 : }
1197 : }
1198 :
1199 0 : if (mc_ctx->fd != -1) {
1200 0 : ret = close(mc_ctx->fd);
1201 0 : if (ret == -1) {
1202 0 : ret = errno;
1203 0 : DEBUG(SSSDBG_CRIT_FAILURE,
1204 : "Failed to close old memory cache file."
1205 : "Failed to close old memory cache file. "
1206 : }
1207 : }
1208 :
1209 0 : return 0;
1210 : }
1211 :
1212 0 : errno_t sss_mmap_cache_init(TALLOC_CTX *mem_ctx, const char *name,
1213 : enum sss_mc_type type, size_t n_elem,
1214 : time_t timeout, struct sss_mc_ctx **mcc)
1215 : {
1216 0 : struct sss_mc_ctx *mc_ctx = NULL;
1217 : unsigned int rseed;
1218 : int payload;
1219 : int ret, dret;
1220 :
1221 0 : switch (type) {
1222 : case SSS_MC_PASSWD:
1223 0 : payload = SSS_AVG_PASSWD_PAYLOAD;
1224 0 : break;
1225 : case SSS_MC_GROUP:
1226 0 : payload = SSS_AVG_GROUP_PAYLOAD;
1227 0 : break;
1228 : case SSS_MC_INITGROUPS:
1229 0 : payload = SSS_AVG_INITGROUP_PAYLOAD;
1230 0 : break;
1231 : default:
1232 0 : return EINVAL;
1233 : }
1234 :
1235 0 : mc_ctx = talloc_zero(mem_ctx, struct sss_mc_ctx);
1236 0 : if (!mc_ctx) {
1237 0 : return ENOMEM;
1238 : }
1239 0 : mc_ctx->fd = -1;
1240 0 : talloc_set_destructor(mc_ctx, mc_ctx_destructor);
1241 :
1242 0 : mc_ctx->name = talloc_strdup(mc_ctx, name);
1243 0 : if (!mc_ctx->name) {
1244 0 : ret = ENOMEM;
1245 0 : goto done;
1246 : }
1247 :
1248 0 : mc_ctx->type = type;
1249 :
1250 0 : mc_ctx->valid_time_slot = timeout;
1251 :
1252 0 : mc_ctx->file = talloc_asprintf(mc_ctx, "%s/%s",
1253 : SSS_NSS_MCACHE_DIR, name);
1254 0 : if (!mc_ctx->file) {
1255 0 : ret = ENOMEM;
1256 0 : goto done;
1257 : }
1258 :
1259 : /* elements must always be a multiple of 8 to make things easier to handle,
1260 : * so we increase by the necessary amount if they are not a multiple */
1261 : /* We can use MC_ALIGN64 for this */
1262 0 : n_elem = MC_ALIGN64(n_elem);
1263 :
1264 : /* hash table is double the size because it will store both forward and
1265 : * reverse keys (name/uid, name/gid, ..) */
1266 0 : mc_ctx->ht_size = MC_HT_SIZE(n_elem * 2);
1267 0 : mc_ctx->dt_size = MC_DT_SIZE(n_elem, payload);
1268 0 : mc_ctx->ft_size = MC_FT_SIZE(n_elem);
1269 0 : mc_ctx->mmap_size = MC_HEADER_SIZE +
1270 0 : MC_ALIGN64(mc_ctx->dt_size) +
1271 0 : MC_ALIGN64(mc_ctx->ft_size) +
1272 0 : MC_ALIGN64(mc_ctx->ht_size);
1273 :
1274 :
1275 : /* for now ALWAYS create a new file on restart */
1276 :
1277 0 : ret = sss_mc_create_file(mc_ctx);
1278 0 : if (ret) {
1279 0 : goto done;
1280 : }
1281 :
1282 0 : ret = ftruncate(mc_ctx->fd, mc_ctx->mmap_size);
1283 0 : if (ret == -1) {
1284 0 : ret = errno;
1285 0 : DEBUG(SSSDBG_CRIT_FAILURE, "Failed to resize file %s: %d(%s)\n",
1286 : mc_ctx->file, ret, strerror(ret));
1287 0 : goto done;
1288 : }
1289 :
1290 0 : mc_ctx->mmap_base = mmap(NULL, mc_ctx->mmap_size,
1291 : PROT_READ | PROT_WRITE,
1292 : MAP_SHARED, mc_ctx->fd, 0);
1293 0 : if (mc_ctx->mmap_base == MAP_FAILED) {
1294 0 : ret = errno;
1295 0 : DEBUG(SSSDBG_CRIT_FAILURE, "Failed to mmap file %s(%zu): %d(%s)\n",
1296 : mc_ctx->file, mc_ctx->mmap_size,
1297 : ret, strerror(ret));
1298 0 : goto done;
1299 : }
1300 :
1301 0 : mc_ctx->data_table = MC_PTR_ADD(mc_ctx->mmap_base, MC_HEADER_SIZE);
1302 0 : mc_ctx->free_table = MC_PTR_ADD(mc_ctx->data_table,
1303 : MC_ALIGN64(mc_ctx->dt_size));
1304 0 : mc_ctx->hash_table = MC_PTR_ADD(mc_ctx->free_table,
1305 : MC_ALIGN64(mc_ctx->ft_size));
1306 :
1307 0 : memset(mc_ctx->data_table, 0xff, mc_ctx->dt_size);
1308 0 : memset(mc_ctx->free_table, 0x00, mc_ctx->ft_size);
1309 0 : memset(mc_ctx->hash_table, 0xff, mc_ctx->ht_size);
1310 :
1311 : /* generate a pseudo-random seed.
1312 : * Needed to fend off dictionary-based collision attacks */
1313 0 : rseed = time(NULL) * getpid();
1314 0 : mc_ctx->seed = rand_r(&rseed);
1315 :
1316 0 : sss_mc_header_update(mc_ctx, SSS_MC_HEADER_ALIVE);
1317 :
1318 0 : ret = EOK;
1319 :
1320 : done:
1321 0 : if (ret) {
1322 : /* Closing the file descriptor and unmapping the file
1323 : * from memory are done in the mc_ctx_destructor. */
1324 0 : if (mc_ctx && mc_ctx->file && mc_ctx->fd != -1) {
1325 0 : dret = unlink(mc_ctx->file);
1326 0 : if (dret == -1) {
1327 0 : dret = errno;
1328 0 : DEBUG(SSSDBG_CRIT_FAILURE,
1329 : "Failed to rm mmap file %s: %d(%s)\n", mc_ctx->file,
1330 : dret, strerror(dret));
1331 : }
1332 : }
1333 :
1334 0 : talloc_free(mc_ctx);
1335 : } else {
1336 0 : *mcc = mc_ctx;
1337 : }
1338 0 : return ret;
1339 : }
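/*
 * Hedged usage sketch (names, sizes and timeout are illustrative): a
 * responder creates one cache per map at startup and can later rebuild it
 * in place if it gets corrupted:
 *
 *     ret = sss_mmap_cache_init(nctx, "passwd", SSS_MC_PASSWD,
 *                               100000, 300, &nctx->pwd_mc_ctx);
 *     ...
 *     // keep the previous element count and timeout, just recreate the file
 *     ret = sss_mmap_cache_reinit(nctx, -1, -1, &nctx->pwd_mc_ctx);
 *
 * Passing -1 as n_elem or timeout to sss_mmap_cache_reinit() reuses the
 * values from the context being replaced, as implemented below.
 */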
1340 :
1341 0 : errno_t sss_mmap_cache_reinit(TALLOC_CTX *mem_ctx, size_t n_elem,
1342 : time_t timeout, struct sss_mc_ctx **mc_ctx)
1343 : {
1344 : errno_t ret;
1345 0 : TALLOC_CTX* tmp_ctx = NULL;
1346 : char *name;
1347 : enum sss_mc_type type;
1348 :
1349 0 : if (mc_ctx == NULL || (*mc_ctx) == NULL) {
1350 0 : DEBUG(SSSDBG_CRIT_FAILURE,
1351 : "Unable to re-init uninitialized memory cache.\n");
1352 0 : return EINVAL;
1353 : }
1354 :
1355 0 : tmp_ctx = talloc_new(NULL);
1356 0 : if (tmp_ctx == NULL) {
1357 0 : DEBUG(SSSDBG_CRIT_FAILURE, "Out of memory.\n");
1358 0 : return ENOMEM;
1359 : }
1360 :
1361 0 : name = talloc_strdup(tmp_ctx, (*mc_ctx)->name);
1362 0 : if (name == NULL) {
1363 0 : DEBUG(SSSDBG_CRIT_FAILURE, "Out of memory.\n");
1364 0 : ret = ENOMEM;
1365 0 : goto done;
1366 : }
1367 :
1368 0 : type = (*mc_ctx)->type;
1369 :
1370 0 : if (n_elem == (size_t)-1) {
1371 0 : n_elem = (*mc_ctx)->ft_size * 8;
1372 : }
1373 :
1374 0 : if (timeout == (time_t)-1) {
1375 0 : timeout = (*mc_ctx)->valid_time_slot;
1376 : }
1377 :
1378 0 : talloc_free(*mc_ctx);
1379 :
1380 : /* make sure we do not leave a potentially freed pointer around */
1381 0 : *mc_ctx = NULL;
1382 :
1383 0 : ret = sss_mmap_cache_init(mem_ctx, name, type, n_elem, timeout, mc_ctx);
1384 0 : if (ret != EOK) {
1385 0 : DEBUG(SSSDBG_CRIT_FAILURE, "Failed to re-initialize mmap cache.\n");
1386 0 : goto done;
1387 : }
1388 :
1389 : done:
1390 0 : talloc_free(tmp_ctx);
1391 0 : return ret;
1392 : }
1393 :
1394 : /* Erase all contents of the mmap cache. This will bring the cache
1395 : * to the same state as if it had just been initialized. */
1396 0 : void sss_mmap_cache_reset(struct sss_mc_ctx *mc_ctx)
1397 : {
1398 0 : if (mc_ctx == NULL) {
1399 0 : DEBUG(SSSDBG_TRACE_FUNC,
1400 : "Fastcache not initialized. Nothing to do.\n");
1401 0 : return;
1402 : }
1403 :
1404 0 : sss_mc_header_update(mc_ctx, SSS_MC_HEADER_UNINIT);
1405 :
1406 : /* Reset the mmaped area */
1407 0 : memset(mc_ctx->data_table, 0xff, mc_ctx->dt_size);
1408 0 : memset(mc_ctx->free_table, 0x00, mc_ctx->ft_size);
1409 0 : memset(mc_ctx->hash_table, 0xff, mc_ctx->ht_size);
1410 :
1411 0 : sss_mc_header_update(mc_ctx, SSS_MC_HEADER_ALIVE);
1412 : }