1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 
25 #include <assert.h>
26 #include <inttypes.h>
27 #include <stdbool.h>
28 #include <stddef.h>
29 #include <stdlib.h>
30 #include <sys/types.h>
31 #include <sys/stat.h>
32 #include <fcntl.h>
33 
34 #include "util/compress.h"
35 #include "util/crc32.h"
36 #include "util/disk_cache.h"
37 #include "util/disk_cache_os.h"
38 
39 #if DETECT_OS_WINDOWS
40 
41 #include <windows.h>
42 
/* Mix an identifier for the module containing 'ptr' into the SHA-1 context.
 *
 * The identifier is the last-write time of the module file on disk, so the
 * cache is invalidated whenever the driver binary changes.
 *
 * Returns true if an identifier was added to 'ctx', false on any failure.
 */
bool
disk_cache_get_function_identifier(void *ptr, struct mesa_sha1 *ctx)
{
   HMODULE mod = NULL;
   /* FROM_ADDRESS: interpret 'ptr' as an address inside the module rather
    * than a module name; UNCHANGED_REFCOUNT: don't bump the module refcount.
    */
   GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
                      (LPCWSTR)ptr,
                      &mod);
   if (!mod)
      return false;

   WCHAR filename[MAX_PATH];
   DWORD filename_length = GetModuleFileNameW(mod, filename, ARRAY_SIZE(filename));

   /* A return value equal to the buffer size means the path was truncated. */
   if (filename_length == 0 || filename_length == ARRAY_SIZE(filename))
      return false;

   HANDLE mod_as_file = CreateFileW(
        filename,
        GENERIC_READ,
        FILE_SHARE_READ,
        NULL,
        OPEN_EXISTING,
        FILE_ATTRIBUTE_NORMAL,
        NULL);
   if (mod_as_file == INVALID_HANDLE_VALUE)
      return false;

   FILETIME time;
   /* NULL, NULL, &time requests only the last-write time. */
   bool ret = GetFileTime(mod_as_file, NULL, NULL, &time);
   if (ret)
      _mesa_sha1_update(ctx, &time, sizeof(time));
   CloseHandle(mod_as_file);
   return ret;
}
77 
78 #endif
79 
80 #ifdef ENABLE_SHADER_CACHE
81 
82 #if DETECT_OS_WINDOWS
83 /* TODO: implement disk cache support on windows */
84 
85 #else
86 
87 #include <dirent.h>
88 #include <errno.h>
89 #include <pwd.h>
90 #include <stdio.h>
91 #include <string.h>
92 #include <sys/file.h>
93 #include <sys/mman.h>
94 #include <sys/types.h>
95 #include <sys/stat.h>
96 #include <unistd.h>
97 
98 #include "util/blob.h"
99 #include "util/crc32.h"
100 #include "util/u_debug.h"
101 #include "util/ralloc.h"
102 #include "util/rand_xor.h"
103 
104 /* Create a directory named 'path' if it does not already exist.
105  *
106  * Returns: 0 if path already exists as a directory or if created.
107  *         -1 in all other cases.
108  */
static int
mkdir_if_needed(const char *path)
{
   struct stat sb;

   /* An existing path is acceptable only if it is a directory. */
   if (stat(path, &sb) == 0) {
      if (!S_ISDIR(sb.st_mode)) {
         fprintf(stderr, "Cannot use %s for shader cache (not a directory)"
                         "---disabling.\n", path);
         return -1;
      }
      return 0;
   }

   /* Losing a creation race with another process is not an error. */
   if (mkdir(path, 0700) == 0 || errno == EEXIST)
      return 0;

   fprintf(stderr, "Failed to create %s for shader cache (%s)---disabling.\n",
           path, strerror(errno));

   return -1;
}
136 
137 /* Concatenate an existing path and a new name to form a new path.  If the new
138  * path does not exist as a directory, create it then return the resulting
139  * name of the new path (ralloc'ed off of 'ctx').
140  *
141  * Returns NULL on any error, such as:
142  *
143  *      <path> does not exist or is not a directory
144  *      <path>/<name> exists but is not a directory
145  *      <path>/<name> cannot be created as a directory
146  */
static char *
concatenate_and_mkdir(void *ctx, const char *path, const char *name)
{
   struct stat sb;

   /* The parent must already exist and actually be a directory. */
   if (stat(path, &sb) != 0 || !S_ISDIR(sb.st_mode))
      return NULL;

   char *new_path = ralloc_asprintf(ctx, "%s/%s", path, name);

   /* On failure the ralloc'ed string stays owned by 'ctx'. */
   return mkdir_if_needed(new_path) == 0 ? new_path : NULL;
}
163 
/* One candidate file for LRU eviction, linked on a list kept ordered from
 * most recently used to least recently used.
 */
struct lru_file {
   struct list_head node;     /* Link in the candidate list. */
   char *lru_name;            /* malloc'ed name (full path after list is built). */
   size_t lru_file_size;      /* On-disk size in bytes (st_blocks * 512). */
   time_t lru_atime;          /* Last access time of the file. */
};
170 
/* Free a list created by choose_lru_file_matching(): every entry, each
 * entry's name, and the malloc'ed list head itself.
 */
static void
free_lru_file_list(struct list_head *lru_file_list)
{
   struct lru_file *e, *next;
   /* _SAFE variant because each entry is freed while iterating. */
   LIST_FOR_EACH_ENTRY_SAFE(e, next, lru_file_list, node) {
      free(e->lru_name);
      free(e);
   }
   free(lru_file_list);
}
181 
/* Given a directory path and predicate function, create a linked list of entries
 * with the oldest access time in that directory for which the predicate
 * returns true.
 *
 * Returns: A malloc'ed linked list for the paths of chosen files, (or
 * NULL on any error). The caller should free the linked list via
 * free_lru_file_list() when finished.
 */
190 static struct list_head *
choose_lru_file_matching(const char * dir_path,bool (* predicate)(const char * dir_path,const struct stat *,const char *,const size_t))191 choose_lru_file_matching(const char *dir_path,
192                          bool (*predicate)(const char *dir_path,
193                                            const struct stat *,
194                                            const char *, const size_t))
195 {
196    DIR *dir;
197    struct dirent *dir_ent;
198 
199    dir = opendir(dir_path);
200    if (dir == NULL)
201       return NULL;
202 
203    const int dir_fd = dirfd(dir);
204 
205    /* First count the number of files in the directory */
206    unsigned total_file_count = 0;
207    while ((dir_ent = readdir(dir)) != NULL) {
208 #ifdef HAVE_DIRENT_D_TYPE
209       if (dir_ent->d_type == DT_REG) { /* If the entry is a regular file */
210          total_file_count++;
211       }
212 #else
213       struct stat st;
214 
215       if (fstatat(dir_fd, dir_ent->d_name, &st, AT_SYMLINK_NOFOLLOW) == 0) {
216          if (S_ISREG(st.st_mode)) {
217             total_file_count++;
218          }
219       }
220 #endif
221    }
222 
223    /* Reset to the start of the directory */
224    rewinddir(dir);
225 
226    /* Collect 10% of files in this directory for removal. Note: This should work
227     * out to only be around 0.04% of total cache items.
228     */
229    unsigned lru_file_count = total_file_count > 10 ? total_file_count / 10 : 1;
230    struct list_head *lru_file_list = malloc(sizeof(struct list_head));
231    list_inithead(lru_file_list);
232 
233    unsigned processed_files = 0;
234    while (1) {
235       dir_ent = readdir(dir);
236       if (dir_ent == NULL)
237          break;
238 
239       struct stat sb;
240       if (fstatat(dir_fd, dir_ent->d_name, &sb, 0) == 0) {
241          struct lru_file *entry = NULL;
242          if (!list_is_empty(lru_file_list))
243             entry = list_first_entry(lru_file_list, struct lru_file, node);
244 
245          if (!entry|| sb.st_atime < entry->lru_atime) {
246             size_t len = strlen(dir_ent->d_name);
247             if (!predicate(dir_path, &sb, dir_ent->d_name, len))
248                continue;
249 
250             bool new_entry = false;
251             if (processed_files < lru_file_count) {
252                entry = calloc(1, sizeof(struct lru_file));
253                new_entry = true;
254             }
255             processed_files++;
256 
257             char *tmp = realloc(entry->lru_name, len + 1);
258             if (tmp) {
259                /* Find location to insert new lru item. We want to keep the
260                 * list ordering from most recently used to least recently used.
261                 * This allows us to just evict the head item from the list as
262                 * we process the directory and find older entrys.
263                 */
264                struct list_head *list_node = lru_file_list;
265                struct lru_file *e;
266                LIST_FOR_EACH_ENTRY(e, lru_file_list, node) {
267                   if (sb.st_atime < entry->lru_atime) {
268                      list_node = &e->node;
269                      break;
270                   }
271                }
272 
273                if (new_entry) {
274                   list_addtail(&entry->node, list_node);
275                } else {
276                   if (list_node != lru_file_list) {
277                      list_del(lru_file_list);
278                      list_addtail(lru_file_list, list_node);
279                   }
280                }
281 
282                entry->lru_name = tmp;
283                memcpy(entry->lru_name, dir_ent->d_name, len + 1);
284                entry->lru_atime = sb.st_atime;
285                entry->lru_file_size = sb.st_blocks * 512;
286             }
287          }
288       }
289    }
290 
291    if (list_is_empty(lru_file_list)) {
292       closedir(dir);
293       free(lru_file_list);
294       return NULL;
295    }
296 
297    /* Create the full path for the file list we found */
298    struct lru_file *e;
299    LIST_FOR_EACH_ENTRY(e, lru_file_list, node) {
300       char *filename = e->lru_name;
301       if (asprintf(&e->lru_name, "%s/%s", dir_path, filename) < 0)
302          e->lru_name = NULL;
303 
304       free(filename);
305    }
306 
307    closedir(dir);
308 
309    return lru_file_list;
310 }
311 
312 /* Is entry a regular file, and not having a name with a trailing
313  * ".tmp"
314  */
315 static bool
is_regular_non_tmp_file(const char * path,const struct stat * sb,const char * d_name,const size_t len)316 is_regular_non_tmp_file(const char *path, const struct stat *sb,
317                         const char *d_name, const size_t len)
318 {
319    if (!S_ISREG(sb->st_mode))
320       return false;
321 
322    if (len >= 4 && strcmp(&d_name[len-4], ".tmp") == 0)
323       return false;
324 
325    return true;
326 }
327 
328 /* Returns the size of the deleted file, (or 0 on any error). */
329 static size_t
unlink_lru_file_from_directory(const char * path)330 unlink_lru_file_from_directory(const char *path)
331 {
332    struct list_head *lru_file_list =
333       choose_lru_file_matching(path, is_regular_non_tmp_file);
334    if (lru_file_list == NULL)
335       return 0;
336 
337    assert(!list_is_empty(lru_file_list));
338 
339    size_t total_unlinked_size = 0;
340    struct lru_file *e;
341    LIST_FOR_EACH_ENTRY(e, lru_file_list, node) {
342       if (unlink(e->lru_name) == 0)
343          total_unlinked_size += e->lru_file_size;
344    }
345    free_lru_file_list(lru_file_list);
346 
347    return total_unlinked_size;
348 }
349 
350 /* Is entry a directory with a two-character name, (and not the
351  * special name of ".."). We also return false if the dir is empty.
352  */
static bool
is_two_character_sub_directory(const char *path, const struct stat *sb,
                               const char *d_name, const size_t len)
{
   /* Must be a directory with a two-character name, excluding "..". */
   if (!S_ISDIR(sb->st_mode) || len != 2 || strcmp(d_name, "..") == 0)
      return false;

   char *subdir;
   if (asprintf(&subdir, "%s/%s", path, d_name) == -1)
      return false;

   DIR *dir = opendir(subdir);
   free(subdir);
   if (dir == NULL)
      return false;

   /* Count entries, stopping as soon as we see more than "." and "..". */
   unsigned n_entries = 0;
   for (struct dirent *d = readdir(dir); d != NULL; d = readdir(dir)) {
      if (++n_entries > 2)
         break;
   }
   closedir(dir);

   /* A dir containing only '.' and '..' is empty and not a candidate. */
   return n_entries > 2;
}
389 
390 /* Create the directory that will be needed for the cache file for \key.
391  *
392  * Obviously, the implementation here must closely match
393  * _get_cache_file above.
394 */
395 static void
make_cache_file_directory(struct disk_cache * cache,const cache_key key)396 make_cache_file_directory(struct disk_cache *cache, const cache_key key)
397 {
398    char *dir;
399    char buf[41];
400 
401    _mesa_sha1_format(buf, key);
402    if (asprintf(&dir, "%s/%c%c", cache->path, buf[0], buf[1]) == -1)
403       return;
404 
405    mkdir_if_needed(dir);
406    free(dir);
407 }
408 
/* Read exactly 'count' bytes from 'fd' into 'buf'.
 *
 * Returns 'count' on success, or -1 on any error or premature EOF.
 */
static ssize_t
read_all(int fd, void *buf, size_t count)
{
   char *dst = buf;
   size_t total = 0;

   while (total < count) {
      ssize_t n = read(fd, dst + total, count - total);
      /* Treat EOF before 'count' bytes the same as a read error. */
      if (n <= 0)
         return -1;
      total += n;
   }
   return total;
}
423 
/* Write exactly 'count' bytes from 'buf' to 'fd'.
 *
 * Returns 'count' on success, or -1 on any write error.
 */
static ssize_t
write_all(int fd, const void *buf, size_t count)
{
   const char *src = buf;
   size_t total = 0;

   while (total < count) {
      ssize_t n = write(fd, src + total, count - total);
      if (n == -1)
         return -1;
      total += n;
   }
   return total;
}
438 
439 /* Evict least recently used cache item */
void
disk_cache_evict_lru_item(struct disk_cache *cache)
{
   char *dir_path;

   /* With a reasonably-sized, full cache, (and with keys generated
    * from a cryptographic hash), we can choose two random hex digits
    * and reasonably expect the directory to exist with a file in it.
    * Provides pseudo-LRU eviction to reduce checking all cache files.
    */
   uint64_t rand64 = rand_xorshift128plus(cache->seed_xorshift128plus);
   if (asprintf(&dir_path, "%s/%02" PRIx64 , cache->path, rand64 & 0xff) < 0)
      return;

   size_t size = unlink_lru_file_from_directory(dir_path);

   free(dir_path);

   if (size) {
      /* Negating the unsigned value wraps; the atomic add effectively
       * subtracts the freed bytes from the tracked cache size.
       */
      p_atomic_add(cache->size, - (uint64_t)size);
      return;
   }

   /* In the case where the random choice of directory didn't find
    * something, we choose the least recently accessed from the
    * existing directories.
    *
    * Really, the only reason this code exists is to allow the unit
    * tests to work, (which use an artificially-small cache to be able
    * to force a single cached item to be evicted).
    */
   struct list_head *lru_file_list =
      choose_lru_file_matching(cache->path, is_two_character_sub_directory);
   if (lru_file_list == NULL)
      return;

   assert(!list_is_empty(lru_file_list));

   /* Only evict from the single least-recently-used subdirectory here. */
   struct lru_file *lru_file_dir =
      list_first_entry(lru_file_list, struct lru_file, node);

   size = unlink_lru_file_from_directory(lru_file_dir->lru_name);

   free_lru_file_list(lru_file_list);

   if (size)
      p_atomic_add(cache->size, - (uint64_t)size);
}
488 
489 void
disk_cache_evict_item(struct disk_cache * cache,char * filename)490 disk_cache_evict_item(struct disk_cache *cache, char *filename)
491 {
492    struct stat sb;
493    if (stat(filename, &sb) == -1) {
494       free(filename);
495       return;
496    }
497 
498    unlink(filename);
499    free(filename);
500 
501    if (sb.st_blocks)
502       p_atomic_add(cache->size, - (uint64_t)sb.st_blocks * 512);
503 }
504 
/* Validate a raw cache file image and return its uncompressed payload.
 *
 * 'cache_item' is the full file contents ('cache_item_size' bytes). The
 * layout read here must match create_cache_item_header_and_blob():
 * driver keys blob, metadata type (plus optional GLSL key list), CRC
 * header, then the (possibly compressed) payload.
 *
 * Returns a malloc'ed buffer holding the uncompressed data (its size is
 * stored in *size when non-NULL), or NULL on any validation failure.
 */
static void *
parse_and_validate_cache_item(struct disk_cache *cache, void *cache_item,
                              size_t cache_item_size, size_t *size)
{
   uint8_t *uncompressed_data = NULL;

   struct blob_reader ci_blob_reader;
   blob_reader_init(&ci_blob_reader, cache_item, cache_item_size);

   size_t header_size = cache->driver_keys_blob_size;
   const void *keys_blob = blob_read_bytes(&ci_blob_reader, header_size);
   if (ci_blob_reader.overrun)
      goto fail;

   /* Check for extremely unlikely hash collisions */
   if (memcmp(cache->driver_keys_blob, keys_blob, header_size) != 0) {
      assert(!"Mesa cache keys mismatch!");
      goto fail;
   }

   uint32_t md_type = blob_read_uint32(&ci_blob_reader);
   if (ci_blob_reader.overrun)
      goto fail;

   if (md_type == CACHE_ITEM_TYPE_GLSL) {
      uint32_t num_keys = blob_read_uint32(&ci_blob_reader);
      if (ci_blob_reader.overrun)
         goto fail;

      /* The cache item metadata is currently just used for distributing
       * precompiled shaders, they are not used by Mesa so just skip them for
       * now.
       * TODO: pass the metadata back to the caller and do some basic
       * validation.
       */
      const void UNUSED *metadata =
         blob_read_bytes(&ci_blob_reader, num_keys * sizeof(cache_key));
      if (ci_blob_reader.overrun)
         goto fail;
   }

   /* Load the CRC that was created when the file was written. */
   struct cache_entry_file_data *cf_data =
      (struct cache_entry_file_data *)
         blob_read_bytes(&ci_blob_reader, sizeof(struct cache_entry_file_data));
   if (ci_blob_reader.overrun)
      goto fail;

   /* Everything after the headers is the payload. */
   size_t cache_data_size = ci_blob_reader.end - ci_blob_reader.current;
   const uint8_t *data = (uint8_t *) blob_read_bytes(&ci_blob_reader, cache_data_size);

   /* Check the data for corruption */
   if (cf_data->crc32 != util_hash_crc32(data, cache_data_size))
      goto fail;

   /* Uncompress the cache data */
   uncompressed_data = malloc(cf_data->uncompressed_size);
   if (!uncompressed_data)
      goto fail;

   if (cache->compression_disabled) {
      /* Stored uncompressed: sizes must agree exactly. */
      if (cf_data->uncompressed_size != cache_data_size)
         goto fail;

      memcpy(uncompressed_data, data, cache_data_size);
   } else {
      if (!util_compress_inflate(data, cache_data_size, uncompressed_data,
                                 cf_data->uncompressed_size))
         goto fail;
   }

   if (size)
      *size = cf_data->uncompressed_size;

   return uncompressed_data;

 fail:
   if (uncompressed_data)
      free(uncompressed_data);

   return NULL;
}
587 
588 void *
disk_cache_load_item(struct disk_cache * cache,char * filename,size_t * size)589 disk_cache_load_item(struct disk_cache *cache, char *filename, size_t *size)
590 {
591    uint8_t *data = NULL;
592 
593    int fd = open(filename, O_RDONLY | O_CLOEXEC);
594    if (fd == -1)
595       goto fail;
596 
597    struct stat sb;
598    if (fstat(fd, &sb) == -1)
599       goto fail;
600 
601    data = malloc(sb.st_size);
602    if (data == NULL)
603       goto fail;
604 
605    /* Read entire file into memory */
606    int ret = read_all(fd, data, sb.st_size);
607    if (ret == -1)
608       goto fail;
609 
610     uint8_t *uncompressed_data =
611        parse_and_validate_cache_item(cache, data, sb.st_size, size);
612    if (!uncompressed_data)
613       goto fail;
614 
615    free(data);
616    free(filename);
617    close(fd);
618 
619    return uncompressed_data;
620 
621  fail:
622    if (data)
623       free(data);
624    if (filename)
625       free(filename);
626    if (fd != -1)
627       close(fd);
628 
629    return NULL;
630 }
631 
632 /* Return a filename within the cache's directory corresponding to 'key'.
633  *
634  * Returns NULL if out of memory.
635  */
636 char *
disk_cache_get_cache_filename(struct disk_cache * cache,const cache_key key)637 disk_cache_get_cache_filename(struct disk_cache *cache, const cache_key key)
638 {
639    char buf[41];
640    char *filename;
641 
642    if (cache->path_init_failed)
643       return NULL;
644 
645    _mesa_sha1_format(buf, key);
646    if (asprintf(&filename, "%s/%c%c/%s", cache->path, buf[0],
647                 buf[1], buf + 2) == -1)
648       return NULL;
649 
650    return filename;
651 }
652 
/* Serialize a complete cache item (headers + payload) into 'cache_blob'.
 *
 * Layout written: driver keys blob, metadata type (plus optional GLSL key
 * list), cache_entry_file_data CRC header, then the payload (deflated
 * unless compression is disabled). parse_and_validate_cache_item() must
 * read back exactly this layout.
 *
 * Returns true on success, false on allocation/compression/blob failure.
 */
static bool
create_cache_item_header_and_blob(struct disk_cache_put_job *dc_job,
                                  struct blob *cache_blob)
{

   /* Compress the cache item data */
   size_t max_buf = util_compress_max_compressed_len(dc_job->size);
   size_t compressed_size;
   void *compressed_data;

   /* With compression disabled, compressed_data aliases dc_job->data and
    * must NOT be freed; the free calls below are guarded accordingly.
    */
   if (dc_job->cache->compression_disabled) {
      compressed_size = dc_job->size;
      compressed_data = dc_job->data;
   } else {
      compressed_data = malloc(max_buf);
      if (compressed_data == NULL)
         return false;
      compressed_size =
         util_compress_deflate(dc_job->data, dc_job->size,
                              compressed_data, max_buf);
      if (compressed_size == 0)
         goto fail;
   }

   /* Copy the driver_keys_blob, this can be used find information about the
    * mesa version that produced the entry or deal with hash collisions,
    * should that ever become a real problem.
    */
   if (!blob_write_bytes(cache_blob, dc_job->cache->driver_keys_blob,
                         dc_job->cache->driver_keys_blob_size))
      goto fail;

   /* Write the cache item metadata. This data can be used to deal with
    * hash collisions, as well as providing useful information to 3rd party
    * tools reading the cache files.
    */
   if (!blob_write_uint32(cache_blob, dc_job->cache_item_metadata.type))
      goto fail;

   if (dc_job->cache_item_metadata.type == CACHE_ITEM_TYPE_GLSL) {
      if (!blob_write_uint32(cache_blob, dc_job->cache_item_metadata.num_keys))
         goto fail;

      size_t metadata_keys_size =
         dc_job->cache_item_metadata.num_keys * sizeof(cache_key);
      if (!blob_write_bytes(cache_blob, dc_job->cache_item_metadata.keys[0],
                            metadata_keys_size))
         goto fail;
   }

   /* Create CRC of the compressed data. We will read this when restoring the
    * cache and use it to check for corruption.
    */
   struct cache_entry_file_data cf_data;
   cf_data.crc32 = util_hash_crc32(compressed_data, compressed_size);
   cf_data.uncompressed_size = dc_job->size;

   if (!blob_write_bytes(cache_blob, &cf_data, sizeof(cf_data)))
      goto fail;

   /* Finally copy the compressed cache blob */
   if (!blob_write_bytes(cache_blob, compressed_data, compressed_size))
      goto fail;

   if (!dc_job->cache->compression_disabled)
      free(compressed_data);

   return true;

 fail:
   if (!dc_job->cache->compression_disabled)
      free(compressed_data);

   return false;
}
728 
/* Write a cache item (headers + compressed payload) to 'filename'.
 *
 * The data is first written to "<filename>.tmp" under an exclusive lock
 * and then atomically renamed into place, so readers never observe a
 * partially written cache file. 'filename' itself is not freed here.
 */
void
disk_cache_write_item_to_disk(struct disk_cache_put_job *dc_job,
                              char *filename)
{
   int fd = -1, fd_final = -1;
   struct blob cache_blob;
   blob_init(&cache_blob);

   /* Write to a temporary file to allow for an atomic rename to the
    * final destination filename, (to prevent any readers from seeing
    * a partially written file).
    */
   char *filename_tmp = NULL;
   if (asprintf(&filename_tmp, "%s.tmp", filename) == -1)
      goto done;

   fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);

   /* Make the two-character subdirectory within the cache as needed. */
   if (fd == -1) {
      if (errno != ENOENT)
         goto done;

      make_cache_file_directory(dc_job->cache, dc_job->key);

      /* Retry once now that the subdirectory should exist. */
      fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);
      if (fd == -1)
         goto done;
   }

   /* With the temporary file open, we take an exclusive flock on
    * it. If the flock fails, then another process still has the file
    * open with the flock held. So just let that file be responsible
    * for writing the file.
    */
#ifdef HAVE_FLOCK
   int err = flock(fd, LOCK_EX | LOCK_NB);
#else
   /* Fallback for platforms without flock(): POSIX record lock on the
    * whole file (l_len == 0 means "to EOF").
    */
   struct flock lock = {
      .l_start = 0,
      .l_len = 0, /* entire file */
      .l_type = F_WRLCK,
      .l_whence = SEEK_SET
   };
   int err = fcntl(fd, F_SETLK, &lock);
#endif
   if (err == -1)
      goto done;

   /* Now that we have the lock on the open temporary file, we can
    * check to see if the destination file already exists. If so,
    * another process won the race between when we saw that the file
    * didn't exist and now. In this case, we don't do anything more,
    * (to ensure the size accounting of the cache doesn't get off).
    */
   fd_final = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd_final != -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* OK, we're now on the hook to write out a file that we know is
    * not in the cache, and is also not being written out to the cache
    * by some other process.
    */
   if (!create_cache_item_header_and_blob(dc_job, &cache_blob)) {
      unlink(filename_tmp);
      goto done;
   }

   /* Now, finally, write out the contents to the temporary file, then
    * rename them atomically to the destination filename, and also
    * perform an atomic increment of the total cache size.
    */
   int ret = write_all(fd, cache_blob.data, cache_blob.size);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   ret = rename(filename_tmp, filename);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   struct stat sb;
   if (stat(filename, &sb) == -1) {
      /* Something went wrong remove the file */
      unlink(filename);
      goto done;
   }

   /* Account for the on-disk size, in 512-byte blocks. */
   p_atomic_add(dc_job->cache->size, sb.st_blocks * 512);

 done:
   if (fd_final != -1)
      close(fd_final);
   /* This close finally releases the flock, (now that the final file
    * has been renamed into place and the size has been added).
    */
   if (fd != -1)
      close(fd);
   free(filename_tmp);
   blob_finish(&cache_blob);
}
835 
836 /* Determine path for cache based on the first defined name as follows:
837  *
838  *   $MESA_SHADER_CACHE_DIR
839  *   $XDG_CACHE_HOME/mesa_shader_cache
840  *   <pwd.pw_dir>/.cache/mesa_shader_cache
841  */
/* Build (and create on disk) the cache directory path, ralloc'ed off
 * 'mem_ctx'. See the precedence list in the comment above; for
 * DISK_CACHE_SINGLE_FILE two extra levels (<driver_id>/<gpu_name>) are
 * appended. Returns NULL if any directory cannot be created.
 */
char *
disk_cache_generate_cache_dir(void *mem_ctx, const char *gpu_name,
                              const char *driver_id,
                              enum disk_cache_type cache_type)
{
   /* The leaf directory name depends on the cache backend in use. */
   char *cache_dir_name = CACHE_DIR_NAME;
   if (cache_type == DISK_CACHE_SINGLE_FILE)
      cache_dir_name = CACHE_DIR_NAME_SF;
   else if (cache_type == DISK_CACHE_DATABASE)
      cache_dir_name = CACHE_DIR_NAME_DB;

   char *path = getenv("MESA_SHADER_CACHE_DIR");

   if (!path) {
      /* Honor the old variable name but warn about its deprecation. */
      path = getenv("MESA_GLSL_CACHE_DIR");
      if (path)
         fprintf(stderr,
                 "*** MESA_GLSL_CACHE_DIR is deprecated; "
                 "use MESA_SHADER_CACHE_DIR instead ***\n");
   }

   if (path) {
      if (mkdir_if_needed(path) == -1)
         return NULL;

      path = concatenate_and_mkdir(mem_ctx, path, cache_dir_name);
      if (!path)
         return NULL;
   }

   if (path == NULL) {
      char *xdg_cache_home = getenv("XDG_CACHE_HOME");

      if (xdg_cache_home) {
         if (mkdir_if_needed(xdg_cache_home) == -1)
            return NULL;

         path = concatenate_and_mkdir(mem_ctx, xdg_cache_home, cache_dir_name);
         if (!path)
            return NULL;
      }
   }

   if (!path) {
      /* Last resort: derive the path from the password database entry. */
      char *buf;
      size_t buf_size;
      struct passwd pwd, *result;

      /* sysconf() returns long; -1 ("no limit") converts but still
       * compares equal to -1 here, selecting the 512-byte fallback.
       */
      buf_size = sysconf(_SC_GETPW_R_SIZE_MAX);
      if (buf_size == -1)
         buf_size = 512;

      /* Loop until buf_size is large enough to query the directory */
      while (1) {
         buf = ralloc_size(mem_ctx, buf_size);

         getpwuid_r(getuid(), &pwd, buf, buf_size, &result);
         if (result)
            break;

         if (errno == ERANGE) {
            /* Buffer too small: free it and retry with double the size. */
            ralloc_free(buf);
            buf = NULL;
            buf_size *= 2;
         } else {
            return NULL;
         }
      }

      path = concatenate_and_mkdir(mem_ctx, pwd.pw_dir, ".cache");
      if (!path)
         return NULL;

      path = concatenate_and_mkdir(mem_ctx, path, cache_dir_name);
      if (!path)
         return NULL;
   }

   if (cache_type == DISK_CACHE_SINGLE_FILE) {
      /* Single-file caches get per-driver and per-GPU subdirectories. */
      path = concatenate_and_mkdir(mem_ctx, path, driver_id);
      if (!path)
         return NULL;

      path = concatenate_and_mkdir(mem_ctx, path, gpu_name);
      if (!path)
         return NULL;
   }

   return path;
}
932 
933 bool
disk_cache_enabled()934 disk_cache_enabled()
935 {
936    /* Disk cache is not enabled for android, but android's EGL layer
937     * uses EGL_ANDROID_blob_cache to manage the cache itself:
938     */
939    if (DETECT_OS_ANDROID)
940       return false;
941 
942    /* If running as a users other than the real user disable cache */
943    if (geteuid() != getuid())
944       return false;
945 
946    /* At user request, disable shader cache entirely. */
947 #ifdef SHADER_CACHE_DISABLE_BY_DEFAULT
948    bool disable_by_default = true;
949 #else
950    bool disable_by_default = false;
951 #endif
952    char *envvar_name = "MESA_SHADER_CACHE_DISABLE";
953    if (!getenv(envvar_name)) {
954       envvar_name = "MESA_GLSL_CACHE_DISABLE";
955       if (getenv(envvar_name))
956          fprintf(stderr,
957                  "*** MESA_GLSL_CACHE_DISABLE is deprecated; "
958                  "use MESA_SHADER_CACHE_DISABLE instead ***\n");
959    }
960 
961    if (debug_get_bool_option(envvar_name, disable_by_default))
962       return false;
963 
964    return true;
965 }
966 
967 void *
disk_cache_load_item_foz(struct disk_cache * cache,const cache_key key,size_t * size)968 disk_cache_load_item_foz(struct disk_cache *cache, const cache_key key,
969                          size_t *size)
970 {
971    size_t cache_tem_size = 0;
972    void *cache_item = foz_read_entry(&cache->foz_db, key, &cache_tem_size);
973    if (!cache_item)
974       return NULL;
975 
976    uint8_t *uncompressed_data =
977        parse_and_validate_cache_item(cache, cache_item, cache_tem_size, size);
978    free(cache_item);
979 
980    return uncompressed_data;
981 }
982 
983 bool
disk_cache_write_item_to_disk_foz(struct disk_cache_put_job * dc_job)984 disk_cache_write_item_to_disk_foz(struct disk_cache_put_job *dc_job)
985 {
986    struct blob cache_blob;
987    blob_init(&cache_blob);
988 
989    if (!create_cache_item_header_and_blob(dc_job, &cache_blob))
990       return false;
991 
992    bool r = foz_write_entry(&dc_job->cache->foz_db, dc_job->key,
993                             cache_blob.data, cache_blob.size);
994 
995    blob_finish(&cache_blob);
996    return r;
997 }
998 
999 bool
disk_cache_load_cache_index_foz(void * mem_ctx,struct disk_cache * cache)1000 disk_cache_load_cache_index_foz(void *mem_ctx, struct disk_cache *cache)
1001 {
1002    /* Load cache index into a hash map (from fossilise files) */
1003    return foz_prepare(&cache->foz_db, cache->path);
1004 }
1005 
/* Map the on-disk cache index file ("<cache->path>/index") into shared
 * memory, creating it and forcing it to the expected size if necessary.
 *
 * On success, cache->index_mmap/index_mmap_size are set, cache->size
 * points at the leading uint64_t total-size counter, cache->stored_keys
 * at the key table that follows it, and true is returned. On any
 * failure the fd (if opened) is closed and false is returned.
 *
 * NOTE(review): the incoming 'path' argument is overwritten before it
 * is ever read — it acts only as scratch storage for the generated
 * index path.
 */
bool
disk_cache_mmap_cache_index(void *mem_ctx, struct disk_cache *cache,
                            char *path)
{
   int fd = -1;
   bool mapped = false;

   /* The index lives in a fixed-name file directly under the cache dir;
    * the string is owned by mem_ctx.
    */
   path = ralloc_asprintf(mem_ctx, "%s/index", cache->path);
   if (path == NULL)
      goto path_fail;

   fd = open(path, O_RDWR | O_CREAT | O_CLOEXEC, 0644);
   if (fd == -1)
      goto path_fail;

   struct stat sb;
   if (fstat(fd, &sb) == -1)
      goto path_fail;

   /* Force the index file to be the expected size. */
   size_t size = sizeof(*cache->size) + CACHE_INDEX_MAX_KEYS * CACHE_KEY_SIZE;
   if (sb.st_size != size) {
#if HAVE_POSIX_FALLOCATE
      /* posix_fallocate() ensures disk space is allocated otherwise it
       * fails if there is not enough space on the disk.
       */
      if (posix_fallocate(fd, 0, size) != 0)
         goto path_fail;
#else
      /* ftruncate() allocates disk space lazily. If the disk is full
       * and it is unable to allocate disk space when accessed via
       * mmap, it will crash with a SIGBUS.
       */
      if (ftruncate(fd, size) == -1)
         goto path_fail;
#endif
   }

   /* We map this shared so that other processes see updates that we
    * make.
    *
    * Note: We do use atomic addition to ensure that multiple
    * processes don't scramble the cache size recorded in the
    * index. But we don't use any locking to prevent multiple
    * processes from updating the same entry simultaneously. The idea
    * is that if either result lands entirely in the index, then
    * that's equivalent to a well-ordered write followed by an
    * eviction and a write. On the other hand, if the simultaneous
    * writes result in a corrupt entry, that's not really any
    * different than both entries being evicted, (since within the
    * guarantees of the cryptographic hash, a corrupt entry is
    * unlikely to ever match a real cache key).
    */
   cache->index_mmap = mmap(NULL, size, PROT_READ | PROT_WRITE,
                            MAP_SHARED, fd, 0);
   if (cache->index_mmap == MAP_FAILED)
      goto path_fail;
   cache->index_mmap_size = size;

   /* Index layout: one uint64_t counter, then the stored-key table. */
   cache->size = (uint64_t *) cache->index_mmap;
   cache->stored_keys = cache->index_mmap + sizeof(uint64_t);
   mapped = true;

path_fail:
   /* The mapping (if established) keeps the file contents reachable;
    * the descriptor itself is not needed after mmap().
    */
   if (fd != -1)
      close(fd);

   return mapped;
}
1075 
1076 void
disk_cache_destroy_mmap(struct disk_cache * cache)1077 disk_cache_destroy_mmap(struct disk_cache *cache)
1078 {
1079    munmap(cache->index_mmap, cache->index_mmap_size);
1080 }
1081 
1082 void *
disk_cache_db_load_item(struct disk_cache * cache,const cache_key key,size_t * size)1083 disk_cache_db_load_item(struct disk_cache *cache, const cache_key key,
1084                         size_t *size)
1085 {
1086    size_t cache_tem_size = 0;
1087    void *cache_item = mesa_cache_db_multipart_read_entry(&cache->cache_db,
1088                                                          key, &cache_tem_size);
1089    if (!cache_item)
1090       return NULL;
1091 
1092    uint8_t *uncompressed_data =
1093        parse_and_validate_cache_item(cache, cache_item, cache_tem_size, size);
1094    free(cache_item);
1095 
1096    return uncompressed_data;
1097 }
1098 
1099 bool
disk_cache_db_write_item_to_disk(struct disk_cache_put_job * dc_job)1100 disk_cache_db_write_item_to_disk(struct disk_cache_put_job *dc_job)
1101 {
1102    struct blob cache_blob;
1103    blob_init(&cache_blob);
1104 
1105    if (!create_cache_item_header_and_blob(dc_job, &cache_blob))
1106       return false;
1107 
1108    bool r = mesa_cache_db_multipart_entry_write(&dc_job->cache->cache_db,
1109                                                 dc_job->key, cache_blob.data,
1110                                                 cache_blob.size);
1111 
1112    blob_finish(&cache_blob);
1113    return r;
1114 }
1115 
1116 bool
disk_cache_db_load_cache_index(void * mem_ctx,struct disk_cache * cache)1117 disk_cache_db_load_cache_index(void *mem_ctx, struct disk_cache *cache)
1118 {
1119    return mesa_cache_db_multipart_open(&cache->cache_db, cache->path);
1120 }
1121 #endif
1122 
1123 #endif /* ENABLE_SHADER_CACHE */
1124