035-soc-qcom-Add-Shared-Memory-Manager-driver.patch 23 KB

Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: [v2,2/2] soc: qcom: Add Shared Memory Manager driver
From: Bjorn Andersson <bjorn.andersson@sonymobile.com>
X-Patchwork-Id: 6202211
Message-Id: <1428795178-24312-2-git-send-email-bjorn.andersson@sonymobile.com>
To: Kumar Gala <galak@codeaurora.org>, Andy Gross <agross@codeaurora.org>,
	David Brown <davidb@codeaurora.org>, Jeffrey Hugo <jhugo@codeaurora.org>
Cc: <linux-kernel@vger.kernel.org>, <linux-arm-msm@vger.kernel.org>,
	<linux-soc@vger.kernel.org>
Date: Sat, 11 Apr 2015 16:32:58 -0700

The Shared Memory Manager driver implements an interface for allocating
and accessing items in the memory area shared among all of the
processors in a Qualcomm platform.
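
As a rough illustration of the intended use (a sketch only; MY_SMEM_ITEM,
my_client_init() and the 64-byte size are made-up examples, not part of this
patch), a client allocates an item and then resolves its address and size:

	#include <linux/errno.h>
	#include <linux/soc/qcom/smem.h>

	#define MY_SMEM_ITEM	100	/* hypothetical item number */

	static int my_client_init(void)
	{
		size_t size;
		void *ptr;
		int ret;

		/* Create the item, tolerating that another processor may
		 * already have allocated it. */
		ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, MY_SMEM_ITEM, 64);
		if (ret < 0 && ret != -EEXIST)
			return ret;	/* e.g. -EPROBE_DEFER before smem probes */

		/* Resolve address and size of the item */
		ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, MY_SMEM_ITEM, &ptr, &size);
		if (ret < 0)
			return ret;

		return 0;
	}
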
Signed-off-by: Bjorn Andersson <bjorn.andersson@sonymobile.com>
Reviewed-by: Andy Gross <agross@codeaurora.org>
Tested-by: Andy Gross <agross@codeaurora.org>
---
Changes since v1:
- ioremapping the regions nocache
- improved documentation of the two regions of partitions
- corrected free space check in private allocator

 drivers/soc/qcom/Kconfig      |   7 +
 drivers/soc/qcom/Makefile     |   1 +
 drivers/soc/qcom/smem.c       | 768 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/soc/qcom/smem.h |  14 +
 4 files changed, 790 insertions(+)
 create mode 100644 drivers/soc/qcom/smem.c
 create mode 100644 include/linux/soc/qcom/smem.h

--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -9,3 +9,10 @@ config QCOM_GSBI
 	  functions for connecting the underlying serial UART, SPI, and I2C
 	  devices to the output pins.
+config QCOM_SMEM
+	tristate "Qualcomm Shared Memory Manager (SMEM)"
+	depends on ARCH_QCOM
+	help
+	  Say y here to enable support for the Qualcomm Shared Memory Manager.
+	  The driver provides an interface to items in a heap shared among all
+	  processors in a Qualcomm platform.
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1 +1,2 @@
 obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
+obj-$(CONFIG_QCOM_SMEM) += smem.o
--- /dev/null
+++ b/drivers/soc/qcom/smem.c
@@ -0,0 +1,768 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+
+/*
+ * The Qualcomm shared memory system is an allocate-only heap structure that
+ * consists of one or more memory areas that can be accessed by the processors
+ * in the SoC.
+ *
+ * All systems contain a global heap, accessible by all processors in the SoC,
+ * with a table of contents data structure (@smem_header) at the beginning of
+ * the main shared memory block.
+ *
+ * The global header contains metadata for allocations as well as a fixed list
+ * of 512 entries (@smem_global_entry) that can be initialized to reference
+ * parts of the shared memory space.
+ *
+ *
+ * In addition to this global heap a set of "private" heaps can be set up at
+ * boot time with access restrictions so that only certain processor pairs can
+ * access the data.
+ *
+ * These partitions are referenced from an optional partition table
+ * (@smem_ptable), that is found 4kB from the end of the main smem region. The
+ * partition table entries (@smem_ptable_entry) list the involved processors
+ * (or hosts) and their location in the main shared memory region.
+ *
+ * Each partition starts with a header (@smem_partition_header) that identifies
+ * the partition and holds properties for the two internal memory regions. The
+ * two regions are cached and non-cached memory respectively. Each region
+ * contains a linked list of allocation headers (@smem_private_entry) followed
+ * by their data.
+ *
+ * Items in the non-cached region are allocated from the start of the partition
+ * while items in the cached region are allocated from the end. The free area
+ * is hence the region between the cached and non-cached offsets.
+ *
+ *
+ * To synchronize allocations in the shared memory heaps a remote spinlock must
+ * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
+ * platforms.
+ *
+ */
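+
+/*
+ * As an illustrative sketch of the above (offsets are relative to the start
+ * of a private partition, sizes not to scale):
+ *
+ *	0			struct smem_partition_header
+ *	sizeof(header)		first uncached smem_private_entry + data
+ *	...			further uncached items, growing towards higher offsets
+ *	offset_free_uncached	start of the free area
+ *	offset_free_cached	end of the free area
+ *	...			cached items, allocated from the end
+ *	size			end of the partition
+ */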
+
+/**
+ * struct smem_proc_comm - proc_comm communication struct (legacy)
+ * @command: current command to be executed
+ * @status: status of the currently requested command
+ * @params: parameters to the command
+ */
+struct smem_proc_comm {
+	u32 command;
+	u32 status;
+	u32 params[2];
+};
+
+/**
+ * struct smem_global_entry - entry to reference smem items on the heap
+ * @allocated: boolean to indicate if this entry is used
+ * @offset: offset to the allocated space
+ * @size: size of the allocated space, 8 byte aligned
+ * @aux_base: base address for the memory region used by this unit, or 0 for
+ *	the default region. bits 0,1 are reserved
+ */
+struct smem_global_entry {
+	u32 allocated;
+	u32 offset;
+	u32 size;
+	u32 aux_base; /* bits 1:0 reserved */
+};
+#define AUX_BASE_MASK 0xfffffffc
+
+/**
+ * struct smem_header - header found at the beginning of the primary smem region
+ * @proc_comm: proc_comm communication interface (legacy)
+ * @version: array of versions for the various subsystems
+ * @initialized: boolean to indicate that smem is initialized
+ * @free_offset: index of the first unallocated byte in smem
+ * @available: number of bytes available for allocation
+ * @reserved: reserved field, must be 0
+ * @toc: array of references to items
+ */
+struct smem_header {
+	struct smem_proc_comm proc_comm[4];
+	u32 version[32];
+	u32 initialized;
+	u32 free_offset;
+	u32 available;
+	u32 reserved;
+	struct smem_global_entry toc[];
+};
+
+/**
+ * struct smem_ptable_entry - one entry in the @smem_ptable list
+ * @offset: offset, within the main shared memory region, of the partition
+ * @size: size of the partition
+ * @flags: flags for the partition (currently unused)
+ * @host0: first processor/host with access to this partition
+ * @host1: second processor/host with access to this partition
+ * @reserved: reserved entries for later use
+ */
+struct smem_ptable_entry {
+	u32 offset;
+	u32 size;
+	u32 flags;
+	u16 host0;
+	u16 host1;
+	u32 reserved[8];
+};
+
+/**
+ * struct smem_ptable - partition table for the private partitions
+ * @magic: magic number, must be SMEM_PTABLE_MAGIC
+ * @version: version of the partition table
+ * @num_entries: number of partitions in the table
+ * @reserved: for now reserved entries
+ * @entry: list of @smem_ptable_entry for the @num_entries partitions
+ */
+struct smem_ptable {
+	u32 magic;
+	u32 version;
+	u32 num_entries;
+	u32 reserved[5];
+	struct smem_ptable_entry entry[];
+};
+#define SMEM_PTABLE_MAGIC 0x434f5424 /* "$TOC" */
+
+/**
+ * struct smem_partition_header - header of the partitions
+ * @magic: magic number, must be SMEM_PART_MAGIC
+ * @host0: first processor/host with access to this partition
+ * @host1: second processor/host with access to this partition
+ * @size: size of the partition
+ * @offset_free_uncached: offset to the first free byte of uncached memory in
+ *	this partition
+ * @offset_free_cached: offset to the first free byte of cached memory in this
+ *	partition
+ * @reserved: for now reserved entries
+ */
+struct smem_partition_header {
+	u32 magic;
+	u16 host0;
+	u16 host1;
+	u32 size;
+	u32 offset_free_uncached;
+	u32 offset_free_cached;
+	u32 reserved[3];
+};
+#define SMEM_PART_MAGIC 0x54525024 /* "$PRT" */
+
+/**
+ * struct smem_private_entry - header of each item in the private partition
+ * @canary: magic number, must be SMEM_PRIVATE_CANARY
+ * @item: identifying number of the smem item
+ * @size: size of the data, including padding bytes
+ * @padding_data: number of bytes of padding of data
+ * @padding_hdr: number of bytes of padding between the header and the data
+ * @reserved: for now reserved entry
+ */
+struct smem_private_entry {
+	u16 canary;
+	u16 item;
+	u32 size; /* includes padding bytes */
+	u16 padding_data;
+	u16 padding_hdr;
+	u32 reserved;
+};
+#define SMEM_PRIVATE_CANARY 0xa5a5
+
+/*
+ * Item 3 of the global heap contains an array of versions for the various
+ * software components in the SoC. We verify that the boot loader version
+ * matches the expected version (SMEM_EXPECTED_VERSION) as a sanity check.
+ */
+#define SMEM_ITEM_VERSION 3
+#define SMEM_MASTER_SBL_VERSION_INDEX 7
+#define SMEM_EXPECTED_VERSION 11
+
+/*
+ * The first 8 items are only to be allocated by the boot loader while
+ * initializing the heap.
+ */
+#define SMEM_ITEM_LAST_FIXED 8
+
+/* Highest accepted item number, for both global and private heaps */
+#define SMEM_ITEM_LAST 512
+
+/* Processor/host identifier for the application processor */
+#define SMEM_HOST_APPS 0
+
+/* Max number of processors/hosts in a system */
+#define SMEM_HOST_COUNT 7
+
+/**
+ * struct smem_region - representation of a chunk of memory used for smem
+ * @aux_base: identifier of aux_mem base
+ * @virt_base: virtual base address of memory with this aux_mem identifier
+ * @size: size of the memory region
+ */
+struct smem_region {
+	u32 aux_base;
+	void __iomem *virt_base;
+	size_t size;
+};
+
+/**
+ * struct qcom_smem - device data for the smem device
+ * @dev: device pointer
+ * @hwlock: reference to a hwspinlock
+ * @partitions: list of pointers to partitions affecting the current
+ *	processor/host
+ * @num_regions: number of @regions
+ * @regions: list of the memory regions defining the shared memory
+ */
+struct qcom_smem {
+	struct device *dev;
+
+	struct hwspinlock *hwlock;
+
+	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
+
+	unsigned num_regions;
+	struct smem_region regions[0];
+};
+
+/* Pointer to the one and only smem handle */
+static struct qcom_smem *__smem;
+
+/* Timeout (ms) for the trylock of remote spinlocks */
+#define HWSPINLOCK_TIMEOUT 1000
+
+static int qcom_smem_alloc_private(struct qcom_smem *smem,
+		unsigned host,
+		unsigned item,
+		size_t size)
+{
+	struct smem_partition_header *phdr;
+	struct smem_private_entry *hdr;
+	size_t alloc_size;
+	void *p;
+
+	/* We're not going to find it if there's no matching partition */
+	if (host >= SMEM_HOST_COUNT || !smem->partitions[host])
+		return -ENOENT;
+
+	phdr = smem->partitions[host];
+
+	p = (void *)phdr + sizeof(*phdr);
+	while (p < (void *)phdr + phdr->offset_free_uncached) {
+		hdr = p;
+
+		if (hdr->canary != SMEM_PRIVATE_CANARY) {
+			dev_err(smem->dev,
+				"Found invalid canary in host %d partition\n",
+				host);
+			return -EINVAL;
+		}
+
+		if (hdr->item == item)
+			return -EEXIST;
+
+		p += sizeof(*hdr) + hdr->padding_hdr + hdr->size;
+	}
+
+	/* Check that we don't grow into the cached region */
+	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
+	if (p + alloc_size >= (void *)phdr + phdr->offset_free_cached) {
+		dev_err(smem->dev, "Out of memory\n");
+		return -ENOSPC;
+	}
+
+	hdr = p;
+	hdr->canary = SMEM_PRIVATE_CANARY;
+	hdr->item = item;
+	hdr->size = ALIGN(size, 8);
+	hdr->padding_data = hdr->size - size;
+	hdr->padding_hdr = 0;
+
+	/*
+	 * Ensure the header is written before we advance the free offset, so
+	 * that remote processors that do not take the remote spinlock still
+	 * get a consistent view of the linked list.
+	 */
+	wmb();
+	phdr->offset_free_uncached += alloc_size;
+
+	return 0;
+}
+
+static int qcom_smem_alloc_global(struct qcom_smem *smem,
+		unsigned item,
+		size_t size)
+{
+	struct smem_header *header;
+	struct smem_global_entry *entry;
+
+	if (WARN_ON(item >= SMEM_ITEM_LAST))
+		return -EINVAL;
+
+	header = smem->regions[0].virt_base;
+	entry = &header->toc[item];
+	if (entry->allocated)
+		return -EEXIST;
+
+	size = ALIGN(size, 8);
+	if (WARN_ON(size > header->available))
+		return -ENOMEM;
+
+	entry->offset = header->free_offset;
+	entry->size = size;
+
+	/*
+	 * Ensure the header is consistent before we mark the item allocated,
+	 * so that remote processors will get a consistent view of the item
+	 * even though they do not take the spinlock on read.
+	 */
+	wmb();
+	entry->allocated = 1;
+
+	header->free_offset += size;
+	header->available -= size;
+
+	return 0;
+}
+
+/**
+ * qcom_smem_alloc - allocate space for a smem item
+ * @host: remote processor id, or -1
+ * @item: smem item handle
+ * @size: number of bytes to be allocated
+ *
+ * Allocate space for a given smem item of size @size, given that the item is
+ * not yet allocated.
+ */
+int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
+{
+	unsigned long flags;
+	int ret;
+
+	if (!__smem)
+		return -EPROBE_DEFER;
+
+	if (item < SMEM_ITEM_LAST_FIXED) {
+		dev_err(__smem->dev,
+			"Rejecting allocation of static entry %d\n", item);
+		return -EINVAL;
+	}
+
+	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+			HWSPINLOCK_TIMEOUT,
+			&flags);
+	if (ret)
+		return ret;
+
+	ret = qcom_smem_alloc_private(__smem, host, item, size);
+	if (ret == -ENOENT)
+		ret = qcom_smem_alloc_global(__smem, item, size);
+
+	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_smem_alloc);
+
+static int qcom_smem_get_global(struct qcom_smem *smem,
+		unsigned item,
+		void **ptr,
+		size_t *size)
+{
+	struct smem_header *header;
+	struct smem_region *area;
+	struct smem_global_entry *entry;
+	u32 aux_base;
+	unsigned i;
+
+	if (WARN_ON(item >= SMEM_ITEM_LAST))
+		return -EINVAL;
+
+	header = smem->regions[0].virt_base;
+	entry = &header->toc[item];
+	if (!entry->allocated)
+		return -ENXIO;
+
+	if (ptr != NULL) {
+		aux_base = entry->aux_base & AUX_BASE_MASK;
+
+		for (i = 0; i < smem->num_regions; i++) {
+			area = &smem->regions[i];
+
+			if (area->aux_base == aux_base || !aux_base) {
+				*ptr = area->virt_base + entry->offset;
+				break;
+			}
+		}
+	}
+	if (size != NULL)
+		*size = entry->size;
+
+	return 0;
+}
+
+static int qcom_smem_get_private(struct qcom_smem *smem,
+		unsigned host,
+		unsigned item,
+		void **ptr,
+		size_t *size)
+{
+	struct smem_partition_header *phdr;
+	struct smem_private_entry *hdr;
+	void *p;
+
+	/* We're not going to find it if there's no matching partition */
+	if (host >= SMEM_HOST_COUNT || !smem->partitions[host])
+		return -ENOENT;
+
+	phdr = smem->partitions[host];
+
+	p = (void *)phdr + sizeof(*phdr);
+	while (p < (void *)phdr + phdr->offset_free_uncached) {
+		hdr = p;
+
+		if (hdr->canary != SMEM_PRIVATE_CANARY) {
+			dev_err(smem->dev,
+				"Found invalid canary in host %d partition\n",
+				host);
+			return -EINVAL;
+		}
+
+		if (hdr->item == item) {
+			if (ptr != NULL)
+				*ptr = p + sizeof(*hdr) + hdr->padding_hdr;
+
+			if (size != NULL)
+				*size = hdr->size - hdr->padding_data;
+
+			return 0;
+		}
+
+		p += sizeof(*hdr) + hdr->padding_hdr + hdr->size;
+	}
+
+	return -ENOENT;
+}
+
+/**
+ * qcom_smem_get - resolve ptr and size of a smem item
+ * @host: the remote processor, or -1
+ * @item: smem item handle
+ * @ptr: pointer to be filled out with address of the item
+ * @size: pointer to be filled out with size of the item
+ *
+ * Looks up pointer and size of a smem item.
+ */
+int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size)
+{
+	unsigned long flags;
+	int ret;
+
+	if (!__smem)
+		return -EPROBE_DEFER;
+
+	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+			HWSPINLOCK_TIMEOUT,
+			&flags);
+	if (ret)
+		return ret;
+
+	ret = qcom_smem_get_private(__smem, host, item, ptr, size);
+	if (ret == -ENOENT)
+		ret = qcom_smem_get_global(__smem, item, ptr, size);
+
+	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_smem_get);
+
+/**
+ * qcom_smem_get_free_space - retrieve amount of free space in a partition
+ * @host: the remote processor identifying a partition, or -1
+ *
+ * To be used by smem clients as a quick way to determine if any new
+ * allocations have been made.
+ */
+int qcom_smem_get_free_space(unsigned host)
+{
+	struct smem_partition_header *phdr;
+	struct smem_header *header;
+	unsigned ret;
+
+	if (!__smem)
+		return -EPROBE_DEFER;
+
+	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+		phdr = __smem->partitions[host];
+		ret = phdr->offset_free_uncached;
+	} else {
+		header = __smem->regions[0].virt_base;
+		ret = header->available;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_smem_get_free_space);
+
+static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
+{
+	unsigned *versions;
+	size_t size;
+	int ret;
+
+	ret = qcom_smem_get_global(smem, SMEM_ITEM_VERSION,
+			(void **)&versions, &size);
+	if (ret < 0) {
+		dev_err(smem->dev, "Unable to read the version item\n");
+		return -ENOENT;
+	}
+
+	if (size < sizeof(unsigned) * SMEM_MASTER_SBL_VERSION_INDEX) {
+		dev_err(smem->dev, "Version item is too small\n");
+		return -EINVAL;
+	}
+
+	return versions[SMEM_MASTER_SBL_VERSION_INDEX];
+}
+
+static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
+		unsigned local_host)
+{
+	struct smem_partition_header *header;
+	struct smem_ptable_entry *entry;
+	struct smem_ptable *ptable;
+	unsigned remote_host;
+	int i;
+
+	ptable = smem->regions[0].virt_base + smem->regions[0].size - 4 * 1024;
+	if (ptable->magic != SMEM_PTABLE_MAGIC)
+		return 0;
+
+	if (ptable->version != 1) {
+		dev_err(smem->dev,
+			"Unsupported partition header version %d\n",
+			ptable->version);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ptable->num_entries; i++) {
+		entry = &ptable->entry[i];
+
+		if (entry->host0 != local_host && entry->host1 != local_host)
+			continue;
+
+		if (!entry->offset)
+			continue;
+
+		if (!entry->size)
+			continue;
+
+		if (entry->host0 == local_host)
+			remote_host = entry->host1;
+		else
+			remote_host = entry->host0;
+
+		if (smem->partitions[remote_host]) {
+			dev_err(smem->dev,
+				"Already found a partition for host %d\n",
+				remote_host);
+			return -EINVAL;
+		}
+
+		header = smem->regions[0].virt_base + entry->offset;
+
+		if (header->magic != SMEM_PART_MAGIC) {
+			dev_err(smem->dev,
+				"Partition %d has invalid magic\n", i);
+			return -EINVAL;
+		}
+
+		if (header->host0 != local_host && header->host1 != local_host) {
+			dev_err(smem->dev,
+				"Partition %d hosts are invalid\n", i);
+			return -EINVAL;
+		}
+
+		if (header->host0 != remote_host && header->host1 != remote_host) {
+			dev_err(smem->dev,
+				"Partition %d hosts are invalid\n", i);
+			return -EINVAL;
+		}
+
+		if (header->size != entry->size) {
+			dev_err(smem->dev,
+				"Partition %d has invalid size\n", i);
+			return -EINVAL;
+		}
+
+		if (header->offset_free_uncached > header->size) {
+			dev_err(smem->dev,
+				"Partition %d has invalid free pointer\n", i);
+			return -EINVAL;
+		}
+
+		smem->partitions[remote_host] = header;
+	}
+
+	return 0;
+}
+
+static int qcom_smem_count_mem_regions(struct platform_device *pdev)
+{
+	struct resource *res;
+	int num_regions = 0;
+	int i;
+
+	for (i = 0; i < pdev->num_resources; i++) {
+		res = &pdev->resource[i];
+
+		if (resource_type(res) == IORESOURCE_MEM)
+			num_regions++;
+	}
+
+	return num_regions;
+}
+
+static int qcom_smem_probe(struct platform_device *pdev)
+{
+	struct smem_header *header;
+	struct device_node *np;
+	struct qcom_smem *smem;
+	struct resource *res;
+	struct resource r;
+	size_t array_size;
+	int num_regions = 0;
+	int hwlock_id;
+	u32 version;
+	int ret;
+	int i;
+
+	num_regions = qcom_smem_count_mem_regions(pdev) + 1;
+
+	array_size = num_regions * sizeof(struct smem_region);
+	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
+	if (!smem)
+		return -ENOMEM;
+
+	smem->dev = &pdev->dev;
+	smem->num_regions = num_regions;
+
+	np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+	if (!np) {
+		dev_err(&pdev->dev, "No memory-region specified\n");
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(np, 0, &r);
+	of_node_put(np);
+	if (ret)
+		return ret;
+
+	smem->regions[0].aux_base = (u32)r.start;
+	smem->regions[0].size = resource_size(&r);
+	smem->regions[0].virt_base = devm_ioremap_nocache(&pdev->dev,
+			r.start,
+			resource_size(&r));
+	if (!smem->regions[0].virt_base)
+		return -ENOMEM;
+
+	for (i = 1; i < num_regions; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i - 1);
+
+		smem->regions[i].aux_base = (u32)res->start;
+		smem->regions[i].size = resource_size(res);
+		smem->regions[i].virt_base = devm_ioremap_nocache(&pdev->dev,
+				res->start,
+				resource_size(res));
+		if (!smem->regions[i].virt_base)
+			return -ENOMEM;
+	}
+
+	header = smem->regions[0].virt_base;
+	if (header->initialized != 1 || header->reserved) {
+		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
+		return -EINVAL;
+	}
+
+	version = qcom_smem_get_sbl_version(smem);
+	if (version >> 16 != SMEM_EXPECTED_VERSION) {
+		dev_err(&pdev->dev, "Unsupported smem version 0x%x\n", version);
+		return -EINVAL;
+	}
+
+	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
+	if (ret < 0)
+		return ret;
+
+	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
+	if (hwlock_id < 0) {
+		dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+		return hwlock_id;
+	}
+
+	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
+	if (!smem->hwlock)
+		return -ENXIO;
+
+	__smem = smem;
+
+	return 0;
+}
+
+static int qcom_smem_remove(struct platform_device *pdev)
+{
+	hwspin_lock_free(__smem->hwlock);
+	__smem = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id qcom_smem_of_match[] = {
+	{ .compatible = "qcom,smem" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_smem_of_match);
+
+static struct platform_driver qcom_smem_driver = {
+	.probe = qcom_smem_probe,
+	.remove = qcom_smem_remove,
+	.driver  = {
+		.name = "qcom_smem",
+		.of_match_table = qcom_smem_of_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init qcom_smem_init(void)
+{
+	return platform_driver_register(&qcom_smem_driver);
+}
+arch_initcall(qcom_smem_init);
+
+static void __exit qcom_smem_exit(void)
+{
+	platform_driver_unregister(&qcom_smem_driver);
+}
+module_exit(qcom_smem_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+++ b/include/linux/soc/qcom/smem.h
@@ -0,0 +1,14 @@
+#ifndef __QCOM_SMEM_H__
+#define __QCOM_SMEM_H__
+
+struct device_node;
+struct qcom_smem;
+
+#define QCOM_SMEM_HOST_ANY -1
+
+int qcom_smem_alloc(unsigned host, unsigned item, size_t size);
+int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size);
+
+int qcom_smem_get_free_space(unsigned host);
+
+#endif
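
For completeness, a sketch of how a client might use qcom_smem_get_free_space()
to notice that the remote side has allocated new items, as hinted at in its
kerneldoc above. my_smem_changed() and last_free are illustrative names only
and not part of this patch:

	#include <linux/types.h>
	#include <linux/soc/qcom/smem.h>

	/* Illustration only: detect new allocations in the partition shared
	 * with the given host by watching the reported value for changes. */
	static int last_free = -1;

	static bool my_smem_changed(unsigned host)
	{
		int free = qcom_smem_get_free_space(host);

		if (free < 0)	/* e.g. -EPROBE_DEFER before smem probes */
			return false;

		if (free == last_free)
			return false;

		last_free = free;
		return true;
	}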