1
2
3
4
5
6
7
8 import os
9 import Ice
10 import time
11 import numpy
12 import signal
13 import logging
14 import threading
15 import traceback
16 import subprocess
17 import exceptions
18 import portalocker
19
20 from path import path
21
22
23 import omero
24 import omero.clients
25 import omero.callbacks
26
27
28 from omero.columns import *
29 from omero.rtypes import *
30 from omero.util.decorators import remoted, locked, perf
31 from omero_ext.functional import wraps
32
33
34 sys = __import__("sys")
35 tables = __import__("tables")
38 """
39 Returns the length of the argument or None
40 if the argument is None
41 """
42 if rv is None:
43 return None
44 return len(rv)
45
47 """
48 Decorator which takes the first argument after "self" and compares
49 that to the last modification time. If the stamp is older, then the
50 method call will throw an omero.OptimisticLockException. Otherwise,
51 execution will complete normally. If update is True, then the
52 last modification time will be updated after the method call if it
53 is successful.
54
55 Note: stamped implies locked
56
57 """
58 def check_and_update_stamp(*args, **kwargs):
59 self = args[0]
60 stamp = args[1]
61 if stamp < self._stamp:
62 raise omero.OptimisticLockException(None, None, "Resource modified by another thread")
63
64 try:
65 return func(*args, **kwargs)
66 finally:
67 if update:
68 self._stamp = time.time()
69 checked_and_update_stamp = wraps(func)(check_and_update_stamp)
70 return locked(check_and_update_stamp)
71
74 """
75 Since two calls to tables.openFile() return non-equal files
76 with equal fileno's, portalocker cannot be used to prevent
77 the creation of two HdfStorage instances from the same
78 Python process.
79 """
80
82 self.logger = logging.getLogger("omero.tables.HdfList")
83 self._lock = threading.RLock()
84 self.__filenos = {}
85 self.__paths = {}
86 self.__locks = {}
87
88 @locked
90
91 if hdfpath in self.__locks:
92 raise omero.LockTimeout(None, None, "Path already in HdfList: %s" % hdfpath)
93
94 parent = path(hdfpath).parent
95 if not parent.exists():
96 raise omero.ApiUsageException(None, None, "Parent directory does not exist: %s" % parent)
97
98 try:
99 lock = open(hdfpath, "a+")
100 portalocker.lock(lock, portalocker.LOCK_NB|portalocker.LOCK_EX)
101 self.__locks[hdfpath] = lock
102 except portalocker.LockException, le:
103 lock.close()
104 raise omero.LockTimeout(None, None, "Cannot acquire exclusive lock on: %s" % hdfpath, 0)
105 except:
106 lock.close()
107 raise
108
109 hdffile = hdfstorage.openfile("a")
110 fileno = hdffile.fileno()
111 if fileno in self.__filenos.keys():
112 hdffile.close()
113 raise omero.LockTimeout(None, None, "File already opened by process: %s" % hdfpath, 0)
114 else:
115 self.__filenos[fileno] = hdfstorage
116 self.__paths[hdfpath] = hdfstorage
117
118 return hdffile
119
120 @locked
        # Return the HdfStorage already registered for hdfpath, or create a
        # new one (whose __init__ registers it here via addOrThrow).
        try:
            return self.__paths[hdfpath]
        except KeyError:
            return HdfStorage(hdfpath)
126
127 @locked
128 - def remove(self, hdfpath, hdffile):
129 del self.__filenos[hdffile.fileno()]
130 del self.__paths[hdfpath]
131 try:
132 if hdfpath in self.__locks:
133 try:
134 lock = self.__locks[hdfpath]
135 lock.close()
136 finally:
137 del self.__locks[hdfpath]
138 except exceptions.Exception, e:
139 self.logger.warn("Exception on remove(%s)" % hdfpath, exc_info=True)
140
141
# Process-wide singleton guaranteeing at most one HdfStorage per HDF5 file.
HDFLIST = HdfList()
145 """
146 Provides HDF-storage for measurement results. At most a single
147 instance will be available for any given physical HDF5 file.
148 """
149
150
152
153 """
154 file_path should be the path to a file in a valid directory where
155 this HDF instance can be stored (Not None or Empty). Once this
156 method is finished, self.__hdf_file is guaranteed to be a PyTables HDF
157 file, but not necessarily initialized.
158 """
159
160 if file_path is None or str(file_path) == "":
161 raise omero.ValidationException(None, None, "Invalid file_path")
162
163 self.logger = logging.getLogger("omero.tables.HdfStorage")
164
165 self.__hdf_path = path(file_path)
166
167
168 self.__hdf_file = HDFLIST.addOrThrow(file_path, self)
169 self.__tables = []
170
171 self._lock = threading.RLock()
172 self._stamp = time.time()
173
174
175 self.__mea = None
176 self.__ome = None
177
178 try:
179 self.__ome = self.__hdf_file.root.OME
180 self.__mea = self.__ome.Measurements
181 self.__types = self.__ome.ColumnTypes[:]
182 self.__descriptions = self.__ome.ColumnDescriptions[:]
183 self.__initialized = True
184 except tables.NoSuchNodeError:
185 self.__initialized = False
186
187
188
189
190
192 return self.__hdf_path.size
193
        try:
            # Force truncation (mode "w") when an empty placeholder file
            # exists; otherwise honour the caller's requested mode.
            if self.__hdf_path.exists() and self.__hdf_path.size == 0:
                mode = "w"
            return tables.openFile(self.__hdf_path, mode=mode,\
                title="OMERO HDF Measurement Storage", rootUEP="/")
        except (tables.HDF5ExtError, IOError), io:
            # Bad or unwritable path: surface as a validation error.
            msg = "HDFStorage initialized with bad path: %s" % self.__hdf_path
            self.logger.error(msg)
            raise omero.ValidationException(None, None, msg)
204
206 if not self.__initialized:
207 raise omero.ApiUsageException(None, None, "Not yet initialized")
208
210 return len(self.__types)
211
213 return self.__mea.nrows
214
        # Validate requested column indexes against the table width.
        # None means "not restricted"; an explicit empty list is an error.
        if colNumbers is not None:
            if len(colNumbers) > 0:
                maxcol = max(colNumbers)
                totcol = self.__width()
                if maxcol >= totcol:
                    raise omero.ApiUsageException(None, None, "Column overflow: %s >= %s" % (maxcol, totcol))
            else:
                raise omero.ApiUsageException(None, None, "Columns not specified: %s" % colNumbers)

        # Same validation for row indexes against the current row count.
        if rowNumbers is not None:
            if len(rowNumbers) > 0:
                maxrow = max(rowNumbers)
                totrow = self.__length()
                if maxrow >= totrow:
                    raise omero.ApiUsageException(None, None, "Row overflow: %s >= %s" % (maxrow, totrow))
            else:
                raise omero.ApiUsageException(None, None, "Rows not specified: %s" % rowNumbers)
234
235
236
237
238
239 @locked
241 """
242
243 """
        if metadata is None: metadata = {}

        if self.__initialized:
            raise omero.ValidationException(None, None, "Already initialized.")

        if not cols:
            raise omero.ApiUsageException(None, None, "No columns provided")

        for c in cols:
            if not c.name:
                raise omero.ApiUsageException(None, None, "Column unnamed: %s" % c)

        # Build the PyTables description from the omero.grid columns and
        # create the /OME/Measurements table.
        self.__definition = columns2definition(cols)
        self.__ome = self.__hdf_file.createGroup("/", "OME")
        self.__mea = self.__hdf_file.createTable(self.__ome, "Measurements", self.__definition)

        # Persist column types/descriptions so the schema can be read back
        # when the file is reopened (see __init__).
        self.__types = [ x.ice_staticId() for x in cols ]
        self.__descriptions = [ (x.description != None) and x.description or "" for x in cols ]
        self.__hdf_file.createArray(self.__ome, "ColumnTypes", self.__types)
        self.__hdf_file.createArray(self.__ome, "ColumnDescriptions", self.__descriptions)

        self.__mea.attrs.version = "v1"
        self.__mea.attrs.initialized = time.time()
        if metadata:
            for k, v in metadata.items():
                self.__mea.attrs[k] = v

        self.__mea.flush()
        self.__hdf_file.flush()
        self.__initialized = True
275
276 @locked
277 - def incr(self, table):
278 sz = len(self.__tables)
279 self.logger.info("Size: %s - Attaching %s to %s" % (sz, table, self.__hdf_path))
280 if table in self.__tables:
281 self.logger.warn("Already added")
282 raise omero.ApiUsageException(None, None, "Already added")
283 self.__tables.append(table)
284 return sz + 1
285
286 @locked
287 - def decr(self, table):
288 sz = len(self.__tables)
289 self.logger.info("Size: %s - Detaching %s from %s", sz, table, self.__hdf_path)
290 if not (table in self.__tables):
291 self.logger.warn("Unknown table")
292 raise omero.ApiUsageException(None, None, "Unknown table")
293 self.__tables.remove(table)
294 if sz <= 1:
295 self.cleanup()
296 return sz - 1
297
298 @locked
300 return self._stamp <= stamp
301
302 @locked
306
307 @locked
308 - def cols(self, size, current):
327
328 @locked
348
349 @locked
358
359 @locked
377
378
379
380
381
382 @stamped
383 - def update(self, stamp, data):
389
390 @stamped
391 - def getWhereList(self, stamp, condition, variables, unused, start, stop, step):
392 self.__initcheck()
393 try:
394 return self.__mea.getWhereList(condition, variables, None, start, stop, step).tolist()
395 except (exceptions.NameError, exceptions.SyntaxError, exceptions.TypeError, exceptions.ValueError), err:
396 aue = omero.ApiUsageException()
397 aue.message = "Bad condition: %s, %s" % (condition, variables)
398 aue.serverStackTrace = "".join(traceback.format_exc())
399 aue.serverExceptionClass = str(err.__class__.__name__)
400 raise aue
401
403 """
404 Constructs a omero.grid.Data object for returning to the client.
405 """
406 data = omero.grid.Data()
407 data.columns = cols
408 data.rowNumbers = rowNumbers
409 data.lastModification = long(self._stamp*1000)
410 return data
411
412 @stamped
420
421 @stamped
422 - def read(self, stamp, colNumbers, start, stop, current):
434
435 @stamped
436 - def slice(self, stamp, colNumbers, rowNumbers, current):
437 self.__initcheck()
438
439 if colNumbers is None or len(colNumbers) == 0:
440 colNumbers = range(self.__width())
441 if rowNumbers is None or len(rowNumbers) == 0:
442 rowNumbers = range(self.__length())
443
444 self.__sizecheck(colNumbers, rowNumbers)
445 cols = self.cols(None, current)
446 rv = []
447 for i in colNumbers:
448 col = cols[i]
449 col.readCoordinates(self.__mea, rowNumbers)
450 rv.append(col)
451 return self._as_data(rv, rowNumbers)
452
453
454
455
456
459
460 @locked
        self.logger.info("Cleaning storage: %s", self.__hdf_path)
        if self.__mea:
            self.__mea.flush()
            self.__mea = None
        if self.__ome:
            self.__ome = None
        if self.__hdf_file:
            # Unregister from the process-wide HDFLIST before closing so
            # a new HdfStorage for this path can be created afterwards.
            HDFLIST.remove(self.__hdf_path, self.__hdf_file)
            hdffile = self.__hdf_file
            self.__hdf_file = None
            hdffile.close()
473
474
475
476
477 -class TableI(omero.grid.Table, omero.util.SimpleServant):
478 """
479 Spreadsheet implementation based on pytables.
480 """
481
    def __init__(self, ctx, file_obj, factory, storage, uuid = "unknown"):
        self.uuid = uuid
        self.file_obj = file_obj
        self.factory = factory
        self.storage = storage
        # Database-level write permission; re-checked before any FS write.
        self.can_write = factory.getAdminService().canUpdate(file_obj)
        omero.util.SimpleServant.__init__(self, ctx)

        self.stamp = time.time()
        # Attach to the shared storage; cleanup()/close() must decr() later.
        self.storage.incr(self)
492
494 """
495 Checks that the current user can write to the given object
496 at the database level. If not, no FS level writes are permitted
497 either.
498
499 ticket:2910
500 """
501 if not self.can_write:
502 raise omero.SecurityViolation("Current user cannot write to file %s" % self.file_obj.id.val)
503
505 """
506 Called periodically to check the resource is alive. Returns
507 False if this resource can be cleaned up. (Resources API)
508 """
509 self.logger.debug("Checking %s" % self)
510 return True
511
513 """
514 Decrements the counter on the held storage to allow it to
515 be cleaned up.
516 """
517 if self.storage:
518 try:
519 self.storage.decr(self)
520 finally:
521 self.storage = None
522
524 return "Table-%s" % self.uuid
525
526 @remoted
527 @perf
528 - def close(self, current = None):
529
530 size = None
531 if self.storage is not None:
532 size = self.storage.size()
533
534 try:
535 self.cleanup()
536 self.logger.info("Closed %s", self)
537 except:
538 self.logger.warn("Closed %s with errors", self)
539
540 if self.file_obj is not None and self.can_write:
541 fid = self.file_obj.id.val
542 if not self.file_obj.isLoaded() or\
543 self.file_obj.getDetails() is None or\
544 self.file_obj.details.group is None:
545 self.logger.warn("Cannot update file object %s since group is none", fid)
546 else:
547 gid = self.file_obj.details.group.id.val
548 client_uuid = self.factory.ice_getIdentity().category[8:]
549 ctx = {"omero.group": str(gid), omero.constants.CLIENTUUID: client_uuid}
550 try:
551 rfs = self.factory.createRawFileStore(ctx)
552 try:
553 rfs.setFileId(fid, ctx)
554 if size:
555 rfs.truncate(size, ctx)
556 rfs.write([], size, 0, ctx)
557 else:
558 rfs.write([], 0, 0, ctx)
559 file_obj = rfs.save(ctx)
560 finally:
561 rfs.close(ctx)
562 self.logger.info("Updated file object %s to sha1=%s (%s bytes)",\
563 self.file_obj.id.val, file_obj.sha1.val, file_obj.size.val)
564 except:
565 self.logger.warn("Failed to update file object %s", self.file_obj.id.val, exc_info=1)
566
567
568
569 @remoted
570 @perf
        # Log the file id when available, "unknown" otherwise.
        msg = "unknown"
        if self.file_obj:
            if self.file_obj.id:
                msg = self.file_obj.id.val
        self.logger.info("%s.getOriginalFile() => id=%s", self, msg)
        return self.file_obj
578
579 @remoted
580 @perf
        # Delegate to the storage for the column definitions (no data read).
        rv = self.storage.cols(None, current)
        self.logger.info("%s.getHeaders() => size=%s", self, slen(rv))
        return rv
585
586 @remoted
587 @perf
        rv = self.storage.rows()
        self.logger.info("%s.getNumberOfRows() => %s", self, rv)
        # Slice API declares a long return type.
        return long(rv)
592
593 @remoted
594 @perf
595 - def getWhereList(self, condition, variables, start, stop, step, current = None):
596 variables = unwrap(variables)
597 if stop == 0:
598 stop = None
599 if step == 0:
600 step = None
601 rv = self.storage.getWhereList(self.stamp, condition, variables, None, start, stop, step)
602 self.logger.info("%s.getWhereList(%s, %s, %s, %s, %s) => size=%s", self, condition, variables, start, stop, step, slen(rv))
603 return rv
604
605 @remoted
606 @perf
        self.logger.info("%s.readCoordinates(size=%s)", self, slen(rowNumbers))
        try:
            return self.storage.readCoordinates(self.stamp, rowNumbers, current)
        except tables.HDF5ExtError, err:
            # HDF5-level failures here usually mean out-of-range coordinates.
            aue = omero.ApiUsageException()
            aue.message = "Error reading coordinates. Most likely out of range"
            aue.serverStackTrace = "".join(traceback.format_exc())
            aue.serverExceptionClass = str(err.__class__.__name__)
            raise aue
617
618 @remoted
619 @perf
620 - def read(self, colNumbers, start, stop, current = None):
621 self.logger.info("%s.read(%s, %s, %s)", self, colNumbers, start, stop)
622 try:
623 return self.storage.read(self.stamp, colNumbers, start, stop, current)
624 except tables.HDF5ExtError, err:
625 aue = omero.ApiUsageException()
626 aue.message = "Error reading coordinates. Most likely out of range"
627 aue.serverStackTrace = "".join(traceback.format_exc())
628 aue.serverExceptionClass = str(err.__class__.__name__)
629 raise aue
630
631 @remoted
632 @perf
633 - def slice(self, colNumbers, rowNumbers, current = None):
634 self.logger.info("%s.slice(size=%s, size=%s)", self, slen(colNumbers), slen(rowNumbers))
635 return self.storage.slice(self.stamp, colNumbers, rowNumbers, current)
636
637
638
639 @remoted
640 @perf
646
647 @remoted
648 @perf
652
653 @remoted
654 @perf
655 - def addData(self, cols, current = None):
661
662 @remoted
663 @perf
664 - def update(self, data, current = None):
669
670 @remoted
671 @perf
672 - def delete(self, current = None):
692
693
694
695
696 @remoted
697 @perf
703
704 @remoted
705 @perf
710
711 @remoted
712 @perf
717
718 @remoted
719 @perf
724
725
726
727 -class TablesI(omero.grid.Tables, omero.util.Servant):
728 """
729 Implementation of the omero.grid.Tables API. Provides
730 spreadsheet like functionality across the OMERO.grid.
731 This servant serves as a session-less, user-less
732 resource for obtaining omero.grid.Table proxies.
733
734 The first major step in initialization is getting
735 a session. This will block until the Blitz server
736 is reachable.
737 """
738
739 - def __init__(self,\
740 ctx,\
741 table_cast = omero.grid.TablePrx.uncheckedCast,\
742 internal_repo_cast = omero.grid.InternalRepositoryPrx.checkedCast):
743
744 omero.util.Servant.__init__(self, ctx, needs_session = True)
745
746
747
748 self._table_cast = table_cast
749 self._internal_repo_cast = internal_repo_cast
750
751 self.__stores = []
752 self._get_dir()
753 self._get_uuid()
754 self._get_repo()
755
757 """
758 Second step in initialization is to find the .omero/repository
759 directory. If this is not created, then a required server has
760 not started, and so this instance will not start.
761 """
762 wait = int(self.communicator.getProperties().getPropertyWithDefault("omero.repo.wait", "1"))
763 self.repo_dir = self.communicator.getProperties().getProperty("omero.repo.dir")
764
765 if not self.repo_dir:
766
767 self.repo_dir = self.ctx.getSession().getConfigService().getConfigValue("omero.data.dir")
768
769 self.repo_cfg = path(self.repo_dir) / ".omero" / "repository"
770 start = time.time()
771 while not self.repo_cfg.exists() and wait < (time.time() - start):
772 self.logger.info("%s doesn't exist; waiting 5 seconds..." % self.repo_cfg)
773 self.stop_event.wait(5)
774 if not self.repo_cfg.exists():
775 msg = "No repository found: %s" % self.repo_cfg
776 self.logger.error(msg)
777 raise omero.ResourceError(None, None, msg)
778
780 """
781 Third step in initialization is to find the database uuid
782 for this grid instance. Multiple OMERO.grids could be watching
783 the same directory.
784 """
785 cfg = self.ctx.getSession().getConfigService()
786 self.db_uuid = cfg.getDatabaseUuid()
787 self.instance = self.repo_cfg / self.db_uuid
788
790 """
791 Fourth step in initialization is to find the repository object
792 for the UUID found in .omero/repository/<db_uuid>, and then
793 create a proxy for the InternalRepository attached to that.
794 """
795
796
797 self.repo_uuid = (self.instance / "repo_uuid").lines()[0].strip()
798 if len(self.repo_uuid) != 38:
799 raise omero.ResourceError("Poorly formed UUID: %s" % self.repo_uuid)
800 self.repo_uuid = self.repo_uuid[2:]
801
802
803 self.repo_obj = self.ctx.getSession().getQueryService().findByQuery("select f from OriginalFile f where sha1 = :uuid",
804 omero.sys.ParametersI().add("uuid", rstring(self.repo_uuid)))
805 self.repo_mgr = self.communicator.stringToProxy("InternalRepository-%s" % self.repo_uuid)
806 self.repo_mgr = self._internal_repo_cast(self.repo_mgr)
807 self.repo_svc = self.repo_mgr.getProxy()
808
809 @remoted
811 """
812 Returns the Repository object for this Tables server.
813 """
814 return self.repo_svc
815
816 @remoted
817 @perf
818 - def getTable(self, file_obj, factory, current = None):
819 """
820 Create and/or register a table servant.
821 """
822
823
824 file_id = None
825 if file_obj is not None and file_obj.id is not None:
826 file_id = file_obj.id.val
827 self.logger.info("getTable: %s %s", file_id, current.ctx)
828
829 file_path = self.repo_mgr.getFilePath(file_obj)
830 p = path(file_path).dirname()
831 if not p.exists():
832 p.makedirs()
833
834 storage = HDFLIST.getOrCreate(file_path)
835 id = Ice.Identity()
836 id.name = Ice.generateUUID()
837 table = TableI(self.ctx, file_obj, factory, storage, uuid = id.name)
838 self.resources.add(table)
839
840 prx = current.adapter.add(table, id)
841 return self._table_cast(prx)
842